from PythonInterface import Python

let pathlib = Python.import_module("pathlib") # Python standard library
let gzip = Python.import_module("gzip") # Python standard library
let pickle = Python.import_module("pickle") # Python standard library
let np = Python.import_module("numpy")
🔥 Dataloader
We will work with the MNIST dataset. The goal is to read the data and load it into a Matrix struct.
Get the data
path_gz = pathlib.Path('./lost+found/data/mnist.pkl.gz')
f = gzip.open(path_gz, 'rb')
u = pickle._Unpickler(f)
u.encoding = 'latin1'
data = u.load()

data_train = data[0]
data_valid = data[1]

x_train = data_train[0]
y_train = data_train[1]
y_train = np.expand_dims(y_train, 1)

x_valid = data_valid[0]
y_valid = data_valid[1]
y_valid = np.expand_dims(y_valid, 1)

f.close()
Look at the data
print(x_train[0].shape)
(784,)
let mpl = Python.import_module("matplotlib")
img = np.reshape(x_train[0], (28,28))
mpl.pyplot.imshow(img, 'gray')
mpl.pyplot.show()
So now we have the data as numpy arrays. The next step is to see how we can get it into a Matrix.
Matrix
The implementation of the Matrix struct below is taken from the Mojo documentation.
from DType import DType
from Memory import memset_zero
from Object import object, Attr
from Pointer import DTypePointer, Pointer
from Random import rand
from Range import range
from TargetInfo import dtype_sizeof
struct Matrix[type: DType]:
    var data: DTypePointer[type]
    var rows: Int
    var cols: Int

    fn __init__(inout self, rows: Int, cols: Int):
        self.data = DTypePointer[type].alloc(rows * cols)
        rand(self.data, rows*cols) # fill the fresh buffer with random values
        self.rows = rows
        self.cols = cols

    fn __copyinit__(inout self, other: Self):
        self.data = other.data
        self.rows = other.rows
        self.cols = other.cols

    fn __del__(owned self):
        self.data.free()

    fn zero(inout self):
        memset_zero(self.data, self.rows * self.cols)

    @always_inline
    fn __getitem__(self, y: Int, x: Int) -> SIMD[type, 1]:
        return self.load[1](y, x)

    @always_inline
    fn load[nelts: Int](self, y: Int, x: Int) -> SIMD[type, nelts]:
        # row-major layout: element (y, x) sits at offset y * cols + x
        return self.data.simd_load[nelts](y * self.cols + x)

    @always_inline
    fn __setitem__(self, y: Int, x: Int, val: SIMD[type, 1]):
        return self.store[1](y, x, val)

    @always_inline
    fn store[nelts: Int](self, y: Int, x: Int, val: SIMD[type, nelts]):
        self.data.simd_store[nelts](y * self.cols + x, val)
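To get a feel for the struct, here is a minimal usage sketch (written against the same early Mojo syntax as the rest of this post; the variable m is just for illustration):

let m = Matrix[DType.float32](2, 4) # allocated and filled with random values
m.zero()                            # overwrite everything with zeros
m[0, 3] = 7.0                       # scalar store via __setitem__
print(m[0, 3])                      # scalar load via __getitem__
print(m.load[4](0, 0))              # SIMD load of 4 contiguous values from row 0

Because the layout is row-major, load and store can only grab contiguous values within a single row.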
A Mojo dataloader
We need a way of getting a batch of samples. Since Mojo doesn't support yield or anything similar, we will resort to a more rustic implementation: a plain function that copies the requested batch into a preallocated Matrix.
let bs: Int = 8 # batch-size
let ni: Int = x_train.shape[1].__index__() # 28*28
let xb: Matrix[DType.float32] = Matrix[DType.float32](bs, ni) # x batch
let yb: Matrix[DType.float32] = Matrix[DType.float32](bs, 1) # y batch
xb.zero()
yb.zero()
fn matrix_dataloader[type: DType](a: PythonObject, o: Matrix[type], bs: Int, bindex: Int) raises:
    # copy batch number bindex from the numpy array a into the Matrix o
    for i in range(bindex*bs, (bindex+1)*bs):
        for j in range(o.cols):
            o[i-bindex*bs, j] = a[i][j].to_float64().cast[type]()

matrix_dataloader(x_train, xb, bs, 0)
matrix_dataloader(y_train, yb, bs, 0)
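In a training loop we would then just step bindex through the dataset one batch at a time. A rough sketch of what that could look like (n and nb are hypothetical helpers, and note that the trailing n % bs samples are simply dropped):

let n: Int = x_train.shape[0].__index__() # number of training samples
let nb: Int = n // bs # number of full batches
for bindex in range(nb):
    matrix_dataloader(x_train, xb, bs, bindex)
    matrix_dataloader(y_train, yb, bs, bindex)
    # ... the forward/backward pass over (xb, yb) would go here ...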
Let's check a few entries to confirm that worked as expected.
print(yb.load[8](0,0))
[5.0, 0.0, 4.0, 1.0, 9.0, 2.0, 1.0, 3.0]
for row in range(0,8,1):
print(y_train[row])
[5]
[0]
[4]
[1]
[9]
[2]
[1]
[3]
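The same spot check works for the image batch; the pixel index 350 below is picked arbitrarily, and both prints should show the same value (up to float formatting):

print(xb[0, 350])
print(x_train[0][350])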
Looks good. Now we have a way of getting a batch of data. Next, we'll build a linear layer based on matmul.