🔥 Matmul -> Linear Layer

Let us put in the code from the previous notebook to do the imports and to load the data.

# Imports
from PythonInterface import Python

let pathlib = Python.import_module("pathlib") # Python standard library
let gzip = Python.import_module("gzip") # Python standard library
let pickle = Python.import_module("pickle") # Python standard library
let np = Python.import_module("numpy")

# Get the data
path_gz = pathlib.Path('./lost+found/data/mnist.pkl.gz')
f = gzip.open(path_gz, 'rb')
u = pickle._Unpickler(f)
u.encoding = 'latin1' # the MNIST pickle was written under Python 2
data = u.load()

data_train = data[0]
data_valid = data[1]

x_train = data_train[0]
y_train = data_train[1]
y_train = np.expand_dims(y_train, 1)

x_valid = data_valid[0]
y_valid = data_valid[1]
y_valid = np.expand_dims(y_valid, 1)
f.close()
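
As a quick sanity check (the shapes below assume the standard fastai mnist.pkl.gz split: 50,000 training and 10,000 validation images of 28*28 = 784 pixels):

print(x_train.shape) # (50000, 784)
print(y_train.shape) # (50000, 1)
print(x_valid.shape) # (10000, 784)
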
from DType import DType
from Memory import memset_zero
from Object import object, Attr
from Pointer import DTypePointer, Pointer
from Random import rand
from Range import range
from TargetInfo import dtype_sizeof

struct Matrix[type: DType]:
    var data: DTypePointer[type]
    var rows: Int
    var cols: Int

    fn __init__(inout self, rows: Int, cols: Int):
        # Allocate the backing buffer and fill it with random values.
        self.data = DTypePointer[type].alloc(rows * cols)
        rand(self.data, rows * cols)
        self.rows = rows
        self.cols = cols

    fn __copyinit__(inout self, other: Self):
        # Note: a shallow copy; both instances share the same data pointer.
        self.data = other.data
        self.rows = other.rows
        self.cols = other.cols

    fn __del__(owned self):
        self.data.free()

    fn zero(inout self):
        memset_zero(self.data, self.rows * self.cols)

    @always_inline
    fn __getitem__(self, y: Int, x: Int) -> SIMD[type, 1]:
        return self.load[1](y, x)

    @always_inline
    fn load[nelts: Int](self, y: Int, x: Int) -> SIMD[type, nelts]:
        return self.data.simd_load[nelts](y * self.cols + x)

    @always_inline
    fn __setitem__(self, y: Int, x: Int, val: SIMD[type, 1]):
        return self.store[1](y, x, val)

    @always_inline
    fn store[nelts: Int](self, y: Int, x: Int, val: SIMD[type, nelts]):
        self.data.simd_store[nelts](y * self.cols + x, val)
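
A minimal sketch of how the scalar and SIMD accessors pair up (the values are just for illustration):

var m = Matrix[DType.float32](2, 4)
m.zero()
m[0, 3] = 7.0 # scalar store via __setitem__
print(m[0, 3]) # scalar load via __getitem__ -> 7.0
m.store[4](1, 0, m.load[4](0, 0)) # move 4 contiguous elements of row 0 into row 1
print(m[1, 3]) # -> 7.0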

from PythonObject import PythonObject

fn matrix_dataloader[type: DType](a: PythonObject, o: Matrix[type], bs: Int) raises:
    # Copy the first bs rows of the NumPy array a into the Matrix o.
    for i in range(bs):
        for j in range(o.cols):
            o[i, j] = a[i][j].to_float64().cast[type]()
let bs: Int = 5 # batch size
let ni: Int = x_train.shape[1].to_index() # 28*28 = 784 input features

# xb and yb must be var (not let), since zero() takes inout self.
var xb: Matrix[DType.float32] = Matrix[DType.float32](bs, ni)
var yb: Matrix[DType.float32] = Matrix[DType.float32](bs, 1)
xb.zero()
yb.zero()

matrix_dataloader(x_train, xb, bs)
matrix_dataloader(y_train, yb, bs)
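
To verify the copy, we can spot-check one pixel against the NumPy source; the two prints below should agree up to float32 rounding (pixel index 250 is an arbitrary choice):

print(xb[0, 250])
print(x_train[0][250])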

Linear layer from foundations

A linear layer is nothing but a matrix multiplication (of the activations with the weights) followed by a vector addition (of the bias term).
So the basic idea here is to use the matmul example functions from the Modular website as a starting point and add the bias term to them.
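
In index notation, for a batch xb of shape (bs, ni), weights w of shape (ni, no), and a bias b of length no:

res[i, k] = sum_j xb[i, j] * w[j, k] + b[k]

Before vectorizing, a naive version (written here in the style of the matmul_naive function from the Modular example; lin_naive is our own name) makes this explicit:

fn lin_naive[type: DType](xb: Matrix[type], w: Matrix[type], b: Matrix[type], res: Matrix[type]):
    for i in range(xb.rows):
        for k in range(w.cols):
            for j in range(xb.cols):
                res[i, k] += xb[i, j] * w[j, k]
            res[i, k] += b[k, 0] # add the bias once per output element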

let no: Int = 10 # number of outputs (one per digit class)
var w: Matrix[DType.float32] = Matrix[DType.float32](ni, no) # weights (randomly initialized by __init__)
var b: Matrix[DType.float32] = Matrix[DType.float32](no, 1) # bias
b.zero()
var res = Matrix[DType.float32](xb.rows, w.cols) # result
res.zero()
from TargetInfo import dtype_sizeof, dtype_simd_width
from Functional import vectorize

alias nelts = dtype_simd_width[DType.float32]() # The SIMD vector width.
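
nelts is machine-dependent; for float32 it is, e.g., 8 on an AVX2 machine and 16 with AVX-512:

print(nelts)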

fn lin_vectorized[type: DType](xb: Matrix[type], w: Matrix[type], b: Matrix[type], res: Matrix[type]) raises:
    for i in range(xb.rows): # bs rows in the batch
        for j in range(xb.cols): # ni = 784 input features
            @parameter
            fn dot[nelts: Int](k: Int):
                # Broadcast xb[i, j] across a SIMD vector of row j of the weights.
                res.store[nelts](i, k, res.load[nelts](i, k) + xb[i, j] * w.load[nelts](j, k))
            vectorize[nelts, dot](w.cols)
        @parameter
        fn add_bias[nelts: Int](k: Int):
            # Add the bias once per row; inside the j-loop it would be added xb.cols times.
            res.store[nelts](i, k, res.load[nelts](i, k) + b.load[nelts](k, 0))
        vectorize[nelts, add_bias](w.cols)
res.zero()
lin_vectorized(xb, w, b, res)
print(res.rows)
print(res.cols)
5
10
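
As a check that the vectorized kernel matches the naive one, we can compare the two results element-wise (a minimal sketch; the manual abs avoids assuming any math imports):

var res_naive = Matrix[DType.float32](xb.rows, w.cols)
res_naive.zero()
lin_naive(xb, w, b, res_naive)

var max_diff: SIMD[DType.float32, 1] = 0
for i in range(res.rows):
    for k in range(res.cols):
        var d = res[i, k] - res_naive[i, k]
        if d < 0:
            d = -d
        if d > max_diff:
            max_diff = d
print(max_diff) # should print 0.0 (or something very close)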