A Simple Forward-Propagation Exercise

Forward propagation

  • out = relu(relu(relu(X@W1 + b1)@W2 + b2)@W3 + b3)  (a short code sketch follows this list)

  • pred = argmax(out)

  • loss = MSE(out, label)

  • minimize loss to update [W1', b1', W2', b2', W3', b3']
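
The bullets above translate almost line-for-line into code. Below is a minimal sketch of that forward pass as a standalone function (the name forward and the idea of passing the weights in as arguments are just for illustration here; the training code later in this post inlines the same computation):

import tensorflow as tf

def forward(x, w1, b1, w2, b2, w3, b3):
    # x: [b, 784] => h1: [b, 256] => h2: [b, 128] => out: [b, 10]
    h1 = tf.nn.relu(x @ w1 + b1)
    h2 = tf.nn.relu(h1 @ w2 + b2)
    out = h2 @ w3 + b3                 # logits of the last layer, no activation
    pred = tf.argmax(out, axis=1)      # pred = argmax(out)
    return out, pred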

Building the dataset

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import datasets
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# x: [60k, 28, 28],
# y: [60k]
(x, y), _ = datasets.mnist.load_data()
# x: [0~255] => [0~1.]
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)

print(x.shape, y.shape, x.dtype, y.dtype)
print(tf.reduce_min(x), tf.reduce_max(x))
print(tf.reduce_min(y), tf.reduce_max(y))

# build the dataset
train_db = tf.data.Dataset.from_tensor_slices((x,y)).batch(128)
train_iter = iter(train_db)  # iterator, so we can call next() on the dataset
sample = next(train_iter)
print('batch:', sample[0].shape, sample[1].shape)
print(sample)
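
One optional refinement, not in the original snippet: shuffle before batching so each epoch draws the samples in a different order. A minimal sketch (the buffer size 10000 is an arbitrary choice here):

train_db = tf.data.Dataset.from_tensor_slices((x, y)).shuffle(10000).batch(128)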
# [b, 784] => [b, 256] => [b, 128] => [b, 10]
# [dim_in, dim_out], [dim_out]
# wrap the parameters in tf.Variable, because tape.gradient only tracks tf.Variable by default
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))

lr = 1e-3

for epoch in range(10): # iterate over the dataset for 10 epochs
    for step, (x, y) in enumerate(train_db): # for every batch
        # x: [128, 28, 28]
        # y: [128]

        # [b, 28, 28] => [b, 28*28]
        x = tf.reshape(x, [-1, 28*28])
        # the forward pass goes inside GradientTape so it can be differentiated
        with tf.GradientTape() as tape: # tracks tf.Variable
            # x: [b, 28*28]
            # h1 = x@w1 + b1
            # [b, 784]@[784, 256] + [256] => [b, 256] + [256] => [b, 256] + [b, 256]
            h1 = x@w1 + tf.broadcast_to(b1, [x.shape[0], 256])
            h1 = tf.nn.relu(h1)
            # [b, 256] => [b, 128]
            h2 = h1@w2 + b2
            h2 = tf.nn.relu(h2)
            # [b, 128] => [b, 10]
            out = h2@w3 + b3

            # compute loss
            # out: [b, 10]
            # y: [b] => [b, 10]
            y_onehot = tf.one_hot(y, depth=10)

            # mse = mean(sum((y - out)^2))
            # [b, 10]
            loss = tf.square(y_onehot - out)
            # mean: scalar
            loss = tf.reduce_mean(loss)

        # compute gradients for the parameters we want to update
        # (by default the tape only tracks tf.Variable objects)
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        # print(grads)
        # w1 = w1 - lr * w1_grad
        # note: writing w1 = w1 - lr * grads[0] returns a plain tf.Tensor, not a tf.Variable;
        # assign_sub updates in place and keeps the Variable type. If you prefer the plain
        # subtraction, you would have to re-wrap each result in tf.Variable before the next step.
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])

        if step % 100 == 0:
            print(epoch, step, 'loss:', float(loss))
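
The pred = argmax(out) step from the list at the top is not needed during training, but it is how the trained network would be evaluated. A minimal sketch, assuming the MNIST test split is kept instead of being discarded (i.e. (x, y), (x_test, y_test) = datasets.mnist.load_data() above) and preprocessed the same way:

# hypothetical evaluation code; x_test / y_test are assumed to come from the test split
x_test = tf.reshape(tf.convert_to_tensor(x_test, dtype=tf.float32) / 255., [-1, 28*28])
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)

h1 = tf.nn.relu(x_test @ w1 + b1)
h2 = tf.nn.relu(h1 @ w2 + b2)
out = h2 @ w3 + b3
pred = tf.cast(tf.argmax(out, axis=1), tf.int32)    # pred = argmax(out)
acc = tf.reduce_mean(tf.cast(tf.equal(pred, y_test), tf.float32))
print('test acc:', float(acc))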