import tensorflow as tf
- # 定义数据流图
+ # Define the data flow graph
learning_rate_ = tf.placeholder(dtype=tf.float32)
X_ = tf.placeholder(dtype=tf.float32, shape=[5])
y_ = tf.placeholder(dtype=tf.float32, shape=[5])
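# NOTE: the diff elides a few lines here (see the jump in the hunk's line
# numbers), which presumably define the training data X, y and the variables
# a, b used below. A minimal sketch of that elided setup, assuming numpy
# arrays and zero-initialized scalar variables -- the concrete values and
# initializers are assumptions, not part of this commit:
import numpy as np
X = np.array([0., 0.25, 0.5, 0.75, 1.], dtype=np.float32)  # hypothetical inputs
y = np.array([0., 0.5, 1., 1.5, 2.], dtype=np.float32)     # hypothetical targets
a = tf.get_variable('a', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)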
y_pred = a * X_ + b
loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))
- # 反向传播,手动计算变量(模型参数)的梯度
+ # Back propagation: manually compute the gradients of the variables (model parameters)
grad_a = tf.reduce_sum((y_pred - y_) * X_)
grad_b = tf.reduce_sum(y_pred - y_)
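# Why these formulas: with loss = 0.5 * sum((y_pred - y_)^2) and y_pred = a * X_ + b,
# the chain rule gives d(loss)/da = sum((y_pred - y_) * X_) and
# d(loss)/db = sum(y_pred - y_), which is exactly what grad_a and grad_b compute
# (the 0.5 factor cancels the 2 from differentiating the square).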
- # 梯度下降法,手动更新参数
+ # Gradient descent: manually update the parameters
new_a = a - learning_rate_ * grad_a
new_b = b - learning_rate_ * grad_b
update_a = tf.assign(a, new_a)
update_b = tf.assign(b, new_b)
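# tf.assign(ref, value) returns an op that writes value into the variable when
# it is run, so a and b only actually change when update_a / update_b are
# executed inside a session.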
train_op = [update_a, update_b]
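# Passing the list [update_a, update_b] to sess.run executes both assignments
# in a single call. As an aside, under TF 1.x the same gradients could also be
# derived from the graph automatically instead of by hand, e.g. (a sketch, not
# part of this commit):
#     grad_a, grad_b = tf.gradients(loss, [a, b])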
- # 数据流图定义到此结束
- # 注意,直到目前,我们都没有进行任何实质的数据计算,仅仅是定义了一个数据图
+ # End of the data flow graph definition
+ # Note: up to this point we have not performed any actual computation; we have only defined a data flow graph
num_epoch = 10000
learning_rate = 1e-3
with tf.Session() as sess:
-     # 初始化变量a和b
+     # Initialize variables a and b
    tf.global_variables_initializer().run()
-     # 循环将数据送入上面建立的数据流图中进行计算和更新变量
+     # Feed the data into the data flow graph built above to compute and update the variables
    for e in range(num_epoch):
        sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
-     print(sess.run([a, b]))
+     print(sess.run([a, b]))
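# After the 10000 epochs above, the printed a and b should approximate the
# least-squares fit of y on X; the exact values depend on the (elided) data.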