"""Fit a straight line y = a*x + b to normalized year/price data using
manually derived gradient-descent updates in a TensorFlow 1.x dataflow graph.

The gradients of the 0.5 * sum((y_pred - y)^2) loss are computed by hand
(grad_a, grad_b) rather than via tf.gradients, as a teaching exercise.
"""

import numpy as np
import tensorflow as tf

# Raw observations: year vs. price.
X_raw = np.array([2013, 2014, 2015, 2016, 2017])
y_raw = np.array([12000, 14000, 15000, 16500, 17500])

# Min-max normalize both series to [0, 1] so a single learning rate
# works for both the slope and the intercept.
X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

# ---- Graph definition (TF1 style: nothing is computed here, we only
# ---- declare the dataflow graph; computation happens inside the Session) ----
learning_rate_ = tf.placeholder(dtype=tf.float32)
# shape=[None] generalizes the original fixed shape=[5]: the same graph
# now accepts any number of samples while remaining compatible with the
# 5-element feeds below.
X_ = tf.placeholder(dtype=tf.float32, shape=[None])
y_ = tf.placeholder(dtype=tf.float32, shape=[None])
a = tf.get_variable('a', dtype=tf.float32, shape=[],
                    initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[],
                    initializer=tf.zeros_initializer)

y_pred = a * X_ + b
# Loss is defined for reference only; the update ops below use the
# hand-derived gradients directly instead of differentiating `loss`.
loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

# Backpropagation: hand-derived gradients of the loss w.r.t. a and b.
# d(loss)/da = sum((y_pred - y) * x),  d(loss)/db = sum(y_pred - y)
grad_a = tf.reduce_sum((y_pred - y_) * X_)
grad_b = tf.reduce_sum(y_pred - y_)

# Manual parameter update: one plain gradient-descent step per run.
new_a = a - learning_rate_ * grad_a
new_b = b - learning_rate_ * grad_b
update_a = tf.assign(a, new_a)
update_b = tf.assign(b, new_b)
# ---- End of graph definition ----

num_epoch = 10000
learning_rate = 1e-3
with tf.Session() as sess:
    # Initialize variables a and b to zero.
    tf.global_variables_initializer().run()
    # Repeatedly feed the data into the graph to run the update ops.
    for e in range(num_epoch):
        sess.run([update_a, update_b],
                 feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
    print(sess.run([a, b]))