
Commit 478f32b

Merge branch 'master' of git@github.com:snowkylin/TensorFlow-cn.git
2 parents 18384a7 + 9fd032d commit 478f32b

10 files changed, +34 −34 lines changed

source/_static/code/en/basic/example/tensorflow_autograd.py

+2 −2

@@ -17,7 +17,7 @@
 y_pred = a * X_ + b
 loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

-# 反向传播,利用TensorFlow的梯度下降优化器自动计算并更新变量(模型参数)的梯度
+# Back propagation: use TensorFlow's GradientDescentOptimizer to automatically compute and update the gradients of the variables (model parameters)
 train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_).minimize(loss)

 num_epoch = 10000
@@ -26,4 +26,4 @@
     tf.global_variables_initializer().run()
     for e in range(num_epoch):
         sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
-    print(sess.run([a, b]))
+    print(sess.run([a, b]))
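For reference, here is a minimal, self-contained sketch of how the full tensorflow_autograd.py example plausibly fits together in TF 1.x graph mode. Only the lines shown in the hunks above come from the file itself; the raw data, its normalization, and the definitions of a and b are assumptions added for illustration.

import numpy as np
import tensorflow as tf

# Assumed training data (not shown in the diff): five (x, y) pairs, scaled to [0, 1]
X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)
X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

# Define the data flow graph
learning_rate_ = tf.placeholder(dtype=tf.float32)
X_ = tf.placeholder(dtype=tf.float32, shape=[5])
y_ = tf.placeholder(dtype=tf.float32, shape=[5])
a = tf.get_variable('a', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
y_pred = a * X_ + b
loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

# Back propagation: the optimizer computes and applies the gradients automatically
train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_).minimize(loss)

num_epoch = 10000
learning_rate = 1e-3
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for e in range(num_epoch):
        sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
    print(sess.run([a, b]))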

source/_static/code/en/basic/example/tensorflow_eager.py

+4 −4

@@ -18,15 +18,15 @@
 num_epoch = 10000
 learning_rate = 1e-3
 for e in range(num_epoch):
-    # 前向传播
+    # Forward propagation
     y_pred = a * X + b
     loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))  # loss = 0.5 * np.sum(np.square(a * X + b - y))

-    # 反向传播,手动计算变量(模型参数)的梯度
+    # Back propagation: manually compute the gradients of the variables (model parameters)
     grad_a = tf.reduce_sum((y_pred - y) * X)
     grad_b = tf.reduce_sum(y_pred - y)

-    # 更新参数
+    # Update the parameters
     a, b = a - learning_rate * grad_a, b - learning_rate * grad_b

-print(a, b)
+print(a, b)
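The hunk above shows only the training loop of tensorflow_eager.py. As a rough sketch, the setup it presumably relies on looks something like the following (recent TF 1.x with eager execution); the data values and the initial a, b are assumptions for illustration, not part of the commit.

import numpy as np
import tensorflow as tf

tf.enable_eager_execution()  # eager mode: operations run immediately, no Session needed

# Assumed data, scaled to [0, 1] (not shown in the diff)
X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)
X = tf.constant((X_raw - X_raw.min()) / (X_raw.max() - X_raw.min()))
y = tf.constant((y_raw - y_raw.min()) / (y_raw.max() - y_raw.min()))

a, b = tf.constant(0.), tf.constant(0.)  # plain tensors, re-bound on every update below

num_epoch = 10000
learning_rate = 1e-3
for e in range(num_epoch):
    y_pred = a * X + b                        # forward pass
    grad_a = tf.reduce_sum((y_pred - y) * X)  # hand-derived gradients of the squared-error loss
    grad_b = tf.reduce_sum(y_pred - y)
    a, b = a - learning_rate * grad_a, b - learning_rate * grad_b
print(a, b)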

source/_static/code/en/basic/example/tensorflow_manual_grad.py

+8 −8

@@ -8,7 +8,7 @@

 import tensorflow as tf

-# 定义数据流图
+# Define the data flow graph
 learning_rate_ = tf.placeholder(dtype=tf.float32)
 X_ = tf.placeholder(dtype=tf.float32, shape=[5])
 y_ = tf.placeholder(dtype=tf.float32, shape=[5])
@@ -18,26 +18,26 @@
 y_pred = a * X_ + b
 loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

-# 反向传播,手动计算变量(模型参数)的梯度
+# Back propagation: manually compute the gradients of the variables (model parameters)
 grad_a = tf.reduce_sum((y_pred - y_) * X_)
 grad_b = tf.reduce_sum(y_pred - y_)

-# 梯度下降法,手动更新参数
+# Gradient descent: manually update the parameters
 new_a = a - learning_rate_ * grad_a
 new_b = b - learning_rate_ * grad_b
 update_a = tf.assign(a, new_a)
 update_b = tf.assign(b, new_b)

 train_op = [update_a, update_b]
-# 数据流图定义到此结束
-# 注意,直到目前,我们都没有进行任何实质的数据计算,仅仅是定义了一个数据流图
+# End of the data flow graph definition
+# Note that up to this point we have not performed any actual computation; we have only defined a data flow graph

 num_epoch = 10000
 learning_rate = 1e-3
 with tf.Session() as sess:
-    # 初始化变量a和b
+    # Initialize the variables a and b
     tf.global_variables_initializer().run()
-    # 循环将数据送入上面建立的数据流图中进行计算和更新变量
+    # Repeatedly feed data into the data flow graph built above to compute and update the variables
     for e in range(num_epoch):
         sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
-    print(sess.run([a, b]))
+    print(sess.run([a, b]))
@@ -1,10 +1,10 @@
 import tensorflow as tf

-# 定义一个“计算图”
-a = tf.constant(1)  # 定义一个常量Tensor(张量)
+# Define a "computation graph"
+a = tf.constant(1)  # Define a constant Tensor
 b = tf.constant(1)
-c = a + b  # 等价于 c = tf.add(a, b),c是张量a和张量b通过Add这一Operation(操作)所形成的新张量
+c = a + b  # Equivalent to c = tf.add(a, b); c is a new Tensor produced by applying the Add operation to Tensors a and b

-sess = tf.Session()  # 实例化一个Session(会话)
-c_ = sess.run(c)  # 通过Session的run()方法对计算图里的节点(张量)进行实际的计算
-print(c_)
+sess = tf.Session()  # Instantiate a Session
+c_ = sess.run(c)  # The run() method of a Session performs the actual computation of the nodes (Tensors) in the computation graph
+print(c_)
@@ -1,9 +1,9 @@
 import tensorflow as tf

-A = tf.ones(shape=[2, 3])  # tf.ones(shape)定义了一个形状为shape的全1矩阵
+A = tf.ones(shape=[2, 3])  # tf.ones(shape) defines an all-ones matrix of the given shape
 B = tf.ones(shape=[3, 2])
 C = tf.matmul(A, B)

 sess = tf.Session()
 C_ = sess.run(C)
-print(C_)
+print(C_)
@@ -1,12 +1,12 @@
 import tensorflow as tf

-a = tf.placeholder(dtype=tf.int32)  # 定义一个占位符Tensor
+a = tf.placeholder(dtype=tf.int32)  # Define a placeholder Tensor
 b = tf.placeholder(dtype=tf.int32)
 c = a + b

-a_ = input("a = ")  # 从终端读入一个整数并放入变量a_
+a_ = input("a = ")  # Read an integer from the terminal and store it in the variable a_
 b_ = input("b = ")

 sess = tf.Session()
-c_ = sess.run(c, feed_dict={a: a_, b: b_})  # feed_dict参数传入为了计算c所需要的张量的值
-print("a + b = %d" % c_)
+c_ = sess.run(c, feed_dict={a: a_, b: b_})  # The feed_dict argument supplies the Tensor values needed to compute c
+print("a + b = %d" % c_)
@@ -1,13 +1,13 @@
 import tensorflow as tf

 a = tf.get_variable(name='a', shape=[])
-initializer = tf.assign(a, 0)  # tf.assign(x, y)返回一个“将张量y的值赋给变量x”的操作
-a_plus_1 = a + 1  # 等价于 a + tf.constant(1)
+initializer = tf.assign(a, 0)  # tf.assign(x, y) returns an operation that assigns the value of Tensor y to the variable x
+a_plus_1 = a + 1  # Equivalent to a + tf.constant(1)
 plus_one_op = tf.assign(a, a_plus_1)

 sess = tf.Session()
 sess.run(initializer)
 for i in range(5):
-    sess.run(plus_one_op)  # 对变量a执行加一操作
-    a_ = sess.run(a)  # 获得变量a的值并存入a_
+    sess.run(plus_one_op)  # Apply the plus-one operation to the variable a
+    a_ = sess.run(a)  # Fetch the value of the variable a and store it in a_
     print(a_)

source/_static/code/en/basic/graph/variable_with_initializer.py

+2 −2

@@ -1,11 +1,11 @@
 import tensorflow as tf

-a = tf.get_variable(name='a', shape=[], initializer=tf.zeros_initializer)  # 指定初始化器为全0初始化
+a = tf.get_variable(name='a', shape=[], initializer=tf.zeros_initializer)  # Specify an all-zeros initializer
 a_plus_1 = a + 1
 plus_one_op = tf.assign(a, a_plus_1)

 sess = tf.Session()
-sess.run(tf.global_variables_initializer())  # 初始化所有变量
+sess.run(tf.global_variables_initializer())  # Initialize all variables
 for i in range(5):
     sess.run(plus_one_op)
     a_ = sess.run(a)

source/en/models.rst

+1 −1

@@ -113,7 +113,7 @@ The specific implementation is as follows, which is very similar to MLP except s
 Figure of the CNN structure

 .. literalinclude:: ../_static/code/en/model/cnn/cnn.py
-    :lines: 4-37
+    :lines: 4-38

 By substituting ``model = MLP()`` in the last chapter with ``model = CNN()``, we get the following output::

source/zh/models.rst

+1 −1

@@ -113,7 +113,7 @@ TensorFlow模型
 CNN结构图示

 .. literalinclude:: ../_static/code/zh/model/cnn/cnn.py
-    :lines: 4-37
+    :lines: 4-38

 将前节的 ``model = MLP()`` 更换成 ``model = CNN()`` ,输出如下::
