
Commit 5e71979: 0.3 alpha
1 parent af0b248


68 files changed (+10943, -0 lines)

.gitignore

+3
@@ -1,6 +1,9 @@
 .vscode
+book
 build
 reference
 teaching
 workspace.code-workspace
+source/.vscode
+source/_future
 source/_static/code/.idea

make_singlehtml.bat

+1
@@ -0,0 +1 @@
make singlehtml

+14
@@ -0,0 +1,14 @@
import tensorflow as tf
tf.enable_eager_execution()

a = tf.constant(1)
b = tf.constant(1)
c = tf.add(a, b)    # can also be written as c = a + b; the two are equivalent

print(c)

A = tf.constant([[1, 2], [3, 4]])
B = tf.constant([[5, 6], [7, 8]])
C = tf.matmul(A, B)

print(C)
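
For reference, the two printed results can be checked by hand: c = 1 + 1 = 2, and C = AB = [[1*5 + 2*7, 1*6 + 2*8], [3*5 + 4*7, 3*6 + 4*8]] = [[19, 22], [43, 50]].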

+17
@@ -0,0 +1,17 @@
import tensorflow as tf
tf.enable_eager_execution()

x = tf.get_variable('x', shape=[1], initializer=tf.constant_initializer(3.))
with tf.GradientTape() as tape:     # every computation step inside the tf.GradientTape() context is recorded for differentiation
    y = tf.square(x)
y_grad = tape.gradient(y, x)        # compute the derivative of y with respect to x
print([y.numpy(), y_grad.numpy()])

X = tf.constant([[1., 2.], [3., 4.]])
y = tf.constant([[1.], [2.]])
w = tf.get_variable('w', shape=[2, 1], initializer=tf.constant_initializer([[1.], [2.]]))
b = tf.get_variable('b', shape=[1], initializer=tf.constant_initializer([1.]))
with tf.GradientTape() as tape:
    L = 0.5 * tf.reduce_sum(tf.square(tf.matmul(X, w) + b - y))
w_grad, b_grad = tape.gradient(L, [w, b])        # compute the partial derivatives of L(w, b) with respect to w and b
print([L.numpy(), w_grad.numpy(), b_grad.numpy()])
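
A quick hand check of what the tape should return for the values above: in the first block, y = x^2 gives dy/dx = 2x, so at x = 3 the print shows y = 9.0 and y_grad = 6.0. In the second block, with L = 0.5 * sum((Xw + b - y)^2), the partial derivatives are

    dL/dw = X^T (Xw + b - y)
    dL/db = sum(Xw + b - y)

which for the constants above evaluate to L = 62.5, w_grad = [[35.], [50.]] and b_grad = [15.].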

@@ -0,0 +1,19 @@
import tensorflow as tf
tf.enable_eager_execution()

X = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
y = tf.constant([[10.0], [20.0]])

w = tf.get_variable('w', shape=[3, 1], initializer=tf.zeros_initializer())
b = tf.get_variable('b', shape=[1], initializer=tf.zeros_initializer())
variables = [w, b]

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

for i in range(100):
    with tf.GradientTape() as tape:
        y_pred = tf.matmul(X, w) + b
        loss = tf.reduce_mean(tf.square(y_pred - y))
    grads = tape.gradient(loss, variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, variables))
print(variables)
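
To sanity-check the fit, one might append a line such as the following to the script above (an optional addition, not part of the original file):

print(tf.matmul(X, w) + b)    # predictions after 100 gradient steps, to compare against y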

+21
@@ -0,0 +1,21 @@
import numpy as np

X_raw = np.array([2013, 2014, 2015, 2016, 2017])
y_raw = np.array([12000, 14000, 15000, 16500, 17500])

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

a, b = 0, 0

num_epoch = 10000
learning_rate = 1e-3
for e in range(num_epoch):
    # manually compute the gradients of the loss with respect to the parameters
    y_pred = a * X + b
    grad_a, grad_b = (y_pred - y).dot(X), (y_pred - y).sum()

    # update the parameters
    a, b = a - learning_rate * grad_a, b - learning_rate * grad_b

print(a, b)
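
The two gradient expressions inside the loop follow from the implicit loss L(a, b) = 0.5 * sum_i (a*x_i + b - y_i)^2:

    dL/da = sum_i (a*x_i + b - y_i) * x_i
    dL/db = sum_i (a*x_i + b - y_i)

which is exactly what (y_pred - y).dot(X) and (y_pred - y).sum() compute.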

@@ -0,0 +1,38 @@
a = 0
b = 0

def f(x):
    y_pred = a * x + b
    return y_pred

def loss(x, y):
    l = (a * x + b - y) ** 2
    return l

def gradient_loss(x, y):
    g_a = 2 * (a * x + b - y) * x
    g_b = 2 * (a * x + b - y)
    return g_a, g_b

X_raw = [2013, 2014, 2015, 2016, 2017]
Y_raw = [12000, 14000, 15000, 16500, 17500]
x_pred_raw = 2018
X = [(x - min(X_raw)) / (max(X_raw) - min(X_raw)) for x in X_raw]
Y = [(y - min(Y_raw)) / (max(Y_raw) - min(Y_raw)) for y in Y_raw]

num_epoch = 10000
learning_rate = 1e-3
for e in range(num_epoch):
    for i in range(len(X)):
        x, y = X[i], Y[i]
        g_a, g_b = gradient_loss(x, y)
        a = a - learning_rate * g_a
        b = b - learning_rate * g_b
print(a, b)
for i in range(len(X)):
    x, y = X[i], Y[i]
    print(f(x), y)
x_pred = (x_pred_raw - min(X_raw)) / (max(X_raw) - min(X_raw))
y_pred = f(x_pred)
y_pred_raw = y_pred * (max(Y_raw) - min(Y_raw)) + min(Y_raw)
print(x_pred_raw, y_pred_raw)
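
Here gradient_loss() implements the derivatives of the single-sample loss l(a, b) = (a*x + b - y)^2, namely dl/da = 2*(a*x + b - y)*x and dl/db = 2*(a*x + b - y), so the inner loop takes one gradient step per data point rather than one batch step per epoch as in the NumPy version above.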

@@ -0,0 +1,43 @@
import numpy as np

X_raw = np.array([2013, 2014, 2015, 2016, 2017])
y_raw = np.array([12000, 14000, 15000, 16500, 17500])

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

import tensorflow as tf

# define the dataflow graph
learning_rate_ = tf.placeholder(dtype=tf.float32)
X_ = tf.placeholder(dtype=tf.float32, shape=[5])
y_ = tf.placeholder(dtype=tf.float32, shape=[5])
a = tf.get_variable('a', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)

y_pred = a * X_ + b
loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

# backpropagation: manually compute the gradients of the variables (model parameters)
grad_a = tf.reduce_sum((y_pred - y_) * X_)
grad_b = tf.reduce_sum(y_pred - y_)

# gradient descent: manually update the parameters
new_a = a - learning_rate_ * grad_a
new_b = b - learning_rate_ * grad_b
update_a = tf.assign(a, new_a)
update_b = tf.assign(b, new_b)

train_op = [update_a, update_b]
# the dataflow graph definition ends here
# note that up to this point no actual computation has taken place; we have only defined a graph

num_epoch = 10000
learning_rate = 1e-3
with tf.Session() as sess:
    # initialize the variables a and b
    tf.global_variables_initializer().run()
    # repeatedly feed the data into the graph built above to compute and update the variables
    for e in range(num_epoch):
        sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
    print(sess.run([a, b]))

@@ -0,0 +1,29 @@
import numpy as np

X_raw = np.array([2013, 2014, 2015, 2016, 2017])
y_raw = np.array([12000, 14000, 15000, 16500, 17500])

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

import tensorflow as tf

learning_rate_ = tf.placeholder(dtype=tf.float32)
X_ = tf.placeholder(dtype=tf.float32, shape=[5])
y_ = tf.placeholder(dtype=tf.float32, shape=[5])
a = tf.get_variable('a', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)

y_pred = a * X_ + b
loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

# backpropagation: let TensorFlow's gradient descent optimizer compute the gradients and update the variables (model parameters) automatically
train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_).minimize(loss)

num_epoch = 10000
learning_rate = 1e-3
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for e in range(num_epoch):
        sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
    print(sess.run([a, b]))

@@ -0,0 +1,32 @@
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
import numpy as np

X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

X = tf.constant(X)
y = tf.constant(y)

a = tfe.Variable(0., name='a')
b = tfe.Variable(0., name='b')

num_epoch = 10000
learning_rate = 1e-3
for e in range(num_epoch):
    # forward pass
    y_pred = a * X + b
    loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))  # loss = 0.5 * np.sum(np.square(a * X + b - y))

    # backpropagation: manually compute the gradients of the variables (model parameters)
    grad_a = tf.reduce_sum((y_pred - y) * X)
    grad_b = tf.reduce_sum(y_pred - y)

    # update the parameters
    a, b = a - learning_rate * grad_a, b - learning_rate * grad_b

print(a, b)

@@ -0,0 +1,31 @@
import numpy as np
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tf.enable_eager_execution()

X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

X = tf.constant(X)
y = tf.constant(y)

a = tf.get_variable('a', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
variables = [a, b]

num_epoch = 10000
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3)
for e in range(num_epoch):
    # use tf.GradientTape() to record the gradient information of the loss
    with tf.GradientTape() as tape:
        y_pred = a * X + b
        loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))
    # TensorFlow automatically computes the gradients of the loss with respect to the parameters
    grads = tape.gradient(loss, variables)
    # TensorFlow automatically updates the parameters according to the gradients
    optimizer.apply_gradients(grads_and_vars=zip(grads, variables))

print(a, b)
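
As a usage sketch (not part of the original file), the fitted parameters can be turned into a 2018 prediction on the original scale, using the same de-normalization as the pure-Python version above:

x_pred = (2018 - X_raw.min()) / (X_raw.max() - X_raw.min())
y_pred = a * x_pred + b                                            # prediction on the normalized scale
print(y_pred.numpy() * (y_raw.max() - y_raw.min()) + y_raw.min())  # map back to the original price scale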

+10
@@ -0,0 +1,10 @@
import tensorflow as tf

# define a "computation graph"
a = tf.constant(1)  # define a constant Tensor
b = tf.constant(1)
c = a + b  # equivalent to c = tf.add(a, b); c is a new tensor formed from tensors a and b by the Add operation

sess = tf.Session()  # instantiate a Session
c_ = sess.run(c)  # use the Session's run() method to actually compute the node (tensor) c in the graph
print(c_)

@@ -0,0 +1,9 @@
import tensorflow as tf

A = tf.ones(shape=[2, 3])  # tf.ones(shape) creates an all-ones matrix with the given shape
B = tf.ones(shape=[3, 2])
C = tf.matmul(A, B)

sess = tf.Session()
C_ = sess.run(C)
print(C_)

+12
@@ -0,0 +1,12 @@
import tensorflow as tf

a = tf.placeholder(dtype=tf.int32)  # define a placeholder Tensor
b = tf.placeholder(dtype=tf.int32)
c = a + b

a_ = int(input("a = "))  # read an integer from the terminal into a_ (int() is needed because input() returns a string in Python 3)
b_ = int(input("b = "))

sess = tf.Session()
c_ = sess.run(c, feed_dict={a: a_, b: b_})  # feed_dict supplies the tensor values needed to compute c
print("a + b = %d" % c_)
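
A sample interactive run might look like this (the inputs 2 and 3 are arbitrary):

a = 2
b = 3
a + b = 5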

+10
@@ -0,0 +1,10 @@
import tensorflow as tf

x = tf.Variable(initial_value=1.)
y = tf.square(x)  # y = x ^ 2
y_grad = tf.gradients(y, x)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
y_, y_grad_ = sess.run([y, y_grad])
print([y_, y_grad_])
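
Since y = x^2 and dy/dx = 2x, the initial value x = 1 should yield y_ = 1.0 and y_grad_ = [2.0] (tf.gradients returns a list with one gradient per variable passed in).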

@@ -0,0 +1,13 @@
import tensorflow as tf

a = tf.get_variable(name='a', shape=[])
initializer = tf.assign(a, 0)  # tf.assign(x, y) returns an operation that assigns the value of tensor y to the variable x
a_plus_1 = a + 1  # equivalent to a + tf.constant(1)
plus_one_op = tf.assign(a, a_plus_1)

sess = tf.Session()
sess.run(initializer)
for i in range(5):
    sess.run(plus_one_op)  # add one to the variable a
    a_ = sess.run(a)  # fetch the value of the variable a into a_
    print(a_)
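
Because a is first assigned 0 and plus_one_op is run once per iteration, the loop should print 1.0, 2.0, 3.0, 4.0 and 5.0 in turn.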

@@ -0,0 +1,12 @@
import tensorflow as tf

a = tf.get_variable(name='a', shape=[], initializer=tf.zeros_initializer)  # use the all-zeros initializer
a_plus_1 = a + 1
plus_one_op = tf.assign(a, a_plus_1)

sess = tf.Session()
sess.run(tf.global_variables_initializer())  # initialize all variables
for i in range(5):
    sess.run(plus_one_op)
    a_ = sess.run(a)
    print(a_)

+16
@@ -0,0 +1,16 @@
import tensorflow as tf
import os

config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.4  # cap this process at 40% of the GPU memory
tf.enable_eager_execution(config=config)

A = tf.constant([[1, 2], [3, 4]])
B = tf.constant([[5, 6], [7, 8]])
C = tf.matmul(A, B)

print(C)

os.system('pause')  # keep the console window open (Windows only)
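
The same ConfigProto options can also be passed to a graph-mode session; a minimal sketch of that usage, not part of this commit:

import tensorflow as tf

config = tf.ConfigProto()
config.gpu_options.allow_growth = True    # allocate GPU memory on demand instead of reserving it all up front
sess = tf.Session(config=config)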
