Commit 02d594a

init commit
0 parents  commit 02d594a

28 files changed, +1216 −0 lines changed

.gitignore

+3
@@ -0,0 +1,3 @@
build
reference
teaching

Makefile

+20
@@ -0,0 +1,20 @@
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = python -msphinx
SPHINXPROJ    = TensorFlow
SOURCEDIR     = source
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

autobuild.bat

+1
@@ -0,0 +1 @@
sphinx-autobuild source build\html

make.bat

+36
@@ -0,0 +1,36 @@
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=python -msphinx
)
set SOURCEDIR=source
set BUILDDIR=build
set SPHINXPROJ=TensorFlow

if "%1" == "" goto help

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The Sphinx module was not found. Make sure you have Sphinx installed,
	echo.then set the SPHINXBUILD environment variable to point to the full
	echo.path of the 'sphinx-build' executable. Alternatively you may add the
	echo.Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%

:end
popd

source/.vscode/settings.json

+6
@@ -0,0 +1,6 @@
{
    "restructuredtext.confPath": "c:\\Users\\xihan\\Desktop\\TensorFlow-cn\\source",
    "restructuredtext.builtDocumentationPath": "c:\\Users\\xihan\\Desktop\\TensorFlow-cn\\build\\html",
    "restructuredtext.updateOnTextChanged": "true",
    "restructuredtext.sphinxBuildPath": "C:\\ProgramData\\Anaconda3\\Scripts\\sphinx-build.exe"
}

source/_static/code/.idea/code.iml

+12
(generated file; contents not rendered)

source/_static/code/.idea/misc.xml

+4
(generated file; contents not rendered)

source/_static/code/.idea/modules.xml

+8
(generated file; contents not rendered)

source/_static/code/.idea/workspace.xml

+463
(generated file; contents not rendered)

source/_static/code/basic/example.py

+38
@@ -0,0 +1,38 @@
# Linear regression on yearly housing prices, trained with plain-Python
# gradient descent, one sample at a time.
a = 0
b = 0

def f(x):
    y_pred = a * x + b
    return y_pred

def loss(x, y):
    l = (a * x + b - y) ** 2
    return l

def gradient_loss(x, y):
    g_a = 2 * (a * x + b - y) * x
    g_b = 2 * (a * x + b - y)
    return g_a, g_b

X_raw = [2013, 2014, 2015, 2016, 2017]
Y_raw = [12000, 14000, 15000, 16500, 17500]
x_pred_raw = 2018
# Min-max normalize both inputs and targets to [0, 1].
X = [(x - min(X_raw)) / (max(X_raw) - min(X_raw)) for x in X_raw]
Y = [(y - min(Y_raw)) / (max(Y_raw) - min(Y_raw)) for y in Y_raw]

num_epoch = 10000
learning_rate = 1e-3
for e in range(num_epoch):
    for i in range(len(X)):
        x, y = X[i], Y[i]
        g_a, g_b = gradient_loss(x, y)
        a = a - learning_rate * g_a
        b = b - learning_rate * g_b
print(a, b)
for i in range(len(X)):
    x, y = X[i], Y[i]
    print(f(x), y)
# Predict 2018 in normalized space, then map the result back to the raw scale.
x_pred = (x_pred_raw - min(X_raw)) / (max(X_raw) - min(X_raw))
y_pred = f(x_pred)
y_pred_raw = y_pred * (max(Y_raw) - min(Y_raw)) + min(Y_raw)
print(x_pred_raw, y_pred_raw)
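
For reference, the expressions hard-coded in gradient_loss are just the partial derivatives of the per-sample squared error:

\[
L(a, b) = (ax + b - y)^2, \qquad
\frac{\partial L}{\partial a} = 2(ax + b - y)\,x, \qquad
\frac{\partial L}{\partial b} = 2(ax + b - y).
\]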
+25
@@ -0,0 +1,25 @@
import numpy as np

X_raw = np.array([2013, 2014, 2015, 2016, 2017])
y_raw = np.array([12000, 14000, 15000, 16500, 17500])

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

a, b = 0, 0

num_epoch = 10000
learning_rate = 1e-3
for e in range(num_epoch):
    # Forward pass
    y_pred = a * X + b
    loss = 0.5 * (y_pred - y).dot(y_pred - y)  # loss = 0.5 * np.sum(np.square(a * X + b - y))

    # Backward pass: manually compute the gradients of the variables (model parameters)
    grad_a, grad_b = (y_pred - y).dot(X), (y_pred - y).sum()

    # Update the parameters
    a, b = a - learning_rate * grad_a, b - learning_rate * grad_b

print(a, b)
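
The vectorized gradients follow directly from the loss defined above:

\[
L(a, b) = \frac{1}{2}\sum_i (aX_i + b - y_i)^2, \qquad
\frac{\partial L}{\partial a} = \sum_i (aX_i + b - y_i)\,X_i = (\hat{y} - y) \cdot X, \qquad
\frac{\partial L}{\partial b} = \sum_i (aX_i + b - y_i).
\]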
@@ -0,0 +1,41 @@
import numpy as np

X_raw = np.array([2013, 2014, 2015, 2016, 2017])
y_raw = np.array([12000, 14000, 15000, 16500, 17500])

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

import tensorflow as tf

# Define the dataflow graph
learning_rate_ = tf.placeholder(dtype=tf.float32)
X_ = tf.placeholder(dtype=tf.float32, shape=[5])
y_ = tf.placeholder(dtype=tf.float32, shape=[5])
a = tf.get_variable('a', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)

y_pred = a * X_ + b
loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

# Backward pass: manually compute the gradients of the variables (model parameters)
grad_a = tf.reduce_sum((y_pred - y_) * X_)
grad_b = tf.reduce_sum(y_pred - y_)

# Manually update the parameters
new_a = a - learning_rate_ * grad_a
new_b = b - learning_rate_ * grad_b
update_a = tf.assign(a, new_a)
update_b = tf.assign(b, new_b)
# End of the dataflow graph definition
# Note that up to this point we have not performed any actual computation;
# we have only defined a dataflow graph

num_epoch = 10000
learning_rate = 1e-3
with tf.Session() as sess:
    # Initialize the variables a and b
    tf.global_variables_initializer().run()
    # In a loop, feed the data into the graph built above to compute and update the variables
    for e in range(num_epoch):
        sess.run([update_a, update_b], feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
    print(sess.run([a, b]))
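
The two hand-written gradient ops above could also be produced by TensorFlow's symbolic differentiation. A minimal sketch, assuming the same graph and TF 1.x API as above:

# Instead of writing grad_a / grad_b by hand, let the graph differentiate loss.
# tf.gradients returns one gradient tensor per entry in the variable list.
grad_a, grad_b = tf.gradients(loss, [a, b])
# The update ops (tf.assign) stay exactly the same as above.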
@@ -0,0 +1,29 @@
import numpy as np

X_raw = np.array([2013, 2014, 2015, 2016, 2017])
y_raw = np.array([12000, 14000, 15000, 16500, 17500])

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

import tensorflow as tf

learning_rate_ = tf.placeholder(dtype=tf.float32)
X_ = tf.placeholder(dtype=tf.float32, shape=[5])
y_ = tf.placeholder(dtype=tf.float32, shape=[5])
a = tf.get_variable('a', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)
b = tf.get_variable('b', dtype=tf.float32, shape=[], initializer=tf.zeros_initializer)

y_pred = a * X_ + b
loss = tf.constant(0.5) * tf.reduce_sum(tf.square(y_pred - y_))

# Backward pass: use TensorFlow's gradient-descent optimizer to automatically
# compute the gradients and update the variables (model parameters)
train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_).minimize(loss)

num_epoch = 10000
learning_rate = 1e-3
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for e in range(num_epoch):
        sess.run(train_op, feed_dict={X_: X, y_: y, learning_rate_: learning_rate})
    print(sess.run([a, b]))
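
minimize() is shorthand for the TF 1.x optimizer's two-step API; the equivalent explicit form of the line above would be:

optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate_)
grads_and_vars = optimizer.compute_gradients(loss)   # list of (gradient, variable) pairs
train_op = optimizer.apply_gradients(grads_and_vars)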
@@ -0,0 +1,32 @@
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
import numpy as np

X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

X = tf.constant(X)
y = tf.constant(y)

a = tfe.Variable(0., name='a')
b = tfe.Variable(0., name='b')

num_epoch = 10000
learning_rate = 1e-3
for e in range(num_epoch):
    # Forward pass
    y_pred = a * X + b
    loss = 0.5 * tf.reduce_sum(tf.square(y_pred - y))  # loss = 0.5 * np.sum(np.square(a * X + b - y))

    # Backward pass: manually compute the gradients of the variables (model parameters)
    grad_a = tf.reduce_sum((y_pred - y) * X)
    grad_b = tf.reduce_sum(y_pred - y)

    # Update the parameters (note: after this assignment a and b are plain
    # tensors rather than tfe.Variables, which is fine for a manual update)
    a, b = a - learning_rate * grad_a, b - learning_rate * grad_b

print(a, b)
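
In eager mode the manual gradient expressions can also be replaced by automatic differentiation. A minimal sketch using tfe.gradients_function, which differentiates a function with respect to its positional arguments, assuming the same X, y, a, b as above:

def loss_fn(a_, b_):
    # Same 0.5 * sum-of-squares loss as in the loop above
    return 0.5 * tf.reduce_sum(tf.square(a_ * X + b_ - y))

grad_fn = tfe.gradients_function(loss_fn)  # computes d(loss)/d(a_), d(loss)/d(b_)
grad_a, grad_b = grad_fn(a, b)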
@@ -0,0 +1,38 @@
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tfe.enable_eager_execution()
import numpy as np

X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)

X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())

X = tf.constant(X)
y = tf.constant(y)

a = tfe.Variable(0., name='a')
b = tfe.Variable(0., name='b')

def model(x):
    return a * x + b

def loss(X_, y_):
    return 0.5 * tf.reduce_sum(tf.square(model(X_) - y_))

grad_fn = tfe.implicit_gradients(loss)
num_epoch = 10000
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3)
for e in range(num_epoch):
    # Forward pass (the loss function below recomputes the prediction internally)
    y_pred = a * X + b

    # Backward pass: use tfe.implicit_gradients() in Eager mode to automatically
    # compute the gradients with respect to the variables used by loss
    grad = grad_fn(X, y)
    optimizer.apply_gradients(grad)

    # The parameter update is handled by apply_gradients above;
    # the manual version would be:
    # a, b = a - learning_rate * grad_a, b - learning_rate * grad_b

print(a, b)
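
If the loss value is also needed each step (e.g. for logging), tf.contrib.eager provides a combined variant. A minimal sketch, assuming the same loss and optimizer as above:

value_and_grad_fn = tfe.implicit_value_and_gradients(loss)
loss_value, grads_and_vars = value_and_grad_fn(X, y)  # scalar loss plus (gradient, variable) pairs
optimizer.apply_gradients(grads_and_vars)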

source/appendix.rst

+11
@@ -0,0 +1,11 @@
Appendix
========

A Brief Anaconda Tutorial
^^^^^^^^^^^^^^^^^^^^^^^^^

NumPy Basics
^^^^^^^^^^^^

Linear Regression Basics
^^^^^^^^^^^^^^^^^^^^^^^^
