tensorflow-learning

Related links

Development environment

After installing Anaconda on Windows and switching to Python 3.5, installing TensorFlow kept failing, which was pretty frustrating...
The detailed steps below are walked through in the video linked above.

  1. Download VMware and install Ubuntu
  2. Install Python 2.7 and pip

    sudo apt install python
    sudo apt install python-pip
  3. Install TensorFlow (the exact command isn't given here; see the sketch after this list)

  4. Verify the installation

    $ python
    >>> import tensorflow as tf
    >>> tf.__version__
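
Step 3 above doesn't spell out the install command. A minimal sketch, assuming the Python 2.7 pip set up in step 2 and the CPU-only TensorFlow package from PyPI; the mirror variant simply reuses the Douban index that the Pillow step below uses and is optional:

    pip install tensorflow
    # or, via the same mirror used later for Pillow:
    pip install -i https://pypi.douban.com/simple tensorflow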

VMware notes

  1. Start the virtual machine
  2. Menu > Virtual Machine > Install VMware Tools
  3. Open the CD-ROM drive that appears on the desktop
  4. Copy the .tar.gz file inside it to another folder and extract it

    tar zxpf xxxx.tar.gz
  5. Enter the extracted folder and run the installer

    sudo ./vmware-install.pl
  6. Press Enter to accept the defaults; a successful install ends with

    -- the VMware team
  7. Menu > View > Autosize, so the guest display resizes automatically

MNIST digit recognition

  1. Download MNIST and put it into the data folder (see the note after this list)
  2. Install Pillow (PIL)

    pip install -i https://pypi.douban.com/simple pillow
  3. Write the code
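
For step 1: the scripts below call input_data.read_data_sets('./data/', one_hot=True), which looks for the four standard MNIST archives in ./data/ and tries to download them if they are missing. The expected layout (file names from the standard MNIST distribution) is roughly:

    data/
    ├── train-images-idx3-ubyte.gz
    ├── train-labels-idx1-ubyte.gz
    ├── t10k-images-idx3-ubyte.gz
    └── t10k-labels-idx1-ubyte.gz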

Forward propagation (mnist_forward.py)

#coding=utf-8
import tensorflow as tf

# Input, output and hidden-layer sizes
InNode = 784
OutNode = 10
LayNode = 500

# Randomly initialize a weight tensor; if a regularization coefficient is
# given, add the L2 penalty on w to the 'losses' collection
def get_weight(shape, regularizer):
    w = tf.Variable(tf.truncated_normal(shape, stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

# Biases start at zero
def get_bias(shape):
    b = tf.Variable(tf.zeros(shape))
    return b

# Build the network: one hidden ReLU layer, linear output layer (logits)
def forward(x, regularizer):
    w1 = get_weight([InNode, LayNode], regularizer)
    b1 = get_bias([LayNode])
    y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

    w2 = get_weight([LayNode, OutNode], regularizer)
    b2 = get_bias([OutNode])
    y2 = tf.matmul(y1, w2) + b2
    return y2
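
The forward module only builds the graph. A quick sanity check (a hypothetical sketch, not part of the original files) that feeds a random batch through forward() and prints the output shape:

    #coding=utf-8
    # Hypothetical sanity check for mnist_forward (not in the original notes)
    import numpy as np
    import tensorflow as tf
    import mnist_forward

    x = tf.placeholder(tf.float32, [None, mnist_forward.InNode])
    y = mnist_forward.forward(x, None)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        batch = np.random.rand(2, mnist_forward.InNode).astype(np.float32)
        out = sess.run(y, feed_dict={x: batch})
        print out.shape  # expected: (2, 10)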

Backward propagation (mnist_backward.py)

#coding=utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import os

# Batch size, base learning rate, learning-rate decay, regularization
# coefficient, total training steps, moving-average decay, save path and name
BatchSize = 200
LearningBase = 0.1
LearningDecay = 0.99
Regularizer = 0.0001
Steps = 50000
MovingDecay = 0.99
ModelPath = './model/'
ModelName = 'mnist_model'

# Backward propagation (training loop)
def backward(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_forward.InNode])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward.OutNode])
    y = mnist_forward.forward(x, Regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Cross-entropy loss plus the L2 terms collected in 'losses'
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))

    # Exponentially decayed learning rate: one decay per epoch of batches
    learningRate = tf.train.exponential_decay(
        LearningBase,
        global_step,
        mnist.train.num_examples / BatchSize,
        LearningDecay,
        staircase=True
    )

    train_step = tf.train.GradientDescentOptimizer(learningRate).minimize(loss, global_step=global_step)

    # Maintain an exponential moving average of all trainable variables
    ema = tf.train.ExponentialMovingAverage(MovingDecay, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        # Resume training from the latest checkpoint, if one exists
        ckpt = tf.train.get_checkpoint_state(ModelPath)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(Steps):
            xs, ys = mnist.train.next_batch(BatchSize)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print "%d steps, loss: %g" % (step, loss_value)
                saver.save(sess, os.path.join(ModelPath, ModelName), global_step=global_step)

def main():
    mnist = input_data.read_data_sets('./data/', one_hot=True)
    backward(mnist)

if __name__ == '__main__':
    main()
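
For reference, a small sketch (my own illustration, not from the original notes) of what tf.train.exponential_decay with staircase=True computes, assuming the standard 55000-example MNIST training split:

    # Staircase decay: the rate drops by LearningDecay once per "epoch" of
    # mnist.train.num_examples / BatchSize steps (integer division)
    def decayed_lr(step, base=0.1, decay=0.99, decay_steps=55000 // 200):
        return base * decay ** (step // decay_steps)

    print decayed_lr(0)    # 0.1
    print decayed_lr(275)  # 0.099, one decay step later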

Test

#coding=utf-8
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward
import mnist_backward

# Seconds to wait between two evaluations
TestSecs = 5

# Repeatedly evaluate the latest checkpoint on the test set
def test(mnist):
    with tf.Graph().as_default() as g:
        # Define x, y_ and the forward graph (no regularization at test time)
        x = tf.placeholder(tf.float32, [None, mnist_forward.InNode])
        y_ = tf.placeholder(tf.float32, [None, mnist_forward.OutNode])
        y = mnist_forward.forward(x, None)

        # Restore the moving-average (shadow) values of the variables
        ema = tf.train.ExponentialMovingAverage(mnist_backward.MovingDecay)
        ema_restore = ema.variables_to_restore()
        # Instantiate the saver
        saver = tf.train.Saver(ema_restore)

        # Accuracy over the fed batch
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        while True:
            with tf.Session() as sess:
                # Load the latest model checkpoint
                ckpt = tf.train.get_checkpoint_state(mnist_backward.ModelPath)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # The global step is encoded in the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split("/")[-1].split("-")[-1]
                    tempDict = {
                        x: mnist.test.images,
                        y_: mnist.test.labels
                    }
                    accuracy_score = sess.run(accuracy, feed_dict=tempDict)
                    print "%s steps, acc: %g" % (global_step, accuracy_score)
                else:
                    print "no checkpoint file found"
                    return
            time.sleep(TestSecs)

def main():
    mnist = input_data.read_data_sets('./data/', one_hot=True)
    test(mnist)

if __name__ == '__main__':
    main()
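
The test loop is meant to run alongside training: every TestSecs seconds it reloads the newest checkpoint from ./model/ and reports accuracy. A typical way to run the two scripts (mnist_backward.py matches the module name imported above; mnist_test.py is my assumption for how the test script is saved):

    python mnist_backward.py
    # in a second terminal
    python mnist_test.py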

Image recognition

#coding=utf-8
import tensorflow as tf
import numpy as np
from PIL import Image
import mnist_backward
import mnist_forward

# Rebuild the graph, restore the trained model and predict one image
def restore_model(testPicArr):
    with tf.Graph().as_default() as tg:
        x = tf.placeholder(tf.float32, [None, mnist_forward.InNode])
        y = mnist_forward.forward(x, None)
        preValue = tf.argmax(y, 1)

        # Restore the moving-average values of the trained variables
        variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MovingDecay)
        variable_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variable_restore)

        with tf.Session() as sess:
            # Load the latest checkpoint
            ckpt = tf.train.get_checkpoint_state(mnist_backward.ModelPath)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                preValue = sess.run(preValue, feed_dict={x: testPicArr})
                return preValue
            else:
                print "no checkpoint file found!"
                return -1

# Preprocess the picture: resize to 28x28, convert to grayscale, invert and
# threshold to pure black/white, then flatten and scale to [0, 1]
def pre_pic(picName):
    img = Image.open(picName)
    reIm = img.resize((28, 28), Image.ANTIALIAS)
    im_arr = np.array(reIm.convert('L'))
    threshold = 50
    for i in range(28):
        for j in range(28):
            im_arr[i][j] = 255 - im_arr[i][j]
            if im_arr[i][j] < threshold:
                im_arr[i][j] = 0
            else:
                im_arr[i][j] = 255

    nm_arr = im_arr.reshape([1, 784])
    nm_arr = nm_arr.astype(np.float32)
    img_ready = np.multiply(nm_arr, 1.0 / 255.0)
    return img_ready

def application():
    testPic = raw_input("img of :")
    testPicArr = pre_pic(testPic)
    preValue = restore_model(testPicArr)
    print "the number is:", preValue

def main():
    application()

if __name__ == '__main__':
    main()
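
A hypothetical run (the script name mnist_app.py and the picture path are just examples, not from the original notes). Since pre_pic inverts and thresholds the pixels, it assumes a dark digit on a light background:

    python mnist_app.py
    img of :pic/5.png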

To be continued