Getting Started with TensorFlow 2.0 from Scratch ---- Part 5, Lesson 22: Building a TF1.0 Computation Graph


0. Preface

This post walks through building a TF 1.x-style computation graph (placeholders, dense layers, loss, and training op) for Fashion-MNIST classification.

1. Code

1. Import modules

import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras

print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
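Note: although this series targets TensorFlow 2.0, the graph-building code below uses TF 1.x-style APIs (tf.placeholder, tf.layers.dense, tf.train.AdamOptimizer). If the version printed above is 2.x, one common workaround (a sketch, not part of the original notebook) is to import the compat.v1 module and disable eager execution before building the graph:

# Only needed when running under TensorFlow 2.x:
# expose the TF 1.x API surface and turn off eager execution
# so that placeholders and sessions behave as in TF 1.x.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()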

2. Load the data

fashion_mnist = keras.datasets.fashion_mnist
# print(fashion_mnist)
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]
# print the shapes of each split
print(x_valid.shape, y_valid.shape)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
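Fashion-MNIST ships 60,000 training and 10,000 test images of size 28x28, so the prints above should report (5000, 28, 28), (55000, 28, 28), and (10000, 28, 28) for the validation, training, and test images. Since matplotlib is already imported, one quick sanity check (a small addition, not in the original post) is to display a single image with its label:

def show_single_image(img_arr, label):
    """Display one 28x28 grayscale image together with its integer label."""
    plt.imshow(img_arr, cmap="binary")
    plt.title("label: %d" % label)
    plt.axis("off")
    plt.show()

show_single_image(x_train[0], y_train[0])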

3. Normalize the data

print(np.max(x_train), np.min(x_train))
# normalize the data
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# reshape [None, 28, 28] -> [None * 28 * 28, 1] so the scaler fits a single
# global mean/std on the training pixels, then reshape back to [None, 28, 28];
# fit_transform on the training set, transform only on validation/test
x_train_scaled = scaler.fit_transform(x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_valid_scaled = scaler.transform(x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
x_test_scaled = scaler.transform(x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28)
print(np.max(x_train_scaled), np.min(x_train_scaled))
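Because the pixels are reshaped into a single column before fit_transform, StandardScaler learns one global mean and standard deviation over all training pixels (not per-pixel statistics), and those same statistics are reused for the validation and test sets. For illustration (a sketch, not in the original code), the equivalent manual computation would be:

# Equivalent manual normalization using one global mean/std
# computed on the training pixels (illustration only).
mean = x_train.astype(np.float32).mean()
std = x_train.astype(np.float32).std()
x_train_manual = (x_train.astype(np.float32) - mean) / std
# Should print True up to float32 rounding differences.
print(np.allclose(x_train_manual, x_train_scaled, atol=1e-3))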

4. Build the computation graph

hidden_units = [100, 100]
class_num = 10

x = tf.placeholder(tf.float32, [None, 28 * 28])
y = tf.placeholder(tf.int64, [None])
# hidden layers: fully connected layers with ReLU activation
input_for_next_layer = x
for hidden_unit in hidden_units:
    input_for_next_layer = tf.layers.dense(input_for_next_layer, hidden_unit, activation=tf.nn.relu)

# output layer: last_hidden_output * w -> logits for the 10 classes
logits = tf.layers.dense(input_for_next_layer, class_num)
# sparse_softmax_cross_entropy does three things:
# 1. logits -> softmax -> prob
# 2. labels -> one_hot
# 3. calculate cross entropy
loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits)

# get accuracy
prediction = tf.argmax(logits, 1)
correct_prediction = tf.equal(prediction, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

# training op: Adam optimizer minimizing the cross-entropy loss
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
print(x)
print(logits)
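The graph above only declares operations; nothing is computed until it is run inside a session. As a minimal sketch, assuming a TF 1.x-style runtime, the graph could be exercised on one small batch. Note that the placeholder x expects flattened [None, 784] inputs, so the scaled images have to be reshaped first:

# Minimal sanity check of the graph on one small batch (sketch only).
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    batch_x = x_train_scaled[:32].reshape(-1, 28 * 28)  # flatten to [32, 784]
    batch_y = y_train[:32]
    loss_val, acc_val, _ = sess.run([loss, accuracy, train_op],
                                    feed_dict={x: batch_x, y: batch_y})
    print("loss:", loss_val, "accuracy:", acc_val)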
