# Core imports for the transfer-learning / fine-tuning examples below.
import tensorflow as tf
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K  # fixed: "from keras.import backend" was a syntax error
import os
from keras.utils import plot_model
from matplotlib import pyplot as plt
import keras as k

# Versions these notes were written against (formerly pasted REPL output):
#   tf.__version__ -> '1.9.0'
#   k.__version__  -> '2.2.0'
# Using TensorBoard from Keras (via callbacks)
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping  # fixed: callbacks were never imported

# Bump the run counter so each training run logs to its own directory.
RUN = RUN + 1 if 'RUN' in locals() else 1
LOG_DIR = model_save_path + '/training_logs/run{}'.format(RUN)  # NOTE(review): model_save_path must be defined earlier
LOG_FILE_PATH = LOG_DIR + '/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5'

tensorboard = TensorBoard(log_dir=LOG_DIR, write_images=True)
# Keep only checkpoints that improve validation loss.
checkpoint = ModelCheckpoint(filepath=LOG_FILE_PATH, monitor='val_loss', verbose=1, save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)

history = model.fit_generator(generator=gen.generate(True), steps_per_epoch=int(gen.train_batches / 4),
                              validation_data=gen.generate(False), validation_steps=int(gen.val_batches / 4),
                              epochs=EPOCHS, verbose=1, callbacks=[tensorboard, checkpoint, early_stopping])
# Fine-tuning: freeze the base model and train the new head; once it is stable, unfreeze and train again.
# Build a transfer-learning model: InceptionV3 backbone + new 2-class head.
base_model = InceptionV3(weights='imagenet', include_top=False)
print(base_model.summary())
plot_model(base_model, to_file='InceptionV3.png')

x = base_model.output
x = GlobalAveragePooling2D()(x)         # collapse spatial dims to a feature vector
x = Dense(1024, activation='relu')(x)
predictions = Dense(2, activation='softmax')(x)

# fixed: keyword is `inputs`, not `input` (matching `outputs`)
model = Model(inputs=base_model.input, outputs=predictions)
print(model.summary())                  # fixed: summarize the full model, not the base again
plot_model(model, to_file='InceptionV3_full.png')  # fixed: don't overwrite the base-model plot

# Freeze the backbone so only the new head trains in the first phase.
for layer in base_model.layers:
    layer.trainable = False
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# fixed: class is ImageDataGenerator (was "ImageDateGenerator") and the
# flag is horizontal_flip (was "horizontal_filp").
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
    '',  # TODO: fill in the training-data directory (one subfolder per class)
    target_size=(150, 150),
    batch_size=32,
    class_mode='categorical')

test_datagen = ImageDataGenerator(rescale=1. / 255)
validation_generator = test_datagen.flow_from_directory(
    'path',  # TODO: fill in the validation-data directory
    target_size=(150, 150),
    batch_size=32,
    class_mode='categorical')

# fixed: fit_generator takes `epochs`, not `epoch`
model.fit_generator(
    train_generator,
    steps_per_epoch=2000,
    epochs=1,
    validation_data=validation_generator,
    validation_steps=800)
# Inspect layer indices, then unfreeze the top of the network (from layer
# 249 onward) for the fine-tuning phase.
for i, layer in enumerate(base_model.layers):
    print(i, layer.name)

for layer in model.layers[:249]:
    layer.trainable = False
for layer in model.layers[249:]:
    layer.trainable = True

# Fine-tune with a small learning rate so the pretrained weights move gently.
from keras.optimizers import SGD
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy')

# fixed: `epochs`, not `epoch`
model.fit_generator(
    train_generator,
    steps_per_epoch=2000,
    epochs=1,
    validation_data=validation_generator,
    validation_steps=800)
# Imports for the AlexNet-style cat/dog classifier below.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import image
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import cv2
import yaml
from tensorflow.keras.models import model_from_yaml  # fixed: "import_from_yaml" was a syntax error

batch_size = 16
l = tf.keras.layers  # shorthand alias for the layers namespace
# AlexNet-style CNN for 2-class (cat/dog) classification on 227x227 RGB input.
model = Sequential()
model.add(l.Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4), padding='valid',
                   input_shape=(227, 227, 3), activation='relu'))
model.add(l.BatchNormalization())
# fixed: keyword args need '=', and the stride (2, 227) was a typo for (2, 2)
model.add(l.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid'))
model.add(l.Conv2D(256, (5, 5), (1, 1), padding='same', activation='relu'))
model.add(l.BatchNormalization())
model.add(l.MaxPooling2D((3, 3), (2, 2), padding='valid'))
model.add(l.Conv2D(384, (3, 3), (1, 1), 'same', activation='relu'))
model.add(l.Conv2D(384, (3, 3), (1, 1), 'same', activation='relu'))
model.add(l.Conv2D(256, (3, 3), (1, 1), 'same', activation='relu'))
model.add(l.MaxPooling2D((3, 3), (2, 2), 'valid'))
model.add(l.Flatten())
model.add(l.Dense(4096, activation='relu'))
model.add(l.Dropout(0.5))                    # fixed: was "models.add" (undefined name)
model.add(l.Dense(4096, activation='relu'))  # fixed: missing '=' in activation kwarg
model.add(l.Dropout(0.5))
model.add(l.Dense(1000, activation='relu'))
model.add(l.Dropout(0.5))
model.add(l.Dense(2, activation='softmax'))
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
# Data pipeline + training. flow_from_directory's keyword is `directory`, not `path`.
datagen = ImageDataGenerator(samplewise_center=True, rescale=1.0 / 255)
train_generator = datagen.flow_from_directory(
    directory='',  # TODO: training-data root (one subfolder per class)
    classes=['cat', 'dog'],
    target_size=(227, 227),
    class_mode='categorical',
    batch_size=batch_size)
validation_generator = datagen.flow_from_directory(
    directory='',  # fixed: bare `path` was an undefined name; TODO: validation-data root
    classes=['cat', 'dog'],
    target_size=(227, 227),
    class_mode='categorical',
    batch_size=batch_size)
# fixed: step counts must be integers (20000/16 is a float in Python 3)
model.fit_generator(generator=train_generator, steps_per_epoch=20000 // 16, epochs=10,
                    validation_data=validation_generator, validation_steps=2496 // 16)

# Persist the architecture (YAML) and the weights separately.
yaml_string = model.to_yaml()
with open('./model_architecture.yaml', 'w') as arch_file:  # fixed: close the file handle
    arch_file.write(yaml_string)
model.save_weights('./model.h5')
# Single-image inference demo.
imgs = []
img = cv2.imread('test.jpg')
img = cv2.resize(img, (227, 227))  # fixed: resize the image, not the (empty) list `imgs`
imgs.append(img)
a = np.array(imgs)  # batch of one: shape (1, 227, 227, 3)
result = model.predict(a)
idx = np.argmax(result)
if idx == 0:
    print('cat\n')
else:
    print('dog\n')
cv2.imshow('image', img)
cv2.waitKey(0)
import numpy as np
import os
from shutil import copyfile,rmtree
'''
data/
train/
class1/
img1
img2
...
class2/
img1
...
validation/
class1/
img1
img2
...
class2/
img1
...
test/
class1/
img1
img2
...
class2/
img1
...
'''
# Build a labelled file list for the 17-category flowers dataset:
# every consecutive run of 80 lines in files.txt belongs to one class.
data_folder = '/Users/shidanlifuhetian/All/data/flowers17/'
new_data_folder = './flowers17/'

with open(data_folder + 'files.txt', mode='r') as file_a:  # fixed: use `with` so handles always close
    text = file_a.readlines()

labels = []
with open('./newtext.txt', mode='a') as file_b:
    for i, item in enumerate(text):
        if i % 80 == 0:
            class_num = i // 80  # class id advances every 80 files
        t = item.split('\n')
        newtext = t[0] + ' flower_' + chr(65 + class_num) + '\n'
        print(newtext)
        file_b.write(newtext)

# Start from a clean output tree.
if os.path.exists(new_data_folder):
    rmtree(new_data_folder)

# 800 train images; the remaining 560 split evenly between validation and test.
train_size = 800
val_size = int((1360 - train_size) / 2)
test_size = int(val_size)

np.random.seed(0)  # deterministic shuffle so the splits are reproducible
with open('newtext.txt') as label_file:
    labels = label_file.readlines()
np.random.shuffle(labels)
current_i = 0
def save_images(current_i, phase, d_size):
    """Copy `d_size` images from the shuffled global `labels` list into the
    output folder for the given dataset split.

    Args:
        current_i: index into `labels` where this split starts.
        phase: one of 'train', 'test', 'validation'.
        d_size: number of images in this split.

    Returns:
        The index just past the last copied image, i.e. the start index
        for the next split.
    """
    if phase == 'train':
        dst_folder = new_data_folder + 'train/'
    elif phase == 'test':
        dst_folder = new_data_folder + 'test/'
    elif phase == 'validation':
        dst_folder = new_data_folder + 'validation/'
    else:
        print('phase error')
        exit()
    for i in range(current_i, current_i + d_size):
        item = labels[i]
        r = item.split(' ')
        img_full_path = data_folder + r[0]          # "filename label\n" -> source path
        img_class = r[1].split('\n')[0]             # strip trailing newline from the label
        img_new_path = dst_folder + img_class + '/' + r[0]
        if not os.path.exists(dst_folder + img_class):
            os.makedirs(dst_folder + img_class)
        copyfile(img_full_path, img_new_path)
        print(img_new_path, ' copied')
    # fixed off-by-one: the old code returned the index of the LAST copied
    # image, so the next split re-copied it and every later split shifted.
    return current_i + d_size
# Materialize the three dataset splits; each call returns the next start index.
new_i = save_images(current_i=0,phase='train',d_size=train_size)
new_i = save_images(current_i=new_i,phase='test',d_size=test_size)
new_i = save_images(current_i=new_i,phase='validation',d_size=val_size)
# Feature extraction with a frozen VGG16 backbone + a small dense head.
from keras.applications import VGG16
train_dir=os.path.join(base_dir,'train')  # NOTE(review): base_dir must be defined earlier — confirm
conv_base=VGG16(weights='imagenet',
include_top=False,
input_shape=(150,150,3))
from keras import layers
from keras import models
model=models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256,activation='relu'))
model.add(layers.Dense(1,activation='sigmoid'))  # binary classifier head
conv_base.trainable=False  # freeze the pretrained convolutional base
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers

# Heavy augmentation for training; validation data is only rescaled.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1. / 255)

# fixed: method is flow_from_directory (was "flow_from_dirctory")
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,  # NOTE(review): validation_dir must be defined earlier
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

# fixed: compile keyword is `optimizer` (was "optimizers"); fit takes `epochs`
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(2e-5),
              metrics=['acc'])
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,
    epochs=30,
    validation_data=validation_generator,
    validation_steps=100)
import matplotlib.pyplot as plt

# Plot training vs validation accuracy from the Keras History object.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')  # fixed: "a'bo'" was a syntax error
plt.plot(epochs, val_acc, 'b', label='validation acc')
# Visualize the 32 feature maps produced by the model's second layer
# for a single input image.
from keras import backend as K

model = load_model('')  # TODO: saved-model path; NOTE(review): load_model must be imported (keras.models)
layer_1 = K.function([model.layers[0].input], [model.layers[1].output])
f1 = layer_1([input_image])[0]  # NOTE(review): input_image must be a preprocessed batch defined earlier
for ch in range(32):
    # fixed: the loop body was not indented in the original paste
    show_img = f1[:, :, :, ch]
    show_img.shape = [149, 149]  # assumes the layer output is (1, 149, 149, C) — TODO confirm
    plt.subplot(4, 8, ch + 1)
    plt.imshow(show_img, cmap='gray')
    plt.axis('off')
plt.show()
import numpy as np
from keras.models import load_model
from keras import backend as K
import matplotlib.pyplot as plt
import cv2
from keras.preprocessing.image import img_to_array, load_img
import numpy as np
def process_image(image, target_shape):
    """Load an image file, resize it to (h, w) from `target_shape`, and
    return a float32 array with pixel values scaled into [0, 1]."""
    height, width, _channels = target_shape
    pil_image = load_img(image, target_size=(height, width))
    arr = img_to_array(pil_image)
    return (arr / 255.).astype(np.float32)
def main():
    """Load a trained InceptionV3 checkpoint and visualize feature maps
    from an early layer (index 1) and a late layer (index 299) for one
    test image."""
    model = load_model('inception.026-1.07.hdf5')
    image = 'img03.jpg'
    # fixed: removed unused `images = cv2.imread(...)` (value was never used)
    image_arr = process_image(image, (299, 299, 3))
    image_arr = np.expand_dims(image_arr, axis=0)  # add batch dimension

    # Early layer: 32 feature maps of 149x149 shown as a 4x8 grid.
    layer_1 = K.function([model.layers[0].input], [model.layers[1].output])
    f1 = layer_1([image_arr])[0]
    # fixed: one figure for the whole grid (the original created a new
    # figure on every iteration) and removed the duplicated subplot call.
    plt.figure(figsize=(10, 10))
    for ch in range(32):
        show_img = f1[:, :, :, ch]
        show_img.shape = [149, 149]
        plt.subplot(4, 8, ch + 1)
        plt.imshow(show_img, cmap='gray')
        plt.axis('off')
    plt.show()

    # Late layer (index 299): 81 feature maps of 8x8 shown as a 9x9 grid.
    layer_299 = K.function([model.layers[0].input], [model.layers[299].output])
    f2 = layer_299([image_arr])[0]
    plt.figure(figsize=(10, 10))
    for ch in range(81):
        show_img = f2[:, :, :, ch]
        show_img.shape = [8, 8]
        plt.subplot(9, 9, ch + 1)
        plt.imshow(show_img, cmap='gray')
        plt.axis('off')
    plt.show()

    print('This is the end !')


if __name__ == '__main__':
    main()