安裝模型圖片導(dǎo)出模塊的命令如下:
sudo pip install pydot
sudo pip install graphviz
sudo pip install pydot-ng
sudo apt-get install graphviz
安裝h5py的命令如下(模型保存模塊):
sudo pip install cython
sudo apt-get install libhdf5-dev
sudo pip install h5py
記錄一下代碼:
# -*- coding: UTF-8 -*-
# mnist神經(jīng)網(wǎng)絡(luò)訓(xùn)練,采用LeNet-5模型
import os
import cv2
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.advanced_activations import PReLU
from keras.optimizers import SGD, Adadelta, Adagrad
from keras.utils import np_utils
from keras.utils.vis_utils import plot_model
import h5py
from keras.models import model_from_json
def loadData(path, number):
    """Load up to `number` grayscale 28x28 digit images from directory `path`.

    Each file name is expected to encode its label as the prefix before the
    first '-' (e.g. "7-0013.png").

    Returns (data, labels): data is float32 of shape (count, 1, 28, 28)
    (channels-first), labels is uint8 of shape (count,).
    """
    # np.empty allocates without zeroing; every returned row is overwritten
    # below, and unfilled rows are sliced off before returning.
    data = np.empty((number, 1, 28, 28), dtype="float32")
    labels = np.empty((number,), dtype="uint8")
    count = 0
    for name in os.listdir(path):
        # flag 0 -> read as single-channel grayscale
        img = cv2.imread(path + '/' + name, 0)
        if img is None:
            # unreadable or non-image file: skip instead of crashing
            continue
        # NOTE(review): assumes every image is exactly 28x28 — confirm upstream.
        data[count, 0, :, :] = np.asarray(img, dtype="float32")
        labels[count] = int(name.split('-')[0])  # label encoded in the filename
        count += 1
        if count >= number:
            break
    # One summary line instead of one line per image.
    print("%s loaded %d" % (path, count))
    # Slice so callers never see uninitialized rows when the directory holds
    # fewer than `number` readable images.
    return data[:count], labels[:count]
# Load the MNIST data, shuffled and split between train and test sets:
# 60000 training and 10000 test grayscale images, 28x28 pixels each.
(trainData, trainLabels), (testData, testLabels) = mnist.load_data()
# Labels are digits 0-9; categorical_crossentropy (used at compile time below)
# requires one-hot binary class matrices of shape (n_samples, 10).
trainLabels = np_utils.to_categorical(trainLabels, 10)
testLabels = np_utils.to_categorical(testLabels, 10)
# NOTE(review): the original flattened the images to (n, 784) here only to
# reshape them to (n, 28, 28, 1) further down; that redundant round trip was
# removed — mnist.load_data() already yields (n, 28, 28), which the later
# reshape consumes directly.
# tf或th為后端,采取不同參數(shù)順序
# th
# if K.image_data_format() == 'channels_first':
# -x_train.shape[0]=6000
# x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
# -x_train.shape:(60000, 1, 28, 28)
# x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
# x_test.shape:(10000, 1, 28, 28)
# 單通道灰度圖像,channel=1
# input_shape = (1, img_rows, img_cols)
# else: #tf
# x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
# x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
# input_shape = (img_rows, img_cols, 1)
# tensorflow后端
trainData = trainData.reshape(trainData.shape[0], 28, 28, 1)
testData = testData.reshape(testData.shape[0], 28, 28, 1)
# Build the LeNet-5-style network as a linear stack of layers.
model = Sequential()
# Conv 1: 4 filters of 5x5, 'valid' padding -> 24x24 feature maps.
# Only the first layer declares input_shape (28x28 grayscale, channels last).
model.add(Conv2D(filters=4, kernel_size=(5, 5), padding='valid', input_shape=(28, 28, 1), activation='tanh'))
# 2x2 max pooling halves the maps: 24x24 -> 12x12.
model.add(MaxPooling2D(pool_size=(2, 2)))
# Conv 2: 8 filters of 3x3 -> 10x10 maps; input depth is inferred
# automatically from the previous layer.
model.add(Conv2D(filters=8, kernel_size=(3, 3), padding='valid', activation='tanh'))
# Conv 3: 16 filters of 3x3 -> 8x8 maps, then 2x2 pooling -> 4x4.
model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='valid', activation='tanh'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Flatten the 16 maps of 4x4 into a 256-element vector — the bridge from
# convolutional layers to fully connected layers.
model.add(Flatten())
# Fully connected layer: 256 -> 128 (input size inferred from Flatten).
model.add(Dense(128, activation='tanh'))
# Output layer: 10 units (one per digit class), softmax for classification.
model.add(Dense(10, activation='softmax'))
# ---- Train the CNN ----
# SGD: initial learning rate 0.05, momentum 0.9, lr decay 1e-6, Nesterov on.
sgd = SGD(lr=0.05, momentum=0.9, decay=1e-6, nesterov=True)
# categorical_crossentropy (multi-class log loss) requires the one-hot
# (n_samples, n_classes) labels prepared earlier in this script.
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# 20 epochs, mini-batches of 100, 20% of the training data held out for
# validation, samples shuffled each epoch, per-epoch progress logged.
model.fit(trainData, trainLabels, batch_size=100, epochs=20, shuffle=True, verbose=1, validation_split=0.2)
# Export an architecture diagram (requires pydot + graphviz).
plot_model(model, to_file='model2.png', show_shapes=True, show_layer_names=False)
# Parenthesized single-argument print works in both Python 2 and 3.
print(model.metrics_names)
# Evaluate loss/accuracy on the held-out test set.
print(model.evaluate(testData, testLabels, verbose=0, batch_size=500))
# Persist the model: architecture as JSON, weights as HDF5 (needs h5py).
json_string = model.to_json()
# 'with' guarantees the file is flushed and closed even on error — the
# original open(...).write(...) leaked the file handle.
with open('my_model_architecture.json', 'w') as f:
    f.write(json_string)
model.save_weights('my_model_weights.h5')
本文摘自:https://blog.51cto.com/u(原文鏈接已截斷)