1. Training the CNN Model
Model dimension analysis: every convolutional layer uses zero padding ('SAME'), so the convolutions leave height and width unchanged and only the depth grows. None of the pooling layers is padded ('VALID'), so each pooling layer shrinks height and width while the depth stays the same. The dataset: http://download.tensorflow.org/example_images/flower_photos.tgz
Shape progression through the network: 100×100×3 → 100×100×32 → 50×50×32 → 50×50×64 → 25×25×64 → 25×25×128 → 12×12×128 → 12×12×128 → 6×6×128
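As a quick sanity check, this progression can be reproduced from the two output-size rules: a 'SAME' convolution with stride 1 preserves height and width, while a 'VALID' 2×2 max pool with stride 2 computes floor((size − 2) / 2) + 1, which also explains why 25 shrinks to 12 rather than 12.5. A minimal sketch (plain Python, no TensorFlow required; the helper name `pool_valid` is mine):

```python
# 'SAME' conv with stride 1 keeps H and W; 'VALID' k x k pooling with stride s
# produces floor((size - k) / s) + 1.
def pool_valid(size, k=2, s=2):
    return (size - k) // s + 1

size, depth = 100, 3
for conv_depth in (32, 64, 128, 128):
    depth = conv_depth           # conv: spatial size unchanged, depth grows
    print("conv: %dx%dx%d" % (size, size, depth))
    size = pool_valid(size)      # pool: spatial size shrinks, depth unchanged
    print("pool: %dx%dx%d" % (size, size, depth))
# Final feature map: 6x6x128, matching nodes = 6*6*128 in the training code below.
```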
The CNN training code is as follows:
```python
from skimage import io, transform
import glob
import os
import tensorflow as tf
import numpy as np
import time

# Dataset directory
path = 'E:/data/datasets/flower_photos/'
# Path for saving the model
model_path = 'E:/data/model/flower/model.ckpt'

# Resize all images to 100 x 100
w = 100
h = 100
c = 3

# Read the images
def read_img(path):
    cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    imgs = []
    labels = []
    for idx, folder in enumerate(cate):
        for im in glob.glob(folder + '/*.jpg'):
            print('reading the images: %s' % (im))
            img = io.imread(im)
            img = transform.resize(img, (w, h))
            imgs.append(img)
            labels.append(idx)
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)

data, label = read_img(path)

# Shuffle the data
num_example = data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
data = data[arr]
label = label[arr]

# Split the data into training and validation sets
ratio = 0.8
s = np.int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
x_val = data[s:]
y_val = label[s:]

# ----------------- Build the network -----------------
# Placeholders
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')

def inference(input_tensor, train, regularizer):
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable('weight', [5, 5, 3, 32],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable('bias', [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable('weight', [5, 5, 32, 64],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable('bias', [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    with tf.variable_scope('layer5-conv3'):
        conv3_weights = tf.get_variable('weight', [3, 3, 64, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable('bias', [128], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))

    with tf.name_scope('layer6-pool3'):
        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    with tf.variable_scope('layer7-conv4'):
        conv4_weights = tf.get_variable('weight', [3, 3, 128, 128],
                                        initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv4_biases = tf.get_variable('bias', [128], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))

    with tf.name_scope('layer8-pool4'):
        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
        nodes = 6 * 6 * 128
        reshaped = tf.reshape(pool4, [-1, nodes])

    with tf.variable_scope('layer9-fc1'):
        fc1_weights = tf.get_variable('weight', [nodes, 1024],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable('bias', [1024], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer10-fc2'):
        fc2_weights = tf.get_variable('weight', [1024, 512],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable('bias', [512], initializer=tf.constant_initializer(0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.5)

    with tf.variable_scope('layer11-fc3'):
        fc3_weights = tf.get_variable('weight', [512, 5],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc3_weights))
        fc3_biases = tf.get_variable('bias', [5], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases

    return logit

# --------------------------- End of network ---------------------------
regularizer = tf.contrib.layers.l2_regularizer(0.0001)
logits = inference(x, False, regularizer)

# (Small trick) Multiply logits by 1 and give the product a name, so the output
# tensor can later be fetched by name when the saved model is restored.
b = tf.constant(value=1, dtype=tf.float32)
logits_eval = tf.multiply(logits, b, name='logits_eval')

loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Generator that yields the data batch by batch
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]

# Training and validation; n_epoch can be set larger
n_epoch = 10
batch_size = 64
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()

    # training
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc], feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err; train_acc += ac; n_batch += 1
    print("   train loss: %f" % (np.sum(train_loss) / n_batch))
    print("   train acc: %f" % (np.sum(train_acc) / n_batch))

    # validation
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err; val_acc += ac; n_batch += 1
    print("   validation loss: %f" % (np.sum(val_loss) / n_batch))
    print("   validation acc: %f" % (np.sum(val_acc) / n_batch))

saver.save(sess, model_path)
sess.close()
```
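One detail worth noting in the training loop: `minibatches` iterates only over full batches, so up to `batch_size - 1` trailing samples are silently dropped each epoch. A small standalone check illustrates this (the toy arrays are mine; it assumes the `minibatches` function above is in scope):

```python
import numpy as np

# Toy data: 10 samples with batch_size 4 yields batches [0..3] and [4..7];
# samples 8 and 9 are dropped because range() stops before a partial batch.
inputs = np.arange(10)
targets = np.arange(10)
for xb, yb in minibatches(inputs, targets, batch_size=4, shuffle=False):
    print(xb, yb)
# [0 1 2 3] [0 1 2 3]
# [4 5 6 7] [4 5 6 7]
```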
2. Calling the Model for Prediction

The code for calling the saved model to classify flowers is as follows:
```python
from skimage import io, transform
import tensorflow as tf
import numpy as np

path1 = "E:/data/datasets/flower_photos/daisy/5547758_eea9edfd54_n.jpg"
path2 = "E:/data/datasets/flower_photos/dandelion/7355522_b66e5d3078_m.jpg"
path3 = "E:/data/datasets/flower_photos/roses/394990940_7af082cf8d_n.jpg"
path4 = "E:/data/datasets/flower_photos/sunflowers/6953297_8576bf4ea3.jpg"
path5 = "E:/data/datasets/flower_photos/tulips/10791227_7168491604.jpg"

flower_dict = {0: 'daisy', 1: 'dandelion', 2: 'roses', 3: 'sunflowers', 4: 'tulips'}

w = 100
h = 100
c = 3

def read_one_image(path):
    img = io.imread(path)
    img = transform.resize(img, (w, h))
    return np.asarray(img)

with tf.Session() as sess:
    data = []
    data1 = read_one_image(path1)
    data2 = read_one_image(path2)
    data3 = read_one_image(path3)
    data4 = read_one_image(path4)
    data5 = read_one_image(path5)
    data.append(data1)
    data.append(data2)
    data.append(data3)
    data.append(data4)
    data.append(data5)

    # Restore the graph and weights saved during training
    saver = tf.train.import_meta_graph('E:/data/model/flower/model.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('E:/data/model/flower/'))

    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name("x:0")
    feed_dict = {x: data}

    logits = graph.get_tensor_by_name("logits_eval:0")

    classification_result = sess.run(logits, feed_dict)

    # Print the prediction matrix
    print(classification_result)
    # Print the index of the largest value in each row of the prediction matrix
    print(tf.argmax(classification_result, 1).eval())
    # Map each index to its flower class via the dictionary
    output = []
    output = tf.argmax(classification_result, 1).eval()
    for i in range(len(output)):
        print("Flower", i + 1, "prediction: " + flower_dict[output[i]])
```

Running it produces the following output:
```
[[ 5.76620245  3.18228579 -3.89464641 -2.81310582  1.40294015]
 [-1.01490593  3.55570269 -2.76053429  2.93104005 -3.47138596]
 [-8.05292606 -7.26499033 11.70479774  0.59627819  2.15948296]
 [-5.12940931  2.18423128 -3.33257103  9.0591135   5.03963232]
 [-4.25288343 -0.95963973 -2.33347392  1.54485476  5.76069307]]
[0 1 2 3 4]
Flower 1 prediction: daisy
Flower 2 prediction: dandelion
Flower 3 prediction: roses
Flower 4 prediction: sunflowers
Flower 5 prediction: tulips
```

Checked against the five image paths used in the prediction code, all five predictions are correct.
This model's classification accuracy on the flower dataset is around 70%; using transfer learning with the Inception-v3 model on the same dataset reaches roughly 95%. The main reasons are that the CNN here is fairly simple, and that the flower dataset is inherently harder to classify than the MNIST handwritten-digit dataset: the same model achieves noticeably higher accuracy on MNIST than on the flowers.
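For reference, the transfer-learning approach mentioned above can be sketched with the Keras API of a newer TensorFlow release. This is a minimal sketch, not the exact setup behind the 95% figure; the head architecture, hyperparameters, and use of `image_dataset_from_directory` over the flower_photos folder layout are all assumptions:

```python
import tensorflow as tf

# Reuse InceptionV3 convolutional features pretrained on ImageNet and train
# only a small classification head on the 5 flower classes.
base = tf.keras.applications.InceptionV3(include_top=False, weights='imagenet',
                                         input_shape=(299, 299, 3), pooling='avg')
base.trainable = False  # freeze the pretrained feature extractor

model = tf.keras.Sequential([
    base,
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(5, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Assumes the flower_photos layout (one subfolder per class, as above).
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    'E:/data/datasets/flower_photos/', image_size=(299, 299), batch_size=32)
# InceptionV3 expects inputs scaled to [-1, 1]
train_ds = train_ds.map(
    lambda im, lb: (tf.keras.applications.inception_v3.preprocess_input(im), lb))

model.fit(train_ds, epochs=10)
```

Even with the pretrained weights frozen, the ImageNet features make the 5-class problem far easier than training the small CNN above from scratch, which is where the accuracy gap comes from.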