MXNet Tutorial Series, Part 1: The First Example

The first example is, of course, MNIST.

This assumes MXNet has already been installed successfully.

The example is run as follows:

cd mxnet/example/image-classification
python train_mnist.py
That is all it takes to start training.
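
Every option registered by fit.add_fit_args (shown further down) can also be overridden on the command line instead of editing the script. For example, on a machine with a single GPU (the script's default is gpus '0,1'), something like:

python train_mnist.py --gpus 0 --num-epochs 10 --disp-batches 50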

The code of train_mnist.py is:

"""
Train mnist, see more explanation at http://mxnet.io/tutorials/python/mnist.html
"""
import os
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
from common import find_mxnet, fit
from common.util import download_file
import mxnet as mx
import numpy as np
import gzip, struct

def read_data(label, image):
    """download and read data into numpy"""
    base_url = 'http://yann.lecun.com/exdb/mnist/'
    with gzip.open(download_file(base_url+label, os.path.join('data',label))) as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        label = np.fromstring(flbl.read(), dtype=np.int8)
    with gzip.open(download_file(base_url+image, os.path.join('data',image)), 'rb') as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        image = np.fromstring(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)
    return (label, image)

def to4d(img):
    """reshape to 4D arrays"""
    return img.reshape(img.shape[0], 1, 28, 28).astype(np.float32)/255

def get_mnist_iter(args, kv):
    """create data iterator with NDArrayIter"""
    (train_lbl, train_img) = read_data(
        'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz')
    (val_lbl, val_img) = read_data(
        't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz')
    train = mx.io.NDArrayIter(to4d(train_img), train_lbl, args.batch_size, shuffle=True)
    val = mx.io.NDArrayIter(to4d(val_img), val_lbl, args.batch_size)
    return (train, val)

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(description="train mnist",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num-classes', type=int, default=10,
                        help='the number of classes')
    parser.add_argument('--num-examples', type=int, default=60000,
                        help='the number of training examples')
    fit.add_fit_args(parser)
    parser.set_defaults(
        # network
        network        = 'mlp',
        # train
        gpus           = '0,1',
        batch_size     = 64,
        disp_batches   = 100,
        num_epochs     = 20,
        lr             = .05,
        lr_step_epochs = '10',
        model_prefix   = './my')
    args = parser.parse_args()

    # load network
    from importlib import import_module
    net = import_module('symbols.'+args.network)
    sym = net.get_symbol(**vars(args))

    # train
    fit.fit(args, sym, get_mnist_iter)
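
To get a feel for what to4d and get_mnist_iter hand to the trainer, here is a small sketch that uses fake data (so it runs without downloading MNIST); the shapes are the same as in the real run:

import mxnet as mx
import numpy as np

# fake 28x28 images and labels standing in for the downloaded MNIST arrays
fake_img = np.random.randint(0, 255, size=(256, 28, 28)).astype(np.uint8)
fake_lbl = np.random.randint(0, 10, size=(256,)).astype(np.int8)

# same reshape/scale as to4d above
data = fake_img.reshape(fake_img.shape[0], 1, 28, 28).astype(np.float32) / 255
train = mx.io.NDArrayIter(data, fake_lbl, 64, shuffle=True)

for batch in train:
    print(batch.data[0].shape)   # (64, 1, 28, 28)
    print(batch.label[0].shape)  # (64,)
    break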

The network is defined in the symbols folder; it plays the same role as Caffe's prototxt files.

model_prefix is the counterpart of Caffe's snapshot prefix: it determines where checkpoints are written.

During training the log prints messages such as Saved checkpoint to "./my-0001.params".
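
Such a checkpoint can be loaded back for inference. A minimal sketch, assuming the run above has produced ./my-symbol.json and ./my-0001.params (this follows the usual Module loading pattern, not code from the example itself):

import mxnet as mx
from collections import namedtuple

# load the symbol and the weights written by the epoch_end_callback
sym, arg_params, aux_params = mx.model.load_checkpoint('./my', 1)   # prefix, epoch

mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)
mod.bind(for_training=False, data_shapes=[('data', (1, 1, 28, 28))])
mod.set_params(arg_params, aux_params, allow_missing=True)

# feed one dummy image; a real image would be scaled to [0, 1] like to4d does
Batch = namedtuple('Batch', ['data'])
mod.forward(Batch([mx.nd.zeros((1, 1, 28, 28))]))
print(mod.get_outputs()[0].asnumpy())   # a (1, 10) row of class probabilities

Next comes the network itself, symbols/mlp.py: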

"""
a simple multilayer perceptron
"""
import mxnet as mx

def get_symbol(num_classes=10, **kwargs):
    data = mx.symbol.Variable('data')
    data = mx.sym.Flatten(data=data)
    fc1  = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
    act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
    fc2  = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
    act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
    fc3  = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=num_classes)
    mlp  = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
    return mlp
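
Because the symbol is only a declarative graph, its arguments and output shapes can be inspected without training anything. A small sketch, assuming it is run from example/image-classification just like train_mnist.py:

import mxnet as mx
from symbols.mlp import get_symbol   # the module loaded via import_module above

mlp = get_symbol(num_classes=10)
# 'data', the fc*_weight / fc*_bias parameters, and the auto-created 'softmax_label'
print(mlp.list_arguments())

# infer every shape from the input shape of one batch
arg_shapes, out_shapes, aux_shapes = mlp.infer_shape(data=(64, 1, 28, 28))
print(out_shapes)   # [(64, 10)] -- one softmax vector per image in the batch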

fit.py contains the code that actually runs the training:

import mxnet as mx
import logging
import os
import time

def _get_lr_scheduler(args, kv):
    if 'lr_factor' not in args or args.lr_factor >= 1:
        return (args.lr, None)
    epoch_size = args.num_examples / args.batch_size
    if 'dist' in args.kv_store:
        epoch_size /= kv.num_workers
    begin_epoch = args.load_epoch if args.load_epoch else 0
    step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
    lr = args.lr
    for s in step_epochs:
        if begin_epoch >= s:
            lr *= args.lr_factor
    if lr != args.lr:
        logging.info('Adjust learning rate to %e for epoch %d' %(lr, begin_epoch))

    steps = [epoch_size * (x-begin_epoch) for x in step_epochs if x-begin_epoch > 0]
    return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor))

def _load_model(args, rank=0):
    if 'load_epoch' not in args or args.load_epoch is None:
        return (None, None, None)
    assert args.model_prefix is not None
    model_prefix = args.model_prefix
    if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
        model_prefix += "-%d" % (rank)
    sym, arg_params, aux_params = mx.model.load_checkpoint(
        model_prefix, args.load_epoch)
    logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
    return (sym, arg_params, aux_params)

def _save_model(args, rank=0):
    if args.model_prefix is None:
        return None
    dst_dir = os.path.dirname(args.model_prefix)
    if not os.path.isdir(dst_dir):
        os.mkdir(dst_dir)
    return mx.callback.do_checkpoint(args.model_prefix if rank == 0 else "%s-%d" % (
        args.model_prefix, rank))

def add_fit_args(parser):
    """
    parser : argparse.ArgumentParser
    return a parser added with args required by fit
    """
    train = parser.add_argument_group('Training', 'model training')
    train.add_argument('--network', type=str,
                       help='the neural network to use')
    train.add_argument('--num-layers', type=int,
                       help='number of layers in the neural network, required by some networks such as resnet')
    train.add_argument('--gpus', type=str,
                       help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
    train.add_argument('--kv-store', type=str, default='device',
                       help='key-value store type')
    train.add_argument('--num-epochs', type=int, default=100,
                       help='max num of epochs')
    train.add_argument('--lr', type=float, default=0.1,
                       help='initial learning rate')
    train.add_argument('--lr-factor', type=float, default=0.1,
                       help='the ratio to reduce lr on each step')
    train.add_argument('--lr-step-epochs', type=str,
                       help='the epochs to reduce the lr, e.g. 30,60')
    train.add_argument('--optimizer', type=str, default='sgd',
                       help='the optimizer type')
    train.add_argument('--mom', type=float, default=0.9,
                       help='momentum for sgd')
    train.add_argument('--wd', type=float, default=0.0001,
                       help='weight decay for sgd')
    train.add_argument('--batch-size', type=int, default=128,
                       help='the batch size')
    train.add_argument('--disp-batches', type=int, default=20,
                       help='show progress for every n batches')
    train.add_argument('--model-prefix', type=str,
                       help='model prefix')
    parser.add_argument('--monitor', dest='monitor', type=int, default=0,
                        help='log network parameters every N iters if larger than 0')
    train.add_argument('--load-epoch', type=int,
                       help='load the model on an epoch using the model-load-prefix')
    train.add_argument('--top-k', type=int, default=0,
                       help='report the top-k accuracy. 0 means no report.')
    train.add_argument('--test-io', type=int, default=0,
                       help='1 means test reading speed without training')
    return train

def fit(args, network, data_loader, **kwargs):
    """
    train a model
    args : argparse returns
    network : the symbol definition of the neural network
    data_loader : function that returns the train and val data iterators
    """
    # kvstore
    kv = mx.kvstore.create(args.kv_store)

    # logging
    head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
    logging.basicConfig(level=logging.DEBUG, format=head)
    logging.info('start with arguments %s', args)

    # data iterators
    (train, val) = data_loader(args, kv)
    if args.test_io:
        tic = time.time()
        for i, batch in enumerate(train):
            for j in batch.data:
                j.wait_to_read()
            if (i+1) % args.disp_batches == 0:
                logging.info('Batch [%d]\tSpeed: %.2f samples/sec' % (
                    i, args.disp_batches*args.batch_size/(time.time()-tic)))
                tic = time.time()
        return

    # load model
    if 'arg_params' in kwargs and 'aux_params' in kwargs:
        arg_params = kwargs['arg_params']
        aux_params = kwargs['aux_params']
    else:
        sym, arg_params, aux_params = _load_model(args, kv.rank)
        if sym is not None:
            assert sym.tojson() == network.tojson()

    # save model
    checkpoint = _save_model(args, kv.rank)

    # devices for training
    devs = mx.cpu() if args.gpus is None or args.gpus is '' else [
        mx.gpu(int(i)) for i in args.gpus.split(',')]

    # learning rate
    lr, lr_scheduler = _get_lr_scheduler(args, kv)

    # create model
    model = mx.mod.Module(
        context       = devs,
        symbol        = network)

    lr_scheduler  = lr_scheduler
    optimizer_params = {
        'learning_rate': lr,
        'momentum' : args.mom,
        'wd' : args.wd,
        'lr_scheduler': lr_scheduler}

    monitor = mx.mon.Monitor(args.monitor, pattern=".*") if args.monitor > 0 else None

    initializer   = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2)
    # initializer   = mx.init.Xavier(factor_type="in", magnitude=2.34),

    # evaluation metrices
    eval_metrics = ['accuracy']
    if args.top_k > 0:
        eval_metrics.append(mx.metric.create('top_k_accuracy', top_k=args.top_k))

    # callbacks that run after each batch
    batch_end_callbacks = [mx.callback.Speedometer(args.batch_size, args.disp_batches)]
    if 'batch_end_callback' in kwargs:
        cbs = kwargs['batch_end_callback']
        batch_end_callbacks += cbs if isinstance(cbs, list) else [cbs]

    # run
    model.fit(train,
        begin_epoch        = args.load_epoch if args.load_epoch else 0,
        num_epoch          = args.num_epochs,
        eval_data          = val,
        eval_metric        = eval_metrics,
        kvstore            = kv,
        optimizer          = args.optimizer,
        optimizer_params   = optimizer_params,
        initializer        = initializer,
        arg_params         = arg_params,
        aux_params         = aux_params,
        batch_end_callback = batch_end_callbacks,
        epoch_end_callback = checkpoint,
        allow_missing      = True,
        monitor            = monitor)
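
To see what _get_lr_scheduler produces with the train_mnist.py defaults (lr=0.05, lr_factor=0.1, lr_step_epochs='10', 60000 examples, batch size 64), here is a hand-built equivalent; a sketch only, since the real code also shifts the steps when resuming from --load-epoch:

import mxnet as mx

epoch_size = 60000 // 64                 # ~937 batches per epoch
steps = [epoch_size * 10]                # drop the learning rate once, after epoch 10
sched = mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=0.1)
sched.base_lr = 0.05                     # normally the optimizer sets this from 'learning_rate'

print(sched(epoch_size * 5))             # 0.05  -- before the step
print(sched(epoch_size * 15))            # 0.005 -- after the epoch-10 step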





Reposted from: https://www.cnblogs.com/hellokittyblog/p/9128451.html
