The d2lzh_pytorch module
import random
import torch
import matplotlib_inline
from matplotlib import pyplot as plt
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets
import sys
from collections import OrderedDict
def use_svg_display():
    matplotlib_inline.backend_inline.set_matplotlib_formats('svg')

def set_figsize(figsize=(3.5, 2.5)):
    use_svg_display()
    plt.rcParams['figure.figsize'] = figsize
'''
Function notes:
torch.linspace(start, end, steps, dtype) → Tensor: generates steps evenly spaced values from start to end, with data type dtype.
torch.index_select(input, dim, index): selects a subset of a tensor along one dimension.
    input: the tensor to index into
    dim: the dimension to index along (0 or 1)
    index: the indices to select, given as a tensor (e.g. a LongTensor)
yield: a function that uses yield returns an iterator object; its local state is saved between calls, and its contents can be traversed with next() or a for loop.
'''
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)
    for i in range(0, num_examples, batch_size):
        j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])
        yield features.index_select(0, j), labels.index_select(0, j)
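As a quick illustration of torch.linspace, index_select-based batching and yield working together, here is a small sketch; the 20-point range, the 3x + 1 relation and the batch size are made-up assumptions chosen only for demonstration.

# Sketch: toy data built with torch.linspace, iterated with data_iter
# (the 20-point range, the 3x + 1 relation and batch_size=5 are illustrative assumptions)
features = torch.linspace(0, 1, steps=20, dtype=torch.float32).view(-1, 1)
labels = 3.0 * features.squeeze() + 1.0
for X_batch, y_batch in data_iter(batch_size=5, features=features, labels=labels):
    print(X_batch.shape, y_batch.shape)   # torch.Size([5, 1]) torch.Size([5])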
def linreg(X, w, b):
    return torch.mm(X, w) + b
def square_loss(y_hat, y):
    return (y_hat - y.view(y_hat.size())) ** 2 / 2
def sgd(params, lr, batch_size):
    for param in params:
        param.data -= lr * param.grad / batch_size
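Taken together, data_iter, linreg, square_loss and sgd are enough for linear regression from scratch. The following is a minimal sketch; the synthetic data, learning rate, epoch count and batch size are assumptions chosen only for illustration.

# Sketch: linear regression from scratch with the helpers above
# (synthetic data, lr=0.03, num_epochs=3 and batch_size=10 are illustrative assumptions)
true_w = torch.tensor([[2.0], [-3.4]])
true_b = 4.2
features = torch.randn(1000, 2)
labels = torch.mm(features, true_w).squeeze() + true_b + 0.01 * torch.randn(1000)

w = torch.zeros(2, 1, requires_grad=True)
b = torch.zeros(1, requires_grad=True)
lr, num_epochs, batch_size = 0.03, 3, 10
for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = square_loss(linreg(X, w, b), y).sum()
        l.backward()
        sgd([w, b], lr, batch_size)   # update parameters in place
        w.grad.data.zero_()           # reset gradients for the next batch
        b.grad.data.zero_()
    train_l = square_loss(linreg(features, w, b), labels).mean().item()
    print('epoch %d, loss %f' % (epoch + 1, train_l))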
'''
FashionMNIST dataset
'''
def get_fashion_mnist_labels(labels):
    text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
    return [text_labels[int(i)] for i in labels]
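For example, a few numeric class indices map to text labels as follows:

# Example: map numeric class indices to text labels
print(get_fashion_mnist_labels([0, 2, 9]))   # ['t-shirt', 'pullover', 'ankle boot']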
def show_fashion_mnist(images, labels):
    use_svg_display()
    _, figs = plt.subplots(1, len(images), figsize=(12, 12))
    for f, img, lbl in zip(figs, images, labels):
        f.imshow(img.view((28, 28)).numpy())
        f.set_title(lbl)
        f.axes.get_xaxis().set_visible(False)
        f.axes.get_yaxis().set_visible(False)
    plt.savefig("path")
def load_data_fashion_mnist(batch_size):
    mnist_train = torchvision.datasets.FashionMNIST(root='path', train=True, download=True, transform=transforms.ToTensor())
    mnist_test = torchvision.datasets.FashionMNIST(root='path', train=False, download=True, transform=transforms.ToTensor())
    '''mnist_train and mnist_test above are subclasses of torch.utils.data.Dataset, so len() can be used
    to get the dataset size. The training and test sets contain 6000 and 1000 images per class
    respectively, and both datasets have 10 classes.'''
    if sys.platform.startswith('win'):
        num_workers = 0   # no extra worker processes on Windows
    else:
        num_workers = 4   # use 4 worker processes to read the data
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_iter, test_iter
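A quick way to check the returned loaders is to pull one batch and look at its shape; batch_size=256 is an arbitrary illustrative choice.

# Sketch: inspect one batch from the loaders (batch_size=256 is an illustrative choice)
train_iter, test_iter = load_data_fashion_mnist(batch_size=256)
X, y = next(iter(train_iter))
print(X.shape, y.shape)   # torch.Size([256, 1, 28, 28]) torch.Size([256])
print(len(train_iter.dataset), len(test_iter.dataset))   # 60000 10000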
def check_mnist():
    mnist_train = torchvision.datasets.FashionMNIST(root='path', train=True, download=True, transform=transforms.ToTensor())
    mnist_test = torchvision.datasets.FashionMNIST(root='path', train=False, download=True, transform=transforms.ToTensor())
    X, y = [], []
    for i in range(10):
        X.append(mnist_train[i][0])
        y.append(mnist_train[i][1])
    show_fashion_mnist(X, get_fashion_mnist_labels(y))
def evaluate_accuracy(test_iter, net):
    acc_sum, n = 0.0, 0
    for X, y in test_iter:
        acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
        n += y.shape[0]
    return acc_sum / n
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()
            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)
            else:
                optimizer.step()
            train_l_sum += l.item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            n += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train_acc %.3f, test_acc %.3f' % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
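To see how the pieces above fit together, here is a sketch that trains softmax regression on FashionMNIST with train_ch3; the hyperparameters and the use of torch.nn.Flatten, torch.nn.CrossEntropyLoss and torch.optim.SGD are illustrative assumptions rather than choices fixed by this module.

# Sketch: softmax regression on FashionMNIST via train_ch3
# (batch_size, lr, num_epochs and the Flatten/CrossEntropyLoss/SGD choices are illustrative assumptions)
batch_size = 256
train_iter, test_iter = load_data_fashion_mnist(batch_size)
net = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(28 * 28, 10))
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
train_ch3(net, train_iter, test_iter, loss, num_epochs=5, batch_size=batch_size, optimizer=optimizer)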
class FlattenLayer(torch.nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):
        return x.view(x.shape[0], -1)
net = torch.nn.Sequential(OrderedDict([
    ('flatten', FlattenLayer()),
    ('linear', torch.nn.Linear(2, 3))
]))
'''
------------------------------------------------------------------- Plotting functions
'''
def semilogy(x_vals, y_vals, xlabel, ylabel, label, x2_vals=None, y2_vals=None, legend=None):
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.semilogy(x_vals, y_vals)
    if x2_vals and y2_vals:
        plt.semilogy(x2_vals, y2_vals, linestyle=':')
    plt.legend(legend)
    plt.savefig("path/polynomial " + label + " fit.png")
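A small usage sketch for semilogy; the epoch and loss values below are made-up numbers that only demonstrate the calling convention, and the hard-coded save-path placeholder inside semilogy must first be pointed at an existing directory.

# Sketch: made-up train/test curves plotted on a semilog-y scale
epochs = list(range(1, 11))
train_ls = [1.0 / e for e in epochs]   # illustrative values only
test_ls = [1.2 / e for e in epochs]    # illustrative values only
semilogy(epochs, train_ls, 'epochs', 'loss', 'degree3',
         x2_vals=epochs, y2_vals=test_ls, legend=['train', 'test'])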