复制代码
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
"""Train a small two-conv-layer CNN on MNIST with several learning rates
(Adagrad) and record per-rate test accuracy and wall-clock time cost."""
import os
import time

import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms

# Hyperparameters: epochs per run, mini-batch size, and the list of
# learning rates whose accuracy/time we want to compare.
epochs = 10
batch_size = 100
learning_rate = [0.001, 0.005, 0.01, 0.05, 0.1]
accuracy = []   # test accuracy (percent) per learning rate
time_cost = []  # wall-clock seconds per learning rate (train + eval, as before)

# BUG FIX: original misspelled "CUDA_DEVIC_ORDER"; the variable CUDA actually
# reads is CUDA_DEVICE_ORDER, so the GPU-selection setting was silently ignored.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Device-agnostic: original called .cuda() unconditionally and crashed on
# CPU-only machines; behavior on a GPU machine is unchanged.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# MNIST train/test sets as tensors; downloaded into ./data/ on first use.
train_dataset = dsets.MNIST(root='./data/', train=True,
                            transform=transforms.ToTensor(), download=True)
test_dataset = dsets.MNIST(root='./data/', train=False,
                           transform=transforms.ToTensor(), download=True)

# Mini-batch iterators; shuffle so each epoch sees a new sample order.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=True)


class CNN(nn.Module):
    """Two conv blocks (Conv2d -> BatchNorm -> ReLU -> MaxPool) + linear head.

    Input:  (N, 1, 28, 28) MNIST images.
    Output: (N, 10) class logits.
    """

    def __init__(self):
        super(CNN, self).__init__()
        # 1x28x28 -> conv(5x5, pad=2) -> 16x28x28 -> pool(2) -> 16x14x14
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # 16x14x14 -> conv(5x5, pad=2) -> 32x14x14 -> pool(2) -> 32x7x7
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # flatten to (N, 7*7*32) for the head
        return self.fc(out)


for i in learning_rate:
    # BUG FIX: the original built one CNN before the loop and kept training
    # it across all five learning rates, so every run after the first
    # continued from the previous weights and the comparison was meaningless.
    # Re-initialize the network (and optimizer state) for each rate.
    cnn = CNN().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adagrad(cnn.parameters(), lr=i)

    start = time.time()
    # BUG FIX: original called cnn.eval() after testing but never switched
    # back, so from the second rate onward BatchNorm trained in eval mode.
    cnn.train()
    for e in range(epochs):
        for images, labels in train_loader:
            # Variable() is deprecated and a no-op in modern PyTorch;
            # plain tensors carry autograd state themselves.
            images = images.to(device)
            labels = labels.to(device)
            optimizer.zero_grad()
            outputs = cnn(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

    # Evaluate on the test set; no_grad skips autograd bookkeeping.
    cnn.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(device)
            outputs = cnn(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            # .item() keeps the running count a Python int, not a 0-d tensor
            correct += (predicted.cpu() == labels).sum().item()
    end = time.time()  # as in the original, time_cost includes evaluation

    time_cost.append(end - start)
    accuracy.append(100 * correct / total)
    # Checkpoint the weights of the most recently trained model.
    torch.save(cnn.state_dict(), 'CNN.pkl')

# BUG FIX: the published listing lost the backslashes ('nTime', 'nThe');
# restore the intended leading newlines.
print('\nTime cost are: ', time_cost)
print('\nThe accuracy are:', accuracy)
最后
以上就是粗暴母鸡最近收集整理的关于卷积神经网络实例的全部内容,更多相关卷积神经网络实例内容请搜索靠谱客的其他文章。
本图文内容来源于网友提供,作为学习参考使用,或来自网络收集整理,版权属于原作者所有。
发表评论 取消回复