MindSpore Notes: Installation and a LeNet-5 Handwritten Digit Recognition Example

I. Installing MindSpore (Windows)

1. Install Python 3.7

Install Python 3.7.5. You can download it from https://mirrors.huaweicloud.com/python/3.7.5/python-3.7.5-amd64.exe, double-click the installer, choose "Customize installation", and add Python to the system environment variables (PATH).

In the optional features step, make sure pip and the launcher are installed: check the pip option and the py launcher option, then click "Next".

Hello World!

In a cmd window, copy and enter the following command, then press Enter to start the Python interpreter:

python

Copy and enter the following Python code, which prints "Hello World!", and press Enter:

print("Hello World!")

It will output: Hello World!

2. Install MindSpore with pip

Copy the command below into the cmd window and press Enter to run it.

The pip tool installed earlier will then install MindSpore 1.5.0 (for Python 3.7.5) together with the third-party dependencies it needs. Depending on network speed, the whole process usually takes about 10 to 20 minutes.

pip install https://ms-release.obs.cn-north-4.myhuaweicloud.com/1.5.0/MindSpore/cpu/x86_64/mindspore-1.5.0-cp37-cp37m-win_amd64.whl --trusted-host ms-release.obs.cn-north-4.myhuaweicloud.com -i https://pypi.tuna.tsinghua.edu.cn/simple

Verify the installation

Copy and enter the following command to print the installed MindSpore version.

python -c "import mindspore;mindspore.run_check()"

If the output reports version 1.5.0, MindSpore has been installed successfully.
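
Another quick sanity check, beyond run_check, is to run a small tensor operation. The snippet below is only an illustrative sketch (the Add operator and the 2x2 shapes are arbitrary choices, not part of the official verification command):

import numpy as np
import mindspore.ops as ops
from mindspore import Tensor

# Element-wise addition of two small tensors; a working install prints a 2x2 matrix of 2.0s.
x = Tensor(np.ones([2, 2]).astype(np.float32))
y = Tensor(np.ones([2, 2]).astype(np.float32))
print(ops.Add()(x, y))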

Reference: MindSpore official website

3. Install PyCharm

Download the Community edition from the PyCharm website at https://www.jetbrains.com/pycharm/download and install it.

II. Implementing LeNet-5 Handwritten Digit Recognition with MindSpore

1. Download the MNIST dataset

Training set (train): train-images-idx3-ubyte and train-labels-idx1-ubyte

Test set (test): t10k-images-idx3-ubyte and t10k-labels-idx1-ubyte

Directory structure:

./MNIST_Data
├── test
│   ├── t10k-images-idx3-ubyte
│   └── t10k-labels-idx1-ubyte
└── train
    ├── train-images-idx3-ubyte
    └── train-labels-idx1-ubyte
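
If you are unsure whether the files ended up in the right places, a short sanity check like the sketch below (not part of the original tutorial; it assumes the MNIST_Data layout shown above) lists the files and counts the samples with MnistDataset:

import os
import mindspore.dataset as ds

for split in ("train", "test"):
    path = os.path.join("./MNIST_Data", split)
    # list the raw idx-ubyte files in each split
    print(split, "files:", sorted(os.listdir(path)))
    # MnistDataset reads those files directly; expect 60000 train and 10000 test samples
    print(split, "samples:", ds.MnistDataset(path).get_dataset_size())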

2. Install matplotlib

pip install matplotlib

3. Configure the Python interpreter in PyCharm

4. Run the LeNet-5 handwritten digit recognition

1. Create a new Python project

2. Copy the MNIST_Data folder and test.py into the project

3. Run the script (the full test.py is listed below)

# test.py
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import mindspore.dataset as ds
from mindspore import Tensor
import mindspore.common.parameter
from mindspore import context
import mindspore.nn as nn
from mindspore.common.initializer import Normal

context.set_context(mode=context.PYNATIVE_MODE)
train_data_path = "./MNIST_Data/train"
test_data_path = "./MNIST_Data/test"
# mnist_ds = ds.MnistDataset(train_data_path)
# The generated dataset has two columns [img, label]; the image column is uint8,
# since pixel values lie in (0, 255). Note that both columns are of type Tensor.


#print('The type of mnist_ds:', type(mnist_ds))
#print("Number of pictures contained in the mnist_ds:", mnist_ds.get_dataset_size())
#int, number of batches.

#print(mnist_ds.get_batch_size()) #Return the size of batch.

# dic_ds = mnist_ds.create_dict_iterator()  # dict iterator over the dataset; outputs are Tensors
# item = next(dic_ds)
# img = item["image"].asnumpy()  # asnumpy() converts a Tensor to a NumPy array, because
#                                # matplotlib.pyplot cannot accept Tensor arguments
#
# label = item["label"].asnumpy()



#print("The item of mnist_ds:", item.keys())
#print("Tensor of image in item:", img.shape)
#print("The label of item:", label)

# plt.imshow(np.squeeze(img))  # squeeze removes size-1 dimensions; plt.imshow() renders the image
# plt.title("number:%s" % item["label"].asnumpy())
# plt.show()  # display the image



import mindspore.dataset.vision.c_transforms as CV
import mindspore.dataset.transforms.c_transforms as C
from mindspore.dataset.vision import Inter
from mindspore import dtype as mstype
def create_dataset(data_path, batch_size=32, repeat_size=1,
                   num_parallel_workers=1):
    mnist_ds = ds.MnistDataset(data_path)
    # Parameters used by the data augmentation and preprocessing steps
    resize_height, resize_width = 32, 32
    rescale = 1.0 / 255.0
    shift = 0.0
    rescale_nml = 1 / 0.3081
    shift_nml = -1 * 0.1307 / 0.3081
    # Instantiate the corresponding augmentation operators from the parameters above
    resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)
    rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)
    rescale_op = CV.Rescale(rescale, shift)
    hwc2chw_op = CV.HWC2CHW()
    type_cast_op = C.TypeCast(mstype.int32)
    # Map (apply) the operators onto the corresponding columns of the dataset (image, label)
    mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns="label", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=resize_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=rescale_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns="image", num_parallel_workers=num_parallel_workers)
    # Shuffle, batch and repeat the resulting dataset
    buffer_size = 10000
    mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size)
    mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)
    mnist_ds = mnist_ds.repeat(repeat_size)
    return mnist_ds


ms_dataset = create_dataset(train_data_path)

# print('Number of each group',ms_dataset.get_batch_size())
# print('Number of groups in the dataset:', ms_dataset.get_dataset_size())
#
#
# data = next(ms_dataset.create_dict_iterator(output_numpy=True))
# images = data["image"]
# labels = data["label"]
# print('Tensor of image:', images.shape)
# print('Labels:', labels)

# count = 1
# for i in images:
#     plt.subplot(4, 8, count)
#     plt.imshow(np.squeeze(i))
#     plt.title('num:%s' % labels[count - 1])
#     plt.xticks([])
#     count += 1
#     plt.axis("off")
# plt.show()

class LeNet5(nn.Cell):
    def __init__(self, num_class=10, num_channel=1):
        super(LeNet5, self).__init__()  # call the parent nn.Cell __init__

        # For nn.Conv2d, the first argument is the number of input channels (the channels of a
        # single filter), the second is the number of output channels (the number of filters),
        # and the third is the filter size. It can be an int tuple, but since filters are usually
        # square with an odd side length, a single int is enough. pad_mode selects the convolution
        # style: 'valid' means no padding; 'same' convolution, which keeps the output size, is also
        # popular. The weight initializer does not need to be set here because it defaults to Normal.
        self.conv1 = nn.Conv2d(num_channel, 6, 5, pad_mode='valid')
        self.conv2 = nn.Conv2d(6, 16, 5, pad_mode='valid')

        # nn.Dense is a fully connected layer: the first argument is the input dimension, the
        # second is the output dimension, and the third initializes the trainable weight matrix W
        # (default normal)
        self.fc1 = nn.Dense(16 * 5 * 5, 120, weight_init=Normal(0.02))
        self.fc2 = nn.Dense(120, 84, weight_init=Normal(0.02))
        self.fc3 = nn.Dense(84, num_class, weight_init=Normal(0.02))

        # nn.ReLU() non-linear activation, which usually works better than the sigmoid used in the original paper
        self.relu = nn.ReLU()
        # nn.MaxPool2d defines the max-pooling layer; kernel_size is the window size and stride is
        # the step. Setting both to 2 halves the width and height of the feature map.
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        # nn.Flatten flattens the input into a vector, i.e. collapses the spatial dimensions
        self.flatten = nn.Flatten()

    def construct(self, x):
        # Forward pass of the input x through the LeNet-5 network
        x = self.max_pool2d(self.relu(self.conv1(x)))
        x = self.max_pool2d(self.relu(self.conv2(x)))
        x = self.flatten(x)
        x = self.relu(self.fc1(x))
        x = self.relu(self.fc2(x))
        x = self.fc3(x)
        return x


# print(LeNet5())
from mindspore.nn import SoftmaxCrossEntropyWithLogits

lr = 0.01  # learning rate: controls the step size of gradient descent so the parameters train more smoothly
momentum = 0.9
network = LeNet5()

# Use the popular Momentum optimizer:
#   v_{t+1} = u * v_t + gradients
#   p_{t+1} = p_t - (gradients * lr + v_{t+1} * u * lr)   (with Nesterov)
#   p_{t+1} = p_t - lr * v_{t+1}                          (without Nesterov)
# where gradients, lr, p, v and u denote the gradients, learning rate, parameters, moments and momentum.
net_opt = nn.Momentum(network.trainable_params(), lr, momentum)

# Equivalent to a softmax classifier.
# sparse specifies whether the labels use the sparse format (default False); reduction is how the
# loss is reduced: 'mean' averages the loss over the batch, which is the usual choice.
net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

from mindspore.train.callback import Callback


# custom callback function
class StepLossAccInfo(Callback):
    def __init__(self, model, eval_dataset, steps_loss, steps_eval):
        self.model = model                # the high-level Model object
        self.eval_dataset = eval_dataset  # the evaluation dataset
        self.steps_loss = steps_loss
        # collects the step/loss relation, format {"step": [], "loss_value": []}, defined later
        self.steps_eval = steps_eval
        # collects the step/accuracy relation, format {"step": [], "acc": []}, defined later

    def step_end(self, run_context):
        cb_params = run_context.original_args()
        # cur_epoch_num is defined in CallbackParam and gives the current epoch; one epoch means
        # every sample in the training set has been seen once
        cur_epoch = cb_params.cur_epoch_num

        # likewise, cur_step_num gives the current step within the epoch
        cur_step = (cur_epoch - 1) * 1875 + cb_params.cur_step_num
        self.steps_loss["loss_value"].append(str(cb_params.net_outputs))
        self.steps_loss["step"].append(str(cur_step))
        if cur_step % 125 == 0:
            # model.eval returns the loss and metric values on the evaluation dataset as a dict
            acc = self.model.eval(self.eval_dataset, dataset_sink_mode=False)
            self.steps_eval["step"].append(cur_step)
            self.steps_eval["acc"].append(acc["Accuracy"])


from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.nn import Accuracy
from mindspore import Model

epoch_size = 1  # number of passes over the training set; here we iterate over it only once
eval_dataset = create_dataset(test_data_path)
model_path = "./models/ckpt/mindspore_quick_start/"
# The high-level Model API ties the LeNet-5 network, the loss function and the optimizer together
# into an object with training and inference capabilities.
# The metrics argument is the set of metrics the model evaluates during training and testing;
# here it is set to "Accuracy".
model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})

# Configuration and path for saving the trained model parameters
config_ck = CheckpointConfig(save_checkpoint_steps=375, keep_checkpoint_max=16)
ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", directory=model_path, config=config_ck)

# The data structures expected by the callback class defined above
steps_loss = {"step": [], "loss_value": []}
steps_eval = {"step": [], "acc": []}
# Instantiate StepLossAccInfo with the model and the containers above
step_loss_acc_info = StepLossAccInfo(model, eval_dataset, steps_loss, steps_eval)

# Call Model.train to start training. LossMonitor(125) prints the loss every 125 steps.
# dataset_sink_mode enables data sinking, which is not supported on CPU, so it is set to False here.
model.train(epoch_size, ms_dataset, callbacks=[ckpoint_cb, LossMonitor(125), step_loss_acc_info], dataset_sink_mode=False)

from mindspore import load_checkpoint, load_param_into_net

def test_net(network, model):
    """Define the evaluation method."""
    print("============== Starting Testing ==============")
    # load the saved checkpoint for evaluation
    param_dict = load_checkpoint("./models/ckpt/mindspore_quick_start/checkpoint_lenet-1_1875.ckpt")
    # load the parameters into the network
    load_param_into_net(network, param_dict)
    # evaluate on the testing dataset
    acc = model.eval(eval_dataset, dataset_sink_mode=False)
    print("============== Accuracy:{} ==============".format(acc))

test_net(network, model)


ds_test = eval_dataset.create_dict_iterator()
data = next(ds_test)
images = data["image"].asnumpy()
labels = data["label"].asnumpy()

output = model.predict(Tensor(data['image']))
# predict with the trained model; the output holds the scores for each class 0-9,
# so the predicted digit is the index of the largest score
pred = np.argmax(output.asnumpy(), axis=1)
err_num = []
index = 1
for i in range(len(labels)):
    plt.subplot(4, 8, i + 1)
    color = 'blue' if pred[i] == labels[i] else 'red'
    plt.title("pre:{}".format(pred[i]), color=color)
    plt.imshow(np.squeeze(images[i]))
    plt.axis("off")
    if color == 'red':
        index = 0
        print("Row {}, column {} is incorrectly identified as {}, the correct value should be {}".format(int(i / 8) + 1, i % 8 + 1, pred[i], labels[i]), '\n')
if index:
    print("All the figures in this group are predicted correctly!")
print(pred, "<--Predicted figures")
print(labels, "<--The right number")
plt.show()
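
The steps_loss and steps_eval dictionaries filled in by StepLossAccInfo are collected but never displayed in the script above. If you want to look at the training curves, a small matplotlib sketch along these lines (run in the same session, right after training finishes) should work; it assumes the stored loss strings parse cleanly as floats:

# Loss curve: the callback stored step numbers and loss values as strings.
steps = [int(s) for s in steps_loss["step"]]
losses = [float(v) for v in steps_loss["loss_value"]]  # adjust parsing if str(Tensor) adds extra text
plt.figure()
plt.plot(steps, losses, color="red")
plt.xlabel("step")
plt.ylabel("loss")
plt.title("Training loss per step")
plt.show()

# Accuracy curve: one evaluation every 125 steps.
plt.figure()
plt.plot(steps_eval["step"], steps_eval["acc"], marker="o")
plt.xlabel("step")
plt.ylabel("accuracy")
plt.title("Evaluation accuracy every 125 steps")
plt.show()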

Reference: MindSpore Quick Start

A first experience with handwritten digit recognition based on MindSpore (Part 1) / MindSpore annotated commentary (gitlink.org.cn)