MindSpore 25-Day Learning Camp, Day 25 | ResNet for Transfer Learning
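
All of the code below assumes the following imports. This is a minimal sketch based on MindSpore 2.x module paths, so adjust it to your installed version if needed.

python
import os
import time
from typing import List, Optional, Type, Union

import numpy as np
import matplotlib.pyplot as plt

import mindspore as ms
import mindspore.nn as nn
import mindspore.dataset as ds
from mindspore import train, load_checkpoint, load_param_into_net
from mindspore.common.initializer import Normal
from mindspore.dataset import vision
from download import download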

First, we set a few hyperparameters.

python
batch_size = 18    # batch size
image_size = 224   # input image size
num_epochs = 5     # number of training epochs
lr = 0.001         # learning rate
momentum = 0.9     # SGD momentum
workers = 4        # number of parallel data-loading workers

We will train the network to classify dogs and wolves using the Canidae dataset.
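
The code below references data_path_train and data_path_val without defining them. A plausible setup, following the download URL and directory layout of the official MindSpore Canidae tutorial (treat both as assumptions to verify against the current docs), looks like this:

python
# Assumed dataset URL and folder layout from the MindSpore tutorial; verify before use.
dataset_url = "https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/datasets/intermediate/Canidae_data.zip"
download(dataset_url, "./datasets-Canidae", kind="zip", replace=True)

data_path_train = "./datasets-Canidae/data/Canidae/train/"
data_path_val = "./datasets-Canidae/data/Canidae/val/"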

python
def create_dataset_canidae(dataset_path, usage):
    """Load the Canidae data and build the augmentation pipeline."""
    data_set = ds.ImageFolderDataset(dataset_path,
                                     num_parallel_workers=workers,
                                     shuffle=True)
    # augmentation
    mean = [0.485 * 255, 0.456 * 255, 0.406 * 255]
    std = [0.229 * 255, 0.224 * 255, 0.225 * 255]
    scale = 32
    if usage == 'train':
        trans = [
            vision.RandomCropDecodeResize(size=image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)),
            vision.RandomHorizontalFlip(prob=0.5),
            vision.Normalize(mean=mean, std=std),
            vision.HWC2CHW()
        ]
    else:
        trans = [
            vision.Decode(),
            vision.Resize(image_size + scale),
            vision.CenterCrop(image_size),
            vision.Normalize(mean=mean, std=std),
            vision.HWC2CHW()
        ]
    data_set = data_set.map(
        operations=trans,
        input_columns='image',
        num_parallel_workers=workers
    )
    # batch op
    data_set = data_set.batch(batch_size)
    return data_set

dataset_train = create_dataset_canidae(data_path_train, 'train')
step_size_train = dataset_train.get_dataset_size()
dataset_val = create_dataset_canidae(data_path_val, 'val')
step_size_val = dataset_val.get_dataset_size()

We fetch one batch and visualize a few training samples. Since Normalize was applied in the pipeline, each image is de-normalized (multiplied by the std and shifted by the mean) before display.

python
class_name = {0: 'dogs', 1: 'wolves'}
# fetch one batch of preprocessed images and labels
images, labels = next(dataset_train.create_tuple_iterator())
plt.figure(figsize=(5, 5))
for i in range(4):
    data_image = images[i].asnumpy()
    data_label = labels[i]
    data_image = np.transpose(data_image, (1,2,0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    data_image = std*data_image + mean
    data_image = np.clip(data_image, 0, 1)
    plt.subplot(2,2,i+1)
    plt.imshow(data_image)
    plt.title(class_name[int(labels[i].asnumpy())])
    plt.axis('off')
plt.show()

Next, we define the ResNet model.

python
weight_init = Normal(mean=0, sigma=0.02)
gamma_init = Normal(mean=1, sigma=0.02)
python
class ResidualBlockBase(nn.Cell):
    expansion: int = 1  # the last conv has the same number of kernels as the first

    def __init__(self, in_channel: int, out_channel: int,
                 stride: int = 1, norm: Optional[nn.Cell] = None,
                 down_sample: Optional[nn.Cell] = None) -> None:
        super(ResidualBlockBase, self).__init__()
        if not norm:
            self.norm = nn.BatchNorm2d(out_channel)
        else:
            self.norm = norm
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, weight_init=weight_init)
        self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, weight_init=weight_init)
        self.relu = nn.ReLU()
        self.down_sample = down_sample

    def construct(self, x):
        identity = x
        out = self.conv1(x)    # 3x3 conv
        out = self.norm(out)
        out = self.relu(out)
        out = self.conv2(out)  # 3x3 conv
        out = self.norm(out)
        if self.down_sample is not None:
            identity = self.down_sample(x)
        out += identity        # add the shortcut
        out = self.relu(out)
        return out
class ResidualBlock(nn.Cell):
    expansion = 4  # the last conv has 4x the number of kernels of the first

    def __init__(self, in_channel: int, out_channel: int, stride: int = 1,
                 down_sample: Optional[nn.Cell] = None) -> None:
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=1, weight_init=weight_init)
        self.norm1 = nn.BatchNorm2d(out_channel)
        self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=stride, weight_init=weight_init)
        self.norm2 = nn.BatchNorm2d(out_channel)
        self.conv3 = nn.Conv2d(out_channel, out_channel * self.expansion,
                               kernel_size=1, weight_init=weight_init)
        self.norm3 = nn.BatchNorm2d(out_channel * self.expansion)
        self.relu = nn.ReLU()
        self.down_sample = down_sample

    def construct(self, x):
        identity = x
        out = self.conv1(x)    # 1x1 conv
        out = self.norm1(out)
        out = self.relu(out)
        out = self.conv2(out)  # 3x3 conv
        out = self.norm2(out)
        out = self.relu(out)
        out = self.conv3(out)  # 1x1 conv
        out = self.norm3(out)
        if self.down_sample is not None:
            identity = self.down_sample(x)
        out += identity        # add the shortcut
        out = self.relu(out)
        return out

def make_layer(last_out_channel, block: Type[Union[ResidualBlockBase, ResidualBlock]],
               channel: int, block_nums: int, stride: int = 1):
    down_sample = None
    if stride != 1 or last_out_channel != channel * block.expansion:
        # project the shortcut so its shape matches the block output
        down_sample = nn.SequentialCell([
            nn.Conv2d(last_out_channel, channel * block.expansion, kernel_size=1, stride=stride, weight_init=weight_init),
            nn.BatchNorm2d(channel * block.expansion, gamma_init=gamma_init)
        ])
    layers = []
    layers.append(block(last_out_channel, channel, stride=stride, down_sample=down_sample))
    in_channel = channel * block.expansion
    for _ in range(1, block_nums):
        layers.append(block(in_channel, channel))
    return nn.SequentialCell(layers)
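
As a quick sanity check (a hypothetical snippet, not part of the original tutorial), we can push a dummy feature map through one stage built by make_layer and confirm the bottleneck's 4x channel expansion:

python
# Build the first ResNet-50 stage: 3 bottleneck blocks with 64 base channels.
layer1 = make_layer(64, ResidualBlock, 64, 3)
dummy = ms.Tensor(np.ones((1, 64, 56, 56)), ms.float32)
# expansion = 4, so 64 input channels grow to 256; stride 1 keeps the spatial size
print(layer1(dummy).shape)  # expected: (1, 256, 56, 56)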

With the building blocks in place, we construct ResNet-50.

python
class ResNet(nn.Cell):
    def __init__(self, block: Type[Union[ResidualBlockBase, ResidualBlock]],
                 layer_nums: List[int], num_classes: int, input_channel: int) -> None:
        super(ResNet, self).__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, weight_init=weight_init)
        self.norm = nn.BatchNorm2d(64)
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
        self.layer1 = make_layer(64, block, 64, layer_nums[0])
        self.layer2 = make_layer(64 * block.expansion, block, 128, layer_nums[1], stride=2)
        self.layer3 = make_layer(128 * block.expansion, block, 256, layer_nums[2], stride=2)
        self.layer4 = make_layer(256 * block.expansion, block, 512, layer_nums[3], stride=2)
        self.avg_pool = nn.AvgPool2d()
        self.flatten = nn.Flatten()
        self.fc = nn.Dense(in_channels=input_channel, out_channels=num_classes)

    def construct(self, x):
        x = self.conv1(x)
        x = self.norm(x)
        x = self.relu(x)
        x = self.max_pool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avg_pool(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x
def _resnet(model_url: str, block: Type[Union[ResidualBlockBase, ResidualBlock]],
            layers: List[int], num_classes: int, pretrained: bool,
            pretrained_ckpt: str, input_channel: int):
    model = ResNet(block, layers, num_classes, input_channel)
    if pretrained:
        # download the pretrained checkpoint and load it into the network
        download(url=model_url, path=pretrained_ckpt, replace=True)
        param_dict = load_checkpoint(pretrained_ckpt)
        load_param_into_net(model, param_dict)
    return model

def resnet50(num_classes: int = 1000, pretrained: bool = False):
    resnet50_url = "https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/models/application/resnet50_224_new.ckpt"
    resnet50_ckpt = './LoadPretrainedModel/resnet50_224_new.ckpt'
    return _resnet(resnet50_url, ResidualBlock, [3, 4, 6, 3], num_classes, pretrained, resnet50_ckpt, 2048)

# Freeze the backbone and fine-tune only the classification head.
net_work = resnet50(pretrained=True)
in_channels = net_work.fc.in_channels  # input size of the dense layer
head = nn.Dense(in_channels, 2)        # 2 classes: dogs and wolves
net_work.fc = head                     # reset the classification head
avg_pool = nn.AvgPool2d(kernel_size=7)
net_work.avg_pool = avg_pool           # reset the average pooling layer
for param in net_work.get_parameters():
    if param.name not in ['fc.weight', 'fc.bias']:
        param.requires_grad = False

opt = nn.Momentum(params=net_work.trainable_params(), learning_rate=lr, momentum=0.5)
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')

def forward_fn(inputs, targets):
    logits = net_work(inputs)
    loss = loss_fn(logits, targets)
    return loss

grad_fn = ms.value_and_grad(forward_fn, None, opt.parameters)

def train_step(inputs, targets):
    loss, grads = grad_fn(inputs, targets)
    opt(grads)
    return loss

model1 = train.Model(net_work, loss_fn, opt, metrics={'Accuracy': train.Accuracy()})
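
To confirm the freeze behaved as intended, a quick check (hypothetical, not in the original post) can list what is still trainable:

python
# After freezing the backbone, only the new head should require gradients.
print([p.name for p in net_work.trainable_params()])
# expected: ['fc.weight', 'fc.bias']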

Now we start training. Because we build on the pretrained backbone and only update the new head, training time drops dramatically.

python
dataset_train = create_dataset_canidae(data_path_train, 'train')
step_size_train = dataset_train.get_dataset_size()
dataset_val = create_dataset_canidae(data_path_val, 'val')
step_size_val = dataset_val.get_dataset_size()

num_epochs = 5
data_loader_train = dataset_train.create_tuple_iterator(num_epochs=num_epochs)
data_loader_val = dataset_val.create_tuple_iterator(num_epochs=num_epochs)
best_ckpt_dir = './BestCheckpoint'
best_ckpt_path = './BestCheckpoint/resnet50-best-freezing-param.ckpt'
python
best_acc = 0
for epoch in range(num_epochs):
    losses = []
    net_work.set_train()
    epoch_start = time.time()
    for i, (images, labels) in enumerate(data_loader_train):
        labels = labels.astype(ms.int32)  # sparse cross-entropy expects integer labels
        loss = train_step(images, labels)
        losses.append(loss)

    acc = model1.eval(dataset_val)['Accuracy']
    epoch_end = time.time()
    epoch_seconds = (epoch_end - epoch_start) * 1000
    step_seconds = epoch_seconds / step_size_train
    print('-' * 20)
    print('Epoch: [%3d/%3d], Average Train Loss: [%5.3f], Accuracy: [%5.3f]' % (
        epoch + 1, num_epochs, sum(losses) / len(losses), acc))
    print('epoch time: %5.3f ms, per step time: %5.3f ms' % (epoch_seconds, step_seconds))

    if acc > best_acc:
        best_acc = acc
        if not os.path.exists(best_ckpt_dir):
            os.mkdir(best_ckpt_dir)
        ms.save_checkpoint(net_work, best_ckpt_path)

print('=' * 80)
print(f'End of validation. The best accuracy is: {best_acc:5.3f}, '
      f'the best ckpt file is saved at {best_ckpt_path}', flush=True)

Finally, we visualize the model's predictions on the validation set. A blue title marks a correct prediction; a red title marks a wrong one.

python
def visualize_model(best_ckpt_path, val_ds):
    # rebuild the fine-tuned network and load the best checkpoint
    net = resnet50()
    in_channels = net.fc.in_channels
    head = nn.Dense(in_channels, 2)
    net.fc = head
    avg_pool = nn.AvgPool2d(kernel_size=7)
    net.avg_pool = avg_pool
    param_dict = ms.load_checkpoint(best_ckpt_path)
    ms.load_param_into_net(net, param_dict)
    model = train.Model(net)
    # take one validation batch and predict
    data = next(val_ds.create_dict_iterator())
    images = data['image'].asnumpy()
    labels = data['label'].asnumpy()
    class_name = {0: 'dogs', 1: 'wolves'}
    output = model.predict(ms.Tensor(data['image']))
    pred = np.argmax(output.asnumpy(), axis=1)
    plt.figure(figsize=(5, 5))
    for i in range(4):
        plt.subplot(2, 2, i + 1)
        # blue title for a correct prediction, red for a wrong one
        color = 'blue' if pred[i] == labels[i] else 'red'
        picture_show = np.transpose(images[i], (1, 2, 0))
        mean = np.array([0.485, 0.456, 0.406])
        std = np.array([0.229, 0.224, 0.225])
        picture_show = std * picture_show + mean
        picture_show = np.clip(picture_show, 0, 1)
        plt.title(class_name[int(pred[i])], color=color)
        plt.imshow(picture_show)
        plt.axis('off')
    plt.show()

visualize_model(best_ckpt_path, dataset_val)
