1DCNN添加Residual Block, python代码实现及讲解

注:以下代码基于 Jupyter Notebook 编辑器与 PyTorch 实现。
以下为 Residual Block 示意图(原文配图),接下来将实现左边的示意图。

构建一个 ResNet_basic_block类

import torch
import torch.nn as nn
import torch.nn.functional as F
class ResNet_basic_block(nn.Module):
    """1-D residual basic block: Conv1d-BN-ReLU -> Conv1d-BN, plus a shortcut.

    Input/output: (batch, channels, length); the sequence length is preserved.

    Fixes vs. the original article code:
    - Conv1d/BatchNorm1d instead of the 2-D variants, so 3-D
      (batch, channel, length) inputs actually work.
    - bn1 is applied exactly once (the original applied it twice).
    - A 1x1 projection shortcut is used when in_channels != out_channels,
      so the residual addition is valid for the 32 -> 64 example below.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # 3-tap convolution; padding=1 keeps the sequence length unchanged.
        self.conv1 = nn.Conv1d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=3,
                               padding=1,
                               bias=False)  # bias is redundant before BN
        self.bn1 = nn.BatchNorm1d(num_features=out_channels)
        self.conv2 = nn.Conv1d(in_channels=out_channels,
                               out_channels=out_channels,
                               kernel_size=3,
                               padding=1,
                               bias=False)
        self.bn2 = nn.BatchNorm1d(num_features=out_channels)
        if in_channels != out_channels:
            # Projection shortcut: 1x1 conv matches the channel count so the
            # residual addition is shape-compatible.
            self.shortcut = nn.Sequential(
                nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=False),
                nn.BatchNorm1d(out_channels),
            )
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        """Map (batch, in_channels, L) -> (batch, out_channels, L)."""
        out = F.relu(self.bn1(self.conv1(x)), inplace=True)
        out = self.bn2(self.conv2(out))
        # Residual addition followed by the standard post-add ReLU.
        return F.relu(out + self.shortcut(x), inplace=True)
# Quick sanity check: map 32 channels to 64, sequence length preserved.
block = ResNet_basic_block(in_channels=32, out_channels=64)
sample = torch.randn(16, 32, 1024)  # batch=16, channels=32, length=1024
print(sample.shape)
result = block(sample)
print(result.shape)
>>>
torch.Size([16, 32, 1024])
torch.Size([16, 64, 1024])

接下来在1DCNN中添加Residual Block模块

构建一个1DCNN,其名称为Net(),用作4分类


**注:**1DCNN模型参考学位论文《基于多尺度卷积神经网络的电机故障诊断方法研究-王威》

# 输入x = torch.rand(64, 1, 1024)的模型
class Net(nn.Module):
    """1-D CNN for 4-class classification of length-1024 single-channel signals.

    Expected input: (batch, 1, 1024). Output: (batch, 4) raw logits — apply
    softmax (or use CrossEntropyLoss, which expects logits) outside the model.
    Architecture follows the thesis referenced in the article (Wang Wei,
    multi-scale CNN for motor fault diagnosis).

    Fixes vs. the original article code:
    - Removed the unused `self.softmax = nn.Softmax()` (never called in
      forward, and constructing nn.Softmax without `dim` emits a warning).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Wide first layer: length 1024 -> 128, channels 1 -> 64.
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=32, stride=8, padding=12)
        self.pool1 = nn.MaxPool1d(kernel_size=2, stride=2)  # 128 -> 64
        # NOTE(review): defined but never used in forward(); kept so existing
        # state_dicts stay loadable — confirm whether BN after conv1 was intended.
        self.BN = nn.BatchNorm1d(num_features=64)

        # Three conv(+pool) stages with kernel 7; padding=3 preserves length,
        # each pool halves it: 64 -> 32 -> 16 -> 8.
        self.conv7_1 = nn.Conv1d(in_channels=64, out_channels=64, kernel_size=7, stride=1, padding=3)
        self.pool7_1 = nn.MaxPool1d(kernel_size=2, stride=2)
        self.conv7_2 = nn.Conv1d(in_channels=64, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.pool7_2 = nn.MaxPool1d(kernel_size=2, stride=2)
        self.conv7_3 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=7, stride=1, padding=3)
        self.pool7_3 = nn.MaxPool1d(kernel_size=2, stride=2)

        # Global max pool over the remaining 8 samples -> length 1.
        self.pool2 = nn.MaxPool1d(kernel_size=8, stride=1)
        # 256 channels * length 1 = 256 flattened features -> 4 classes.
        self.fc = nn.Linear(in_features=256, out_features=4)

    def forward(self, x):
        """Map (batch, 1, 1024) -> (batch, 4) logits."""
        x = self.conv1(x)                    # -> (batch, 64, 128)
        x = self.pool1(x)                    # -> (batch, 64, 64)

        x3 = self.pool7_1(self.conv7_1(x))   # -> (batch, 64, 32)
        x3 = self.pool7_2(self.conv7_2(x3))  # -> (batch, 128, 16)
        x3 = self.pool7_3(self.conv7_3(x3))  # -> (batch, 256, 8)

        x3 = self.pool2(x3)                  # -> (batch, 256, 1)
        x3 = torch.flatten(x3, 1)            # -> (batch, 256)
        return self.fc(x3)                   # logits, (batch, 4)

测试一下这个构建的1DCNN

# Sanity-check the plain 1-D CNN on a random batch.
cnn = Net()
signal = torch.randn(64, 1, 1024)  # batch=64, channel=1, length=1024
logits = cnn(signal)
print(logits.shape)
>>>结果输出
torch.Size([64, 4])

在Net()里添加Residual Block模块

# x = torch.rand(64, 1, 1024)的模型
class Net_with_Res(nn.Module):
    """The 1-D CNN above with two residual blocks inserted.

    Same contract as Net: input (batch, 1, 1024) -> logits (batch, 4).
    Res7_1 sits after the first conv+pool stage (64 channels), Res7_2 after
    the second (128 channels); both keep channel count and length unchanged.
    """

    def __init__(self):
        super(Net_with_Res, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=32, stride=8, padding=12)
        self.pool1 = nn.MaxPool1d(kernel_size=2, stride=2)
        # NOTE(review): defined but never used in forward() — same as in Net.
        self.BN = nn.BatchNorm1d(num_features=64)

        self.conv7_1 = nn.Conv1d(in_channels=64, out_channels=64, kernel_size=7, stride=1, padding=3)
        self.pool7_1 = nn.MaxPool1d(kernel_size=2, stride=2)
        # First residual block (64 channels in and out).
        self.Res7_1 = ResNet_basic_block(in_channels=64, out_channels=64)
        self.conv7_2 = nn.Conv1d(in_channels=64, out_channels=128, kernel_size=7, stride=1, padding=3)
        self.pool7_2 = nn.MaxPool1d(kernel_size=2, stride=2)
        # Second residual block (128 channels in and out).
        self.Res7_2 = ResNet_basic_block(in_channels=128, out_channels=128)
        self.conv7_3 = nn.Conv1d(in_channels=128, out_channels=256, kernel_size=7, stride=1, padding=3)
        self.pool7_3 = nn.MaxPool1d(kernel_size=2, stride=2)

        self.pool2 = nn.MaxPool1d(kernel_size=8, stride=1)
        self.fc = nn.Linear(in_features=256, out_features=4)

    def forward(self, x):
        out = self.pool1(self.conv1(x))                    # (batch, 64, 64)
        out = self.Res7_1(self.pool7_1(self.conv7_1(out)))  # (batch, 64, 32)
        out = self.Res7_2(self.pool7_2(self.conv7_2(out)))  # (batch, 128, 16)
        out = self.pool7_3(self.conv7_3(out))              # (batch, 256, 8)
        out = self.pool2(out)                              # (batch, 256, 1)
        out = out.view(out.size(0), -1)                    # (batch, 256)
        return self.fc(out)

测试一下这个加了Residual Block的1DCNN

# Sanity-check the residual-augmented 1-D CNN on a random batch.
res_cnn = Net_with_Res()
res_logits = res_cnn(torch.randn(64, 1, 1024))
print(res_logits.shape)
>>>结果输出
torch.Size([64, 4])

参考资料:基于多尺度卷积神经网络的电机故障诊断方法研究-王威-中国矿业大学

欢迎关注公众号:故障诊断与python学习

作者:故障诊断与python学习 原文地址:https://blog.csdn.net/m0_47410750/article/details/123089874

%s 个评论

要回复文章请先登录注册