2022-11-30
GoogLeNet (InceptionV1) Implemented in PyTorch
PyTorch code: the network stacks Inception modules, each of which runs four parallel branches (a 1x1 convolution, a 1x1 reduction followed by a 3x3 convolution, a 1x1 reduction followed by a 5x5 convolution, and a 3x3 max pool followed by a 1x1 convolution) and concatenates their outputs along the channel dimension. Two auxiliary classifiers are attached only during training.
import torch
import torch.nn as nn
import torchvision


def ConvBNReLU(in_channels, out_channels, kernel_size):
    return nn.Sequential(
        nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
                  stride=1, padding=kernel_size // 2),
        nn.BatchNorm2d(out_channels),
        nn.ReLU6(inplace=True)
    )


class InceptionV1Module(nn.Module):
    def __init__(self, in_channels, out_channels1, out_channels2reduce, out_channels2,
                 out_channels3reduce, out_channels3, out_channels4):
        super(InceptionV1Module, self).__init__()

        # branch 1: 1x1 convolution
        self.branch1_conv = ConvBNReLU(in_channels=in_channels, out_channels=out_channels1, kernel_size=1)

        # branch 2: 1x1 reduction followed by 3x3 convolution
        self.branch2_conv1 = ConvBNReLU(in_channels=in_channels, out_channels=out_channels2reduce, kernel_size=1)
        self.branch2_conv2 = ConvBNReLU(in_channels=out_channels2reduce, out_channels=out_channels2, kernel_size=3)

        # branch 3: 1x1 reduction followed by 5x5 convolution
        self.branch3_conv1 = ConvBNReLU(in_channels=in_channels, out_channels=out_channels3reduce, kernel_size=1)
        self.branch3_conv2 = ConvBNReLU(in_channels=out_channels3reduce, out_channels=out_channels3, kernel_size=5)

        # branch 4: 3x3 max pool followed by 1x1 convolution
        self.branch4_pool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.branch4_conv1 = ConvBNReLU(in_channels=in_channels, out_channels=out_channels4, kernel_size=1)

    def forward(self, x):
        out1 = self.branch1_conv(x)
        out2 = self.branch2_conv2(self.branch2_conv1(x))
        out3 = self.branch3_conv2(self.branch3_conv1(x))
        out4 = self.branch4_conv1(self.branch4_pool(x))
        # concatenate the four branches along the channel dimension
        out = torch.cat([out1, out2, out3, out4], dim=1)
        return out


class InceptionAux(nn.Module):
    # auxiliary classifier head used during training
    def __init__(self, in_channels, out_channels):
        super(InceptionAux, self).__init__()
        self.auxiliary_avgpool = nn.AvgPool2d(kernel_size=5, stride=3)
        self.auxiliary_conv1 = ConvBNReLU(in_channels=in_channels, out_channels=128, kernel_size=1)
        self.auxiliary_linear1 = nn.Linear(in_features=128 * 4 * 4, out_features=1024)
        self.auxiliary_relu = nn.ReLU6(inplace=True)
        self.auxiliary_dropout = nn.Dropout(p=0.7)
        self.auxiliary_linear2 = nn.Linear(in_features=1024, out_features=out_channels)

    def forward(self, x):
        x = self.auxiliary_conv1(self.auxiliary_avgpool(x))
        x = x.view(x.size(0), -1)
        x = self.auxiliary_relu(self.auxiliary_linear1(x))
        out = self.auxiliary_linear2(self.auxiliary_dropout(x))
        return out


class InceptionV1(nn.Module):
    def __init__(self, num_classes=1000, stage='train'):
        super(InceptionV1, self).__init__()
        self.stage = stage

        self.block1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1, stride=1),
            nn.BatchNorm2d(64),
        )

        self.block2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=192, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(192),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        # inception 3a, 3b
        self.block3 = nn.Sequential(
            InceptionV1Module(in_channels=192, out_channels1=64, out_channels2reduce=96, out_channels2=128,
                              out_channels3reduce=16, out_channels3=32, out_channels4=32),
            InceptionV1Module(in_channels=256, out_channels1=128, out_channels2reduce=128, out_channels2=192,
                              out_channels3reduce=32, out_channels3=96, out_channels4=64),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        # inception 4a (first auxiliary classifier branches off here)
        self.block4_1 = InceptionV1Module(in_channels=480, out_channels1=192, out_channels2reduce=96, out_channels2=208,
                                          out_channels3reduce=16, out_channels3=48, out_channels4=64)

        if self.stage == 'train':
            self.aux_logits1 = InceptionAux(in_channels=512, out_channels=num_classes)

        # inception 4b, 4c, 4d (second auxiliary classifier branches off after 4d)
        self.block4_2 = nn.Sequential(
            InceptionV1Module(in_channels=512, out_channels1=160, out_channels2reduce=112, out_channels2=224,
                              out_channels3reduce=24, out_channels3=64, out_channels4=64),
            InceptionV1Module(in_channels=512, out_channels1=128, out_channels2reduce=128, out_channels2=256,
                              out_channels3reduce=24, out_channels3=64, out_channels4=64),
            InceptionV1Module(in_channels=512, out_channels1=112, out_channels2reduce=144, out_channels2=288,
                              out_channels3reduce=32, out_channels3=64, out_channels4=64),
        )

        if self.stage == 'train':
            self.aux_logits2 = InceptionAux(in_channels=528, out_channels=num_classes)

        # inception 4e
        self.block4_3 = nn.Sequential(
            InceptionV1Module(in_channels=528, out_channels1=256, out_channels2reduce=160, out_channels2=320,
                              out_channels3reduce=32, out_channels3=128, out_channels4=128),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        )

        # inception 5a, 5b
        self.block5 = nn.Sequential(
            InceptionV1Module(in_channels=832, out_channels1=256, out_channels2reduce=160, out_channels2=320,
                              out_channels3reduce=32, out_channels3=128, out_channels4=128),
            InceptionV1Module(in_channels=832, out_channels1=384, out_channels2reduce=192, out_channels2=384,
                              out_channels3reduce=48, out_channels3=128, out_channels4=128),
        )

        self.avgpool = nn.AvgPool2d(kernel_size=7, stride=1)
        self.dropout = nn.Dropout(p=0.4)
        self.linear = nn.Linear(in_features=1024, out_features=num_classes)

    def forward(self, x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        aux1 = x = self.block4_1(x)
        aux2 = x = self.block4_2(x)
        x = self.block4_3(x)
        out = self.block5(x)
        out = self.avgpool(out)
        out = self.dropout(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        if self.stage == 'train':
            aux1 = self.aux_logits1(aux1)
            aux2 = self.aux_logits2(aux2)
            return aux1, aux2, out
        else:
            return out


if __name__ == '__main__':
    model = InceptionV1()
    print(model)

    input = torch.randn(1, 3, 224, 224)
    aux1, aux2, out = model(input)
    print(aux1.shape)
    print(aux2.shape)
    print(out.shape)
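Because the forward pass returns (aux1, aux2, out) in training mode, the two auxiliary logits can be folded into the loss. Below is a minimal sketch of one training step; the cross-entropy loss, SGD optimizer, dummy batch, and the 0.3 auxiliary-loss weight (the value used in the original GoogLeNet paper) are illustrative assumptions, not part of the code above.

model = InceptionV1(num_classes=1000, stage='train')
criterion = nn.CrossEntropyLoss()                                        # assumed loss function
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)   # assumed optimizer

images = torch.randn(8, 3, 224, 224)    # dummy batch for illustration
labels = torch.randint(0, 1000, (8,))   # dummy labels for illustration

aux1, aux2, out = model(images)
# main loss plus both auxiliary losses, each weighted by 0.3 as in the GoogLeNet paper
loss = criterion(out, labels) + 0.3 * criterion(aux1, labels) + 0.3 * criterion(aux2, labels)

optimizer.zero_grad()
loss.backward()
optimizer.step()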
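For inference, constructing the model with any stage value other than 'train' skips the auxiliary heads, so the forward pass returns a single logits tensor. A short sketch (stage='test' is just one such choice):

model = InceptionV1(num_classes=1000, stage='test')
model.eval()                                       # switch BatchNorm/Dropout to eval behaviour
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)                                   # expected: torch.Size([1, 1000])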