精华内容
下载资源
问答
  • 自定义网络结构

    2020-12-30 16:28:22
    <div><p>您好,感谢您的工作。我对训练过程感到困惑, 这里面只加载了webface的目录,没看到它对应的list加载。请问这个list不需要吗,还是在别的地方加载了。...</p><p>该提问来源于开源项目...
  • 自定义网络结构 import os os.environ['CUDA_VISIBLE_DEVICES'] = '3' os.system('echo $CUDA_VISIBLE_DEVICES') import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data ...
    自定义网络结构 
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    os.system('echo $CUDA_VISIBLE_DEVICES')
     
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.utils.data as data
    import torch.optim as optim
    from torch.autograd import Variable
    import numpy as np
    from Encoding import load_feature
     
    class TransientModel(nn.Module):
        """Channel-squeezing stack of 1x1 convolutions: 16 -> 8 -> 4 -> 2 -> 1.

        Spatial dimensions are untouched (kernel_size=1); each stage is
        followed by a ReLU, so the output map is non-negative.
        """

        def __init__(self):
            super(TransientModel, self).__init__()
            # Four halving 1x1 convs; kept as separate attributes so
            # checkpoints keep the conv1..conv4 parameter names.
            self.conv1 = nn.Conv2d(16, 8, kernel_size=1)
            self.conv2 = nn.Conv2d(8, 4, kernel_size=1)
            self.conv3 = nn.Conv2d(4, 2, kernel_size=1)
            self.conv4 = nn.Conv2d(2, 1, kernel_size=1)

        def forward(self, x):
            """Apply each conv + ReLU in turn and return the 1-channel map."""
            for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
                x = F.relu(stage(x))
            return x
     
    class MyLoss(nn.Module):
        """MSE between a prediction and the time-averaged ground truth.

        `truth` is averaged over dim 1, then both tensors are flattened to
        rows of 2048 features (assumes a 2048-dim feature vector — TODO
        confirm against the feature extractor).
        """
        def __init__(self):
            super(MyLoss, self).__init__()
            # Fixed: original used Python-2 `print '1'`, a SyntaxError on
            # Python 3. Debug marker kept to preserve observable behavior.
            print('1')
        def forward(self, pred, truth):
            truth = torch.mean(truth, 1)      # average truth across dim 1
            truth = truth.view(-1, 2048)
            pred = pred.view(-1, 2048)
            # Squared error averaged over features (dim 1), then over batch.
            return torch.mean(torch.mean((pred - truth) ** 2, 1), 0)
     
    class MyTrainData(data.Dataset):
        """Dataset of per-video feature files listed in a text index.

        Each line of `video_file` is "<relative_path> <rest...>"; only the
        first space-separated token (the path) is used. The paths default to
        the original hard-coded locations, so existing callers are unchanged.
        """
        def __init__(self, video_path='/data/FrameFeature/Penn/',
                     video_file='/data/FrameFeature/Penn_train.txt'):
            self.video_path = video_path
            self.video_file = video_file
            # `with` guarantees the index file is closed even if reading fails.
            with open(self.video_file, 'r') as fp:
                lines = fp.readlines()
            self.video_name = [line.strip().split(' ')[0] for line in lines]
        def __len__(self):
            return len(self.video_name)
        def __getitem__(self, index):
            # load_feature comes from the project-local Encoding module.
            feat = load_feature(os.path.join(self.video_path, self.video_name[index]))
            # Append a trailing singleton axis.
            # NOTE(review): target shape inferred from expand_dims only — confirm.
            return np.expand_dims(feat, 2)
     
    def train(model, train_loader, myloss, optimizer, epoch):
        """Run one training epoch; the input is its own target (autoencoder-style).

        Progress is printed every 100 batches. Batches are moved to whatever
        device the model lives on, so the function now works on CPU as well as
        CUDA (the original hard-coded `.cuda()` and the long-deprecated
        `Variable` wrapper).
        """
        model.train()
        device = next(model.parameters()).device
        for batch_idx, train_data in enumerate(train_loader):
            train_data = train_data.to(device)
            optimizer.zero_grad()
            output = model(train_data)
            loss = myloss(output, train_data)
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                # loss.item() replaces loss.data.cpu().numpy()[0], which fails
                # on modern PyTorch (0-d arrays cannot be indexed with [0]).
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tloss: {:.6f}'.format(
                    epoch, batch_idx * len(train_data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
     
    def main():
        """Wire up model, loss, data and optimizer, then train for 10 epochs."""
        model = TransientModel().cuda()
        criterion = MyLoss()

        dataset = MyTrainData()
        loader = data.DataLoader(dataset, batch_size=1,
                                 shuffle=True, num_workers=1)

        optimizer = optim.SGD(model.parameters(), lr=0.001)

        for epoch in range(10):
            train(model, loader, criterion, optimizer, epoch)
     
    # Entry point: run the training driver when this file is executed directly.
    if __name__=='__main__':
        main()

     

    展开全文
  • 3、自定义网络 class TransientModel ( nn . Module ) : def __init__ ( self ) : super ( TransientModel , self ) . __init__ ( ) self . conv1 = nn . Conv2d ( 16 , 8 , kernel_size = 1 ) ...

    1、限定使用GPU的序号

    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    os.system('echo $CUDA_VISIBLE_DEVICES')
    

    2、导入相关头文件

    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.utils.data as data
    import torch.optim as optim
    from torch.autograd import Variable
    import numpy as np
    from Encoding import load_feature
    

    3、自定义网络

    class TransientModel(nn.Module):
        """Channel-squeezing stack of 1x1 convolutions: 16 -> 8 -> 4 -> 2 -> 1.

        Spatial dimensions are untouched (kernel_size=1); each stage is
        followed by a ReLU, so the output map is non-negative.
        """

        def __init__(self):
            super(TransientModel, self).__init__()
            # Four halving 1x1 convs; kept as separate attributes so
            # checkpoints keep the conv1..conv4 parameter names.
            self.conv1 = nn.Conv2d(16, 8, kernel_size=1)
            self.conv2 = nn.Conv2d(8, 4, kernel_size=1)
            self.conv3 = nn.Conv2d(4, 2, kernel_size=1)
            self.conv4 = nn.Conv2d(2, 1, kernel_size=1)

        def forward(self, x):
            """Apply each conv + ReLU in turn and return the 1-channel map."""
            for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
                x = F.relu(stage(x))
            return x
    

    4、自定义损失函数Loss

    class MyLoss(nn.Module):
        """MSE between a prediction and the time-averaged ground truth.

        `truth` is averaged over dim 1, then both tensors are flattened to
        rows of 2048 features (assumes a 2048-dim feature vector — TODO
        confirm against the feature extractor).
        """
        def __init__(self):
            super(MyLoss, self).__init__()
            # Fixed: original used Python-2 `print '1'`, a SyntaxError on
            # Python 3. Debug marker kept to preserve observable behavior.
            print('1')
        def forward(self, pred, truth):
            truth = torch.mean(truth, 1)      # average truth across dim 1
            truth = truth.view(-1, 2048)
            pred = pred.view(-1, 2048)
            # Squared error averaged over features (dim 1), then over batch.
            return torch.mean(torch.mean((pred - truth) ** 2, 1), 0)
    

    5、自定义数据读取

    class MyTrainData(data.Dataset):
        """Dataset of per-video feature files listed in a text index.

        Each line of `video_file` is "<relative_path> <rest...>"; only the
        first space-separated token (the path) is used. The paths default to
        the original hard-coded locations, so existing callers are unchanged.
        """
        def __init__(self, video_path='/data/FrameFeature/Penn/',
                     video_file='/data/FrameFeature/Penn_train.txt'):
            self.video_path = video_path
            self.video_file = video_file
            # `with` guarantees the index file is closed even if reading fails.
            with open(self.video_file, 'r') as fp:
                lines = fp.readlines()
            self.video_name = [line.strip().split(' ')[0] for line in lines]
        def __len__(self):
            return len(self.video_name)
        def __getitem__(self, index):
            # load_feature comes from the project-local Encoding module.
            feat = load_feature(os.path.join(self.video_path, self.video_name[index]))
            # Append a trailing singleton axis.
            # NOTE(review): target shape inferred from expand_dims only — confirm.
            return np.expand_dims(feat, 2)
    

    6、定义Train函数

    def train(model, train_loader, myloss, optimizer, epoch):
        """Run one training epoch; the input is its own target (autoencoder-style).

        Progress is printed every 100 batches. Batches are moved to whatever
        device the model lives on, so the function now works on CPU as well as
        CUDA (the original hard-coded `.cuda()` and the long-deprecated
        `Variable` wrapper).
        """
        model.train()
        device = next(model.parameters()).device
        for batch_idx, train_data in enumerate(train_loader):
            train_data = train_data.to(device)
            optimizer.zero_grad()
            output = model(train_data)
            loss = myloss(output, train_data)
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                # loss.item() replaces loss.data.cpu().numpy()[0], which fails
                # on modern PyTorch (0-d arrays cannot be indexed with [0]).
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tloss: {:.6f}'.format(
                    epoch, batch_idx * len(train_data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
    

    7、开始训练

    # Script entry point: train TransientModel for 10 epochs on the Penn
    # frame-feature dataset (batch_size=1, SGD with lr=0.001, GPU required).
    if __name__=='__main__':
        # main()
        model = TransientModel().cuda()  # model on the GPU chosen via CUDA_VISIBLE_DEVICES
        myloss= MyLoss()

        train_data = MyTrainData()
        train_loader = data.DataLoader(train_data,batch_size=1,shuffle=True,num_workers=1)

        optimizer = optim.SGD(model.parameters(),lr=0.001)

        for epoch in range(10):
            train(model, train_loader, myloss, optimizer, epoch)
    

    8、结果展示
    (此处原文配有训练结果截图,转载后图片未能显示)
    9、完整代码

    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    os.system('echo $CUDA_VISIBLE_DEVICES')
     
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.utils.data as data
    import torch.optim as optim
    from torch.autograd import Variable
    import numpy as np
    from Encoding import load_feature
     
    class TransientModel(nn.Module):
        """Channel-squeezing stack of 1x1 convolutions: 16 -> 8 -> 4 -> 2 -> 1.

        Spatial dimensions are untouched (kernel_size=1); each stage is
        followed by a ReLU, so the output map is non-negative.
        """

        def __init__(self):
            super(TransientModel, self).__init__()
            # Four halving 1x1 convs; kept as separate attributes so
            # checkpoints keep the conv1..conv4 parameter names.
            self.conv1 = nn.Conv2d(16, 8, kernel_size=1)
            self.conv2 = nn.Conv2d(8, 4, kernel_size=1)
            self.conv3 = nn.Conv2d(4, 2, kernel_size=1)
            self.conv4 = nn.Conv2d(2, 1, kernel_size=1)

        def forward(self, x):
            """Apply each conv + ReLU in turn and return the 1-channel map."""
            for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
                x = F.relu(stage(x))
            return x
     
    class MyLoss(nn.Module):
        """MSE between a prediction and the time-averaged ground truth.

        `truth` is averaged over dim 1, then both tensors are flattened to
        rows of 2048 features (assumes a 2048-dim feature vector — TODO
        confirm against the feature extractor).
        """
        def __init__(self):
            super(MyLoss, self).__init__()
            # Fixed: original used Python-2 `print '1'`, a SyntaxError on
            # Python 3. Debug marker kept to preserve observable behavior.
            print('1')
        def forward(self, pred, truth):
            truth = torch.mean(truth, 1)      # average truth across dim 1
            truth = truth.view(-1, 2048)
            pred = pred.view(-1, 2048)
            # Squared error averaged over features (dim 1), then over batch.
            return torch.mean(torch.mean((pred - truth) ** 2, 1), 0)
     
    class MyTrainData(data.Dataset):
        """Dataset of per-video feature files listed in a text index.

        Each line of `video_file` is "<relative_path> <rest...>"; only the
        first space-separated token (the path) is used. The paths default to
        the original hard-coded locations, so existing callers are unchanged.
        """
        def __init__(self, video_path='/data/FrameFeature/Penn/',
                     video_file='/data/FrameFeature/Penn_train.txt'):
            self.video_path = video_path
            self.video_file = video_file
            # `with` guarantees the index file is closed even if reading fails.
            with open(self.video_file, 'r') as fp:
                lines = fp.readlines()
            self.video_name = [line.strip().split(' ')[0] for line in lines]
        def __len__(self):
            return len(self.video_name)
        def __getitem__(self, index):
            # load_feature comes from the project-local Encoding module.
            feat = load_feature(os.path.join(self.video_path, self.video_name[index]))
            # Append a trailing singleton axis.
            # NOTE(review): target shape inferred from expand_dims only — confirm.
            return np.expand_dims(feat, 2)
     
    def train(model, train_loader, myloss, optimizer, epoch):
        """Run one training epoch; the input is its own target (autoencoder-style).

        Progress is printed every 100 batches. Batches are moved to whatever
        device the model lives on, so the function now works on CPU as well as
        CUDA (the original hard-coded `.cuda()` and the long-deprecated
        `Variable` wrapper).
        """
        model.train()
        device = next(model.parameters()).device
        for batch_idx, train_data in enumerate(train_loader):
            train_data = train_data.to(device)
            optimizer.zero_grad()
            output = model(train_data)
            loss = myloss(output, train_data)
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                # loss.item() replaces loss.data.cpu().numpy()[0], which fails
                # on modern PyTorch (0-d arrays cannot be indexed with [0]).
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tloss: {:.6f}'.format(
                    epoch, batch_idx * len(train_data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
     
    def main():
        """Wire up model, loss, data and optimizer, then train for 10 epochs."""
        model = TransientModel().cuda()
        criterion = MyLoss()

        dataset = MyTrainData()
        loader = data.DataLoader(dataset, batch_size=1,
                                 shuffle=True, num_workers=1)

        optimizer = optim.SGD(model.parameters(), lr=0.001)

        for epoch in range(10):
            train(model, loader, criterion, optimizer, epoch)
     
    # Entry point: run the training driver when this file is executed directly.
    if __name__=='__main__':
        main()
    

    原文信息:
    作者:Tsingzao-于廷照
    来源:CSDN
    原文:https://blog.csdn.net/yutingzhaomeng/article/details/80454807
    版权声明:本文为博主原创文章,转载请附上博文链接!

    展开全文
  • 数据集 MNIST数据集,可以自己在网上下载 训练工具 华为云的modelarts中的notebook,华为云近期有免费的P100服务器,供开发者使用,虽然每次只能用一个小时,但训练这种简单的模型已经够用了。...

    数据集

    MNIST数据集,可以自己在网上下载

    训练工具

    华为云的modelarts中的notebook,华为云近期有免费的P100服务器,供开发者使用,虽然每次只能用一个小时,但训练这种简单的模型已经够用了。

    训练代码

    导入使用的工具包

    import torch as t
    import torchvision
    from torchvision import datasets , transforms
    from torch.autograd import Variable
    import matplotlib.pyplot as plt
    import pylab
    
    # Use the GPU when one is available; everything below branches on this flag.
    use_gpu = t.cuda.is_available()
    print(use_gpu)

    # Compose is a container taking a list of transforms: ToTensor() converts
    # images to tensors; Normalize standardizes them (subtract mean, divide by std).
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.5],
                                                         std=[0.5])])
    # Normalize: mean and std are the dataset statistics (assigned directly here).

    # Fetch the MNIST training and test splits.
    # root: where the dataset is stored after download.
    # transform: applied to every sample on load.
    # train=True loads the training split; train=False the test split.
    # download=True fetches the data if it is not already on disk.
    data_train = datasets.MNIST(root = "../data/",
                                transform = transform,
                                train =True,
                                download = True)
    data_test = datasets.MNIST(root = "../data/",
                                transform = transform,
                                train =False)

    # Data loading: the DataLoader batches ("packs") the processed images
    # before they are fed to the model.
    # dataset: which dataset to load; batch_size: images per batch;
    # shuffle=True randomizes sample order while batching.
    data_loader_train = t.utils.data.DataLoader(dataset = data_train,
                                                batch_size = 64,
                                                shuffle = True
                                                )

    data_loader_test = t.utils.data.DataLoader(dataset = data_test,
                                                batch_size = 64,
                                                shuffle = True  # shuffle sample order
                                                )
    
    
    class Model(t.nn.Module):
        """Two-conv CNN for MNIST: (N,1,28,28) images -> 10 class logits.

        Attribute names `convl` and `dense` are preserved so saved
        state_dicts remain loadable.
        """

        def __init__(self):
            super(Model, self).__init__()
            # Feature extractor. With 3x3 kernels, stride 1 and padding 1,
            # (input + 2*padding - kernel)/stride + 1 keeps 28x28; the final
            # 2x2 max-pool halves it to 14x14 at 128 channels.
            self.convl = t.nn.Sequential(
                t.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1),
                t.nn.ReLU(),
                t.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
                t.nn.ReLU(),
                t.nn.MaxPool2d(stride=2, kernel_size=2),
            )
            # Classifier head; Dropout zeroes each activation with p=0.5
            # during training to curb overfitting.
            self.dense = t.nn.Sequential(
                t.nn.Linear(14 * 14 * 128, 1024),
                t.nn.ReLU(),
                t.nn.Dropout(p=0.5),
                t.nn.Linear(1024, 10),
            )

        def forward(self, x):
            """Return class logits for a batch of 1x28x28 images."""
            features = self.convl(x)
            # Flatten to (batch, 14*14*128); -1 lets view infer the batch size.
            flat = features.view(-1, 14 * 14 * 128)
            return self.dense(flat)
    
    # Build model, loss and optimizer; move model and loss to the GPU if present.
    model = Model()
    cost = t.nn.CrossEntropyLoss()
    optimizer = t.optim.Adam(model.parameters())
    if use_gpu:
        model = model.cuda()
        cost = cost.cuda()
    #print(model)

    # Train for 5 epochs, accumulating loss and correct-prediction counts.
    n_epochs = 5
    for epoch in range(n_epochs):
        running_loss = 0.0
        running_correct = 0.0
        print("Epoch {}/{}".format(epoch,n_epochs))
        print("-"*10)
        for data in data_loader_train:
            X_train, y_train = data
            if (use_gpu):
                X_train, y_train = X_train.cuda(), y_train.cuda()
            # Variable is a no-op wrapper on modern PyTorch (deprecated).
            X_train, y_train = Variable(X_train), Variable(y_train)
            output = model(X_train)
            # pred = index of the max logit per row, i.e. the predicted class.
            _, pred = t.max(output.data, 1)
            optimizer.zero_grad()
            loss = cost(output, y_train)
            #print("[l]:{:.4f}".format(loss))
            loss.backward()
            optimizer.step()
            running_loss += loss.data
            # NOTE(review): t.sum(...) returns an integer tensor, so the
            # accuracy divisions below may truncate on modern PyTorch — confirm.
            running_correct += t.sum(pred == y_train.data)
        test_correct = 0
        # Evaluate on the test loader (dropout stays active: no model.eval() call).
        for data in data_loader_test:
            X_test, y_test = data
            if (use_gpu):
                X_test, y_test = X_test.cuda(), y_test.cuda()
            X_test, y_test = Variable(X_test), Variable(y_test)
            output = model(X_test)
            _, pred = t.max(output.data, 1)
            test_correct += t.sum(pred == y_test.data)
        print("Loss is:{:.4f}, Train_accuracy is {:.4f}%, Test_accuracy is {:.4f}%"
              .format(running_loss/len(data_train),100*running_correct/len(data_train), 100*test_correct/len(data_test)))

    # Persist the entire model object via pickle; reload with torch.load.
    t.save(model, 'model.pkl')
    #model = torch.load('model.pkl')
    
    展开全文
  • 原文链接 import os os.environ['CUDA_VISIBLE_DEVICES'] = '3' os.system('echo $CUDA_VISIBLE_DEVICES') import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data as data...

    原文链接

    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '3'
    os.system('echo $CUDA_VISIBLE_DEVICES')
     
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.utils.data as data
    import torch.optim as optim
    from torch.autograd import Variable
    import numpy as np
    from Encoding import load_feature
     
    class TransientModel(nn.Module):
        """Channel-squeezing stack of 1x1 convolutions: 16 -> 8 -> 4 -> 2 -> 1.

        Spatial dimensions are untouched (kernel_size=1); each stage is
        followed by a ReLU, so the output map is non-negative.
        """

        def __init__(self):
            super(TransientModel, self).__init__()
            # Four halving 1x1 convs; kept as separate attributes so
            # checkpoints keep the conv1..conv4 parameter names.
            self.conv1 = nn.Conv2d(16, 8, kernel_size=1)
            self.conv2 = nn.Conv2d(8, 4, kernel_size=1)
            self.conv3 = nn.Conv2d(4, 2, kernel_size=1)
            self.conv4 = nn.Conv2d(2, 1, kernel_size=1)

        def forward(self, x):
            """Apply each conv + ReLU in turn and return the 1-channel map."""
            for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
                x = F.relu(stage(x))
            return x
     
    class MyLoss(nn.Module):
        """MSE between a prediction and the time-averaged ground truth.

        `truth` is averaged over dim 1, then both tensors are flattened to
        rows of 2048 features (assumes a 2048-dim feature vector — TODO
        confirm against the feature extractor).
        """
        def __init__(self):
            super(MyLoss, self).__init__()
            # Fixed: original used Python-2 `print '1'`, a SyntaxError on
            # Python 3. Debug marker kept to preserve observable behavior.
            print('1')
        def forward(self, pred, truth):
            truth = torch.mean(truth, 1)      # average truth across dim 1
            truth = truth.view(-1, 2048)
            pred = pred.view(-1, 2048)
            # Squared error averaged over features (dim 1), then over batch.
            return torch.mean(torch.mean((pred - truth) ** 2, 1), 0)
     
    class MyTrainData(data.Dataset):
        """Dataset of per-video feature files listed in a text index.

        Each line of `video_file` is "<relative_path> <rest...>"; only the
        first space-separated token (the path) is used. The paths default to
        the original hard-coded locations, so existing callers are unchanged.
        """
        def __init__(self, video_path='/data/FrameFeature/Penn/',
                     video_file='/data/FrameFeature/Penn_train.txt'):
            self.video_path = video_path
            self.video_file = video_file
            # `with` guarantees the index file is closed even if reading fails.
            with open(self.video_file, 'r') as fp:
                lines = fp.readlines()
            self.video_name = [line.strip().split(' ')[0] for line in lines]
        def __len__(self):
            return len(self.video_name)
        def __getitem__(self, index):
            # load_feature comes from the project-local Encoding module.
            feat = load_feature(os.path.join(self.video_path, self.video_name[index]))
            # Append a trailing singleton axis.
            # NOTE(review): target shape inferred from expand_dims only — confirm.
            return np.expand_dims(feat, 2)
     
    def train(model, train_loader, myloss, optimizer, epoch):
        """Run one training epoch; the input is its own target (autoencoder-style).

        Progress is printed every 100 batches. Batches are moved to whatever
        device the model lives on, so the function now works on CPU as well as
        CUDA (the original hard-coded `.cuda()` and the long-deprecated
        `Variable` wrapper).
        """
        model.train()
        device = next(model.parameters()).device
        for batch_idx, train_data in enumerate(train_loader):
            train_data = train_data.to(device)
            optimizer.zero_grad()
            output = model(train_data)
            loss = myloss(output, train_data)
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                # loss.item() replaces loss.data.cpu().numpy()[0], which fails
                # on modern PyTorch (0-d arrays cannot be indexed with [0]).
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tloss: {:.6f}'.format(
                    epoch, batch_idx * len(train_data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
     
    def main():
        """Wire up model, loss, data and optimizer, then train for 10 epochs."""
        model = TransientModel().cuda()
        criterion = MyLoss()

        dataset = MyTrainData()
        loader = data.DataLoader(dataset, batch_size=1,
                                 shuffle=True, num_workers=1)

        optimizer = optim.SGD(model.parameters(), lr=0.001)

        for epoch in range(10):
            train(model, loader, criterion, optimizer, epoch)
     
    # Entry point: run the training driver when this file is executed directly.
    if __name__=='__main__':
        main()
    
    
    
    展开全文
  • 我想使用u版的yolov3剪层后网络结构进行tensorrt加速, (https://github.com/wang-xinyu/tensorrtx/blob/5d879b5886895bdf7f052e708794261679cd10db/yolov3/yolov3.cpp#L235) 是否从这里并根据自己剪层后的网络结构...
  • pytorch自定义网络结构不进行参数初始化会怎样?

    千次阅读 多人点赞 2018-08-14 17:07:15
    答案:不怕不怕,pytorch自己默认有初始化 证据如下: 1) torch.nn.Conv2d的参数初始化 ...Conv2d继承自_ConvNd,在_ConvNd中,可以看到默认参数就是进行初始化的,如下图所示 ...2)torch.nn....
  • pycaffe 自定义网络

    2018-03-01 14:36:42
    经过上一篇博文pycaffe的配置,本博文主要记录了使用pycaffe自定义网络结构
  • 1. 自定义网络层 实现自己的层的最佳方法是扩展tf.keras.layers.Layer类并实现: _init_()函数,在其中执行所有与输入无关的初始化 build()函数,获得输入张量的形状,并可以进行其余的初始化 call()函数,构建网络...
  • github源码地址:...内部是靠ApiResult进行解析的,如果你的数据结构跟ApiResult不同,你可以在你的项目中继承ApiResult,然后重写getCode()、getData()、getMsg()和isOk()等方法来实现自己的需求。 本库中Ap
  • 有时我们可能会需要修改LSTM的结构,比如用分段线性函数替代非线性函数,这篇博客主要写如何用pytorch自定义一个LSTM结构,并在IMDB数据集上搭建了一个单层反向的LSTM网络,验证了自定义LSTM结构的功能。 文章目录...
  • 0,0x01)+'abc' s.send(packet) #这样就发送了一个封包,用WPE截来看是这样的:33 12 00 00 00 01 61 62 63 #pack()用来构造封包的结构,后面可加字符串等...0x33是代表以十六进制格式发,不可直接写成33 #当然这样乱发...

空空如也

空空如也

1 2 3 4 5 ... 20
收藏数 2,707
精华内容 1,082
关键字:

自定义网络结构