How to Elegantly Train a Model in C++ (Part 3): Building Some Basic Units

2023-11-28 05:38:52
  • The MLP basic unit
    First comes the declaration and definition of the linear layer, including its initialization and forward-propagation functions. The code is as follows:

    class LinearBnReluImpl : public torch::nn::Module{
    public:
        LinearBnReluImpl(int in_features, int out_features);
        torch::Tensor forward(torch::Tensor x);
    private:
        //layers
        torch::nn::Linear ln{nullptr};
        torch::nn::BatchNorm1d bn{nullptr};
    };
    TORCH_MODULE(LinearBnRelu);
    
    LinearBnReluImpl::LinearBnReluImpl(int in_features, int out_features){
        ln = register_module("ln", torch::nn::Linear(torch::nn::LinearOptions(in_features, out_features)));
        bn = register_module("bn", torch::nn::BatchNorm1d(out_features));
    }
    
    torch::Tensor LinearBnReluImpl::forward(torch::Tensor x){
        // linear -> ReLU -> batch norm (note the activation is applied before the normalization here)
        x = torch::relu(ln->forward(x));
        x = bn(x);
        return x;
    }

    When building this linear-layer module class for the MLP, we inherit from torch::nn::Module. The constructor and forward function are public, so they can be called on instances, while the inner torch::nn::Linear layer and torch::nn::BatchNorm1d normalization layer are hidden as private members.

    When defining the constructor, the initially null module holders ln and bn must be assigned, and their names registered at the same time via register_module. The forward function works just like forward in PyTorch.
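
    As a quick sanity check, here is a minimal usage sketch; it assumes the class above is in scope, and the feature sizes and batch size are arbitrary example values:

    #include <torch/torch.h>
    #include <iostream>
    
    int main(){
        LinearBnRelu layer(10, 32);               // 10 input features -> 32 output features
        torch::Tensor x = torch::randn({4, 10});  // batch of 4 (BatchNorm1d needs batch > 1 in training mode)
        torch::Tensor y = layer->forward(x);      // shape [4, 32]
        std::cout << y.sizes() << std::endl;
        return 0;
    }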

  • The CNN basic unit
    Building the CNN's basic unit is similar to the MLP's, with a few differences. The first thing to define is a helper function that assembles the convolution hyperparameters.

    inline torch::nn::Conv2dOptions conv_options(int64_t in_planes, int64_t out_planes, int64_t kernel_size,
        int64_t stride = 1, int64_t padding = 0, bool with_bias = false) {
        torch::nn::Conv2dOptions options = torch::nn::Conv2dOptions(in_planes, out_planes, kernel_size);
        options.stride(stride);
        options.padding(padding);
        options.bias(with_bias);
        return options;
    }

    This function returns a torch::nn::Conv2dOptions object whose hyperparameters are specified through the function's parameters, which makes it convenient to use. It is also marked inline, which can help performance in Release builds.
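
    For instance, a one-line sketch (the channel counts here are arbitrary example values) that builds a standalone convolution layer from these options:

    // a 3x3 convolution, 3 -> 16 channels, stride 2, padding 1
    auto conv = torch::nn::Conv2d(conv_options(3, 16, 3, /*stride=*/2, /*padding=*/1));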

    Next, analogous to the MLP's linear module, the CNN's basic module consists of a convolution layer, an activation function, and a normalization layer. The code is as follows:

    class ConvReluBnImpl : public torch::nn::Module {
    public:
        ConvReluBnImpl(int input_channel=3, int output_channel=64, int kernel_size = 3, int stride = 1);
        torch::Tensor forward(torch::Tensor x);
    private:
        // Declare layers
        torch::nn::Conv2d conv{ nullptr };
        torch::nn::BatchNorm2d bn{ nullptr };
    };
    TORCH_MODULE(ConvReluBn);
    
    ConvReluBnImpl::ConvReluBnImpl(int input_channel, int output_channel, int kernel_size, int stride) {
        // padding = kernel_size / 2 keeps the spatial size unchanged when stride is 1
        conv = register_module("conv", torch::nn::Conv2d(conv_options(input_channel,output_channel,kernel_size,stride,kernel_size/2)));
        bn = register_module("bn", torch::nn::BatchNorm2d(output_channel));
    }
    
    torch::Tensor ConvReluBnImpl::forward(torch::Tensor x) {
        x = torch::relu(conv->forward(x));
        x = bn(x);
        return x;
    }
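
    To confirm that the padding choice preserves spatial size at stride 1, a small sketch assuming the classes above (the shapes are example values):

    ConvReluBn block(3, 64, 3, 1);                    // 3 -> 64 channels, 3x3 kernel, stride 1
    torch::Tensor x = torch::randn({1, 3, 32, 32});
    torch::Tensor y = block->forward(x);              // padding = 3/2 = 1, so the output stays [1, 64, 32, 32]
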
  • A simple MLP
    In the MLP example, we build a four-layer perceptron to show how to implement a deep learning model in C++. The perceptron takes in_features input features and outputs out_features encoded features. The intermediate feature sizes are set to 32, 64 and 128; the reverse order would usually work better in practice, but for an example this does not matter.

    class MLP: public torch::nn::Module{
    public:
        MLP(int in_features, int out_features);
        torch::Tensor forward(torch::Tensor x);
    private:
        int mid_features[3] = {32,64,128};
        LinearBnRelu ln1{nullptr};
        LinearBnRelu ln2{nullptr};
        LinearBnRelu ln3{nullptr};
        torch::nn::Linear out_ln{nullptr};
    };
    
    MLP::MLP(int in_features, int out_features){
        ln1 = LinearBnRelu(in_features, mid_features[0]);
        ln2 = LinearBnRelu(mid_features[0], mid_features[1]);
        ln3 = LinearBnRelu(mid_features[1], mid_features[2]);
        out_ln = torch::nn::Linear(mid_features[2], out_features);
    
        ln1 = register_module("ln1", ln1);
        ln2 = register_module("ln2", ln2);
        ln3 = register_module("ln3", ln3);
        out_ln = register_module("out_ln",out_ln);
    }
    
    torch::Tensor MLP::forward(torch::Tensor x){
        x = ln1->forward(x);
        x = ln2->forward(x);
        x = ln3->forward(x);
        x = out_ln->forward(x);
        return x;
    }
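
    A minimal sketch of driving the MLP (the dimensions are arbitrary example values; since MLP is not wrapped in TORCH_MODULE, it is used as a plain object):

    MLP mlp(8, 2);                          // 8 input features -> 2 output features
    torch::Tensor x = torch::randn({4, 8});
    torch::Tensor y = mlp.forward(x);       // shape [4, 2]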

     

  • A simple CNN
    Having introduced ConvReluBn, the CNN's basic building block, we now try to build a CNN model in C++. The CNN consists of three stages, each made of one convolution layer and one downsampling layer, so overall it downsamples the input image by a factor of 8. The intermediate channel counts follow the same progression as the MLP's feature sizes: input -> 32 -> 64 -> 128 -> output.

    class plainCNN : public torch::nn::Module{
    public:
        plainCNN(int in_channels, int out_channels);
        torch::Tensor forward(torch::Tensor x);
    private:
        int mid_channels[3] = {32,64,128};
        ConvReluBn conv1{nullptr};
        ConvReluBn down1{nullptr};
        ConvReluBn conv2{nullptr};
        ConvReluBn down2{nullptr};
        ConvReluBn conv3{nullptr};
        ConvReluBn down3{nullptr};
        torch::nn::Conv2d out_conv{nullptr};
    };
    
    plainCNN::plainCNN(int in_channels, int out_channels){
        conv1 = ConvReluBn(in_channels,mid_channels[0],3);
        down1 = ConvReluBn(mid_channels[0],mid_channels[0],3,2);
        conv2 = ConvReluBn(mid_channels[0],mid_channels[1],3);
        down2 = ConvReluBn(mid_channels[1],mid_channels[1],3,2);
        conv3 = ConvReluBn(mid_channels[1],mid_channels[2],3);
        down3 = ConvReluBn(mid_channels[2],mid_channels[2],3,2);
        out_conv = torch::nn::Conv2d(conv_options(mid_channels[2],out_channels,3));
    
        conv1 = register_module("conv1",conv1);
        down1 = register_module("down1",down1);
        conv2 = register_module("conv2",conv2);
        down2 = register_module("down2",down2);
        conv3 = register_module("conv3",conv3);
        down3 = register_module("down3",down3);
        out_conv = register_module("out_conv",out_conv);
    }
    
    torch::Tensor plainCNN::forward(torch::Tensor x){
        x = conv1->forward(x);
        x = down1->forward(x);
        x = conv2->forward(x);
        x = down2->forward(x);
        x = conv3->forward(x);
        x = down3->forward(x);
        x = out_conv->forward(x);
        return x;
    }
    Assuming a three-channel image as input and an output channel count of n, an input tensor of shape [1,3,224,224] yields an output tensor of shape [1,n,26,26]: the three stride-2 stages reduce 224 to 28, and the final 3x3 convolution uses no padding, trimming 28 down to 26.
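
    This can be verified with a short sketch (n = 8 is an arbitrary example value):

    plainCNN net(3, 8);
    torch::Tensor x = torch::randn({1, 3, 224, 224});
    std::cout << net.forward(x).sizes() << std::endl;   // prints [1, 8, 26, 26]
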
  • A simple LSTM
    Finally, a simple LSTM example for handling sequential features. Before using the torch::nn::LSTM class directly, we first define a function that returns a torch::nn::LSTMOptions object; it takes the LSTM hyperparameters and returns an options object configured with them.

    inline torch::nn::LSTMOptions lstmOption(int in_features, int hidden_layer_size, int num_layers, bool batch_first = false, bool bidirectional = false){
        torch::nn::LSTMOptions lstmOption = torch::nn::LSTMOptions(in_features, hidden_layer_size);
        lstmOption.num_layers(num_layers).batch_first(batch_first).bidirectional(bidirectional);
        return lstmOption;
    }
    
    //batch_first: true for io(batch, seq, feature) else io(seq, batch, feature)
    class LSTM: public torch::nn::Module{
    public:
        LSTM(int in_features, int hidden_layer_size, int out_size, int num_layers, bool batch_first);
        torch::Tensor forward(torch::Tensor x);
    private:
        torch::nn::LSTM lstm{nullptr};
        torch::nn::Linear ln{nullptr};
        std::tuple<torch::Tensor, torch::Tensor> hidden_cell; // (h, c) state; declared but not used in this example
    };

    With the LSTM declared, we implement its constructor and forward function as follows:

    LSTM::LSTM(int in_features, int hidden_layer_size, int out_size, int num_layers, bool batch_first){
        lstm = torch::nn::LSTM(lstmOption(in_features, hidden_layer_size, num_layers, batch_first));
        ln = torch::nn::Linear(hidden_layer_size, out_size);
    
        lstm = register_module("lstm",lstm);
        ln = register_module("ln",ln);
    }
    
    torch::Tensor LSTM::forward(torch::Tensor x){
        auto lstm_out = lstm->forward(x);                       // tuple of (output, (h_n, c_n))
        auto predictions = ln->forward(std::get<0>(lstm_out));  // project hidden size to out_size at every step
        return predictions.select(1,-1);                        // keep only the last time step (assumes batch_first = true)
    }
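
    A minimal usage sketch (all sizes are arbitrary example values; with batch_first = true the input layout is [batch, seq, feature]):

    LSTM model(10, 32, 1, 2, /*batch_first=*/true);  // 10 features, hidden size 32, out_size 1, 2 layers
    torch::Tensor x = torch::randn({4, 16, 10});     // 4 sequences of 16 time steps
    torch::Tensor y = model.forward(x);              // shape [4, 1]: the prediction at the last time step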