Creating a Simple 1D CNN in PyTorch with Multiple Channels


You are forgetting the "minibatch dimension": each "1D" sample indeed has two dimensions, the number of channels (7 in your example) and the length (10 in your case). However, PyTorch expects as input not a single sample, but rather a minibatch of B samples stacked together along the "minibatch dimension".
So a "1D" CNN in PyTorch expects a 3D tensor as input: BxCxT. If you only have one signal, you can add a singleton dimension:

    # [None, ...] prepends a singleton "minibatch" dimension
    out = model(torch.tensor(X)[None, ...])
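
For instance, a minimal sketch (using a single Conv1d layer with the same parameters as the first layer in the question; the sample shapes here are assumptions) of adding that singleton dimension:

    import torch

    # a single "1D" sample: C=7 channels, length T=10
    x = torch.randn(7, 10)

    conv = torch.nn.Conv1d(in_channels=7, out_channels=20, kernel_size=5, stride=2)

    # Conv1d expects (B, C, T), so prepend a singleton batch dimension
    out = conv(x[None, ...])      # equivalent to x.unsqueeze(0)
    print(out.shape)              # torch.Size([1, 20, 3])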

Comments

  • Joseph Konan, almost 2 years ago

    The dimensionality of the PyTorch inputs is not what the model expects, and I am not sure why.

    To my understanding...

    in_channels is, for the first layer, the number of 1D input channels we would like to pass to the model, and is the previous layer's out_channels for all subsequent layers.

    out_channels is the desired number of kernels (filters).

    kernel_size is the length of each filter (i.e. the number of weights per filter, per input channel).

    Therefore, I would expect the data passed to forward to be a dataset with 7 1D channels (i.e. a 2D input).
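
    (As a quick sanity check, a minimal sketch not part of the original code: the weight tensor of a Conv1d layer has shape (out_channels, in_channels, kernel_size), which is the [20, 7, 5] that appears in the error below.)

    import torch

    layer = torch.nn.Conv1d(in_channels=7, out_channels=20, kernel_size=5, stride=2)
    print(layer.weight.shape)  # torch.Size([20, 7, 5]) = (out_channels, in_channels, kernel_size)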

    However, the following code throws an error that is not consistent with what I expect:

    import numpy
    import torch
    
    X = numpy.random.uniform(-10, 10, 70).reshape(-1, 7)
    # Y = np.random.randint(0, 9, 10).reshape(-1, 1)
    
    class Simple1DCNN(torch.nn.Module):
        def __init__(self):
            super(Simple1DCNN, self).__init__()
            self.layer1 = torch.nn.Conv1d(in_channels=7, out_channels=20, kernel_size=5, stride=2)
            self.act1 = torch.nn.ReLU()
            self.layer2 = torch.nn.Conv1d(in_channels=20, out_channels=10, kernel_size=1)
        def forward(self, x):
            x = self.layer1(x)
            x = self.act1(x)
            x = self.layer2(x)
    
            log_probs = torch.nn.functional.log_softmax(x, dim=1)
    
            return log_probs
    
    model = Simple1DCNN()
    print(model(torch.tensor(X)).size)
    

    Throws the following error:

    ---------------------------------------------------------------------------
    RuntimeError                              Traceback (most recent call last)
    <ipython-input-5-eca5856a2314> in <module>()
         21 
         22 model = Simple1DCNN()
    ---> 23 print(model(torch.tensor(X)).size)
    
    ~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
        487             result = self._slow_forward(*input, **kwargs)
        488         else:
    --> 489             result = self.forward(*input, **kwargs)
        490         for hook in self._forward_hooks.values():
        491             hook_result = hook(self, input, result)
    
    <ipython-input-5-eca5856a2314> in forward(self, x)
         12         self.layer2 = torch.nn.Conv1d(in_channels=20, out_channels=10, kernel_size=1)
         13     def forward(self, x):
    ---> 14         x = self.layer1(x)
         15         x = self.act1(x)
         16         x = self.layer2(x)
    
    ~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/module.py in __call__(self, *input, **kwargs)
        487             result = self._slow_forward(*input, **kwargs)
        488         else:
    --> 489             result = self.forward(*input, **kwargs)
        490         for hook in self._forward_hooks.values():
        491             hook_result = hook(self, input, result)
    
    ~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/torch/nn/modules/conv.py in forward(self, input)
        185     def forward(self, input):
        186         return F.conv1d(input, self.weight, self.bias, self.stride,
    --> 187                         self.padding, self.dilation, self.groups)
        188 
        189 
    
    RuntimeError: Expected 3-dimensional input for 3-dimensional weight [20, 7, 5], but got 2-dimensional input of size [10, 7] instead
    

    Edit: See below for the solution, motivated by Shai's answer.

    import numpy
    import torch
    
    X = numpy.random.uniform(-10, 10, 70).reshape(1, 7, -1)  # (B=1, C=7, T=10)
    # Y = np.random.randint(0, 9, 10).reshape(1, 1, -1)
    
    class Simple1DCNN(torch.nn.Module):
        def __init__(self):
            super(Simple1DCNN, self).__init__()
            self.layer1 = torch.nn.Conv1d(in_channels=7, out_channels=20, kernel_size=5, stride=2)
            self.act1 = torch.nn.ReLU()
            self.layer2 = torch.nn.Conv1d(in_channels=20, out_channels=10, kernel_size=1)
        def forward(self, x):
            x = self.layer1(x)
            x = self.act1(x)
            x = self.layer2(x)
    
            log_probs = torch.nn.functional.log_softmax(x, dim=1)
    
            return log_probs
    
    model = Simple1DCNN().double()  # .double() matches the float64 dtype of the NumPy input
    print(model(torch.tensor(X)).shape)
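
    With this reshaping the input has shape (1, 7, 10), i.e. (B, C, T), and the print should show torch.Size([1, 10, 3]): the first convolution (kernel_size=5, stride=2) reduces the length from 10 to 3, and the 1x1 convolution keeps it at 3.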