---
title: Layers
keywords: fastai
sidebar: home_sidebar
summary: "Helper functions used to build PyTorch timeseries models."
description: "Helper functions used to build PyTorch timeseries models."
nb_path: "nbs/100_models.layers.ipynb"
---
{% raw %}
{% endraw %} {% raw %}
{% endraw %} {% raw %}

noop[source]

noop(x=None, *args, **kwargs)

Do nothing

{% endraw %} {% raw %}
{% endraw %} {% raw %}

init_lin_zero[source]

init_lin_zero(m)

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class SwishBeta[source]

SwishBeta(beta=1.0) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__
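A minimal shape check for SwishBeta (that it computes x * sigmoid(beta * x) with a learnable beta is an assumption based on the name; only the shape is asserted here):

t = torch.randn(8, 3, 10)
test_eq(SwishBeta()(t).shape, t.shape)
test_eq(SwishBeta(beta=2.)(t).shape, t.shape)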

{% endraw %} {% raw %}
{% endraw %} {% raw %}

same_padding1d[source]

same_padding1d(seq_len, ks, stride=1, dilation=1)

Same padding formula as used in Tensorflow
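A usage sketch (assuming the function returns the (left, right) tuple consumed by Pad1d below): with stride 1, 'same' padding keeps the output length equal to the input length.

seq_len, ks = 10, 5
padding = same_padding1d(seq_len, ks)
t = torch.rand(2, 3, seq_len)
test_eq(nn.Conv1d(3, 4, ks)(Pad1d(padding)(t)).shape[-1], seq_len)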

{% endraw %} {% raw %}

class Pad1d[source]

Pad1d(padding, value=0.0) :: ConstantPad1d

Pads the input tensor boundaries with a constant value.

For N-dimensional padding, use torch.nn.functional.pad().

Args: padding (int, tuple): the size of the padding. If it is an int, the same padding is used on both boundaries. If it is a 2-tuple, it is interpreted as (padding_left, padding_right).

Shape:

- Input: :math:`(N, C, W_{in})`
- Output: :math:`(N, C, W_{out})` where

  :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

Examples::

>>> m = nn.ConstantPad1d(2, 3.5)
>>> input = torch.randn(1, 2, 4)
>>> input
tensor([[[-1.0491, -0.7152, -0.0749,  0.8530],
         [-1.3287,  1.8966,  0.1466, -0.2771]]])
>>> m(input)
tensor([[[ 3.5000,  3.5000, -1.0491, -0.7152, -0.0749,  0.8530,  3.5000,
           3.5000],
         [ 3.5000,  3.5000, -1.3287,  1.8966,  0.1466, -0.2771,  3.5000,
           3.5000]]])
>>> m = nn.ConstantPad1d(2, 3.5)
>>> input = torch.randn(1, 2, 3)
>>> input
tensor([[[ 1.6616,  1.4523, -1.1255],
         [-3.6372,  0.1182, -1.8652]]])
>>> m(input)
tensor([[[ 3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000,  3.5000],
         [ 3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000,  3.5000]]])
>>> # using different paddings for different sides
>>> m = nn.ConstantPad1d((3, 1), 3.5)
>>> m(input)
tensor([[[ 3.5000,  3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000],
         [ 3.5000,  3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000]]])
{% endraw %} {% raw %}

class Conv1dSame[source]

Conv1dSame(ni, nf, ks=3, stride=1, dilation=1, padding:Union[str, int, Tuple[int]]=0, groups:int=1, bias:bool=True, padding_mode:str='zeros', device=None, dtype=None) :: Module

Conv1d with padding='same'

{% endraw %} {% raw %}
{% endraw %} {% raw %}
init_linear(Conv1dSame(2, 3, 3), None, init='auto', bias_std=.01)
{% endraw %} {% raw %}
bs = 2
c_in = 3
c_out = 5
seq_len = 6
t = torch.rand(bs, c_in, seq_len)
test_eq(Conv1dSame(c_in, c_out, ks=3, stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, seq_len))
test_eq(Conv1dSame(c_in, c_out, ks=3, stride=1, dilation=2, bias=False)(t).shape, (bs, c_out, seq_len))
test_eq(Conv1dSame(c_in, c_out, ks=3, stride=2, dilation=1, bias=False)(t).shape, (bs, c_out, seq_len//2))
test_eq(Conv1dSame(c_in, c_out, ks=3, stride=2, dilation=2, bias=False)(t).shape, (bs, c_out, seq_len//2))
{% endraw %} {% raw %}

same_padding2d[source]

same_padding2d(H, W, ks, stride=(1, 1), dilation=(1, 1))

Same padding formula as used in Tensorflow
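A similar sketch for the 2d case (assuming the function returns the 4-tuple consumed by Pad2d below):

H, W, ks = 10, 12, (3, 3)
padding = same_padding2d(H, W, ks)
t = torch.rand(2, 3, H, W)
test_eq(nn.Conv2d(3, 4, ks)(Pad2d(padding)(t)).shape[-2:], (H, W))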

{% endraw %} {% raw %}

class Pad2d[source]

Pad2d(padding, value=0.0) :: ConstantPad2d

Pads the input tensor boundaries with a constant value.

For N-dimensional padding, use torch.nn.functional.pad().

Args: padding (int, tuple): the size of the padding. If it is an int, the same padding is used on all boundaries. If it is a 4-tuple, it is interpreted as (padding_left, padding_right, padding_top, padding_bottom).

Shape:

- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where

  :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

  :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

Examples::

>>> m = nn.ConstantPad2d(2, 3.5)
>>> input = torch.randn(1, 2, 2)
>>> input
tensor([[[ 1.6585,  0.4320],
         [-0.8701, -0.4649]]])
>>> m(input)
tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
         [ 3.5000,  3.5000,  1.6585,  0.4320,  3.5000,  3.5000],
         [ 3.5000,  3.5000, -0.8701, -0.4649,  3.5000,  3.5000],
         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
>>> # using different paddings for different sides
>>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
>>> m(input)
tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
         [ 3.5000,  3.5000,  3.5000,  1.6585,  0.4320],
         [ 3.5000,  3.5000,  3.5000, -0.8701, -0.4649],
         [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
{% endraw %} {% raw %}

class Conv2dSame[source]

Conv2dSame(ni, nf, ks=(3, 3), stride=(1, 1), dilation=(1, 1), padding:Union[str, int, Tuple[int, int]]=0, groups:int=1, bias:bool=True, padding_mode:str='zeros', device=None, dtype=None) :: Module

Conv2d with padding='same'

{% endraw %} {% raw %}

Conv2d[source]

Conv2d(ni, nf, kernel_size=None, ks=None, stride=1, padding='same', dilation=1, init='auto', bias_std=0.01, groups:int=1, bias:bool=True, padding_mode:str='zeros', device=None, dtype=None)

conv2d layer with padding='same', 'valid', or any integer (defaults to 'same')

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 2
c_in = 3
c_out = 5
h = 16
w = 20
t = torch.rand(bs, c_in, h, w)
test_eq(Conv2dSame(c_in, c_out, ks=3, stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))
test_eq(Conv2dSame(c_in, c_out, ks=(3, 1), stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))
test_eq(Conv2dSame(c_in, c_out, ks=3, stride=(1, 1), dilation=(2, 2), bias=False)(t).shape, (bs, c_out, h, w))
test_eq(Conv2dSame(c_in, c_out, ks=3, stride=(2, 2), dilation=(1, 1), bias=False)(t).shape, (bs, c_out, h//2, w//2))
test_eq(Conv2dSame(c_in, c_out, ks=3, stride=(2, 2), dilation=(2, 2), bias=False)(t).shape, (bs, c_out, h//2, w//2))
test_eq(Conv2d(c_in, c_out, ks=3, padding='same', stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))
{% endraw %} {% raw %}

class Chomp1d[source]

Chomp1d(chomp_size) :: Module

Base class for all neural network modules.

Your models should also subclass this class.

Modules can also contain other Modules, allowing them to be nested in a tree structure. You can assign the submodules as regular attributes::

import torch.nn as nn
import torch.nn.functional as F

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        return F.relu(self.conv2(x))

Submodules assigned in this way will be registered, and will have their parameters converted too when you call to(), etc.

training (bool): whether this module is in training or evaluation mode.
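
Chomp1d is the standard TCN "chomp" operation; the sketch below assumes it simply removes the last chomp_size steps along the temporal axis:

t = torch.rand(2, 3, 10)
test_eq(Chomp1d(2)(t).shape, (2, 3, 8))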

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class Conv1dCausal[source]

Conv1dCausal(ni, nf, ks, stride=1, dilation=1, **kwargs) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}
{% endraw %} {% raw %}
init_linear(Conv1dCausal(2, 3, 3), None, init='auto', bias_std=.01)
{% endraw %} {% raw %}
bs = 2
c_in = 3
c_out = 5
seq_len = 512
t = torch.rand(bs, c_in, seq_len)
dilation = 1
test_eq(Conv1dCausal(c_in, c_out, ks=3, dilation=dilation)(t).shape, Conv1dSame(c_in, c_out, ks=3, dilation=dilation)(t).shape)
dilation = 2
test_eq(Conv1dCausal(c_in, c_out, ks=3, dilation=dilation)(t).shape, Conv1dSame(c_in, c_out, ks=3, dilation=dilation)(t).shape)
{% endraw %} {% raw %}

Conv1d[source]

Conv1d(ni, nf, kernel_size=None, ks=None, stride=1, padding='same', dilation=1, init='auto', bias_std=0.01, groups:int=1, bias:bool=True, padding_mode:str='zeros', device=None, dtype=None)

conv1d layer with padding='same', 'causal', 'valid', or any integer (defaults to 'same')

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 2
ni = 3
nf = 5
seq_len = 6
ks = 3
t = torch.rand(bs, ni, seq_len)
test_eq(Conv1d(ni, nf, ks, padding=0)(t).shape, (bs, nf, seq_len - (2 * (ks//2))))
test_eq(Conv1d(ni, nf, ks, padding='valid')(t).shape, (bs, nf, seq_len - (2 * (ks//2))))
test_eq(Conv1d(ni, nf, ks, padding='same')(t).shape, (bs, nf, seq_len))
test_eq(Conv1d(ni, nf, ks, padding='causal')(t).shape, (bs, nf, seq_len))
test_error('use kernel_size or ks but not both simultaneously', Conv1d, ni, nf, kernel_size=3, ks=3)
test_error('you need to pass a ks', Conv1d, ni, nf)
{% endraw %} {% raw %}
conv = Conv1d(ni, nf, ks, padding='same')
init_linear(conv, None, init='auto', bias_std=.01)
conv
Conv1d(3, 5, kernel_size=(3,), stride=(1,), padding=(1,))
{% endraw %} {% raw %}
conv = Conv1d(ni, nf, ks, padding='causal')
init_linear(conv, None, init='auto', bias_std=.01)
conv
Conv1dCausal(
  (conv_causal): Conv1d(3, 5, kernel_size=(3,), stride=(1,), padding=(2,))
)
{% endraw %} {% raw %}
conv = Conv1d(ni, nf, ks, padding='valid')
init_linear(conv, None, init='auto', bias_std=.01)
weight_norm(conv)
conv
Conv1d(3, 5, kernel_size=(3,), stride=(1,))
{% endraw %} {% raw %}
conv = Conv1d(ni, nf, ks, padding=0)
init_linear(conv, None, init='auto', bias_std=.01)
weight_norm(conv)
conv
Conv1d(3, 5, kernel_size=(3,), stride=(1,))
{% endraw %} {% raw %}

class SeparableConv1d[source]

SeparableConv1d(ni, nf, ks, stride=1, padding='same', dilation=1, bias=True, bias_std=0.01) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 64
c_in = 6
c_out = 5
seq_len = 512
t = torch.rand(bs, c_in, seq_len)
test_eq(SeparableConv1d(c_in, c_out, 3)(t).shape, (bs, c_out, seq_len))
{% endraw %} {% raw %}

class AddCoords1d[source]

AddCoords1d() :: Module

Add coordinates to ease position identification without modifying mean and std

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 2
c_in = 3
c_out = 5
seq_len = 50

t = torch.rand(bs, c_in, seq_len)
t = (t - t.mean()) / t.std()
test_eq(AddCoords1d()(t).shape, (bs, c_in + 1, seq_len))
new_t = AddCoords1d()(t)
test_close(new_t.mean(), 0, 1e-2)
test_close(new_t.std(), 1, 1e-2)
{% endraw %} {% raw %}

class ConvBlock[source]

ConvBlock(ni, nf, kernel_size=None, ks=3, stride=1, padding='same', bias=None, bias_std=0.01, norm='Batch', zero_norm=False, bn_1st=True, act=ReLU, act_kwargs={}, init='auto', dropout=0.0, xtra=None, coord=False, separable=False, **kwargs) :: Sequential

Create a sequence of conv1d (ni to nf), activation (if act_cls) and norm_type layers.

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class ResBlock1dPlus[source]

ResBlock1dPlus(expansion, ni, nf, coord=False, stride=1, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1, sa=False, sym=False, norm='Batch', zero_norm=True, act_cls=ReLU, ks=3, pool=AvgPool, pool_first=True, padding=None, bias=None, ndim=2, norm_type=<NormType.Batch: 1>, bn_1st=True, transpose=False, init='auto', xtra=None, bias_std=0.01, dilation:Union[int, Tuple[int, int]]=1, padding_mode:str='zeros', device=None, dtype=None) :: Module

Resnet block from ni to nh with stride

{% endraw %} {% raw %}
{% endraw %} {% raw %}

SEModule1d[source]

SEModule1d(ni, reduction=16, act=ReLU, act_kwargs={})

Squeeze and excitation module for 1d

{% endraw %} {% raw %}
{% endraw %} {% raw %}
t = torch.rand(8, 32, 12)
test_eq(SEModule1d(t.shape[1], 16, act=nn.ReLU, act_kwargs={})(t).shape, t.shape)
{% endraw %} {% raw %}

Norm[source]

Norm(nf, ndim=1, norm='Batch', zero_norm=False, init=True, **kwargs)

Norm layer with nf features and ndim with auto init.
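A quick shape check using the defaults shown in the signature (a sketch; with norm='Batch' and ndim=1 the layer is assumed to behave like a BatchNorm1d over nf features):

t = torch.rand(2, 8, 10)
test_eq(Norm(8, ndim=1)(t).shape, t.shape)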

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 2
ni = 3
nf = 5
sl = 4
ks = 5

t = torch.rand(bs, ni, sl)
test_eq(ConvBlock(ni, nf, ks)(t).shape, (bs, nf, sl))
test_eq(ConvBlock(ni, nf, ks, padding='causal')(t).shape, (bs, nf, sl))
test_eq(ConvBlock(ni, nf, ks, coord=True)(t).shape, (bs, nf, sl))
ConvBlock(ni, nf, ks, stride=2)(t).shape
test_eq(ConvBlock(ni, nf, ks, stride=2)(t).shape, (bs, nf, sl//2))
{% endraw %} {% raw %}
test_eq(BN1d(ni)(t).shape, (bs, ni, sl))
test_eq(BN1d(ni).weight.data.mean().item(), 1.)
test_eq(BN1d(ni, zero_norm=True).weight.data.mean().item(), 0.)
{% endraw %} {% raw %}
test_eq(ConvBlock(ni, nf, ks, norm='batch', zero_norm=True)[1].weight.data.unique().item(), 0)
test_ne(ConvBlock(ni, nf, ks, norm='batch', zero_norm=False)[1].weight.data.unique().item(), 0)
test_eq(ConvBlock(ni, nf, ks, bias=False)[0].bias, None)
ConvBlock(ni, nf, ks, act=Swish, coord=True)
ConvBlock(
  (0): AddCoords1d()
  (1): Conv1d(4, 5, kernel_size=(5,), stride=(1,), padding=(2,), bias=False)
  (2): BatchNorm1d(5, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (3): Swish()
)
{% endraw %} {% raw %}

class LinLnDrop[source]

LinLnDrop(n_in, n_out, ln=True, p=0.0, act=None, lin_first=False) :: Sequential

Module grouping LayerNorm1d, Dropout and Linear layers

{% endraw %} {% raw %}
{% endraw %} {% raw %}
LinLnDrop(2, 3, p=.5)
LinLnDrop(
  (0): LayerNorm((2,), eps=1e-05, elementwise_affine=True)
  (1): Dropout(p=0.5, inplace=False)
  (2): Linear(in_features=2, out_features=3, bias=False)
)
{% endraw %} {% raw %}

class LambdaPlus[source]

LambdaPlus(func, *args, **kwargs) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class Squeeze[source]

Squeeze(dim=-1) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class Unsqueeze[source]

Unsqueeze(dim=-1) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class Add[source]

Add() :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class Concat[source]

Concat(dim=1) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class Permute[source]

Permute(*dims) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class Transpose[source]

Transpose(*dims, contiguous=False) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class View[source]

View(*size) :: Module

Reshape x to size

{% endraw %} {% raw %}

class Reshape[source]

Reshape(*shape) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class Max[source]

Max(dim=None, keepdim=False) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class LastStep[source]

LastStep() :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class SoftMax[source]

SoftMax(dim=-1) :: Module

SoftMax layer

{% endraw %} {% raw %}

class Clamp[source]

Clamp(min=None, max=None) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class Clip[source]

Clip(min=None, max=None) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__
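The test cell below exercises Permute, Max, Transpose, View and Reshape. The following minimal sketch covers a few of the remaining wrappers; the call conventions (that each simply mirrors the corresponding torch op) are assumptions:

t = torch.rand(2, 5, 4)
test_eq(Unsqueeze(1)(t).shape, (2, 1, 5, 4))
test_eq(Squeeze(1)(Unsqueeze(1)(t)).shape, (2, 5, 4))
test_eq(SoftMax(dim=1)(t).shape, (2, 5, 4))
test_le(Clamp(0., .5)(t).max(), .5)
test_eq(LastStep()(t).shape, (2, 5))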

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 2
nf = 5
sl = 4

t = torch.rand(bs, nf, sl)
test_eq(Permute(0,2,1)(t).shape, (bs, sl, nf))
test_eq(Max(1)(t).shape, (bs, sl))
test_eq(Transpose(1,2)(t).shape, (bs, sl, nf))
test_eq(Transpose(1,2, contiguous=True)(t).shape, (bs, sl, nf))
test_eq(View(-1, 2, 10)(t).shape, (bs, 1, 2, 10))
test_eq(Reshape(-1, 2, 10)(t).shape, (bs, 1, 2, 10))
Transpose(1,2), Permute(0,2,1), View(-1, 2, 10), Transpose(1,2, contiguous=True), Reshape(-1, 2, 10), Noop
(Transpose(1, 2),
 Permute(dims=0, 2, 1),
 View(bs, -1, 2, 10),
 Transpose(dims=1, 2).contiguous(),
 Reshape(bs, -1, 2, 10),
 Sequential())
{% endraw %} {% raw %}

class DropPath[source]

DropPath(p=None) :: Module

Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

It's similar to Dropout but it drops individual connections instead of nodes. Original code in https://github.com/rwightman/pytorch-image-models (timm library)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
t = torch.ones(100,2,3)
test_eq(DropPath(0.)(t), t)
assert DropPath(0.5)(t).max() >= 1
{% endraw %} {% raw %}

class Sharpen[source]

Sharpen(T=0.5) :: Module

Sharpens predicted probabilities to increase confidence in predictions, as in the MixMatch paper.

{% endraw %} {% raw %}
{% endraw %} {% raw %}
n_samples = 1000
n_classes = 3

t = (torch.rand(n_samples, n_classes) - .5) * 10
probas = F.softmax(t, -1)
sharpened_probas = Sharpen()(probas)
plt.plot(probas.flatten().sort().values, color='r')
plt.plot(sharpened_probas.flatten().sort().values, color='b')
plt.show()
test_gt(sharpened_probas[n_samples//2:].max(-1).values.sum().item(), probas[n_samples//2:].max(-1).values.sum().item())
{% endraw %} {% raw %}

class Sequential[source]

Sequential(*args) :: Sequential

Class that allows you to pass one or multiple inputs

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class TimeDistributed[source]

TimeDistributed(module, low_mem=False, tdim=1) :: Module

Applies module identically over tdim for each step; use low_mem to compute one step at a time.
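
A minimal sketch (assuming the wrapped module is applied to each step along tdim, here a Linear over the feature dimension):

t = torch.rand(8, 10, 16)  # (batch, steps, features)
tdl = TimeDistributed(nn.Linear(16, 4), tdim=1)
test_eq(tdl(t).shape, (8, 10, 4))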

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class Temp_Scale[source]

Temp_Scale(temp=1.0, dirichlet=False) :: Module

Used to perform Temperature Scaling (dirichlet=False) or Single-parameter Dirichlet calibration (dirichlet=True)

{% endraw %} {% raw %}

class Vector_Scale[source]

Vector_Scale(n_classes=1, dirichlet=False) :: Module

Used to perform Vector Scaling (dirichlet=False) or Diagonal Dirichlet calibration (dirichlet=True)

{% endraw %} {% raw %}

class Matrix_Scale[source]

Matrix_Scale(n_classes=1, dirichlet=False) :: Module

Used to perform Matrix Scaling (dirichlet=False) or Dirichlet calibration (dirichlet=True)

{% endraw %} {% raw %}

get_calibrator[source]

get_calibrator(calibrator=None, n_classes=1, **kwargs)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 2
c_out = 3

t = torch.rand(bs, c_out)
for calibrator, cal_name in zip(['temp', 'vector', 'matrix'], ['Temp_Scale', 'Vector_Scale', 'Matrix_Scale']): 
    cal = get_calibrator(calibrator, n_classes=c_out)
#     print(calibrator)
#     print(cal.weight, cal.bias, '\n')
    test_eq(cal(t), t)
    test_eq(cal.__class__.__name__, cal_name)
for calibrator, cal_name in zip(['dtemp', 'dvector', 'dmatrix'], ['Temp_Scale', 'Vector_Scale', 'Matrix_Scale']):
    cal = get_calibrator(calibrator, n_classes=c_out)
#     print(calibrator)
#     print(cal.weight, cal.bias, '\n')
    test_eq(cal(t), F.log_softmax(t, dim=1))
    test_eq(cal.__class__.__name__, cal_name)
{% endraw %} {% raw %}
bs = 2
c_out = 3

t = torch.rand(bs, c_out)

test_eq(Temp_Scale()(t).shape, t.shape)
test_eq(Vector_Scale(c_out)(t).shape, t.shape)
test_eq(Matrix_Scale(c_out)(t).shape, t.shape)
test_eq(Temp_Scale(dirichlet=True)(t).shape, t.shape)
test_eq(Vector_Scale(c_out, dirichlet=True)(t).shape, t.shape)
test_eq(Matrix_Scale(c_out, dirichlet=True)(t).shape, t.shape)

test_eq(Temp_Scale()(t), t)
test_eq(Vector_Scale(c_out)(t), t)
test_eq(Matrix_Scale(c_out)(t), t)
{% endraw %} {% raw %}
bs = 2
c_out = 5

t = torch.rand(bs, c_out)
test_eq(Vector_Scale(c_out)(t), t)
test_eq(Vector_Scale(c_out).weight.data, torch.ones(c_out))
test_eq(Vector_Scale(c_out).weight.requires_grad, True)
test_eq(type(Vector_Scale(c_out).weight), torch.nn.parameter.Parameter)
{% endraw %} {% raw %}
bs = 2
c_out = 3
weight = 2
bias = 1

t = torch.rand(bs, c_out)
test_eq(Matrix_Scale(c_out)(t).shape, t.shape)
test_eq(Matrix_Scale(c_out).weight.requires_grad, True)
test_eq(type(Matrix_Scale(c_out).weight), torch.nn.parameter.Parameter)
{% endraw %} {% raw %}

class LogitAdjustmentLayer[source]

LogitAdjustmentLayer(class_priors) :: Module

Logit Adjustment for imbalanced datasets

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs, n_classes = 16, 3
class_priors = torch.rand(n_classes)
logits = torch.randn(bs, n_classes) * 2
test_eq(LogitAdjLayer(class_priors)(logits), logits + class_priors)
{% endraw %} {% raw %}

class PPV[source]

PPV(dim=-1) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class PPAuc[source]

PPAuc(dim=-1) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class MaxPPVPool1d[source]

MaxPPVPool1d() :: Module

Drop-in replacement for AdaptiveConcatPool1d - multiplies nf by 2

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 2
nf = 5
sl = 4

t = torch.rand(bs, nf, sl)
test_eq(MaxPPVPool1d()(t).shape, (bs, nf*2, 1))
test_eq(MaxPPVPool1d()(t).shape, AdaptiveConcatPool1d(1)(t).shape)
{% endraw %} {% raw %}

class AdaptiveWeightedAvgPool1d[source]

AdaptiveWeightedAvgPool1d(n_in, seq_len, mult=2, n_layers=2, ln=False, dropout=0.5, act=ReLU(), zero_init=True) :: Module

Global Pooling layer that performs a weighted average along the temporal axis

It can be considered as a channel-wise form of local temporal attention. Inspired by the paper: Hyun, J., Seong, H., & Kim, E. (2019). Universal Pooling--A New Pooling Method for Convolutional Neural Networks. arXiv preprint arXiv:1907.11440.

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class GAP1d[source]

GAP1d(output_size=1) :: Module

Global Adaptive Pooling + Flatten

{% endraw %} {% raw %}

class GACP1d[source]

GACP1d(output_size=1) :: Module

Global AdaptiveConcatPool + Flatten

{% endraw %} {% raw %}

class GAWP1d[source]

GAWP1d(n_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=ReLU(), zero_init=False) :: Module

Global AdaptiveWeightedAvgPool1d + Flatten

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class GlobalWeightedAveragePool1d[source]

GlobalWeightedAveragePool1d(n_in, seq_len) :: Module

Global Weighted Average Pooling layer

Inspired by Building Efficient CNN Architecture for Offline Handwritten Chinese Character Recognition https://arxiv.org/pdf/1804.01259.pdf

{% endraw %} {% raw %}

gwa_pool_head[source]

gwa_pool_head(n_in, c_out, seq_len, bn=True, fc_dropout=0.0)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
t = torch.randn(16, 64, 50)
head = gwa_pool_head(64, 5, 50)
test_eq(head(t).shape, (16, 5))
{% endraw %} {% raw %}

class AttentionalPool1d[source]

AttentionalPool1d(n_in, c_out, bn=False) :: Module

Global Adaptive Pooling layer inspired by Attentional Pooling for Action Recognition https://arxiv.org/abs/1711.01467

{% endraw %} {% raw %}

class GAttP1d[source]

GAttP1d(n_in, c_out, bn=False) :: Sequential

A sequential container. Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of Sequential accepts any input and forwards it to the first module it contains. It then "chains" outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

The value a Sequential provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the Sequential applies to each of the modules it stores (which are each a registered submodule of the Sequential).

What's the difference between a Sequential and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like--a list for storing Module s! On the other hand, the layers in a Sequential are connected in a cascading way.

Example::

# Using Sequential to create a small model. When `model` is run,
# input will first be passed to `Conv2d(1,20,5)`. The output of
# `Conv2d(1,20,5)` will be used as the input to the first
# `ReLU`; the output of the first `ReLU` will become the input
# for `Conv2d(20,64,5)`. Finally, the output of
# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
model = nn.Sequential(
          nn.Conv2d(1,20,5),
          nn.ReLU(),
          nn.Conv2d(20,64,5),
          nn.ReLU()
        )

# Using Sequential with OrderedDict. This is functionally the
# same as the above code
model = nn.Sequential(OrderedDict([
          ('conv1', nn.Conv2d(1,20,5)),
          ('relu1', nn.ReLU()),
          ('conv2', nn.Conv2d(20,64,5)),
          ('relu2', nn.ReLU())
        ]))
{% endraw %} {% raw %}

attentional_pool_head[source]

attentional_pool_head(n_in, c_out, seq_len=None, bn=True, **kwargs)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs, c_in, seq_len = 16, 1, 50
c_out = 3
t = torch.rand(bs, c_in, seq_len)
test_eq(GAP1d()(t).shape, (bs, c_in))
test_eq(GACP1d()(t).shape, (bs, c_in*2))
bs, c_in, seq_len = 16, 4, 50
t = torch.rand(bs, c_in, seq_len)
test_eq(GAP1d()(t).shape, (bs, c_in))
test_eq(GACP1d()(t).shape, (bs, c_in*2))
test_eq(GAWP1d(c_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=False)(t).shape, (bs, c_in))
test_eq(GAWP1d(c_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=False)(t).shape, (bs, c_in))
test_eq(GAWP1d(c_in, seq_len, n_layers=1, ln=False, dropout=0.5, zero_init=False)(t).shape, (bs, c_in))
test_eq(GAWP1d(c_in, seq_len, n_layers=1, ln=False, dropout=0.5, zero_init=True)(t).shape, (bs, c_in))
test_eq(AttentionalPool1d(c_in, c_out)(t).shape, (bs, c_out, 1))
{% endraw %} {% raw %}
bs, c_in, seq_len = 16, 128, 50
c_out = 14
t = torch.rand(bs, c_in, seq_len)
attp = attentional_pool_head(c_in, c_out)
test_eq(attp(t).shape, (bs, c_out))
{% endraw %} {% raw %}

create_pool_head[source]

create_pool_head(n_in, c_out, seq_len=None, concat_pool=False, fc_dropout=0.0, bn=False, y_range=None, **kwargs)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_pool_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
test_eq(create_pool_head(nf, c_out, seq_len, concat_pool=True, fc_dropout=0.5)(t).shape, (bs, c_out))
create_pool_head(nf, c_out, seq_len, concat_pool=True, bn=True, fc_dropout=.5)
Sequential(
  (0): GACP1d(
    (gacp): AdaptiveConcatPool1d(
      (ap): AdaptiveAvgPool1d(output_size=1)
      (mp): AdaptiveMaxPool1d(output_size=1)
    )
    (flatten): Flatten(full=False)
  )
  (1): LinBnDrop(
    (0): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (1): Dropout(p=0.5, inplace=False)
    (2): Linear(in_features=24, out_features=2, bias=False)
  )
)
{% endraw %} {% raw %}

max_pool_head[source]

max_pool_head(n_in, c_out, seq_len, fc_dropout=0.0, bn=False, y_range=None, **kwargs)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(max_pool_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
{% endraw %} {% raw %}

create_pool_plus_head[source]

create_pool_plus_head(*args, lin_ftrs=None, fc_dropout=0.0, concat_pool=True, bn_final=False, lin_first=False, y_range=None)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_pool_plus_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
test_eq(create_pool_plus_head(nf, c_out, concat_pool=True, fc_dropout=0.5)(t).shape, (bs, c_out))
create_pool_plus_head(nf, c_out, seq_len, fc_dropout=0.5)
Sequential(
  (0): AdaptiveConcatPool1d(
    (ap): AdaptiveAvgPool1d(output_size=1)
    (mp): AdaptiveMaxPool1d(output_size=1)
  )
  (1): Flatten(full=False)
  (2): BatchNorm1d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (3): Dropout(p=0.25, inplace=False)
  (4): Linear(in_features=24, out_features=512, bias=False)
  (5): ReLU(inplace=True)
  (6): BatchNorm1d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (7): Dropout(p=0.5, inplace=False)
  (8): Linear(in_features=512, out_features=2, bias=False)
)
{% endraw %} {% raw %}

create_conv_head[source]

create_conv_head(*args, adaptive_size=None, y_range=None)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_conv_head(nf, c_out, seq_len)(t).shape, (bs, c_out))
test_eq(create_conv_head(nf, c_out, adaptive_size=50)(t).shape, (bs, c_out))
create_conv_head(nf, c_out, 50)
Sequential(
  (0): ConvBlock(
    (0): Conv1d(12, 6, kernel_size=(1,), stride=(1,), bias=False)
    (1): BatchNorm1d(6, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (1): ConvBlock(
    (0): Conv1d(6, 3, kernel_size=(1,), stride=(1,), bias=False)
    (1): BatchNorm1d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (2): ConvBlock(
    (0): Conv1d(3, 2, kernel_size=(1,), stride=(1,), bias=False)
    (1): BatchNorm1d(2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
  )
  (3): GAP1d(
    (gap): AdaptiveAvgPool1d(output_size=1)
    (flatten): Flatten(full=False)
  )
)
{% endraw %} {% raw %}

create_mlp_head[source]

create_mlp_head(nf, c_out, seq_len=None, flatten=True, fc_dropout=0.0, bn=False, y_range=None)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_mlp_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
t = torch.rand(bs, nf, seq_len)
create_mlp_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)
Sequential(
  (0): Flatten(full=False)
  (1): LinBnDrop(
    (0): BatchNorm1d(240, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (1): Dropout(p=0.5, inplace=False)
    (2): Linear(in_features=240, out_features=2, bias=False)
  )
)
{% endraw %} {% raw %}

create_fc_head[source]

create_fc_head(nf, c_out, seq_len=None, flatten=True, lin_ftrs=None, y_range=None, fc_dropout=0.0, bn=False, bn_final=False, act=ReLU(inplace=True))

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_fc_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
create_mlp_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)
Sequential(
  (0): Flatten(full=False)
  (1): LinBnDrop(
    (0): BatchNorm1d(240, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (1): Dropout(p=0.5, inplace=False)
    (2): Linear(in_features=240, out_features=2, bias=False)
  )
)
{% endraw %} {% raw %}

create_rnn_head[source]

create_rnn_head(*args, fc_dropout=0.0, bn=False, y_range=None)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_rnn_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
create_rnn_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)
Sequential(
  (0): LastStep()
  (1): LinBnDrop(
    (0): BatchNorm1d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (1): Dropout(p=0.5, inplace=False)
    (2): Linear(in_features=12, out_features=2, bias=False)
  )
)
{% endraw %} {% raw %}

imputation_head[source]

imputation_head(c_in, c_out, seq_len=None, ks=1, y_range=None, fc_dropout=0.0)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 16
nf = 12
ni = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
head = imputation_head(nf, ni, seq_len=None, ks=1, y_range=None, fc_dropout=0.)
test_eq(head(t).shape, (bs, ni, seq_len))
head = imputation_head(nf, ni, seq_len=None, ks=1, y_range=(.3,.7), fc_dropout=0.)
test_ge(head(t).min(), .3)
test_le(head(t).max(), .7)
y_range = (tensor([0.1000, 0.1000, 0.1000, 0.1000, 0.2000, 0.2000, 0.2000, 0.2000, 0.3000,
                   0.3000, 0.3000, 0.3000]),
           tensor([0.6000, 0.6000, 0.6000, 0.6000, 0.7000, 0.7000, 0.7000, 0.7000, 0.8000,
                   0.8000, 0.8000, 0.8000]))
test_ge(head(t).min(), .1)
test_le(head(t).max(), .9)
head = imputation_head(nf, ni, seq_len=None, ks=1, y_range=y_range, fc_dropout=0.)
head
Sequential(
  (0): Dropout(p=0.0, inplace=False)
  (1): Conv1d(12, 2, kernel_size=(1,), stride=(1,))
  (2): SigmoidRange(low=tensor([0.1000, 0.1000, 0.1000, 0.1000, 0.2000, 0.2000, 0.2000, 0.2000, 0.3000,
          0.3000, 0.3000, 0.3000]), high=tensor([0.6000, 0.6000, 0.6000, 0.6000, 0.7000, 0.7000, 0.7000, 0.7000, 0.8000,
          0.8000, 0.8000, 0.8000]))
)
{% endraw %} {% raw %}

class create_conv_lin_3d_head[source]

create_conv_lin_3d_head(n_in, n_out, seq_len, d=(), conv_first=True, conv_bn=True, lin_first=False, lin_bn=True, act=None, fc_dropout=0.0, **kwargs) :: Sequential

Module to create a 3d output head

{% endraw %} {% raw %}
{% endraw %} {% raw %}
t = torch.randn(16, 3, 50)
head = conv_lin_3d_head(3, 20, 50, (4,5))
test_eq(head(t).shape, (16, 4, 5))
head = conv_lin_3d_head(3, 20, 50, (2, 10))
test_eq(head(t).shape, (16, 2, 10))
head
create_conv_lin_3d_head(
  (0): BatchNorm1d(3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (1): Conv1d(3, 2, kernel_size=(1,), stride=(1,), bias=False)
  (2): Transpose(-1, -2)
  (3): BatchNorm1d(50, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (4): Transpose(-1, -2)
  (5): Linear(in_features=50, out_features=10, bias=False)
)
{% endraw %} {% raw %}

class create_lin_3d_head[source]

create_lin_3d_head(n_in, n_out, seq_len, d=(), lin_first=False, bn=True, act=None, fc_dropout=0.0) :: Sequential

Module to create a 3d output head with linear layers

{% endraw %} {% raw %}
{% endraw %} {% raw %}
t = torch.randn(16, 64, 50)
head = lin_3d_head(64, 10, 50, (5,2))
test_eq(head(t).shape, (16, 5, 2))
head = lin_3d_head(64, 5, 50, (5, 1))
test_eq(head(t).shape, (16, 5, 1))
head
create_lin_3d_head(
  (0): Flatten(full=False)
  (1): BatchNorm1d(3200, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (2): Linear(in_features=3200, out_features=5, bias=False)
  (3): Reshape(bs, 5, 1)
)
{% endraw %} {% raw %}

class create_conv_3d_head[source]

create_conv_3d_head(n_in, c_out, seq_len, d=(), lin_first=False, bn=True, act=None, fc_dropout=0.0) :: Sequential

Module to create a 3d output head with a convolutional layer

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 16
c_out = 4
seq_len = 50
d = (2,50)
nf = 128
t = torch.rand(bs, nf, seq_len)
test_eq(conv_3d_head(nf, c_out, seq_len, d)(t).shape, (bs, *d))
{% endraw %} {% raw %}

universal_pool_head[source]

universal_pool_head(n_in, c_out, seq_len, mult=2, pool_n_layers=2, pool_ln=True, pool_dropout=0.5, pool_act=ReLU(), zero_init=True, bn=True, fc_dropout=0.0)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs, c_in, seq_len = 16, 128, 50
c_out = 14
t = torch.rand(bs, c_in, seq_len)
uph = universal_pool_head(c_in, c_out, seq_len)
test_eq(uph(t).shape, (bs, c_out))
uph = universal_pool_head(c_in, c_out, seq_len, 2)
test_eq(uph(t).shape, (bs, c_out))
{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs, c_in, seq_len = 16, 128, 50
c_out = 14
d = (7, 2)
t = torch.rand(bs, c_in, seq_len)
for head in heads: 
    print(head.__name__)
    if head.__name__ == 'create_conv_3d_head': 
        test_eq(head(c_in, c_out, seq_len, (d[0], seq_len))(t).shape, (bs, *(d[0], seq_len)))
    elif '3d' in head.__name__: 
        test_eq(head(c_in, c_out, seq_len, d)(t).shape, (bs, *d))
    else: 
        test_eq(head(c_in, c_out, seq_len)(t).shape, (bs, c_out))
create_mlp_head
create_fc_head
average_pool_head
max_pool_head
concat_pool_head
create_pool_plus_head
create_conv_head
create_rnn_head
create_conv_lin_3d_head
create_lin_3d_head
create_conv_3d_head
attentional_pool_head
universal_pool_head
gwa_pool_head
{% endraw %} {% raw %}

class SqueezeExciteBlock[source]

SqueezeExciteBlock(ni, reduction=16) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}
{% endraw %} {% raw %}
bs = 2
ni = 32
sl = 4
t = torch.rand(bs, ni, sl)
test_eq(SqueezeExciteBlock(ni)(t).shape, (bs, ni, sl))
{% endraw %} {% raw %}

class GaussianNoise[source]

GaussianNoise(sigma=0.1, is_relative_detach=True) :: Module

Gaussian noise regularizer.

Args: sigma (float, optional): relative standard deviation used to generate the noise. Relative means that it will be multiplied by the magnitude of the value you are adding the noise to, so sigma can stay the same regardless of the scale of the vector. is_relative_detach (bool, optional): whether to detach the variable before computing the scale of the noise. If False, the scale of the noise won't be seen as a constant but as something to optimize: this will bias the network to generate vectors with smaller values.

{% endraw %} {% raw %}
{% endraw %} {% raw %}
t = torch.ones(2,3,4)
test_ne(GaussianNoise()(t), t)
test_eq(GaussianNoise()(t).shape, t.shape)
t = torch.ones(2,3)
test_ne(GaussianNoise()(t), t)
test_eq(GaussianNoise()(t).shape, t.shape)
t = torch.ones(2)
test_ne(GaussianNoise()(t), t)
test_eq(GaussianNoise()(t).shape, t.shape)
{% endraw %} {% raw %}

gambler_loss[source]

gambler_loss(reward=2)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
model_output = torch.rand(16, 3)
targets = torch.randint(0, 2, (16,))
criterion = gambler_loss(2)
criterion(model_output, targets)
tensor(0.7438)
{% endraw %} {% raw %}

CrossEntropyLossOneHot[source]

CrossEntropyLossOneHot(output, target, **kwargs)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
output = torch.rand(16, 2)
target = torch.randint(0, 2, (16,))
CrossEntropyLossOneHot(output, target)
tensor(0.6686)
{% endraw %} {% raw %}
from tsai.data.transforms import OneHot
output = nn.Parameter(torch.rand(16, 2))
target = torch.randint(0, 2, (16,))
one_hot_target = OneHot()(target)
CrossEntropyLossOneHot(output, one_hot_target)
tensor(0.6826, grad_fn=<NllLossBackward>)
{% endraw %} {% raw %}
ttest_tensor(a, b)
tensor(-1.5827)
{% endraw %} {% raw %}

ttest_bin_loss[source]

ttest_bin_loss(output, target)

{% endraw %} {% raw %}

ttest_reg_loss[source]

ttest_reg_loss(output, target)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
for _ in range(100):
    output = torch.rand(256, 2)
    target = torch.randint(0, 2, (256,))
    test_close(ttest_bin_loss(output, target).item(), 
               ttest_ind(nn.Softmax(dim=-1)(output[:, 1])[target == 0], nn.Softmax(dim=-1)(output[:, 1])[target == 1], equal_var=False)[0], eps=1e-3)
{% endraw %} {% raw %}

class CenterLoss[source]

CenterLoss(c_out, logits_dim=None) :: Module

Code in Pytorch has been slightly modified from: https://github.com/KaiyangZhou/pytorch-center-loss/blob/master/center_loss.py Based on paper: Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.

Args: c_out (int): number of classes. logits_dim (int): dim 1 of the logits. Defaults to c_out (for one-hot encoded logits).

{% endraw %} {% raw %}

class CenterPlusLoss[source]

CenterPlusLoss(loss, c_out, λ=0.01, logits_dim=None) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}
{% endraw %} {% raw %}
c_in = 10
x = torch.rand(64, c_in).to(device=default_device())
x = F.softmax(x, dim=1)
label = x.max(dim=1).indices
CenterLoss(c_in)(x, label), CenterPlusLoss(LabelSmoothingCrossEntropyFlat(), c_in)(x, label)
(tensor(11.2121, grad_fn=<DivBackward0>),
 TensorBase(2.3675, grad_fn=<AliasBackward>))
{% endraw %} {% raw %}
CenterPlusLoss(LabelSmoothingCrossEntropyFlat(), c_in)
CenterPlusLoss(loss=FlattenedLoss of LabelSmoothingCrossEntropy(), c_out=10, λ=0.01)
{% endraw %} {% raw %}

class FocalLoss[source]

FocalLoss(gamma=0, eps=1e-07) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}
{% endraw %} {% raw %}
c_in = 10
x = torch.rand(64, c_in).to(device=default_device())
x = F.softmax(x, dim=1)
label = x.max(dim=1).indices
FocalLoss(c_in)(x, label)
TensorBase(0.7469)
{% endraw %} {% raw %}

class TweedieLoss[source]

TweedieLoss(p=1.5, eps=1e-10) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}
{% endraw %} {% raw %}
c_in = 10
output = torch.rand(64).to(device=default_device())
target = torch.rand(64).to(device=default_device())
TweedieLoss()(output, target)
tensor(3.5073)
{% endraw %} {% raw %}

class GEGLU[source]

GEGLU() :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}

class ReGLU[source]

ReGLU() :: Module

Same as nn.Module, but no need for subclasses to call super().__init__
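GEGLU and ReGLU are gated activations. The sketch below assumes the usual formulation in which the last dimension is split into two halves, one half gating the other, so the output's last dimension is half that of the input:

t = torch.rand(16, 50, 128)
test_eq(GEGLU()(t).shape, (16, 50, 64))
test_eq(ReGLU()(t).shape, (16, 50, 64))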

{% endraw %} {% raw %}

class PositionwiseFeedForward[source]

PositionwiseFeedForward(dim, dropout=0.0, act='reglu', mlp_ratio=1) :: Sequential

A sequential container. Modules will be added to it in the order they are passed in the constructor. Alternatively, an OrderedDict of modules can be passed in. The forward() method of Sequential accepts any input and forwards it to the first module it contains. It then "chains" outputs to inputs sequentially for each subsequent module, finally returning the output of the last module.

The value a Sequential provides over manually calling a sequence of modules is that it allows treating the whole container as a single module, such that performing a transformation on the Sequential applies to each of the modules it stores (which are each a registered submodule of the Sequential).

What's the difference between a Sequential and a :class:torch.nn.ModuleList? A ModuleList is exactly what it sounds like--a list for storing Module s! On the other hand, the layers in a Sequential are connected in a cascading way.

Example::

# Using Sequential to create a small model. When `model` is run,
# input will first be passed to `Conv2d(1,20,5)`. The output of
# `Conv2d(1,20,5)` will be used as the input to the first
# `ReLU`; the output of the first `ReLU` will become the input
# for `Conv2d(20,64,5)`. Finally, the output of
# `Conv2d(20,64,5)` will be used as input to the second `ReLU`
model = nn.Sequential(
          nn.Conv2d(1,20,5),
          nn.ReLU(),
          nn.Conv2d(20,64,5),
          nn.ReLU()
        )

# Using Sequential with OrderedDict. This is functionally the
# same as the above code
model = nn.Sequential(OrderedDict([
          ('conv1', nn.Conv2d(1,20,5)),
          ('relu1', nn.ReLU()),
          ('conv2', nn.Conv2d(20,64,5)),
          ('relu2', nn.ReLU())
        ]))
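
As with any Transformer-style feed-forward block, the output keeps the model dimension; a quick shape check, assuming dim is the only required argument per the signature above:

t = torch.rand(16, 50, 128)
test_eq(PositionwiseFeedForward(128, dropout=0.1, act='reglu', mlp_ratio=1)(t).shape, t.shape)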
{% endraw %} {% raw %}

class TokenLayer[source]

TokenLayer(token=True) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}
{% endraw %} {% raw %}

class ScaledDotProductAttention[source]

ScaledDotProductAttention(res_attention:bool=False) :: Module

Scaled Dot-Product Attention module (Vaswani et al., 2017) with optional residual attention from the previous layer (He et al., 2020)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
B = 16
C = 10
M = 1500 # seq_len

n_heads = 1
D = 128 # model dimension
N = 512 # max_seq_len - latent's index dimension
d_k = D // n_heads

xb = torch.randn(B, C, M)
xb = (xb - xb.mean()) / xb.std()

# Attention
# input (Q)
lin = nn.Linear(M, N, bias=False)
Q = lin(xb).transpose(1,2)
test_eq(Q.shape, (B, N, C))

# q
to_q = nn.Linear(C, D, bias=False)
q = to_q(Q)
q = nn.LayerNorm(D)(q)

# k, v
context = xb.transpose(1,2)
to_kv = nn.Linear(C, D * 2, bias=False)
k, v = to_kv(context).chunk(2, dim = -1)
k = k.transpose(-1, -2)
k = nn.LayerNorm(M)(k)
v = nn.LayerNorm(D)(v)

test_eq(q.shape, (B, N, D))
test_eq(k.shape, (B, D, M))
test_eq(v.shape, (B, M, D))

output, attn, scores = ScaledDotProductAttention(res_attention=True)(q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1))
test_eq(output.shape, (B, 1, N, D))
test_eq(attn.shape, (B, 1, N, M))
test_eq(scores.shape, (B, 1, N, M))
scores.mean(), scores.std()
(tensor(1.6329e-10, grad_fn=<MeanBackward0>),
 tensor(0.4958, grad_fn=<StdBackward0>))
{% endraw %} {% raw %}

class MultiheadAttention[source]

MultiheadAttention(d_model:int, n_heads:int, d_k:Optional[int]=None, d_v:Optional[int]=None, res_attention:bool=False, dropout:float=0.0, qkv_bias:bool=True) :: Module

Same as nn.Module, but no need for subclasses to call super().__init__

{% endraw %} {% raw %}
{% endraw %} {% raw %}
q = torch.rand([16, 3, 50, 8]) 
k = torch.rand([16, 3, 50, 8]).transpose(-1, -2)
v = torch.rand([16, 3, 50, 6])
attn_mask = torch.triu(torch.ones(50, 50)) # shape: q_len x q_len
key_padding_mask = torch.zeros(16, 50)
key_padding_mask[[1, 3, 6, 15], -10:] = 1
key_padding_mask = key_padding_mask.bool()
print('attn_mask', attn_mask.shape, 'key_padding_mask', key_padding_mask.shape)
output, attn = ScaledDotProductAttention()(q, k, v, attn_mask=attn_mask, key_padding_mask=key_padding_mask)
output.shape, attn.shape
attn_mask torch.Size([50, 50]) key_padding_mask torch.Size([16, 50])
(torch.Size([16, 3, 50, 6]), torch.Size([16, 3, 50, 50]))
{% endraw %} {% raw %}
t = torch.rand(16, 50, 128)
output, attn = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
output.shape, attn.shape
(torch.Size([16, 50, 128]), torch.Size([16, 3, 50, 50]))
{% endraw %} {% raw %}
t = torch.rand(16, 50, 128)
att_mask = (torch.rand((50, 50)) > .85).float()
att_mask[att_mask == 1] = -np.inf

mha = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)
output, attn = mha(t, t, t, attn_mask=att_mask)
test_eq(torch.isnan(output).sum().item(), 0)
test_eq(torch.isnan(attn).sum().item(), 0)
loss = output[:2, :].sum()
test_eq(torch.isnan(loss).sum().item(), 0)
loss.backward()
for n, p in mha.named_parameters(): test_eq(torch.isnan(p.grad).sum().item(), 0)
{% endraw %} {% raw %}
t = torch.rand(16, 50, 128)
attn_mask = (torch.rand((50, 50)) > .85)

# True values will be masked
mha = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)
output, attn = mha(t, t, t, attn_mask=attn_mask)
test_eq(torch.isnan(output).sum().item(), 0)
test_eq(torch.isnan(attn).sum().item(), 0)
loss = output[:2, :].sum()
test_eq(torch.isnan(loss).sum().item(), 0)
loss.backward()
for n, p in mha.named_parameters(): test_eq(torch.isnan(p.grad).sum().item(), 0)
{% endraw %} {% raw %}

class MultiConcatConv1d[source]

MultiConcatConv1d(ni, nf, kss=[3, 5, 7], kernel_sizes=None, maxpool=True, stride=1) :: Module

Module that applies one or multiple kernels (and optionally maxpool)

{% endraw %} {% raw %}
{% endraw %} {% raw %}
t = torch.rand(16, 6, 37)
test_eq(MultiConcatConv1d(t.shape[1], 128, kernel_sizes=[3,5,7], maxpool=True)(t).shape, (t.shape[0], 128, t.shape[-1]))
test_eq(MultiConcatConv1d(t.shape[1], 128, kernel_sizes=[3,5,7], maxpool=True, stride=2)(t).shape, (t.shape[0], 128, math.ceil(t.shape[-1]/2)))
{% endraw %}