Layers

MultiScalePeakEmbedding(h_size, dropout=0)

Bases: Module

Multi-scale sinusoidal embedding based on Voronov et al.

Source code in instanovo/transformer/layers.py
def __init__(self, h_size: int, dropout: float = 0) -> None:
    super().__init__()
    self.h_size = h_size

    # Projects the sinusoidal m/z features to a learned h_size embedding.
    self.mlp = nn.Sequential(
        nn.Linear(h_size, h_size),
        nn.ReLU(),
        nn.Dropout(dropout),
        nn.Linear(h_size, h_size),
        nn.Dropout(dropout),
    )

    # Fuses the m/z embedding with the raw intensity (hence h_size + 1 inputs).
    self.head = nn.Sequential(
        nn.Linear(h_size + 1, h_size),
        nn.ReLU(),
        nn.Dropout(dropout),
        nn.Linear(h_size, h_size),
        nn.Dropout(dropout),
    )

    # One angular frequency per sin/cos pair; the sinusoid periods are
    # log-spaced from 1e-2 down to 1e-3.
    freqs = 2 * np.pi / torch.logspace(-2, -3, int(h_size / 2), dtype=torch.float64)
    self.register_buffer("freqs", freqs)
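
A minimal sketch of the frequency buffer construction above, to make the scales concrete (`h_size=8` is an arbitrary choice for illustration):

```python
import numpy as np
import torch

h_size = 8
# Sinusoid periods, log-spaced from 1e-2 down to 1e-3.
wavelengths = torch.logspace(-2, -3, int(h_size / 2), dtype=torch.float64)
freqs = 2 * np.pi / wavelengths
print(wavelengths)  # tensor([0.0100, 0.0046, 0.0022, 0.0010], dtype=torch.float64)
print(freqs.shape)  # torch.Size([4]) -- one angular frequency per sin/cos pair
```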

encode_mass(x)

Encode mz.

Source code in instanovo/transformer/layers.py
def encode_mass(self, x: Tensor) -> Tensor:
    """Encode mz."""
    # x: [batch, n_peaks, 1]; broadcasting against the frequency buffer
    # yields one phase per frequency, shape [batch, n_peaks, h_size / 2].
    x = self.freqs[None, None, :] * x
    # Concatenate sine and cosine components along the feature dimension.
    x = torch.cat([torch.sin(x), torch.cos(x)], axis=2)
    return x.float()
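
A quick shape check, assuming the class is importable from `instanovo.transformer.layers` (as the source path above suggests) and that m/z values carry a trailing singleton feature dimension:

```python
import torch
from instanovo.transformer.layers import MultiScalePeakEmbedding

emb = MultiScalePeakEmbedding(h_size=64)
mz = torch.rand(2, 10, 1) * 1000.0  # [batch, n_peaks, 1] m/z values
enc = emb.encode_mass(mz)
print(enc.shape)  # torch.Size([2, 10, 64]) -- sin and cos halves concatenated
```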

forward(mz_values, intensities)

Encode peaks.

Source code in instanovo/transformer/layers.py
def forward(self, mz_values: Tensor, intensities: Tensor) -> Tensor:
    """Encode peaks."""
    x = self.encode_mass(mz_values)  # [batch, n_peaks, h_size]
    x = self.mlp(x)
    # Append the raw intensity as an extra feature before the fusion head.
    x = torch.cat([x, intensities], axis=2)
    return self.head(x)
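
And the full peak-embedding pass under the same assumptions; intensities need the same `[batch, n_peaks, 1]` layout so the concatenation along the feature axis lines up:

```python
import torch
from instanovo.transformer.layers import MultiScalePeakEmbedding

emb = MultiScalePeakEmbedding(h_size=64, dropout=0.1)
mz = torch.rand(2, 10, 1) * 1000.0  # m/z values
intensities = torch.rand(2, 10, 1)  # matching peak intensities
peaks = emb(mz, intensities)
print(peaks.shape)  # torch.Size([2, 10, 64]) -- one embedding per peak
```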

PositionalEncoding(d_model, dropout=0.1, max_len=5000)

Bases: Module

Standard sinusoidal positional encoding.

Source code in instanovo/transformer/layers.py
def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
    super().__init__()
    self.dropout = nn.Dropout(p=dropout)

    # Precompute the encoding table for all positions up to max_len.
    position = torch.arange(max_len).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
    pe = torch.zeros(1, max_len, d_model)
    pe[0, :, 0::2] = torch.sin(position * div_term)  # even feature indices: sine
    pe[0, :, 1::2] = torch.cos(position * div_term)  # odd feature indices: cosine
    self.register_buffer("pe", pe)
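
For reference, the buffer implements the standard encoding from Vaswani et al. (2017), with `div_term` equal to $10000^{-2i/d_{\mathrm{model}}}$:

$$
\mathrm{PE}(pos,\, 2i) = \sin\!\left(\frac{pos}{10000^{2i/d_{\mathrm{model}}}}\right),
\qquad
\mathrm{PE}(pos,\, 2i+1) = \cos\!\left(\frac{pos}{10000^{2i/d_{\mathrm{model}}}}\right)
$$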

forward(x)

Positional encoding forward pass.

Parameters:

| Name | Type | Description | Default |
| ---- | ---- | ----------- | ------- |
| `x` | `Tensor` | Tensor of shape `[batch_size, seq_len, embedding_dim]` | required |

Source code in instanovo/transformer/layers.py
def forward(self, x: Tensor) -> Tensor:
    """Positional encoding forward pass.

    Arguments:
        x: Tensor, shape ``[batch_size, seq_len, embedding_dim]``
    """
    # Add the precomputed encoding for the first seq_len positions
    # (batch-first input, as implied by the slice over dim 1).
    x = x + self.pe[:, : x.size(1)]
    return self.dropout(x)
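
A minimal batch-first usage sketch, again assuming the import path from the source location above:

```python
import torch
from instanovo.transformer.layers import PositionalEncoding

pos_enc = PositionalEncoding(d_model=64, dropout=0.1, max_len=5000)
x = torch.zeros(2, 10, 64)  # [batch_size, seq_len, embedding_dim]
out = pos_enc(x)            # adds pe[:, :10] to every sequence in the batch
print(out.shape)  # torch.Size([2, 10, 64])
```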