Module hummingbird._container

All custom model containers are listed here.

Source code
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------

"""
All custom model containers are listed here.
"""

import numpy as np
import torch


class PyTorchBackendModel(torch.nn.Module):
    """
    Container for a model compiled into PyTorch.
    """

    def __init__(self, input_names, output_names, operator_map, topology, extra_config):
        """
        Args:
            input_names: The names of the input `onnxconverter_common.topology.Variable`s for this model
            output_names: The names of the output `onnxconverter_common.topology.Variable`s generated by this model
            operator_map: A dictionary of operator aliases and related PyTorch implementations
            topology: An `onnxconverter_common.topology.Topology` object representing the model graph
            extra_config: Additional custom configuration parameters
        """
        super(PyTorchBackendModel, self).__init__()
        self.input_names = input_names
        self.output_names = output_names
        self.operator_map = torch.nn.ModuleDict(operator_map)
        self.operators = list(topology.topological_operator_iterator())
        self.extra_config = extra_config
        self.is_regression = self.operator_map[self.operators[-1].full_name].regression

    def forward(self, *inputs):
        with torch.no_grad():
            inputs = [*inputs]
            variable_map = {}
            device = next(self.parameters()).device  # Assuming we are using a single device for all parameters

            # Maps data inputs to the expected variables.
            for i, input_name in enumerate(self.input_names):
                if type(inputs[i]) is np.ndarray:
                    inputs[i] = torch.from_numpy(inputs[i])
                elif type(inputs[i]) is not torch.Tensor:
                    raise RuntimeError("Inputer tensor {} of not supported type {}".format(input_name, type(inputs[i])))
                if device is not None:
                    inputs[i] = inputs[i].to(device)
                variable_map[input_name] = inputs[i]

            # Evaluate all the operators in the topology by properly wiring inputs / outputs.
            for operator in self.operators:
                pytorch_op = self.operator_map[operator.full_name]
                pytorch_outputs = pytorch_op(*(variable_map[input] for input in operator.input_full_names))

                if len(operator.output_full_names) == 1:
                    variable_map[operator.output_full_names[0]] = pytorch_outputs
                else:
                    for i, output in enumerate(operator.output_full_names):
                        variable_map[output] = pytorch_outputs[i]

            # Prepare and return the output.
            if len(self.output_names) == 1:
                return variable_map[self.output_names[0]]
            else:
                return list(variable_map[output_name] for output_name in self.output_names)

    def predict(self, *inputs):
        """
        Utility function that emulates the behavior of the scikit-learn API.
        On regression tasks, returns the predicted values.
        On classification tasks, returns the predicted class labels for the input data.
        """
        if self.is_regression:
            return self.forward(*inputs).cpu().numpy().flatten()
        else:
            return self.forward(*inputs)[0].cpu().numpy()

    def predict_proba(self, *inputs):
        """
        Utility function that emulates the behavior of the scikit-learn API.
        On regression tasks, a call to this method raises a `RuntimeError`.
        On classification tasks, returns the class probability estimates.
        """
        if self.is_regression:
            raise RuntimeError("Predict_proba not available for regression tasks.")
        else:
            return self.forward(*inputs)[1].cpu().numpy()

Classes

class PyTorchBackendModel (input_names, output_names, operator_map, topology, extra_config)

Container for a model compiled into PyTorch.

Args

input_names
The names of the input onnxconverter_common.topology.Variable objects for this model
output_names
The names of the output onnxconverter_common.topology.Variable objects generated by this model
operator_map
A dictionary of operator aliases and related PyTorch implementations
topology
An onnxconverter_common.topology.Topology object representing the model graph
extra_config
Additional custom configuration parameters
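
A minimal, self-contained construction sketch (every Toy* name below is made up for illustration; in practice these objects are produced by Hummingbird's conversion pipeline rather than written by hand). It shows what the constructor arguments must provide: operator modules exposing a regression attribute, and a topology yielding operators with full_name, input_full_names and output_full_names, which are exactly the attributes the container reads (see the source below).

import numpy as np
import torch

from hummingbird._container import PyTorchBackendModel


class ToyRegressor(torch.nn.Module):
    # Read by PyTorchBackendModel to choose between the regression and
    # classification prediction paths.
    regression = True

    def __init__(self):
        super().__init__()
        # At least one parameter is needed: the container queries
        # next(self.parameters()).device to pick the target device.
        self.weight = torch.nn.Parameter(torch.ones(10, 1))

    def forward(self, x):
        return torch.mm(x, self.weight)


class ToyOperator:
    full_name = "ToyRegressor"
    input_full_names = ["input_0"]
    output_full_names = ["variable_out"]


class ToyTopology:
    def topological_operator_iterator(self):
        yield ToyOperator()


container = PyTorchBackendModel(
    input_names=["input_0"],
    output_names=["variable_out"],
    operator_map={"ToyRegressor": ToyRegressor()},
    topology=ToyTopology(),
    extra_config={},
)

X = np.random.rand(5, 10).astype(np.float32)
print(container.predict(X).shape)  # (5,) -- regression output, flattened
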
Source code
class PyTorchBackendModel(torch.nn.Module):
    """
    Container for a model compiled into PyTorch.
    """

    def __init__(self, input_names, output_names, operator_map, topology, extra_config):
        """
        Args:
            input_names: The names of the input `onnxconverter_common.topology.Variable`s for this model
            output_names: The names of the output `onnxconverter_common.topology.Variable`s generated by this model
            operator_map: A dictionary of operator aliases and related PyTorch implementations
            topology: An `onnxconverter_common.topology.Topology` object representing the model graph
            extra_config: Additional custom configuration parameters
        """
        super(PyTorchBackendModel, self).__init__()
        self.input_names = input_names
        self.output_names = output_names
        self.operator_map = torch.nn.ModuleDict(operator_map)
        self.operators = list(topology.topological_operator_iterator())
        self.extra_config = extra_config
        self.is_regression = self.operator_map[self.operators[-1].full_name].regression

    def forward(self, *inputs):
        with torch.no_grad():
            inputs = [*inputs]
            variable_map = {}
            device = next(self.parameters()).device  # Assuming we are using a single device for all parameters

            # Maps data inputs to the expected variables.
            for i, input_name in enumerate(self.input_names):
                if type(inputs[i]) is np.ndarray:
                    inputs[i] = torch.from_numpy(inputs[i])
                elif type(inputs[i]) is not torch.Tensor:
                    raise RuntimeError("Inputer tensor {} of not supported type {}".format(input_name, type(inputs[i])))
                if device is not None:
                    inputs[i] = inputs[i].to(device)
                variable_map[input_name] = inputs[i]

            # Evaluate all the operators in the topology by properly wiring inputs / outputs.
            for operator in self.operators:
                pytorch_op = self.operator_map[operator.full_name]
                pytorch_outputs = pytorch_op(*(variable_map[input] for input in operator.input_full_names))

                if len(operator.output_full_names) == 1:
                    variable_map[operator.output_full_names[0]] = pytorch_outputs
                else:
                    for i, output in enumerate(operator.output_full_names):
                        variable_map[output] = pytorch_outputs[i]

            # Prepare and return the output.
            if len(self.output_names) == 1:
                return variable_map[self.output_names[0]]
            else:
                return list(variable_map[output_name] for output_name in self.output_names)

    def predict(self, *inputs):
        """
        Utility function that emulates the behavior of the scikit-learn API.
        On regression tasks, returns the predicted values.
        On classification tasks, returns the predicted class labels for the input data.
        """
        if self.is_regression:
            return self.forward(*inputs).cpu().numpy().flatten()
        else:
            return self.forward(*inputs)[0].cpu().numpy()

    def predict_proba(self, *inputs):
        """
        Utility function that emulates the behavior of the scikit-learn API.
        On regression tasks, a call to this method raises a `RuntimeError`.
        On classification tasks, returns the class probability estimates.
        """
        if self.is_regression:
            raise RuntimeError("Predict_proba not available for regression tasks.")
        else:
            return self.forward(*inputs)[1].cpu().numpy()

Ancestors

  • torch.nn.modules.module.Module

Methods

def forward(self, *inputs)

Defines the computation performed at every call.

Should be overridden by all subclasses.

Note

Although the recipe for the forward pass needs to be defined within this function, one should call the Module instance afterwards instead of this, since the former takes care of running the registered hooks while the latter silently ignores them.

Source code
def forward(self, *inputs):
    with torch.no_grad():
        inputs = [*inputs]
        variable_map = {}
        device = next(self.parameters()).device  # Assuming we are using a single device for all parameters

        # Maps data inputs to the expected variables.
        for i, input_name in enumerate(self.input_names):
            if type(inputs[i]) is np.ndarray:
                inputs[i] = torch.from_numpy(inputs[i])
            elif type(inputs[i]) is not torch.Tensor:
                raise RuntimeError("Inputer tensor {} of not supported type {}".format(input_name, type(inputs[i])))
            if device is not None:
                inputs[i] = inputs[i].to(device)
            variable_map[input_name] = inputs[i]

        # Evaluate all the operators in the topology by properly wiring inputs / outputs.
        for operator in self.operators:
            pytorch_op = self.operator_map[operator.full_name]
            pytorch_outputs = pytorch_op(*(variable_map[input] for input in operator.input_full_names))

            if len(operator.output_full_names) == 1:
                variable_map[operator.output_full_names[0]] = pytorch_outputs
            else:
                for i, output in enumerate(operator.output_full_names):
                    variable_map[output] = pytorch_outputs[i]

        # Prepare and return the output.
        if len(self.output_names) == 1:
            return variable_map[self.output_names[0]]
        else:
            return list(variable_map[output_name] for output_name in self.output_names)
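
As the source above shows, forward accepts either NumPy arrays or torch tensors, converts the former with torch.from_numpy, moves every input to the device of the model parameters, and raises a RuntimeError for any other input type. A short usage sketch, reusing the hypothetical container and X from the constructor example earlier:

out_from_numpy = container(X)                     # np.ndarray input, converted via torch.from_numpy
out_from_tensor = container(torch.from_numpy(X))  # torch.Tensor input, used as-is and moved to the model device

# Prefer calling the container (container(X)) over container.forward(X):
# per the note above, only the former runs registered hooks.
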
def predict(self, *inputs)

Utility function that emulates the behavior of the scikit-learn API. On regression tasks, returns the predicted values; on classification tasks, returns the predicted class labels for the input data.

Source code
def predict(self, *inputs):
    """
    Utility function that emulates the behavior of the scikit-learn API.
    On regression tasks, returns the predicted values.
    On classification tasks, returns the predicted class labels for the input data.
    """
    if self.is_regression:
        return self.forward(*inputs).cpu().numpy().flatten()
    else:
        return self.forward(*inputs)[0].cpu().numpy()
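
With the hypothetical regression container from the sketch above, predict simply flattens the single tensor returned by forward; on classification containers, where forward returns a [labels, probabilities] pair, it keeps element 0:

values = container.predict(X)  # same result as container(X).cpu().numpy().flatten()
print(values.shape)            # (5,)
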
def predict_proba(self, *inputs)

Utility function that emulates the behavior of the scikit-learn API. On regression tasks, a call to this method raises a RuntimeError; on classification tasks, returns the class probability estimates.

Source code
def predict_proba(self, *inputs):
    """
    Utility function that emulates the behavior of the scikit-learn API.
    On regression tasks, a call to this method raises a `RuntimeError`.
    On classification tasks, returns the class probability estimates.
    """
    if self.is_regression:
        raise RuntimeError("Predict_proba not available for regression tasks.")
    else:
        return self.forward(*inputs)[1].cpu().numpy()
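
For completeness, a classification counterpart of the earlier sketch (again, every name is invented for illustration). A compiled classifier operator emits two outputs, which is why predict() and predict_proba() take element 0 and element 1 of forward()'s result; calling predict_proba on a regression container instead raises the RuntimeError shown above.

import numpy as np
import torch

from hummingbird._container import PyTorchBackendModel


class ToyClassifier(torch.nn.Module):
    regression = False  # classification path: forward() emits labels and probabilities

    def __init__(self):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(10, 3))

    def forward(self, x):
        probabilities = torch.softmax(torch.mm(x, self.weight), dim=1)
        return torch.argmax(probabilities, dim=1), probabilities


class ToyClassifierOperator:
    full_name = "ToyClassifier"
    input_full_names = ["input_0"]
    output_full_names = ["labels", "probabilities"]


class ToyClassifierTopology:
    def topological_operator_iterator(self):
        yield ToyClassifierOperator()


clf = PyTorchBackendModel(
    ["input_0"], ["labels", "probabilities"],
    {"ToyClassifier": ToyClassifier()}, ToyClassifierTopology(), {},
)

X = np.random.rand(5, 10).astype(np.float32)
print(clf.predict(X))              # element 0 of forward(): class labels, shape (5,)
print(clf.predict_proba(X).shape)  # element 1 of forward(): probabilities, shape (5, 3)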