Module flowcon.flows

Sub-modules

flowcon.flows.autoregressive

Implementations of autoregressive flows.

flowcon.flows.base

Basic definitions for the flows module.

flowcon.flows.realnvp

Implementations of Real NVP.

Classes

class Flow (transform, distribution, embedding_net=None)

Base class for all flow objects.

Constructor.

Args

transform
A Transform object that transforms data into noise.
distribution
A Distribution object, the base distribution of the flow that generates the noise.
embedding_net
A nn.Module which has trainable parameters to encode the context (condition). It is trained jointly with the flow.
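
Together, the transform and the base distribution define the density via the change-of-variables formula, log p(x) = log p_base(T(x)) + log |det J_T(x)|, which is what _log_prob below computes. A minimal construction sketch follows; the flowcon.transforms and flowcon.distributions import paths are an assumption based on the nflows layout this package mirrors, and the layer count and hidden_features value are arbitrary.

import torch

# Assumed import paths, mirroring the nflows package layout.
from flowcon.transforms import (CompositeTransform, ReversePermutation,
                                MaskedAffineAutoregressiveTransform)
from flowcon.distributions import StandardNormal
from flowcon.flows import Flow

features = 2

# Transform: data -> noise. Two masked affine autoregressive layers, each
# preceded by a reversal of the feature order.
transform = CompositeTransform([
    ReversePermutation(features=features),
    MaskedAffineAutoregressiveTransform(features=features, hidden_features=32),
    ReversePermutation(features=features),
    MaskedAffineAutoregressiveTransform(features=features, hidden_features=32),
])

# Base distribution that generates the noise.
flow = Flow(transform=transform, distribution=StandardNormal([features]))

x = torch.randn(16, features)   # a batch of data
log_prob = flow.log_prob(x)     # shape [16], inherited from Distribution
samples = flow.sample(8)        # shape [8, 2]
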
Source code
class Flow(Distribution):
    """Base class for all flow objects."""

    def __init__(self, transform, distribution, embedding_net=None):
        """Constructor.

        Args:
            transform: A `Transform` object that transforms data into noise.
            distribution: A `Distribution` object, the base distribution of the flow
                that generates the noise.
            embedding_net: A `nn.Module` which has trainable parameters to encode the
                context (condition). It is trained jointly with the flow.
        """
        super().__init__()
        self._transform = transform
        self._distribution = distribution
        # Inspect the base distribution's log_prob signature to determine
        # whether the base distribution is conditional (accepts a context).
        distribution_signature = signature(self._distribution.log_prob)
        distribution_arguments = distribution_signature.parameters.keys()
        self._context_used_in_base = 'context' in distribution_arguments
        if embedding_net is not None:
            assert isinstance(embedding_net, torch.nn.Module), (
                "embedding_net is not a nn.Module. "
                "If you want to use hard-coded summary features, "
                "please simply pass the encoded features and pass "
                "embedding_net=None"
            )
            self._embedding_net = embedding_net
        else:
            self._embedding_net = torch.nn.Identity()

    def _log_prob(self, inputs, context):
        embedded_context = self._embedding_net(context)
        # Change of variables: log p(x) = log p_base(T(x)) + log|det J_T(x)|.
        noise, logabsdet = self._transform(inputs, context=embedded_context)
        if self._context_used_in_base:
            log_prob = self._distribution.log_prob(noise, context=embedded_context)
        else:
            log_prob = self._distribution.log_prob(noise)
        return log_prob + logabsdet

    def _sample(self, num_samples, context):
        embedded_context = self._embedding_net(context)
        if self._context_used_in_base:
            noise = self._distribution.sample(num_samples, context=embedded_context)
        else:
            # The base distribution is unconditional: draw num_samples noise
            # vectors for each context entry in the batch.
            repeat_noise = self._distribution.sample(num_samples * embedded_context.shape[0])
            noise = torch.reshape(
                repeat_noise,
                (embedded_context.shape[0], -1, repeat_noise.shape[1])
            )

        if embedded_context is not None:
            # Merge the context dimension with sample dimension in order to apply the transform.
            noise = torchutils.merge_leading_dims(noise, num_dims=2)
            embedded_context = torchutils.repeat_rows(
                embedded_context, num_reps=num_samples
            )

        samples, _ = self._transform.inverse(noise, context=embedded_context)

        if embedded_context is not None:
            # Split the context dimension from sample dimension.
            samples = torchutils.split_leading_dim(samples, shape=[-1, num_samples])

        return samples

    def sample_and_log_prob(self, num_samples, context=None):
        """Generates samples from the flow, together with their log probabilities.

        For flows, this is more efficient than calling `sample` and `log_prob` separately.
        """
        embedded_context = self._embedding_net(context)
        if self._context_used_in_base:
            noise, log_prob = self._distribution.sample_and_log_prob(
                num_samples, context=embedded_context
            )
        else:
            noise, log_prob = self._distribution.sample_and_log_prob(
                num_samples
            )

        if embedded_context is not None:
            # Merge the context dimension with sample dimension in order to apply the transform.
            noise = torchutils.merge_leading_dims(noise, num_dims=2)
            embedded_context = torchutils.repeat_rows(
                embedded_context, num_reps=num_samples
            )

        samples, logabsdet = self._transform.inverse(noise, context=embedded_context)

        if embedded_context is not None:
            # Split the context dimension from sample dimension.
            samples = torchutils.split_leading_dim(samples, shape=[-1, num_samples])
            logabsdet = torchutils.split_leading_dim(logabsdet, shape=[-1, num_samples])

        return samples, log_prob - logabsdet

    def transform_to_noise(self, inputs, context=None):
        """Transforms given data into noise. Useful for goodness-of-fit checking.

        Args:
            inputs: A `Tensor` of shape [batch_size, ...], the data to be transformed.
            context: A `Tensor` of shape [batch_size, ...] or None, optional context associated
                with the data.

        Returns:
            A `Tensor` of shape [batch_size, ...], the noise.
        """
        noise, _ = self._transform(inputs, context=self._embedding_net(context))
        return noise

Ancestors

Distribution
torch.nn.Module

Subclasses

MaskedAutoregressiveFlow
SimpleRealNVP

Class variables

var call_super_init : bool
var dump_patches : bool
var training : bool

Methods

def sample_and_log_prob(self, num_samples, context=None)

Generates samples from the flow, together with their log probabilities.

For flows, this is more efficient than calling sample and log_prob separately.
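
For example, with a flow built as in the sketch under the Flow constructor above, one inverse pass yields both samples and densities, whereas the separate calls run the transform twice:

# One pass through the inverse transform, reusing its log-determinant:
samples, log_prob = flow.sample_and_log_prob(1000)

# Equivalent but slower, since sample() and log_prob() each run the transform:
samples_slow = flow.sample(1000)
log_prob_slow = flow.log_prob(samples_slow)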

def transform_to_noise(self, inputs, context=None)

Transforms given data into noise. Useful for goodness-of-fit checking.

Args

inputs
A Tensor of shape [batch_size, …], the data to be transformed.
context
A Tensor of shape [batch_size, …] or None, optional context associated with the data.

Returns

A Tensor of shape [batch_size, …], the noise.
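
For instance, a simple goodness-of-fit check pushes held-out data through the flow and compares the result against the base distribution; a sketch, assuming a flow with a StandardNormal base as above and a hypothetical held-out batch x_heldout:

noise = flow.transform_to_noise(x_heldout)  # x_heldout: [batch_size, features]

# Under a good fit with a StandardNormal base, each noise coordinate should
# be approximately N(0, 1).
print(noise.mean(dim=0), noise.std(dim=0))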

Inherited members

Distribution: log_prob, sample

class MaskedAutoregressiveFlow (features, hidden_features, num_layers, num_blocks_per_layer, use_residual_blocks=True, use_random_masks=False, use_random_permutations=False, activation=<function relu>, dropout_probability=0.0, batch_norm_within_layers=False, batch_norm_between_layers=False)

An autoregressive flow that uses affine transforms with masking.

Reference:

G. Papamakarios et al., Masked Autoregressive Flow for Density Estimation, Advances in Neural Information Processing Systems, 2017.

Constructor.

Args

features
The dimensionality of the data.
hidden_features
The number of hidden units in each masked autoregressive network.
num_layers
The number of autoregressive layers; each layer is preceded by a permutation of the features.
num_blocks_per_layer
The number of blocks in each masked autoregressive network.
use_residual_blocks
Whether the autoregressive networks use residual blocks.
use_random_masks
Whether the autoregressive networks use random masks.
use_random_permutations
Whether the permutations between layers are random; if False, the feature order is reversed instead.
activation
The activation function used in the autoregressive networks.
dropout_probability
The dropout probability in the autoregressive networks.
batch_norm_within_layers
Whether to use batch normalization within the autoregressive networks.
batch_norm_between_layers
Whether to use batch normalization between autoregressive layers.
Source code
class MaskedAutoregressiveFlow(Flow):
    """An autoregressive flow that uses affine transforms with masking.

    Reference:
    > G. Papamakarios et al., Masked Autoregressive Flow for Density Estimation,
    > Advances in Neural Information Processing Systems, 2017.
    """

    def __init__(
        self,
        features,
        hidden_features,
        num_layers,
        num_blocks_per_layer,
        use_residual_blocks=True,
        use_random_masks=False,
        use_random_permutations=False,
        activation=F.relu,
        dropout_probability=0.0,
        batch_norm_within_layers=False,
        batch_norm_between_layers=False,
    ):

        if use_random_permutations:
            permutation_constructor = RandomPermutation
        else:
            permutation_constructor = ReversePermutation

        layers = []
        for _ in range(num_layers):
            layers.append(permutation_constructor(features))
            layers.append(
                MaskedAffineAutoregressiveTransform(
                    features=features,
                    hidden_features=hidden_features,
                    num_blocks=num_blocks_per_layer,
                    use_residual_blocks=use_residual_blocks,
                    random_mask=use_random_masks,
                    activation=activation,
                    dropout_probability=dropout_probability,
                    use_batch_norm=batch_norm_within_layers,
                )
            )
            if batch_norm_between_layers:
                layers.append(BatchNorm(features))

        super().__init__(
            transform=CompositeTransform(layers),
            distribution=StandardNormal([features]),
        )

Ancestors

Flow
Distribution
torch.nn.Module

Class variables

var call_super_init : bool
var dump_patches : bool
var training : bool

Inherited members

Flow: sample_and_log_prob, transform_to_noise
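
A usage sketch for the constructor documented above; the optimizer and the random stand-in batch are assumptions about typical maximum-likelihood training, not part of flowcon's API:

import torch
from flowcon.flows import MaskedAutoregressiveFlow

flow = MaskedAutoregressiveFlow(
    features=2,
    hidden_features=64,
    num_layers=5,
    num_blocks_per_layer=2,
)

# Flow is an nn.Module, so its parameters can be optimized directly.
optimizer = torch.optim.Adam(flow.parameters(), lr=1e-3)

x = torch.randn(128, 2)          # stand-in for a training batch
optimizer.zero_grad()
loss = -flow.log_prob(x).mean()  # negative log-likelihood
loss.backward()
optimizer.step()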

class SimpleRealNVP (features, hidden_features, num_layers, num_blocks_per_layer, use_volume_preserving=False, activation=<function relu>, dropout_probability=0.0, batch_norm_within_layers=False, batch_norm_between_layers=False)

A simplified version of Real NVP for 1-dim inputs.

This implementation uses 1-dim checkerboard masking but doesn't use multi-scaling.

Reference:

L. Dinh et al., Density estimation using Real NVP, ICLR 2017.

Constructor.

Args

features
The dimensionality of the data.
hidden_features
The number of hidden units in each coupling network.
num_layers
The number of coupling layers; the checkerboard mask is flipped after each layer.
num_blocks_per_layer
The number of residual blocks in each coupling network.
use_volume_preserving
Whether to use additive (volume-preserving) coupling transforms instead of affine ones.
activation
The activation function used in the coupling networks.
dropout_probability
The dropout probability in the coupling networks.
batch_norm_within_layers
Whether to use batch normalization within the coupling networks.
batch_norm_between_layers
Whether to use batch normalization between coupling layers.
Source code
class SimpleRealNVP(Flow):
    """An simplified version of Real NVP for 1-dim inputs.

    This implementation uses 1-dim checkerboard masking but doesn't use multi-scaling.

    Reference:
    > L. Dinh et al., Density estimation using Real NVP, ICLR 2017.
    """

    def __init__(
        self,
        features,
        hidden_features,
        num_layers,
        num_blocks_per_layer,
        use_volume_preserving=False,
        activation=F.relu,
        dropout_probability=0.0,
        batch_norm_within_layers=False,
        batch_norm_between_layers=False,
    ):

        if use_volume_preserving:
            coupling_constructor = AdditiveCouplingTransform
        else:
            coupling_constructor = AffineCouplingTransform

        mask = torch.ones(features)
        mask[::2] = -1

        def create_resnet(in_features, out_features):
            return nets.ResidualNet(
                in_features,
                out_features,
                hidden_features=hidden_features,
                num_blocks=num_blocks_per_layer,
                activation=activation,
                dropout_probability=dropout_probability,
                use_batch_norm=batch_norm_within_layers,
            )

        layers = []
        for _ in range(num_layers):
            transform = coupling_constructor(
                mask=mask, transform_net_create_fn=create_resnet
            )
            layers.append(transform)
            mask *= -1
            if batch_norm_between_layers:
                layers.append(BatchNorm(features=features))

        super().__init__(
            transform=CompositeTransform(layers),
            distribution=StandardNormal([features]),
        )

Ancestors

Flow
Distribution
torch.nn.Module

Class variables

var call_super_init : bool
var dump_patches : bool
var training : bool

Inherited members

Flow: sample_and_log_prob, transform_to_noise
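
A corresponding sketch for SimpleRealNVP; the feature count, layer sizes, and the random stand-in data are arbitrary assumptions for illustration:

import torch
from flowcon.flows import SimpleRealNVP

flow = SimpleRealNVP(
    features=2,
    hidden_features=64,
    num_layers=8,
    num_blocks_per_layer=2,
)

x = torch.randn(256, 2)                           # stand-in for training data
nll = -flow.log_prob(x).mean()                    # maximum-likelihood objective
samples, log_prob = flow.sample_and_log_prob(64)  # [64, 2] samples and their log-densities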