Module flowcon.transforms.autoregressive.autoregressive
Implementations of autoregressive transforms.
Functions
def main()
Classes
class AutoregressiveTransform (autoregressive_net)
Transforms each input variable with an invertible elementwise transformation.
The parameters of each invertible elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables.
NOTE: Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform.
Initialize internal Module state, shared by both nn.Module and ScriptModule.
Source code:

    class AutoregressiveTransform(Transform):
        """Transforms each input variable with an invertible elementwise transformation.

        The parameters of each invertible elementwise transformation can be functions of
        previous input variables, but they must not depend on the current or any
        following input variables.

        NOTE: Calculating the inverse transform is D times slower than calculating the
        forward transform, where D is the dimensionality of the input to the transform.
        """

        def __init__(self, autoregressive_net):
            super(AutoregressiveTransform, self).__init__()
            self.autoregressive_net = autoregressive_net

        def forward(self, inputs, context=None):
            autoregressive_params = self.autoregressive_net(inputs, context)
            outputs, logabsdet = self._elementwise_forward(inputs, autoregressive_params)
            return outputs, logabsdet

        def inverse(self, inputs, context=None):
            num_inputs = int(np.prod(inputs.shape[1:]))
            outputs = torch.zeros_like(inputs)
            logabsdet = None
            for _ in range(num_inputs):
                autoregressive_params = self.autoregressive_net(outputs, context)
                outputs, logabsdet = self._elementwise_inverse(
                    inputs, autoregressive_params
                )
            return outputs, logabsdet

        def _output_dim_multiplier(self):
            raise NotImplementedError()

        def _elementwise_forward(self, inputs, autoregressive_params):
            raise NotImplementedError()

        def _elementwise_inverse(self, inputs, autoregressive_params):
            raise NotImplementedError()
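To see why the inverse needs D sequential passes while the forward needs only one, here is a minimal sketch (hypothetical, not library code): `shift_net` below stands in for the autoregressive network, emulated by a strictly lower-triangular matrix so that the parameter for dimension i depends only on dimensions with index smaller than i.

    # Hypothetical illustration of the D-pass inverse used by AutoregressiveTransform.
    import torch

    torch.manual_seed(0)
    D = 4
    W = torch.tril(torch.randn(D, D), diagonal=-1)  # zero diagonal and upper triangle

    def shift_net(x):
        # Shift for dimension i uses only x[..., :i] (strictly autoregressive).
        return x @ W.T

    def forward(x):
        # One pass: all shifts are computed from the known inputs.
        return x + shift_net(x)

    def inverse(y):
        # D passes: after pass k the first k dimensions of x are exact, so the
        # shift for dimension k becomes exact on the next pass.
        x = torch.zeros_like(y)
        for _ in range(D):
            x = y - shift_net(x)
        return x

    x = torch.randn(2, D)
    x_recovered = inverse(forward(x))
    print(torch.allclose(x, x_recovered, atol=1e-6))  # True

In the library, each pass through the loop calls the MADE network once, which is where the factor-of-D cost of `inverse` comes from.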
Ancestors
- Transform
- torch.nn.modules.module.Module
Subclasses
- MaskedAffineAutoregressiveTransform
- MaskedPiecewiseCubicAutoregressiveTransform
- MaskedPiecewiseLinearAutoregressiveTransform
- MaskedPiecewiseQuadraticAutoregressiveTransform
- MaskedPiecewiseRationalQuadraticAutoregressiveTransform
- MaskedShiftAutoregressiveTransform
- MaskedSumOfSigmoidsTransform
- MaskedUMNNAutoregressiveTransform
- MaskedDeepSigmoidTransform
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Methods
def inverse(self, inputs, context=None)
Inherited members
class MaskedAffineAutoregressiveTransform (features, hidden_features, context_features=None, num_blocks=2, use_residual_blocks=True, random_mask=False, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False)
Transforms each input variable with an invertible elementwise transformation.
The parameters of each invertible elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables.
NOTE: Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform.
Initialize internal Module state, shared by both nn.Module and ScriptModule.
Source code:

    class MaskedAffineAutoregressiveTransform(AutoregressiveTransform):
        def __init__(self, features, hidden_features, context_features=None, num_blocks=2,
                     use_residual_blocks=True, random_mask=False, activation=F.relu,
                     dropout_probability=0.0, use_batch_norm=False):
            self.features = features
            made = made_module.MADE(
                features=features, hidden_features=hidden_features,
                context_features=context_features, num_blocks=num_blocks,
                output_multiplier=self._output_dim_multiplier(),
                use_residual_blocks=use_residual_blocks, random_mask=random_mask,
                activation=activation, dropout_probability=dropout_probability,
                use_batch_norm=use_batch_norm,
            )
            self._epsilon = 1e-3
            super(MaskedAffineAutoregressiveTransform, self).__init__(made)

        def _output_dim_multiplier(self):
            return 2

        def _elementwise_forward(self, inputs, autoregressive_params):
            unconstrained_scale, shift = self._unconstrained_scale_and_shift(
                autoregressive_params
            )
            # scale = torch.sigmoid(unconstrained_scale + 2.0) + self._epsilon
            scale = F.softplus(unconstrained_scale) + self._epsilon
            log_scale = torch.log(scale)
            outputs = scale * inputs + shift
            logabsdet = torchutils.sum_except_batch(log_scale, num_batch_dims=1)
            return outputs, logabsdet

        def _elementwise_inverse(self, inputs, autoregressive_params):
            unconstrained_scale, shift = self._unconstrained_scale_and_shift(
                autoregressive_params
            )
            # scale = torch.sigmoid(unconstrained_scale + 2.0) + self._epsilon
            scale = F.softplus(unconstrained_scale) + self._epsilon
            log_scale = torch.log(scale)
            outputs = (inputs - shift) / scale
            logabsdet = -torchutils.sum_except_batch(log_scale, num_batch_dims=1)
            return outputs, logabsdet

        def _unconstrained_scale_and_shift(self, autoregressive_params):
            # split_idx = autoregressive_params.size(1) // 2
            # unconstrained_scale = autoregressive_params[..., :split_idx]
            # shift = autoregressive_params[..., split_idx:]
            # return unconstrained_scale, shift
            autoregressive_params = autoregressive_params.view(
                -1, self.features, self._output_dim_multiplier()
            )
            unconstrained_scale = autoregressive_params[..., 0]
            shift = autoregressive_params[..., 1]
            return unconstrained_scale, shift
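A usage sketch, assuming the class is imported from this module; the sizes and the tolerance are illustrative only.

    import torch
    from flowcon.transforms.autoregressive.autoregressive import (
        MaskedAffineAutoregressiveTransform,
    )

    transform = MaskedAffineAutoregressiveTransform(features=5, hidden_features=32)
    x = torch.randn(16, 5)
    y, logabsdet = transform(x)            # one call to the MADE network
    x_recovered, _ = transform.inverse(y)  # D = 5 sequential calls to the network
    print(torch.allclose(x, x_recovered, atol=1e-4))  # expected: True
    print(logabsdet.shape)                 # torch.Size([16]), one log-det per example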
Ancestors
- AutoregressiveTransform
- Transform
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Inherited members
class MaskedPiecewiseCubicAutoregressiveTransform (num_bins, features, hidden_features, context_features=None, num_blocks=2, use_residual_blocks=True, random_mask=False, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False)
Transforms each input variable with an invertible elementwise transformation.
The parameters of each invertible elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables.
NOTE: Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform.
Initialize internal Module state, shared by both nn.Module and ScriptModule.
Source code:

    class MaskedPiecewiseCubicAutoregressiveTransform(AutoregressiveTransform):
        def __init__(self, num_bins, features, hidden_features, context_features=None,
                     num_blocks=2, use_residual_blocks=True, random_mask=False,
                     activation=F.relu, dropout_probability=0.0, use_batch_norm=False):
            self.num_bins = num_bins
            self.features = features
            made = made_module.MADE(
                features=features, hidden_features=hidden_features,
                context_features=context_features, num_blocks=num_blocks,
                output_multiplier=self._output_dim_multiplier(),
                use_residual_blocks=use_residual_blocks, random_mask=random_mask,
                activation=activation, dropout_probability=dropout_probability,
                use_batch_norm=use_batch_norm,
            )
            super(MaskedPiecewiseCubicAutoregressiveTransform, self).__init__(made)

        def _output_dim_multiplier(self):
            return self.num_bins * 2 + 2

        def _elementwise(self, inputs, autoregressive_params, inverse=False):
            batch_size = inputs.shape[0]
            transform_params = autoregressive_params.view(
                batch_size, self.features, self.num_bins * 2 + 2
            )
            unnormalized_widths = transform_params[..., : self.num_bins]
            unnormalized_heights = transform_params[..., self.num_bins : 2 * self.num_bins]
            derivatives = transform_params[..., 2 * self.num_bins :]
            unnorm_derivatives_left = derivatives[..., 0][..., None]
            unnorm_derivatives_right = derivatives[..., 1][..., None]

            if hasattr(self.autoregressive_net, "hidden_features"):
                unnormalized_widths /= np.sqrt(self.autoregressive_net.hidden_features)
                unnormalized_heights /= np.sqrt(self.autoregressive_net.hidden_features)

            outputs, logabsdet = cubic_spline(
                inputs=inputs,
                unnormalized_widths=unnormalized_widths,
                unnormalized_heights=unnormalized_heights,
                unnorm_derivatives_left=unnorm_derivatives_left,
                unnorm_derivatives_right=unnorm_derivatives_right,
                inverse=inverse,
            )
            return outputs, torchutils.sum_except_batch(logabsdet)

        def _elementwise_forward(self, inputs, autoregressive_params):
            return self._elementwise(inputs, autoregressive_params)

        def _elementwise_inverse(self, inputs, autoregressive_params):
            return self._elementwise(inputs, autoregressive_params, inverse=True)
Ancestors
- AutoregressiveTransform
- Transform
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Inherited members
class MaskedPiecewiseLinearAutoregressiveTransform (num_bins, features, hidden_features, context_features=None, num_blocks=2, use_residual_blocks=True, random_mask=False, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False)
Transforms each input variable with an invertible elementwise transformation.
The parameters of each invertible elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables.
NOTE: Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform.
Initialize internal Module state, shared by both nn.Module and ScriptModule.
Source code:

    class MaskedPiecewiseLinearAutoregressiveTransform(AutoregressiveTransform):
        def __init__(self, num_bins, features, hidden_features, context_features=None,
                     num_blocks=2, use_residual_blocks=True, random_mask=False,
                     activation=F.relu, dropout_probability=0.0, use_batch_norm=False):
            self.num_bins = num_bins
            self.features = features
            made = made_module.MADE(
                features=features, hidden_features=hidden_features,
                context_features=context_features, num_blocks=num_blocks,
                output_multiplier=self._output_dim_multiplier(),
                use_residual_blocks=use_residual_blocks, random_mask=random_mask,
                activation=activation, dropout_probability=dropout_probability,
                use_batch_norm=use_batch_norm,
            )
            super().__init__(made)

        def _output_dim_multiplier(self):
            return self.num_bins

        def _elementwise(self, inputs, autoregressive_params, inverse=False):
            batch_size = inputs.shape[0]
            unnormalized_pdf = autoregressive_params.view(
                batch_size, self.features, self._output_dim_multiplier()
            )
            outputs, logabsdet = linear_spline(
                inputs=inputs, unnormalized_pdf=unnormalized_pdf, inverse=inverse
            )
            return outputs, torchutils.sum_except_batch(logabsdet)

        def _elementwise_forward(self, inputs, autoregressive_params):
            return self._elementwise(inputs, autoregressive_params)

        def _elementwise_inverse(self, inputs, autoregressive_params):
            return self._elementwise(inputs, autoregressive_params, inverse=True)
Ancestors
- AutoregressiveTransform
- Transform
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Inherited members
class MaskedPiecewiseQuadraticAutoregressiveTransform (features, hidden_features, context_features=None, num_bins=10, num_blocks=2, tails=None, tail_bound=1.0, use_residual_blocks=True, random_mask=False, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False, min_bin_width=0.001, min_bin_height=0.001, min_derivative=0.001)
Transforms each input variable with an invertible elementwise transformation.
The parameters of each invertible elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables.
NOTE: Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform.
Initialize internal Module state, shared by both nn.Module and ScriptModule.
Source code:

    class MaskedPiecewiseQuadraticAutoregressiveTransform(AutoregressiveTransform):
        def __init__(self, features, hidden_features, context_features=None, num_bins=10,
                     num_blocks=2, tails=None, tail_bound=1.0, use_residual_blocks=True,
                     random_mask=False, activation=F.relu, dropout_probability=0.0,
                     use_batch_norm=False,
                     min_bin_width=rational_quadratic.DEFAULT_MIN_BIN_WIDTH,
                     min_bin_height=rational_quadratic.DEFAULT_MIN_BIN_HEIGHT,
                     min_derivative=rational_quadratic.DEFAULT_MIN_DERIVATIVE):
            self.num_bins = num_bins
            self.min_bin_width = min_bin_width
            self.min_bin_height = min_bin_height
            self.min_derivative = min_derivative
            self.tails = tails
            self.tail_bound = tail_bound
            self.features = features
            made = made_module.MADE(
                features=features, hidden_features=hidden_features,
                context_features=context_features, num_blocks=num_blocks,
                output_multiplier=self._output_dim_multiplier(),
                use_residual_blocks=use_residual_blocks, random_mask=random_mask,
                activation=activation, dropout_probability=dropout_probability,
                use_batch_norm=use_batch_norm,
            )
            super().__init__(made)

        def _output_dim_multiplier(self):
            if self.tails == "linear":
                return self.num_bins * 2 - 1
            else:
                return self.num_bins * 2 + 1

        def _elementwise(self, inputs, autoregressive_params, inverse=False):
            batch_size = inputs.shape[0]
            transform_params = autoregressive_params.view(
                batch_size, self.features, self._output_dim_multiplier()
            )
            unnormalized_widths = transform_params[..., : self.num_bins]
            unnormalized_heights = transform_params[..., self.num_bins :]

            if hasattr(self.autoregressive_net, "hidden_features"):
                unnormalized_widths /= np.sqrt(self.autoregressive_net.hidden_features)
                # unnormalized_heights /= np.sqrt(self.autoregressive_net.hidden_features)

            if self.tails is None:
                spline_fn = quadratic_spline
                spline_kwargs = {}
            elif self.tails == "linear":
                spline_fn = unconstrained_quadratic_spline
                spline_kwargs = {"tails": self.tails, "tail_bound": self.tail_bound}
            else:
                raise ValueError

            outputs, logabsdet = spline_fn(
                inputs=inputs,
                unnormalized_heights=unnormalized_heights,
                unnormalized_widths=unnormalized_widths,
                inverse=inverse,
                min_bin_width=self.min_bin_width,
                min_bin_height=self.min_bin_height,
                **spline_kwargs
            )
            return outputs, torchutils.sum_except_batch(logabsdet)

        def _elementwise_forward(self, inputs, autoregressive_params):
            return self._elementwise(inputs, autoregressive_params)

        def _elementwise_inverse(self, inputs, autoregressive_params):
            return self._elementwise(inputs, autoregressive_params, inverse=True)
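A small sketch, assuming the class is imported from this module, showing how the `tails` option changes the number of spline parameters the MADE network must emit per input dimension (see `_output_dim_multiplier` in the source above).

    from flowcon.transforms.autoregressive.autoregressive import (
        MaskedPiecewiseQuadraticAutoregressiveTransform,
    )

    bounded = MaskedPiecewiseQuadraticAutoregressiveTransform(
        features=4, hidden_features=32, num_bins=10, tails=None
    )
    linear_tails = MaskedPiecewiseQuadraticAutoregressiveTransform(
        features=4, hidden_features=32, num_bins=10, tails="linear", tail_bound=3.0
    )
    # Parameters per dimension, straight from the source above:
    print(bounded._output_dim_multiplier())       # 21 = 2 * num_bins + 1
    print(linear_tails._output_dim_multiplier())  # 19 = 2 * num_bins - 1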
Ancestors
- AutoregressiveTransform
- Transform
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Inherited members
class MaskedPiecewiseRationalQuadraticAutoregressiveTransform (features, hidden_features, context_features=None, num_bins=10, tails=None, tail_bound=1.0, num_blocks=2, use_residual_blocks=True, random_mask=False, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False, min_bin_width=0.001, min_bin_height=0.001, min_derivative=0.001)
Transforms each input variable with an invertible elementwise transformation.
The parameters of each invertible elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables.
NOTE: Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform.
Initialize internal Module state, shared by both nn.Module and ScriptModule.
Source code:

    class MaskedPiecewiseRationalQuadraticAutoregressiveTransform(AutoregressiveTransform):
        def __init__(self, features, hidden_features, context_features=None, num_bins=10,
                     tails=None, tail_bound=1.0, num_blocks=2, use_residual_blocks=True,
                     random_mask=False, activation=F.relu, dropout_probability=0.0,
                     use_batch_norm=False,
                     min_bin_width=rational_quadratic.DEFAULT_MIN_BIN_WIDTH,
                     min_bin_height=rational_quadratic.DEFAULT_MIN_BIN_HEIGHT,
                     min_derivative=rational_quadratic.DEFAULT_MIN_DERIVATIVE):
            self.num_bins = num_bins
            self.min_bin_width = min_bin_width
            self.min_bin_height = min_bin_height
            self.min_derivative = min_derivative
            self.tails = tails
            self.tail_bound = tail_bound
            autoregressive_net = made_module.MADE(
                features=features, hidden_features=hidden_features,
                context_features=context_features, num_blocks=num_blocks,
                output_multiplier=self._output_dim_multiplier(),
                use_residual_blocks=use_residual_blocks, random_mask=random_mask,
                activation=activation, dropout_probability=dropout_probability,
                use_batch_norm=use_batch_norm,
            )
            super().__init__(autoregressive_net)

        def _output_dim_multiplier(self):
            if self.tails == "linear":
                return self.num_bins * 3 - 1
            elif self.tails is None:
                return self.num_bins * 3 + 1
            else:
                raise ValueError

        def _elementwise(self, inputs, autoregressive_params, inverse=False):
            batch_size, features = inputs.shape[0], inputs.shape[1]
            transform_params = autoregressive_params.view(
                batch_size, features, self._output_dim_multiplier()
            )
            unnormalized_widths = transform_params[..., : self.num_bins]
            unnormalized_heights = transform_params[..., self.num_bins : 2 * self.num_bins]
            unnormalized_derivatives = transform_params[..., 2 * self.num_bins :]

            if hasattr(self.autoregressive_net, "hidden_features"):
                unnormalized_widths /= np.sqrt(self.autoregressive_net.hidden_features)
                unnormalized_heights /= np.sqrt(self.autoregressive_net.hidden_features)

            if self.tails is None:
                spline_fn = rational_quadratic_spline
                spline_kwargs = {"left": -1.2, "right": 1.2, "bottom": -1.2, "top": 1.2}
            elif self.tails == "linear":
                spline_fn = unconstrained_rational_quadratic_spline
                spline_kwargs = {"tails": self.tails, "tail_bound": self.tail_bound}
            else:
                raise ValueError

            outputs, logabsdet = spline_fn(
                inputs=inputs,
                unnormalized_widths=unnormalized_widths,
                unnormalized_heights=unnormalized_heights,
                unnormalized_derivatives=unnormalized_derivatives,
                inverse=inverse,
                min_bin_width=self.min_bin_width,
                min_bin_height=self.min_bin_height,
                min_derivative=self.min_derivative,
                enable_identity_init=True,
                **spline_kwargs
            )
            return outputs, torchutils.sum_except_batch(logabsdet)

        def _elementwise_forward(self, inputs, autoregressive_params):
            return self._elementwise(inputs, autoregressive_params)

        def _elementwise_inverse(self, inputs, autoregressive_params):
            return self._elementwise(inputs, autoregressive_params, inverse=True)
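A usage sketch for the rational-quadratic variant, assuming the class is imported from this module; `tails="linear"` allows unbounded inputs by applying the spline only inside [-tail_bound, tail_bound], and the sizes below are illustrative only.

    import torch
    from flowcon.transforms.autoregressive.autoregressive import (
        MaskedPiecewiseRationalQuadraticAutoregressiveTransform,
    )

    transform = MaskedPiecewiseRationalQuadraticAutoregressiveTransform(
        features=5, hidden_features=64, num_bins=8, tails="linear", tail_bound=3.0
    )
    x = torch.randn(16, 5)
    y, logabsdet = transform(x)
    x_recovered, _ = transform.inverse(y)  # 5 sequential passes through the network
    print(torch.allclose(x, x_recovered, atol=1e-4))  # expected: True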
Ancestors
- AutoregressiveTransform
- Transform
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Inherited members
class MaskedShiftAutoregressiveTransform (features, hidden_features, context_features=None, num_blocks=2, use_residual_blocks=True, random_mask=False, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False)
Transforms each input variable with an invertible elementwise transformation.
The parameters of each invertible elementwise transformation can be functions of previous input variables, but they must not depend on the current or any following input variables.
NOTE: Calculating the inverse transform is D times slower than calculating the forward transform, where D is the dimensionality of the input to the transform.
Initialize internal Module state, shared by both nn.Module and ScriptModule.
Source code:

    class MaskedShiftAutoregressiveTransform(AutoregressiveTransform):
        def __init__(self, features, hidden_features, context_features=None, num_blocks=2,
                     use_residual_blocks=True, random_mask=False, activation=F.relu,
                     dropout_probability=0.0, use_batch_norm=False):
            self.features = features
            made = made_module.MADE(
                features=features, hidden_features=hidden_features,
                context_features=context_features, num_blocks=num_blocks,
                output_multiplier=self._output_dim_multiplier(),
                use_residual_blocks=use_residual_blocks, random_mask=random_mask,
                activation=activation, dropout_probability=dropout_probability,
                use_batch_norm=use_batch_norm,
            )
            self._epsilon = 1e-3
            self.shift_scale = 1.
            super(MaskedShiftAutoregressiveTransform, self).__init__(made)

        def _output_dim_multiplier(self):
            return 1

        def _elementwise_forward(self, inputs, autoregressive_params):
            shift = self._unconstrained_shift(autoregressive_params)
            # scale = torch.sigmoid(unconstrained_scale + 2.0) + self._epsilon
            # scale = F.softplus(unconstrained_scale) + self._epsilon
            # log_scale = torch.log(scale)
            outputs = inputs + torch.tanh(shift) * 2
            # logabsdet = torchutils.sum_except_batch(log_scale, num_batch_dims=1)
            logabsdet = torch.zeros(inputs.shape[0], device=inputs.device)
            return outputs, logabsdet

        def _elementwise_inverse(self, inputs, autoregressive_params):
            shift = self._unconstrained_shift(autoregressive_params)
            # scale = torch.sigmoid(unconstrained_scale + 2.0) + self._epsilon
            # scale = F.softplus(unconstrained_scale) + self._epsilon
            # log_scale = torch.log(scale)
            outputs = (inputs - shift)  # / scale
            logabsdet = torch.zeros(inputs.shape[0], device=inputs.device)
            return outputs, logabsdet

        def _unconstrained_shift(self, autoregressive_params):
            # split_idx = autoregressive_params.size(1) // 2
            # unconstrained_scale = autoregressive_params[..., :split_idx]
            # shift = autoregressive_params[..., split_idx:]
            # return unconstrained_scale, shift
            shift = autoregressive_params.view(-1, self.features)
            # unconstrained_scale = autoregressive_params[..., 0]
            return shift * self.shift_scale
Ancestors
- AutoregressiveTransform
- Transform
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Inherited members
class MaskedSumOfSigmoidsTransform (features, hidden_features, n_sigmoids=30, context_features=None, num_blocks=2, use_residual_blocks=True, random_mask=False, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False)
An autoregressive layer based on a sum-of-sigmoids monotonic elementwise transformation of the variables.
Initialize internal Module state, shared by both nn.Module and ScriptModule.
Source code:

    class MaskedSumOfSigmoidsTransform(AutoregressiveTransform):
        """An unconstrained monotonic neural networks autoregressive layer that
        transforms the variables.
        """

        def __init__(self, features, hidden_features, n_sigmoids=30, context_features=None,
                     num_blocks=2, use_residual_blocks=True, random_mask=False,
                     activation=F.relu, dropout_probability=0.0, use_batch_norm=False):
            self.features = features
            self.n_sigmoids = n_sigmoids
            made = made_module.MADE(
                features=features, hidden_features=hidden_features,
                context_features=context_features, num_blocks=num_blocks,
                output_multiplier=self._output_dim_multiplier(),
                use_residual_blocks=use_residual_blocks, random_mask=random_mask,
                activation=activation, dropout_probability=dropout_probability,
                use_batch_norm=use_batch_norm,
            )
            super().__init__(made)

        def _output_dim_multiplier(self):
            return 3 * self.n_sigmoids + 1

        def _elementwise_forward(self, inputs, autoregressive_params):
            transformer = SumOfSigmoids(
                n_sigmoids=self.n_sigmoids, features=self.features,
                raw_params=autoregressive_params.view(
                    inputs.shape[0], self.features, self._output_dim_multiplier()
                ),
            )
            z, logabsdet = transformer(inputs)
            return z - 0.5, logabsdet

        def _elementwise_inverse(self, inputs, autoregressive_params):
            # self.transformer.set_raw_params(self.features, autoregressive_params.reshape(inputs.shape[0], -1))
            inputs = inputs + 0.5
            transformer = SumOfSigmoids(
                n_sigmoids=self.n_sigmoids, features=self.features,
                raw_params=autoregressive_params.view(
                    inputs.shape[0], self.features, self._output_dim_multiplier()
                ),
            )
            x, logabsdet = transformer.inverse(inputs)
            return x, logabsdet
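A usage sketch, assuming the class is imported from this module. Note from the source above that `forward()` returns `z - 0.5` and `inverse()` adds 0.5 back before inverting; the shapes follow the usual (batch, features) convention.

    import torch
    from flowcon.transforms.autoregressive.autoregressive import (
        MaskedSumOfSigmoidsTransform,
    )

    transform = MaskedSumOfSigmoidsTransform(features=5, hidden_features=64, n_sigmoids=10)
    x = torch.randn(8, 5)
    z, logabsdet = transform(x)
    x_recovered, _ = transform.inverse(z)
    print(z.shape, x_recovered.shape)  # expected: torch.Size([8, 5]) torch.Size([8, 5])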
Ancestors
- AutoregressiveTransform
- Transform
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Inherited members
class MaskedUMNNAutoregressiveTransform (features, hidden_features, context_features=None, num_blocks=2, use_residual_blocks=True, random_mask=False, activation=<function relu>, dropout_probability=0.0, use_batch_norm=False, integrand_net_layers=[50, 50, 50], cond_size=20, nb_steps=20, solver='CCParallel')
An autoregressive layer based on unconstrained monotonic neural networks that transforms the variables.
Reference:
A. Wehenkel and G. Louppe, Unconstrained Monotonic Neural Networks, NeurIPS 2019.
Specific arguments:
- integrand_net_layers: the layer dimensions of the integrand network.
- cond_size: the embedding size for the conditioning factors.
- nb_steps: the number of integration steps.
- solver: the quadrature algorithm, either "CC" or "CCParallel". Both implement Clenshaw-Curtis quadrature with the Leibniz rule for backward computation; "CCParallel" passes all evaluation points (nb_steps) at once, which is faster but requires more memory.
Initialize internal Module state, shared by both nn.Module and ScriptModule.
Source code:

    class MaskedUMNNAutoregressiveTransform(AutoregressiveTransform):
        """An unconstrained monotonic neural networks autoregressive layer that
        transforms the variables.

        Reference:
        > A. Wehenkel and G. Louppe, Unconstrained Monotonic Neural Networks, NeurIPS2019.

        ---- Specific arguments ----
        integrand_net_layers: the layers dimension to put in the integrand network.
        cond_size: The embedding size for the conditioning factors.
        nb_steps: The number of integration steps.
        solver: The quadrature algorithm - CC or CCParallel. Both implements
            Clenshaw-Curtis quadrature with Leibniz rule for backward computation.
            CCParallel pass all the evaluation points (nb_steps) at once, it is
            faster but requires more memory.
        """

        def __init__(self, features, hidden_features, context_features=None, num_blocks=2,
                     use_residual_blocks=True, random_mask=False, activation=F.relu,
                     dropout_probability=0.0, use_batch_norm=False,
                     integrand_net_layers=[50, 50, 50], cond_size=20, nb_steps=20,
                     solver="CCParallel"):
            self.features = features
            self.cond_size = cond_size
            made = made_module.MADE(
                features=features, hidden_features=hidden_features,
                context_features=context_features, num_blocks=num_blocks,
                output_multiplier=self._output_dim_multiplier(),
                use_residual_blocks=use_residual_blocks, random_mask=random_mask,
                activation=activation, dropout_probability=dropout_probability,
                use_batch_norm=use_batch_norm,
            )
            self._epsilon = 1e-3
            super().__init__(made)
            self.transformer = MonotonicNormalizer(
                integrand_net_layers, cond_size, nb_steps, solver
            )

        def _output_dim_multiplier(self):
            return self.cond_size

        def _elementwise_forward(self, inputs, autoregressive_params):
            z, jac = self.transformer(
                inputs, autoregressive_params.reshape(inputs.shape[0], inputs.shape[1], -1)
            )
            log_det_jac = jac.log().sum(1)
            return z, log_det_jac

        def _elementwise_inverse(self, inputs, autoregressive_params):
            x = self.transformer.inverse_transform(
                inputs, autoregressive_params.reshape(inputs.shape[0], inputs.shape[1], -1)
            )
            z, jac = self.transformer(
                x, autoregressive_params.reshape(inputs.shape[0], inputs.shape[1], -1)
            )
            log_det_jac = -jac.log().sum(1)
            return x, log_det_jac
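A construction sketch, assuming the class is imported from this module; the argument values are illustrative only. `nb_steps` trades accuracy of the Clenshaw-Curtis quadrature against compute, and `solver="CC"` uses less memory than `"CCParallel"` at the cost of speed.

    import torch
    from flowcon.transforms.autoregressive.autoregressive import (
        MaskedUMNNAutoregressiveTransform,
    )

    transform = MaskedUMNNAutoregressiveTransform(
        features=5, hidden_features=64,
        integrand_net_layers=[64, 64], cond_size=16, nb_steps=30, solver="CCParallel",
    )
    x = torch.randn(8, 5)
    z, log_det_jac = transform(x)
    print(z.shape, log_det_jac.shape)  # expected: torch.Size([8, 5]) torch.Size([8])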
Ancestors
- AutoregressiveTransform
- Transform
- torch.nn.modules.module.Module
Class variables
var call_super_init : bool
var dump_patches : bool
var training : bool
Inherited members