#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_python.py
# Copyright 2016 H2O.ai;  Apache License Version 2.0 (see LICENSE for details)
#
from __future__ import absolute_import, division, print_function, unicode_literals
from h2o.estimators.estimator_base import H2OEstimator
from h2o.exceptions import H2OValueError
from h2o.frame import H2OFrame
from h2o.utils.typechecks import assert_is_type, Enum, numeric
class H2ODeepLearningEstimator(H2OEstimator):
    """
    Deep Learning
    Build a Deep Neural Network model using CPUs.
    Builds a feed-forward multilayer artificial neural network on an H2OFrame.
    :examples:
    >>> import h2o
    >>> from h2o.estimators.deeplearning import H2ODeepLearningEstimator
    >>> h2o.connect()
    >>> rows = [[1,2,3,4,0], [2,1,2,4,1], [2,1,4,2,1], [0,1,2,34,1], [2,3,4,1,0]] * 50
    >>> fr = h2o.H2OFrame(rows)
    >>> fr[4] = fr[4].asfactor()
    >>> model = H2ODeepLearningEstimator()
    >>> model.train(x=range(4), y=4, training_frame=fr)
    """
    algo = "deeplearning"
    param_names = {"model_id", "training_frame", "validation_frame", "nfolds", "keep_cross_validation_models",
                   "keep_cross_validation_predictions", "keep_cross_validation_fold_assignment", "fold_assignment",
                   "fold_column", "response_column", "ignored_columns", "ignore_const_cols", "score_each_iteration",
                   "weights_column", "offset_column", "balance_classes", "class_sampling_factors",
                   "max_after_balance_size", "max_confusion_matrix_size", "max_hit_ratio_k", "checkpoint",
                   "pretrained_autoencoder", "overwrite_with_best_model", "use_all_factor_levels", "standardize",
                   "activation", "hidden", "epochs", "train_samples_per_iteration", "target_ratio_comm_to_comp", "seed",
                   "adaptive_rate", "rho", "epsilon", "rate", "rate_annealing", "rate_decay", "momentum_start",
                   "momentum_ramp", "momentum_stable", "nesterov_accelerated_gradient", "input_dropout_ratio",
                   "hidden_dropout_ratios", "l1", "l2", "max_w2", "initial_weight_distribution", "initial_weight_scale",
                   "initial_weights", "initial_biases", "loss", "distribution", "quantile_alpha", "tweedie_power",
                   "huber_alpha", "score_interval", "score_training_samples", "score_validation_samples",
                   "score_duty_cycle", "classification_stop", "regression_stop", "stopping_rounds", "stopping_metric",
                   "stopping_tolerance", "max_runtime_secs", "score_validation_sampling", "diagnostics", "fast_mode",
                   "force_load_balance", "variable_importances", "replicate_training_data", "single_node_mode",
                   "shuffle_training_data", "missing_values_handling", "quiet_mode", "autoencoder", "sparse",
                   "col_major", "average_activation", "sparsity_beta", "max_categorical_features", "reproducible",
                   "export_weights_and_biases", "mini_batch_size", "categorical_encoding", "elastic_averaging",
                   "elastic_averaging_moving_rate", "elastic_averaging_regularization", "export_checkpoints_dir"}
    def __init__(self, **kwargs):
        super(H2ODeepLearningEstimator, self).__init__()
        self._parms = {}
        for pname, pvalue in kwargs.items():
            if pname == 'model_id':
                self._id = pvalue
                self._parms["model_id"] = pvalue
            elif pname in self.param_names:
                # Using setattr(...) will invoke type-checking of the arguments
                setattr(self, pname, pvalue)
            else:
                raise H2OValueError("Unknown parameter %s = %r" % (pname, pvalue))
    @property
    def training_frame(self):
        """
        Id of the training data frame.
        Type: ``H2OFrame``.
        """
        return self._parms.get("training_frame")
    @training_frame.setter
    def training_frame(self, training_frame):
        self._parms["training_frame"] = H2OFrame._validate(training_frame, 'training_frame')
    @property
    def validation_frame(self):
        """
        Id of the validation data frame.
        Type: ``H2OFrame``.
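        A minimal sketch, assuming a live cluster and the toy frame ``fr`` built
        as in the class-level example (the 80/20 split is an arbitrary choice):
        :examples:
        >>> train, valid = fr.split_frame(ratios=[.8], seed=1234)
        >>> model = H2ODeepLearningEstimator(epochs=10, seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=train, validation_frame=valid)
        >>> model.logloss(valid=True)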
        """
        return self._parms.get("validation_frame")
    @validation_frame.setter
    def validation_frame(self, validation_frame):
        self._parms["validation_frame"] = H2OFrame._validate(validation_frame, 'validation_frame')
    @property
    def nfolds(self):
        """
        Number of folds for K-fold cross-validation (0 to disable or >= 2).
        Type: ``int``  (default: ``0``).
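        For illustration, assuming the toy frame ``fr`` from the class-level
        example (5 folds is an arbitrary choice):
        :examples:
        >>> model = H2ODeepLearningEstimator(nfolds=5, seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=fr)
        >>> model.cross_validation_models()  # kept since keep_cross_validation_models defaults to True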
        """
        return self._parms.get("nfolds")
    @nfolds.setter
    def nfolds(self, nfolds):
        assert_is_type(nfolds, None, int)
        self._parms["nfolds"] = nfolds
    @property
    def keep_cross_validation_models(self):
        """
        Whether to keep the cross-validation models.
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("keep_cross_validation_models")
    @keep_cross_validation_models.setter
    def keep_cross_validation_models(self, keep_cross_validation_models):
        assert_is_type(keep_cross_validation_models, None, bool)
        self._parms["keep_cross_validation_models"] = keep_cross_validation_models
    @property
    def keep_cross_validation_predictions(self):
        """
        Whether to keep the predictions of the cross-validation models.
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("keep_cross_validation_predictions")
    @keep_cross_validation_predictions.setter
    def keep_cross_validation_predictions(self, keep_cross_validation_predictions):
        assert_is_type(keep_cross_validation_predictions, None, bool)
        self._parms["keep_cross_validation_predictions"] = keep_cross_validation_predictions
    @property
    def keep_cross_validation_fold_assignment(self):
        """
        Whether to keep the cross-validation fold assignment.
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("keep_cross_validation_fold_assignment")
    @keep_cross_validation_fold_assignment.setter
    def keep_cross_validation_fold_assignment(self, keep_cross_validation_fold_assignment):
        assert_is_type(keep_cross_validation_fold_assignment, None, bool)
        self._parms["keep_cross_validation_fold_assignment"] = keep_cross_validation_fold_assignment
    @property
    def fold_assignment(self):
        """
        Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify
        the folds based on the response variable, for classification problems.
        One of: ``"auto"``, ``"random"``, ``"modulo"``, ``"stratified"``  (default: ``"auto"``).
        """
        return self._parms.get("fold_assignment")
    @fold_assignment.setter
    def fold_assignment(self, fold_assignment):
        assert_is_type(fold_assignment, None, Enum("auto", "random", "modulo", "stratified"))
        self._parms["fold_assignment"] = fold_assignment
    @property
    def fold_column(self):
        """
        Column with cross-validation fold index assignment per observation.
        Type: ``str``.
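        A hedged sketch on the toy frame ``fr`` from the class-level example
        (``kfold_column`` builds a fold-index column; the column name is arbitrary):
        :examples:
        >>> fr["fold"] = fr.kfold_column(n_folds=5, seed=1234)
        >>> model = H2ODeepLearningEstimator(seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=fr, fold_column="fold")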
        """
        return self._parms.get("fold_column")
    @fold_column.setter
    def fold_column(self, fold_column):
        assert_is_type(fold_column, None, str)
        self._parms["fold_column"] = fold_column
    @property
    def response_column(self):
        """
        Response variable column.
        Type: ``str``.
        """
        return self._parms.get("response_column")
    @response_column.setter
    def response_column(self, response_column):
        assert_is_type(response_column, None, str)
        self._parms["response_column"] = response_column
    @property
    def ignored_columns(self):
        """
        Names of columns to ignore for training.
        Type: ``List[str]``.
        """
        return self._parms.get("ignored_columns")
    @ignored_columns.setter
    def ignored_columns(self, ignored_columns):
        assert_is_type(ignored_columns, None, [str])
        self._parms["ignored_columns"] = ignored_columns
    @property
    def ignore_const_cols(self):
        """
        Ignore constant columns.
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("ignore_const_cols")
    @ignore_const_cols.setter
    def ignore_const_cols(self, ignore_const_cols):
        assert_is_type(ignore_const_cols, None, bool)
        self._parms["ignore_const_cols"] = ignore_const_cols
    @property
    def score_each_iteration(self):
        """
        Whether to score during each iteration of model training.
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("score_each_iteration")
    @score_each_iteration.setter
    def score_each_iteration(self, score_each_iteration):
        assert_is_type(score_each_iteration, None, bool)
        self._parms["score_each_iteration"] = score_each_iteration
    @property
    def weights_column(self):
        """
        Column with observation weights. Giving an observation a weight of zero is equivalent to excluding it from the
        dataset; giving an observation a relative weight of 2 is equivalent to repeating that row twice. Negative
        weights are not allowed. Note: Weights are per-row observation weights and do not increase the size of the data
        frame. This is typically the number of times a row is repeated, but non-integer values are supported as well.
        During training, rows with higher weights matter more due to the larger loss-function pre-factor.
        Type: ``str``.
        """
        return self._parms.get("weights_column")
    @weights_column.setter
    def weights_column(self, weights_column):
        assert_is_type(weights_column, None, str)
        self._parms["weights_column"] = weights_column
    @property
    def offset_column(self):
        """
        Offset column. This will be added to the combination of columns before applying the link function.
        Type: ``str``.
        """
        return self._parms.get("offset_column")
    @offset_column.setter
    def offset_column(self, offset_column):
        assert_is_type(offset_column, None, str)
        self._parms["offset_column"] = offset_column
    @property
    def balance_classes(self):
        """
        Balance training data class counts via over/under-sampling (for imbalanced data).
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("balance_classes")
    @balance_classes.setter
    def balance_classes(self, balance_classes):
        assert_is_type(balance_classes, None, bool)
        self._parms["balance_classes"] = balance_classes
    @property
    def class_sampling_factors(self):
        """
        Desired over/under-sampling ratios per class (in lexicographic order). If not specified, sampling factors will
        be automatically computed to obtain class balance during training. Requires balance_classes.
        Type: ``List[float]``.
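        A sketch with the two-class toy frame ``fr`` from the class-level example
        (one factor per class in lexicographic order; the values are illustrative):
        :examples:
        >>> model = H2ODeepLearningEstimator(balance_classes=True,
        ...                                  class_sampling_factors=[1., 0.5],
        ...                                  seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=fr)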
        """
        return self._parms.get("class_sampling_factors")
    @class_sampling_factors.setter
    def class_sampling_factors(self, class_sampling_factors):
        assert_is_type(class_sampling_factors, None, [float])
        self._parms["class_sampling_factors"] = class_sampling_factors
    @property
    def max_after_balance_size(self):
        """
        Maximum relative size of the training data after balancing class counts (can be less than 1.0). Requires
        balance_classes.
        Type: ``float``  (default: ``5``).
        """
        return self._parms.get("max_after_balance_size")
    @max_after_balance_size.setter
    def max_after_balance_size(self, max_after_balance_size):
        assert_is_type(max_after_balance_size, None, float)
        self._parms["max_after_balance_size"] = max_after_balance_size
    @property
    def max_confusion_matrix_size(self):
        """
        [Deprecated] Maximum size (# classes) for confusion matrices to be printed in the Logs.
        Type: ``int``  (default: ``20``).
        """
        return self._parms.get("max_confusion_matrix_size")
    @max_confusion_matrix_size.setter
    def max_confusion_matrix_size(self, max_confusion_matrix_size):
        assert_is_type(max_confusion_matrix_size, None, int)
        self._parms["max_confusion_matrix_size"] = max_confusion_matrix_size
    @property
    def max_hit_ratio_k(self):
        """
        Max. number (top K) of predictions to use for hit ratio computation (for multi-class only, 0 to disable).
        Type: ``int``  (default: ``0``).
        """
        return self._parms.get("max_hit_ratio_k")
    @max_hit_ratio_k.setter
    def max_hit_ratio_k(self, max_hit_ratio_k):
        assert_is_type(max_hit_ratio_k, None, int)
        self._parms["max_hit_ratio_k"] = max_hit_ratio_k
    @property
    def checkpoint(self):
        """
        Model checkpoint to resume training with.
        Type: ``str``.
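        A minimal resume-training sketch (toy frame ``fr`` as in the class-level
        example; the restarted model must keep the same network layout and use a
        larger epoch count):
        :examples:
        >>> m1 = H2ODeepLearningEstimator(hidden=[32, 32], epochs=10, seed=1234)
        >>> m1.train(x=range(4), y=4, training_frame=fr)
        >>> m2 = H2ODeepLearningEstimator(checkpoint=m1.model_id,
        ...                               hidden=[32, 32], epochs=20, seed=1234)
        >>> m2.train(x=range(4), y=4, training_frame=fr)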
        """
        return self._parms.get("checkpoint")
    @checkpoint.setter
    def checkpoint(self, checkpoint):
        assert_is_type(checkpoint, None, str, H2OEstimator)
        self._parms["checkpoint"] = checkpoint
    @property
    def pretrained_autoencoder(self):
        """
        Pretrained autoencoder model to initialize this model with.
        Type: ``str``.
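        A sketch of layer-wise pretraining (toy frame ``fr`` as in the class-level
        example; the supervised model's hidden layout must match the autoencoder's):
        :examples:
        >>> ae = H2OAutoEncoderEstimator(hidden=[16], epochs=10, seed=1234)
        >>> ae.train(x=range(4), training_frame=fr)
        >>> model = H2ODeepLearningEstimator(pretrained_autoencoder=ae.model_id,
        ...                                  hidden=[16], epochs=10, seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=fr)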
        """
        return self._parms.get("pretrained_autoencoder")
    @pretrained_autoencoder.setter
    def pretrained_autoencoder(self, pretrained_autoencoder):
        assert_is_type(pretrained_autoencoder, None, str, H2OEstimator)
        self._parms["pretrained_autoencoder"] = pretrained_autoencoder
    @property
    def overwrite_with_best_model(self):
        """
        If enabled, override the final model with the best model found during training.
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("overwrite_with_best_model")
    @overwrite_with_best_model.setter
    def overwrite_with_best_model(self, overwrite_with_best_model):
        assert_is_type(overwrite_with_best_model, None, bool)
        self._parms["overwrite_with_best_model"] = overwrite_with_best_model
    @property
    def use_all_factor_levels(self):
        """
        Use all factor levels of categorical variables. Otherwise, the first factor level is omitted (without loss of
        accuracy). Useful for variable importances and auto-enabled for autoencoder.
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("use_all_factor_levels")
    @use_all_factor_levels.setter
    def use_all_factor_levels(self, use_all_factor_levels):
        assert_is_type(use_all_factor_levels, None, bool)
        self._parms["use_all_factor_levels"] = use_all_factor_levels
    @property
    def standardize(self):
        """
        If enabled, automatically standardize the data. If disabled, the user must provide properly scaled input data.
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("standardize")
    @standardize.setter
    def standardize(self, standardize):
        assert_is_type(standardize, None, bool)
        self._parms["standardize"] = standardize
    @property
    def activation(self):
        """
        Activation function.
        One of: ``"tanh"``, ``"tanh_with_dropout"``, ``"rectifier"``, ``"rectifier_with_dropout"``, ``"maxout"``,
        ``"maxout_with_dropout"``  (default: ``"rectifier"``).
        """
        return self._parms.get("activation")
    @activation.setter
    def activation(self, activation):
        assert_is_type(activation, None, Enum("tanh", "tanh_with_dropout", "rectifier", "rectifier_with_dropout", "maxout", "maxout_with_dropout"))
        self._parms["activation"] = activation
    @property
    def hidden(self):
        """
        Hidden layer sizes (e.g. [100, 100]).
        Type: ``List[int]``  (default: ``[200, 200]``).
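        For illustration (toy frame ``fr`` as in the class-level example; the
        layer sizes and epoch count are arbitrary):
        :examples:
        >>> model = H2ODeepLearningEstimator(hidden=[64, 64],
        ...                                  activation="rectifier",
        ...                                  epochs=20, seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=fr)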
        """
        return self._parms.get("hidden")
    @hidden.setter
    def hidden(self, hidden):
        assert_is_type(hidden, None, [int])
        self._parms["hidden"] = hidden
    @property
    def epochs(self):
        """
        How many times the dataset should be iterated (streamed); can be fractional.
        Type: ``float``  (default: ``10``).
        """
        return self._parms.get("epochs")
    @epochs.setter
    def epochs(self, epochs):
        assert_is_type(epochs, None, numeric)
        self._parms["epochs"] = epochs
    @property
    def train_samples_per_iteration(self):
        """
        Number of training samples (globally) per MapReduce iteration. Special values are 0: one epoch, -1: all
        available data (e.g., replicated training data), -2: automatic.
        Type: ``int``  (default: ``-2``).
        """
        return self._parms.get("train_samples_per_iteration")
    @train_samples_per_iteration.setter
    def train_samples_per_iteration(self, train_samples_per_iteration):
        assert_is_type(train_samples_per_iteration, None, int)
        self._parms["train_samples_per_iteration"] = train_samples_per_iteration
    @property
    def target_ratio_comm_to_comp(self):
        """
        Target ratio of communication overhead to computation. Only for multi-node operation and
        train_samples_per_iteration = -2 (auto-tuning).
        Type: ``float``  (default: ``0.05``).
        """
        return self._parms.get("target_ratio_comm_to_comp")
    @target_ratio_comm_to_comp.setter
    def target_ratio_comm_to_comp(self, target_ratio_comm_to_comp):
        assert_is_type(target_ratio_comm_to_comp, None, numeric)
        self._parms["target_ratio_comm_to_comp"] = target_ratio_comm_to_comp
    @property
    def seed(self):
        """
        Seed for random numbers (affects sampling) - Note: only reproducible when running single threaded.
        Type: ``int``  (default: ``-1``).
        """
        return self._parms.get("seed")
    @seed.setter
    def seed(self, seed):
        assert_is_type(seed, None, int)
        self._parms["seed"] = seed
    @property
    def adaptive_rate(self):
        """
        Adaptive learning rate.
        Type: ``bool``  (default: ``True``).
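        Disabling the adaptive rate switches to plain SGD, where ``rate``,
        ``rate_annealing`` and the ``momentum_*`` parameters take effect. A sketch
        with illustrative values (toy frame ``fr`` as in the class-level example):
        :examples:
        >>> model = H2ODeepLearningEstimator(adaptive_rate=False, rate=0.01,
        ...                                  momentum_start=0.5, momentum_ramp=100000,
        ...                                  momentum_stable=0.99, seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=fr)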
        """
        return self._parms.get("adaptive_rate")
    @adaptive_rate.setter
    def adaptive_rate(self, adaptive_rate):
        assert_is_type(adaptive_rate, None, bool)
        self._parms["adaptive_rate"] = adaptive_rate
    @property
    def rho(self):
        """
        Adaptive learning rate time decay factor (similarity to prior updates).
        Type: ``float``  (default: ``0.99``).
        """
        return self._parms.get("rho")
    @rho.setter
    def rho(self, rho):
        assert_is_type(rho, None, numeric)
        self._parms["rho"] = rho
    @property
    def epsilon(self):
        """
        Adaptive learning rate smoothing factor (to avoid divisions by zero and allow progress).
        Type: ``float``  (default: ``1e-08``).
        """
        return self._parms.get("epsilon")
    @epsilon.setter
    def epsilon(self, epsilon):
        assert_is_type(epsilon, None, numeric)
        self._parms["epsilon"] = epsilon
    @property
    def rate(self):
        """
        Learning rate (higher => less stable, lower => slower convergence).
        Type: ``float``  (default: ``0.005``).
        """
        return self._parms.get("rate")
    @rate.setter
    def rate(self, rate):
        assert_is_type(rate, None, numeric)
        self._parms["rate"] = rate
    @property
    def rate_annealing(self):
        """
        Learning rate annealing: rate / (1 + rate_annealing * samples).
        Type: ``float``  (default: ``1e-06``).
        """
        return self._parms.get("rate_annealing")
    @rate_annealing.setter
    def rate_annealing(self, rate_annealing):
        assert_is_type(rate_annealing, None, numeric)
        self._parms["rate_annealing"] = rate_annealing
    @property
    def rate_decay(self):
        """
        Learning rate decay factor between layers (N-th layer: rate * rate_decay ^ (n - 1)).
        Type: ``float``  (default: ``1``).
        """
        return self._parms.get("rate_decay")
    @rate_decay.setter
    def rate_decay(self, rate_decay):
        assert_is_type(rate_decay, None, numeric)
        self._parms["rate_decay"] = rate_decay
    @property
    def momentum_start(self):
        """
        Initial momentum at the beginning of training (try 0.5).
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("momentum_start")
    @momentum_start.setter
    def momentum_start(self, momentum_start):
        assert_is_type(momentum_start, None, numeric)
        self._parms["momentum_start"] = momentum_start
    @property
    def momentum_ramp(self):
        """
        Number of training samples for which momentum increases.
        Type: ``float``  (default: ``1000000``).
        """
        return self._parms.get("momentum_ramp")
    @momentum_ramp.setter
    def momentum_ramp(self, momentum_ramp):
        assert_is_type(momentum_ramp, None, numeric)
        self._parms["momentum_ramp"] = momentum_ramp
    @property
    def momentum_stable(self):
        """
        Final momentum after the ramp is over (try 0.99).
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("momentum_stable")
    @momentum_stable.setter
    def momentum_stable(self, momentum_stable):
        assert_is_type(momentum_stable, None, numeric)
        self._parms["momentum_stable"] = momentum_stable
    @property
    def nesterov_accelerated_gradient(self):
        """
        Use Nesterov accelerated gradient (recommended).
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("nesterov_accelerated_gradient")
    @nesterov_accelerated_gradient.setter
    def nesterov_accelerated_gradient(self, nesterov_accelerated_gradient):
        assert_is_type(nesterov_accelerated_gradient, None, bool)
        self._parms["nesterov_accelerated_gradient"] = nesterov_accelerated_gradient
    @property
    def input_dropout_ratio(self):
        """
        Input layer dropout ratio (can improve generalization, try 0.1 or 0.2).
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("input_dropout_ratio")
    @input_dropout_ratio.setter
    def input_dropout_ratio(self, input_dropout_ratio):
        assert_is_type(input_dropout_ratio, None, numeric)
        self._parms["input_dropout_ratio"] = input_dropout_ratio
    @property
    def hidden_dropout_ratios(self):
        """
        Hidden layer dropout ratios (can improve generalization), specify one value per hidden layer, defaults to 0.5.
        Type: ``List[float]``.
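        Hidden dropout requires one of the ``*_with_dropout`` activations, with
        one ratio per hidden layer. A sketch (toy frame ``fr`` as before):
        :examples:
        >>> model = H2ODeepLearningEstimator(activation="rectifier_with_dropout",
        ...                                  hidden=[32, 32],
        ...                                  hidden_dropout_ratios=[0.3, 0.3],
        ...                                  seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=fr)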
        """
        return self._parms.get("hidden_dropout_ratios")
    @hidden_dropout_ratios.setter
    def hidden_dropout_ratios(self, hidden_dropout_ratios):
        assert_is_type(hidden_dropout_ratios, None, [numeric])
        self._parms["hidden_dropout_ratios"] = hidden_dropout_ratios
    @property
    def l1(self):
        """
        L1 regularization (can add stability and improve generalization, causes many weights to become 0).
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("l1")
    @l1.setter
    def l1(self, l1):
        assert_is_type(l1, None, numeric)
        self._parms["l1"] = l1
    @property
    def l2(self):
        """
        L2 regularization (can add stability and improve generalization, causes many weights to be small).
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("l2")
    @l2.setter
    def l2(self, l2):
        assert_is_type(l2, None, numeric)
        self._parms["l2"] = l2
    @property
    def max_w2(self):
        """
        Constraint for squared sum of incoming weights per unit (e.g. for Rectifier).
        Type: ``float``  (default: ``3.4028235e+38``).
        """
        return self._parms.get("max_w2")
    @max_w2.setter
    def max_w2(self, max_w2):
        assert_is_type(max_w2, None, float)
        self._parms["max_w2"] = max_w2
    @property
    def initial_weight_distribution(self):
        """
        Initial weight distribution.
        One of: ``"uniform_adaptive"``, ``"uniform"``, ``"normal"``  (default: ``"uniform_adaptive"``).
        """
        return self._parms.get("initial_weight_distribution")
    @initial_weight_distribution.setter
    def initial_weight_distribution(self, initial_weight_distribution):
        assert_is_type(initial_weight_distribution, None, Enum("uniform_adaptive", "uniform", "normal"))
        self._parms["initial_weight_distribution"] = initial_weight_distribution
    @property
    def initial_weight_scale(self):
        """
        Scale of initial weights. Uniform: weights are drawn from -value...value; Normal: value is the standard deviation.
        Type: ``float``  (default: ``1``).
        """
        return self._parms.get("initial_weight_scale")
    @initial_weight_scale.setter
    def initial_weight_scale(self, initial_weight_scale):
        assert_is_type(initial_weight_scale, None, numeric)
        self._parms["initial_weight_scale"] = initial_weight_scale
    @property
    def initial_weights(self):
        """
        A list of H2OFrame ids to initialize the weight matrices of this model with.
        Type: ``List[H2OFrame]``.
        """
        return self._parms.get("initial_weights")
    @initial_weights.setter
    def initial_weights(self, initial_weights):
        assert_is_type(initial_weights, None, [H2OFrame, None])
        self._parms["initial_weights"] = initial_weights
    @property
    def initial_biases(self):
        """
        A list of H2OFrame ids to initialize the bias vectors of this model with.
        Type: ``List[H2OFrame]``.
        """
        return self._parms.get("initial_biases")
    @initial_biases.setter
    def initial_biases(self, initial_biases):
        assert_is_type(initial_biases, None, [H2OFrame, None])
        self._parms["initial_biases"] = initial_biases
    @property
    def loss(self):
        """
        Loss function.
        One of: ``"automatic"``, ``"cross_entropy"``, ``"quadratic"``, ``"huber"``, ``"absolute"``, ``"quantile"``
        (default: ``"automatic"``).
        """
        return self._parms.get("loss")
    @loss.setter
    def loss(self, loss):
        assert_is_type(loss, None, Enum("automatic", "cross_entropy", "quadratic", "huber", "absolute", "quantile"))
        self._parms["loss"] = loss
    @property
    def distribution(self):
        """
        Distribution function.
        One of: ``"auto"``, ``"bernoulli"``, ``"multinomial"``, ``"gaussian"``, ``"poisson"``, ``"gamma"``,
        ``"tweedie"``, ``"laplace"``, ``"quantile"``, ``"huber"``  (default: ``"auto"``).
        """
        return self._parms.get("distribution")
    @distribution.setter
    def distribution(self, distribution):
        assert_is_type(distribution, None, Enum("auto", "bernoulli", "multinomial", "gaussian", "poisson", "gamma", "tweedie", "laplace", "quantile", "huber"))
        self._parms["distribution"] = distribution
    @property
    def quantile_alpha(self):
        """
        Desired quantile for Quantile regression, must be between 0 and 1.
        Type: ``float``  (default: ``0.5``).
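        Quantile regression needs a numeric response; in this sketch column 3 of
        the toy frame ``fr`` stands in purely for illustration:
        :examples:
        >>> model = H2ODeepLearningEstimator(distribution="quantile",
        ...                                  quantile_alpha=0.8, seed=1234)
        >>> model.train(x=range(3), y=3, training_frame=fr)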
        """
        return self._parms.get("quantile_alpha")
    @quantile_alpha.setter
    def quantile_alpha(self, quantile_alpha):
        assert_is_type(quantile_alpha, None, numeric)
        self._parms["quantile_alpha"] = quantile_alpha
    @property
    def tweedie_power(self):
        """
        Tweedie power for Tweedie regression, must be between 1 and 2.
        Type: ``float``  (default: ``1.5``).
        """
        return self._parms.get("tweedie_power")
    @tweedie_power.setter
    def tweedie_power(self, tweedie_power):
        assert_is_type(tweedie_power, None, numeric)
        self._parms["tweedie_power"] = tweedie_power
    @property
    def huber_alpha(self):
        """
        Desired quantile for Huber/M-regression (threshold between quadratic and linear loss, must be between 0 and 1).
        Type: ``float``  (default: ``0.9``).
        """
        return self._parms.get("huber_alpha")
    @huber_alpha.setter
    def huber_alpha(self, huber_alpha):
        assert_is_type(huber_alpha, None, numeric)
        self._parms["huber_alpha"] = huber_alpha
    @property
    def score_interval(self):
        """
        Shortest time interval (in seconds) between model scoring.
        Type: ``float``  (default: ``5``).
        """
        return self._parms.get("score_interval")
    @score_interval.setter
    def score_interval(self, score_interval):
        assert_is_type(score_interval, None, numeric)
        self._parms["score_interval"] = score_interval
    @property
    def score_training_samples(self):
        """
        Number of training set samples for scoring (0 for all).
        Type: ``int``  (default: ``10000``).
        """
        return self._parms.get("score_training_samples")
    @score_training_samples.setter
    def score_training_samples(self, score_training_samples):
        assert_is_type(score_training_samples, None, int)
        self._parms["score_training_samples"] = score_training_samples
    @property
    def score_validation_samples(self):
        """
        Number of validation set samples for scoring (0 for all).
        Type: ``int``  (default: ``0``).
        """
        return self._parms.get("score_validation_samples")
    @score_validation_samples.setter
    def score_validation_samples(self, score_validation_samples):
        assert_is_type(score_validation_samples, None, int)
        self._parms["score_validation_samples"] = score_validation_samples
    @property
    def score_duty_cycle(self):
        """
        Maximum duty cycle fraction for scoring (lower: more training, higher: more scoring).
        Type: ``float``  (default: ``0.1``).
        """
        return self._parms.get("score_duty_cycle")
    @score_duty_cycle.setter
    def score_duty_cycle(self, score_duty_cycle):
        assert_is_type(score_duty_cycle, None, numeric)
        self._parms["score_duty_cycle"] = score_duty_cycle
    @property
    def classification_stop(self):
        """
        Stopping criterion for classification error fraction on training data (-1 to disable).
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("classification_stop")
    @classification_stop.setter
    def classification_stop(self, classification_stop):
        assert_is_type(classification_stop, None, numeric)
        self._parms["classification_stop"] = classification_stop
    @property
    def regression_stop(self):
        """
        Stopping criterion for regression error (MSE) on training data (-1 to disable).
        Type: ``float``  (default: ``1e-06``).
        """
        return self._parms.get("regression_stop")
    @regression_stop.setter
    def regression_stop(self, regression_stop):
        assert_is_type(regression_stop, None, numeric)
        self._parms["regression_stop"] = regression_stop
    @property
    def stopping_rounds(self):
        """
        Early stopping based on convergence of stopping_metric. Stop if the simple moving average of length k of the
        stopping_metric does not improve for k := stopping_rounds scoring events (0 to disable).
        Type: ``int``  (default: ``5``).
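        An early-stopping sketch (toy frame ``fr`` as before; the metric,
        tolerance, and epoch budget are illustrative):
        :examples:
        >>> model = H2ODeepLearningEstimator(stopping_rounds=3,
        ...                                  stopping_metric="logloss",
        ...                                  stopping_tolerance=1e-3,
        ...                                  epochs=100, seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=fr)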
        """
        return self._parms.get("stopping_rounds")
    @stopping_rounds.setter
    def stopping_rounds(self, stopping_rounds):
        assert_is_type(stopping_rounds, None, int)
        self._parms["stopping_rounds"] = stopping_rounds
    @property
    def stopping_metric(self):
        """
        Metric to use for early stopping (AUTO: logloss for classification, deviance for regression, and anomaly_score
        for Isolation Forest). Note that custom and custom_increasing can only be used in GBM and DRF with the Python
        client.
        One of: ``"auto"``, ``"deviance"``, ``"logloss"``, ``"mse"``, ``"rmse"``, ``"mae"``, ``"rmsle"``, ``"auc"``,
        ``"lift_top_group"``, ``"misclassification"``, ``"aucpr"``, ``"mean_per_class_error"``, ``"custom"``,
        ``"custom_increasing"``  (default: ``"auto"``).
        """
        return self._parms.get("stopping_metric")
    @stopping_metric.setter
    def stopping_metric(self, stopping_metric):
        assert_is_type(stopping_metric, None, Enum("auto", "deviance", "logloss", "mse", "rmse", "mae", "rmsle", "auc", "lift_top_group", "misclassification", "aucpr", "mean_per_class_error", "custom", "custom_increasing"))
        self._parms["stopping_metric"] = stopping_metric
    @property
    def stopping_tolerance(self):
        """
        Relative tolerance for metric-based stopping criterion (stop if relative improvement is not at least this much).
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("stopping_tolerance")
    @stopping_tolerance.setter
    def stopping_tolerance(self, stopping_tolerance):
        assert_is_type(stopping_tolerance, None, numeric)
        self._parms["stopping_tolerance"] = stopping_tolerance
    @property
    def max_runtime_secs(self):
        """
        Maximum allowed runtime in seconds for model training. Use 0 to disable.
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("max_runtime_secs")
    @max_runtime_secs.setter
    def max_runtime_secs(self, max_runtime_secs):
        assert_is_type(max_runtime_secs, None, numeric)
        self._parms["max_runtime_secs"] = max_runtime_secs
    @property
    def score_validation_sampling(self):
        """
        Method used to sample validation dataset for scoring.
        One of: ``"uniform"``, ``"stratified"``  (default: ``"uniform"``).
        """
        return self._parms.get("score_validation_sampling")
    @score_validation_sampling.setter
    def score_validation_sampling(self, score_validation_sampling):
        assert_is_type(score_validation_sampling, None, Enum("uniform", "stratified"))
        self._parms["score_validation_sampling"] = score_validation_sampling
    @property
    def diagnostics(self):
        """
        Enable diagnostics for hidden layers.
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("diagnostics")
    @diagnostics.setter
    def diagnostics(self, diagnostics):
        assert_is_type(diagnostics, None, bool)
        self._parms["diagnostics"] = diagnostics
    @property
    def fast_mode(self):
        """
        Enable fast mode (minor approximation in back-propagation).
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("fast_mode")
    @fast_mode.setter
    def fast_mode(self, fast_mode):
        assert_is_type(fast_mode, None, bool)
        self._parms["fast_mode"] = fast_mode
    @property
    def force_load_balance(self):
        """
        Force extra load balancing to increase training speed for small datasets (to keep all cores busy).
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("force_load_balance")
    @force_load_balance.setter
    def force_load_balance(self, force_load_balance):
        assert_is_type(force_load_balance, None, bool)
        self._parms["force_load_balance"] = force_load_balance
    @property
    def variable_importances(self):
        """
        Compute variable importances for input features (Gedeon method) - can be slow for large networks.
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("variable_importances")
    @variable_importances.setter
    def variable_importances(self, variable_importances):
        assert_is_type(variable_importances, None, bool)
        self._parms["variable_importances"] = variable_importances
    @property
    def replicate_training_data(self):
        """
        Replicate the entire training dataset onto every node for faster training on small datasets.
        Type: ``bool``  (default: ``True``).
        """
        return self._parms.get("replicate_training_data")
    @replicate_training_data.setter
    def replicate_training_data(self, replicate_training_data):
        assert_is_type(replicate_training_data, None, bool)
        self._parms["replicate_training_data"] = replicate_training_data
    @property
    def single_node_mode(self):
        """
        Run on a single node for fine-tuning of model parameters.
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("single_node_mode")
    @single_node_mode.setter
    def single_node_mode(self, single_node_mode):
        assert_is_type(single_node_mode, None, bool)
        self._parms["single_node_mode"] = single_node_mode
    @property
    def shuffle_training_data(self):
        """
        Enable shuffling of training data (recommended if training data is replicated and train_samples_per_iteration is
        close to #nodes x #rows, or if using balance_classes).
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("shuffle_training_data")
    @shuffle_training_data.setter
    def shuffle_training_data(self, shuffle_training_data):
        assert_is_type(shuffle_training_data, None, bool)
        self._parms["shuffle_training_data"] = shuffle_training_data
    @property
    def missing_values_handling(self):
        """
        Handling of missing values. Either MeanImputation or Skip.
        One of: ``"mean_imputation"``, ``"skip"``  (default: ``"mean_imputation"``).
        """
        return self._parms.get("missing_values_handling")
    @missing_values_handling.setter
    def missing_values_handling(self, missing_values_handling):
        assert_is_type(missing_values_handling, None, Enum("mean_imputation", "skip"))
        self._parms["missing_values_handling"] = missing_values_handling
    @property
    def quiet_mode(self):
        """
        Enable quiet mode for less output to standard output.
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("quiet_mode")
    @quiet_mode.setter
    def quiet_mode(self, quiet_mode):
        assert_is_type(quiet_mode, None, bool)
        self._parms["quiet_mode"] = quiet_mode
    @property
    def autoencoder(self):
        """
        Auto-Encoder.
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("autoencoder")
    @autoencoder.setter
    def autoencoder(self, autoencoder):
        assert_is_type(autoencoder, None, bool)
        self._parms["autoencoder"] = autoencoder
    @property
    def sparse(self):
        """
        Sparse data handling (more efficient for data with lots of 0 values).
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("sparse")
    @sparse.setter
    def sparse(self, sparse):
        assert_is_type(sparse, None, bool)
        self._parms["sparse"] = sparse
    @property
    def col_major(self):
        """
        [Deprecated] Use a column-major weight matrix for the input layer. Can speed up forward propagation, but might
        slow down backpropagation.
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("col_major")
    @col_major.setter
    def col_major(self, col_major):
        assert_is_type(col_major, None, bool)
        self._parms["col_major"] = col_major
    @property
    def average_activation(self):
        """
        Average activation for sparse auto-encoder. #Experimental
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("average_activation")
    @average_activation.setter
    def average_activation(self, average_activation):
        assert_is_type(average_activation, None, numeric)
        self._parms["average_activation"] = average_activation
    @property
    def sparsity_beta(self):
        """
        Sparsity regularization. #Experimental
        Type: ``float``  (default: ``0``).
        """
        return self._parms.get("sparsity_beta")
    @sparsity_beta.setter
    def sparsity_beta(self, sparsity_beta):
        assert_is_type(sparsity_beta, None, numeric)
        self._parms["sparsity_beta"] = sparsity_beta
    @property
    def max_categorical_features(self):
        """
        Max. number of categorical features, enforced via hashing. #Experimental
        Type: ``int``  (default: ``2147483647``).
        """
        return self._parms.get("max_categorical_features")
    @max_categorical_features.setter
    def max_categorical_features(self, max_categorical_features):
        assert_is_type(max_categorical_features, None, int)
        self._parms["max_categorical_features"] = max_categorical_features
    @property
    def reproducible(self):
        """
        Force reproducibility on small data (will be slow - only uses 1 thread).
        Type: ``bool``  (default: ``False``).
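        For illustration (a seed is only honored exactly together with
        ``reproducible``, which forces single-threaded training):
        :examples:
        >>> model = H2ODeepLearningEstimator(reproducible=True, seed=1234)
        >>> model.train(x=range(4), y=4, training_frame=fr)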
        """
        return self._parms.get("reproducible")
    @reproducible.setter
    def reproducible(self, reproducible):
        assert_is_type(reproducible, None, bool)
        self._parms["reproducible"] = reproducible
    @property
    def export_weights_and_biases(self):
        """
        Whether to export Neural Network weights and biases to H2O Frames.
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("export_weights_and_biases")
    @export_weights_and_biases.setter
    def export_weights_and_biases(self, export_weights_and_biases):
        assert_is_type(export_weights_and_biases, None, bool)
        self._parms["export_weights_and_biases"] = export_weights_and_biases
    @property
    def mini_batch_size(self):
        """
        Mini-batch size (smaller leads to better fit, larger can speed up and generalize better).
        Type: ``int``  (default: ``1``).
        """
        return self._parms.get("mini_batch_size")
    @mini_batch_size.setter
    def mini_batch_size(self, mini_batch_size):
        assert_is_type(mini_batch_size, None, int)
        self._parms["mini_batch_size"] = mini_batch_size
    @property
    def categorical_encoding(self):
        """
        Encoding scheme for categorical features.
        One of: ``"auto"``, ``"enum"``, ``"one_hot_internal"``, ``"one_hot_explicit"``, ``"binary"``, ``"eigen"``,
        ``"label_encoder"``, ``"sort_by_response"``, ``"enum_limited"``  (default: ``"auto"``).
        """
        return self._parms.get("categorical_encoding")
    @categorical_encoding.setter
    def categorical_encoding(self, categorical_encoding):
        assert_is_type(categorical_encoding, None, Enum("auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"))
        self._parms["categorical_encoding"] = categorical_encoding
    @property
    def elastic_averaging(self):
        """
        Elastic averaging between compute nodes can improve distributed model convergence. #Experimental
        Type: ``bool``  (default: ``False``).
        """
        return self._parms.get("elastic_averaging")
    @elastic_averaging.setter
    def elastic_averaging(self, elastic_averaging):
        assert_is_type(elastic_averaging, None, bool)
        self._parms["elastic_averaging"] = elastic_averaging
    @property
    def elastic_averaging_moving_rate(self):
        """
        Elastic averaging moving rate (only if elastic averaging is enabled).
        Type: ``float``  (default: ``0.9``).
        """
        return self._parms.get("elastic_averaging_moving_rate")
    @elastic_averaging_moving_rate.setter
    def elastic_averaging_moving_rate(self, elastic_averaging_moving_rate):
        assert_is_type(elastic_averaging_moving_rate, None, numeric)
        self._parms["elastic_averaging_moving_rate"] = elastic_averaging_moving_rate
    @property
    def elastic_averaging_regularization(self):
        """
        Elastic averaging regularization strength (only if elastic averaging is enabled).
        Type: ``float``  (default: ``0.001``).
        """
        return self._parms.get("elastic_averaging_regularization")
    @elastic_averaging_regularization.setter
    def elastic_averaging_regularization(self, elastic_averaging_regularization):
        assert_is_type(elastic_averaging_regularization, None, numeric)
        self._parms["elastic_averaging_regularization"] = elastic_averaging_regularization
    @property
    def export_checkpoints_dir(self):
        """
        Automatically export generated models to this directory.
        Type: ``str``.
        """
        return self._parms.get("export_checkpoints_dir")
    @export_checkpoints_dir.setter
    def export_checkpoints_dir(self, export_checkpoints_dir):
        assert_is_type(export_checkpoints_dir, None, str)
        self._parms["export_checkpoints_dir"] = export_checkpoints_dir 
class H2OAutoEncoderEstimator(H2ODeepLearningEstimator):
    """
    :examples:
    >>> import h2o as ml
    >>> from h2o.estimators.deeplearning import H2OAutoEncoderEstimator
    >>> ml.init()
    >>> rows = [[1,2,3,4,0], [2,1,2,4,1], [2,1,4,2,1], [0,1,2,34,1], [2,3,4,1,0]] * 50
    >>> fr = ml.H2OFrame(rows)
    >>> fr[4] = fr[4].asfactor()
    >>> model = H2OAutoEncoderEstimator()
    >>> model.train(x=range(4), training_frame=fr)
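    Per-row reconstruction error can then be inspected; a sketch (``anomaly``
    returns the MSE between each input row and its reconstruction):
    >>> recon_error = model.anomaly(fr)
    >>> recon_error.mean()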
    """
    def __init__(self, **kwargs):
        super(H2OAutoEncoderEstimator, self).__init__(**kwargs)
        self._parms['autoencoder'] = True