Optimizers

class marius.nn.Optimizer
__init__()
clear_grad(self: marius._nn.Optimizer) → None
property num_steps
reset_state(self: marius._nn.Optimizer) → None
step(self: marius._nn.Optimizer) → None
class marius.nn.SGDOptimizer
__init__(self: marius._nn.SGDOptimizer, param_dict: torch._C.cpp.OrderedTensorDict, learning_rate: float) → None
property learning_rate
class marius.nn.AdagradOptimizer
__init__(self: marius._nn.AdagradOptimizer, param_dict: torch._C.cpp.OrderedTensorDict, options: marius._config.AdagradOptions) → None
__init__(self: marius._nn.AdagradOptimizer, param_dict: torch._C.cpp.OrderedTensorDict, lr: float = 0.1, eps: float = 1e-10, lr_decay: float = 0, init_value: float = 0, weight_decay: float = 0) → None
property eps
property init_value
property learning_rate
property lr_decay
property weight_decay
class marius.nn.AdamOptimizer
__init__(self: marius._nn.AdamOptimizer, param_dict: torch._C.cpp.OrderedTensorDict, options: marius._config.AdamOptions) → None
__init__(self: marius._nn.AdamOptimizer, param_dict: torch._C.cpp.OrderedTensorDict, lr: float = 0.1, eps: float = 1e-08, beta_1: float = 0.9, beta_2: float = 0.999, weight_decay: float = 0, amsgrad: bool = False) → None
property amsgrad
property beta_1
property beta_2
property eps
property learning_rate
property weight_decay