class DNN::Optimizers::RMSProp
Attributes
alpha[RW]
eps[RW]
lr[RW]
Public Class Methods
new(lr: 0.001, alpha: 0.9, eps: 1e-7, clip_norm: nil)
@param [Float] lr Learning rate.
@param [Float] alpha Decay rate for the moving average of squared gradients.
@param [Float] eps Small value added to the denominator to avoid division by zero.
Calls superclass method
DNN::Optimizers::Optimizer::new
# File lib/dnn/core/optimizers.rb, line 153
def initialize(lr: 0.001, alpha: 0.9, eps: 1e-7, clip_norm: nil)
  super(clip_norm: clip_norm)
  @lr = lr
  @alpha = alpha
  @eps = eps
  @g = {}
  @status = { g: @g }
end
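A minimal usage sketch, assuming only the constructor and the read/write attributes documented on this page; wiring the optimizer into a model is done elsewhere in the ruby-dnn API and is not shown here.

  require "dnn"

  # Create an RMSProp optimizer with explicit (here, default) hyperparameters.
  opt = DNN::Optimizers::RMSProp.new(lr: 0.001, alpha: 0.9, eps: 1e-7)

  # lr, alpha and eps are read/write, so they can be adjusted later,
  # for example as a manual learning-rate schedule between epochs.
  opt.lr = 0.0005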
Public Instance Methods
load_hash(hash)
# File lib/dnn/core/optimizers.rb, line 174
def load_hash(hash)
  initialize(lr: hash[:lr], alpha: hash[:alpha], eps: hash[:eps], clip_norm: hash[:clip_norm])
end
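A sketch of restoring hyperparameters from a saved hash, assuming the hash uses the keys read above (:lr, :alpha, :eps, :clip_norm); any additional keys written during serialization are ignored here.

  opt = DNN::Optimizers::RMSProp.new
  opt.load_hash({ lr: 0.0005, alpha: 0.95, eps: 1e-7, clip_norm: nil })
  opt.lr    # => 0.0005
  opt.alpha # => 0.95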
to_hash()
Calls superclass method
DNN::Optimizers::Optimizer#to_hash
# File lib/dnn/core/optimizers.rb, line 162
def to_hash
  super(lr: @lr, alpha: @alpha, eps: @eps)
end
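For illustration, a possible read-back of the serialized values. The lr, alpha and eps entries come from this method; any further entries (for example the class name or clip_norm) are contributed by DNN::Optimizers::Optimizer#to_hash and are an assumption here.

  opt = DNN::Optimizers::RMSProp.new(lr: 0.001, alpha: 0.9)
  h = opt.to_hash
  h[:lr]    # => 0.001
  h[:alpha] # => 0.9
  h[:eps]   # => 1e-07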
Private Instance Methods
update_params(params)
# File lib/dnn/core/optimizers.rb, line 166
def update_params(params)
  params.each do |param|
    @g[param] ||= Xumo::SFloat.zeros(*param.data.shape)
    @g[param] = @alpha * @g[param] + (1 - @alpha) * param.grad**2
    param.data -= (@lr / Xumo::NMath.sqrt(@g[param] + @eps)) * param.grad
  end
end
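update_params keeps a per-parameter exponential moving average of squared gradients (@g) and scales each gradient step by the inverse square root of that average. The following standalone sketch replays the same arithmetic on plain Numo arrays; ruby-dnn's Xumo is assumed to alias Numo (CPU) or Cumo (GPU), and the variable names below are local illustrations rather than ruby-dnn objects.

  require "numo/narray"

  lr, alpha, eps = 0.001, 0.9, 1e-7
  data = Numo::SFloat[1.0, -2.0, 3.0]   # parameter values
  grad = Numo::SFloat[0.5, -0.1, 0.2]   # gradient of the loss w.r.t. data
  g    = Numo::SFloat.zeros(3)          # running average of squared gradients

  # One RMSProp step, mirroring the loop body in update_params above.
  g = alpha * g + (1 - alpha) * grad**2
  data -= (lr / Numo::NMath.sqrt(g + eps)) * grad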