class DNN::Optimizers::RMSPropGraves

Attributes

alpha[RW]
eps[RW]
lr[RW]

Public Class Methods

new(lr: 0.0001, alpha: 0.95, eps: 0.0001, clip_norm: nil)

@param [Float] lr Learning rate.
@param [Float] alpha Decay rate for the moving averages of past gradients.
@param [Float] eps Small value added to avoid division by zero.
@param [Float | NilClass] clip_norm Gradient clip norm.

Calls superclass method DNN::Optimizers::Optimizer::new
# File lib/dnn/core/optimizers.rb, line 222
def initialize(lr: 0.0001, alpha: 0.95, eps: 0.0001, clip_norm: nil)
  super(clip_norm: clip_norm)
  @lr = lr
  @alpha = alpha
  @eps = eps
  @m = {}
  @v = {}
  @status = { m: @m, v: @v }
end
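
A minimal usage sketch, assuming the usual ruby-dnn workflow in which an optimizer instance is passed to Model#setup together with a loss function; the model structure and layer sizes below are illustrative only.

require "dnn"

opt = DNN::Optimizers::RMSPropGraves.new(lr: 0.0001, alpha: 0.95, eps: 0.0001, clip_norm: 1.0)

# Illustrative model; any DNN::Models::Model subclass is set up the same way.
model = DNN::Models::Sequential.new
model << DNN::Layers::InputLayer.new(784)
model << DNN::Layers::Dense.new(10)
model.setup(opt, DNN::Losses::SoftmaxCrossEntropy.new)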

Public Instance Methods

load_hash(hash)
# File lib/dnn/core/optimizers.rb, line 246
def load_hash(hash)
  initialize(lr: hash[:lr], alpha: hash[:alpha], eps: hash[:eps], clip_norm: hash[:clip_norm])
end

to_hash()
Calls superclass method DNN::Optimizers::Optimizer#to_hash
# File lib/dnn/core/optimizers.rb, line 232
def to_hash
  super(lr: @lr, alpha: @alpha, eps: @eps)
end
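
A sketch of how these two methods pair up for serializing optimizer settings. The hash is assumed to contain the hyperparameters listed above plus entries contributed by the superclass (such as clip_norm); the exact keys are not shown here.

opt = DNN::Optimizers::RMSPropGraves.new(lr: 0.0005, alpha: 0.9)
hash = opt.to_hash              # hyperparameters as a Hash (includes superclass entries)
restored = DNN::Optimizers::RMSPropGraves.new
restored.load_hash(hash)        # re-runs initialize with the saved values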

Private Instance Methods

update_params(params)
# File lib/dnn/core/optimizers.rb, line 236
def update_params(params)
  params.each do |param|
    # Accumulators for the moving averages of the gradient (m) and squared gradient (v).
    @m[param] ||= Xumo::SFloat.zeros(*param.data.shape)
    @v[param] ||= Xumo::SFloat.zeros(*param.data.shape)
    @m[param] = @alpha * @m[param] + (1 - @alpha) * param.grad
    @v[param] = @alpha * @v[param] + (1 - @alpha) * param.grad**2
    # Scale the step by the centered second moment: sqrt(v - m^2 + eps).
    param.data -= (@lr / Xumo::NMath.sqrt(@v[param] - @m[param]**2 + @eps)) * param.grad
  end
end
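
For reference, the rule implemented above is Graves' RMSProp variant: it keeps running averages m of the gradient and v of the squared gradient, then divides the learning rate by the centered term sqrt(v - m^2 + eps). Below is a standalone sketch of one update step on plain Numo arrays, independent of the library's Param objects and purely illustrative; the parameter and gradient values are made up.

require "numo/narray"

lr, alpha, eps = 0.0001, 0.95, 0.0001
w    = Numo::SFloat[1.0, -2.0]   # parameter values (example data)
grad = Numo::SFloat[0.5, 0.1]    # gradient for this step (example data)
m = Numo::SFloat.zeros(2)
v = Numo::SFloat.zeros(2)

# One RMSPropGraves step: update the moving averages, then take a centered, scaled step.
m = alpha * m + (1 - alpha) * grad
v = alpha * v + (1 - alpha) * grad**2
w -= (lr / Numo::NMath.sqrt(v - m**2 + eps)) * grad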