class DNN::Optimizers::SGD

Attributes

lr[RW]
momentum[RW]

Public Class Methods

new(lr: 0.01, momentum: 0, clip_norm: nil) click to toggle source

@param [Float] lr Learning rate. @param [Float] momentum Momentum coefficient (0 disables momentum). @param [Float | NilClass] clip_norm Gradient clipping norm, forwarded to the superclass (nil disables clipping).

Calls superclass method DNN::Optimizers::Optimizer::new
# File lib/dnn/core/optimizers.rb, line 70
# Builds an SGD optimizer, optionally with classical momentum.
#
# @param [Float] lr Learning rate.
# @param [Float] momentum Momentum coefficient (0 disables the velocity term).
# @param [Float | NilClass] clip_norm Gradient clipping norm, forwarded to the
#   superclass (nil disables clipping).
def initialize(lr: 0.01, momentum: 0, clip_norm: nil)
  super(clip_norm: clip_norm)
  @momentum = momentum
  @lr = lr
  # @status exposes the velocity table under the :v key; both names refer to
  # the same Hash object, so updates through @v are visible via @status.
  @status = { v: (@v = {}) }
end

Public Instance Methods

load_hash(hash) click to toggle source
# File lib/dnn/core/optimizers.rb, line 94
# Restores this optimizer's configuration from a serialized hash
# (the counterpart of #to_hash). Re-runs initialization, which also
# resets the velocity table.
#
# @param [Hash] hash Hash holding :lr, :momentum and :clip_norm entries.
def load_hash(hash)
  lr, momentum, clip_norm = hash.values_at(:lr, :momentum, :clip_norm)
  initialize(lr: lr, momentum: momentum, clip_norm: clip_norm)
end
to_hash() click to toggle source
Calls superclass method DNN::Optimizers::Optimizer#to_hash
# File lib/dnn/core/optimizers.rb, line 78
# Serializes this optimizer's settings by merging :lr and :momentum
# into the hash built by the superclass.
#
# @return [Hash] Serialized optimizer configuration.
def to_hash
  super({ lr: @lr, momentum: @momentum })
end

Private Instance Methods

update_params(params) click to toggle source
# File lib/dnn/core/optimizers.rb, line 82
        def update_params(params)
  params.each do |param|
    amount = param.grad * @lr
    if @momentum > 0
      @v[param] ||= Xumo::SFloat.zeros(*param.data.shape)
      amount += @momentum * @v[param]
      @v[param] = amount
    end
    param.data -= amount
  end
end