class TensorStream::NN

High-level machine learning functions

Public Class Methods

bias_add(value, bias, data_format: nil, name: nil)

Adds bias to value.

This is a narrow version of tf.add where the bias is restricted to 1-D only.

# File lib/tensor_stream/nn/nn_ops.rb, line 148
def bias_add(value, bias, data_format: nil, name: nil)
  value = TensorStream.convert_to_tensor(value, name: "input")
  bias = TensorStream.convert_to_tensor(bias, dtype: value.dtype, name: "bias")

  raise TensorStreamError, "value must be at least rank 2" if value.shape.known? && value.shape.ndims < 2

  _op(:bias_add, value, bias, data_format: data_format, name: name)
end
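
A minimal usage sketch (sample values are illustrative; the ts.session call follows the gem's README conventions and is an assumption here):

ts = TensorStream
nn = TensorStream::NN
value = ts.constant([[1.0, 2.0], [3.0, 4.0]]) # rank 2, as required
bias  = ts.constant([0.5, -0.5])              # 1-D, one entry per last-axis channel
out   = nn.bias_add(value, bias)
ts.session.run(out) # => [[1.5, 1.5], [3.5, 3.5]]
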
conv2d(input, filter, strides, padding, name: nil)
# File lib/tensor_stream/nn/nn_ops.rb, line 140
def conv2d(input, filter, strides, padding, name: nil)
  _op(:conv2d, input, filter, strides: strides, padding: padding, name: name)
end
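
conv2d carries no doc comment, but it mirrors the TensorFlow op of the same name: input is a 4-D NHWC tensor, filter is [height, width, in_channels, out_channels], strides is a 4-element array, and padding is 'SAME' or 'VALID'. Treat those conventions as assumptions carried over from TensorFlow. A sketch:

ts = TensorStream
nn = TensorStream::NN
input  = ts.constant([[[[1.0], [2.0]], [[3.0], [4.0]]]])       # shape [1, 2, 2, 1] (NHWC)
filter = ts.constant([[[[1.0]], [[1.0]]], [[[1.0]], [[1.0]]]]) # shape [2, 2, 1, 1]
out = nn.conv2d(input, filter, [1, 1, 1, 1], 'VALID')
ts.session.run(out) # => [[[[10.0]]]], i.e. 1 + 2 + 3 + 4 under 'VALID' padding
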
dropout(x, keep_prob, noise_shape: nil, seed: nil, name: nil)

Computes dropout.

With probability keep_prob, outputs the input element scaled up by 1 / keep_prob; otherwise outputs 0. The scaling ensures that the expected sum is unchanged.

# File lib/tensor_stream/nn/nn_ops.rb, line 29
def dropout(x, keep_prob, noise_shape: nil, seed: nil, name: nil)
  TensorStream.name_scope(name, "dropout", values: [x]) do
    x = TensorStream.convert_to_tensor(x, name: "x")
    raise TensorStream::ValueError, "x has to be a floating point tensor since it's going to be scaled. Got a #{x.data_type} tensor instead." unless fp_type?(x.data_type)
    raise TensorStream::ValueError, "keep_prob must be a scalar tensor or a float in the range (0, 1], got #{keep_prob}" if keep_prob.is_a?(Float) && !(keep_prob > 0 && keep_prob <= 1)

    return x if keep_prob.is_a?(Float) && keep_prob.to_f == 1.0

    keep_prob = TensorStream.convert_to_tensor(keep_prob, dtype: x.dtype, name: "keep_prob")
    return x if keep_prob.value == 1.0

    noise_shape = if noise_shape.nil?
      TensorStream.shape(x)
    else
      noise_shape
    end

    random_tensor = keep_prob
    random_tensor += TensorStream.random_uniform(noise_shape, seed: seed, dtype: x.dtype)

    # random_tensor is uniform in [keep_prob, 1 + keep_prob), so flooring it
    # yields 1.0 with probability keep_prob and 0.0 otherwise
    binary_tensor = TensorStream.floor(random_tensor)
    TensorStream.div(x, keep_prob) * binary_tensor
  end
end
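
A sketch (seed and values are illustrative; with keep_prob 0.5, expect roughly half of the entries to be zeroed and the survivors doubled, since 1 / keep_prob == 2):

ts = TensorStream
nn = TensorStream::NN
x = ts.constant([1.0, 2.0, 3.0, 4.0])
dropped = nn.dropout(x, 0.5, seed: 42)
ts.session.run(dropped) # e.g. [2.0, 0.0, 6.0, 0.0] - which entries zero out depends on the seed
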
log_softmax(logits, axis: -1, name: nil)

Computes log softmax activations.

# File lib/tensor_stream/nn/nn_ops.rb, line 121
def log_softmax(logits, axis: -1, name: nil)
  _op(:log_softmax, logits, axis: axis, name: name)
end
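
Along the given axis this is equivalent to logits - log(sum(exp(logits))). A sketch (sample values are illustrative):

ts = TensorStream
nn = TensorStream::NN
out = nn.log_softmax(ts.constant([1.0, 2.0, 3.0]))
ts.session.run(out) # => approx. [-2.4076, -1.4076, -0.4076]
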
relu(features, name: nil)
# File lib/tensor_stream/nn/nn_ops.rb, line 14
def relu(features, name: nil)
  TensorStream.max(features, 0, name: "relu_#{name}")
end
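
As the body shows, this is just an element-wise max(features, 0):

ts = TensorStream
nn = TensorStream::NN
out = nn.relu(ts.constant([-1.0, 0.0, 2.0]))
ts.session.run(out) # => [0.0, 0.0, 2.0]
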
relu6(features, name: nil)
# File lib/tensor_stream/nn/nn_ops.rb, line 18
def relu6(features, name: nil)
  TensorStream.name_scope(name, "Relu6", values: [features]) do
    features = TensorStream.convert_to_tensor(features, name: "features")
    _op(:relu6, features, name: name)
  end
end
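
relu6 clamps activations to the range [0, 6], i.e. min(max(features, 0), 6). A sketch:

ts = TensorStream
nn = TensorStream::NN
out = nn.relu6(ts.constant([-1.0, 3.0, 8.0]))
ts.session.run(out) # => [0.0, 3.0, 6.0]
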
sigmoid(input, name: nil)
# File lib/tensor_stream/nn/nn_ops.rb, line 54
def sigmoid(input, name: nil)
  TensorStream.sigmoid(input, name: name)
end
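
A thin alias for TensorStream.sigmoid, i.e. 1 / (1 + exp(-x)) element-wise:

ts = TensorStream
nn = TensorStream::NN
out = nn.sigmoid(ts.constant([0.0, 2.0]))
ts.session.run(out) # => approx. [0.5, 0.8808]
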
sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
# File lib/tensor_stream/nn/nn_ops.rb, line 125
def sigmoid_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
  TensorStream.name_scope(name, default: "logistic_loss", values: [logits, labels]) do |_name|
    tf = TensorStream
    logits = tf.convert_to_tensor(logits, name: "logits")
    labels = tf.convert_to_tensor(labels, name: "labels")
    zeros = tf.zeros_like(logits, dtype: logits.dtype)
    cond = (logits >= zeros)
    relu_logits = tf.where(cond, logits, zeros)      # max(x, 0)
    neg_abs_logits = tf.where(cond, -logits, logits) # -abs(x)

    # numerically stable form of the logistic loss:
    # max(x, 0) - x * z + log(1 + exp(-abs(x)))
    tf.add(relu_logits - logits * labels,
      tf.log1p(tf.exp(neg_abs_logits)), name: name)
  end
end
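
Measures the per-element logistic loss between labels z and logits x, using the stable formulation noted in the source above. A sketch (sample values are illustrative):

ts = TensorStream
nn = TensorStream::NN
labels = ts.constant([1.0, 0.0])
logits = ts.constant([2.0, -1.0])
loss = nn.sigmoid_cross_entropy_with_logits(labels: labels, logits: logits)
ts.session.run(loss) # => approx. [0.1269, 0.3133]
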
softmax(logits, axis: nil, name: nil)
# File lib/tensor_stream/nn/nn_ops.rb, line 10
def softmax(logits, axis: nil, name: nil)
  _op(:softmax, logits, axis: axis, name: name)
end
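
Computes softmax activations, exp(logits) / sum(exp(logits)) along the given axis (the last axis when axis is nil). A sketch:

ts = TensorStream
nn = TensorStream::NN
out = nn.softmax(ts.constant([1.0, 2.0, 3.0]))
ts.session.run(out) # => approx. [0.0900, 0.2447, 0.6652], which sums to 1
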
softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
# File lib/tensor_stream/nn/nn_ops.rb, line 58
def softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
  softmax_cross_entropy_with_logits_v2(labels: labels, logits: logits, name: name)
end
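
As the body shows, this is a straight alias kept for API compatibility; see softmax_cross_entropy_with_logits_v2 below for behavior and an example.
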
softmax_cross_entropy_with_logits_v2(labels: nil, logits: nil, name: nil)
# File lib/tensor_stream/nn/nn_ops.rb, line 62
def softmax_cross_entropy_with_logits_v2(labels: nil, logits: nil, name: nil)
  TensorStream.name_scope(name, default: "softmax_cross_entropy_with_logits", values: [logits, labels]) do
    ts = TensorStream
    logits = ts.convert_to_tensor(logits, name: "logits")
    labels = ts.convert_to_tensor(labels, name: "labels")
    labels = ts.cast(labels, logits.dtype)

    output = _op(:softmax_cross_entropy_with_logits_v2, logits, labels)
    output[0]
  end
end
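
Computes softmax cross entropy between labels (full distributions, e.g. one-hot rows) and logits, returning one loss value per row. A sketch (sample values are illustrative):

ts = TensorStream
nn = TensorStream::NN
labels = ts.constant([[0.0, 0.0, 1.0]]) # one distribution per row
logits = ts.constant([[1.0, 2.0, 3.0]])
loss = nn.softmax_cross_entropy_with_logits_v2(labels: labels, logits: logits)
ts.session.run(loss) # => approx. [0.4076], i.e. -log(softmax(logits)[2])
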
sparse_softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
# File lib/tensor_stream/nn/nn_ops.rb, line 74
def sparse_softmax_cross_entropy_with_logits(labels: nil, logits: nil, name: nil)
  TensorStream.name_scope(name, default: "SparseSoftmaxCrossEntropyWithLogits", values: [logits, labels]) do
    tf = TensorStream
    labels = tf.convert_to_tensor(labels)
    logits = tf.convert_to_tensor(logits)
    precise_logits = logits.data_type == :float16 ? tf.cast(logits, :float32) : logits

    labels_static_shape = labels.shape
    labels_shape = tf.shape(labels)
    static_shapes_fully_defined = labels_static_shape.known? && logits.shape.known?

    raise TensorStream::ValueError, "Logits cannot be scalars - received shape #{logits.shape.shape}." if logits.shape.known? && logits.shape.scalar?
    if logits.shape.known? && (labels_static_shape.known? && labels_static_shape.ndims != logits.shape.ndims - 1)
      raise TensorStream::ValueError, "Rank mismatch: Rank of labels (received #{labels_static_shape.ndims}) " \
                                      "should equal rank of logits minus 1 (received #{logits.shape.ndims})."
    end
    if logits.shape.ndims == 2
      cost = _op(:sparse_softmax_cross_entropy_with_logits,
        precise_logits, labels, name: name)
      if logits.data_type == :float16
        return tf.cast(cost[0], :float16)
      else
        return cost[0]
      end
    end

    shape_checks = []

    shape_checks << tf.assert_equal(tf.rank(labels), tf.rank(logits) - 1) unless static_shapes_fully_defined

    tf.control_dependencies(shape_checks) do
      num_classes = tf.shape(logits)[tf.rank(logits) - 1]
      precise_logits = tf.reshape(precise_logits, [-1, num_classes])
      labels = tf.reshape(labels, [-1])
      cost = _op(:sparse_softmax_cross_entropy_with_logits, precise_logits, labels, name: name)
      cost = tf.reshape(cost[0], labels_shape)

      if logits.data_type == :float16
        tf.cast(cost, :float16)
      else
        cost
      end
    end
  end
end
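
Unlike the dense variant above, labels here are integer class indices with rank one less than logits, as the rank checks in the source enforce. A sketch (sample values are illustrative):

ts = TensorStream
nn = TensorStream::NN
labels = ts.constant([2, 0])                             # class indices, rank 1
logits = ts.constant([[1.0, 2.0, 3.0], [1.0, 2.0, 3.0]]) # rank 2
loss = nn.sparse_softmax_cross_entropy_with_logits(labels: labels, logits: logits)
ts.session.run(loss) # => approx. [0.4076, 2.4076]
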