activations, constraints, initializers, losses, regularizers: move Ops param from CTOR to call method #329
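In short: activation objects no longer receive the TensorFlow Ops handle in their constructors; it is passed to the call method instead. A minimal before/after sketch using the ELU activation from the diffs below, assuming an existing Ops tf and Operand<TFloat32> input:

// Before this PR: Ops was bound at construction time.
ELU<TFloat32> oldStyle = new ELU<>(tf);
Operand<TFloat32> oldResult = oldStyle.call(input);

// After this PR: Ops is supplied per call, so one activation instance
// is no longer tied to a single Ops scope.
ELU<TFloat32> newStyle = new ELU<>();
Operand<TFloat32> newResult = newStyle.call(tf, input);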

Merged
25 commits merged on Jun 2, 2021

Commits
c57a2e7
Merge pull request #3 from tensorflow/master
JimClarke5 Oct 8, 2020
09fc07e
Merge pull request #4 from tensorflow/master
JimClarke5 Oct 27, 2020
a99dcb4
Merge pull request #5 from tensorflow/master
JimClarke5 Nov 17, 2020
ba294ea
Merge pull request #6 from tensorflow/master
JimClarke5 Nov 19, 2020
04f419a
Merge pull request #7 from tensorflow/master
JimClarke5 Dec 30, 2020
02e7ebf
Merge pull request #8 from tensorflow/master
JimClarke5 Jan 29, 2021
e0c9ed8
Merge pull request #9 from tensorflow/master
JimClarke5 Feb 1, 2021
5b0374b
Merge pull request #10 from tensorflow/master
JimClarke5 Feb 11, 2021
e038bbd
Merge pull request #11 from tensorflow/master
JimClarke5 Feb 23, 2021
def3051
Merge pull request #13 from tensorflow/master
JimClarke5 Mar 3, 2021
11748ae
Merge pull request #15 from tensorflow/master
JimClarke5 Mar 21, 2021
a9412ea
Merge pull request #16 from tensorflow/master
JimClarke5 Apr 9, 2021
2ff8dfe
Merge pull request #17 from tensorflow/master
JimClarke5 Apr 22, 2021
ee5e38a
Merge pull request #18 from tensorflow/master
JimClarke5 May 1, 2021
26394d6
Merge pull request #19 from tensorflow/master
JimClarke5 May 2, 2021
a653d33
Merge branch 'tensorflow:master' into master
JimClarke5 May 6, 2021
8cf4fe9
Move Ops from CTOR to call method
JimClarke5 Jun 1, 2021
70657a3
Merge branch 'tensorflow:master' into master
JimClarke5 Jun 1, 2021
4598bc1
Merge branch 'tensorflow:master' into LossOps
JimClarke5 Jun 1, 2021
b6ae875
Move Ops from CTOR to call method
JimClarke5 Jun 1, 2021
3e54bcc
Merge remote-tracking branch 'origin/LossOps' into LossOps
JimClarke5 Jun 1, 2021
4eb231b
Move Ops from CTOR to call method
JimClarke5 Jun 1, 2021
8d45739
Merge remote-tracking branch 'origin/LossOps' into LossOps
JimClarke5 Jun 2, 2021
fe7e8e3
JavaDoc fixes including Dataset
JimClarke5 Jun 2, 2021
c5ae13b
Results of Run mvn spotless:apply
JimClarke5 Jun 2, 2021
org/tensorflow/framework/activations/AbstractActivation.java (new file)
@@ -0,0 +1,46 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+=======================================================================*/
+package org.tensorflow.framework.activations;
+
+import org.tensorflow.op.Ops;
+import org.tensorflow.types.family.TNumber;
+
+/** Abstract base class for Activations */
+public abstract class AbstractActivation<T extends TNumber> implements Activation<T> {
+
+  /** The TensorFlow Ops */
+  protected Ops tf;
+
+  /** Creates the abstract class for an AbstractActivation */
+  protected AbstractActivation() {}
+
+  /**
+   * Gets the TensorFlow Ops
+   *
+   * @return the TensorFlow Ops
+   */
+  protected Ops getTF() {
+    return this.tf;
+  }
+
+  /**
+   * Sets the TensorFlow Ops
+   *
+   * @param tf the TensorFlow Ops
+   */
+  protected void setTF(Ops tf) {
+    this.tf = tf;
+  }
+}
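For orientation, a hypothetical subclass (not part of this PR; the Swish name and logic are assumed for illustration) showing how a concrete activation plugs into the new AbstractActivation: hyperparameters stay in the constructor, and only call touches the Ops handle.

import org.tensorflow.Operand;
import org.tensorflow.op.Ops;
import org.tensorflow.types.family.TFloating;

// Hypothetical Swish activation written against the new base class.
public class Swish<T extends TFloating> extends AbstractActivation<T> {

  @Override
  public Operand<T> call(Ops tf, Operand<T> input) {
    // swish(x) = x * sigmoid(x); tf is used only inside call.
    return tf.math.mul(input, tf.math.sigmoid(input));
  }
}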
org/tensorflow/framework/activations/Activation.java
@@ -1,4 +1,4 @@
-/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -19,50 +19,19 @@
 import org.tensorflow.types.family.TNumber;
 
 /**
- * Abstract base class for Activations
+ * Interface for Activations
  *
- * <p><b>Note:</b> The {@link #tf} attribute must be set prior to invoking the call method. See
- * {@link #setTF(Ops)} and the constructor {@link #Activation(Ops)}.
- *
- * @param <T> the data type of the activation
+ * @param <T> the data type of the input and the result
  */
-public abstract class Activation<T extends TNumber> {
-
-  /** The TensorFlow Ops */
-  protected Ops tf;
-
-  /**
-   * Creates the abstract class for an Activation
-   *
-   * @param tf the TensorFlow Ops
-   */
-  protected Activation(Ops tf) {
-    this.tf = tf;
-  }
-
-  /**
-   * Sets the TensorFlow Ops
-   *
-   * @param tf the TensorFlow Ops
-   */
-  protected void setTF(Ops tf) {
-    this.tf = tf;
-  }
-
-  /**
-   * Gets the TensorFlow Ops
-   *
-   * @return the TensorFlow Ops
-   */
-  protected Ops getTF() {
-    return this.tf;
-  }
+@FunctionalInterface
+public interface Activation<T extends TNumber> {
 
   /**
    * Gets the calculation operation for the activation.
    *
+   * @param tf the TensorFlow Ops
    * @param input the input tensor
    * @return The operand for the activation
    */
-  public abstract Operand<T> call(Operand<T> input);
+  Operand<T> call(Ops tf, Operand<T> input);
 }
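Because Activation is now a @FunctionalInterface whose single abstract method is call(Ops, Operand), a lambda can stand in for a concrete activation class. A small sketch, assuming an existing Ops tf and Operand<TFloat32> input:

// A lambda satisfies the new Activation contract directly.
Activation<TFloat32> relu = (ops, x) -> ops.nn.relu(x);
Operand<TFloat32> output = relu.call(tf, input);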
org/tensorflow/framework/activations/ELU.java
@@ -14,6 +14,8 @@
 =======================================================================*/
 package org.tensorflow.framework.activations;
 
+import static org.tensorflow.framework.utils.CastHelper.cast;
+
 import org.tensorflow.Operand;
 import org.tensorflow.op.Ops;
 import org.tensorflow.types.TBool;
@@ -44,53 +44,41 @@
  * Operand&lt;TFloat32&gt; result = elu.call(input);
  * </pre>
  *
- * @param <T> the data type of the activation
  * @see <a href="https://arxiv.org/abs/1511.07289">Clevert et al, 2016, Fast and Accurate Deep
  *     Network Learning by Exponential Linear Units (ELUs)</a>
  */
-public class ELU<T extends TFloating> extends Activation<T> {
+public class ELU<T extends TFloating> extends AbstractActivation<T> {
 
   private static final double ALPHA_DEFAULT = 1.0;
 
   /** A scalar, slope of negative section. */
   private final double alpha;
 
-  /**
-   * Creates a new ELU with alpha={@link #ALPHA_DEFAULT}.
-   *
-   * @param tf the TensorFlow Ops
-   */
-  public ELU(Ops tf) {
-    this(tf, ALPHA_DEFAULT);
+  /** Creates a new ELU with alpha={@link #ALPHA_DEFAULT}. */
+  public ELU() {
+    this(ALPHA_DEFAULT);
   }
 
   /**
    * Creates a new ELU
    *
-   * @param tf the TensorFlow Ops
    * @param alpha A scalar, slope of negative section. It controls the value to which an ELU
    *     saturates for negative net inputs.
    */
-  public ELU(Ops tf, double alpha) {
-    super(tf);
+  public ELU(double alpha) {
+    super();
     this.alpha = alpha;
   }
 
-  /**
-   * Gets the calculation operation for the activation.
-   *
-   * @param input the input tensor
-   * @return The operand for the activation
-   */
+  /** {@inheritDoc} */
   @Override
-  public Operand<T> call(Operand<T> input) {
-
+  public Operand<T> call(Ops tf, Operand<T> input) {
     Operand<T> result = tf.nn.elu(input);
     if (alpha == 1.0) return result;
    else {
       Class<T> inputType = input.type();
-      Operand<T> y = tf.math.mul(result, tf.dtypes.cast(tf.constant(alpha), inputType));
-      Operand<TBool> cond = tf.math.greater(result, tf.dtypes.cast(tf.constant(0), inputType));
+      Operand<T> y = tf.math.mul(result, cast(tf, tf.constant(alpha), inputType));
+      Operand<TBool> cond = tf.math.greater(result, cast(tf, tf.constant(0), inputType));
       return tf.select(cond, result, y);
     }
   }
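Usage under the new signature, assuming an existing Ops tf and Operand<TFloat32> input; alpha remains a constructor argument while the Ops handle moves to call:

ELU<TFloat32> elu = new ELU<>(0.5);             // configure alpha up front, no Ops needed
Operand<TFloat32> result = elu.call(tf, input); // supply Ops at call time

The static cast(tf, ...) helper imported from CastHelper appears to be a drop-in replacement for the previous inline tf.dtypes.cast(...) calls.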
org/tensorflow/framework/activations/Exponential.java
@@ -30,28 +30,17 @@
  * Operand&lt;TFloat32&gt; result = exp.call(input);
  * // result is [0.04978707f, 0.36787945f, 1.f, 2.7182817f, 20.085537f]
  * </pre>
- *
- * @param <T> the data type of the activation
  */
-public class Exponential<T extends TFloating> extends Activation<T> {
+public class Exponential<T extends TFloating> extends AbstractActivation<T> {
 
-  /**
-   * Creates an Exponential activation.
-   *
-   * @param tf the TensorFlow Ops
-   */
-  public Exponential(Ops tf) {
-    super(tf);
+  /** Creates an Exponential activation. */
+  public Exponential() {
+    super();
   }
 
-  /**
-   * Calculates the Exponential activation.
-   *
-   * @param input the input tensor
-   * @return an Operand for the exponential activation: <code>exp(x)</code>.
-   */
+  /** {@inheritDoc} */
   @Override
-  public Operand<T> call(Operand<T> input) {
+  public Operand<T> call(Ops tf, Operand<T> input) {
     return tf.math.exp(input);
   }
 }
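The same pattern applied to Exponential; a one-line usage sketch under the same assumptions (an existing Ops tf and Operand<TFloat32> input):

Operand<TFloat32> result = new Exponential<TFloat32>().call(tf, input); // elementwise exp(x)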
org/tensorflow/framework/activations/HardSigmoid.java
@@ -14,6 +14,8 @@
 =======================================================================*/
 package org.tensorflow.framework.activations;
 
+import static org.tensorflow.framework.utils.CastHelper.cast;
+
 import org.tensorflow.Operand;
 import org.tensorflow.op.Ops;
 import org.tensorflow.types.family.TFloating;
@@ -40,34 +40,23 @@
  * Operand&lt;TFloat32&gt; result = hardSigmoid.call(input);
  * // result is [0.f , 0.3f, 0.5f, 0.7f, 1.f]
  * </pre>
- *
- * @param <T> the data type of the result
  */
-public class HardSigmoid<T extends TFloating> extends Activation<T> {
+public class HardSigmoid<T extends TFloating> extends AbstractActivation<T> {
 
-  /**
-   * Creates Hard sigmoid activation.
-   *
-   * @param tf the TensorFlow Ops
-   */
-  public HardSigmoid(Ops tf) {
-    super(tf);
+  /** Creates Hard sigmoid activation. */
+  public HardSigmoid() {
+    super();
   }
 
-  /**
-   * Gets the calculation operation for the activation.
-   *
-   * @param input the input tensor
-   * @return The operand for the activation
-   */
+  /** {@inheritDoc} */
   @Override
-  public Operand<T> call(Operand<T> input) {
+  public Operand<T> call(Ops tf, Operand<T> input) {
     Class<T> inputType = input.type();
-    Operand<T> point2 = tf.dtypes.cast(tf.constant(0.2), inputType);
-    Operand<T> point5 = tf.dtypes.cast(tf.constant(0.5), inputType);
+    Operand<T> point2 = cast(tf, tf.constant(0.2), inputType);
+    Operand<T> point5 = cast(tf, tf.constant(0.5), inputType);
 
     Operand<T> x = tf.math.add(tf.math.mul(input, point2), point5);
     return tf.clipByValue(
-        x, tf.dtypes.cast(tf.constant(0), inputType), tf.dtypes.cast(tf.constant(1), inputType));
+        x, cast(tf, tf.constant(0), inputType), cast(tf, tf.constant(1), inputType));
   }
 }
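The activation above computes clipByValue(0.2 * x + 0.5, 0, 1), which is where the Javadoc sample values come from. Worked through for the sample input [-3, -1, 0, 1, 3], with usage under the same assumptions as earlier:

// 0.2 * -3 + 0.5 = -0.1 -> clipped to 0.0f
// 0.2 * -1 + 0.5 =  0.3 -> inside [0, 1], stays 0.3f
// 0.2 *  0 + 0.5 =  0.5
// 0.2 *  1 + 0.5 =  0.7
// 0.2 *  3 + 0.5 =  1.1 -> clipped to 1.0f
Operand<TFloat32> result = new HardSigmoid<TFloat32>().call(tf, input);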
org/tensorflow/framework/activations/Linear.java
@@ -19,9 +19,9 @@
 import org.tensorflow.types.family.TNumber;
 
 /**
- * Linear activation function (pass-through).
+ * Linear activation function (pass-through).
  *
- * <p>The linear activation returns its input. It is also known as the Identity activation function.</p>
+ * <p>The linear activation returns its input. It is also known as the Identity activation function.
  *
  * <p>For example:
  *
@@ -33,20 +33,16 @@
  * // result is [-3.0f,-1.0f, 0.0f,1.0f,3.0f]
  * </pre>
  */
-public class Linear<U extends TNumber> extends Activation<U> {
+public class Linear<U extends TNumber> extends AbstractActivation<U> {
 
-  /**
-   * Creates a linear activation.
-   *
-   * @param tf the TensorFlow Ops
-   */
-  public Linear(Ops tf) {
-    super(tf);
+  /** Creates a linear activation. */
+  public Linear() {
+    super();
   }
 
   /** {@inheritDoc} */
   @Override
-  public Operand<U> call(Operand<U> input) {
+  public Operand<U> call(Ops tf, Operand<U> input) {
     return input;
   }
 }