
Commit

Merge pull request #5 from tensorflow/master
AdityaKane2001 committed Apr 15, 2021
2 parents 2ee908a + 885861a commit b3bd82f
Showing 433 changed files with 16,876 additions and 3,631 deletions.
6 changes: 6 additions & 0 deletions CONTRIBUTING.md
@@ -200,6 +200,12 @@ There are two ways to run TensorFlow unit tests.
    bazel test ${flags} //tensorflow/python/...
    ```

+   For a single component, e.g. the softmax op:
+
+   ```bash
+   bazel test ${flags} tensorflow/python/kernel_tests:softmax_op_test
+   ```
+
 2. Using [Docker](https://www.docker.com) and TensorFlow's CI scripts.

    ```bash
2 changes: 1 addition & 1 deletion README.md
@@ -165,7 +165,7 @@ Container Type | Status | Art
 * [Learn ML with TensorFlow](https://www.tensorflow.org/resources/learn-ml)
 * [TensorFlow Twitter](https://twitter.com/tensorflow)
 * [TensorFlow YouTube](https://www.youtube.com/channel/UC0rqucBdTuFTjJiefW5t-IQ)
-* [TensorFlow Roadmap](https://www.tensorflow.org/model_optimization/guide/roadmap)
+* [TensorFlow model optimization roadmap](https://www.tensorflow.org/model_optimization/guide/roadmap)
 * [TensorFlow White Papers](https://www.tensorflow.org/about/bib)
 * [TensorBoard Visualization Toolkit](https://github.com/tensorflow/tensorboard)

11 changes: 10 additions & 1 deletion RELEASE.md
@@ -35,7 +35,16 @@
 * TF Core:
     * Added `tf.saved_model.experimental.TrackableResource`, which allows the
       creation of custom wrapper objects for resource tensors.
-
+    * Added `tf.lookup.experimental.MutableHashTable`, which provides a
+      generic mutable hash table implementation (usage sketch below).
+        * Compared to `tf.lookup.experimental.DenseHashTable` this offers
+          lower overall memory usage and a cleaner API. It does not require
+          specifying a `deleted_key` and `empty_key` that cannot be inserted
+          into the table.
+* `tf.data`:
+    * Promoting the `tf.data.experimental.get_single_element` API to
+      `tf.data.Dataset.get_single_element` and deprecating the experimental
+      endpoint.
 * `tf.lite`:
     * Fix mean op reference quantization rounding issue.

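A minimal usage sketch of the two Python endpoints named in this RELEASE.md hunk, assuming a TensorFlow build that includes these changes; the dtypes and sample values are illustrative, not taken from the commit:

```python
import tensorflow as tf

# MutableHashTable: unlike DenseHashTable, no empty_key/deleted_key
# sentinels are reserved, so any key of the declared dtype may be inserted.
table = tf.lookup.experimental.MutableHashTable(
    key_dtype=tf.string, value_dtype=tf.int64, default_value=-1)
table.insert(tf.constant(["a", "b"]), tf.constant([1, 2], dtype=tf.int64))
print(table.lookup(tf.constant(["a", "missing"])).numpy())  # [ 1 -1]
table.remove(tf.constant(["a"]))

# The promoted tf.data endpoint: pull the sole element out of a
# single-element dataset without building an iterator.
ds = tf.data.Dataset.from_tensors(tf.constant([1, 2, 3]))
print(ds.get_single_element().numpy())  # [1 2 3]
```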
5 changes: 4 additions & 1 deletion tensorflow/c/eager/BUILD
@@ -672,6 +672,7 @@ tf_cuda_cc_test(
         ":c_api_test_util",
         ":tfe_op_internal",
         ":tfe_tensorhandle_internal",
+        "@com_google_absl//absl/strings",
         "//tensorflow/c:c_test_util",
         "//tensorflow/core:lib",
         "//tensorflow/core:lib_internal",
@@ -680,7 +681,9 @@ tf_cuda_cc_test(
         "//tensorflow/core:test_main",
         "//tensorflow/core/common_runtime/eager:eager_operation",
         "//tensorflow/core/common_runtime/eager:tensor_handle",
-        "@com_google_absl//absl/strings",
+        # copybara:uncomment_begin
+        # "@tf_runtime//backends/cpu:tf_ops_alwayslink",
+        # copybara:uncomment_end
     ],
 )

28 changes: 14 additions & 14 deletions tensorflow/c/eager/gradient_checker.cc
@@ -50,11 +50,11 @@ Status RunAndMaybeSum(AbstractContext* ctx, Model forward,
                       absl::Span<AbstractTensorHandle* const> inputs,
                       absl::Span<AbstractTensorHandle*> outputs,
                       bool use_function) {
-  std::vector<AbstractTensorHandle*> model_outputs(1);
+  AbstractTensorHandle* model_outputs[1];

   // Run the model.
-  TF_RETURN_IF_ERROR(RunModel(forward, ctx, inputs,
-                              absl::MakeSpan(model_outputs), use_function));
+  TF_RETURN_IF_ERROR(
+      RunModel(forward, ctx, inputs, model_outputs, use_function));
   AbstractTensorHandlePtr model_out(model_outputs[0]);

   TF_Tensor* model_out_tensor;
@@ -83,8 +83,8 @@ Status RunAndMaybeSum(AbstractContext* ctx, Model forward,
   }

   // Reduce sum the output on all dimensions.
-  TF_RETURN_IF_ERROR(
-      ops::Sum(ctx, model_out.get(), sum_dims.get(), outputs, "sum_output"));
+  TF_RETURN_IF_ERROR(ops::Sum(ctx, model_out.get(), sum_dims.get(), &outputs[0],
+                              "sum_output"));
   return Status::OK();
 }
 // ========================= End Helper Functions==============================
@@ -122,7 +122,7 @@ Status CalcNumericalGrad(AbstractContext* ctx, Model forward,
   // Initialize auxilary data structures.
   vector<float> thetaPlus_data(num_elems);
   vector<float> thetaMinus_data(num_elems);
-  std::vector<AbstractTensorHandle*> f_outputs(1);
+  AbstractTensorHandle* f_outputs[1];

   // Numerical Grad Check
   for (int i = 0; i < num_elems; i++) {
@@ -164,25 +164,25 @@ Status CalcNumericalGrad(AbstractContext* ctx, Model forward,

     // Get f(theta + eps):
     theta_inputs[input_index] = thetaPlus.get();
-    TF_RETURN_IF_ERROR(RunAndMaybeSum(ctx, forward, theta_inputs,
-                                      absl::MakeSpan(f_outputs), use_function));
+    TF_RETURN_IF_ERROR(
+        RunAndMaybeSum(ctx, forward, theta_inputs, f_outputs, use_function));
     AbstractTensorHandlePtr fPlus(f_outputs[0]);

     // Get f(theta - eps):
     theta_inputs[input_index] = thetaMinus.get();
-    TF_RETURN_IF_ERROR(RunAndMaybeSum(ctx, forward, theta_inputs,
-                                      absl::MakeSpan(f_outputs), use_function));
+    TF_RETURN_IF_ERROR(
+        RunAndMaybeSum(ctx, forward, theta_inputs, f_outputs, use_function));
     AbstractTensorHandlePtr fMinus(f_outputs[0]);

     // Take Difference of both estimates: (f(theta + eps) - f(theta - eps)).
-    TF_RETURN_IF_ERROR(ops::Sub(ctx, fPlus.get(), fMinus.get(),
-                                absl::MakeSpan(f_outputs), "sub_top"));
+    TF_RETURN_IF_ERROR(
+        ops::Sub(ctx, fPlus.get(), fMinus.get(), f_outputs, "sub_top"));
     AbstractTensorHandlePtr fDiff(f_outputs[0]);

     // Calculate using the difference quotient definition:
     // (f(theta + eps) - f(theta - eps)) / (2 * eps).
-    TF_RETURN_IF_ERROR(ops::Div(ctx, fDiff.get(), two_eps.get(),
-                                absl::MakeSpan(f_outputs), "diff_quotient"));
+    TF_RETURN_IF_ERROR(
+        ops::Div(ctx, fDiff.get(), two_eps.get(), f_outputs, "diff_quotient"));
     AbstractTensorHandlePtr diff_quotient(f_outputs[0]);

     TF_Tensor* grad_tensor;
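The hunks above thread a single raw output handle through each op call; the underlying algorithm is an ordinary central-difference check. A NumPy sketch of the same difference-quotient computation (my illustration, not code from this commit):

```python
import numpy as np

def numerical_grad(f, theta, eps=1e-4):
    """Central-difference estimate of df/dtheta, one element at a time.

    Mirrors the loop in CalcNumericalGrad: perturb element i, then apply
    (f(theta + eps) - f(theta - eps)) / (2 * eps). f must reduce its
    output to a scalar, as RunAndMaybeSum does via ops::Sum.
    """
    theta = np.asarray(theta, dtype=np.float64)
    grad = np.zeros_like(theta)
    for i in range(theta.size):
        theta_plus, theta_minus = theta.copy(), theta.copy()
        theta_plus.flat[i] += eps
        theta_minus.flat[i] -= eps
        grad.flat[i] = (f(theta_plus) - f(theta_minus)) / (2 * eps)
    return grad

# d/dx sum(x**2) = 2x, so this prints approximately [2. 4. 6.].
print(numerical_grad(lambda x: np.sum(x ** 2), [1.0, 2.0, 3.0]))
```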
4 changes: 2 additions & 2 deletions tensorflow/c/eager/gradient_checker_test.cc
@@ -65,15 +65,15 @@ void CompareNumericalAndManualGradients(
 Status MatMulModel(AbstractContext* ctx,
                    absl::Span<AbstractTensorHandle* const> inputs,
                    absl::Span<AbstractTensorHandle*> outputs) {
-  return ops::MatMul(ctx, inputs[0], inputs[1], outputs, "MatMul",
+  return ops::MatMul(ctx, inputs[0], inputs[1], &outputs[0], "MatMul",
                      /*transpose_a=*/false,
                      /*transpose_b=*/false);
 }

 Status MulModel(AbstractContext* ctx,
                 absl::Span<AbstractTensorHandle* const> inputs,
                 absl::Span<AbstractTensorHandle*> outputs) {
-  return ops::Mul(ctx, inputs[0], inputs[1], outputs, "Mul");
+  return ops::Mul(ctx, inputs[0], inputs[1], &outputs[0], "Mul");
 }

 // TODO(vnvo2409): Add more tests from `python/ops/gradient_checker_v2_test.py`.
10 changes: 5 additions & 5 deletions tensorflow/c/eager/gradients_test.cc
@@ -119,11 +119,11 @@ Status RecordOperationWithNullGradientFunctionModel(
     absl::Span<AbstractTensorHandle*> outputs) {
   Tape tape(/*persistent=*/false);
   tape.Watch(inputs[0]);
-  std::vector<AbstractTensorHandle*> neg_outputs(1);
-  TF_RETURN_IF_ERROR(
-      ops::Neg(ctx, inputs[0], absl::MakeSpan(neg_outputs), "Neg"));
-  tape.RecordOperation(inputs, neg_outputs, nullptr, "Neg");
-  return tape.ComputeGradient(ctx, /*targets=*/neg_outputs,
+  AbstractTensorHandle* neg_output;
+  TF_RETURN_IF_ERROR(ops::Neg(ctx, inputs[0], &neg_output, "Neg"));
+  tape.RecordOperation(inputs, {neg_output}, nullptr, "Neg");
+  return tape.ComputeGradient(ctx,
+                              /*targets=*/{neg_output},
                               /*sources=*/inputs,
                               /*output_gradients=*/{}, outputs);
 }
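For readers less familiar with the C++ `Tape`, the public Python `tf.GradientTape` exercises the same watch/record/compute-gradient flow; a rough analog of this test's Neg case (an illustration, not the API under test):

```python
import tensorflow as tf

x = tf.constant([1.0, -2.0])
with tf.GradientTape() as tape:
    tape.watch(x)           # mirrors tape.Watch(inputs[0])
    y = tf.negative(x)      # mirrors ops::Neg
grad = tape.gradient(y, x)  # mirrors tape.ComputeGradient(...)
print(grad.numpy())         # [-1. -1.]: d(-x)/dx == -1 elementwise
```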
13 changes: 5 additions & 8 deletions tensorflow/c/experimental/gradients/custom_gradient_test.cc
@@ -70,20 +70,17 @@ Status ExpWithPassThroughGrad(AbstractContext* ctx,
                               absl::Span<AbstractTensorHandle*> outputs) {
   Tape tape(/*persistent=*/false);
   tape.Watch(inputs[0]);  // Watch x.
-  std::vector<AbstractTensorHandle*> exp_outputs(1);
-  TF_RETURN_IF_ERROR(
-      ops::Exp(ctx, inputs[0], absl::MakeSpan(exp_outputs), "Exp"));
+  AbstractTensorHandle* exp_output;
+  TF_RETURN_IF_ERROR(ops::Exp(ctx, inputs[0], &exp_output, "Exp"));
   std::unique_ptr<GradientFunction> gradient_function(
       new PassThroughGradientFunction);
-  tape.RecordOperation(inputs, exp_outputs, gradient_function.release());
+  tape.RecordOperation(inputs, {exp_output}, gradient_function.release());
   TF_RETURN_IF_ERROR(tape.ComputeGradient(ctx,
-                                          /*targets*/ exp_outputs,
+                                          /*targets*/ {exp_output},
                                           /*sources=*/inputs,
                                           /*output_gradients=*/{},
                                           /*result=*/outputs));
-  for (auto exp_output : exp_outputs) {
-    exp_output->Unref();
-  }
+  exp_output->Unref();
   return Status::OK();
 }
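The pass-through gradient registered via `PassThroughGradientFunction` has a direct public-API counterpart in Python's `tf.custom_gradient`; a loose sketch of what `ExpWithPassThroughGrad` checks (illustrative, not the test's actual code path):

```python
import tensorflow as tf

@tf.custom_gradient
def exp_with_pass_through_grad(x):
    y = tf.exp(x)
    def grad(upstream):
        # Pass the upstream gradient through unchanged, ignoring
        # the true derivative d(exp x)/dx = exp(x).
        return upstream
    return y, grad

x = tf.constant(1.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = exp_with_pass_through_grad(x)
print(tape.gradient(y, x).numpy())  # 1.0 rather than e ≈ 2.718
```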
