Fixes for reference before assignment… #18640

Open
wants to merge 9 commits into base: master
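
This PR addresses "local variable referenced before assignment" diagnostics across the examples, guides, and library code. The fixes below take three recurring forms: initialize a default before a loop or conditional that may not run, add an exhaustive `else` branch that raises, or replace a take-one loop with `next(iter(...))`. A minimal sketch of the first two forms (illustrative only; these names are not from the PR):

    def train(epochs, steps_per_epoch):
        for epoch in range(epochs):
            loss_value = None  # default: bound even if the inner loop runs zero steps
            for step in range(steps_per_epoch):
                loss_value = 0.1 * step  # stand-in for a real train step
            print(f"epoch {epoch}: last loss = {loss_value}")

    def encode(output_mode, value):
        if output_mode == "one_hot":
            encoded = [int(i == value) for i in range(4)]
        elif output_mode == "count":
            encoded = [value]
        else:
            # Exhaustive else: fail loudly here rather than with a
            # NameError on `encoded` below.
            raise ValueError(f"Unknown output_mode: {output_mode}")
        return encoded
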
1 change: 1 addition & 0 deletions examples/demo_jax_distributed.py
@@ -287,6 +287,7 @@ def train_step(train_state, x, y):
 print("\nTrainig:")
 data_iter = iter(train_data)
 for epoch in range(EPOCHS):
+    loss_value = None  # default
     for i in tqdm(range(STEPS_PER_EPOCH)):
         x, y = next(data_iter)
         sharded_x = jax.device_put(x.numpy(), data_sharding)
@@ -281,8 +281,7 @@ def normalize(data, train_split):
 )


-for batch in dataset_train.take(1):
-    inputs, targets = batch
+inputs, targets = next(iter(dataset_train))

 print("Input shape:", inputs.numpy().shape)
 print("Target shape:", targets.numpy().shape)
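
Most of the changes below repeat the idiom above: `for batch in ds.take(1): ...` leaves the loop variable unbound when the dataset yields nothing, which trips the reference-before-assignment check on the line that uses it, whereas `next(iter(ds))` either binds the name or raises `StopIteration` immediately. A self-contained sketch (the toy dataset is hypothetical, not from the PR):

    import tensorflow as tf

    ds = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]]).batch(2)

    # Before: `batch` stays undefined if `ds` is empty.
    for batch in ds.take(1):
        pass

    # After: always binds `batch`, or fails loudly on an empty dataset.
    batch = next(iter(ds))
    print(batch.numpy())
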
4 changes: 2 additions & 2 deletions guides/custom_train_step_in_jax.py
@@ -124,7 +124,7 @@ def train_step(self, state, data):
         )

         # Update metrics.
-        new_metrics_vars = []
+        new_metrics_vars, logs = [], []
         for metric in self.metrics:
             this_metric_vars = metrics_variables[
                 len(new_metrics_vars) : len(new_metrics_vars)
@@ -314,7 +314,7 @@ def test_step(self, state, data):
         loss = self.compute_loss(x, y, y_pred)

         # Update metrics.
-        new_metrics_vars = []
+        new_metrics_vars, logs = [], []
         for metric in self.metrics:
             this_metric_vars = metrics_variables[
                 len(new_metrics_vars) : len(new_metrics_vars)
1 change: 1 addition & 0 deletions guides/distributed_training_with_jax.py
@@ -251,6 +251,7 @@ def get_replicated_train_state(devices):
 # Custom training loop
 for epoch in range(num_epochs):
     data_iter = iter(train_data)
+    loss_value = None  # default
     for data in data_iter:
         x, y = data
         sharded_x = jax.device_put(x.numpy(), data_sharding)
2 changes: 2 additions & 0 deletions keras/activations/activations.py
@@ -83,6 +83,8 @@ def static_call(x, negative_slope=0.0, max_value=None, threshold=0.0):
                 negative_part = backend.nn.relu(-x + threshold)
             else:
                 negative_part = backend.nn.relu(-x)
+        else:
+            negative_part = 1

         clip_max = max_value is not None
         if threshold != 0:
4 changes: 4 additions & 0 deletions keras/applications/densenet.py
@@ -294,6 +294,8 @@ def DenseNet(
                     cache_subdir="models",
                     file_hash="1ceb130c1ea1b78c3bf6114dbdfd8807",
                 )
+            else:
+                raise ValueError("weights_path undefined")
         else:
             if blocks == [6, 12, 24, 16]:
                 weights_path = file_utils.get_file(
@@ -316,6 +318,8 @@ def DenseNet(
                     cache_subdir="models",
                     file_hash="c13680b51ded0fb44dff2d8f86ac8bb1",
                 )
+            else:
+                raise ValueError("weights_path undefined")
         model.load_weights(weights_path)
     elif weights is not None:
         model.load_weights(weights)
1 change: 1 addition & 0 deletions keras/backend/jax/trainer.py
@@ -412,6 +412,7 @@ def fit(

         self.make_train_function()
         self.stop_training = False
+        training_logs = {}
         callbacks.on_train_begin()

         for epoch in range(initial_epoch, epochs):
1 change: 1 addition & 0 deletions keras/backend/torch/trainer.py
@@ -280,6 +280,7 @@ def fit(
         )

         self.stop_training = False
+        training_logs = {}
         self.make_train_function()
         callbacks.on_train_begin()

2 changes: 2 additions & 0 deletions keras/layers/attention/attention.py
@@ -129,6 +129,8 @@ def _calculate_scores(self, query, key):
             scores = self.concat_score_weight * ops.sum(
                 ops.tanh(q_reshaped + k_reshaped), axis=-1
             )
+        else:
+            raise ValueError("scores not computed")

         return scores

2 changes: 2 additions & 0 deletions keras/layers/preprocessing/category_encoding.py
@@ -129,6 +129,8 @@ def _encode(self, inputs):
             )
         elif self.output_mode == "count":
             outputs = self._count(inputs)
+        else:
+            raise ValueError("_encode outputs not calculated")

         return outputs

3 changes: 1 addition & 2 deletions keras/layers/preprocessing/category_encoding_test.py
@@ -164,6 +164,5 @@ def test_tf_data_compatibility(self):
             ]
         )
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(4).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, expected_output)
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/center_crop_test.py
@@ -151,8 +151,7 @@ def test_tf_data_compatibility(self):
         layer = layers.CenterCrop(8, 9)
         input_data = np.random.random((2, 10, 12, 3))
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertEqual(list(output.shape), [2, 8, 9, 3])

     def test_list_compatibility(self):
7 changes: 3 additions & 4 deletions keras/layers/preprocessing/discretization_test.py
@@ -76,8 +76,7 @@ def test_tf_data_compatibility(self):
         x = np.array([[-1.0, 0.0, 0.1, 0.2, 0.4, 0.5, 1.0, 1.2, 0.98]])
         self.assertAllClose(layer(x), np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]]))
         ds = tf_data.Dataset.from_tensor_slices(x).batch(1).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, np.array([[0, 1, 1, 1, 2, 3, 4, 4, 3]]))

         # With adapt flow
@@ -87,8 +86,8 @@ def test_tf_data_compatibility(self):
         )
         x = np.array([[0.0, 0.1, 0.3]])
         ds = tf_data.Dataset.from_tensor_slices(x).batch(1).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        output = next(iter(ds)).numpy()
+        self.assertAllClose(output, x)

     def test_saving(self):
         # With fixed bins
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/feature_space.py
@@ -517,8 +517,7 @@ def adapt(self, dataset):
             preprocessor = self.preprocessors[name]
             # TODO: consider adding an adapt progress bar.
             # Sample 1 element to check the rank
-            for x in feature_dataset.take(1):
-                pass
+            x = next(iter(feature_dataset))
             if len(x.shape) == 0:
                 # The dataset yields unbatched scalars; batch it.
                 feature_dataset = feature_dataset.batch(32)
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/hashed_crossing_test.py
@@ -74,8 +74,7 @@ def test_tf_data_compatibility(self):
             .batch(5)
             .map(lambda x1, x2: layer((x1, x2)))
         )
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(np.array([1, 4, 1, 1, 3]), output)

     def test_upsupported_shape_input_fails(self):
5 changes: 3 additions & 2 deletions keras/layers/preprocessing/hashing_test.py
@@ -60,8 +60,7 @@ def test_tf_data_compatibility(self):
         layer = layers.Hashing(num_bins=3)
         inp = [["A"], ["B"], ["C"], ["D"], ["E"]]
         ds = tf.data.Dataset.from_tensor_slices(inp).batch(5).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, np.array([[1], [0], [1], [1], [2]]))

     @parameterized.named_parameters(
@@ -306,6 +305,8 @@ def test_count_output(self, input_value, expected_output, output_shape):
             symbolic_sample_shape = ()
         elif input_array.ndim == 2:
             symbolic_sample_shape = (None,)
+        else:
+            raise TypeError("Unknown `symbolic_sample_shape`")
         inputs = layers.Input(shape=symbolic_sample_shape, dtype="int32")
         layer = layers.Hashing(num_bins=3, output_mode="count")
         outputs = layer(inputs)
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/integer_lookup_test.py
@@ -102,6 +102,5 @@ def test_tf_data_compatibility(self):
         )
         input_data = [2, 3, 4, 5]
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(4).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, np.array([2, 3, 4, 0]))
2 changes: 2 additions & 0 deletions keras/layers/preprocessing/normalization.py
@@ -275,6 +275,8 @@ def adapt(self, data):
                     batch_var + (batch_mean - new_total_mean) ** 2
                 ) * batch_weight
                 total_mean = new_total_mean
+        else:
+            raise NotImplementedError(type(data))

         self.adapt_mean.assign(total_mean)
         self.adapt_variance.assign(total_var)
2 changes: 2 additions & 0 deletions keras/layers/preprocessing/normalization_test.py
@@ -65,6 +65,8 @@ def test_normalization_adapt(self, input_type):
             data = backend.convert_to_tensor(x)
         elif input_type == "tf.data":
             data = tf_data.Dataset.from_tensor_slices(x).batch(8)
+        else:
+            raise NotImplementedError(input_type)

         layer = layers.Normalization()
         layer.adapt(data)
4 changes: 2 additions & 2 deletions keras/layers/preprocessing/random_brightness_test.py
@@ -56,5 +56,5 @@ def test_tf_data_compatibility(self):
         layer = layers.RandomBrightness(factor=0.5, seed=1337)
         input_data = np.random.random((2, 8, 8, 3))
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        output = next(iter(ds)).numpy()
+        self.assertAllClose(output, input_data)
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/random_contrast_test.py
@@ -43,5 +43,4 @@ def test_tf_data_compatibility(self):
         layer = layers.RandomContrast(factor=0.5, seed=1337)
         input_data = np.random.random((2, 8, 8, 3))
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        next(iter(ds)).numpy()
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/random_crop_test.py
@@ -69,6 +69,5 @@ def test_tf_data_compatibility(self):
         layer = layers.RandomCrop(8, 9)
         input_data = np.random.random((2, 10, 12, 3))
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertEqual(list(output.shape), [2, 8, 9, 3])
6 changes: 2 additions & 4 deletions keras/layers/preprocessing/random_flip_test.py
@@ -135,8 +135,7 @@ def test_tf_data_compatibility(self):
         input_data = np.array([[[2, 3, 4]], [[5, 6, 7]]])
         expected_output = np.array([[[5, 6, 7]], [[2, 3, 4]]])
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, expected_output)
         # Test 4D input: shape (2, 2, 1, 3)
         layer = layers.RandomFlip("vertical", seed=42)
@@ -159,6 +158,5 @@ def test_tf_data_compatibility(self):
             ]
         )
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, expected_output)
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/random_rotation_test.py
@@ -65,6 +65,5 @@ def test_tf_data_compatibility(self):
                 [4, 3, 2, 1, 0],
             ]
         ).reshape((5, 5, 1))
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(expected_output, output)
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/random_translation_test.py
@@ -327,5 +327,4 @@ def test_tf_data_compatibility(self):
         layer = layers.RandomTranslation(0.2, 0.1)
         input_data = np.random.random((1, 4, 4, 3))
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(1).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        next(iter(ds)).numpy()
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/random_zoom_test.py
@@ -106,8 +106,7 @@ def test_tf_data_compatibility(self):
                 [0, 0, 0, 0, 0],
             ]
         ).reshape((1, 5, 5, 1))
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(expected_output, output)

     def test_dynamic_shape(self):
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/rescaling_test.py
@@ -72,8 +72,7 @@ def test_tf_data_compatibility(self):
         layer = layers.Rescaling(scale=1.0 / 255, offset=0.5)
         x = np.random.random((3, 10, 10, 3)) * 255
         ds = tf_data.Dataset.from_tensor_slices(x).batch(3).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        next(iter(ds)).numpy()

     def test_rescaling_with_channels_first_and_vector_scale(self):
         config = backend.image_data_format()
6 changes: 2 additions & 4 deletions keras/layers/preprocessing/resizing_test.py
@@ -180,8 +180,7 @@ def test_tf_data_compatibility(self):
         layer = layers.Resizing(8, 9)
         input_data = np.random.random((2, 10, 12, 3))
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertEqual(list(output.shape), [2, 8, 9, 3])

     @pytest.mark.skipif(
@@ -198,6 +197,5 @@ def test_tf_data_compatibility_sequential(self):
             .batch(2)
             .map(Sequential([layer]))
         )
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertEqual(list(output.shape), [2, 8, 9, 3])
3 changes: 1 addition & 2 deletions keras/layers/preprocessing/string_lookup_test.py
@@ -55,6 +55,5 @@ def test_tf_data_compatibility(self):
         )
         input_data = ["b", "c", "d"]
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(3).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, np.array([2, 3, 0]))
6 changes: 2 additions & 4 deletions keras/layers/preprocessing/text_vectorization_test.py
@@ -72,8 +72,7 @@ def test_tf_data_compatibility(self):
         )
         input_data = [["foo qux bar"], ["qux baz"]]
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output = output.numpy()
+        output = next(iter(ds)).numpy()
         self.assertAllClose(output, np.array([[4, 1, 3, 0], [1, 2, 0, 0]]))

         # Test adapt flow
@@ -84,8 +83,7 @@ def test_tf_data_compatibility(self):
         )
         layer.adapt(input_data)
         ds = tf_data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
-        for output in ds.take(1):
-            output.numpy()
+        next(iter(ds)).numpy()

     @pytest.mark.skipif(
         backend.backend() != "tensorflow", reason="Requires string tensors."
2 changes: 2 additions & 0 deletions keras/legacy/backend.py
@@ -1279,6 +1279,8 @@ def relu(x, alpha=0.0, max_value=None, threshold=0.0):
             negative_part = tf.nn.relu(-x + threshold)
         else:
             negative_part = tf.nn.relu(-x)
+    else:
+        negative_part = 1

     clip_max = max_value is not None

5 changes: 3 additions & 2 deletions keras/models/model.py
@@ -418,6 +418,7 @@ def load_weights(self, filepath, skip_mismatch=False, **kwargs):
     def build_from_config(self, config):
         if not config:
             return
+        status = False
         if "input_shape" in config:
             # Case: all inputs are in the first arg (possibly nested).
             if utils.is_default(self.build):
@@ -429,7 +430,7 @@ def build_from_config(self, config):
                 self.build(config["input_shape"])
                 status = True
             except:
-                status = False
+                pass
             self._build_shapes_dict = config

         elif "shapes_dict" in config:
@@ -441,7 +442,7 @@ def build_from_config(self, config):
                 self.build(**config["shapes_dict"])
                 status = True
             except:
-                status = False
+                pass
             self._build_shapes_dict = config["shapes_dict"]

         if not status:
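
Note the shape of this fix: a single `status = False` is hoisted ahead of both branches, so the final `if not status:` check is well defined even when neither the `"input_shape"` nor the `"shapes_dict"` case matches, and the now-redundant `status = False` in each `except` handler collapses to `pass`.
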
1 change: 1 addition & 0 deletions keras/models/sequential.py
@@ -320,6 +320,7 @@ def from_config(cls, config, custom_objects=None):
             model.add(layer)
         if (
             not model._functional
+            and "build_input_shape" in locals()
             and build_input_shape
             and isinstance(build_input_shape, (tuple, list))
         ):
2 changes: 1 addition & 1 deletion keras/ops/function.py
@@ -275,7 +275,7 @@ def map_graph(inputs, outputs):
                    "The following previous operations were accessed "
                    f"without issue: {operations_with_complete_input}"
                )
-            operations_with_complete_input.append(operation.name)
+            operations_with_complete_input.append(node.operation.name)

            for x in tree.flatten(node.outputs):
                computable_tensors.add(x)