From a59f0ae812e5a578f2ee01d1b4693568ee816d2e Mon Sep 17 00:00:00 2001
From: Mikko Kotila
Date: Sun, 21 Apr 2024 10:38:10 +0300
Subject: [PATCH] rename logcosh to LogCosh

---
 docs/Examples_Generator.md                            |  2 +-
 docs/Examples_Generator_Code.md                       |  2 +-
 docs/Examples_PyTorch.md                              |  2 +-
 docs/Examples_PyTorch_Code.md                         |  2 +-
 docs/Examples_Typical.md                              |  2 +-
 docs/Hidden_Layers.md                                 |  2 +-
 ...mization with Keras for the Iris Prediction.ipynb  |  2 +-
 .../Recover Best Models from Experiment Log.ipynb     |  2 +-
 talos/autom8/autoparams.py                            |  2 +-
 talos/templates/params.py                             | 12 ++++++------
 tests/commands/test_latest.py                         |  2 +-
 tests/commands/test_rest.py                           |  6 +++---
 tests/commands/test_scan.py                           |  4 ++--
 13 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/docs/Examples_Generator.md b/docs/Examples_Generator.md
index 304e7a5..003d451 100644
--- a/docs/Examples_Generator.md
+++ b/docs/Examples_Generator.md
@@ -84,7 +84,7 @@ return out, model
 ```python
 p = {'activation':['relu', 'elu'],
      'optimizer': ['Adam'],
-     'losses': ['logcosh'],
+     'losses': ['LogCosh'],
      'shapes': ['brick'],
      'first_neuron': [32],
      'dropout': [.2, .3],
diff --git a/docs/Examples_Generator_Code.md b/docs/Examples_Generator_Code.md
index 103a46f..d0c8cde 100644
--- a/docs/Examples_Generator_Code.md
+++ b/docs/Examples_Generator_Code.md
@@ -37,7 +37,7 @@ def mnist_model(x_train, y_train, x_val, y_val, params):
 
 p = {'activation':['relu', 'elu'],
      'optimizer': ['Adam'],
-     'losses': ['logcosh'],
+     'losses': ['LogCosh'],
      'shapes': ['brick'],
      'first_neuron': [32],
      'dropout': [.2, .3],
diff --git a/docs/Examples_PyTorch.md b/docs/Examples_PyTorch.md
index f8d95a7..1ad4872 100644
--- a/docs/Examples_PyTorch.md
+++ b/docs/Examples_PyTorch.md
@@ -139,7 +139,7 @@ return net, net.parameters()
 ```python
 p = {'activation':['relu', 'elu'],
      'optimizer': ['Adagrad', 'Adam'],
-     'losses': ['logcosh'],
+     'losses': ['LogCosh'],
      'hidden_layers':[0, 1, 2],
      'batch_size': (20, 50, 5),
      'epochs': [10, 20]}
diff --git a/docs/Examples_PyTorch_Code.md b/docs/Examples_PyTorch_Code.md
index 0d1b0b9..480185b 100644
--- a/docs/Examples_PyTorch_Code.md
+++ b/docs/Examples_PyTorch_Code.md
@@ -107,7 +107,7 @@ def breast_cancer(x_train, y_train, x_val, y_val, params):
 
 p = {'activation':['relu', 'elu'],
      'optimizer': ['Adagrad', 'Adam'],
-     'losses': ['logcosh'],
+     'losses': ['LogCosh'],
      'hidden_layers':[0, 1, 2],
      'batch_size': (20, 50, 5),
      'epochs': [10, 20]}
diff --git a/docs/Examples_Typical.md b/docs/Examples_Typical.md
index 6339696..e4ffe97 100644
--- a/docs/Examples_Typical.md
+++ b/docs/Examples_Typical.md
@@ -56,7 +56,7 @@ return out, model
 ```python
 p = {'activation':['relu', 'elu'],
      'optimizer': ['Adagrad', 'Adam'],
-     'losses': ['logcosh'],
+     'losses': ['LogCosh'],
      'hidden_layers':[0, 1, 2],
      'batch_size': (20, 50, 5),
      'epochs': [10, 20]}
diff --git a/docs/Hidden_Layers.md b/docs/Hidden_Layers.md
index 60a719e..62b65e1 100644
--- a/docs/Hidden_Layers.md
+++ b/docs/Hidden_Layers.md
@@ -19,7 +19,7 @@ When hidden layers are used, `dropout`, `shapes`, `hidden_layers`, and `first_ne
 
 p = {'activation':['relu', 'elu'],
      'optimizer': ['Adagrad', 'Adam'],
-     'losses': ['logcosh'],
+     'losses': ['LogCosh'],
      'shapes': ['brick'],          # <<< required
      'first_neuron': [32, 64],     # <<< required
      'hidden_layers':[0, 1, 2],    # <<< required
diff --git a/examples/Hyperparameter Optimization with Keras for the Iris Prediction.ipynb b/examples/Hyperparameter Optimization with Keras for the Iris Prediction.ipynb
index 9caa432..223c67f 100644
--- a/examples/Hyperparameter Optimization with Keras for the Iris Prediction.ipynb
+++ b/examples/Hyperparameter Optimization with Keras for the Iris Prediction.ipynb
@@ -147,7 +147,7 @@
    "source": [
     "from tensorflow.keras.optimizers.legacy import Adam, Adagrad\n",
     "from tensorflow.keras.activations import softmax\n",
-    "from tensorflow.keras.losses import categorical_crossentropy, logcosh\n",
+    "from tensorflow.keras.losses import categorical_crossentropy, LogCosh\n",
     "\n",
     "p = {'lr': (0.1, 10, 10),\n",
     "     'first_neuron':[4, 8, 16, 32, 64, 128],\n",
diff --git a/examples/Recover Best Models from Experiment Log.ipynb b/examples/Recover Best Models from Experiment Log.ipynb
index cefe837..ed4ae3c 100644
--- a/examples/Recover Best Models from Experiment Log.ipynb
+++ b/examples/Recover Best Models from Experiment Log.ipynb
@@ -48,7 +48,7 @@
    "# set the parameter space boundary\n",
    "p = {'activation':['relu', 'elu'],\n",
    "     'optimizer': ['Adagrad', 'Adam'],\n",
-   "     'losses': ['logcosh'],\n",
+   "     'losses': ['LogCosh'],\n",
    "     'shapes': ['brick'],\n",
    "     'first_neuron': [16, 32, 64, 128],\n",
    "     'hidden_layers':[0, 1, 2, 3],\n",
diff --git a/talos/autom8/autoparams.py b/talos/autom8/autoparams.py
index e6cc1e3..0e9631b 100644
--- a/talos/autom8/autoparams.py
+++ b/talos/autom8/autoparams.py
@@ -2,7 +2,7 @@
 
 from tensorflow.keras.optimizers.legacy import Adam, Adagrad, SGD
 
-loss = {'binary': ['binary_crossentropy', 'logcosh'],
+loss = {'binary': ['binary_crossentropy', 'LogCosh'],
         'multi_class': ['sparse_categorical_crossentropy'],
         'multi_label': ['categorical_crossentropy'],
         'continuous': ['mae']}
diff --git a/talos/templates/params.py b/talos/templates/params.py
index b8b023e..a6b154d 100644
--- a/talos/templates/params.py
+++ b/talos/templates/params.py
@@ -9,7 +9,7 @@ def titanic(debug=False):
          'dropout': (0, 0.5, 5),
          'optimizer': [Adam(), Adagrad()],
          'epochs': [50, 100, 150],
-         'losses': ['logcosh', 'binary_crossentropy'],
+         'losses': ['LogCosh', 'binary_crossentropy'],
          'shapes': ['brick', 'triangle', 0.2],
          'hidden_layers': [0, 1, 2, 3, 4],
          'activation': ['relu', 'elu'],
@@ -23,7 +23,7 @@ def titanic(debug=False):
              'dropout': [0.2, 0.3],
              'optimizer': [Adam(), Adagrad()],
              'epochs': [50, 100],
-             'losses': ['logcosh', 'binary_crossentropy'],
+             'losses': ['LogCosh', 'binary_crossentropy'],
              'shapes': ['brick', 'triangle', 0.2],
              'hidden_layers': [0, 1],
              'activation': ['relu', 'elu'],
@@ -35,7 +35,7 @@ def titanic(debug=False):
 def iris():
 
     from tensorflow.keras.optimizers.legacy import Adam, Adagrad
-    from tensorflow.keras.losses import logcosh, categorical_crossentropy
+    from tensorflow.keras.losses import LogCosh, categorical_crossentropy
     from tensorflow.keras.activations import relu, elu, softmax
 
     # here use a standard 2d dictionary for inputting the param boundaries
@@ -49,7 +49,7 @@ def iris():
          'emb_output_dims': [None],
          'shapes': ['brick', 'triangle', 0.2],
          'optimizer': [Adam, Adagrad],
-         'losses': [logcosh, categorical_crossentropy],
+         'losses': [LogCosh, categorical_crossentropy],
          'activation': [relu, elu],
          'last_activation': [softmax]}
 
@@ -59,7 +59,7 @@ def breast_cancer():
 
     from tensorflow.keras.optimizers.legacy import Adam, Adagrad, RMSprop
-    from tensorflow.keras.losses import logcosh, binary_crossentropy
+    from tensorflow.keras.losses import LogCosh, binary_crossentropy
     from tensorflow.keras.activations import relu, elu, sigmoid
 
     # then we can go ahead and set the parameter space
@@ -71,7 +71,7 @@ def breast_cancer():
          'dropout': (0, 0.5, 5),
          'shapes': ['brick', 'triangle', 'funnel'],
          'optimizer': [Adam, Adagrad, RMSprop],
-         'losses': [logcosh, binary_crossentropy],
+         'losses': [LogCosh, binary_crossentropy],
          'activation': [relu, elu],
          'last_activation': [sigmoid]}
diff --git a/tests/commands/test_latest.py b/tests/commands/test_latest.py
index 8692034..b1a690e 100644
--- a/tests/commands/test_latest.py
+++ b/tests/commands/test_latest.py
@@ -14,7 +14,7 @@ def test_latest():
 
     p = {'activation': ['relu', 'elu'],
          'optimizer': ['Adagrad', 'Adam'],
-         'losses': ['logcosh'],
+         'losses': ['LogCosh'],
          'shapes': ['brick'],
          'first_neuron': [16, 32, 64, 128],
          'hidden_layers': [0, 1, 2, 3],
diff --git a/tests/commands/test_rest.py b/tests/commands/test_rest.py
index b6dfcd6..7382e7e 100644
--- a/tests/commands/test_rest.py
+++ b/tests/commands/test_rest.py
@@ -45,7 +45,7 @@ def test_rest(scan_object):
     model1 = Sequential()
     model1.add(Dense(10, input_dim=x.shape[1]))
     model1.add(Dense(1))
-    model1.compile('adam', 'logcosh', metrics=metrics)
+    model1.compile('adam', 'LogCosh', metrics=metrics)
     model1.fit(x, y, callbacks=callbacks)
 
     print('\n ...generator... \n')
@@ -53,7 +53,7 @@ def test_rest(scan_object):
     model2 = Sequential()
     model2.add(Dense(10, input_dim=x.shape[1]))
     model2.add(Dense(1))
-    model2.compile('adam', 'logcosh')
+    model2.compile('adam', 'LogCosh')
     model2.fit_generator(talos.utils.generator(x, y, 10), 5)
 
     print('\n ...SequenceGenerator... \n')
@@ -61,7 +61,7 @@ def test_rest(scan_object):
     model3 = Sequential()
     model3.add(Dense(10, input_dim=x.shape[1]))
     model3.add(Dense(1))
-    model3.compile('adam', 'logcosh')
+    model3.compile('adam', 'LogCosh')
    model3.fit_generator(talos.utils.SequenceGenerator(x, y, 10))
 
     print('\n ...gpu_utils... \n')
diff --git a/tests/commands/test_scan.py b/tests/commands/test_scan.py
index c6dcdd9..6e1eae4 100644
--- a/tests/commands/test_scan.py
+++ b/tests/commands/test_scan.py
@@ -12,7 +12,7 @@ def test_scan():
 
     p = {'activation': [relu, elu],
         'optimizer': ['Adagrad', Adam],
-        'losses': ['logcosh', binary_crossentropy],
+        'losses': ['LogCosh', binary_crossentropy],
         'shapes': ['brick', 'funnel', 'triangle'],
         'first_neuron': [16],
         'hidden_layers': ([0, 1, 2, 3]),
@@ -51,7 +51,7 @@ def iris_model(x_train, y_train, x_val, y_val, params):
 
     p_for_q = {'activation': ['relu', 'elu'],
                'optimizer': ['Adagrad', 'Adam'],
-               'losses': ['logcosh'],
+               'losses': ['LogCosh'],
                'shapes': ['brick'],
                'first_neuron': [16, 32, 64, 128],
                'hidden_layers': [0, 1, 2, 3],
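
Editorial note, not part of the patch: the rename appears to track newer Keras releases, where the lowercase `logcosh` function alias is no longer importable from `tensorflow.keras.losses` while the `LogCosh` class remains. The hunks above use the new name in two forms: the imported class (in `talos/templates/params.py`) and a plain string (in the docs, notebooks, and tests), which Keras resolves to the registered class at `compile()` time. A minimal sketch of both forms, assuming TensorFlow 2.x; the toy model and sample arrays are illustrative only:

```python
# Minimal sketch, not part of the patch; assumes TensorFlow 2.x.
import numpy as np
from tensorflow.keras.losses import LogCosh
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# Form 1: the imported class, as in talos/templates/params.py.
# LogCosh computes mean(log(cosh(y_pred - y_true))) over the last axis.
loss_fn = LogCosh()
y_true = np.array([[0.0, 1.0]])
y_pred = np.array([[0.1, 0.8]])
print(float(loss_fn(y_true, y_pred)))  # ~0.0124

# Form 2: the string identifier, as in the docs and test hunks;
# Keras instantiates the registered class name passed to compile().
model = Sequential()
model.add(Dense(1, input_dim=2))
model.compile(optimizer='adam', loss='LogCosh')
```

Both forms train identically; the string form keeps the parameter dictionary serializable in the Talos experiment log, which is presumably why the docs and tests use it.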