diff --git a/.github/workflows/automerge.yml b/.github/workflows/automerge.yml
index f96a072d..6303b329 100644
--- a/.github/workflows/automerge.yml
+++ b/.github/workflows/automerge.yml
@@ -5,7 +5,7 @@ name: Python application
 
 on:
   push:
-    branches: [ "main", "180-question-leakage-potentially-causing-the-extraordinarily-low-rmse-for-ames-dataset" ]
+    branches: [ "main", "201-fix-out-of-range-values-for-p_lateral_connection" ]
 
 permissions:
   contents: read
diff --git a/regression-example-ames-no-preproc-val-set.py b/regression-example-ames-no-preproc-val-set.py
index 0a663b27..521f0338 100644
--- a/regression-example-ames-no-preproc-val-set.py
+++ b/regression-example-ames-no-preproc-val-set.py
@@ -152,18 +152,18 @@ def hash_based_split(df, # Pandas dataframe
 
 meta_trial_number = 0 # In distributed training set this to a random number
 
-activation = 'swish'
-predecessor_level_connection_affinity_factor_first = 0.506486683067576
-predecessor_level_connection_affinity_factor_main = 1.6466748663373876
-max_consecutive_lateral_connections = 35
-p_lateral_connection = 3.703218275217572
-num_lateral_connection_tries_per_unit = 12
-learning_rate = 0.02804912925494706
-epochs = 130
-batch_size = 78
+activation = 'relu'
+predecessor_level_connection_affinity_factor_first = 3.458608634090366
+predecessor_level_connection_affinity_factor_main = 3.020897950280901
+max_consecutive_lateral_connections = 29
+p_lateral_connection = 0.4724567916748979
+num_lateral_connection_tries_per_unit = 7
+learning_rate = 0.04143817317646551
+epochs = 85
+batch_size = 97
 maximum_levels = 4
 maximum_units_per_level = 3
-maximum_neurons_per_unit = 3
+maximum_neurons_per_unit = 5
 
 
 cerebros =\
@@ -176,11 +176,11 @@ def hash_based_split(df, # Pandas dataframe
         validation_split=0.0,
         direction='minimize',
         metric_to_rank_by='val_root_mean_squared_error',
-        minimum_levels=4,
+        minimum_levels=3,
         maximum_levels=maximum_levels,
         minimum_units_per_level=2,
         maximum_units_per_level=maximum_units_per_level,
-        minimum_neurons_per_unit=3,
+        minimum_neurons_per_unit=4,
         maximum_neurons_per_unit=maximum_neurons_per_unit,
         validation_data=(val_x, val_labels),
         activation=activation,
diff --git a/regression-example-ames-no-preproc.py b/regression-example-ames-no-preproc.py
index 436514e7..b0ad6224 100644
--- a/regression-example-ames-no-preproc.py
+++ b/regression-example-ames-no-preproc.py
@@ -56,18 +56,18 @@
 # discovered in a bayesian tuning study done on Katib)
 meta_trial_number = 0 # In distributed training set this to a random number
 
-activation = 'swish'
-predecessor_level_connection_affinity_factor_first = 0.506486683067576
-predecessor_level_connection_affinity_factor_main = 1.6466748663373876
-max_consecutive_lateral_connections = 35
-p_lateral_connection = 3.703218275217572
-num_lateral_connection_tries_per_unit = 12
-learning_rate = 0.02804912925494706
-epochs = 130
-batch_size = 78
+activation = 'relu'
+predecessor_level_connection_affinity_factor_first = 3.458608634090366
+predecessor_level_connection_affinity_factor_main = 3.020897950280901
+max_consecutive_lateral_connections = 29
+p_lateral_connection = 0.4724567916748979
+num_lateral_connection_tries_per_unit = 7
+learning_rate = 0.04143817317646551
+epochs = 85 # 52
+batch_size = 97
 maximum_levels = 4
 maximum_units_per_level = 3
-maximum_neurons_per_unit = 3
+maximum_neurons_per_unit = 5
 
 
 cerebros =\
@@ -80,11 +80,11 @@
         validation_split=0.35,
         direction='minimize',
         metric_to_rank_by='val_root_mean_squared_error',
-        minimum_levels=4,
+        minimum_levels=3,
         maximum_levels=maximum_levels,
         minimum_units_per_level=2,
         maximum_units_per_level=maximum_units_per_level,
-        minimum_neurons_per_unit=3,
+        minimum_neurons_per_unit=4,
         maximum_neurons_per_unit=maximum_neurons_per_unit,
         activation=activation,
         final_activation=None,