Searched refs:adagrad (Results 1 – 25 of 45) sorted by relevance
/external/tensorflow/tensorflow/contrib/optimizer_v2/

  adagrad_test.py
    23   from tensorflow.contrib.optimizer_v2 import adagrad
    48   ada_opt = adagrad.AdagradOptimizer(
    79   sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
    97   ada_opt = adagrad.AdagradOptimizer(
    129  ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
    162  repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
    164  aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
    186  update_op_repeated = adagrad.AdagradOptimizer(
    188  update_op_aggregated = adagrad.AdagradOptimizer(
    219  ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
    [all …]

  optimizer_v2_symbols.py
    23   from tensorflow.contrib.optimizer_v2.adagrad import AdagradOptimizer
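These optimizer_v2 tests all drive the same construction pattern. A minimal graph-mode sketch of that pattern, assuming a TF 1.x environment where tensorflow.contrib is available; the variable, loss, and hyperparameter values are illustrative, not taken from the tests:

    import tensorflow as tf
    from tensorflow.contrib.optimizer_v2 import adagrad

    # Illustrative variable and loss; the tests use similarly tiny setups.
    var = tf.Variable([1.0, 2.0])
    loss = tf.reduce_sum(var * var)

    # Same constructor signature as tf.train.AdagradOptimizer: a learning
    # rate plus the seed value for the squared-gradient accumulator.
    opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
    train_op = opt.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op)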
/external/tensorflow/tensorflow/python/keras/optimizer_v2/

  adagrad_test.py
    30   from tensorflow.python.keras.optimizer_v2 import adagrad
    84   ada_opt = adagrad.Adagrad(learning_rate)
    135  ada_opt = adagrad.Adagrad(learning_rate, decay=decay)
    181  ada_opt = adagrad.Adagrad(lr_schedule)
    222  sgd_op = adagrad.Adagrad(1.0).minimize(loss, var_list=[var0])
    247  ada_opt = adagrad.Adagrad(learning_rate)
    286  ada_opt = adagrad.Adagrad(learning_rate)
    331  repeated_update = adagrad.Adagrad(3.0).apply_gradients(
    333  aggregated_update = adagrad.Adagrad(3.0).apply_gradients(
    356  update_op_repeated = adagrad.Adagrad(2.0).minimize(
    [all …]
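The keras/optimizer_v2 class these tests import directly is exposed publicly as tf.keras.optimizers.Adagrad. A minimal eager-mode sketch of the usage the hits exercise, assuming TF 2.x; values are illustrative:

    import tensorflow as tf

    # Public alias of the keras/optimizer_v2 class imported in the tests.
    opt = tf.keras.optimizers.Adagrad(learning_rate=0.1,
                                      initial_accumulator_value=0.1)

    var = tf.Variable([1.0, 2.0])
    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(var * var)
    grads = tape.gradient(loss, [var])
    opt.apply_gradients(zip(grads, [var]))

    # Per the lr_schedule hit, a schedule object can replace the float rate:
    opt = tf.keras.optimizers.Adagrad(
        tf.keras.optimizers.schedules.ExponentialDecay(
            initial_learning_rate=0.1, decay_steps=100, decay_rate=0.9))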
/external/tensorflow/tensorflow/python/training/

  adagrad_test.py
    34   from tensorflow.python.training import adagrad
    57   ada_opt = adagrad.AdagradOptimizer(
    108  sgd_op = adagrad.AdagradOptimizer(1.0).minimize(loss)
    128  ada_opt = adagrad.AdagradOptimizer(
    163  ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
    197  repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
    199  aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
    222  update_op_repeated = adagrad.AdagradOptimizer(
    224  update_op_aggregated = adagrad.AdagradOptimizer(
    256  ada_opt = adagrad.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
    [all …]

  proximal_adagrad_test.py
    32   from tensorflow.python.training import adagrad
    228  adagrad.AdagradOptimizer(
    247  adagrad.AdagradOptimizer(
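The repeated_update / aggregated_update hits suggest these tests verify that duplicate indices in a sparse gradient are summed before the accumulator update. A sketch of what such a duplicate-index gradient looks like, assuming TF 1.x graph mode; shapes and values are illustrative:

    import tensorflow as tf
    from tensorflow.python.training import adagrad

    var = tf.Variable([[1.0], [2.0]])

    # An IndexedSlices gradient that hits row 1 twice; the sparse Adagrad
    # path must treat this like a single summed gradient of 0.2 on that row.
    grad = tf.IndexedSlices(
        values=tf.constant([[0.1], [0.1]]),
        indices=tf.constant([1, 1]),
        dense_shape=tf.constant([2, 1]))

    update = adagrad.AdagradOptimizer(3.0).apply_gradients([(grad, var)])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(update)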
/external/tensorflow/tensorflow/compiler/tests/

  adagrad_test.py
    28   from tensorflow.python.training import adagrad
    40   ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
    67   ada_opt = adagrad.AdagradOptimizer(
    95   ada_opt = adagrad.AdagradOptimizer(3.0)

  proximal_adagrad_test.py
    28   from tensorflow.python.training import adagrad
    166  adagrad.AdagradOptimizer(
/external/tensorflow/tensorflow/contrib/tensor_forest/hybrid/python/models/

  nn.py
    22   from tensorflow.python.training import adagrad
    31   optimizer_class=adagrad.AdagradOptimizer,

  decisions_to_data_then_nn.py
    23   from tensorflow.python.training import adagrad
    32   optimizer_class=adagrad.AdagradOptimizer,

  stochastic_soft_decisions_to_data_then_nn.py
    23   from tensorflow.python.training import adagrad
    33   optimizer_class=adagrad.AdagradOptimizer,

  stochastic_hard_decisions_to_data_then_nn.py
    23   from tensorflow.python.training import adagrad
    33   optimizer_class=adagrad.AdagradOptimizer,

  k_feature_decisions_to_data_then_nn.py
    23   from tensorflow.python.training import adagrad
    32   optimizer_class=adagrad.AdagradOptimizer,

  forest_to_data_then_nn.py
    23   from tensorflow.python.training import adagrad
    32   optimizer_class=adagrad.AdagradOptimizer,

  hard_decisions_to_data_then_nn.py
    25   from tensorflow.python.training import adagrad
    34   optimizer_class=adagrad.AdagradOptimizer,
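Every hybrid tensor_forest model in this directory takes the optimizer as a class rather than an instance, deferring construction until the model knows its learning rate. An illustrative sketch of that pattern; HybridModelSketch is a hypothetical stand-in, not the real class:

    from tensorflow.python.training import adagrad

    class HybridModelSketch(object):
        """Hypothetical stand-in showing the optimizer_class pattern."""

        def __init__(self, learning_rate=0.1,
                     optimizer_class=adagrad.AdagradOptimizer):
            # The class is instantiated internally, so callers can swap in
            # any optimizer whose first constructor argument is the rate.
            self.optimizer = optimizer_class(learning_rate)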
/external/tensorflow/tensorflow/contrib/distribute/python/

  estimator_integration_test.py
    27   from tensorflow.contrib.optimizer_v2 import adagrad
    103  dnn_optimizer=adagrad.AdagradOptimizer(0.001),
    104  linear_optimizer=adagrad.AdagradOptimizer(0.001),

  combinations.py
    50   from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
    59   from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2
    65   from tensorflow.python.training import adagrad
    413  "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
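The dnn_optimizer/linear_optimizer hits look like the wiring of separate Adagrad instances into the two towers of a combined estimator. A hedged sketch of such wiring, assuming TF 1.x with contrib and a DNNLinearCombinedClassifier (an assumption; the estimator class is not shown in the hits), with illustrative feature columns and hidden units:

    import tensorflow as tf
    from tensorflow.contrib.optimizer_v2 import adagrad

    fc = [tf.feature_column.numeric_column("x")]

    # Each tower of the combined model gets its own optimizer instance.
    est = tf.estimator.DNNLinearCombinedClassifier(
        linear_feature_columns=fc,
        linear_optimizer=adagrad.AdagradOptimizer(0.001),
        dnn_feature_columns=fc,
        dnn_hidden_units=[8],
        dnn_optimizer=adagrad.AdagradOptimizer(0.001))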
/external/tensorflow/tensorflow/python/keras/layers/

  embeddings_test.py
    29   from tensorflow.python.training import adagrad
    94   opt = adagrad.AdagradOptimizer(0.1)
/external/tensorflow/tensorflow/contrib/opt/python/training/

  reg_adagrad_optimizer.py
    21   from tensorflow.python.training import adagrad
    26   class RegAdagradOptimizer(adagrad.AdagradOptimizer):
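RegAdagradOptimizer is defined by subclassing the stock AdagradOptimizer, the usual way to tweak one step of an existing optimizer while inheriting the rest. An illustrative skeleton of that subclassing pattern, assuming TF 1.x; LoggingAdagrad is hypothetical, not the contrib class:

    import tensorflow as tf
    from tensorflow.python.training import adagrad

    class LoggingAdagrad(adagrad.AdagradOptimizer):
        """Hypothetical subclass: reuses Adagrad's update, adds a log line."""

        def apply_gradients(self, grads_and_vars, global_step=None, name=None):
            grads_and_vars = list(grads_and_vars)
            tf.logging.info("applying %d gradient pairs", len(grads_and_vars))
            return super(LoggingAdagrad, self).apply_gradients(
                grads_and_vars, global_step=global_step, name=name)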
/external/tensorflow/tensorflow/contrib/tensor_forest/hybrid/python/

  hybrid_model.py
    31   from tensorflow.python.training import adagrad
    45   optimizer_class=adagrad.AdagradOptimizer,
/external/tensorflow/tensorflow/core/api_def/base_api/

  api_def_ResourceApplyAdagrad.pbtxt
    35   summary: "Update '*var' according to the adagrad scheme."

  api_def_ApplyAdagrad.pbtxt
    41   summary: "Update '*var' according to the adagrad scheme."

  api_def_ResourceApplyAdagradDA.pbtxt
    58   summary: "Update '*var' according to the proximal adagrad scheme."

  api_def_ResourceSparseApplyAdagrad.pbtxt
    41   summary: "Update relevant entries in '*var' and '*accum' according to the adagrad scheme."

  api_def_ApplyAdagradDA.pbtxt
    64   summary: "Update '*var' according to the proximal adagrad scheme."

  api_def_SparseApplyAdagrad.pbtxt
    47   summary: "Update relevant entries in '*var' and '*accum' according to the adagrad scheme."
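For reference, the "adagrad scheme" these summaries name is the dense update below; the Sparse variants apply the same rule only to the rows indexed by the gradient. A NumPy restatement of the documented update, not the kernel code:

    import numpy as np

    def apply_adagrad(var, accum, grad, lr):
        # accum += grad^2; var -= lr * grad / sqrt(accum)
        accum += grad * grad
        var -= lr * grad / np.sqrt(accum)
        return var, accum

    var, accum = np.array([1.0, 2.0]), np.full(2, 0.1)
    var, accum = apply_adagrad(var, accum, np.array([0.1, 0.1]), lr=3.0)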