/external/tensorflow/tensorflow/python/keras/optimizer_v2/ |
D | legacy_learning_rate_decay.py |
  | 32  def exponential_decay(learning_rate,   argument
  | 186 def polynomial_decay(learning_rate,   argument
  | 287 def natural_exp_decay(learning_rate,   argument
  | 375 def inverse_time_decay(learning_rate,   argument
  | 458 def cosine_decay(learning_rate, global_step, decay_steps, alpha=0.0, name=None):   argument
  | 521 def cosine_decay_restarts(learning_rate,   argument
  | 598 def linear_cosine_decay(learning_rate,   argument
  | 683 def noisy_linear_cosine_decay(learning_rate,   argument
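The helpers above are the legacy (TF1-style) decay schedules. As a usage sketch, assuming they are surfaced through tf.compat.v1.train (the usual public path for legacy_learning_rate_decay.py), exponential decay scales the initial rate by decay_rate ** (global_step / decay_steps); the values below are illustrative only:

    import tensorflow as tf

    global_step = tf.Variable(0, trainable=False, name="global_step")

    # Start at 0.1 and multiply by 0.96 every 1000 steps (staircase=True makes
    # the decay happen in discrete jumps rather than continuously).
    decayed_lr = tf.compat.v1.train.exponential_decay(
        learning_rate=0.1,
        global_step=global_step,
        decay_steps=1000,
        decay_rate=0.96,
        staircase=True)
    # In graph mode this is a Tensor; under eager execution the legacy helpers
    # return a callable that yields the current decayed rate.

The other schedules listed (polynomial_decay, cosine_decay, and so on) generally follow the same (learning_rate, global_step, decay_steps, ...) calling convention.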
|
D | adam.py |
  | 108 learning_rate=0.001,   argument
  | 332 learning_rate=0.001,   argument
|
D | adamax.py | 93 learning_rate=0.001, argument
|
D | ftrl.py | 89 learning_rate=0.001, argument
|
D | adadelta.py | 82 learning_rate=0.001, argument
|
D | nadam.py | 68 learning_rate=0.001, argument
|
D | gradient_descent.py | 104 learning_rate=0.01, argument
|
D | rmsprop.py | 99 learning_rate=0.001, argument
|
D | adagrad.py | 63 learning_rate=0.001, argument
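Every optimizer_v2 constructor indexed above takes learning_rate as a keyword argument, defaulting to 0.001 except for SGD in gradient_descent.py, which defaults to 0.01. A minimal sketch of the public API, assuming the standard tf.keras.optimizers exports of these classes:

    import tensorflow as tf

    # learning_rate accepts a float, a tensor, a LearningRateSchedule, or a
    # zero-argument callable; the defaults are shown explicitly here.
    adam = tf.keras.optimizers.Adam(learning_rate=0.001)
    sgd = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9)

    model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
    model.compile(optimizer=adam, loss="mse")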
|
D | adam_test.py |
  | 219 learning_rate = lambda: 0.001   function
  | 671 learning_rate = lambda: 0.001   function
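These adam_test.py entries exercise a callable learning rate (learning_rate = lambda: 0.001): the optimizer invokes the callable each time it needs the current value, so the rate can change without rebuilding the optimizer. A short sketch of that pattern (lr_value is a hypothetical name):

    import tensorflow as tf

    lr_value = 0.001
    # The lambda is re-evaluated on every use, so it always sees the latest lr_value.
    opt = tf.keras.optimizers.Adam(learning_rate=lambda: lr_value)

    # Later, e.g. between epochs, the rate can simply be rebound:
    lr_value = 0.0005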
|
/external/tensorflow/tensorflow/python/training/ |
D | adagrad_test.py |
  | 53  learning_rate = lambda: 3.0   function
  | 317 learning_rate = lambda: 3.0   function
|
D | adam_test.py |
  | 183 learning_rate = lambda: 0.001   function
  | 401 learning_rate = lambda: 0.001   function
|
D | gradient_descent.py | 34 def __init__(self, learning_rate, use_locking=False, name="GradientDescent"): argument
|
D | proximal_gradient_descent.py | 41 def __init__(self, learning_rate, l1_regularization_strength=0.0, argument
|
D | momentum.py | 46 def __init__(self, learning_rate, momentum, argument
|
D | adadelta.py | 38 def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-8, argument
|
D | proximal_adagrad.py | 43 def __init__(self, learning_rate, initial_accumulator_value=0.1, argument
|
D | adagrad_da.py | 49 learning_rate, argument
|
D | rmsprop.py | 66 learning_rate, argument
|
D | adagrad.py | 41 def __init__(self, learning_rate, initial_accumulator_value=0.1, argument
|
D | ftrl.py | 43 learning_rate, argument
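The classes under tensorflow/python/training are the TF1-style optimizers; as the signatures above show, learning_rate is the first constructor argument and (apart from adadelta.py's 0.001 default) has no default value. A brief sketch assuming the usual tf.compat.v1.train exports:

    import tensorflow as tf

    # TF1-style optimizers: the rate must be supplied explicitly.
    gd = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01)
    mom = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
    adagrad = tf.compat.v1.train.AdagradOptimizer(learning_rate=0.1,
                                                  initial_accumulator_value=0.1)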
|
/external/tensorflow/tensorflow/core/kernels/boosted_trees/ |
D | training_ops.cc |
  | 79  const auto learning_rate = learning_rate_t->scalar<float>()();   in Compute() local
  | 170 OpKernelContext* const context, const float learning_rate,   in FindBestSplitsPerNode()
  | 281 const auto learning_rate = learning_rate_t->scalar<float>()();   in Compute() local
  | 390 OpKernelContext* const context, const float learning_rate,   in FindBestSplitsPerNode()
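In these boosted-trees kernels, learning_rate is read as a scalar float and plays the usual gradient-boosting shrinkage role: each new tree's leaf contribution is scaled down before being added to the ensemble. A rough pure-Python sketch of that idea (illustrative names, not the kernel's own logic):

    def apply_leaf_update(current_logit, leaf_contribution, learning_rate=0.1):
        """Standard gradient-boosting shrinkage: damp the new tree's output."""
        return current_logit + learning_rate * leaf_contribution

    # Example: an existing prediction of 0.3 plus a leaf value of 0.5, shrunk by 0.1.
    print(apply_leaf_update(0.3, 0.5))  # 0.35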
|
/external/tensorflow/tensorflow/compiler/mlir/tfr/examples/mnist/ |
D | mnist_train.py | 41 learning_rate = 0.01 variable
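Here learning_rate = 0.01 is a module-level constant in the TFR MNIST training example. A condensed sketch of how such a constant typically feeds an optimizer in a custom training step (the SGD choice and the model/images/labels names are placeholders, not necessarily what mnist_train.py itself does):

    import tensorflow as tf

    learning_rate = 0.01
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)

    @tf.function
    def train_step(model, images, labels):
        with tf.GradientTape() as tape:
            logits = model(images, training=True)
            loss = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(
                    labels, logits, from_logits=True))
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss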
|
/external/webrtc/rtc_base/ |
D | rolling_accumulator.h | 109 double ComputeWeightedMean(double learning_rate) const { in ComputeWeightedMean()
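In RollingAccumulator, ComputeWeightedMean uses learning_rate as a per-sample decay factor: the newest sample gets the largest weight and each step back in the buffer multiplies the weight by learning_rate. A small Python sketch of that kind of geometrically weighted mean (an approximation of the idea, not the C++ implementation itself):

    def compute_weighted_mean(samples, learning_rate):
        """Weighted mean of samples (oldest first, newest last) with weights
        learning_rate, learning_rate**2, ... assigned newest to oldest."""
        if not samples:
            return 0.0
        if not 0.0 < learning_rate < 1.0:
            return sum(samples) / len(samples)  # fall back to a plain mean
        weighted_sum = weight_sum = 0.0
        weight = 1.0
        for sample in reversed(samples):  # newest first
            weight *= learning_rate
            weighted_sum += weight * sample
            weight_sum += weight
        return weighted_sum / weight_sum

    print(compute_weighted_mean([1.0, 2.0, 3.0], learning_rate=0.5))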
|
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
D | auto_parallel_test.cc | 41 Output learning_rate = ops::Const(s.WithOpName("learning_rate"), 0.01f, {1}); in TEST_F() local
|