Searched refs:apply_gradients (Results 1 – 25 of 165) sorted by relevance
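Most of the hits below are optimizer unit tests that build (gradient, variable) pairs and pass them to apply_gradients to obtain an update op. As a point of reference, here is a minimal sketch of that graph-mode pattern, assuming the TF 1.x-style API reachable through tf.compat.v1; the constants and learning rate are illustrative and not taken from any of the listed tests:

    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()

    # Two variables and hand-written gradients, mirroring the
    # zip([grads0, grads1], [var0, var1]) shape used in the tests below.
    var0 = tf.Variable([1.0, 2.0])
    var1 = tf.Variable([3.0, 4.0])
    grads0 = tf.constant([0.1, 0.1])
    grads1 = tf.constant([0.01, 0.01])

    opt = tf.train.GradientDescentOptimizer(learning_rate=3.0)
    update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(update)  # var0 -> [0.7, 1.7], var1 -> [2.97, 3.97]
        print(sess.run([var0, var1]))

The same call shape recurs throughout the Ftrl, Adagrad, Adam, and SGD tests listed below; only the optimizer constructor changes.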

/external/tensorflow/tensorflow/compiler/tests/
ftrl_test.py
50 ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
65 adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
85 ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
100 sgd_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
124 ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
158 ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
191 ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
225 ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
264 ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
304 update0 = opt0.apply_gradients([(grads0, var0)])
[all …]
proximal_gradient_descent_test.py
42 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
64 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
86 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
108 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
127 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
proximal_adagrad_test.py
45 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
76 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
100 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
124 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
143 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
adagrad_test.py
41 ada_update = ada_opt.apply_gradients(
69 ada_update = ada_opt.apply_gradients(
98 ada_update1 = ada_opt.apply_gradients(
100 ada_update2 = ada_opt.apply_gradients(
/external/tensorflow/tensorflow/python/training/
adagrad_test.py
61 ada_update = ada_opt.apply_gradients(
75 ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
130 ada_update = ada_opt.apply_gradients(
164 ada_update = ada_opt.apply_gradients(
197 repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
199 aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
257 ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
283 ada_update1 = ada_opt.apply_gradients(
285 ada_update2 = ada_opt.apply_gradients(
332 ada_update = ada_opt.apply_gradients(
[all …]
adam_test.py
82 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
145 repeated_update = adam.AdamOptimizer().apply_gradients(
147 aggregated_update = adam.AdamOptimizer().apply_gradients(
193 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
225 opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
270 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
309 update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
310 update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
342 optimizer.apply_gradients([(grads0, var0)])
349 optimizer.apply_gradients([(grads0, var0)])
[all …]
ftrl_test.py
55 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
94 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
143 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
173 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
211 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
248 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
284 update0 = opt0.apply_gradients([(grads0, var0)])
285 update1 = opt1.apply_gradients([(grads1, var1)])
322 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
gradient_descent_test.py
48 sgd_op = optimizer.apply_gradients(
71 sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
98 sgd_op = gradient_descent.GradientDescentOptimizer(lr).apply_gradients(
181 lrate).apply_gradients(zip([grads0, grads1], [var0, var1]))
215 sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
246 sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
269 optimizer.apply_gradients([(grad, self.v)])
/external/tensorflow/tensorflow/contrib/opt/python/training/
reg_adagrad_optimizer_test.py
50 ada_update = ada_opt.apply_gradients(
102 ada_update = ada_opt.apply_gradients(
130 ada_update = ada_opt.apply_gradients(
158 3.0).apply_gradients([(grad_repeated_index,
161 3.0).apply_gradients([(grad_aggregated, aggregated_update_var)])
216 ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
241 ada_update1 = ada_opt.apply_gradients(
243 ada_update2 = ada_opt.apply_gradients(
287 ada_update = ada_opt.apply_gradients(
321 ada_update = ada_opt.apply_gradients(
lazy_adam_optimizer_test.py
85 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
151 repeated_update = repeated_update_opt.apply_gradients(
154 aggregated_update = aggregated_update_opt.apply_gradients(
197 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
221 opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
265 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
302 update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
303 update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
334 optimizer.apply_gradients([(grads0, var0)])
341 optimizer.apply_gradients([(grads0, var0)])
[all …]
adamax_test.py
101 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
160 repeated_update = adamax.AdaMaxOptimizer().apply_gradients(
162 aggregated_update = adamax.AdaMaxOptimizer().apply_gradients(
195 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
218 opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
258 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
294 update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
295 update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
326 optimizer.apply_gradients([(grads0, var0)])
336 optimizer.apply_gradients([(grads0, var0)])
lazy_adam_gs_optimizer_test.py
89 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
169 repeated_update = repeated_update_opt.apply_gradients(
174 aggregated_update = aggregated_update_opt.apply_gradients(
222 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
250 opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
297 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
337 update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
339 update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
371 optimizer.apply_gradients([(grads0, var0)])
378 optimizer.apply_gradients([(grads0, var0)])
[all …]
adam_gs_optimizer_test.py
86 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
151 global_step=repeated_index_global_step).apply_gradients(
155 global_step=aggregated_global_step).apply_gradients(
203 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
231 opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
278 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
318 update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
320 update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
353 optimizer.apply_gradients([(grads0, var0)])
360 optimizer.apply_gradients([(grads0, var0)])
[all …]
addsign_test.py
96 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
98 neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
114 opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
120 opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
204 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
206 neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
powersign_test.py
97 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
99 neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
116 opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
122 opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
209 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]),
211 neg_update = opt.apply_gradients(zip([-grads0, -grads1], [var0, var1]),
shampoo_test.py
65 update = opt.apply_gradients(zip([grad], [var]),
67 update_2 = opt.apply_gradients(zip([grad_2], [var]),
116 update = opt.apply_gradients(zip([grad], [var]),
118 update_2 = opt.apply_gradients(zip([grad_2], [var]),
176 update = opt.apply_gradients(zip([grad], [var]),
178 update_2 = opt.apply_gradients(zip([grad_2], [var]),
265 update = opt.apply_gradients(zip([grad], [var]),
267 update_2 = opt.apply_gradients(zip([grad_2], [var]),
321 update = opt.apply_gradients(zip([grad], [var]),
323 update_2 = opt.apply_gradients(zip([grad_2], [var]),
[all …]
/external/tensorflow/tensorflow/contrib/optimizer_v2/
adagrad_test.py
50 ada_update = ada_opt.apply_gradients(
99 ada_update = ada_opt.apply_gradients(
130 ada_update = ada_opt.apply_gradients(
162 repeated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
164 aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
220 ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
245 ada_update1 = ada_opt.apply_gradients(
247 ada_update2 = ada_opt.apply_gradients(
adam_test.py
82 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
140 repeated_update = adam.AdamOptimizer().apply_gradients(
142 aggregated_update = adam.AdamOptimizer().apply_gradients(
175 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
200 opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
240 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
277 update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
278 update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
310 optimizer.apply_gradients([(grads0, var0)])
320 optimizer.apply_gradients([(grads0, var0)])
/external/tensorflow/tensorflow/python/keras/optimizer_v2/
adagrad_test.py
90 ada_update = ada_opt.apply_gradients(
104 ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
141 ada_update = ada_opt.apply_gradients(
155 ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
187 ada_update = ada_opt.apply_gradients(
201 ada_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
248 ada_update = ada_opt.apply_gradients(
287 ada_update = ada_opt.apply_gradients(
331 repeated_update = adagrad.Adagrad(3.0).apply_gradients(
333 aggregated_update = adagrad.Adagrad(3.0).apply_gradients(
[all …]
adam_test.py
135 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
190 repeated_update = adam.Adam().apply_gradients(
192 aggregated_update = adam.Adam().apply_gradients(
232 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
245 opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
282 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
295 opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
328 repeated_update = opt_repeated.apply_gradients(
330 aggregated_update = opt_aggregated.apply_gradients(
341 opt_repeated.apply_gradients(
[all …]
adamax_test.py
104 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
163 repeated_update = adamax.Adamax().apply_gradients(
165 aggregated_update = adamax.Adamax().apply_gradients(
197 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
213 opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
247 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
263 opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
294 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
331 update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
332 update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
gradient_descent_test.py
50 sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
66 sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
72 sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
82 sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
125 sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
182 sgd_op = gradient_descent.SGD(lrate).apply_gradients(
216 sgd_op = gradient_descent.SGD(3.0).apply_gradients(
239 3.0, decay=0.5).apply_gradients(
266 optimizer.apply_gradients([(grad, self.v)])
314 mom_update = mom_opt.apply_gradients(
[all …]
ftrl_test.py
55 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
94 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
146 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
176 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
214 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
251 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
287 update0 = opt0.apply_gradients([(grads0, var0)])
288 update1 = opt1.apply_gradients([(grads1, var1)])
325 update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
/external/tensorflow/tensorflow/contrib/eager/python/examples/revnet/
main.py
32 def apply_gradients(optimizer, grads, vars_, global_step=None):  [function definition]
34 optimizer.apply_gradients(zip(grads, vars_), global_step=global_step)
59 global apply_gradients # pylint:disable=global-variable-undefined
60 apply_gradients = tfe.defun(apply_gradients)
212 apply_gradients(
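The revnet example defines apply_gradients as a free-standing helper and later compiles it with tfe.defun (a contrib-era API, line 60 above). A rough sketch of the same idea using tf.function, which replaced defun in core TensorFlow 2.x, could look like this; the helper name mirrors main.py, everything else is illustrative:

    import tensorflow as tf

    @tf.function  # plays the role of tfe.defun in the snippet above
    def apply_gradients(optimizer, grads, vars_):
        # TF2 Keras optimizers track their own iteration count, so the
        # global_step argument from the contrib example is dropped here.
        optimizer.apply_gradients(zip(grads, vars_))

Wrapping the update in tf.function lets the gradient application run as a single graph call, which is what the defun wrapping does in the eager example.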
/external/tensorflow/tensorflow/contrib/eager/python/examples/resnet50/
resnet50_test.py
70 def apply_gradients(model, optimizer, gradients):  [function definition]
71 optimizer.apply_gradients(zip(gradients, model.variables))
128 apply_gradients(model, optimizer,
153 apply_gradients(model, optimizer,
161 apply_gradients(model, optimizer,
267 apply_grads = apply_gradients
270 apply_grads = tfe.function(apply_gradients)
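In the resnet50 benchmark the helper takes the model itself and zips the gradients with model.variables (line 71 above). A minimal eager-mode sketch of that calling pattern follows; the toy model, loss, and data are placeholders for illustration and are not taken from resnet50_test.py:

    import tensorflow as tf

    def apply_gradients(model, optimizer, gradients):
        # Pair each gradient with the matching model variable, as the
        # resnet50_test.py helper does.
        optimizer.apply_gradients(zip(gradients, model.variables))

    model = tf.keras.Sequential([tf.keras.layers.Dense(2)])
    optimizer = tf.keras.optimizers.SGD(0.01)
    images = tf.random.normal([4, 3])
    labels = tf.random.normal([4, 2])

    with tf.GradientTape() as tape:
        loss = tf.reduce_mean(tf.square(model(images) - labels))
    gradients = tape.gradient(loss, model.variables)
    apply_gradients(model, optimizer, gradients)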
