
Lines Matching refs:devices

202   num_subchunks different other devices at each tick.  Where multiple
203 independent data channels exist between devices, this strategy
224 devices = num_workers * num_gpus
225 if devices == 0:
240 pred_by_s_d = [[-1 for d in range(0, devices)]
242 rank_by_s_d = [[-1 for d in range(0, devices)]
245 for d in range(0, devices):
246 for t in range(0, devices):
249 pred_by_s_d[s][d] = perms_by_s[s][(t + devices - 1) % devices]
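
The fragments at lines 202-249 come from the helper that precomputes, for every subchunk, a ring ordering of the devices plus predecessor and rank tables. As a rough illustration of what those tables hold (not the code in this file, which also derives the per-subchunk orderings from num_workers, num_subchunks and a GPU permutation; the function name and argument below are invented for the sketch), here is a minimal mapping from per-subchunk ring orders to each device's predecessor:

    # Hypothetical helper: given one ring ordering per subchunk, record each
    # device's predecessor, i.e. the device it receives a chunk from on every
    # tick of the ring algorithm.
    def ring_predecessors(perms_by_s):
        num_subchunks = len(perms_by_s)
        num_devices = len(perms_by_s[0])
        pred_by_s_d = [[-1] * num_devices for _ in range(num_subchunks)]
        for s in range(num_subchunks):
            for rank in range(num_devices):
                d = perms_by_s[s][rank]          # device at this ring position
                pred_by_s_d[s][d] = perms_by_s[s][(rank - 1) % num_devices]
        return pred_by_s_d

    # 4 devices, 2 subchunks with different ring orders, so a device receives
    # from a different peer for each subchunk.
    print(ring_predecessors([[0, 1, 2, 3], [0, 2, 1, 3]]))
    # [[3, 0, 1, 2], [3, 2, 0, 1]]
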
280 devices = [t.device for t in input_tensors]
284 input_tensors, devices,
297 def _build_ring_gather(input_tensors, devices, num_subchunks,
304 devices: array of device name strings
331 with ops.device(devices[d]):
340 with ops.device(devices[d]):
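
Lines 280-340 come from the ring all-reduce builder and _build_ring_gather, which pin each reduction on devices[d] with ops.device. The data movement they express can be simulated on plain Python lists; the sketch below (sum reduction, one subchunk, no device placement, invented function name) only illustrates the ring pattern, not the graph construction itself:

    # Each "device" holds its tensor pre-split into n chunks (n = device count).
    def ring_all_reduce(chunked):
        n = len(chunked)
        bufs = [[list(c) for c in dev] for dev in chunked]
        # Reduce pass: after n-1 ticks device i owns the full sum of chunk (i+1) % n.
        for t in range(n - 1):
            for i in range(n):
                idx = (i - 1 - t) % n            # chunk arriving from predecessor
                src = bufs[(i - 1) % n][idx]
                dst = bufs[i][idx]
                for k in range(len(dst)):
                    dst[k] += src[k]
        # Gather pass: circulate the reduced chunks so every device has all of them.
        for t in range(n - 1):
            for i in range(n):
                idx = (i - t) % n
                bufs[i][idx] = list(bufs[(i - 1) % n][idx])
        return bufs

    # Three devices, vectors already split into three one-element chunks.
    print(ring_all_reduce([[[1], [2], [3]], [[10], [20], [30]], [[100], [200], [300]]]))
    # every device ends with [[111], [222], [333]]
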
432 The concept is to arrange the participating n devices in
433 a linear sequence where devices exchange data pairwise
435 phase there are lg(n) rounds where devices exchange
463 devices = [t.device for t in input_tensors]
465 reduced_shards = _build_recursive_hd_gather(input_tensors, devices, red_op)
468 output_tensors = _build_recursive_hd_scatter(reduced_shards, devices)
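
Lines 432-468 describe the recursive halving-doubling variant. Before looking at the gather and scatter helpers below, the exchange schedule is easy to see in isolation; this prints the textbook pairing for the lg(n) rounds (the builder in this file may organize the pairs slightly differently, and the helper name here is made up):

    import math

    def hd_exchange_schedule(num_devices):
        rounds = int(math.log2(num_devices))
        assert 2 ** rounds == num_devices, "requires a power-of-2 device count"
        schedule = []
        for r in range(rounds):
            span = 2 ** r
            group = 2 * span
            schedule.append([(d, d + span)
                             for base in range(0, num_devices, group)
                             for d in range(base, base + span)])
        return schedule

    for r, pairs in enumerate(hd_exchange_schedule(8)):
        print("round", r, pairs)
    # round 0 [(0, 1), (2, 3), (4, 5), (6, 7)]
    # round 1 [(0, 2), (1, 3), (4, 6), (5, 7)]
    # round 2 [(0, 4), (1, 5), (2, 6), (3, 7)]
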
474 def _build_recursive_hd_gather(input_tensors, devices, red_op):
479 devices: a list of strings naming the devices hosting input_tensors,
490 num_devices = len(devices)
498 new_chunks = [[] for _ in devices]
503 left_dev = devices[d]
504 right_dev = devices[d + span]
515 def _build_recursive_hd_scatter(input_tensors, devices):
520 devices: a list of strings naming the devices on which the reconstituted
526 num_devices = len(devices)
533 new_chunks = [[] for _ in devices]
540 left_dev = devices[left_idx]
541 right_dev = devices[right_idx]
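
Lines 474-541 are from _build_recursive_hd_gather and _build_recursive_hd_scatter, which build the two phases as a TensorFlow graph, placing each partial reduction (red_op) on one of the named devices. Below is a minimal runnable simulation of both phases on plain Python lists, assuming a power-of-two device count, a length divisible by it, and sum as the reduction; it is a stand-in for the idea, not the graph-building code:

    def recursive_hd_all_reduce(values):
        n = len(values)
        length = len(values[0])
        assert n >= 2 and n & (n - 1) == 0, "device count must be a power of 2"
        assert length % n == 0, "tensor length must be divisible by device count"
        bufs = [list(v) for v in values]           # per-device working copy
        segs = [[0, length] for _ in range(n)]     # slice each device still owns
        # Gather phase (recursive halving): after lg(n) rounds device d owns
        # the fully reduced slice [d * length/n, (d+1) * length/n).
        half = n // 2
        while half >= 1:
            for d in range(n):
                p = d ^ half
                if d > p:
                    continue                        # handle each pair once
                lo, hi = segs[d]
                mid = (lo + hi) // 2
                for i in range(lo, mid):            # lower-ranked device keeps lower half
                    bufs[d][i] += bufs[p][i]
                for i in range(mid, hi):            # higher-ranked device keeps upper half
                    bufs[p][i] += bufs[d][i]
                segs[d], segs[p] = [lo, mid], [mid, hi]
            half //= 2
        # Scatter phase (recursive doubling): reverse the exchanges so every
        # device reconstitutes the complete reduced tensor.
        half = 1
        while half < n:
            for d in range(n):
                p = d ^ half
                if d > p:
                    continue
                (d_lo, d_hi), (p_lo, p_hi) = segs[d], segs[p]
                for i in range(p_lo, p_hi):         # d copies p's reduced slice
                    bufs[d][i] = bufs[p][i]
                for i in range(d_lo, d_hi):         # p copies d's reduced slice
                    bufs[p][i] = bufs[d][i]
                merged = [min(d_lo, p_lo), max(d_hi, p_hi)]
                segs[d], segs[p] = merged, merged
            half *= 2
        return bufs

    # Four devices, each holding [1, 2, 3, 4]; every device ends with [4, 8, 12, 16].
    print(recursive_hd_all_reduce([[1, 2, 3, 4]] * 4))
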
556 parameter servers. Suppose tensor length is n, there are d devices
560 devices then join the g fully reduced fragments they receive from
568 gather_devices: list of names of devices on which reduction shards
591 gather_devices: list of names of devices on which reduction shards
628 dst_devices: list of names of devices at which the fully-reduced value
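
Lines 556-628 are from the shuffle all-reduce builder, whose docstring is quoted above: each device sends a size-n/g fragment to each of g gather devices, the gather devices reduce across the d incoming fragments, and the devices join the g reduced fragments they get back. Ignoring placement (gather_devices only determines where each shard's reduction runs), the data movement amounts to the sketch below; the function name and scalar sum reduction are assumptions made for the illustration:

    def shuffle_all_reduce(values, num_gather_shards):
        g = num_gather_shards
        length = len(values[0])
        assert length % g == 0, "tensor length must be divisible by shard count"
        frag = length // g
        # Each gather shard s reduces fragment s of every device's tensor.
        reduced_shards = []
        for s in range(g):
            shard = [0] * frag
            for v in values:                        # one size-n/g fragment per device
                for k in range(frag):
                    shard[k] += v[s * frag + k]
            reduced_shards.append(shard)
        # Every device joins the g fully reduced fragments it receives back.
        joined = [x for shard in reduced_shards for x in shard]
        return [list(joined) for _ in values]

    # 3 devices, tensor length 4, 2 gather shards -> every device gets [3, 6, 9, 12].
    print(shuffle_all_reduce([[1, 2, 3, 4]] * 3, 2))
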
642 def _split_by_task(devices, values):
643 """Partition devices and values by common task.
646 devices: list of device name strings
647 values: list of T @{tf.Tensor} of same length as devices.
654 the specific devices to which the values are local, and
658 ValueError: devices must be same length as values.
660 num_devices = len(devices)
662 raise ValueError("len(devices) must equal len(values)")
667 m = pattern.search(devices[d])
673 per_task_devices[index].append(devices[d])
676 assert False, "failed to parse device %s" % devices[d]
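
Lines 642-676 are from _split_by_task, which groups devices (and their co-located values) by the worker task parsed out of each device name. Here is an approximate standalone version of that grouping; the regex is an assumption for fully qualified /job:.../replica:.../task:N device strings, not necessarily the pattern this file actually compiles:

    import collections
    import re

    _TASK_RE = re.compile(r"(/job:\w+/replica:\d+/task:\d+)")   # assumed pattern

    def split_by_task(devices, values):
        if len(devices) != len(values):
            raise ValueError("len(devices) must equal len(values)")
        per_task_devices = collections.OrderedDict()
        per_task_values = collections.OrderedDict()
        for dev, val in zip(devices, values):
            m = _TASK_RE.search(dev)
            assert m, "failed to parse device %s" % dev
            key = m.group(1)
            per_task_devices.setdefault(key, []).append(dev)
            per_task_values.setdefault(key, []).append(val)
        return list(per_task_devices.values()), list(per_task_values.values())

    devs = ["/job:worker/replica:0/task:0/device:GPU:0",
            "/job:worker/replica:0/task:0/device:GPU:1",
            "/job:worker/replica:0/task:1/device:GPU:0"]
    print(split_by_task(devs, [10, 11, 20]))
    # two groups: the task:0 devices with values [10, 11], the task:1 device with [20]
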
726 devices = [t.device for t in input_tensors]
727 per_worker_devices, per_worker_values = _split_by_task(devices, input_tensors)
816 devices = [t.device for t in input_tensors]
817 per_worker_devices, per_worker_values = _split_by_task(devices, input_tensors)
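
The two call sites at lines 726-727 and 816-817 both start their builders the same way: take the device of every input tensor and group the tensors per worker with _split_by_task. What presumably follows in those builders is, in outline, a two-level reduction; this toy stand-in (scalar values, sum reduction, no graph construction, invented name) shows only that outline:

    def hierarchical_all_reduce(per_worker_values):
        # Stage 1: each worker reduces its own values locally.
        per_worker_sums = [sum(vals) for vals in per_worker_values]
        # Stage 2: reduce across workers, then hand the result back to every device.
        total = sum(per_worker_sums)
        return [[total] * len(vals) for vals in per_worker_values]

    # Two workers: one with 2 GPUs, one with 1 GPU.
    print(hierarchical_all_reduce([[10, 11], [20]]))   # [[41, 41], [41]]
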