/external/autotest/puppylab/lab_manifest.py
    14: shards. The afe on shards does not __need__ to be exposed.
    16: show up on the host page of hosts sent to shards on the
    20: 'abc' the shards will do their heartbeat against
    22: 2. Shards: A list of boards for which to create shards. Note
    32: # Shards will have their afes listening on base_port + shard number.
    36: # Boards for which to create shards.
    37: shards = ['board:stumpy']
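The manifest above configures a toy cluster: each board listed in shards gets its own shard, whose AFE listens on base_port plus the shard's number (line 32). A minimal sketch of that port assignment; the names come from the snippet, but base_port = 8001 is an assumed value, not taken from the file:

    base_port = 8001              # assumed value; the real one lives in lab_manifest.py
    shards = ['board:stumpy']     # boards for which to create shards

    def afe_port(shard_number):
        """Each shard's AFE listens on base_port + its shard number."""
        return base_port + shard_number

    for number, board in enumerate(shards):
        print('%s -> AFE on port %d' % (board, afe_port(number)))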
/external/autotest/puppylab/clusterctl
    56: # TODO: Enable multiple shards via command line args.
    122: actually in control of all the shards in the cluster and can address
    135: @param num_shards: The number of shards we wish to add to the cluster.
    154: if lab_manifest.shards:
    155: board = lab_manifest.shards.pop()
    172: @param num_shards: The number of shards in the cluster. Each shard
    248: shards if their shadow configs are as expected. If the shadow
    282: @param num_shards: Number of shards. You cannot change
    283: the number of shards on a running cluster, you need
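Lines 154-155 show how clusterctl hands boards out: each newly created shard pops the next board off the manifest's list, so a board is never assigned twice. The pattern in isolation (sketch, with a stand-in list):

    shards = ['board:stumpy', 'board:link']   # stand-in for lab_manifest.shards

    def next_board():
        """Claim the next board for a freshly created shard, or None."""
        return shards.pop() if shards else None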
/external/autotest/client/site_tests/graphics_dEQP/generate_controlfiles.py
    7: 3) Decomposing a test into shards. Ideally shard_count is chosen such that
    16: Test = namedtuple('Test', 'filter, suite, shards, time, hasty, notpass')
    36: Test('dEQP-EGL.functional', Suite.none, shards=1, hasty=False, notpass=True, time='LENGTHY'),
    37: Test('dEQP-EGL.info', Suite.none, shards=1, hasty=False, notpass=True, time='SHORT'),
    38: Test('dEQP-EGL.performance', Suite.none, shards=1, hasty=False, notpass=True, time='SHORT'),
    39: Test('dEQP-EGL.stress', Suite.none, shards=1, hasty=False, notpass=True, time='LONG'),
    40: Test('dEQP-GLES2.accuracy', Suite.bvtpb, shards=1, hasty=False, notpass=True, time='FAST'),
    41: Test('dEQP-GLES2.capability', Suite.bvtpb, shards=1, hasty=False, notpass=True, time='FAST'),
    42: Test('dEQP-GLES2.functional', Suite.daily, shards=1, hasty=False, notpass=True, time='LENGTHY'),
    43: Test('dEQP-GLES2.functional', Suite.daily, shards=1, hasty=True, notpass=False, time='LONG')
    [all...]
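Line 7 states the goal of sharding here: pick shard_count so that each generated control file finishes in a reasonable time. A sketch of expanding one Test row into per-shard names; the Test tuple is from line 16, while the naming scheme and the plain-string suite are illustrative:

    from collections import namedtuple

    Test = namedtuple('Test', 'filter, suite, shards, time, hasty, notpass')

    def shard_names(test):
        """One generated control-file name per shard (naming is illustrative)."""
        for index in range(test.shards):
            yield '%s.%d_of_%d' % (test.filter, index, test.shards)

    t = Test('dEQP-GLES2.functional', 'daily', shards=2, time='LENGTHY',
             hasty=False, notpass=True)
    assert list(shard_names(t)) == ['dEQP-GLES2.functional.0_of_2',
                                    'dEQP-GLES2.functional.1_of_2']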
/external/autotest/cli/shard.py
    6: manage shards in Autotest.
    11: list: lists shards with label
    25: msg_items = '<shards>'
    33: attribute_name='shards',
    38: return self.shards
    52: if self.shards:
    53: filters['hostname__in'] = self.shards
    59: """Prints a warning if one label is assigned to multiple shards.
    70: 'multiple shards.\n'
    97: req_items='shards')
    [all...]
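Lines 52-53 are the listing filter: when shard hostnames were passed on the command line, the RPC query is narrowed to exactly those hosts. That step on its own (sketch):

    def build_filters(shards):
        """Restrict a shard query to the hostnames given, if any."""
        filters = {}
        if shards:
            filters['hostname__in'] = shards
        return filters

    assert build_filters(['shard1'])['hostname__in'] == ['shard1']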
/external/chromium-trace/catapult/third_party/mapreduce/mapreduce/mapper_pipeline.py
    44: shards: number of shards in the job as int.
    70: shards=None):
    80: shards: number of shards. This provides a guide to mapreduce. The real
    81: number of shards is determined by how input are splited.
    83: if shards is None:
    84: shards = parameters.config.SHARD_COUNT
    96: shard_count=shards,
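Lines 83-84 are the usual fallback chain: an explicit shards argument wins, otherwise the configured default applies, and per lines 80-81 even that is only a hint to the input splitter. The fallback, sketched with a stand-in constant:

    DEFAULT_SHARD_COUNT = 8   # stand-in for parameters.config.SHARD_COUNT

    def resolve_shards(shards=None):
        """Explicit value if given, else the configured default."""
        return DEFAULT_SHARD_COUNT if shards is None else shards

    assert resolve_shards() == 8 and resolve_shards(3) == 3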
/external/chromium-trace/catapult/third_party/mapreduce/mapreduce/mapreduce_pipeline.py
    71: shards: number of shards to start as int.
    82: shards=None):
    94: shards=shards)
    117: shards: Optional. Number of output shards. Defaults to the number of
    134: shards=None):
    149: if shards is None:
    150: shards = len(filenames
    [all...]
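The reduce stage picks a different default (lines 117 and 149-150): when no output shard count is given, it uses one shard per mapper output file. Sketch:

    def resolve_output_shards(filenames, shards=None):
        """Default to one output shard per mapper output file."""
        return len(filenames) if shards is None else shards

    assert resolve_output_shards(['f0', 'f1', 'f2']) == 3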
/external/chromium-trace/catapult/third_party/mapreduce/mapreduce/shuffler.py
    197: shards=1)
    387: """Split input into multiple shards."""
    407: The output is tailored towards shuffler needs. It shards key/values using
    411: the shards. Then the list of all the same logical files will be assembled
    478: shards = mapper_spec.shard_count
    483: for i in range(shards):
    491: shards = mapreduce_state.mapreduce_spec.mapper.shard_count
    493: for _ in range(shards):
    494: filenames.append([None] * shards)
    498: for y in range(shards)
    [all...]
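Lines 491-498 build a shards-by-shards grid of filenames: each mapper shard writes one file per logical destination, and the files in column y across all rows are later assembled into the input of destination shard y. The shape, in isolation (sketch):

    shards = 4   # mapper shard_count in the real code

    # filenames[x][y]: the file mapper shard x wrote for destination shard y.
    filenames = [[None] * shards for _ in range(shards)]

    def inputs_for(y):
        """Destination shard y reads column y across all mapper shards."""
        return [filenames[x][y] for x in range(shards)]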
/external/libvpx/libvpx/tools/all_builds.py
    7: LONG_OPTIONS = ["shard=", "shards="]
    35: options = {"--shard": 0, "--shards": 1}
    44: print "Usage: %s [--shard=<n> --shards=<n>] -- [configure flag ...]"%argv[0]
    52: shards = int(options["--shards"])
    58: my_configs = filter(lambda x: x[1] % shards == shard, my_configs)
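Line 58 is the entire sharding scheme: build configurations are enumerated, and each invocation keeps only those whose index is congruent to --shard modulo --shards. The same selection in Python 3 form (the script itself is Python 2, as the print statement shows):

    def configs_for_shard(configs, shard, shards):
        """Round-robin split: config i belongs to shard i % shards."""
        return [c for i, c in enumerate(configs) if i % shards == shard]

    # With 2 shards, shard 0 builds configs 0, 2, 4, ...
    assert configs_for_shard(list('abcdef'), 0, 2) == ['a', 'c', 'e']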
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/tools/all_builds.py
    7: LONG_OPTIONS = ["shard=", "shards="]
    35: options = {"--shard": 0, "--shards": 1}
    44: print "Usage: %s [--shard=<n> --shards=<n>] -- [configure flag ...]"%argv[0]
    52: shards = int(options["--shards"])
    58: my_configs = filter(lambda x: x[1] % shards == shard, my_configs)
/external/autotest/server/system_utils.py
    32: """Get a list of shards from server database or global config.
    38: shards = config.get_config_value(
    39: 'SERVER', 'shards', default='')
    40: return [hostname.strip() for hostname in shards.split(',')]
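Lines 38-40 fall back to the SERVER/shards global-config value, a comma-separated hostname string. The parsing step on its own (sketch; unlike line 40, this version also drops empty entries):

    def parse_shards(value):
        """Split a comma-separated 'SERVER/shards' value into hostnames."""
        return [h.strip() for h in value.split(',') if h.strip()]

    assert parse_shards('shard1.example.com, shard2.example.com') == [
        'shard1.example.com', 'shard2.example.com']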
/external/skia/tools/lua/ngrams_aggregate.lua
    3: -- Get the data from all shards.
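The script apparently combines per-shard ngram data into one aggregate. A merge of per-shard count tables, sketched in Python for consistency with the other examples here (the Lua file's actual logic is not shown in the snippet):

    def merge_counts(shard_counts):
        """Sum ngram -> count tables produced by each shard."""
        total = {}
        for counts in shard_counts:
            for ngram, n in counts.items():
                total[ngram] = total.get(ngram, 0) + n
        return total

    assert merge_counts([{'ab': 2}, {'ab': 1, 'cd': 5}]) == {'ab': 3, 'cd': 5}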
/external/libchrome/base/test/test_switches.cc
    54: // Index of the test shard to run, starting from 0 (first shard) to total shards
    59: // Total number of shards. Must be the same for all shards.
    61: "test-launcher-total-shards";
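The two switches define the common index/total contract: every shard is handed the full test list plus its own index, and runs only its slice. One way a launcher could partition tests under that contract (hypothetical helper, not Chromium's code):

    def tests_for_shard(tests, shard_index, total_shards):
        """Shard shard_index (0-based) runs every total_shards-th test."""
        return tests[shard_index::total_shards]

    # Across indices 0..total-1, each test runs exactly once.
    assert sorted(sum((tests_for_shard(list(range(10)), i, 3)
                       for i in range(3)), [])) == list(range(10))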
/external/autotest/tko/retrieve_logs.cgi
    87: @param is_shard: True if hosts are shards, False otherwise.
    108: # This cgi script is run only in master (cautotest) and shards.
    110: # Only master should check drones and shards for the requested log.
    111: # Also restricted users do not have access to drones or shards,
    116: shards = system_utils.get_shards()
    120: tpool_args += _get_tpool_args(shards, job_path, True, host_set)
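Lines 108-120 encode a small access policy: only the master fans the log lookup out to drones and shards, and restricted users get neither. The policy by itself (sketch; names are illustrative):

    def hosts_to_query(is_master, is_restricted_user, drones, shards):
        """Fan out only from the master, and never for restricted users."""
        if not is_master or is_restricted_user:
            return []
        return list(drones) + list(shards)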
/external/chromium-trace/catapult/third_party/gsutil/third_party/boto/boto/kinesis/layer1.py
    97: shards, which are uniquely identified groups of data records
    100: You specify and control the number of shards that a stream is
    105: per second. You can add shards to a stream if the amount of
    106: data input increases and you can remove shards if the amount
    128: + Create more shards than are authorized for your account.
    131: The default limit for an AWS account is 10 shards per stream.
    132: If you need to create a stream with more than 10 shards,
    150: :param shard_count: The number of shards that the stream will use. The
    151: throughput of the stream is a function of the number of shards;
    152: more shards are required for greater provisioned throughput
    [all...]
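Per lines 150-152, stream throughput scales with shard_count. Under the classic per-shard write limits of 1 MB/s and 1,000 records/s (assumed here; check current AWS documentation), the usual sizing rule is to cover both dimensions and take the larger:

    import math

    def required_shards(mb_per_sec, records_per_sec):
        """Smallest shard count covering both assumed per-shard write limits
        (1 MB/s and 1000 records/s)."""
        return max(int(math.ceil(mb_per_sec / 1.0)),
                   int(math.ceil(records_per_sec / 1000.0)),
                   1)

    # 3.2 MB/s of ~500-byte records (6400 records/s) -> 7 shards.
    assert required_shards(3.2, 6400) == 7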
/cts/common/host-side/tradefed/src/com/android/compatibility/common/tradefed/command/CompatibilityConsole.java
    108: int shards = Integer.parseInt(arg);
    109: if (shards <= 1) {
    110: printLine("number of shards should be more than 1");
    113: splitModules(shards);
    173: helpBuilder.append("  --shards <shards>: Shards a run into the given number of independant");
    218: private void splitModules(int shards) {
    267: for (int i = 0; i < shards; i++) {
    272: long[] shardTimes = new long[shards];
    [all...]
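splitModules keeps a per-shard runtime tally (shardTimes, line 272), which suggests runtime-balanced assignment. A standard scheme with that shape is greedy longest-first packing: sort modules by decreasing runtime and always hand the next one to the currently lightest shard. A sketch of that idea, not the Java implementation itself:

    import heapq

    def split_modules(modules, shards):
        """Balance (name, runtime) pairs across shards, heaviest first."""
        heap = [(0, i, []) for i in range(shards)]   # (total time, shard, bucket)
        heapq.heapify(heap)
        for name, runtime in sorted(modules, key=lambda m: -m[1]):
            total, i, bucket = heapq.heappop(heap)   # lightest shard so far
            bucket.append(name)
            heapq.heappush(heap, (total + runtime, i, bucket))
        return [bucket for _, i, bucket in sorted(heap, key=lambda e: e[1])]

    assert split_modules([('a', 9), ('b', 5), ('c', 4)], 2) == [['a'], ['b', 'c']]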
/cts/common/host-side/tradefed/src/com/android/compatibility/common/tradefed/testtype/IModuleRepo.java
    39: void initialize(int shards, File testsDir, Set<IAbi> abis, List<String> deviceTokens,
    49: * @return the number of shards this repo is initialized for.
/external/autotest/server/site_tests/video_VDAStressSetup/video_VDAStressSetup.py
    36: # Break test_video_list into equal sized shards numbered 0 and only
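The comment describes splitting test_video_list into equal-sized shards numbered from 0 so that each setup run handles only its own slice. A contiguous split of that kind (sketch):

    def shard_slice(items, shards, shard_number):
        """Contiguous 0-numbered shard of roughly equal size."""
        per_shard = (len(items) + shards - 1) // shards   # ceiling division
        return items[shard_number * per_shard:(shard_number + 1) * per_shard]

    assert shard_slice(list(range(10)), 3, 0) == [0, 1, 2, 3]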
/external/autotest/site_utils/clear.sh
    9: # The main use case for this is if the master ever fails and all shards need to
/external/autotest/site_utils/backup_mysql_db.py
    180: """Dumps shards and their labels into a text file.
    183: shards and their labels.
    185: query = ('SELECT hostname, labels.name FROM afe_shards AS shards '
    187: 'ON shards.id = afe_shards_labels.shard_id '
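The snippet elides the line between 185 and 187, but the visible fragments are the start of a join from afe_shards through afe_shards_labels to the labels table. A hypothetical reconstruction of that kind of query; everything beyond the fragments shown (the second JOIN, the label table and column names) is an assumption:

    # Hypothetical: joins afe_shards to its labels via the mapping table.
    query = ('SELECT hostname, labels.name FROM afe_shards AS shards '
             'JOIN afe_shards_labels '
             'ON shards.id = afe_shards_labels.shard_id '
             'JOIN afe_labels AS labels '
             'ON afe_shards_labels.label_id = labels.id')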
/external/chromium-trace/catapult/third_party/mapreduce/mapreduce/api/map_job/abstract_datastore_input_reader.py
    24: # Maximum number of shards we'll create.
    132: and assign those pieces to shards.
    137: shard_count: number of shards to split.
    184: shard_count: number of shards.
    191: splits into requested shards, the returned list will contain KeyRanges
    249: # We need to have as many shards as it was requested. Add some Nones.
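Line 249's comment is the padding step: when the datastore cannot be split into as many KeyRanges as requested, the reader still returns exactly shard_count entries, topping the list up with Nones. Sketch:

    def pad_to_shard_count(key_ranges, shard_count):
        """Return exactly shard_count entries, padding with None."""
        return key_ranges + [None] * (shard_count - len(key_ranges))

    assert pad_to_shard_count(['kr0', 'kr1'], 4) == ['kr0', 'kr1', None, None]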
/cts/tools/cts-tradefed/README
    59: 'run cts --shards <number of shards>
/cts/tools/tradefed-host/README
    59: 'run cts --plan CTS --shards <number of shards>
/external/autotest/frontend/db_router.py
    11: - For shards, this is the shard-local database.
    15: - For the shards, this is the global database (the same as for the master).
    24: The reason shards need two distinct databases for different objects is, that
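The router splits each shard's ORM traffic across two databases: shard-local objects go to the local database, while global objects are read from the same global database the master uses. A Django-style sketch of that split (hypothetical class and model set; not the actual router):

    class ShardRouter(object):
        """Route shard-local models to 'default', global ones to 'global'."""

        GLOBAL_MODELS = frozenset(['User', 'AclGroup'])   # assumed example set

        def db_for_read(self, model, **hints):
            if model.__name__ in self.GLOBAL_MODELS:
                return 'global'
            return 'default'

        db_for_write = db_for_read   # same routing for writes in this sketch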
/external/guava/guava-tests/test/com/google/common/hash/HashingTest.java
    166: for (int shards = 1; shards <= 100000; shards++) {
    167: int b = Hashing.consistentHash(hashCode, shards);
    169: assertEquals(shards - 1, b);
    189: for (int shards = 2; shards <= MAX_SHARDS; shards++) {
    190: int chosen = Hashing.consistentHash(h, shards);
    192: map.incrementAndGet(shards);
    [all...]
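The first loop checks the defining property of Hashing.consistentHash: when the bucket count grows from shards - 1 to shards, a key either stays put or moves to the new last bucket (shards - 1), never between old buckets. Lamping and Veach's jump consistent hash gives the same contract; a Python sketch of it (Guava's internals differ in detail):

    def jump_consistent_hash(key, num_buckets):
        """Map a 64-bit key to [0, num_buckets); growing the bucket count
        only ever moves keys into the newly added bucket."""
        b, j = -1, 0
        while j < num_buckets:
            b = j
            key = (key * 2862933555777941757 + 1) % 2**64
            j = int((b + 1) * ((1 << 31) / ((key >> 33) + 1)))
        return b

    # The consistency property the Java test asserts, checked here too.
    for n in range(1, 200):
        old, new = jump_consistent_hash(42, n), jump_consistent_hash(42, n + 1)
        assert new == old or new == n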
/prebuilts/tools/linux-x86_64/kythe/proto/storage.proto
    48: // results when the store is being written to. Shards are indexed from 0.
    200: int64 shards = 2;
    212: int64 shards = 2;