
Searched full text for "strategy" (results 1 – 25 of 7561), sorted by relevance.


/aosp_15_r20/external/tensorflow/tensorflow/python/distribute/
strategy_gather_test.py
  15: """Tests for common methods in strategy classes."""
  43: strategy=[
  60: strategy=[
  71: strategy):
  72: distributed_values = strategy.experimental_distribute_values_from_function(
  76: return strategy.gather(distributed_values, axis=axis)
  82: value_on_replica for _ in range(strategy.num_replicas_in_sync)
  87: def testGatherPerReplicaDense1D0Axis(self, strategy, pure_eager):
  91: self._gather_same_shape_and_verify(single_value, axis, pure_eager, strategy)
  93: def testGatherPerReplicaDense2D0Axis(self, strategy, pure_eager):
[all …]
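The strategy_gather_test.py excerpt exercises `Strategy.gather` on values built with `experimental_distribute_values_from_function`. A minimal sketch of that pattern outside the test harness, assuming TensorFlow 2.x (the replica count simply follows the visible devices):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()  # one replica per visible GPU, else a single CPU replica

def value_fn(ctx):
    # Each replica contributes a small tensor tagged with its replica id.
    return tf.constant([float(ctx.replica_id_in_sync_group)])

distributed_values = strategy.experimental_distribute_values_from_function(value_fn)

# gather concatenates the per-replica tensors along the given axis onto the current device.
gathered = strategy.gather(distributed_values, axis=0)
print(gathered)  # e.g. [0. 1.] with two replicas in sync
```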
tpu_strategy_test.py
  85: strategy = tpu_lib.TPUStrategyV2(resolver)
  86: strategy._enable_packed_variable_in_eager_mode = enable_packed_var
  87: return strategy
  259: strategy = get_tpu_strategy(enable_packed_var)
  260: with strategy.scope():
  273: strategy = get_tpu_strategy(enable_packed_var)
  275: with strategy.scope():
  300: strategy.experimental_distribute_datasets_from_function(dataset_fn))
  310: strategy.run(step_fn, args=(next(iterator),))
  322: strategy = get_tpu_strategy(enable_packed_var)
[all …]
strategy_common_test.py
  15: """Tests for common methods in strategy classes."""
  42: strategy=[
  49: def testCaptureReplicaId(self, strategy):
  61: return strategy.run(f)
  65: def testMergeCallInitScope(self, strategy):
  66: with strategy.scope():
  83: return strategy.run(replica_fn)
  85: result = strategy.experimental_local_results(fn())
  86: self.assertAllClose(result, [12] * _get_num_replicas_per_client(strategy))
  217: strategy=[
[all …]
strategy_test_lib.py
  141: def is_mirrored_strategy(strategy: distribute_lib.Strategy) -> bool:
  143: strategy,
  148: strategy: distribute_lib.Strategy) -> bool:
  149: return isinstance(strategy, (mwms_lib.CollectiveAllReduceStrategy,
  153: def is_tpu_strategy(strategy: distribute_lib.Strategy) -> bool:
  154: return isinstance(strategy,
  358: self, strategy, input_fn, expected_values, ignore_order=False):
  361: iterable = strategy.distribute_datasets_from_function(input_fn)
  367: list(strategy.experimental_local_results(next(iterator))))
  371: self.evaluate(strategy.experimental_local_results(next(iterator)))
[all …]
distribute_lib.py
  19: and it will be usable with a variety of different `tf.distribute.Strategy`
  20: implementations. Each descendant will implement a different strategy for
  24: model definition code can run unchanged. The `tf.distribute.Strategy` API works
  36: The tutorials cover how to use `tf.distribute.Strategy` to do distributed
  39: `tf.distribute.Strategy`.
  94: when you execute the computation function that was called with `strategy.run`.
  101: A _cross-replica context_ is entered when you enter a `strategy.scope`. This
  102: is useful for calling `tf.distribute.Strategy` methods which operate across
  116: returned by `tf.distribute.Strategy.experimental_distribute_dataset` and
  117: `tf.distribute.Strategy.distribute_datasets_from_function`. They are also the
[all …]
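The distribute_lib.py docstring above distinguishes the replica context entered by `strategy.run` from the cross-replica context entered under `strategy.scope`. A short illustration of that split, using only public TF 2.x APIs (not code from the file itself):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

with strategy.scope():          # cross-replica context: variables created here are mirrored
    v = tf.Variable(1.0)

def replica_fn():
    # Replica context: this body runs once per replica under strategy.run.
    ctx = tf.distribute.get_replica_context()
    return v + tf.cast(ctx.replica_id_in_sync_group, tf.float32)

per_replica = strategy.run(replica_fn)
total = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=None)
print(total)  # sum of the per-replica values
```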
distribution_strategy_context.py
  15: """Utility to get tf.distribute.Strategy related contexts."""
  34: # replica or cross-replica context for a particular tf.distribute.Strategy.
  40: self.strategy = dist
  47: def __init__(self, strategy):
  48: _ThreadMode.__init__(self, strategy, strategy, None)
  54: _ThreadMode.__init__(self, replica_ctx.strategy, None, replica_ctx)
  105: strategy = tf.distribute.MirroredStrategy(devices=["GPU:0", "GPU:1"])
  106: with strategy.scope():
  113: non_aggregated = strategy.run(replica_fn)
  120: aggregated = strategy.run(replica_fn)
[all …]
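distribution_strategy_context.py backs the public helpers that report which of these contexts the current thread is in. A small sketch of those lookups (standard tf.distribute APIs, not the module's internals):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()

# Outside any scope, the thread sits in the default replica context.
print(tf.distribute.in_cross_replica_context())        # False
print(tf.distribute.get_replica_context() is not None) # True

with strategy.scope():
    # The scope switches the thread into this strategy's cross-replica context.
    print(tf.distribute.in_cross_replica_context())     # True
    print(tf.distribute.get_replica_context() is None)  # True
    print(tf.distribute.get_strategy() is strategy)     # True

def replica_fn():
    # Inside strategy.run each replica sees its own replica context again.
    return tf.distribute.get_replica_context().replica_id_in_sync_group

print(strategy.experimental_local_results(strategy.run(replica_fn)))
```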
strategy_combinations_test.py
  40: strategy=strategy_combinations.two_replica_strategies,
  42: def testTwoReplicaStrategy(self, strategy):
  43: with strategy.scope():
  49: one_per_replica = strategy.run(one)
  50: num_replicas = strategy.reduce(
  56: strategy=strategy_combinations.four_replica_strategies,
  58: def testFourReplicaStrategy(self, strategy):
  59: with strategy.scope():
  65: one_per_replica = strategy.run(one)
  66: num_replicas = strategy.reduce(
[all …]
tpu_strategy_model_parallelism_test.py
  68: strategy = tpu_lib.TPUStrategyV2(
  72: return strategy, num_replicas
  82: strategy, num_replicas = get_tpu_strategy()
  83: with strategy.scope():
  85: with strategy.extended.experimental_logical_device(1):
  88: self.assertLen(strategy.experimental_local_results(v), num_replicas)
  89: self.assertLen(strategy.experimental_local_results(w), num_replicas)
  91: strategy.experimental_local_results(v)[0].device)
  93: strategy.experimental_local_results(w)[0].device)
  107: result = strategy.run(f, args=(5.,))
[all …]
parameter_server_strategy_v2_test.py
  73: strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
  76: with strategy.scope():
  95: strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
  98: # The strategy scope always wins.
  99: with strategy.scope():
  109: with strategy.scope():
  128: strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
  136: # variable_creator_scope inside strategy.scope will not work.
  137: with strategy.scope():
  142: # strategy.scope still assigns variables in a round robin fashion.
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/tpu/
tpu_outside_compilation_test.py
  133: strategy = get_tpu_strategy()
  146: return strategy.run(tpu_fn, args=(25.0,))
  149: strategy.experimental_local_results(train_step()),
  150: constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
  153: strategy = get_tpu_strategy()
  166: return strategy.run(tpu_fn, args=(25.0,))
  169: strategy.experimental_local_results(train_step()),
  170: constant_op.constant(35., shape=(strategy.num_replicas_in_sync)))
  173: strategy = get_tpu_strategy()
  187: return strategy.run(tpu_fn, args=(25.0,))
[all …]
/aosp_15_r20/external/ComputeLibrary/src/core/NEON/kernels/arm_gemm/
gemm_interleaved.hpp
  65: template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
  70: strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,
  79: template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
  84: strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,  [in run()]
  89: const int bblocks = iceildiv(n_max - n_0, strategy::out_width());  [in run()]
  93: …auto p=prof.ScopedProfiler(PROFILE_KERNEL, (strategy::out_height() * bblocks * strategy::out_width…  [in run()]
  101: …auto p=prof.ScopedProfiler(PROFILE_MERGE, (strategy::out_height() * bblocks * strategy::out_width(…  [in run()]
  109: template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
  114: strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,  [in run()]
  121: const int bblocks = iceildiv(n_max - n_0, strategy::out_width());  [in run()]
[all …]
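In the interleaved kernel above, `bblocks = iceildiv(n_max - n_0, strategy::out_width())` counts how many output-width column blocks cover the `[n_0, n_max)` range. The arithmetic is plain ceiling division, shown here in Python with an illustrative tile width:

```python
def iceildiv(a: int, b: int) -> int:
    """Integer ceiling division, as used to count GEMM output blocks."""
    return (a + b - 1) // b

# Example: 100 output columns with a tile width of 12 need 9 blocks
# (8 full tiles plus one partial tile).
n_0, n_max, out_width = 0, 100, 12
bblocks = iceildiv(n_max - n_0, out_width)
assert bblocks == 9
```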
gemm_hybrid_indirect.hpp
  61: template<typename strategy, typename Tlo, typename Tro, typename Tr>
  66: …const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<…
  72: template<typename strategy, typename Tlo, typename Tro, typename Tr>
  77: …const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<…  [in run()]
  81: …auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_…  [in run()]
  87: if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {  [in run()]
  89: unsigned int N_remainder = N % strategy::out_width();  [in run()]
  107: Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));  [in run()]
  118: template<typename strategy, typename Tlo, typename Tro, typename Tr>
  123: …const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<…  [in run()]
[all …]
gemm_interleaved_pretransposed_2d.hpp
  51: template<typename strategy, typename To, typename Tr>
  53: typedef typename strategy::operand_type Toi;
  54: typedef typename strategy::result_type Tri;
  89: const GemmInterleavedPretransposed2d<strategy, To, Tr> &_parent;
  100: blockwalker(const GemmInterleavedPretransposed2d<strategy, To, Tr> &parent)  [in blockwalker()]
  105: …blockwalker(const GemmInterleavedPretransposed2d<strategy, To, Tr> &parent, unsigned int x0, unsig…  [in blockwalker()]
  169: return ROUND_UP(sizeof(Tri) * _x_block * strategy::out_height());  [in get_c_working_size()]
  184: strategy strat(_ci);  [in execute_pretranspose()]
  187: const unsigned int window_per_batch = _Mround / strategy::out_height();  [in execute_pretranspose()]
  192: unsigned int m_0 = (m_start - (batch_0 * window_per_batch)) * strategy::out_height();  [in execute_pretranspose()]
[all …]
/aosp_15_r20/frameworks/base/services/tests/displayservicetests/src/com/android/server/display/
BrightnessMappingStrategyTest.java
  201: BrightnessMappingStrategy strategy = BrightnessMappingStrategy.create(mContext, ddc,  [in testSimpleStrategyIgnoresNewConfiguration()]
  209: strategy.setBrightnessConfiguration(config);  [in testSimpleStrategyIgnoresNewConfiguration()]
  210: assertNotEquals(1.0f, strategy.getBrightness(1f), 0.0001f /*tolerance*/);  [in testSimpleStrategyIgnoresNewConfiguration()]
  217: BrightnessMappingStrategy strategy = BrightnessMappingStrategy.create(mContext, ddc,  [in testSimpleStrategyIgnoresNullConfiguration()]
  220: strategy.setBrightnessConfiguration(null);  [in testSimpleStrategyIgnoresNullConfiguration()]
  224: strategy.getBrightness(LUX_LEVELS[n - 1]), 0.0001f /*tolerance*/);  [in testSimpleStrategyIgnoresNullConfiguration()]
  270: BrightnessMappingStrategy strategy = BrightnessMappingStrategy.create(mContext, ddc,  [in testPhysicalStrategyUsesNewConfigurations()]
  281: strategy.setBrightnessConfiguration(config);  [in testPhysicalStrategyUsesNewConfigurations()]
  282: assertEquals(1.0f, strategy.getBrightness(1f), 0.0001f /*tolerance*/);  [in testPhysicalStrategyUsesNewConfigurations()]
  285: strategy.setBrightnessConfiguration(null);  [in testPhysicalStrategyUsesNewConfigurations()]
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/distribute/integration_test/
saved_model_test.py
  55: strategy=[
  69: def test_read_sync_on_read_variable(self, strategy):
  90: with strategy.scope():
  100: strategy=[
  109: # tf.distribute.Strategy and used for serving later. Serving usually only uses
  110: # one device and this is simulated by loading the model under no strategy
  115: # tf.distribute.Strategy. The saved tf.function should be an inference
  128: def test_read_sync_on_read_variable(self, strategy):
  145: with strategy.scope():
  151: self.evaluate(strategy.experimental_local_results(m.v)), [0.5, 0.5])
[all …]
/aosp_15_r20/external/jacoco/org.jacoco.core.test/src/org/jacoco/core/internal/instr/
ProbeArrayStrategyFactoryTest.java
  52: final IProbeArrayStrategy strategy = test(Opcodes.V1_1, 0, false, true,  [in testClass1()]
  54: assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  [in testClass1()]
  61: final IProbeArrayStrategy strategy = test(Opcodes.V1_2, 0, false, true,  [in testClass2()]
  63: assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  [in testClass2()]
  70: final IProbeArrayStrategy strategy = test(Opcodes.V1_3, 0, false, true,  [in testClass3()]
  72: assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  [in testClass3()]
  79: final IProbeArrayStrategy strategy = test(Opcodes.V1_4, 0, false, true,  [in testClass4()]
  81: assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  [in testClass4()]
  88: final IProbeArrayStrategy strategy = test(Opcodes.V1_5, 0, false, true,  [in testClass5()]
  90: assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  [in testClass5()]
[all …]
/aosp_15_r20/external/tensorflow/tensorflow/python/keras/distribute/
dataset_creator_model_fit_test.py
  40: strategy=strategy_combinations.all_strategies +
  52: def testModelFit(self, strategy):
  53: model = self._model_fit(strategy)
  56: def testModelFitwithStepsPerEpochNegativeOne(self, strategy):
  64: if strategy._should_use_with_coordinator:
  67: strategy,
  74: strategy,
  80: def testModelFitWithNumpyData(self, strategy):
  84: strategy,
  92: def testModelFitWithTensorData(self, strategy):
[all …]
distributed_file_utils.py
  53: def _get_base_dirpath(strategy):
  54: task_id = strategy.extended._task_id  # pylint: disable=protected-access
  58: def _is_temp_dir(dirpath, strategy):
  59: return dirpath.endswith(_get_base_dirpath(strategy))
  62: def _get_temp_dir(dirpath, strategy):
  63: if _is_temp_dir(dirpath, strategy):
  66: temp_dir = os.path.join(dirpath, _get_base_dirpath(strategy))
  71: def write_dirpath(dirpath, strategy):
  78: strategy: The tf.distribute strategy object currently used.
  83: if strategy is None:
[all …]
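The distributed_file_utils.py helpers derive a per-task temporary directory from the strategy's task id so that non-chief workers write checkpoints and summaries to a scratch location instead of the real output directory. A rough sketch of that pattern (function and directory names here are illustrative, not the actual Keras implementation):

```python
import os

def get_task_temp_dir(dirpath: str, task_id: int) -> str:
    """Scratch subdirectory for one worker task (name format is an assumption)."""
    return os.path.join(dirpath, "workertemp_" + str(task_id))

def write_dirpath_for_task(dirpath: str, task_id: int, is_chief: bool) -> str:
    """Chief writes to the requested directory; other workers get a temp subdir."""
    if is_chief:
        return dirpath
    temp_dir = get_task_temp_dir(dirpath, task_id)
    os.makedirs(temp_dir, exist_ok=True)
    return temp_dir
```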
/aosp_15_r20/external/ComputeLibrary/examples/gemm_tuner/
GemmTuner.py
  40: # Gemm strategy
  41: Strategy = Enum("Strategy", ["Native", "ReshapedOnlyRHS", "Reshaped"])
  61: # Gemm configuration for strategy Native
  76: # Gemm configuration for strategy Reshaped Only RHS
  102: # Gemm configuration for strategy Reshaped
  196: strategy: Strategy
  218: gemm_param, strategy, gemm_config, measurement = benchmark_result
  220: self._strategies.add(strategy)
  230: """ Get the best GEMMConfig set per GEMMParam per Strategy
  233: Tuple[GEMMParam, Strategy], List[Tuple[GEMMConfig, Measurement]]
[all …]
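GemmTuner.py collects benchmark tuples of (GEMMParam, Strategy, GEMMConfig, Measurement) and keeps the best-measuring configuration per (GEMMParam, Strategy) pair. A simplified sketch of that bookkeeping (types reduced to plain tuples and a single time-in-ms measurement; not the script's actual classes):

```python
from enum import Enum

Strategy = Enum("Strategy", ["Native", "ReshapedOnlyRHS", "Reshaped"])

def best_config_per_param_and_strategy(benchmark_results):
    """benchmark_results: iterable of (gemm_param, strategy, gemm_config, time_ms)."""
    best = {}
    for gemm_param, strategy, gemm_config, time_ms in benchmark_results:
        key = (gemm_param, strategy)
        if key not in best or time_ms < best[key][1]:
            best[key] = (gemm_config, time_ms)
    return best

# Example: two configs benchmarked for the same GEMM shape under the Reshaped strategy.
results = [
    (("M=64", "N=64", "K=64"), Strategy.Reshaped, "cfg_a", 1.9),
    (("M=64", "N=64", "K=64"), Strategy.Reshaped, "cfg_b", 1.4),
]
print(best_config_per_param_and_strategy(results))  # cfg_b wins at 1.4 ms
```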
/aosp_15_r20/external/google-cloud-java/java-deploy/proto-google-cloud-deploy-v1/src/main/java/com/google/cloud/deploy/v1/
Strategy.java
  25: * Strategy contains deployment strategy information.
  28: * Protobuf type {@code google.cloud.deploy.v1.Strategy}
  30: public final class Strategy extends com.google.protobuf.GeneratedMessageV3
  32: // @@protoc_insertion_point(message_implements:google.cloud.deploy.v1.Strategy)
  35: // Use Strategy.newBuilder() to construct.
  36: private Strategy(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {  [in Strategy()]
  40: private Strategy() {}  [in Strategy()]
  45: return new Strategy();  [in newInstance()]
  64: com.google.cloud.deploy.v1.Strategy.class,  [in internalGetFieldAccessorTable()]
  65: com.google.cloud.deploy.v1.Strategy.Builder.class);  [in internalGetFieldAccessorTable()]
[all …]
/aosp_15_r20/external/googleapis/google/ads/searchads360/v0/common/
bidding.proto
  33: // An automated bidding strategy that raises bids for clicks
  37: // This bidding strategy is deprecated and cannot be created anymore. Use
  41: // Manual bidding strategy that allows advertiser to set the bid per
  54: // An automated bidding strategy to help get the most conversions for your
  57: // Maximum bid limit that can be set by the bid strategy.
  58: // The limit applies to all keywords managed by the strategy.
  62: // Minimum bid limit that can be set by the bid strategy.
  63: // The limit applies to all keywords managed by the strategy.
  69: // the bidding strategy's currency. If set, the bid strategy will get as many
  71: // target CPA is not set, the bid strategy will aim to achieve the lowest
[all …]
/aosp_15_r20/frameworks/native/libs/input/tests/
VelocityTracker_test.cpp
  231: static std::optional<float> computeVelocity(const VelocityTracker::Strategy strategy,  [in computeVelocity()]
  234: VelocityTracker vt(strategy);  [in computeVelocity()]
  244: const VelocityTracker::Strategy strategy,  [in computePlanarVelocity()]
  247: return computeVelocity(strategy, events, axis, pointerId);  [in computePlanarVelocity()]
  250: static void computeAndCheckVelocity(const VelocityTracker::Strategy strategy,  [in computeAndCheckVelocity()]
  254: checkVelocity(computePlanarVelocity(strategy, motions, axis, pointerId), targetVelocity);  [in computeAndCheckVelocity()]
  258: const VelocityTracker::Strategy strategy,  [in computeAndCheckAxisScrollVelocity()]
  262: checkVelocity(computeVelocity(strategy, events, AMOTION_EVENT_AXIS_SCROLL), targetVelocity);  [in computeAndCheckAxisScrollVelocity()]
  263: // The strategy LSQ2 is not compatible with AXIS_SCROLL. In those situations, we should fall  [in computeAndCheckAxisScrollVelocity()]
  264: // back to a strategy that supports differential axes.  [in computeAndCheckAxisScrollVelocity()]
[all …]
/aosp_15_r20/platform_testing/libraries/device-collectors/src/test/java/android/device/collectors/
PerfettoTracingPerClassStrategyTest.java
  89: PerfettoTracingStrategy strategy =  [in initStrategy()]
  99: strategy.setup(b);  [in initStrategy()]
  100: return strategy;  [in initStrategy()]
  107: PerfettoTracingStrategy strategy = initStrategy(b);  [in testPerfettoTraceStartOnFirstTestStart()]
  111: strategy.testRunStart(mDataRecord, mRunDesc);  [in testPerfettoTraceStartOnFirstTestStart()]
  113: strategy.testStart(mDataRecord, mTest1Desc);  [in testPerfettoTraceStartOnFirstTestStart()]
  121: PerfettoTracingStrategy strategy = initStrategy(b);  [in testPerfettoTraceStartOncePerClass()]
  125: strategy.testRunStart(mDataRecord, mRunDesc);  [in testPerfettoTraceStartOncePerClass()]
  127: strategy.testStart(mDataRecord, mTest1Desc);  [in testPerfettoTraceStartOncePerClass()]
  128: strategy.testEnd(mDataRecord, mTest1Desc);  [in testPerfettoTraceStartOncePerClass()]
[all …]
/aosp_15_r20/external/python/google-api-python-client/docs/dyn/
dfareporting_v3_5.placementStrategies.html
  82: Deletes an existing placement strategy.
  85: Gets one placement strategy by ID.
  88: Inserts a new placement strategy.
  97: Updates an existing placement strategy. This method supports patch semantics. …
  100: Updates an existing placement strategy.
  109: Deletes an existing placement strategy.
  113: id: string, Placement strategy ID. (required)
  123: Gets one placement strategy by ID.
  127: id: string, Placement strategy ID. (required)
  136: { # Contains properties of a placement strategy.
[all …]
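These generated reference pages describe the DFA Reporting / Campaign Manager `placementStrategies` resource as exposed through google-api-python-client. A minimal sketch of the documented `get` call, assuming you already hold authorized credentials and a user profile ID (both IDs below are placeholders):

```python
from googleapiclient.discovery import build

# `creds` is assumed to be an authorized google-auth credentials object
# obtained elsewhere (e.g. a service account or OAuth flow).
service = build("dfareporting", "v3.5", credentials=creds)

profile_id = "1234567"    # placeholder user profile ID
strategy_id = "8901234"   # placeholder placement strategy ID

# "Gets one placement strategy by ID", per the page above.
strategy = service.placementStrategies().get(
    profileId=profile_id, id=strategy_id).execute()
print(strategy.get("name"))
```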
dfareporting_v3_4.placementStrategies.html
  82: Deletes an existing placement strategy.
  85: Gets one placement strategy by ID.
  88: Inserts a new placement strategy.
  97: Updates an existing placement strategy. This method supports patch semantics. …
  100: Updates an existing placement strategy.
  109: Deletes an existing placement strategy.
  113: id: string, Placement strategy ID. (required)
  123: Gets one placement strategy by ID.
  127: id: string, Placement strategy ID. (required)
  136: { # Contains properties of a placement strategy.
[all …]
