
Searched +full:- +full:- +full:batch (Results 1 – 25 of 1721) sorted by relevance


/aosp_15_r20/external/mesa3d/src/gallium/drivers/iris/
iris_batch.c
29 * collect into a "batch buffer". Typically, many draw calls are grouped
30 * into a single batch to amortize command submission overhead.
36 * virtual memory address before executing our batch. If a BO is not in
73 iris_batch_reset(struct iris_batch *batch);
76 iris_batch_num_fences(struct iris_batch *batch) in iris_batch_num_fences() argument
78 return util_dynarray_num_elements(&batch->exec_fences, in iris_batch_num_fences()
86 iris_dump_fence_list(struct iris_batch *batch) in iris_dump_fence_list() argument
88 fprintf(stderr, "Fence list (length %u): ", iris_batch_num_fences(batch)); in iris_dump_fence_list()
90 util_dynarray_foreach(&batch->exec_fences, struct iris_batch_fence, f) { in iris_dump_fence_list()
92 (f->flags & IRIS_BATCH_FENCE_WAIT) ? "..." : "", in iris_dump_fence_list()
[all …]
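
The iris_batch.c comments above describe the accumulate-and-flush pattern shared by the drivers in these results: commands are appended to a CPU-side buffer and handed to the kernel in one submission once the buffer fills, amortizing the per-submit cost over many draws. A minimal stand-alone sketch of that idea, assuming a toy fixed-size buffer rather than the real iris_batch layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: a toy command batch, not the iris_batch structure. */
struct toy_batch {
   uint32_t cmds[1024];   /* command dwords accumulated on the CPU */
   unsigned used;         /* dwords written so far */
};

/* Stand-in for execbuf: one expensive call that submits the whole buffer. */
static void toy_submit(struct toy_batch *b)
{
   printf("submitting %u dwords in one call\n", b->used);
   b->used = 0;
}

/* Append a command group; flush first if it would not fit. */
static void toy_emit(struct toy_batch *b, const uint32_t *dw, unsigned n)
{
   if (b->used + n > 1024)
      toy_submit(b);            /* amortize: submit only when full */
   memcpy(&b->cmds[b->used], dw, n * sizeof(*dw));
   b->used += n;
}

int main(void)
{
   struct toy_batch b = { .used = 0 };
   uint32_t draw[8] = { 0 };
   for (int i = 0; i < 1000; i++)
      toy_emit(&b, draw, 8);    /* many draws, few submissions */
   toy_submit(&b);
   return 0;
}
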
iris_batch.h
46 /* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END or 12
53 /* Our target batch size - flush approximately at this point. */
54 #define BATCH_SZ (128 * 1024 - BATCH_RESERVED)
79 /** What batch is this? (e.g. IRIS_BATCH_RENDER/COMPUTE) */
87 /** Size of the primary batch being submitted to execbuf (in bytes). */
109 /** A list of all BOs referenced by this batch */
113 /** Bitset of whether this batch writes to BO `i'. */
117 /** Whether INTEL_BLACKHOLE_RENDER is enabled in the batch (aka first
127 * A list of iris_syncobjs associated with this batch.
129 * The first list entry will always be a signalling sync-point, indicating
[all …]
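
The iris_batch.h lines above reserve space for the batch terminator up front: the flush threshold BATCH_SZ is the 128 KiB allocation minus BATCH_RESERVED, so ordinary commands can never occupy the bytes the terminating command will need. A hedged sketch of that check; the BATCH_RESERVED value of 12 below is taken from the truncated comment and may not match the real definition, and must_flush() is purely illustrative:

#include <assert.h>
#include <stddef.h>

#define BATCH_RESERVED 12                       /* assumed worst-case terminator size */
#define BATCH_SZ (128 * 1024 - BATCH_RESERVED)  /* flush approximately at this point  */

/* Nonzero when emitting `bytes` more would eat into the reserved tail,
 * meaning the batch must be terminated and flushed before this emit. */
static int must_flush(size_t used, size_t bytes)
{
   return used + bytes > BATCH_SZ;
}

int main(void)
{
   assert(!must_flush(0, 4));
   assert(must_flush(BATCH_SZ - 2, 4));
   return 0;
}
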
/aosp_15_r20/external/mesa3d/src/gallium/drivers/crocus/
crocus_batch.c
29 * collect into a "batch buffer". Typically, many draw calls are grouped
30 * into a single batch to amortize command submission overhead.
36 * virtual memory address before executing our batch. If a BO is not in
45 #include "drm-uapi/i915_drm.h"
65 /* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
69 #define BATCH_RESERVED(devinfo) ((devinfo)->platform == INTEL_PLATFORM_HSW ? 32 : 16)
71 static void crocus_batch_reset(struct crocus_batch *batch);
74 num_fences(struct crocus_batch *batch) in num_fences() argument
76 return util_dynarray_num_elements(&batch->exec_fences, in num_fences()
84 dump_fence_list(struct crocus_batch *batch) in dump_fence_list() argument
[all …]
/aosp_15_r20/external/mesa3d/src/gallium/drivers/freedreno/
freedreno_batch.c
3 * SPDX-License-Identifier: MIT
21 alloc_ring(struct fd_batch *batch, unsigned sz, enum fd_ringbuffer_flags flags) in alloc_ring() argument
23 struct fd_context *ctx = batch->ctx; in alloc_ring()
26 * have no option but to allocate large worst-case sizes so that in alloc_ring()
33 if ((fd_device_version(ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS) && in alloc_ring()
39 return fd_submit_new_ringbuffer(batch->submit, sz, flags); in alloc_ring()
43 subpass_create(struct fd_batch *batch) in subpass_create() argument
47 subpass->draw = alloc_ring(batch, 0x100000, 0); in subpass_create()
49 /* Replace batch->draw with reference to current subpass, for in subpass_create()
52 if (batch->draw) in subpass_create()
[all …]
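
The alloc_ring() excerpt turns on one capability check: when the kernel advertises FD_VERSION_UNLIMITED_CMDS the ring can start small and grow, otherwise the driver has no option but to allocate a large worst-case size up front. A toy version of that decision, with a made-up device struct and sizes:

#include <stddef.h>
#include <stdio.h>

/* Illustrative capability flag standing in for FD_VERSION_UNLIMITED_CMDS. */
struct toy_device { int growable_cmds; };

/* Start small when the ring can grow later; otherwise reserve the worst
 * case so a long command stream never overflows the buffer. */
static size_t pick_ring_size(const struct toy_device *dev,
                             size_t requested, size_t worst_case)
{
   return dev->growable_cmds ? requested : worst_case;
}

int main(void)
{
   struct toy_device old_kernel = { 0 }, new_kernel = { 1 };
   printf("old kernel: %zu bytes\n", pick_ring_size(&old_kernel, 4096, 0x100000));
   printf("new kernel: %zu bytes\n", pick_ring_size(&new_kernel, 4096, 0x100000));
   return 0;
}
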
freedreno_draw.c
3 * SPDX-License-Identifier: MIT
28 batch_references_resource(struct fd_batch *batch, struct pipe_resource *prsc) in batch_references_resource() argument
31 return fd_batch_references_resource(batch, fd_resource(prsc)); in batch_references_resource()
35 resource_read(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt in resource_read() argument
39 fd_batch_resource_read(batch, fd_resource(prsc)); in resource_read()
43 resource_written(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt in resource_written() argument
47 fd_batch_resource_write(batch, fd_resource(prsc)); in resource_written()
51 batch_draw_tracking_for_dirty_bits(struct fd_batch *batch) assert_dt in batch_draw_tracking_for_dirty_bits() argument
53 struct fd_context *ctx = batch->ctx; in batch_draw_tracking_for_dirty_bits()
54 struct pipe_framebuffer_state *pfb = &batch->framebuffer; in batch_draw_tracking_for_dirty_bits()
[all …]
freedreno_batch_cache.c
3 * SPDX-License-Identifier: MIT
23 * The batch cache provides lookup for mapping pipe_framebuffer_state
24 * to a batch.
29 * Batch Cache hashtable key:
36 * Batch:
38 * Each batch needs to hold a reference to each resource it depends on (ie.
43 * When a resource is destroyed, we need to remove entries in the batch
48 * When a batch has weak reference to no more resources (ie. all the
49 * surfaces it rendered to are destroyed) the batch can be destroyed.
52 * surfaces are destroyed before the batch is submitted.
[all …]
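
The freedreno_batch_cache.c comment above describes the core data structure: a table keyed on framebuffer state that maps each render-target set to its batch, with batches holding references to the resources they touch so cache entries can be dropped when those resources die. A minimal illustration of the keyed lookup, using a linear scan over fixed slots instead of the real hashtable (the key fields are simplified assumptions):

#include <stdbool.h>
#include <stddef.h>

/* Simplified stand-in for the framebuffer-state hash key. */
struct toy_fb_key { unsigned width, height, nr_cbufs; };

struct toy_batch { struct toy_fb_key key; bool in_use; };

#define TOY_MAX_BATCHES 8
static struct toy_batch cache[TOY_MAX_BATCHES];

static bool toy_key_equal(const struct toy_fb_key *a, const struct toy_fb_key *b)
{
   return a->width == b->width && a->height == b->height &&
          a->nr_cbufs == b->nr_cbufs;
}

/* Return the batch already bound to this framebuffer key, or claim a free
 * slot for it. The real cache hashes the key; a scan shows the idea. */
static struct toy_batch *toy_batch_from_fb(const struct toy_fb_key *key)
{
   for (int i = 0; i < TOY_MAX_BATCHES; i++)
      if (cache[i].in_use && toy_key_equal(&cache[i].key, key))
         return &cache[i];
   for (int i = 0; i < TOY_MAX_BATCHES; i++)
      if (!cache[i].in_use) {
         cache[i].key = *key;
         cache[i].in_use = true;
         return &cache[i];
      }
   return NULL;   /* cache full: the real code would flush an existing batch */
}

int main(void)
{
   struct toy_fb_key fb = { 1920, 1080, 1 };
   struct toy_batch *a = toy_batch_from_fb(&fb);
   struct toy_batch *b = toy_batch_from_fb(&fb);
   return (a == b) ? 0 : 1;   /* same framebuffer state reuses one batch */
}
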
/aosp_15_r20/external/mesa3d/src/gallium/drivers/panfrost/
pan_job.c
2 * Copyright (C) 2019-2020 Collabora, Ltd.
4 * Copyright (C) 2014-2017 Broadcom
40 BITSET_FOREACH_SET(idx, ctx->batches.active, PAN_MAX_BATCHES)
43 panfrost_batch_idx(struct panfrost_batch *batch) in panfrost_batch_idx() argument
45 return batch - batch->ctx->batches.slots; in panfrost_batch_idx()
60 /* Adds the BO backing surface to a batch if the surface is non-null */
63 panfrost_batch_add_surface(struct panfrost_batch *batch, in panfrost_batch_add_surface() argument
67 struct panfrost_resource *rsrc = pan_resource(surf->texture); in panfrost_batch_add_surface()
68 pan_legalize_format(batch->ctx, rsrc, surf->format, true, false); in panfrost_batch_add_surface()
69 panfrost_batch_write_rsrc(batch, rsrc, PIPE_SHADER_FRAGMENT); in panfrost_batch_add_surface()
[all …]
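
panfrost_batch_idx() above recovers a batch's slot number with nothing but pointer arithmetic: batches live in a fixed slots[] array inside the context, so subtracting the array base yields the index that the active-batch bitset is keyed on. A small self-contained illustration (the array size and names are made up):

#include <assert.h>
#include <stddef.h>

#define TOY_MAX_BATCHES 8

struct toy_batch { int dummy; };
struct toy_ctx   { struct toy_batch slots[TOY_MAX_BATCHES]; };

/* Slot index = element offset from the start of the fixed array. */
static ptrdiff_t toy_batch_idx(const struct toy_ctx *ctx,
                               const struct toy_batch *batch)
{
   return batch - ctx->slots;
}

int main(void)
{
   struct toy_ctx ctx;
   assert(toy_batch_idx(&ctx, &ctx.slots[5]) == 5);
   return 0;
}
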
pan_jm.c
28 #include "drm-uapi/panfrost_drm.h"
42 GENX(jm_init_batch)(struct panfrost_batch *batch) in GENX()
45 batch->framebuffer = in GENX()
47 pan_pool_alloc_desc(&batch->pool.base, FRAMEBUFFER); in GENX()
50 &batch->pool.base, PAN_DESC(FRAMEBUFFER), PAN_DESC(ZS_CRC_EXTENSION), in GENX()
51 PAN_DESC_ARRAY(MAX2(batch->key.nr_cbufs, 1), RENDER_TARGET)); in GENX()
55 batch->tls = pan_pool_alloc_desc(&batch->pool.base, LOCAL_STORAGE); in GENX()
58 batch->tls = batch->framebuffer; in GENX()
64 cfg.pointer = batch->framebuffer.gpu; in GENX()
68 batch->tls.gpu = ptr.opaque[0]; in GENX()
[all …]
pan_cmdstream.c
162 return !cso->compare_mode in panfrost_sampler_compare_func()
164 : panfrost_flip_compare_func((enum mali_func)cso->compare_func); in panfrost_sampler_compare_func()
192 so->base = *cso; in panfrost_create_sampler_state()
203 if (!util_format_is_depth_and_stencil(cso->border_color_format)) { in panfrost_create_sampler_state()
206 GENX(panfrost_format_from_pipe_format)(cso->border_color_format)->hw; in panfrost_create_sampler_state()
213 util_format_apply_color_swizzle(&so->base.border_color, &cso->border_color, in panfrost_create_sampler_state()
222 bool using_nearest = cso->min_img_filter == PIPE_TEX_MIPFILTER_NEAREST; in panfrost_create_sampler_state()
224 pan_pack(&so->hw, SAMPLER, cfg) { in panfrost_create_sampler_state()
225 cfg.magnify_nearest = cso->mag_img_filter == PIPE_TEX_FILTER_NEAREST; in panfrost_create_sampler_state()
226 cfg.minify_nearest = cso->min_img_filter == PIPE_TEX_FILTER_NEAREST; in panfrost_create_sampler_state()
[all …]
/aosp_15_r20/external/mesa3d/src/gallium/drivers/asahi/
agx_batch.c
3 * Copyright 2019-2020 Collabora, Ltd.
4 * SPDX-License-Identifier: MIT
17 BITSET_FOREACH_SET(idx, ctx->batches.active, AGX_MAX_BATCHES)
20 BITSET_FOREACH_SET(idx, ctx->batches.submitted, AGX_MAX_BATCHES)
22 #define batch_debug(batch, fmt, ...) \ argument
24 if (unlikely(agx_device(batch->ctx->base.screen)->debug & \
26 agx_msg("[Queue %u Batch %u] " fmt "\n", batch->ctx->queue_id, \
27 agx_batch_idx(batch), ##__VA_ARGS__); \
31 agx_batch_is_active(struct agx_batch *batch) in agx_batch_is_active() argument
33 return BITSET_TEST(batch->ctx->batches.active, agx_batch_idx(batch)); in agx_batch_is_active()
[all …]
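
agx_batch_is_active() above is a single bit test: every batch slot owns one bit in the context's "active" bitset, which makes both "is this batch live" and "iterate all live batches" cheap. A sketch of the same bookkeeping using a plain unsigned mask instead of Mesa's BITSET helpers (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_BATCHES 16u

static unsigned active_mask;                 /* bit i set => slot i is active */

static void toy_mark_active(unsigned idx) { active_mask |=  (1u << idx); }
static void toy_mark_idle(unsigned idx)   { active_mask &= ~(1u << idx); }
static bool toy_is_active(unsigned idx)   { return active_mask & (1u << idx); }

int main(void)
{
   toy_mark_active(2);
   toy_mark_active(7);
   /* Equivalent of "foreach active batch": skip the clear bits. */
   for (unsigned i = 0; i < TOY_MAX_BATCHES; i++)
      if (toy_is_active(i))
         printf("batch %u is active\n", i);
   toy_mark_idle(2);
   return 0;
}
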
/aosp_15_r20/external/mesa3d/src/gallium/drivers/d3d12/
d3d12_batch.cpp
45 return _mesa_hash_data(table->descs, sizeof(table->descs[0]) * table->count); in d3d12_sampler_desc_table_key_hash()
51 …return table_a->count == table_b->count && memcmp(table_a->descs, table_b->descs, sizeof(table_a->… in d3d12_sampler_desc_table_key_equals()
55 d3d12_init_batch(struct d3d12_context *ctx, struct d3d12_batch *batch) in d3d12_init_batch() argument
57 struct d3d12_screen *screen = d3d12_screen(ctx->base.screen); in d3d12_init_batch()
59 batch->bos = _mesa_hash_table_create(NULL, _mesa_hash_pointer, in d3d12_init_batch()
62 util_dynarray_init(&batch->local_bos, NULL); in d3d12_init_batch()
64 batch->surfaces = _mesa_set_create(NULL, _mesa_hash_pointer, in d3d12_init_batch()
66 batch->objects = _mesa_set_create(NULL, in d3d12_init_batch()
70 if (!batch->bos || !batch->surfaces || !batch->objects) in d3d12_init_batch()
74 if (screen->max_feature_level >= D3D_FEATURE_LEVEL_11_0) { in d3d12_init_batch()
[all …]
/aosp_15_r20/external/mesa3d/src/gallium/drivers/freedreno/a6xx/
fd6_gmem.cc
4 * SPDX-License-Identifier: MIT
45 OUT_RELOC(ring, rsc->bo, fd_resource_ubwc_offset(rsc, level, layer), 0, in fd6_emit_flag_reference()
48 fdl_ubwc_pitch(&rsc->layout, level)) | in fd6_emit_flag_reference()
50 rsc->layout.ubwc_layer_size >> 2)); in fd6_emit_flag_reference()
73 for (i = 0; i < pfb->nr_cbufs; i++) { in emit_mrt()
82 if (!pfb->cbufs[i]) in emit_mrt()
85 struct pipe_surface *psurf = pfb->cbufs[i]; in emit_mrt()
86 enum pipe_format pformat = psurf->format; in emit_mrt()
87 rsc = fd_resource(psurf->texture); in emit_mrt()
89 uint32_t base = gmem ? gmem->cbuf_base[i] : 0; in emit_mrt()
[all …]
/aosp_15_r20/external/igt-gpu-tools/lib/
rendercopy_gen4.c
16 #define GEN4_GRF_BLOCKS(nreg) (((nreg) + 15) / 16 - 1)
83 batch_used(struct intel_batchbuffer *batch) in batch_used() argument
85 return batch->ptr - batch->buffer; in batch_used()
89 batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor) in batch_round_upto() argument
91 uint32_t offset = batch_used(batch); in batch_round_upto()
93 offset = (offset + divisor - 1) / divisor * divisor; in batch_round_upto()
94 batch->ptr = batch->buffer + offset; in batch_round_upto()
124 gen4_render_flush(struct intel_batchbuffer *batch, in gen4_render_flush() argument
129 ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); in gen4_render_flush()
131 ret = drm_intel_gem_bo_context_exec(batch->bo, context, in gen4_render_flush()
[all …]
intel_batchbuffer.c
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
51 * @title: Batch Buffer
61 * structure called batch is in scope. The basic macros are #BEGIN_BATCH,
64 * Note that this library's header pulls in the [i-g-t core](igt-gpu-tools-i-g-t-core.html)
70 * @batch: batchbuffer object
73 * Aligns the current in-batch offset to the given value.
78 intel_batchbuffer_align(struct intel_batchbuffer *batch, uint32_t align) in intel_batchbuffer_align() argument
80 uint32_t offset = batch->ptr - batch->buffer; in intel_batchbuffer_align()
83 batch->ptr = batch->buffer + offset; in intel_batchbuffer_align()
89 * @batch: batchbuffer object
[all …]
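
intel_batchbuffer_align() above computes the write pointer's offset into the buffer, rounds it up to the requested boundary, and moves the pointer there. The rounding is one line of bit arithmetic; a stand-alone version with the ALIGN macro written out explicitly (assuming, as the usual macro does, a power-of-two alignment):

#include <assert.h>
#include <stdint.h>

/* Round `offset` up to the next multiple of `align` (a power of two),
 * the same arithmetic the excerpt's ALIGN macro performs. */
static uint32_t toy_align(uint32_t offset, uint32_t align)
{
   return (offset + align - 1) & ~(align - 1);
}

int main(void)
{
   assert(toy_align(0, 16)  == 0);
   assert(toy_align(1, 16)  == 16);
   assert(toy_align(33, 32) == 64);
   return 0;
}
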
rendercopy_gen6.c
52 batch_round_upto(struct intel_batchbuffer *batch, uint32_t divisor) in batch_round_upto() argument
54 uint32_t offset = batch->ptr - batch->buffer; in batch_round_upto()
56 offset = (offset + divisor-1) / divisor * divisor; in batch_round_upto()
57 batch->ptr = batch->buffer + offset; in batch_round_upto()
62 gen6_render_flush(struct intel_batchbuffer *batch, in gen6_render_flush() argument
67 ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); in gen6_render_flush()
69 ret = drm_intel_gem_bo_context_exec(batch->bo, context, in gen6_render_flush()
75 gen6_bind_buf(struct intel_batchbuffer *batch, const struct igt_buf *buf, in gen6_bind_buf() argument
82 igt_assert_lte(buf->stride, 128*1024); in gen6_bind_buf()
93 ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32); in gen6_bind_buf()
[all …]
rendercopy_gen8.c
31 static void dump_batch(struct intel_batchbuffer *batch) { in dump_batch() argument
32 int fd = open("/tmp/i965-batchbuffers.dump", O_WRONLY | O_CREAT, 0666); in dump_batch()
33 if (fd != -1) { in dump_batch()
34 igt_assert_eq(write(fd, batch->buffer, 4096), 4096); in dump_batch()
60 /* Write all -1 */
83 /* aub->annotations is an array keeping a list of annotations of the in annotation_init()
84 * batch buffer ordered by offset. aub->annotations[0] is thus left in annotation_init()
86 * the batch buffer with annotations_add_batch() */ in annotation_init()
87 aub->index = 1; in annotation_init()
94 a->type = type; in add_annotation()
[all …]
rendercopy_gen9.c
31 static void dump_batch(struct intel_batchbuffer *batch) { in dump_batch() argument
32 int fd = open("/tmp/i965-batchbuffers.dump", O_WRONLY | O_CREAT, 0666); in dump_batch()
33 if (fd != -1) { in dump_batch()
34 igt_assert_eq(write(fd, batch->buffer, 4096), 4096); in dump_batch()
60 /* Write all -1 */
91 /* Write all -1 */
114 /* ctx->annotations is an array keeping a list of annotations of the in annotation_init()
115 * batch buffer ordered by offset. ctx->annotations[0] is thus left in annotation_init()
117 * the batch buffer with annotations_add_batch() */ in annotation_init()
118 ctx->index = 1; in annotation_init()
[all …]
rendercopy_gen7.c
36 gen7_render_flush(struct intel_batchbuffer *batch, in gen7_render_flush() argument
41 ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer); in gen7_render_flush()
43 ret = drm_intel_gem_bo_context_exec(batch->bo, context, in gen7_render_flush()
60 gen7_bind_buf(struct intel_batchbuffer *batch, in gen7_bind_buf() argument
68 igt_assert_lte(buf->stride, 256*1024); in gen7_bind_buf()
72 switch (buf->bpp) { in gen7_bind_buf()
87 ss = intel_batchbuffer_subdata_alloc(batch, 8 * sizeof(*ss), 32); in gen7_bind_buf()
90 gen7_tiling_bits(buf->tiling) | in gen7_bind_buf()
92 ss[1] = buf->bo->offset; in gen7_bind_buf()
93 ss[2] = ((igt_buf_width(buf) - 1) << GEN7_SURFACE_WIDTH_SHIFT | in gen7_bind_buf()
[all …]
/aosp_15_r20/external/mesa3d/src/intel/blorp/
blorp_genX_exec_elk.h
55 blorp_emit_dwords(struct blorp_batch *batch, unsigned n);
58 blorp_emit_reloc(struct blorp_batch *batch,
62 blorp_measure_start(struct blorp_batch *batch,
66 blorp_measure_end(struct blorp_batch *batch,
70 blorp_alloc_dynamic_state(struct blorp_batch *batch,
76 blorp_alloc_general_state(struct blorp_batch *batch,
82 blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
85 blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
91 blorp_get_workaround_address(struct blorp_batch *batch);
94 blorp_alloc_binding_table(struct blorp_batch *batch, unsigned num_entries,
[all …]
blorp_genX_exec_brw.h
51 blorp_emit_dwords(struct blorp_batch *batch, unsigned n);
54 blorp_emit_reloc(struct blorp_batch *batch,
58 blorp_measure_start(struct blorp_batch *batch,
62 blorp_measure_end(struct blorp_batch *batch,
66 blorp_alloc_dynamic_state(struct blorp_batch *batch,
72 blorp_alloc_general_state(struct blorp_batch *batch,
78 blorp_get_dynamic_state(struct blorp_batch *batch,
82 blorp_alloc_vertex_buffer(struct blorp_batch *batch, uint32_t size,
85 blorp_vf_invalidate_for_vb_48b_transitions(struct blorp_batch *batch,
91 blorp_get_workaround_address(struct blorp_batch *batch);
[all …]
/aosp_15_r20/external/rust/android-crates-io/crates/grpcio-sys/grpc/src/core/lib/transport/
batch_builder.h
7 // http://www.apache.org/licenses/LICENSE-2.0
57 // Build up a transport stream op batch for a stream for a promise based
94 // cancellation batch instead of a trailing metadata op in a coalesced batch.
98 // paired with an initial op batch, and the transports would wait for the
99 // initial metadata batch to arrive (in case of reordering up the stack).
105 // Error => non-ok status
131 struct Batch;
135 explicit PendingCompletion(RefCountedPtr<Batch> batch);
140 RefCountedPtr<Batch> batch; member
153 return GetContext<Arena>()->MakePooled<Message>(std::move(*payload), in IntoMessageHandle()
[all …]
/aosp_15_r20/external/grpc-grpc/src/core/lib/transport/
batch_builder.h
7 // http://www.apache.org/licenses/LICENSE-2.0
55 // Build up a transport stream op batch for a stream for a promise based
92 // cancellation batch instead of a trailing metadata op in a coalesced batch.
96 // paired with an initial op batch, and the transports would wait for the
97 // initial metadata batch to arrive (in case of reordering up the stack).
103 // Error => non-ok status
129 struct Batch;
133 explicit PendingCompletion(RefCountedPtr<Batch> batch);
138 RefCountedPtr<Batch> batch; member
151 return GetContext<Arena>()->MakePooled<Message>(std::move(*payload), in IntoMessageHandle()
[all …]
/aosp_15_r20/external/XNNPACK/test/
s16-vlshift.cc
3 // This source code is licensed under the BSD-style license found in the
6 // Auto-generated file. Do not edit!
7 // Specification: test/s16-vlshift.yaml
8 // Generator: tools/generate-vlshift-test.py
14 #include <xnnpack/isa-checks.h>
17 #include "vlshift-microkernel-tester.h"
24 .batch(8) in TEST()
30 for (size_t batch = 16; batch < 80; batch += 8) { in TEST() local
32 .batch(batch) in TEST()
39 for (size_t batch = 1; batch < 8; batch++) { in TEST() local
[all …]
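
The generated s16-vlshift tests above all follow one sweep: exercise the kernel at its native batch tile (8 elements), at multiples of the tile, and at every remainder below it, so both the full vector body and the scalar tail path get covered. A schematic of that loop structure with the microkernel tester stubbed out (only the sweep pattern is the point):

#include <stdio.h>

/* Stand-in for the microkernel tester: just report the size exercised. */
static void toy_test_batch(size_t batch) { printf("test batch=%zu\n", batch); }

int main(void)
{
   const size_t tile = 8;                  /* kernel's native batch tile */

   toy_test_batch(tile);                   /* batch == tile              */
   for (size_t b = 2 * tile; b < 10 * tile; b += tile)
      toy_test_batch(b);                   /* batch a multiple of tile   */
   for (size_t b = 1; b < tile; b++)
      toy_test_batch(b);                   /* remainder-only tail        */
   return 0;
}
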
s16-rmaxabs.cc
3 // This source code is licensed under the BSD-style license found in the
6 // Auto-generated file. Do not edit!
7 // Specification: test/s16-rmaxabs.yaml
8 // Generator: tools/generate-rmaxabs-test.py
14 #include <xnnpack/isa-checks.h>
17 #include "rmaxabs-microkernel-tester.h"
24 .batch(8) in TEST()
30 for (size_t batch = 16; batch < 80; batch += 8) { in TEST() local
32 .batch(batch) in TEST()
39 for (size_t batch = 1; batch < 8; batch++) { in TEST() local
[all …]
/aosp_15_r20/external/google-cloud-java/java-batch/proto-google-cloud-batch-v1/src/main/java/com/google/cloud/batch/v1/
AllocationPolicy.java
8 * https://www.apache.org/licenses/LICENSE-2.0
17 // source: google/cloud/batch/v1/job.proto
19 package com.google.cloud.batch.v1;
29 * Protobuf type {@code google.cloud.batch.v1.AllocationPolicy}
33 // @@protoc_insertion_point(message_implements:google.cloud.batch.v1.AllocationPolicy)
57 return com.google.cloud.batch.v1.JobProto in getDescriptor()
75 return com.google.cloud.batch.v1.JobProto in internalGetFieldAccessorTable()
78 com.google.cloud.batch.v1.AllocationPolicy.class, in internalGetFieldAccessorTable()
79 com.google.cloud.batch.v1.AllocationPolicy.Builder.class); in internalGetFieldAccessorTable()
89 * Protobuf enum {@code google.cloud.batch.v1.AllocationPolicy.ProvisioningModel}
[all …]
