/aosp_15_r20/external/rust/android-crates-io/crates/vhost/src/
backend.rs:
  271  fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()>;    [in set_vring_num()]
  278  fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()>;    [in set_vring_addr()]
  285  fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()>;    [in set_vring_base()]
  288  fn get_vring_base(&self, queue_index: usize) -> Result<u32>;    [in get_vring_base()]
  295  fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()>;    [in set_vring_call()]
  303  fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()>;    [in set_vring_kick()]
  310  fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()>;    [in set_vring_err()]
  357  fn set_vring_num(&mut self, queue_index: usize, num: u16) -> Result<()>;    [in set_vring_num()]
  364  fn set_vring_addr(&mut self, queue_index: usize, config_data: &VringConfigData) -> Result<()>;    [in set_vring_addr()]
  371  fn set_vring_base(&mut self, queue_index: usize, base: u16) -> Result<()>;    [in set_vring_base()]
  [all …]
vdpa.rs:
  66  fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()>;    [in set_vring_enable()]
  96  fn get_vring_group(&self, queue_index: u32) -> Result<u32>;    [in get_vring_group()]
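The traits above expose every vring operation keyed by a queue_index. The sketch below shows the configuration order their callers (worker.rs and vhost_user_frontend, further down) follow. It is a minimal illustration: VhostVringOps, VringConfigData, and EventFd are stand-in names, not the crate's actual definitions.

    use std::io::Result;

    pub struct VringConfigData; // placeholder for the crate's vring layout struct
    pub struct EventFd;         // placeholder for an eventfd wrapper

    pub trait VhostVringOps {
        fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()>;
        fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()>;
        fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()>;
        fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()>;
        fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()>;
    }

    pub fn setup_vring<T: VhostVringOps>(
        dev: &T,
        queue_index: usize,
        size: u16,
        config_data: &VringConfigData,
        call: &EventFd,
        kick: &EventFd,
    ) -> Result<()> {
        dev.set_vring_num(queue_index, size)?;          // ring size first
        dev.set_vring_addr(queue_index, config_data)?;  // then descriptor/avail/used addresses
        dev.set_vring_base(queue_index, 0)?;            // start consuming at index 0
        dev.set_vring_call(queue_index, call)?;         // device -> driver interrupt eventfd
        dev.set_vring_kick(queue_index, kick)?;         // driver -> device notify eventfd
        Ok(())
    }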
/aosp_15_r20/external/crosvm/devices/src/virtio/vhost/ |
worker.rs:
  94   for (&queue_index, queue) in self.queues.iter() {    [in init()]
  96   .set_vring_num(queue_index, queue.size())    [in init()]
  102  queue_sizes[queue_index],    [in init()]
  104  queue_index,    [in init()]
  115  .find(|vring_base| vring_base.index == queue_index)    [in init()]
  122  .set_vring_base(queue_index, base)    [in init()]
  126  .set_vring_base(queue_index, 0)    [in init()]
  129  self.set_vring_call_for_entry(queue_index, queue.vector() as usize)?;    [in init()]
  131  .set_vring_kick(queue_index, queue.event())    [in init()]
  194  for (&queue_index, queue) in self.queues.iter() {    [in run()]
  [all …]
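worker.rs's init() restores each queue's ring position from a list of saved bases, falling back to 0 for queues with no saved state. A hedged sketch of that lookup; VringBase is a stand-in name, not crosvm's actual type:

    struct VringBase {
        index: usize,
        base: u16,
    }

    /// Saved base for this queue, or 0 if the queue has no saved state.
    fn base_for_queue(saved: &[VringBase], queue_index: usize) -> u16 {
        saved
            .iter()
            .find(|v| v.index == queue_index)
            .map(|v| v.base)
            .unwrap_or(0)
    }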
/aosp_15_r20/external/mesa3d/src/gallium/winsys/amdgpu/drm/ |
amdgpu_bo.h:
  207  unsigned queue_index)    [in get_fence_from_ring(), argument]
  210  assert(queue_index < AMDGPU_MAX_QUEUES);    [in get_fence_from_ring()]
  211  assert(fences->valid_fence_mask & BITFIELD_BIT(queue_index));    [in get_fence_from_ring()]
  213  uint_seq_no buffer_seq_no = fences->seq_no[queue_index];    [in get_fence_from_ring()]
  214  uint_seq_no latest_seq_no = aws->queues[queue_index].latest_seq_no;    [in get_fence_from_ring()]
  219  &aws->queues[queue_index].fences[buffer_seq_no % AMDGPU_FENCE_RING_SIZE];    [in get_fence_from_ring()]
  228  fences->valid_fence_mask &= ~BITFIELD_BIT(queue_index);    [in get_fence_from_ring()]
  232  static inline uint_seq_no pick_latest_seq_no(struct amdgpu_winsys *aws, unsigned queue_index,    [in pick_latest_seq_no(), argument]
  235  uint_seq_no latest = aws->queues[queue_index].latest_seq_no;    [in pick_latest_seq_no()]
  248  unsigned queue_index, uint_seq_no seq_no)    [in add_seq_no_to_list(), argument]
  [all …]
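In the entry above, each buffer records a per-queue sequence number plus a bitmask of which entries are valid, and fences live in a fixed ring per queue; the matching writes (amdgpu_set_bo_seq_no in amdgpu_cs.cpp below) set the seq_no and valid bit. A Rust re-sketch of the read side; the names, sizes, and exact staleness test are assumptions, not Mesa's verbatim logic:

    const FENCE_RING_SIZE: u32 = 64; // assumed ring capacity
    const MAX_QUEUES: usize = 8;     // assumed queue count

    struct BoFences {
        seq_no: [u32; MAX_QUEUES],
        valid_fence_mask: u32, // bit i set => seq_no[i] is meaningful
    }

    struct QueueState {
        latest_seq_no: u32,
        fences: [Option<u64>; FENCE_RING_SIZE as usize], // stand-in fence handles
    }

    fn fence_from_ring(bo: &mut BoFences, queues: &[QueueState], qi: usize) -> Option<u64> {
        if bo.valid_fence_mask & (1u32 << qi) == 0 {
            return None; // the original asserts validity instead of returning
        }
        let buffer_seq_no = bo.seq_no[qi];
        let latest = queues[qi].latest_seq_no;
        // If the buffer's seq_no fell out of the ring window, its fence slot
        // has been reused; drop the stale reference.
        if latest.wrapping_sub(buffer_seq_no) >= FENCE_RING_SIZE {
            bo.valid_fence_mask &= !(1u32 << qi);
            return None;
        }
        queues[qi].fences[(buffer_seq_no % FENCE_RING_SIZE) as usize]
    }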
amdgpu_cs.cpp:
  57    fence->queue_index = cs->queue_index;    [in amdgpu_fence_create()]
  913   cs->queue_index = INT_MAX;    [in amdgpu_cs_create()]
  916   cs->queue_index = 0;    [in amdgpu_cs_create()]
  925   cs->queue_index++;    [in amdgpu_cs_create()]
  927   assert(cs->queue_index < AMDGPU_MAX_QUEUES);    [in amdgpu_cs_create()]
  1182  add_seq_no_to_list(acs->aws, &cs->seq_no_dependencies, fence->queue_index,    [in amdgpu_cs_add_fence_dependency()]
  1208  static void amdgpu_set_bo_seq_no(unsigned queue_index, struct amdgpu_winsys_bo *bo,    [in amdgpu_set_bo_seq_no(), argument]
  1211  bo->fences.seq_no[queue_index] = new_queue_seq_no;    [in amdgpu_set_bo_seq_no()]
  1212  bo->fences.valid_fence_mask |= BITFIELD_BIT(queue_index);    [in amdgpu_set_bo_seq_no()]
  1248  unsigned queue_index;    [in amdgpu_cs_submit_ib(), local]
  [all …]
amdgpu_cs.h:
  126  unsigned queue_index;    [member]
  182  uint8_t queue_index; /* for non-imported fences */    [member]
/aosp_15_r20/external/rust/android-crates-io/crates/vhost/src/vhost_kern/ |
mod.rs:
  188  fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {    [in set_vring_num()]
  190  index: queue_index as u32,    [in set_vring_num()]
  205  fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {    [in set_vring_addr()]
  211  let vring_addr = config_data.to_vhost_vring_addr(queue_index, self.mem())?;    [in set_vring_addr()]
  224  fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {    [in set_vring_base()]
  226  index: queue_index as u32,    [in set_vring_base()]
  236  fn get_vring_base(&self, queue_index: usize) -> Result<u32> {    [in get_vring_base()]
  238  index: queue_index as u32,    [in get_vring_base()]
  251  fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {    [in set_vring_call()]
  253  index: queue_index as u32,    [in set_vring_call()]
  [all …]
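Every setter in the vhost_kern entry narrows the usize queue index into the u32 index field of the kernel's vhost_vring_state before issuing the ioctl. A sketch of that packing; the field layout follows the kernel UAPI, while the ioctl call itself is elided:

    /// Mirrors the kernel's `struct vhost_vring_state { unsigned index; unsigned num; }`.
    #[repr(C)]
    struct VhostVringState {
        index: u32, // queue index, as the kernel expects it
        num: u32,   // ring size for SET_VRING_NUM, ring base for SET_VRING_BASE, ...
    }

    fn vring_state(queue_index: usize, num: u32) -> VhostVringState {
        VhostVringState { index: queue_index as u32, num }
    }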
vdpa.rs:
  71   pub fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {    [in set_vring_addr()]
  79   index: queue_index as u32,    [in set_vring_addr()]
  155  fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()> {    [in set_vring_enable()]
  157  index: queue_index as u32,    [in set_vring_enable()]
  238  fn get_vring_group(&self, queue_index: u32) -> Result<u32> {    [in get_vring_group()]
  240  index: queue_index,    [in get_vring_group()]
net.rs:
  42  fn set_backend(&self, queue_index: usize, fd: Option<&File>) -> Result<()> {    [in set_backend()]
  44  index: queue_index as u32,    [in set_backend()]
/aosp_15_r20/external/crosvm/vhost/src/ |
lib.rs:
  189  fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {    [in set_vring_num()]
  191  index: queue_index as u32,    [in set_vring_num()]
  221  queue_index: usize,    [in set_vring_addr()]
  255  index: queue_index as u32,    [in set_vring_addr()]
  278  fn set_vring_base(&self, queue_index: usize, num: u16) -> Result<()> {    [in set_vring_base()]
  280  index: queue_index as u32,    [in set_vring_base()]
  298  fn get_vring_base(&self, queue_index: usize) -> Result<u16> {    [in get_vring_base()]
  300  index: queue_index as u32,    [in get_vring_base()]
  320  fn set_vring_call(&self, queue_index: usize, event: &Event) -> Result<()> {    [in set_vring_call()]
  322  index: queue_index as u32,    [in set_vring_call()]
  [all …]
net.rs:
  42  fn set_backend(&self, queue_index: usize, descriptor: Option<&T>) -> Result<()>;    [in set_backend()]
  65  fn set_backend(&self, queue_index: usize, event: Option<&T>) -> Result<()> {    [in set_backend()]
  67  index: queue_index as u32,    [in set_backend()]
/aosp_15_r20/external/rust/android-crates-io/crates/vhost/src/vhost_user/ |
master.rs:
  40   fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()>;    [in set_vring_enable()]
  247  fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {    [in set_vring_num()]
  249  if queue_index as u64 >= node.max_queue_num {    [in set_vring_num()]
  253  let val = VhostUserVringState::new(queue_index as u32, num.into());    [in set_vring_num()]
  259  fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {    [in set_vring_addr()]
  261  if queue_index as u64 >= node.max_queue_num    [in set_vring_addr()]
  267  let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data);    [in set_vring_addr()]
  273  fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {    [in set_vring_base()]
  275  if queue_index as u64 >= node.max_queue_num {    [in set_vring_base()]
  279  let val = VhostUserVringState::new(queue_index as u32, base.into());    [in set_vring_base()]
  [all …]
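Each per-queue request in master.rs guards queue_index against the negotiated maximum before building the wire message. A minimal sketch of that guard; max_queue_num comes from the handshake, and the error type here is a stand-in:

    /// Validate a queue index against the negotiated maximum, returning the
    /// narrowed u32 used in vhost-user wire messages.
    fn checked_queue_index(queue_index: usize, max_queue_num: u64) -> Result<u32, &'static str> {
        if queue_index as u64 >= max_queue_num {
            return Err("queue index beyond the negotiated maximum");
        }
        Ok(queue_index as u32)
    }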
/aosp_15_r20/external/crosvm/devices/src/virtio/vhost_user_frontend/ |
mod.rs:
  281  queue_index: usize,    [in activate_vring()]
  287  .set_vring_num(queue_index, queue.size())    [in activate_vring()]
  305  .set_vring_addr(queue_index, &config_data)    [in activate_vring()]
  309  .set_vring_base(queue_index, queue.next_avail_to_process())    [in activate_vring()]
  313  .set_vring_call(queue_index, irqfd)    [in activate_vring()]
  316  .set_vring_kick(queue_index, queue.event())    [in activate_vring()]
  323  .set_vring_enable(queue_index, true)    [in activate_vring()]
  331  fn deactivate_vring(&self, queue_index: usize) -> Result<u16> {    [in deactivate_vring()]
  336  .set_vring_enable(queue_index, false)    [in deactivate_vring()]
  341  .get_vring_base(queue_index)    [in deactivate_vring()]
  [all …]
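deactivate_vring() above disables the ring before reading back its base, so a later activation can resume from the point where the backend stopped. A hedged sketch of that handshake; Backend is a stand-in trait, not crosvm's actual type:

    use std::io::Result;

    trait Backend {
        fn set_vring_enable(&self, queue_index: usize, enable: bool) -> Result<()>;
        fn get_vring_base(&self, queue_index: usize) -> Result<u32>;
    }

    fn deactivate_vring(dev: &impl Backend, queue_index: usize) -> Result<u16> {
        dev.set_vring_enable(queue_index, false)?;   // stop the backend first
        let base = dev.get_vring_base(queue_index)?; // next index it would have processed
        Ok(base as u16)                              // virtio ring indices are 16-bit
    }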
/aosp_15_r20/external/rust/android-crates-io/crates/virtio-drivers/src/transport/ |
fake.rs:
  122  pub fn write_to_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16, data: &[u8]) {    [in write_to_queue()]
  123  let queue = &self.queues[queue_index as usize];    [in write_to_queue()]
  141  pub fn read_from_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16) -> Vec<u8> {    [in read_from_queue()]
  142  let queue = &self.queues[queue_index as usize];    [in read_from_queue()]
  166  queue_index: u16,    [in read_write_queue()]
  169  let queue = &self.queues[queue_index as usize];    [in read_write_queue()]
  180  pub fn wait_until_queue_notified(state: &Mutex<Self>, queue_index: u16) {    [in wait_until_queue_notified()]
  181  while !state.lock().unwrap().queues[usize::from(queue_index)]    [in wait_until_queue_notified()]
/aosp_15_r20/external/crosvm/third_party/vmm_vhost/src/ |
backend_client.rs:
  139  pub fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {    [in set_vring_num()]
  140  let val = VhostUserVringState::new(queue_index as u32, num.into());    [in set_vring_num()]
  146  pub fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {    [in set_vring_addr()]
  151  let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data);    [in set_vring_addr()]
  158  pub fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {    [in set_vring_base()]
  159  let val = VhostUserVringState::new(queue_index as u32, base.into());    [in set_vring_base()]
  166  pub fn get_vring_base(&self, queue_index: usize) -> Result<u32> {    [in get_vring_base()]
  167  let req = VhostUserVringState::new(queue_index as u32, 0);    [in get_vring_base()]
  178  pub fn set_vring_call(&self, queue_index: usize, event: &Event) -> Result<()> {    [in set_vring_call()]
  181  queue_index,    [in set_vring_call()]
  [all …]
/aosp_15_r20/external/perfetto/src/trace_processor/util/ |
bump_allocator.cc:
  80  uint64_t queue_index = ChunkIndexToQueueIndex(id.chunk_index);    [in Free(), local]
  81  PERFETTO_DCHECK(queue_index <= std::numeric_limits<size_t>::max());    [in Free()]
  82  Chunk& chunk = chunks_.at(static_cast<size_t>(queue_index));    [in Free()]
  88  uint64_t queue_index = ChunkIndexToQueueIndex(id.chunk_index);    [in GetPointer(), local]
  89  PERFETTO_CHECK(queue_index <= std::numeric_limits<size_t>::max());    [in GetPointer()]
  90  return chunks_.at(static_cast<size_t>(queue_index)).allocation.get() +    [in GetPointer()]
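Perfetto's allocator maps a monotonically growing chunk index onto a slot in a deque of live chunks. A Rust re-sketch under the assumption that ChunkIndexToQueueIndex subtracts the index of the first chunk still alive; its exact definition is not shown in the hits above:

    use std::collections::VecDeque;

    struct Chunk; // payload elided

    struct BumpAllocator {
        chunks: VecDeque<Chunk>,
        first_live_chunk_index: u64, // assumed: chunk id of chunks.front()
    }

    impl BumpAllocator {
        fn chunk(&self, chunk_index: u64) -> &Chunk {
            // Freed chunks are popped from the front, so live ids map to
            // deque slots by offset from the oldest surviving chunk.
            let queue_index = chunk_index - self.first_live_chunk_index;
            assert!(queue_index <= usize::MAX as u64); // mirrors the PERFETTO_DCHECK
            &self.chunks[queue_index as usize]
        }
    }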
/aosp_15_r20/external/cronet/base/trace_event/ |
trace_buffer.cc:
  112  for (size_t queue_index = queue_head_; queue_index != queue_tail_;    [in EstimateTraceMemoryOverhead(), local]
  113  queue_index = NextQueueIndex(queue_index)) {    [in EstimateTraceMemoryOverhead()]
  114  size_t chunk_index = recyclable_chunks_queue_[queue_index];    [in EstimateTraceMemoryOverhead()]
/aosp_15_r20/external/libchrome/base/trace_event/ |
trace_buffer.cc:
  109  for (size_t queue_index = queue_head_; queue_index != queue_tail_;    [in EstimateTraceMemoryOverhead(), local]
  110  queue_index = NextQueueIndex(queue_index)) {    [in EstimateTraceMemoryOverhead()]
  111  size_t chunk_index = recyclable_chunks_queue_[queue_index];    [in EstimateTraceMemoryOverhead()]
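Both copies of trace_buffer.cc (cronet and libchrome carry the same code) walk the recyclable-chunk queue as a circular buffer, advancing with a wrapping NextQueueIndex rather than a plain increment. A Rust re-sketch of that walk:

    /// Wrapping successor within a circular buffer of `capacity` slots.
    fn next_queue_index(queue_index: usize, capacity: usize) -> usize {
        (queue_index + 1) % capacity
    }

    /// Visit every entry between head (inclusive) and tail (exclusive).
    fn walk_queue(recyclable_chunks_queue: &[usize], head: usize, tail: usize) {
        let cap = recyclable_chunks_queue.len();
        let mut queue_index = head;
        while queue_index != tail {
            let chunk_index = recyclable_chunks_queue[queue_index];
            let _ = chunk_index; // ... account for this chunk here ...
            queue_index = next_queue_index(queue_index, cap);
        }
    }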
/aosp_15_r20/external/crosvm/devices/src/virtio/ |
virtio_pci_device.rs:
  644   .map(|((queue_index, queue), evt)| {    [in activate()]
  649   notify_base + queue_index as u64 * u64::from(NOTIFY_OFF_MULTIPLIER),    [in activate()]
  657   queue_index,    [in activate()]
  679   for (queue_index, evt) in self.queue_evts.iter_mut().enumerate() {    [in unregister_ioevents()]
  684   notify_base + queue_index as u64 * u64::from(NOTIFY_OFF_MULTIPLIER),    [in unregister_ioevents()]
  951   let queue_index = (offset - NOTIFICATION_BAR_OFFSET) as usize    [in write_bar(), local]
  953   trace!("write_bar notification fallback for queue {}", queue_index);    [in write_bar()]
  954   if let Some(evt) = self.queue_evts.get(queue_index) {    [in write_bar()]
  1418  .try_for_each(|((queue_index, _queue), evt)| {    [in restore()]
  1423  notify_base + queue_index as u64 * u64::from(NOTIFY_OFF_MULTIPLIER),    [in restore()]
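The address arithmetic repeated above follows the virtio-pci rule that each queue's notification doorbell sits at a fixed stride from the notify base. A sketch of the forward mapping and the write_bar()-style reverse mapping; the multiplier value of 4 is an assumption, not necessarily crosvm's constant:

    const NOTIFY_OFF_MULTIPLIER: u32 = 4; // assumed; crosvm's real value may differ

    /// Doorbell address for a queue: base + index * stride.
    fn notify_addr(notify_base: u64, queue_index: usize) -> u64 {
        notify_base + queue_index as u64 * u64::from(NOTIFY_OFF_MULTIPLIER)
    }

    /// Inverse mapping used when a guest write lands in the notification BAR.
    fn queue_for_notify_offset(offset: u64, notification_bar_offset: u64) -> usize {
        ((offset - notification_bar_offset) / u64::from(NOTIFY_OFF_MULTIPLIER)) as usize
    }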
net.rs:
  756  let mut queue_index = 0;    [in virtio_sleep(), local]
  764  queues.insert(queue_index + 0, worker.rx_queue);    [in virtio_sleep()]
  765  queues.insert(queue_index + 1, worker.tx_queue);    [in virtio_sleep()]
  766  queue_index += 2;    [in virtio_sleep()]
  769  queues.insert(queue_index, ctrl_queue);    [in virtio_sleep()]
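virtio_sleep() above lays out virtio-net queues as consecutive (rx, tx) pairs with the control queue last, matching the virtio-net convention. The implied index arithmetic as a sketch; with two pairs, rx/tx land at 0/1 and 2/3 and the control queue at 4:

    fn rx_queue_index(pair: usize) -> usize { pair * 2 }
    fn tx_queue_index(pair: usize) -> usize { pair * 2 + 1 }
    fn ctrl_queue_index(num_pairs: usize) -> usize { num_pairs * 2 }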
/aosp_15_r20/external/mesa3d/src/panfrost/vulkan/csf/ |
panvk_vX_queue.c:
  251  .queue_index = subqueue,    [in init_subqueue()]
  552  .queue_index = i,    [in panvk_queue_submit()]
  568  .queue_index = j,    [in panvk_queue_submit()]
  588  .queue_index = i,    [in panvk_queue_submit()]
  641  uint32_t subqueue = qsubmits[i].queue_index;    [in panvk_queue_submit()]
  652  qsubmits[i].queue_index);    [in panvk_queue_submit()]
  670  debug_sync_points[qsubmits[i].queue_index]++;    [in panvk_queue_submit()]
/aosp_15_r20/external/executorch/backends/vulkan/runtime/vk_api/ |
Adapter.cpp:
  213  (queues_[i].queue_index == compute_queue.queue_index)) {    [in return_queue()]
  238  queue_mutexes_[device_queue.queue_index % NUM_QUEUE_MUTEXES]);    [in submit_cmd()]
  339  << compute_queue.queue_index << ": " << compute_queue.handle    [in stringize()]
/aosp_15_r20/external/pytorch/aten/src/ATen/native/vulkan/api/ |
Adapter.cpp:
  303  (queues_[i].queue_index == compute_queue.queue_index)) {    [in return_queue()]
  328  queue_mutexes_[device_queue.queue_index % NUM_QUEUE_MUTEXES]);    [in submit_cmd()]
  423  << compute_queue.queue_index << ": " << compute_queue.handle    [in stringize()]
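Both Adapter.cpp copies (ExecuTorch and PyTorch share this Vulkan code) stripe queue submissions across a small fixed pool of mutexes instead of keeping one mutex per queue. A Rust re-sketch of the lock-striping pattern; the pool size is an assumed value:

    use std::sync::Mutex;

    const NUM_QUEUE_MUTEXES: usize = 4; // assumed pool size

    struct QueuePool {
        queue_mutexes: [Mutex<()>; NUM_QUEUE_MUTEXES],
    }

    impl QueuePool {
        fn submit(&self, queue_index: usize) {
            // Distinct queues may share a stripe, but any one queue's
            // submissions are always serialized.
            let _guard = self.queue_mutexes[queue_index % NUM_QUEUE_MUTEXES]
                .lock()
                .unwrap();
            // ... the actual queue submission would happen here ...
        }
    }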
/aosp_15_r20/external/perfetto/src/traced/probes/ftrace/test/data/android_raven_AOSP.MASTER_5.10.43/events/net/net_dev_xmit_timeout/ |
format:
  11  field:int queue_index; offset:16; size:4; signed:1;
  13  print fmt: "dev=%s driver=%s queue=%d", __get_str(name), __get_str(driver), REC->queue_index
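The ftrace format entry above declares queue_index as a signed 4-byte field at offset 16 of the raw net_dev_xmit_timeout record. A hedged sketch of pulling it out of a record buffer; little-endian is an assumption, since ftrace records use the host's byte order:

    /// Read the queue_index field (offset 16, size 4, signed) from a raw record.
    fn read_queue_index(record: &[u8]) -> Option<i32> {
        let bytes: [u8; 4] = record.get(16..20)?.try_into().ok()?;
        Some(i32::from_le_bytes(bytes))
    }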
/aosp_15_r20/external/tensorflow/tensorflow/core/kernels/batching_util/ |
shared_batch_scheduler_test.cc:
  888  const int queue_index = state.range(1);    [in BM_QueueSchedule(), local]
  889  Queue* queue = (*queues)[queue_index].get();    [in BM_QueueSchedule()]
  892  (*queue_labels)[queue_index]);    [in BM_QueueSchedule()]
  908  for (int queue_index : {0, 1, 2}) {    [in __anon3314b7bb1802()]
  909  b->ArgPair(10000, queue_index);    [in __anon3314b7bb1802()]