Lines matching the full identifier "mem" in kernel/dma/swiotlb.c (the Linux software IO TLB, i.e. bounce-buffer, implementation). Each entry shows the source line number, the matching line, and the enclosing function.

231 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_print_info() local
233 if (!mem->nslabs) { in swiotlb_print_info()
234 pr_warn("No low mem\n"); in swiotlb_print_info()
238 pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end, in swiotlb_print_info()
239 (mem->nslabs << IO_TLB_SHIFT) >> 20); in swiotlb_print_info()
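
The size printed at lines 238-239 follows from the slot geometry: each swiotlb slot is 1 << IO_TLB_SHIFT bytes (2 KiB in mainline, IO_TLB_SHIFT being 11), so nslabs << IO_TLB_SHIFT is the pool size in bytes and a further >> 20 converts to MiB (printed as "MB"). A minimal userspace model of the arithmetic; the slab count is an assumption matching the stock 64 MiB default pool:

    #include <stdio.h>

    #define IO_TLB_SHIFT 11                 /* slot size = 2 KiB */

    int main(void)
    {
        unsigned long nslabs = 32768;       /* assumed: 64 MiB / 2 KiB slots */
        unsigned long bytes = nslabs << IO_TLB_SHIFT;

        /* mirrors pr_info("mapped [mem ...] (%luMB)\n", ...) at line 238 */
        printf("mapped %lu slabs (%luMB)\n", nslabs, bytes >> 20);
        return 0;
    }
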
260 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_update_mem_attributes() local
263 if (!mem->nslabs || mem->late_alloc) in swiotlb_update_mem_attributes()
265 bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT); in swiotlb_update_mem_attributes()
266 set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT); in swiotlb_update_mem_attributes()
269 static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start, in swiotlb_init_io_tlb_pool() argument
275 mem->nslabs = nslabs; in swiotlb_init_io_tlb_pool()
276 mem->start = start; in swiotlb_init_io_tlb_pool()
277 mem->end = mem->start + bytes; in swiotlb_init_io_tlb_pool()
278 mem->late_alloc = late_alloc; in swiotlb_init_io_tlb_pool()
279 mem->nareas = nareas; in swiotlb_init_io_tlb_pool()
280 mem->area_nslabs = nslabs / mem->nareas; in swiotlb_init_io_tlb_pool()
282 for (i = 0; i < mem->nareas; i++) { in swiotlb_init_io_tlb_pool()
283 spin_lock_init(&mem->areas[i].lock); in swiotlb_init_io_tlb_pool()
284 mem->areas[i].index = 0; in swiotlb_init_io_tlb_pool()
285 mem->areas[i].used = 0; in swiotlb_init_io_tlb_pool()
288 for (i = 0; i < mem->nslabs; i++) { in swiotlb_init_io_tlb_pool()
289 mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i), in swiotlb_init_io_tlb_pool()
290 mem->nslabs - i); in swiotlb_init_io_tlb_pool()
291 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_init_io_tlb_pool()
292 mem->slots[i].alloc_size = 0; in swiotlb_init_io_tlb_pool()
293 mem->slots[i].pad_slots = 0; in swiotlb_init_io_tlb_pool()
297 mem->vaddr = vaddr; in swiotlb_init_io_tlb_pool()
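
The loop at lines 288-293 seeds the allocator's free lists: slots[i].list holds the number of contiguous free slots from i up to the end of its IO_TLB_SEGSIZE-slot segment, or to the end of the pool, whichever comes first. The slot search later consults these counts to skip over runs that are too short. A userspace sketch of just that seeding, with IO_TLB_SEGSIZE and io_tlb_offset() taken from the kernel and everything else reduced away:

    #include <stdio.h>

    #define IO_TLB_SEGSIZE 128

    /* offset of slot i within its segment, as in kernel/dma/swiotlb.c */
    static unsigned int io_tlb_offset(unsigned int i)
    {
        return i & (IO_TLB_SEGSIZE - 1);
    }

    int main(void)
    {
        unsigned int nslabs = 300;          /* deliberately not a multiple of 128 */
        unsigned int list[300];

        for (unsigned int i = 0; i < nslabs; i++) {
            unsigned int seg_left = IO_TLB_SEGSIZE - io_tlb_offset(i);
            unsigned int pool_left = nslabs - i;

            list[i] = seg_left < pool_left ? seg_left : pool_left;
        }

        /* slot 0 heads a full segment; slot 127 ends one; the tail
         * segment starting at 256 only has 300 - 256 = 44 slots */
        printf("%u %u %u %u\n", list[0], list[127], list[256], list[299]);
        return 0;
    }
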
303 * @mem: Software IO TLB allocator.
306 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool) in add_mem_pool() argument
309 spin_lock(&mem->lock); in add_mem_pool()
310 list_add_rcu(&pool->node, &mem->pools); in add_mem_pool()
311 mem->nslabs += pool->nslabs; in add_mem_pool()
312 spin_unlock(&mem->lock); in add_mem_pool()
314 mem->nslabs = pool->nslabs; in add_mem_pool()
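
Lines 309-312 and line 314 are the two variants of add_mem_pool(): with dynamic swiotlb (CONFIG_SWIOTLB_DYNAMIC) the pool is published on an RCU list under mem->lock and its slabs are added to the running total; without it there is only ever the default pool, so the total is simply assigned. A reduced pthread model of the dynamic branch, with a plain mutex and singly linked list standing in for the kernel's spinlock and RCU list:

    #include <pthread.h>
    #include <stdio.h>

    struct pool { unsigned long nslabs; struct pool *next; };

    struct tlb_mem {
        pthread_mutex_t lock;       /* stands in for mem->lock */
        struct pool *pools;         /* stands in for the RCU pool list */
        unsigned long nslabs;
    };

    /* model of lines 309-312: publish the pool, then grow the total */
    static void add_mem_pool(struct tlb_mem *mem, struct pool *pool)
    {
        pthread_mutex_lock(&mem->lock);
        pool->next = mem->pools;
        mem->pools = pool;          /* list_add_rcu() in the kernel */
        mem->nslabs += pool->nslabs;
        pthread_mutex_unlock(&mem->lock);
    }

    int main(void)
    {
        struct tlb_mem mem = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct pool a = { .nslabs = 32768 }, b = { .nslabs = 1024 };

        add_mem_pool(&mem, &a);
        add_mem_pool(&mem, &b);
        printf("total slabs: %lu\n", mem.nslabs);   /* 33792 */
        return 0;
    }
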
357 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_init_remap() local
398 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); in swiotlb_init_remap()
399 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); in swiotlb_init_remap()
400 if (!mem->slots) { in swiotlb_init_remap()
406 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), in swiotlb_init_remap()
408 if (!mem->areas) { in swiotlb_init_remap()
409 pr_warn("%s: Failed to allocate mem->areas.\n", __func__); in swiotlb_init_remap()
413 swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas); in swiotlb_init_remap()
414 add_mem_pool(&io_tlb_default_mem, mem); in swiotlb_init_remap()
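
Line 398 sizes the slot array with array_size(), the kernel's overflow-checked multiply, and page-aligns the result before handing it to memblock_alloc(). A hedged model of those two helpers (PAGE_SIZE assumed 4 KiB, the slot size assumed 24 bytes; the kernel's array_size() saturates to SIZE_MAX on overflow so the subsequent allocation fails instead of being silently undersized):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* overflow-checked multiply in the spirit of the kernel's array_size() */
    static size_t array_size(size_t n, size_t size)
    {
        size_t bytes;

        if (__builtin_mul_overflow(n, size, &bytes))
            return SIZE_MAX;        /* saturate; the allocator rejects it */
        return bytes;
    }

    int main(void)
    {
        size_t slot_size = 24;      /* assumed sizeof(*mem->slots) */
        size_t nslabs = 32768;

        printf("alloc_size = %zu\n",
               PAGE_ALIGN(array_size(slot_size, nslabs)));
        return 0;
    }
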
433 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_init_late() local
498 area_order = get_order(array_size(sizeof(*mem->areas), nareas)); in swiotlb_init_late()
499 mem->areas = (struct io_tlb_area *) in swiotlb_init_late()
501 if (!mem->areas) in swiotlb_init_late()
504 mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in swiotlb_init_late()
505 get_order(array_size(sizeof(*mem->slots), nslabs))); in swiotlb_init_late()
506 if (!mem->slots) in swiotlb_init_late()
511 swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true, in swiotlb_init_late()
513 add_mem_pool(&io_tlb_default_mem, mem); in swiotlb_init_late()
519 free_pages((unsigned long)mem->areas, area_order); in swiotlb_init_late()
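
Line 519 is the unwind path of swiotlb_init_late(): when the slots allocation at line 504 fails after the areas allocation at line 499 succeeded, the areas pages must be released before returning. A generic sketch of this goto-unwind idiom (names hypothetical, malloc/calloc standing in for __get_free_pages()):

    #include <stdlib.h>

    /* hypothetical two-step setup using the kernel's goto-unwind idiom */
    static int pool_setup(void **areas, void **slots)
    {
        *areas = malloc(4096);
        if (!*areas)
            return -1;                  /* -ENOMEM in the kernel */

        *slots = calloc(1, 8192);       /* __GFP_ZERO equivalent */
        if (!*slots)
            goto free_areas;
        return 0;

    free_areas:
        free(*areas);                   /* mirrors free_pages() at line 519 */
        *areas = NULL;
        return -1;
    }

    int main(void)
    {
        void *areas, *slots;

        return pool_setup(&areas, &slots);
    }
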
527 struct io_tlb_pool *mem = &io_tlb_default_mem.defpool; in swiotlb_exit() local
535 if (!mem->nslabs) in swiotlb_exit()
539 tbl_vaddr = (unsigned long)phys_to_virt(mem->start); in swiotlb_exit()
540 tbl_size = PAGE_ALIGN(mem->end - mem->start); in swiotlb_exit()
541 slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs)); in swiotlb_exit()
544 if (mem->late_alloc) { in swiotlb_exit()
545 area_order = get_order(array_size(sizeof(*mem->areas), in swiotlb_exit()
546 mem->nareas)); in swiotlb_exit()
547 free_pages((unsigned long)mem->areas, area_order); in swiotlb_exit()
549 free_pages((unsigned long)mem->slots, get_order(slots_size)); in swiotlb_exit()
551 memblock_free_late(__pa(mem->areas), in swiotlb_exit()
552 array_size(sizeof(*mem->areas), mem->nareas)); in swiotlb_exit()
553 memblock_free_late(mem->start, tbl_size); in swiotlb_exit()
554 memblock_free_late(__pa(mem->slots), slots_size); in swiotlb_exit()
557 memset(mem, 0, sizeof(*mem)); in swiotlb_exit()
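
The split at lines 544-554 exists because the default pool can come from two allocators: late_alloc marks memory obtained with __get_free_pages() after boot, which must go back via free_pages(), while boot-time memblock allocations are returned with memblock_free_late(). The memset at line 557 then zeroes the descriptor so nslabs reads as 0 and the pool is treated as absent. A reduced model of the dispatch, with free() standing in for both kernel release paths:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* stand-ins: the kernel frees late allocations with free_pages() and
     * boot-time allocations with memblock_free_late() */
    static void release_pages(void *p)    { free(p); }
    static void release_memblock(void *p) { free(p); }

    struct pool_model {
        void *slots, *areas;
        unsigned long nslabs;
        int late_alloc;
    };

    static void pool_exit(struct pool_model *mem)
    {
        if (!mem->nslabs)                   /* line 535: never initialized */
            return;

        if (mem->late_alloc) {              /* lines 544-549 */
            release_pages(mem->areas);
            release_pages(mem->slots);
        } else {                            /* lines 551-554 */
            release_memblock(mem->areas);
            release_memblock(mem->slots);
        }
        memset(mem, 0, sizeof(*mem));       /* line 557: nslabs reads 0 again */
    }

    int main(void)
    {
        struct pool_model m = {
            .slots = malloc(64), .areas = malloc(64),
            .nslabs = 32768, .late_alloc = 1,
        };

        pool_exit(&m);
        printf("nslabs after exit: %lu\n", m.nslabs);
        return 0;
    }
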
736 struct io_tlb_mem *mem = in swiotlb_dyn_alloc() local
741 default_nareas, mem->phys_limit, GFP_KERNEL); in swiotlb_dyn_alloc()
747 add_mem_pool(mem, pool); in swiotlb_dyn_alloc()
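
The truncated declaration at line 736 recovers the io_tlb_mem from the work_struct embedded in it; in the kernel it continues with container_of(work, struct io_tlb_mem, dyn_alloc). A self-contained model of that idiom, with the struct reduced to what the example needs:

    #include <stddef.h>
    #include <stdio.h>

    /* container_of in the kernel's spirit: recover the enclosing struct
     * from a pointer to one of its members */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };

    struct io_tlb_mem_model {
        unsigned long nslabs;
        struct work_struct dyn_alloc;   /* as passed to swiotlb_dyn_alloc() */
    };

    static void dyn_alloc(struct work_struct *work)
    {
        struct io_tlb_mem_model *mem =
            container_of(work, struct io_tlb_mem_model, dyn_alloc);

        printf("growing pool that has %lu slabs\n", mem->nslabs);
    }

    int main(void)
    {
        struct io_tlb_mem_model m = { .nslabs = 32768 };

        dyn_alloc(&m.dyn_alloc);
        return 0;
    }
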
779 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in __swiotlb_find_pool() local
783 list_for_each_entry_rcu(pool, &mem->pools, node) { in __swiotlb_find_pool()
860 enum dma_data_direction dir, struct io_tlb_pool *mem) in swiotlb_bounce() argument
862 int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT; in swiotlb_bounce()
863 phys_addr_t orig_addr = mem->slots[index].orig_addr; in swiotlb_bounce()
864 size_t alloc_size = mem->slots[index].alloc_size; in swiotlb_bounce()
866 unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start; in swiotlb_bounce()
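
Lines 862-866 translate a bounce-buffer physical address back into bookkeeping: the slot index is the offset of tlb_addr from the pool start divided by the 2 KiB slot size, and the CPU address used for the copy is the same offset applied to mem->vaddr. A userspace model of that translation (the base addresses are made up and never dereferenced):

    #include <stdio.h>

    #define IO_TLB_SHIFT 11

    int main(void)
    {
        unsigned long start = 0x80000000UL;     /* assumed pool phys base */
        unsigned long tlb_addr = start + 5 * (1UL << IO_TLB_SHIFT) + 100;

        int index = (int)((tlb_addr - start) >> IO_TLB_SHIFT);  /* line 862 */
        unsigned long vaddr_off = tlb_addr - start;             /* line 866 */

        printf("slot %d, byte offset %lu into the mapping\n",
               index, vaddr_off);
        return 0;
    }
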
937 static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index) in wrap_area_index() argument
939 if (index >= mem->area_nslabs) in wrap_area_index()
951 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots) in inc_used_and_hiwater() argument
955 new_used = atomic_long_add_return(nslots, &mem->total_used); in inc_used_and_hiwater()
956 old_hiwater = atomic_long_read(&mem->used_hiwater); in inc_used_and_hiwater()
960 } while (!atomic_long_try_cmpxchg(&mem->used_hiwater, in inc_used_and_hiwater()
964 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_used() argument
966 atomic_long_sub(nslots, &mem->total_used); in dec_used()
970 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots) in inc_used_and_hiwater() argument
973 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_used() argument
980 static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots) in inc_transient_used() argument
982 atomic_long_add(nslots, &mem->transient_nslabs); in inc_transient_used()
985 static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_transient_used() argument
987 atomic_long_sub(nslots, &mem->transient_nslabs); in dec_transient_used()
991 static void inc_transient_used(struct io_tlb_mem *mem, unsigned int nslots) in inc_transient_used() argument
994 static void dec_transient_used(struct io_tlb_mem *mem, unsigned int nslots) in dec_transient_used() argument
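
inc_used_and_hiwater() (lines 951-960) bumps the usage counter and then lifts the high-water mark with a compare-and-swap loop, so concurrent updaters can only ever move the mark upward; the empty bodies at 970-973 (and 991-994 for the transient counters) are the stubs compiled in when the respective statistics are configured out. A C11-atomics model of the real variant:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long total_used;
    static atomic_long used_hiwater;

    /* model of inc_used_and_hiwater(): bump usage, then raise the
     * high-water mark with a CAS loop that never moves it downward */
    static void inc_used_and_hiwater(long nslots)
    {
        long new_used = atomic_fetch_add(&total_used, nslots) + nslots;
        long old_hiwater = atomic_load(&used_hiwater);

        while (new_used > old_hiwater &&
               !atomic_compare_exchange_weak(&used_hiwater,
                                             &old_hiwater, new_used))
            ;   /* old_hiwater was reloaded by the failed CAS; retry */
    }

    int main(void)
    {
        inc_used_and_hiwater(8);
        inc_used_and_hiwater(4);
        printf("used=%ld hiwater=%ld\n",
               atomic_load(&total_used), atomic_load(&used_hiwater));
        return 0;
    }
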
1145 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_search_area() local
1151 list_for_each_entry_rcu(pool, &mem->pools, node) { in swiotlb_search_area()
1185 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots() local
1204 if (!mem->can_grow) in swiotlb_find_slots()
1207 schedule_work(&mem->dyn_alloc); in swiotlb_find_slots()
1227 inc_transient_used(mem, pool->nslabs); in swiotlb_find_slots()
1241 * Second, the load from mem->pools must be ordered before the same in swiotlb_find_slots()
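
The fragment at line 1241 appears to be part of a memory-ordering comment in swiotlb_find_slots(): with dynamically added pools, a reader must not observe a pool on mem->pools before the pool's contents are initialized, which the kernel gets from the RCU list primitives plus explicit barriers. A generic C11 publish/subscribe sketch of that kind of pairing (not the kernel's exact barriers):

    #include <stdatomic.h>
    #include <stdio.h>

    struct pool_model { unsigned long nslabs; };

    static _Atomic(struct pool_model *) head;

    /* writer: initialize first, then publish with release ordering, so a
     * reader that sees the pointer also sees nslabs (list_add_rcu-like) */
    static void publish(struct pool_model *p)
    {
        p->nslabs = 1024;
        atomic_store_explicit(&head, p, memory_order_release);
    }

    /* reader: acquire pairs with the release above (rcu_dereference-like) */
    static struct pool_model *lookup(void)
    {
        return atomic_load_explicit(&head, memory_order_acquire);
    }

    int main(void)
    {
        static struct pool_model p;

        publish(&p);
        printf("%lu\n", lookup() ? lookup()->nslabs : 0UL);
        return 0;
    }
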
1284 * @mem: Software IO TLB allocator.
1291 static unsigned long mem_used(struct io_tlb_mem *mem) in mem_used() argument
1293 return atomic_long_read(&mem->total_used); in mem_used()
1318 * @mem: Software IO TLB allocator.
1325 static unsigned long mem_used(struct io_tlb_mem *mem) in mem_used() argument
1332 list_for_each_entry_rcu(pool, &mem->pools, node) in mem_used()
1338 return mem_pool_used(&mem->defpool); in mem_used()
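
There are two mem_used() implementations: the one at lines 1291-1293 just reads the maintained total_used counter, while the fallback at 1325-1338 computes usage on demand by summing mem_pool_used() over every pool on the RCU list, or over only the default pool when dynamic pools are compiled out (line 1338). A reduced model of the walking variant:

    #include <stdio.h>

    struct pool_model { unsigned long used; struct pool_model *next; };

    /* model of the fallback mem_used(): sum usage over all pools */
    static unsigned long mem_used(struct pool_model *pools)
    {
        unsigned long used = 0;

        for (struct pool_model *p = pools; p; p = p->next)
            used += p->used;    /* mem_pool_used(pool) in the kernel */
        return used;
    }

    int main(void)
    {
        struct pool_model b = { .used = 7, .next = NULL };
        struct pool_model a = { .used = 35, .next = &b };

        printf("%lu\n", mem_used(&a));      /* 42 */
        return 0;
    }
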
1373 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_tbl_map_single() local
1382 if (!mem || !mem->nslabs) { in swiotlb_tbl_map_single()
1408 size, mem->nslabs, mem_used(mem)); in swiotlb_tbl_map_single()
1444 struct io_tlb_pool *mem) in swiotlb_release_slots() argument
1452 index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT; in swiotlb_release_slots()
1453 index -= mem->slots[index].pad_slots; in swiotlb_release_slots()
1454 nslots = nr_slots(mem->slots[index].alloc_size + offset); in swiotlb_release_slots()
1455 aindex = index / mem->area_nslabs; in swiotlb_release_slots()
1456 area = &mem->areas[aindex]; in swiotlb_release_slots()
1464 BUG_ON(aindex >= mem->nareas); in swiotlb_release_slots()
1468 count = mem->slots[index + nslots].list; in swiotlb_release_slots()
1477 mem->slots[i].list = ++count; in swiotlb_release_slots()
1478 mem->slots[i].orig_addr = INVALID_PHYS_ADDR; in swiotlb_release_slots()
1479 mem->slots[i].alloc_size = 0; in swiotlb_release_slots()
1480 mem->slots[i].pad_slots = 0; in swiotlb_release_slots()
1488 io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list; in swiotlb_release_slots()
1490 mem->slots[i].list = ++count; in swiotlb_release_slots()
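
Lines 1468-1490 are the heart of swiotlb_release_slots(), run under the owning area's lock: the returned slots are chained onto whatever free run immediately follows them, and the new run length is then propagated backwards through already-free predecessors in the same segment. A userspace model of that merge, assuming (as swiotlb guarantees) that an allocation never crosses an IO_TLB_SEGSIZE boundary; the kernel expresses the segment-end test with ALIGN() rather than io_tlb_offset():

    #include <stdio.h>

    #define IO_TLB_SEGSIZE 128

    static unsigned int io_tlb_offset(unsigned int i)
    {
        return i & (IO_TLB_SEGSIZE - 1);
    }

    static unsigned int list[IO_TLB_SEGSIZE];

    static void release(int index, int nslots)
    {
        unsigned int count;
        int i;

        /* free-run length just past the released range; 0 at a
         * segment boundary (lines 1464-1468) */
        count = io_tlb_offset(index + nslots) ? list[index + nslots] : 0;

        /* chain the released slots onto the following free run */
        for (i = index + nslots - 1; i >= index; i--)
            list[i] = ++count;

        /* extend the run backwards through free predecessors, staying
         * inside this segment (lines 1488-1490) */
        for (i = index - 1;
             i >= 0 && io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && list[i];
             i--)
            list[i] = ++count;
    }

    int main(void)
    {
        int i;

        /* segment state: 0-9 free (run of 10), 10-19 in use, 20-127 free */
        for (i = 0; i < 10; i++)
            list[i] = 10 - i;
        for (i = 20; i < IO_TLB_SEGSIZE; i++)
            list[i] = IO_TLB_SEGSIZE - i;

        release(10, 10);    /* whole segment is one 128-slot run again */
        printf("list[0]=%u list[10]=%u list[19]=%u\n",
               list[0], list[10], list[19]);
        return 0;
    }
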
1631 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in is_swiotlb_active() local
1633 return mem && mem->nslabs; in is_swiotlb_active()
1665 static unsigned long mem_transient_used(struct io_tlb_mem *mem) in mem_transient_used() argument
1667 return atomic_long_read(&mem->transient_nslabs); in mem_transient_used()
1672 struct io_tlb_mem *mem = data; in io_tlb_transient_used_get() local
1674 *val = mem_transient_used(mem); in io_tlb_transient_used_get()
1684 struct io_tlb_mem *mem = data; in io_tlb_used_get() local
1686 *val = mem_used(mem); in io_tlb_used_get()
1692 struct io_tlb_mem *mem = data; in io_tlb_hiwater_get() local
1694 *val = atomic_long_read(&mem->used_hiwater); in io_tlb_hiwater_get()
1700 struct io_tlb_mem *mem = data; in io_tlb_hiwater_set() local
1706 atomic_long_set(&mem->used_hiwater, val); in io_tlb_hiwater_set()
1714 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, in swiotlb_create_debugfs_files() argument
1717 mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs); in swiotlb_create_debugfs_files()
1718 if (!mem->nslabs) in swiotlb_create_debugfs_files()
1721 debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs); in swiotlb_create_debugfs_files()
1722 debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem, in swiotlb_create_debugfs_files()
1724 debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem, in swiotlb_create_debugfs_files()
1727 debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs, in swiotlb_create_debugfs_files()
1728 mem, &fops_io_tlb_transient_used); in swiotlb_create_debugfs_files()
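
The files created at lines 1721-1728 expose the pool statistics under debugfs (typically /sys/kernel/debug/swiotlb/ for the default allocator, with per-device pools in named subdirectories). io_tlb_used_hiwater is the only writable one (mode 0600 versus 0400), and the setter at lines 1700-1706 accepts only 0: the high-water mark can be reset but not forged. A sketch of that policy:

    #include <errno.h>
    #include <stdio.h>

    static long used_hiwater;

    /* model of io_tlb_hiwater_set(): only resetting to zero is allowed */
    static int hiwater_set(unsigned long long val)
    {
        if (val)
            return -EINVAL;
        used_hiwater = 0;
        return 0;
    }

    int main(void)
    {
        used_hiwater = 512;
        printf("write 7 -> %d\n", hiwater_set(7));      /* rejected */
        printf("write 0 -> %d, hiwater now %ld\n",
               hiwater_set(0), used_hiwater);
        return 0;
    }
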
1742 static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem, in swiotlb_create_debugfs_files() argument
1753 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_alloc() local
1759 if (!mem) in swiotlb_alloc()
1795 struct io_tlb_mem *mem = rmem->priv; in rmem_swiotlb_device_init() local
1811 if (!mem) { in rmem_swiotlb_device_init()
1814 mem = kzalloc(sizeof(*mem), GFP_KERNEL); in rmem_swiotlb_device_init()
1815 if (!mem) in rmem_swiotlb_device_init()
1817 pool = &mem->defpool; in rmem_swiotlb_device_init()
1821 kfree(mem); in rmem_swiotlb_device_init()
1829 kfree(mem); in rmem_swiotlb_device_init()
1837 mem->force_bounce = true; in rmem_swiotlb_device_init()
1838 mem->for_alloc = true; in rmem_swiotlb_device_init()
1840 spin_lock_init(&mem->lock); in rmem_swiotlb_device_init()
1841 INIT_LIST_HEAD_RCU(&mem->pools); in rmem_swiotlb_device_init()
1843 add_mem_pool(mem, pool); in rmem_swiotlb_device_init()
1845 rmem->priv = mem; in rmem_swiotlb_device_init()
1847 swiotlb_create_debugfs_files(mem, rmem->name); in rmem_swiotlb_device_init()
1850 dev->dma_io_tlb_mem = mem; in rmem_swiotlb_device_init()
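
rmem_swiotlb_device_init() wires a device to a restricted DMA pool declared in the device tree: the first device that attaches (rmem->priv still NULL, line 1811) allocates and initializes the io_tlb_mem, forces bouncing through it (line 1837), and caches it in rmem->priv (line 1845); every later device simply inherits the cached allocator at line 1850. A reduced model of that first-caller-initializes pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct mem_model  { int force_bounce; };
    struct rmem_model { struct mem_model *priv; };
    struct dev_model  { struct mem_model *dma_io_tlb_mem; };

    static int device_init(struct rmem_model *rmem, struct dev_model *dev)
    {
        struct mem_model *mem = rmem->priv;

        if (!mem) {                     /* first device: set everything up */
            mem = calloc(1, sizeof(*mem));
            if (!mem)
                return -1;              /* -ENOMEM in the kernel */
            mem->force_bounce = 1;      /* line 1837 */
            rmem->priv = mem;           /* line 1845: cached for later devices */
        }
        dev->dma_io_tlb_mem = mem;      /* line 1850 */
        return 0;
    }

    int main(void)
    {
        struct rmem_model rmem = { 0 };
        struct dev_model d1, d2;

        device_init(&rmem, &d1);
        device_init(&rmem, &d2);
        printf("shared pool: %s\n",
               d1.dma_io_tlb_mem == d2.dma_io_tlb_mem ? "yes" : "no");
        return 0;
    }
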