Lines matching the identifier "asid" (generic ASID allocator)
/*
 * Generic ASID allocator.
 */
#include <asm/asid.h>
#define asid2idx(info, asid)	(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
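
asid2idx() drops the generation held in an ASID's upper bits and scales the remainder down to a bitmap index; ctxt_shift reflects that ASIDs are handed out in power-of-two blocks per context. The listing omits the macros this one pairs with; below is a plausible sketch, modelled on the arm64 allocator this code generalizes (all four names are assumptions taken from that source, not from the matched lines):

/* The low (info)->bits bits hold the ASID proper; bits above are the generation. */
#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))

/* Number of allocatable ASID blocks, and the inverse of asid2idx(). */
#define NUM_CTXT_ASIDS(info)		(ASID_FIRST_VERSION(info) >> (info)->ctxt_shift)
#define idx2asid(info, idx)		(((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))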
/* In flush_context(): */
    u64 asid;

    /* Update the list of reserved ASIDs and the ASID bitmap. */
        asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
        /* ... ASID, as this is the only trace we have of ... */
        if (asid == 0)
            asid = reserved_asid(info, i);
        __set_bit(asid2idx(info, asid), info->map);
        reserved_asid(info, i) = asid;
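
These lines are the heart of the rollover path: each CPU's active ASID is claimed back into a fresh bitmap so that currently running tasks survive the generation bump. For context, a sketch of the full function, reconstructed from the arm64-derived allocator (bitmap_clear(), the for_each_possible_cpu() loop and the flush_pending mask are assumptions based on that source, not shown in the listing):

static void flush_context(struct asid_info *info)
{
    int i;
    u64 asid;

    /* Update the list of reserved ASIDs and the ASID bitmap. */
    bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));

    for_each_possible_cpu(i) {
        asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
        /*
         * If this CPU has already been through a rollover, but
         * hasn't run another task in the meantime, we must preserve
         * its reserved ASID, as this is the only trace we have of
         * the process it is still running.
         */
        if (asid == 0)
            asid = reserved_asid(info, i);
        __set_bit(asid2idx(info, asid), info->map);
        reserved_asid(info, i) = asid;
    }

    /* Queue a TLB invalidation for each CPU to perform on next context switch. */
    cpumask_setall(&info->flush_pending);
}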
static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
                                       u64 newasid)
    /*
     * ... (i.e. the same ASID in the current generation) but we can't
     * ... of the old ASID are updated to reflect the mm. Failure to do
     * so could result in us missing the reserved ASID in a future ...
     */
    if (reserved_asid(info, cpu) == asid) {
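
The comment fragments above belong to one explanation: after a rollover, a still-live ASID may be recorded as reserved on several CPUs, and every copy must be rewritten to its new-generation value before the loop may exit. A sketch of the complete helper under the same assumptions (the hit flag and the per-CPU loop come from the arm64 original):

static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
                                       u64 newasid)
{
    int cpu;
    bool hit = false;

    /*
     * Iterate over the set of reserved ASIDs looking for a match.
     * If we find one, then we can update our mm to use newasid
     * (i.e. the same ASID in the current generation) but we can't
     * exit the loop early, since we need to ensure that all copies
     * of the old ASID are updated to reflect the mm. Failure to do
     * so could result in us missing the reserved ASID in a future
     * generation.
     */
    for_each_possible_cpu(cpu) {
        if (reserved_asid(info, cpu) == asid) {
            hit = true;
            reserved_asid(info, cpu) = newasid;
        }
    }

    return hit;
}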
/* In new_context(): */
    u64 asid = atomic64_read(pasid);

    if (asid != 0) {
        u64 newasid = generation | (asid & ~ASID_MASK(info));

        /* If our current ASID was active during a rollover, we ... */
        if (check_update_reserved_asid(info, asid, newasid))

        /* We had a valid ASID in a previous life, so try to re-use ... */
        if (!__test_and_set_bit(asid2idx(info, asid), info->map))

    /*
     * Allocate a free ASID. If we can't find one, take a note of the ...
     * always count from ASID #2 (index 1), as we use ASID #0 when setting ...
     */
    asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
    if (asid != NUM_CTXT_ASIDS(info))

    asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

    __set_bit(asid, info->map);
    cur_idx = asid;
    return idx2asid(info, asid) | generation;
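
Read together, the matched lines give the allocation policy: first try to carry the old ASID into the new generation, then scan the bitmap from the last allocation point, and only when the bitmap is exhausted bump the generation and flush. A sketch of the whole function (the static cur_idx scan hint, the set_asid label and the generation bump are assumptions modelled on the arm64 original):

static u64 new_context(struct asid_info *info, atomic64_t *pasid,
                       struct mm_struct *mm)
{
    static u32 cur_idx = 1;
    u64 asid = atomic64_read(pasid);
    u64 generation = atomic64_read(&info->generation);

    if (asid != 0) {
        u64 newasid = generation | (asid & ~ASID_MASK(info));

        /* If our current ASID was active during a rollover, keep it. */
        if (check_update_reserved_asid(info, asid, newasid))
            return newasid;

        /* We had a valid ASID in a previous life; try to re-use it. */
        if (!__test_and_set_bit(asid2idx(info, asid), info->map))
            return newasid;
    }

    /* Scan for a free slot, resuming from the last allocation point. */
    asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
    if (asid != NUM_CTXT_ASIDS(info))
        goto set_asid;

    /* Out of ASIDs: bump the generation and reclaim inactive ones. */
    generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
                                             &info->generation);
    flush_context(info);

    /* We have more ASIDs than CPUs, so this will always succeed. */
    asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
    __set_bit(asid, info->map);
    cur_idx = asid;
    return idx2asid(info, asid) | generation;
}

Resuming the search at cur_idx rather than at the start of the map avoids rescanning the already-dense low bits on every allocation.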
/*
 * Generate a new ASID for the context.
 *
 * @pasid: Pointer to the current ASID batch allocated. It will be updated
 * with the new ASID batch.
 */

/* In asid_new_context(): */
    u64 asid;

    /* Check that our ASID belongs to the current generation. */
    asid = atomic64_read(pasid);
    if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
        asid = new_context(info, pasid, mm);
        atomic64_set(pasid, asid);
    }

    atomic64_set(&active_asid(info, cpu), asid);
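
The XOR-and-shift test compares only the bits above info->bits, i.e. the generation: a non-zero result means the ASID was minted before the latest rollover and must be replaced. A sketch of the slow path in full (the spinlock, the flush_pending test and the flush_cpu_ctxt_cb callback are assumptions based on the arm64-derived code):

void asid_new_context(struct asid_info *info, atomic64_t *pasid,
                      unsigned int cpu, struct mm_struct *mm)
{
    unsigned long flags;
    u64 asid;

    raw_spin_lock_irqsave(&info->lock, flags);
    /* Check that our ASID belongs to the current generation. */
    asid = atomic64_read(pasid);
    if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
        asid = new_context(info, pasid, mm);
        atomic64_set(pasid, asid);
    }

    /* Perform any local TLB maintenance queued by flush_context(). */
    if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
        info->flush_cpu_ctxt_cb();

    atomic64_set(&active_asid(info, cpu), asid);
    raw_spin_unlock_irqrestore(&info->lock, flags);
}

In the arm64 original this is only the slow path; callers first attempt a lockless fast path that re-uses the current ASID whenever the generation still matches.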
/*
 * Initialize the ASID allocator
 *
 * @info: Pointer to the asid allocator structure
 */
/* In asid_allocator_init(): ... one more ASID than CPUs. ASID #0 is always reserved. */
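
The initializer wires these pieces together. A sketch reconstructed from the arm64-derived series (the parameter list, the ilog2() conversion of ASIDs-per-context and the kcalloc'd bitmap are all assumptions based on that source; only the comment line above appears in the listing):

int asid_allocator_init(struct asid_info *info,
                        u32 bits, unsigned int asid_per_ctxt,
                        void (*flush_cpu_ctxt_cb)(void))
{
    info->bits = bits;
    info->ctxt_shift = ilog2(asid_per_ctxt);
    info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
    /*
     * Expect allocation after rollover to fail if we don't have at least
     * one more ASID than CPUs. ASID #0 is always reserved.
     */
    WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
    atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
    info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
                        sizeof(*info->map), GFP_KERNEL);
    if (!info->map)
        return -ENOMEM;

    raw_spin_lock_init(&info->lock);

    return 0;
}

The WARN_ON captures the invariant stated in the matched comment: a rollover can only succeed if there is at least one more allocatable ASID than CPUs, with ASID #0 permanently reserved.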