Lines Matching full:pte
21 #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
22 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
35 /* We never clear the high word of the pte */
53 * Location of the PFN in the PTE. Most 32-bit platforms use the same
111 /* Bits to mask out from a PMD to get to the PTE page */
120 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
125 * level has 2048 entries and the second level has 512 64-bit PTE entries.
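The two comment fragments above describe two geometries: 32-bit PTEs with a 1024-entry pgdir and 1024-entry PTE pages, and 64-bit PTEs with a 2048-entry first level and 512-entry second level. As a quick sanity check of that arithmetic (illustrative only, assuming 4KB pages; the macro and asserts below are not from the header):

#include <stdint.h>

/* Illustrative geometry check, assuming 4KB pages (not from the header). */
#define EXAMPLE_PAGE_SIZE 4096ULL

/* 32-bit PTEs: 1024-entry pgdir, 4096 / 4 = 1024 PTEs per page. */
_Static_assert(1024 * (EXAMPLE_PAGE_SIZE / sizeof(uint32_t)) * EXAMPLE_PAGE_SIZE == 1ULL << 32,
	       "1024 pgdir entries x 1024 PTEs x 4KB = 4GB of address space");

/* 64-bit PTEs: 2048-entry first level, 4096 / 8 = 512 PTEs per page. */
_Static_assert(2048 * (EXAMPLE_PAGE_SIZE / sizeof(uint64_t)) * EXAMPLE_PAGE_SIZE == 1ULL << 32,
	       "2048 first-level entries x 512 PTEs x 4KB = 4GB of address space");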
212 * Bits in a linux-style PTE. These match the bits in the
213 * (hardware-defined) PowerPC PTE as closely as possible.
250 * PTE updates. This function is called whenever an existing
251 * valid PTE is updated. This does -not- include set_pte_at()
252 * which nowadays only sets a new PTE.
255 * and the PTE may be either 32 or 64 bit wide. In the latter case,
256 * when using atomic updates, only the low part of the PTE is
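The fragments above describe the contract of the platform's pte_update(): an atomic read-modify-write on an existing valid PTE, touching only the low word when PTEs are 64-bit wide. A rough sketch of that pattern, using kernel-style READ_ONCE()/cmpxchg() as a stand-in for the real lwarx/stwcx. sequence (the name and plain unsigned long operand are illustrative, not the header's implementation):

/*
 * Sketch only: an atomic clear-then-set on the (low word of a) PTE,
 * mirroring the contract described in the comment above.
 */
static inline unsigned long example_pte_update(unsigned long *p,
					       unsigned long clr,
					       unsigned long set)
{
	unsigned long old, new;

	do {
		old = READ_ONCE(*p);
		new = (old & ~clr) | set;
	} while (cmpxchg(p, old, new) != old);

	return old;	/* callers typically need the old dirty/accessed bits */
}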
365 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
368 static inline int pte_swp_exclusive(pte_t pte)
370 return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
373 static inline pte_t pte_swp_mkexclusive(pte_t pte)
375 return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
378 static inline pte_t pte_swp_clear_exclusive(pte_t pte)
380 return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
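Taken together with the >> 3 encoding above, the exclusive-bit helpers operate on a PTE that is not present but carries a swap entry in its upper bits. A small illustration of how they compose (the helper name is made up, and __swp_entry_to_pte() is assumed to be the usual inverse of __pte_to_swp_entry(); neither appears in this listing):

/*
 * Illustration only: re-encode a swap entry into a PTE and tag it as
 * exclusively owned by this mapping.
 */
static inline pte_t example_make_exclusive_swap_pte(swp_entry_t entry)
{
	pte_t swp_pte = __swp_entry_to_pte(entry);	/* assumed inverse of the >> 3 encoding */

	return pte_swp_mkexclusive(swp_pte);		/* sets _PAGE_SWP_EXCLUSIVE */
}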
383 /* Generic accessors to PTE bits */
384 static inline bool pte_read(pte_t pte)
386 return !!(pte_val(pte) & _PAGE_READ);
389 static inline bool pte_write(pte_t pte)
391 return !!(pte_val(pte) & _PAGE_WRITE);
394 static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
395 static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
396 static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
397 static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
398 static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
400 static inline int pte_present(pte_t pte)
402 return pte_val(pte) & _PAGE_PRESENT;
405 static inline bool pte_hw_valid(pte_t pte)
407 return pte_val(pte) & _PAGE_PRESENT;
410 static inline bool pte_hashpte(pte_t pte)
412 return !!(pte_val(pte) & _PAGE_HASHPTE);
415 static inline bool pte_ci(pte_t pte)
417 return !!(pte_val(pte) & _PAGE_NO_CACHE);
425 static inline bool pte_access_permitted(pte_t pte, bool write)
431 if (!pte_present(pte) || !pte_read(pte))
434 if (write && !pte_write(pte))
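The listing only shows lines containing "pte", so the early-return bodies of pte_access_permitted() are missing. Filled out, the check has roughly this shape (a reconstruction from the fragments above, not a verbatim copy of the header):

static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/* A PTE that is not present or not readable never grants access. */
	if (!pte_present(pte) || !pte_read(pte))
		return false;

	/* Write access additionally requires the write permission bit. */
	if (write && !pte_write(pte))
		return false;

	return true;
}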
452 /* Generic modifiers for PTE bits */
453 static inline pte_t pte_wrprotect(pte_t pte)
455 return __pte(pte_val(pte) & ~_PAGE_WRITE);
458 static inline pte_t pte_exprotect(pte_t pte)
460 return __pte(pte_val(pte) & ~_PAGE_EXEC);
463 static inline pte_t pte_mkclean(pte_t pte)
465 return __pte(pte_val(pte) & ~_PAGE_DIRTY);
468 static inline pte_t pte_mkold(pte_t pte)
470 return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
473 static inline pte_t pte_mkexec(pte_t pte)
475 return __pte(pte_val(pte) | _PAGE_EXEC);
478 static inline pte_t pte_mkpte(pte_t pte)
480 return pte;
483 static inline pte_t pte_mkwrite_novma(pte_t pte)
488 return __pte(pte_val(pte) | _PAGE_RW);
491 static inline pte_t pte_mkdirty(pte_t pte)
493 return __pte(pte_val(pte) | _PAGE_DIRTY);
496 static inline pte_t pte_mkyoung(pte_t pte)
498 return __pte(pte_val(pte) | _PAGE_ACCESSED);
501 static inline pte_t pte_mkspecial(pte_t pte)
503 return __pte(pte_val(pte) | _PAGE_SPECIAL);
506 static inline pte_t pte_mkhuge(pte_t pte)
508 return pte;
511 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
513 return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
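These modifiers are pure functions on the PTE value, so callers compose them and write the result back in one go; pte_modify() keeps the bits covered by _PAGE_CHG_MASK and swaps in the new protection bits. A hedged usage sketch (the helper name is made up; pte_modify() and the other accessors are the ones listed above):

/*
 * Sketch of typical use: downgrade an existing mapping to clean,
 * read-only, not-recently-used, then apply a new protection set.
 */
static inline pte_t example_downgrade(pte_t pte, pgprot_t newprot)
{
	pte = pte_wrprotect(pte);	/* clear _PAGE_WRITE */
	pte = pte_mkclean(pte);		/* clear _PAGE_DIRTY */
	pte = pte_mkold(pte);		/* clear _PAGE_ACCESSED */

	/* Keep the bits in _PAGE_CHG_MASK, take permissions from newprot. */
	return pte_modify(pte, newprot);
}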
518 /* This low level function performs the actual PTE insertion
519 * Setting the PTE depends on the MMU type and other factors.
524 * and see we need to keep track that this PTE needs invalidating.
526 * Second case is 32-bit with 64-bit PTE. In this case, we
529 * in the hash code, to pre-invalidate if the PTE was already hashed,
537 * per-CPU PTE such as a kmap_atomic, we also do a simple update preserving
541 pte_t *ptep, pte_t pte, int percpu)
545 (pte_val(pte) & ~_PAGE_HASHPTE));
552 "r" (pte) : "memory");
554 pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
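The fragments above come from __set_pte_at(). In the 32-bit-kernel / 64-bit-PTE case, the two halves are stored separately with a barrier in between so the MMU never observes a half-written but valid entry: the high word, which is never cleared, goes first, and the low word carrying _PAGE_PRESENT goes last. A simplified illustration of that ordering, assuming big-endian word layout and using wmb() in place of the eieio in the real inline assembly (this is not the kernel function):

/*
 * Illustration only: store the high word of a 64-bit PTE first, then
 * the low word that holds the valid and permission bits, with a write
 * barrier between the two stores.  Assumes big-endian layout (high
 * word at offset 0), as on these 32-bit PowerPC platforms.
 */
static inline void example_store_pte64(u64 *slot, u64 new_pte)
{
	u32 *w = (u32 *)slot;

	w[0] = (u32)(new_pte >> 32);	/* high word: never holds the valid bit */
	wmb();				/* order the two stores (eieio in the real code) */
	w[1] = (u32)new_pte;		/* low word: flags including _PAGE_PRESENT */
}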