Lines Matching +full:8 +full:- +full:cpu (matched lines are from arch/x86/include/asm/segment.h)
1 /* SPDX-License-Identifier: GPL-2.0 */
14 ((((base) & _AC(0xff000000,ULL)) << (56-24)) | \
16 (((limit) & _AC(0x000f0000,ULL)) << (48-16)) | \
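Only two masked terms of the GDT_ENTRY() macro appear among the matches above. As a rough, self-contained sketch (not copied from segment.h; gdt_entry() is an illustrative helper name), the descriptor packing those terms implement looks like this:

#include <stdint.h>
#include <stdio.h>

/*
 * Pack an 8-byte x86 segment descriptor: base[31:24] goes to bits 56-63,
 * the access byte and granularity flags to bits 40-47 and 52-55,
 * limit[19:16] to bits 48-51, base[23:0] to bits 16-39, limit[15:0] to bits 0-15.
 */
static uint64_t gdt_entry(uint64_t flags, uint64_t base, uint64_t limit)
{
	return ((base  & 0xff000000ULL) << (56 - 24)) |
	       ((flags & 0x0000f0ffULL) << 40)        |
	       ((limit & 0x000f0000ULL) << (48 - 16)) |
	       ((base  & 0x00ffffffULL) << 16)        |
	        (limit & 0x0000ffffULL);
}

int main(void)
{
	/* Flat 4 GiB ring-0 code segment: access byte 0x9a, flags nibble 0xc (G, D) */
	printf("0x%016llx\n", (unsigned long long)gdt_entry(0xc09a, 0, 0xfffff));
	return 0;
}

The printed value, 0x00cf9a000000ffff, is the classic flat 32-bit code descriptor.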
25 #define __BOOT_CS (GDT_ENTRY_BOOT_CS*8)
26 #define __BOOT_DS (GDT_ENTRY_BOOT_DS*8)
27 #define __BOOT_TSS (GDT_ENTRY_BOOT_TSS*8)
61 * The layout of the per-CPU GDT under Linux:
63 * 0 - null <=== cacheline #1
64 * 1 - reserved
65 * 2 - reserved
66 * 3 - reserved
68 * 4 - unused <=== cacheline #2
69 * 5 - unused
71 * ------- start of TLS (Thread-Local Storage) segments:
73 * 6 - TLS segment #1 [ glibc's TLS segment ]
74 * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
75 * 8 - TLS segment #3 <=== cacheline #3
76 * 9 - reserved
77 * 10 - reserved
78 * 11 - reserved
80 * ------- start of kernel segments:
82 * 12 - kernel code segment <=== cacheline #4
83 * 13 - kernel data segment
84 * 14 - default user CS
85 * 15 - default user DS
86 * 16 - TSS <=== cacheline #5
87 * 17 - LDT
88 * 18 - PNPBIOS support (16->32 gate)
89 * 19 - PNPBIOS support
90 * 20 - PNPBIOS support <=== cacheline #6
91 * 21 - PNPBIOS support
92 * 22 - PNPBIOS support
93 * 23 - APM BIOS support
94 * 24 - APM BIOS support <=== cacheline #7
95 * 25 - APM BIOS support
97 * 26 - ESPFIX small SS
98 * 27 - per-cpu [ offset to per-cpu data area ]
99 * 28 - VDSO getcpu
100 * 29 - unused
101 * 30 - unused
102 * 31 - TSS for double fault handler
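The selector #defines below all follow the same pattern: the GDT index times 8 (each descriptor is 8 bytes wide), with the requested privilege level (RPL) in the low two bits and bit 2 (the table indicator) left clear for GDT selectors. A minimal sketch using the 32-bit indices from the layout comment above; SEL() is an illustrative helper, not something defined in this file:

#include <stdio.h>

/* selector = (GDT index << 3) | RPL; bit 2 (TI) stays 0 for GDT entries */
#define SEL(index, rpl)	(((index) << 3) | (rpl))

int main(void)
{
	printf("kernel CS = 0x%02x\n", SEL(12, 0));	/* 12*8     = 0x60 */
	printf("user   CS = 0x%02x\n", SEL(14, 3));	/* 14*8 + 3 = 0x73 */
	printf("user   DS = 0x%02x\n", SEL(15, 3));	/* 15*8 + 3 = 0x7b */
	return 0;
}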
105 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
135 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
136 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
137 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
138 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
140 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS*8)
143 #define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32*8)
145 #define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16*8)
151 #define PNP_DS (GDT_ENTRY_PNPBIOS_DS*8)
153 #define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1*8)
155 #define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2*8)
158 # define __KERNEL_PERCPU (GDT_ENTRY_PERCPU*8)
163 #define __CPUNODE_SEG (GDT_ENTRY_CPUNODE*8 + 3)
165 #else /* 64-bit: */
177 * GDT layout to get 64-bit SYSCALL/SYSRET support right. SYSRET hardcodes
180 * if returning to 32-bit userspace: cs = STAR.SYSRET_CS,
181 * if returning to 64-bit userspace: cs = STAR.SYSRET_CS+16,
183 * ss = STAR.SYSRET_CS+8 (in either case)
185 * thus USER_DS should be between 32-bit and 64-bit code selectors:
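A minimal sketch of the arithmetic behind that constraint. The 64-bit indices used here (user32 CS = 4, user DS = 5, user CS = 6) and the assumption that MSR_STAR's SYSRET_CS field holds __USER32_CS are not part of this excerpt, so treat them as assumptions rather than quotes from the file:

#include <assert.h>

#define SKETCH_USER32_CS	(4 * 8 + 3)	/* 0x23 */
#define SKETCH_USER_DS		(5 * 8 + 3)	/* 0x2b */
#define SKETCH_USER_CS		(6 * 8 + 3)	/* 0x33 */

int main(void)
{
	unsigned int sysret_cs = SKETCH_USER32_CS;	/* value assumed to be in STAR.SYSRET_CS */

	assert(sysret_cs + 16 == SKETCH_USER_CS);	/* returning to 64-bit userspace: cs */
	assert(sysret_cs + 8  == SKETCH_USER_DS);	/* ss in either case */
	return 0;
}

Because SYSRET only adds fixed offsets, the 32-bit user code, user data and 64-bit user code descriptors must sit in exactly this order, which is what the comment above requires.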
192 #define GDT_ENTRY_TSS 8
210 * expressed with the +3 value for user-space selectors:
212 #define __KERNEL32_CS (GDT_ENTRY_KERNEL32_CS*8)
213 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
214 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
215 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8 + 3)
216 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
217 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
218 #define __CPUNODE_SEG (GDT_ENTRY_CPUNODE*8 + 3)
228 #define GDT_SIZE (GDT_ENTRIES*8)
230 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES* 8)
232 /* Bit size and mask of CPU number stored in the per CPU data (and TSC_AUX) */
238 /* Helper functions to store/load CPU and node numbers */
240 static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
242 return (node << VDSO_CPUNODE_BITS) | cpu;
245 static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
250 * Load CPU and node number from the GDT. LSL is faster than RDTSCP
262 if (cpu)
263 *cpu = (p & VDSO_CPUNODE_MASK);
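A minimal user-space sketch of the round trip implemented by vdso_encode_cpunode() and vdso_read_cpunode(). The 12-bit CPU field is an assumption here, since the VDSO_CPUNODE_BITS/VDSO_CPUNODE_MASK definitions themselves are not among the matched lines:

#include <assert.h>

#define CPUNODE_BITS	12			/* assumed width of the CPU field */
#define CPUNODE_MASK	((1u << CPUNODE_BITS) - 1)

/* Same shape as vdso_encode_cpunode(): node in the high bits, cpu in the low bits */
static unsigned long encode_cpunode(unsigned int cpu, unsigned long node)
{
	return (node << CPUNODE_BITS) | cpu;
}

int main(void)
{
	unsigned long p = encode_cpunode(47, 3);

	assert((p & CPUNODE_MASK) == 47);	/* what vdso_read_cpunode() reports as *cpu */
	assert((p >> CPUNODE_BITS) == 3);	/* ... and as *node */
	return 0;
}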
285 * max 8 bytes.
287 #define XEN_EARLY_IDT_HANDLER_SIZE (8 + ENDBR_INSN_SIZE)
301 * This is always the case on Intel CPUs and, even on 64-bit AMD CPUs, any
322 * On 32-bit systems, the hidden parts of FS and GS are unobservable if