Lines Matching +full:ecx +full:- +full:2000

1 // SPDX-License-Identifier: GPL-2.0-only
63 #include <asm/intel-family.h>
73 #include <asm/runtime-const.h>
149 info = (struct ppin_info *)id->driver_data; in ppin_init()
151 if (rdmsrl_safe(info->msr_ppin_ctl, &val)) in ppin_init()
161 wrmsrl_safe(info->msr_ppin_ctl, val | 2UL); in ppin_init()
162 rdmsrl_safe(info->msr_ppin_ctl, &val); in ppin_init()
167 c->ppin = __rdmsr(info->msr_ppin); in ppin_init()
168 set_cpu_cap(c, info->feature); in ppin_init()
173 setup_clear_cpu_cap(info->feature); in ppin_init()
183 if (c->cpuid_level == -1) { in default_init()
185 if (c->x86 == 4) in default_init()
186 strcpy(c->x86_model_id, "486"); in default_init()
187 else if (c->x86 == 3) in default_init()
188 strcpy(c->x86_model_id, "386"); in default_init()
205 * IRET will check the segment types kkeil 2000/10/28
251 return -EINVAL; in x86_nopcid_setup()
268 return -EINVAL; in x86_noinvpcid_setup()
313 static int cachesize_override = -1;
346 c->cpuid_level = cpuid_eax(0); in squash_the_stupid_serial_number()
513 * Protection Keys are not available in 32-bit mode.
631 * software. Add those features to this table to auto-disable them.
650 for (df = cpuid_dependent_features; df->feature; df++) { in filter_cpuid_features()
652 if (!cpu_has(c, df->feature)) in filter_cpuid_features()
655 * Note: cpuid_level is set to -1 if unavailable, but in filter_cpuid_features()
661 if (!((s32)df->level < 0 ? in filter_cpuid_features()
662 (u32)df->level > (u32)c->extended_cpuid_level : in filter_cpuid_features()
663 (s32)df->level > (s32)c->cpuid_level)) in filter_cpuid_features()
666 clear_cpu_cap(c, df->feature); in filter_cpuid_features()
671 x86_cap_flag(df->feature), df->level); in filter_cpuid_features()
688 if (c->x86_model >= 16) in table_lookup_model()
694 info = this_cpu->legacy_models; in table_lookup_model()
696 while (info->family) { in table_lookup_model()
697 if (info->family == c->x86) in table_lookup_model()
698 return info->model_names[c->x86_model]; in table_lookup_model()
710 /* The 32-bit entry code needs to find cpu_entry_area. */
714 /* Load the original GDT from the per-cpu structure */
720 gdt_descr.size = GDT_SIZE - 1; in load_direct_gdt()
725 /* Load a fixmap remapping of the per-cpu GDT */
731 gdt_descr.size = GDT_SIZE - 1; in load_fixmap_gdt()
737 * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
741 * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
784 if (c->extended_cpuid_level < 0x80000004) in get_model_name()
787 v = (unsigned int *)c->x86_model_id; in get_model_name()
791 c->x86_model_id[48] = 0; in get_model_name()
794 p = q = s = &c->x86_model_id[0]; in get_model_name()
800 /* Note the last non-whitespace index */ in get_model_name()
812 unsigned int n, dummy, ebx, ecx, edx, l2size; in cpu_detect_cache_sizes() local
814 n = c->extended_cpuid_level; in cpu_detect_cache_sizes()
817 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
818 c->x86_cache_size = (ecx>>24) + (edx>>24); in cpu_detect_cache_sizes()
821 c->x86_tlbsize = 0; in cpu_detect_cache_sizes()
828 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); in cpu_detect_cache_sizes()
829 l2size = ecx >> 16; in cpu_detect_cache_sizes()
832 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); in cpu_detect_cache_sizes()
834 /* do processor-specific cache resizing */ in cpu_detect_cache_sizes()
835 if (this_cpu->legacy_cache_size) in cpu_detect_cache_sizes()
836 l2size = this_cpu->legacy_cache_size(c, l2size); in cpu_detect_cache_sizes()
839 if (cachesize_override != -1) in cpu_detect_cache_sizes()
846 c->x86_cache_size = l2size; in cpu_detect_cache_sizes()
859 if (this_cpu->c_detect_tlb) in cpu_detect_tlb()
860 this_cpu->c_detect_tlb(c); in cpu_detect_tlb()
873 char *v = c->x86_vendor_id; in get_cpu_vendor()
880 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || in get_cpu_vendor()
881 (cpu_devs[i]->c_ident[1] && in get_cpu_vendor()
882 !strcmp(v, cpu_devs[i]->c_ident[1]))) { in get_cpu_vendor()
885 c->x86_vendor = this_cpu->c_x86_vendor; in get_cpu_vendor()
893 c->x86_vendor = X86_VENDOR_UNKNOWN; in get_cpu_vendor()
900 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, in cpu_detect()
901 (unsigned int *)&c->x86_vendor_id[0], in cpu_detect()
902 (unsigned int *)&c->x86_vendor_id[8], in cpu_detect()
903 (unsigned int *)&c->x86_vendor_id[4]); in cpu_detect()
905 c->x86 = 4; in cpu_detect()
906 /* Intel-defined flags: level 0x00000001 */ in cpu_detect()
907 if (c->cpuid_level >= 0x00000001) { in cpu_detect()
911 c->x86 = x86_family(tfms); in cpu_detect()
912 c->x86_model = x86_model(tfms); in cpu_detect()
913 c->x86_stepping = x86_stepping(tfms); in cpu_detect()
916 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; in cpu_detect()
917 c->x86_cache_alignment = c->x86_clflush_size; in cpu_detect()
927 c->x86_capability[i] &= ~cpu_caps_cleared[i]; in apply_forced_caps()
928 c->x86_capability[i] |= cpu_caps_set[i]; in apply_forced_caps()
938 * Intel CPUs, for finer-grained selection of what's available. in init_speculation_control()
975 u32 eax, ebx, ecx, edx; in get_cpu_cap() local
977 /* Intel-defined flags: level 0x00000001 */ in get_cpu_cap()
978 if (c->cpuid_level >= 0x00000001) { in get_cpu_cap()
979 cpuid(0x00000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
981 c->x86_capability[CPUID_1_ECX] = ecx; in get_cpu_cap()
982 c->x86_capability[CPUID_1_EDX] = edx; in get_cpu_cap()
986 if (c->cpuid_level >= 0x00000006) in get_cpu_cap()
987 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); in get_cpu_cap()
989 /* Additional Intel-defined flags: level 0x00000007 */ in get_cpu_cap()
990 if (c->cpuid_level >= 0x00000007) { in get_cpu_cap()
991 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
992 c->x86_capability[CPUID_7_0_EBX] = ebx; in get_cpu_cap()
993 c->x86_capability[CPUID_7_ECX] = ecx; in get_cpu_cap()
994 c->x86_capability[CPUID_7_EDX] = edx; in get_cpu_cap()
996 /* Check valid sub-leaf index before accessing it */ in get_cpu_cap()
998 cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
999 c->x86_capability[CPUID_7_1_EAX] = eax; in get_cpu_cap()
1004 if (c->cpuid_level >= 0x0000000d) { in get_cpu_cap()
1005 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1007 c->x86_capability[CPUID_D_1_EAX] = eax; in get_cpu_cap()
1010 /* AMD-defined flags: level 0x80000001 */ in get_cpu_cap()
1012 c->extended_cpuid_level = eax; in get_cpu_cap()
1016 cpuid(0x80000001, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1018 c->x86_capability[CPUID_8000_0001_ECX] = ecx; in get_cpu_cap()
1019 c->x86_capability[CPUID_8000_0001_EDX] = edx; in get_cpu_cap()
1023 if (c->extended_cpuid_level >= 0x80000007) { in get_cpu_cap()
1024 cpuid(0x80000007, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1026 c->x86_capability[CPUID_8000_0007_EBX] = ebx; in get_cpu_cap()
1027 c->x86_power = edx; in get_cpu_cap()
1030 if (c->extended_cpuid_level >= 0x80000008) { in get_cpu_cap()
1031 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_cap()
1032 c->x86_capability[CPUID_8000_0008_EBX] = ebx; in get_cpu_cap()
1035 if (c->extended_cpuid_level >= 0x8000000a) in get_cpu_cap()
1036 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); in get_cpu_cap()
1038 if (c->extended_cpuid_level >= 0x8000001f) in get_cpu_cap()
1039 c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f); in get_cpu_cap()
1041 if (c->extended_cpuid_level >= 0x80000021) in get_cpu_cap()
1042 c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021); in get_cpu_cap()
1049 * This needs to happen each time we re-probe, which may happen in get_cpu_cap()
1057 u32 eax, ebx, ecx, edx; in get_cpu_address_sizes() local
1060 (c->extended_cpuid_level < 0x80000008)) { in get_cpu_address_sizes()
1062 c->x86_clflush_size = 64; in get_cpu_address_sizes()
1063 c->x86_phys_bits = 36; in get_cpu_address_sizes()
1064 c->x86_virt_bits = 48; in get_cpu_address_sizes()
1066 c->x86_clflush_size = 32; in get_cpu_address_sizes()
1067 c->x86_virt_bits = 32; in get_cpu_address_sizes()
1068 c->x86_phys_bits = 32; in get_cpu_address_sizes()
1072 c->x86_phys_bits = 36; in get_cpu_address_sizes()
1075 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); in get_cpu_address_sizes()
1077 c->x86_virt_bits = (eax >> 8) & 0xff; in get_cpu_address_sizes()
1078 c->x86_phys_bits = eax & 0xff; in get_cpu_address_sizes()
1081 if (!c->x86_clflush_size) in get_cpu_address_sizes()
1082 c->x86_clflush_size = 32; in get_cpu_address_sizes()
1085 c->x86_cache_bits = c->x86_phys_bits; in get_cpu_address_sizes()
1086 c->x86_cache_alignment = c->x86_clflush_size; in get_cpu_address_sizes()
1098 c->x86 = 4; in identify_cpu_without_cpuid()
1100 c->x86 = 3; in identify_cpu_without_cpuid()
1103 if (cpu_devs[i] && cpu_devs[i]->c_identify) { in identify_cpu_without_cpuid()
1104 c->x86_vendor_id[0] = 0; in identify_cpu_without_cpuid()
1105 cpu_devs[i]->c_identify(c); in identify_cpu_without_cpuid()
1106 if (c->x86_vendor_id[0]) { in identify_cpu_without_cpuid()
1177 * updated non-speculatively, and the issuing of %gs-relative memory
1186 /* AMD Family 0xf - 0x12 */
1192 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1221 /* CPU is affected by SMT (cross-thread) return predictions */
1282 return m && !!(m->driver_data & which); in cpu_matches()
1343 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature in cpu_set_bug_bits()
1344 * flag and protect from vendor-specific bugs via the whitelist. in cpu_set_bug_bits()
1370 * - TSX is supported or in cpu_set_bug_bits()
1371 * - TSX_CTRL is present in cpu_set_bug_bits()
1469 * probing for it doesn't even work. Disable it completely on 32-bit
1471 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1540 /* empty-string, i.e., ""-defined feature flags */ in cpu_parse_early_param()
1590 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in early_identify_cpu()
1591 c->extended_cpuid_level = 0; in early_identify_cpu()
1608 if (this_cpu->c_early_init) in early_identify_cpu()
1609 this_cpu->c_early_init(c); in early_identify_cpu()
1611 c->cpu_index = 0; in early_identify_cpu()
1614 if (this_cpu->c_bsp_init) in early_identify_cpu()
1615 this_cpu->c_bsp_init(c); in early_identify_cpu()
1631 * that it can't be enabled in 32-bit mode. in early_identify_cpu()
1638 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not in early_identify_cpu()
1640 * false-positives at the later stage. in early_identify_cpu()
1643 * - 5-level paging is disabled compile-time; in early_identify_cpu()
1644 * - it's 32-bit kernel; in early_identify_cpu()
1645 * - machine doesn't support 5-level paging; in early_identify_cpu()
1646 * - user specified 'no5lvl' in kernel command line. in early_identify_cpu()
1682 if (!cpu_devs[i]->c_ident[j]) in early_cpu_init()
1684 pr_info(" %s %s\n", cpu_devs[i]->c_vendor, in early_cpu_init()
1685 cpu_devs[i]->c_ident[j]); in early_cpu_init()
1703 * detect it directly instead of hard-coding the choice by in detect_null_seg_behavior()
1744 if ((c->x86 == 0x17 || c->x86 == 0x18) && in check_null_seg_clears_base()
1754 c->extended_cpuid_level = 0; in generic_identify()
1780 * NB: For the time being, only 32-bit kernels support in generic_identify()
1781 * X86_BUG_ESPFIX as such. 64-bit kernels directly choose in generic_identify()
1783 * non-paravirt system ever shows up that does *not* have the in generic_identify()
1798 c->loops_per_jiffy = loops_per_jiffy; in identify_cpu()
1799 c->x86_cache_size = 0; in identify_cpu()
1800 c->x86_vendor = X86_VENDOR_UNKNOWN; in identify_cpu()
1801 c->x86_model = c->x86_stepping = 0; /* So far unknown... */ in identify_cpu()
1802 c->x86_vendor_id[0] = '\0'; /* Unset */ in identify_cpu()
1803 c->x86_model_id[0] = '\0'; /* Unset */ in identify_cpu()
1805 c->x86_clflush_size = 64; in identify_cpu()
1806 c->x86_phys_bits = 36; in identify_cpu()
1807 c->x86_virt_bits = 48; in identify_cpu()
1809 c->cpuid_level = -1; /* CPUID not detected */ in identify_cpu()
1810 c->x86_clflush_size = 32; in identify_cpu()
1811 c->x86_phys_bits = 32; in identify_cpu()
1812 c->x86_virt_bits = 32; in identify_cpu()
1814 c->x86_cache_alignment = c->x86_clflush_size; in identify_cpu()
1815 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); in identify_cpu()
1817 memset(&c->vmx_capability, 0, sizeof(c->vmx_capability)); in identify_cpu()
1824 if (this_cpu->c_identify) in identify_cpu()
1825 this_cpu->c_identify(c); in identify_cpu()
1832 * Hygon will clear it in ->c_init() below. in identify_cpu()
1837 * Vendor-specific initialization. In this section we in identify_cpu()
1843 * At the end of this section, c->x86_capability better in identify_cpu()
1846 if (this_cpu->c_init) in identify_cpu()
1847 this_cpu->c_init(c); in identify_cpu()
1866 * The vendor-specific functions might have changed features. in identify_cpu()
1874 if (!c->x86_model_id[0]) { in identify_cpu()
1878 strcpy(c->x86_model_id, p); in identify_cpu()
1881 sprintf(c->x86_model_id, "%02x/%02x", in identify_cpu()
1882 c->x86, c->x86_model); in identify_cpu()
1904 boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; in identify_cpu()
1908 c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; in identify_cpu()
1921 * on 32-bit kernels:
1936 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- in enable_sep_cpu()
1940 tss->x86_tss.ss1 = __KERNEL_CS; in enable_sep_cpu()
1941 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); in enable_sep_cpu()
1984 if (c->x86_vendor < X86_VENDOR_NUM) { in print_cpu_info()
1985 vendor = this_cpu->c_vendor; in print_cpu_info()
1987 if (c->cpuid_level >= 0) in print_cpu_info()
1988 vendor = c->x86_vendor_id; in print_cpu_info()
1991 if (vendor && !strstr(c->x86_model_id, vendor)) in print_cpu_info()
1994 if (c->x86_model_id[0]) in print_cpu_info()
1995 pr_cont("%s", c->x86_model_id); in print_cpu_info()
1997 pr_cont("%d86", c->x86); in print_cpu_info()
1999 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); in print_cpu_info()
2001 if (c->x86_stepping || c->cpuid_level >= 0) in print_cpu_info()
2002 pr_cont(", stepping: 0x%x)\n", c->x86_stepping); in print_cpu_info()
2033 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR in wrmsrl_cstar()
2049 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. in idt_syscall_init()
2051 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). in idt_syscall_init()
2066 * to minimize user space-kernel interference. in idt_syscall_init()
2150 d.d = 1; /* 32-bit */ in setup_getcpu()
2158 /* Set up the per-CPU TSS IST stacks */ in tss_setup_ist()
2159 tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF); in tss_setup_ist()
2160 tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI); in tss_setup_ist()
2161 tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB); in tss_setup_ist()
2162 tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE); in tss_setup_ist()
2163 /* Only mapped when SEV-ES is active */ in tss_setup_ist()
2164 tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC); in tss_setup_ist()
2172 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID; in tss_setup_io_bitmap()
2175 tss->io_bitmap.prev_max = 0; in tss_setup_io_bitmap()
2176 tss->io_bitmap.prev_sequence = 0; in tss_setup_io_bitmap()
2177 memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap)); in tss_setup_io_bitmap()
2182 tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL; in tss_setup_io_bitmap()
2202 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); in cpu_init_exception_handling()
2229 * cpu_init() initializes state that is per-CPU. Some data is already
2252 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); in cpu_init()
2265 cur->active_mm = &init_mm; in cpu_init()
2266 BUG_ON(cur->mm); in cpu_init()
2291 * store_cpu_caps() - Store a snapshot of CPU capabilities
2299 curr_info->cpuid_level = cpuid_eax(0); in store_cpu_caps()
2302 memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability, in store_cpu_caps()
2303 sizeof(curr_info->x86_capability)); in store_cpu_caps()
2310 * microcode_check() - Check if any CPU capabilities changed after an update.
2328 if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability, in microcode_check()
2329 sizeof(prev_info->x86_capability))) in microcode_check()
2333 …pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS… in microcode_check()
2379 init_utsname()->machine[1] = in arch_cpu_finalize_init()
2395 c->initialized = true; in arch_cpu_finalize_init()
2405 USER_PTR_MAX = (1ul << 63) - PAGE_SIZE; in arch_cpu_finalize_init()
2427 * not cause "plain-text" data to be decrypted when accessed. It in arch_cpu_finalize_init()
2428 * must be called after late_time_init() so that Hyper-V x86/x64 in arch_cpu_finalize_init()
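Several of the matched lines (cpu_detect(), get_cpu_cap(), get_cpu_address_sizes()) revolve around reading CPUID leaves and decoding the signature in leaf 1 EAX via x86_family()/x86_model()/x86_stepping(), plus the CLFLUSH line size from EBX. As a rough orientation aid, here is a minimal user-space sketch of that same decode; it is an assumption for illustration, not kernel code, and relies on the compiler-provided <cpuid.h> helper __get_cpuid() rather than the kernel's cpuid() wrappers.

/*
 * User-space sketch (assumption, not from common.c) of the decode that
 * cpu_detect() performs on CPUID leaf 1: family/model/stepping from EAX,
 * CLFLUSH line size from EBX[15:8] (reported in 8-byte units).
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model, stepping;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;	/* CPUID leaf 1 not available */

	family   = (eax >> 8) & 0xf;
	model    = (eax >> 4) & 0xf;
	stepping = eax & 0xf;

	/* Extended family applies only when the base family is 0xf */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	/* Extended model applies for family 6 and above */
	if (family >= 6)
		model |= ((eax >> 16) & 0xf) << 4;

	printf("family 0x%x, model 0x%x, stepping 0x%x, clflush %u bytes\n",
	       family, model, stepping, ((ebx >> 8) & 0xff) * 8);
	return 0;
}

This mirrors the arithmetic behind the x86_family()/x86_model()/x86_stepping() calls and the x86_clflush_size assignment seen in the cpu_detect() lines above, but uses only portable user-space facilities.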