Diffstat (limited to 'contrib/syslinux-4.02/com32/gpllib/cpuid.c')
 -rw-r--r-- contrib/syslinux-4.02/com32/gpllib/cpuid.c | 456
 1 file changed, 456 insertions(+), 0 deletions(-)
diff --git a/contrib/syslinux-4.02/com32/gpllib/cpuid.c b/contrib/syslinux-4.02/com32/gpllib/cpuid.c
new file mode 100644
index 0000000..2d5b5ce
--- /dev/null
+++ b/contrib/syslinux-4.02/com32/gpllib/cpuid.c
@@ -0,0 +1,456 @@
+/*
+ * Portions of this file taken from the Linux kernel,
+ * Copyright 1991-2009 Linus Torvalds and contributors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#include <stdio.h>
+#include <string.h>
+#include "cpuid.h"
+
+struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = { };
+
+/*
+ * CPUID functions returning a single datum
+ */
+
+/* Probe for the CPUID instruction */
+static int have_cpuid_p(void)
+{
+ return cpu_has_eflag(X86_EFLAGS_ID);
+}
+
+static struct cpu_dev amd_cpu_dev = {
+ .c_vendor = "AMD",
+ .c_ident = {"AuthenticAMD"}
+};
+
+static struct cpu_dev intel_cpu_dev = {
+ .c_vendor = "Intel",
+ .c_ident = {"GenuineIntel"}
+};
+
+static struct cpu_dev cyrix_cpu_dev = {
+ .c_vendor = "Cyrix",
+ .c_ident = {"CyrixInstead"}
+};
+
+static struct cpu_dev umc_cpu_dev = {
+ .c_vendor = "UMC",
+ .c_ident = {"UMC UMC UMC"}
+
+};
+
+static struct cpu_dev nexgen_cpu_dev = {
+ .c_vendor = "Nexgen",
+ .c_ident = {"NexGenDriven"}
+};
+
+static struct cpu_dev centaur_cpu_dev = {
+ .c_vendor = "Centaur",
+ .c_ident = {"CentaurHauls"}
+};
+
+static struct cpu_dev rise_cpu_dev = {
+ .c_vendor = "Rise",
+ .c_ident = {"RiseRiseRise"}
+};
+
+static struct cpu_dev transmeta_cpu_dev = {
+ .c_vendor = "Transmeta",
+ .c_ident = {"GenuineTMx86", "TransmetaCPU"}
+};
+
+void init_cpu_devs(void)
+{
+ cpu_devs[X86_VENDOR_INTEL] = &intel_cpu_dev;
+ cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
+ cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
+ cpu_devs[X86_VENDOR_UMC] = &umc_cpu_dev;
+ cpu_devs[X86_VENDOR_NEXGEN] = &nexgen_cpu_dev;
+ cpu_devs[X86_VENDOR_CENTAUR] = &centaur_cpu_dev;
+ cpu_devs[X86_VENDOR_RISE] = &rise_cpu_dev;
+ cpu_devs[X86_VENDOR_TRANSMETA] = &transmeta_cpu_dev;
+}
+
+void get_cpu_vendor(struct cpuinfo_x86 *c)
+{
+ char *v = c->x86_vendor_id;
+ int i;
+ init_cpu_devs();
+ for (i = 0; i < X86_VENDOR_NUM; i++) {
+ if (cpu_devs[i]) {
+ if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
+ (cpu_devs[i]->c_ident[1] &&
+ !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+ c->x86_vendor = i;
+ return;
+ }
+ }
+ }
+
+ c->x86_vendor = X86_VENDOR_UNKNOWN;
+}
+
+int get_model_name(struct cpuinfo_x86 *c)
+{
+ unsigned int *v;
+ char *p, *q;
+
+ if (cpuid_eax(0x80000000) < 0x80000004)
+ return 0;
+
+ v = (unsigned int *)c->x86_model_id;
+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
+ c->x86_model_id[48] = 0;
+
+ /* Intel chips right-justify this string for some dumb reason;
+ undo that brain damage */
+ p = q = &c->x86_model_id[0];
+ while (*p == ' ')
+ p++;
+ if (p != q) {
+ while (*p)
+ *q++ = *p++;
+ while (q <= &c->x86_model_id[48])
+ *q++ = '\0'; /* Zero-pad the rest */
+ }
+
+ return 1;
+}
+
+void detect_cache(uint32_t xlvl, struct cpuinfo_x86 *c)
+{
+ uint32_t eax, ebx, ecx, edx, l2size;
+ /* Detecting L1 cache */
+ if (xlvl >= 0x80000005) {
+ cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
+ c->x86_l1_data_cache_size = ecx >> 24;
+ c->x86_l1_instruction_cache_size = edx >> 24;
+ }
+
+ /* Detecting L2 cache */
+ c->x86_l2_cache_size = 0;
+
+ if (xlvl < 0x80000006) /* Some chips just have a large L1. */
+ return;
+
+ cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
+ l2size = ecx >> 16;
+
+ /* Vendor based fixes */
+ switch (c->x86_vendor) {
+ case X86_VENDOR_INTEL:
+ /*
+ * Intel PIII Tualatin. This comes in two flavours.
+ * One has 256kb of cache, the other 512. We have no way
+ * to determine which, so we use a boottime override
+ * for the 512kb model, and assume 256 otherwise.
+ */
+ if ((c->x86 == 6) && (c->x86_model == 11) && (l2size == 0))
+ l2size = 256;
+ break;
+ case X86_VENDOR_AMD:
+ /* AMD errata T13 (order #21922) */
+ if ((c->x86 == 6)) {
+ if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */
+ l2size = 64;
+ if (c->x86_model == 4 && (c->x86_mask == 0 || c->x86_mask == 1)) /* Tbird rev A1/A2 */
+ l2size = 256;
+ }
+ break;
+ }
+ c->x86_l2_cache_size = l2size;
+}
+
+void generic_identify(struct cpuinfo_x86 *c)
+{
+ uint32_t tfms, xlvl;
+ uint32_t eax, ebx, ecx, edx;
+
+ /* Get vendor name */
+ cpuid(0x00000000,
+ (uint32_t *) & c->cpuid_level,
+ (uint32_t *) & c->x86_vendor_id[0],
+ (uint32_t *) & c->x86_vendor_id[8],
+ (uint32_t *) & c->x86_vendor_id[4]);
+
+ get_cpu_vendor(c);
+
+ /* Intel-defined flags: level 0x00000001 */
+ if (c->cpuid_level >= 0x00000001) {
+ uint32_t capability, excap;
+ cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
+ c->x86_capability[0] = capability;
+ c->x86_capability[4] = excap;
+ c->x86 = (tfms >> 8) & 15;
+ c->x86_model = (tfms >> 4) & 15;
+ if (c->x86 == 0xf)
+ c->x86 += (tfms >> 20) & 0xff;
+ if (c->x86 >= 0x6)
+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
+ c->x86_mask = tfms & 15;
+ if (cpu_has(c, X86_FEATURE_CLFLSH))
+ c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
+ } else {
+ /* Have CPUID level 0 only - unheard of */
+ c->x86 = 4;
+ }
+
+ /* AMD-defined flags: level 0x80000001 */
+ xlvl = cpuid_eax(0x80000000);
+ if ((xlvl & 0xffff0000) == 0x80000000) {
+ if (xlvl >= 0x80000001) {
+ c->x86_capability[1] = cpuid_edx(0x80000001);
+ c->x86_capability[6] = cpuid_ecx(0x80000001);
+ }
+ if (xlvl >= 0x80000004)
+ get_model_name(c); /* Default name */
+ }
+
+ /* Detecting the number of cores */
+ switch (c->x86_vendor) {
+ case X86_VENDOR_AMD:
+ if (xlvl >= 0x80000008) {
+ c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+ if (c->x86_num_cores & (c->x86_num_cores - 1))
+ c->x86_num_cores = 1;
+ }
+ break;
+ case X86_VENDOR_INTEL:
+ if (c->cpuid_level >= 0x00000004) {
+ cpuid(0x4, &eax, &ebx, &ecx, &edx);
+ c->x86_num_cores = ((eax & 0xfc000000) >> 26) + 1;
+ }
+ break;
+ default:
+ c->x86_num_cores = 1;
+ break;
+ }
+
+ detect_cache(xlvl, c);
+}
+
+/*
+ * Checksum an MP configuration block.
+ */
+
+static int mpf_checksum(unsigned char *mp, int len)
+{
+ int sum = 0;
+
+ while (len--)
+ sum += *mp++;
+
+ return sum & 0xFF;
+}
+
+static int smp_scan_config(unsigned long base, unsigned long length)
+{
+ unsigned long *bp = (unsigned long *)base;
+ struct intel_mp_floating *mpf;
+
+// printf("Scan SMP from %p for %ld bytes.\n", bp,length);
+ if (sizeof(*mpf) != 16) {
+ printf("Error: MPF size\n");
+ return 0;
+ }
+
+ while (length > 0) {
+ mpf = (struct intel_mp_floating *)bp;
+ if ((*bp == SMP_MAGIC_IDENT) &&
+ (mpf->mpf_length == 1) &&
+ !mpf_checksum((unsigned char *)bp, 16) &&
+ ((mpf->mpf_specification == 1)
+ || (mpf->mpf_specification == 4))) {
+ return 1;
+ }
+ bp += 4;
+ length -= 16;
+ }
+ return 0;
+}
+
+int find_smp_config(void)
+{
+// unsigned int address;
+
+ /*
+ * FIXME: Linux assumes you have 640K of base ram..
+ * this continues the error...
+ *
+ * 1) Scan the bottom 1K for a signature
+ * 2) Scan the top 1K of base RAM
+ * 3) Scan the 64K of bios
+ */
+ if (smp_scan_config(0x0, 0x400) ||
+ smp_scan_config(639 * 0x400, 0x400) ||
+ smp_scan_config(0xF0000, 0x10000))
+ return 1;
+ /*
+ * If it is an SMP machine we should know now, unless the
+ * configuration is in an EISA/MCA bus machine with an
+ * extended bios data area.
+ *
+ * there is a real-mode segmented pointer pointing to the
+ * 4K EBDA area at 0x40E, calculate and scan it here.
+ *
+ * NOTE! There are Linux loaders that will corrupt the EBDA
+ * area, and as such this kind of SMP config may be less
+ * trustworthy, simply because the SMP table may have been
+ * stomped on during early boot. These loaders are buggy and
+ * should be fixed.
+ *
+ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
+ */
+
+// address = get_bios_ebda();
+// if (address)
+// smp_scan_config(address, 0x400);
+ return 0;
+}
+
+void set_cpu_flags(struct cpuinfo_x86 *c, s_cpu * cpu)
+{
+ cpu->flags.fpu = cpu_has(c, X86_FEATURE_FPU);
+ cpu->flags.vme = cpu_has(c, X86_FEATURE_VME);
+ cpu->flags.de = cpu_has(c, X86_FEATURE_DE);
+ cpu->flags.pse = cpu_has(c, X86_FEATURE_PSE);
+ cpu->flags.tsc = cpu_has(c, X86_FEATURE_TSC);
+ cpu->flags.msr = cpu_has(c, X86_FEATURE_MSR);
+ cpu->flags.pae = cpu_has(c, X86_FEATURE_PAE);
+ cpu->flags.mce = cpu_has(c, X86_FEATURE_MCE);
+ cpu->flags.cx8 = cpu_has(c, X86_FEATURE_CX8);
+ cpu->flags.apic = cpu_has(c, X86_FEATURE_APIC);
+ cpu->flags.sep = cpu_has(c, X86_FEATURE_SEP);
+ cpu->flags.mtrr = cpu_has(c, X86_FEATURE_MTRR);
+ cpu->flags.pge = cpu_has(c, X86_FEATURE_PGE);
+ cpu->flags.mca = cpu_has(c, X86_FEATURE_MCA);
+ cpu->flags.cmov = cpu_has(c, X86_FEATURE_CMOV);
+ cpu->flags.pat = cpu_has(c, X86_FEATURE_PAT);
+ cpu->flags.pse_36 = cpu_has(c, X86_FEATURE_PSE36);
+ cpu->flags.psn = cpu_has(c, X86_FEATURE_PN);
+ cpu->flags.clflsh = cpu_has(c, X86_FEATURE_CLFLSH);
+ cpu->flags.dts = cpu_has(c, X86_FEATURE_DTES);
+ cpu->flags.acpi = cpu_has(c, X86_FEATURE_ACPI);
+ cpu->flags.pbe = cpu_has(c, X86_FEATURE_PBE);
+ cpu->flags.mmx = cpu_has(c, X86_FEATURE_MMX);
+ cpu->flags.fxsr = cpu_has(c, X86_FEATURE_FXSR);
+ cpu->flags.sse = cpu_has(c, X86_FEATURE_XMM);
+ cpu->flags.sse2 = cpu_has(c, X86_FEATURE_XMM2);
+ cpu->flags.ss = cpu_has(c, X86_FEATURE_SELFSNOOP);
+ cpu->flags.htt = cpu_has(c, X86_FEATURE_HT);
+ cpu->flags.acc = cpu_has(c, X86_FEATURE_ACC);
+ cpu->flags.syscall = cpu_has(c, X86_FEATURE_SYSCALL);
+ cpu->flags.mp = cpu_has(c, X86_FEATURE_MP);
+ cpu->flags.nx = cpu_has(c, X86_FEATURE_NX);
+ cpu->flags.mmxext = cpu_has(c, X86_FEATURE_MMXEXT);
+ cpu->flags.fxsr_opt = cpu_has(c, X86_FEATURE_FXSR_OPT);
+ cpu->flags.gbpages = cpu_has(c, X86_FEATURE_GBPAGES);
+ cpu->flags.rdtscp = cpu_has(c, X86_FEATURE_RDTSCP);
+ cpu->flags.lm = cpu_has(c, X86_FEATURE_LM);
+ cpu->flags.nowext = cpu_has(c, X86_FEATURE_3DNOWEXT);
+ cpu->flags.now = cpu_has(c, X86_FEATURE_3DNOW);
+ cpu->flags.smp = find_smp_config();
+ cpu->flags.pni = cpu_has(c, X86_FEATURE_XMM3);
+ cpu->flags.pclmulqd = cpu_has(c, X86_FEATURE_PCLMULQDQ);
+ cpu->flags.dtes64 = cpu_has(c, X86_FEATURE_DTES64);
+ cpu->flags.vmx = cpu_has(c, X86_FEATURE_VMX);
+ cpu->flags.smx = cpu_has(c, X86_FEATURE_SMX);
+ cpu->flags.est = cpu_has(c, X86_FEATURE_EST);
+ cpu->flags.tm2 = cpu_has(c, X86_FEATURE_TM2);
+ cpu->flags.sse3 = cpu_has(c, X86_FEATURE_SSE3);
+ cpu->flags.cid = cpu_has(c, X86_FEATURE_CID);
+ cpu->flags.fma = cpu_has(c, X86_FEATURE_FMA);
+ cpu->flags.cx16 = cpu_has(c, X86_FEATURE_CX16);
+ cpu->flags.xtpr = cpu_has(c, X86_FEATURE_XTPR);
+ cpu->flags.pdcm = cpu_has(c, X86_FEATURE_PDCM);
+ cpu->flags.dca = cpu_has(c, X86_FEATURE_DCA);
+ cpu->flags.xmm4_1 = cpu_has(c, X86_FEATURE_XMM4_1);
+ cpu->flags.xmm4_2 = cpu_has(c, X86_FEATURE_XMM4_2);
+ cpu->flags.x2apic = cpu_has(c, X86_FEATURE_X2APIC);
+ cpu->flags.movbe = cpu_has(c, X86_FEATURE_MOVBE);
+ cpu->flags.popcnt = cpu_has(c, X86_FEATURE_POPCNT);
+ cpu->flags.aes = cpu_has(c, X86_FEATURE_AES);
+ cpu->flags.xsave = cpu_has(c, X86_FEATURE_XSAVE);
+ cpu->flags.osxsave = cpu_has(c, X86_FEATURE_OSXSAVE);
+ cpu->flags.avx = cpu_has(c, X86_FEATURE_AVX);
+ cpu->flags.hypervisor = cpu_has(c, X86_FEATURE_HYPERVISOR);
+ cpu->flags.ace2 = cpu_has(c, X86_FEATURE_ACE2);
+ cpu->flags.ace2_en = cpu_has(c, X86_FEATURE_ACE2_EN);
+ cpu->flags.phe = cpu_has(c, X86_FEATURE_PHE);
+ cpu->flags.phe_en = cpu_has(c, X86_FEATURE_PHE_EN);
+ cpu->flags.pmm = cpu_has(c, X86_FEATURE_PMM);
+ cpu->flags.pmm_en = cpu_has(c, X86_FEATURE_PMM_EN);
+ cpu->flags.extapic = cpu_has(c, X86_FEATURE_EXTAPIC);
+ cpu->flags.cr8_legacy = cpu_has(c, X86_FEATURE_CR8_LEGACY);
+ cpu->flags.abm = cpu_has(c, X86_FEATURE_ABM);
+ cpu->flags.sse4a = cpu_has(c, X86_FEATURE_SSE4A);
+ cpu->flags.misalignsse = cpu_has(c, X86_FEATURE_MISALIGNSSE);
+ cpu->flags.nowprefetch = cpu_has(c, X86_FEATURE_3DNOWPREFETCH);
+ cpu->flags.osvw = cpu_has(c, X86_FEATURE_OSVW);
+ cpu->flags.ibs = cpu_has(c, X86_FEATURE_IBS);
+ cpu->flags.sse5 = cpu_has(c, X86_FEATURE_SSE5);
+ cpu->flags.skinit = cpu_has(c, X86_FEATURE_SKINIT);
+ cpu->flags.wdt = cpu_has(c, X86_FEATURE_WDT);
+ cpu->flags.ida = cpu_has(c, X86_FEATURE_IDA);
+ cpu->flags.arat = cpu_has(c, X86_FEATURE_ARAT);
+ cpu->flags.tpr_shadow = cpu_has(c, X86_FEATURE_TPR_SHADOW);
+ cpu->flags.vnmi = cpu_has(c, X86_FEATURE_VNMI);
+ cpu->flags.flexpriority = cpu_has(c, X86_FEATURE_FLEXPRIORITY);
+ cpu->flags.ept = cpu_has(c, X86_FEATURE_EPT);
+ cpu->flags.vpid = cpu_has(c, X86_FEATURE_VPID);
+ cpu->flags.svm = cpu_has(c, X86_FEATURE_SVM);
+}
+
+void set_generic_info(struct cpuinfo_x86 *c, s_cpu * cpu)
+{
+ cpu->family = c->x86;
+ cpu->vendor_id = c->x86_vendor;
+ cpu->model_id = c->x86_model;
+ cpu->stepping = c->x86_mask;
+ strlcpy(cpu->vendor, cpu_devs[c->x86_vendor]->c_vendor,
+ sizeof(cpu->vendor));
+ strlcpy(cpu->model, c->x86_model_id, sizeof(cpu->model));
+ cpu->num_cores = c->x86_num_cores;
+ cpu->l1_data_cache_size = c->x86_l1_data_cache_size;
+ cpu->l1_instruction_cache_size = c->x86_l1_instruction_cache_size;
+ cpu->l2_cache_size = c->x86_l2_cache_size;
+}
+
+void detect_cpu(s_cpu * cpu)
+{
+ struct cpuinfo_x86 c;
+ c.x86_clflush_size = 32;
+ c.x86_l1_data_cache_size = 0;
+ c.x86_l1_instruction_cache_size = 0;
+ c.x86_l2_cache_size = 0;
+ c.x86_vendor = X86_VENDOR_UNKNOWN;
+ c.cpuid_level = -1; /* CPUID not detected */
+ c.x86_model = c.x86_mask = 0; /* So far unknown... */
+ c.x86_num_cores = 1;
+ memset(&c.x86_capability, 0, sizeof(c.x86_capability));
+ memset(&c.x86_vendor_id, 0, sizeof(c.x86_vendor_id));
+ memset(&c.x86_model_id, 0, sizeof(c.x86_model_id));
+
+ if (!have_cpuid_p())
+ return;
+
+ generic_identify(&c);
+ set_generic_info(&c, cpu);
+ set_cpu_flags(&c, cpu);
+}