author     Michael Brown <mcb30@ipxe.org>    2016-02-19 02:50:13 +0100
committer  Michael Brown <mcb30@ipxe.org>    2016-02-19 03:58:09 +0100
commit     d1562c38a6882a129998935cd63d09ab18f77add
tree       e834487ca48085ca2f62d200419e6a839301a514 /src/arch
parent     [relocate] Preserve page alignment during relocation
[librm] Prepare for long-mode memory map
The bulk of the iPXE binary (the .textdata section) is physically relocated at runtime to the top of the 32-bit address space in order to allow space for an OS to be loaded. The relocation is achieved with the assistance of segmentation: we adjust the code and data segment bases so that the link-time addresses remain valid.

Segmentation is not available (for normal code and data segments) in long mode. We choose to compile the C code with -mcmodel=kernel and use a link-time address of 0xffffffffeb000000. This choice allows us to identity-map the entirety of the 32-bit address space, and to alias our chosen link-time address to the physical location of our .textdata section. (This requires the .textdata section to always be aligned to a page boundary.)

We simultaneously choose to set the 32-bit virtual address segment bases such that the link-time addresses may simply be truncated to 32 bits in order to generate a valid 32-bit virtual address. This allows symbols in .textdata to be trivially accessed by both 32-bit and 64-bit code.

There is no (sensible) way in 32-bit assembly code to generate the required R_X86_64_32S relocation records for these truncated symbols. However, subtracting the fixed constant 0xffffffff00000000 has the same effect as truncation, and can be represented in a standard R_X86_64_32 relocation record. We define the VIRTUAL() macro to abstract away this truncation operation, and apply it to all references by 32-bit (or 16-bit) assembly code to any symbols within the .textdata section.

We define "virt_offset" for a 64-bit build as "the value to be added to an address within .textdata in order to obtain its physical address". With this definition, the low 32 bits of "virt_offset" can be treated by 32-bit code as functionally equivalent to "virt_offset" in a 32-bit build.

We define "text16" and "data16" for a 64-bit build as the physical addresses of the .text16 and .data16 sections. Since a physical address within the 32-bit address space may be used directly as a 64-bit virtual address (thanks to the identity map), this definition provides the most natural access to variables in .text16 and .data16. Note that this requires a minor adjustment in prot_to_real(), which accesses .text16 using 32-bit virtual addresses.

Signed-off-by: Michael Brown <mcb30@ipxe.org>
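The equivalence between truncation and subtraction is easy to verify. A minimal C sketch (illustrative only, not part of this commit; the symbol value is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    int main ( void ) {
            /* Hypothetical link-time address of a .textdata symbol,
             * within the negative 2GB (kernel code model).
             */
            uint64_t symbol = 0xffffffffeb001234ULL;

            /* Truncating to 32 bits... */
            uint32_t truncated = ( uint32_t ) symbol;

            /* ...yields the same value as subtracting the fixed
             * constant 0xffffffff00000000, which is what VIRTUAL()
             * expands to in a 64-bit build.
             */
            uint64_t subtracted = ( symbol - 0xffffffff00000000ULL );
            assert ( truncated == subtracted );

            return 0;
    }

Both forms produce 0xeb001234 here; the subtraction form matters only because it can be expressed as a standard R_X86_64_32 relocation with an addend.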
Diffstat (limited to 'src/arch')
-rw-r--r--  src/arch/x86/core/virtaddr.S      10
-rw-r--r--  src/arch/x86/include/librm.h      39
-rw-r--r--  src/arch/x86/transitions/librm.S  65
-rw-r--r--  src/arch/x86_64/Makefile           4
-rw-r--r--  src/arch/x86_64/Makefile.efi       4
-rw-r--r--  src/arch/x86_64/Makefile.pcbios    9
6 files changed, 96 insertions(+), 35 deletions(-)
diff --git a/src/arch/x86/core/virtaddr.S b/src/arch/x86/core/virtaddr.S
index 42559157..45beb164 100644
--- a/src/arch/x86/core/virtaddr.S
+++ b/src/arch/x86/core/virtaddr.S
@@ -32,13 +32,13 @@ _virt_to_phys:
pushl %ebp
/* Change return address to a physical address */
- movl virt_offset, %ebp
+ movl VIRTUAL(virt_offset), %ebp
addl %ebp, 12(%esp)
/* Switch to physical code segment */
cli
pushl $PHYSICAL_CS
- leal 1f(%ebp), %eax
+ leal VIRTUAL(1f)(%ebp), %eax
pushl %eax
lret
1:
@@ -78,7 +78,7 @@ _phys_to_virt:
/* Switch to virtual code segment */
cli
- ljmp $VIRTUAL_CS, $1f
+ ljmp $VIRTUAL_CS, $VIRTUAL(1f)
1:
/* Reload data segment registers */
movl $VIRTUAL_DS, %eax
@@ -88,7 +88,7 @@ _phys_to_virt:
movl %eax, %gs
/* Reload stack segment and adjust %esp */
- movl virt_offset, %ebp
+ movl VIRTUAL(virt_offset), %ebp
movl %eax, %ss
subl %ebp, %esp
@@ -134,7 +134,7 @@ _intr_to_virt:
/* Reload stack segment and adjust %esp if necessary */
je 1f
- movl virt_offset, %ebp
+ movl VIRTUAL(virt_offset), %ebp
movl %eax, %ss
subl %ebp, %esp
1:
diff --git a/src/arch/x86/include/librm.h b/src/arch/x86/include/librm.h
index 97461640..fc31c503 100644
--- a/src/arch/x86/include/librm.h
+++ b/src/arch/x86/include/librm.h
@@ -7,7 +7,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
*
* Don't change these unless you really know what you're doing.
*/
-
#define VIRTUAL_CS 0x08
#define VIRTUAL_DS 0x10
#define PHYSICAL_CS 0x18
@@ -16,6 +15,40 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
#define REAL_DS 0x30
#define P2R_DS 0x38
+/* Calculate symbol address within VIRTUAL_CS or VIRTUAL_DS
+ *
+ * In a 64-bit build, we set the bases of VIRTUAL_CS and VIRTUAL_DS
+ * such that truncating a .textdata symbol value to 32 bits gives a
+ * valid 32-bit virtual address.
+ *
+ * The C code is compiled with -mcmodel=kernel and so we must place
+ * all .textdata symbols within the negative 2GB of the 64-bit address
+ * space. Consequently, all .textdata symbols will have the MSB set
+ * after truncation to 32 bits. This means that a straightforward
+ * R_X86_64_32 relocation record for the symbol will fail, since the
+ * truncated symbol value will not correctly zero-extend to the
+ * original 64-bit value.
+ *
+ * Using an R_X86_64_32S relocation record would work, but there is no
+ * (sensible) way to generate these relocation records within 32-bit
+ * or 16-bit code.
+ *
+ * The simplest solution is to generate an R_X86_64_32 relocation
+ * record with an addend of (-0xffffffff00000000). Since all
+ * .textdata symbols are within the negative 2GB of the 64-bit address
+ * space, this addend acts to effectively truncate the symbol to 32
+ * bits, thereby matching the semantics of the R_X86_64_32 relocation
+ * records generated for 32-bit and 16-bit code.
+ *
+ * In a 32-bit build, this problem does not exist, and we can just use
+ * the .textdata symbol values directly.
+ */
+#ifdef __x86_64__
+#define VIRTUAL(address) ( (address) - 0xffffffff00000000 )
+#else
+#define VIRTUAL(address) (address)
+#endif
+
#ifdef ASSEMBLY
/**
@@ -24,7 +57,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
* @v function C function
*/
.macro virtcall function
- pushl $\function
+ pushl $VIRTUAL(\function)
call prot_call
.endm
@@ -42,7 +75,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
* @v function C function
*/
#define VIRT_CALL( function ) \
- "pushl $( " #function " )\n\t" \
+ "pushl $( " _S2 ( VIRTUAL ( function ) ) " )\n\t" \
"call prot_call\n\t"
/* Variables in librm.S */
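To make the relocation-record reasoning in the comment above concrete, here is a small C sketch (illustrative only; the symbol value is hypothetical) of the overflow checks a linker applies to R_X86_64_32S (sign-extend) and R_X86_64_32 (zero-extend) fields:

    #include <assert.h>
    #include <stdint.h>

    int main ( void ) {
            /* Hypothetical .textdata symbol in the negative 2GB */
            uint64_t symbol = 0xffffffffeb001234ULL;
            uint32_t field = ( uint32_t ) symbol;  /* 32-bit value in code */

            /* R_X86_64_32S: the stored field must sign-extend back to
             * the computed value.  This holds for our symbols, but
             * 32-bit and 16-bit code cannot sensibly emit this record.
             */
            assert ( ( uint64_t ) ( int64_t ) ( int32_t ) field == symbol );

            /* R_X86_64_32: the stored field must zero-extend back to
             * the computed value.  A plain reference fails that check...
             */
            assert ( ( uint64_t ) field != symbol );

            /* ...but with VIRTUAL()'s addend of (-0xffffffff00000000)
             * the computed value is already a 32-bit quantity, so the
             * same record type succeeds.
             */
            assert ( ( uint64_t ) field == ( symbol - 0xffffffff00000000ULL ) );

            return 0;
    }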
diff --git a/src/arch/x86/transitions/librm.S b/src/arch/x86/transitions/librm.S
index 42cfb991..495f272d 100644
--- a/src/arch/x86/transitions/librm.S
+++ b/src/arch/x86/transitions/librm.S
@@ -19,8 +19,22 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )
#define SIZEOF_REAL_MODE_REGS ( SIZEOF_I386_SEG_REGS + SIZEOF_I386_REGS )
#define SIZEOF_I386_FLAGS 4
#define SIZEOF_I386_ALL_REGS ( SIZEOF_REAL_MODE_REGS + SIZEOF_I386_FLAGS )
-
- .arch i386
+
+/* Size of an address */
+#ifdef __x86_64__
+#define SIZEOF_ADDR 8
+#else
+#define SIZEOF_ADDR 4
+#endif
+
+/* Selectively assemble code for 32-bit/64-bit builds */
+#ifdef __x86_64__
+#define if32 if 0
+#define if64 if 1
+#else
+#define if32 if 1
+#define if64 if 0
+#endif
/****************************************************************************
* Global descriptor table
@@ -126,7 +140,7 @@ rm_sp: .word 0
rm_ss: .word 0
.section ".data.pm_esp", "aw", @progbits
-pm_esp: .long _estack
+pm_esp: .long VIRTUAL(_estack)
/****************************************************************************
* Virtual address offsets
@@ -137,9 +151,9 @@ pm_esp: .long _estack
****************************************************************************
*/
.struct 0
-VA_VIRT_OFFSET: .space 4
-VA_TEXT16: .space 4
-VA_DATA16: .space 4
+VA_VIRT_OFFSET: .space SIZEOF_ADDR
+VA_TEXT16: .space SIZEOF_ADDR
+VA_DATA16: .space SIZEOF_ADDR
VA_SIZE:
.previous
@@ -168,7 +182,7 @@ virt_addrs: .space VA_SIZE
* Parameters:
* %cs : .text16 segment
* %ds : .data16 segment
- * %edi : Physical base of protected-mode code (virt_offset)
+ * %edi : Physical base of protected-mode code
****************************************************************************
*/
.section ".text16.init_librm", "ax", @progbits
@@ -181,7 +195,9 @@ init_librm:
pushl %edi
/* Store rm_virt_offset and set up virtual_cs and virtual_ds segments */
+ subl $VIRTUAL(_textdata), %edi
movl %edi, rm_virt_offset
+.if64 ; setae (rm_virt_offset+4) ; .endif
movl %edi, %eax
movw $virtual_cs, %bx
call set_seg_base
@@ -195,7 +211,7 @@ init_librm:
shll $4, %eax
movw $real_cs, %bx
call set_seg_base
- subl %edi, %eax
+.if32 ; subl %edi, %eax ; .endif
movl %eax, rm_text16
/* Store rm_ds and rm_data16, set up real_ds segment and GDT base */
@@ -207,7 +223,7 @@ init_librm:
call set_seg_base
movl %eax, gdt_base
addl $gdt, gdt_base
- subl %edi, %eax
+.if32 ; subl %edi, %eax ; .endif
movl %eax, rm_data16
/* Switch to protected mode */
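The init_librm hunks above compute the 64-bit virt_offset using a 32-bit subtraction plus a recorded carry (the setae into the high dword). A C model of that computation (illustrative only; the load address is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    int main ( void ) {
            uint64_t link_base = 0xffffffffeb000000ULL; /* link-time _textdata */
            uint64_t phys_base = 0x00000000eb000000ULL; /* hypothetical load address */

            /* "subl $VIRTUAL(_textdata), %edi" computes the low dword;
             * ".if64 ; setae (rm_virt_offset+4) ; .endif" records the
             * absence of a borrow (CF=0) in the high dword.
             */
            uint32_t low = ( ( uint32_t ) phys_base - ( uint32_t ) link_base );
            uint32_t high = ( ( uint32_t ) phys_base >= ( uint32_t ) link_base );
            uint64_t virt_offset = ( ( ( uint64_t ) high << 32 ) | low );

            /* virt_offset is "the value to be added to an address
             * within .textdata in order to obtain its physical
             * address" (modulo 2^64).
             */
            assert ( ( link_base + virt_offset ) == phys_base );

            /* The low 32 bits behave exactly like a 32-bit build's
             * virt_offset: truncated virtual address plus offset gives
             * the physical address (modulo 2^32).
             */
            assert ( ( uint32_t ) ( ( uint32_t ) link_base + low ) ==
                     ( uint32_t ) phys_base );

            return 0;
    }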
@@ -221,7 +237,7 @@ init_librm_pmode:
movw $REAL_DS, %ax
movw %ax, %ds
movl $rm_virt_addrs, %esi
- movl $virt_addrs, %edi
+ movl $VIRTUAL(virt_addrs), %edi
movl $( VA_SIZE / 4 ), %ecx
rep movsl
popw %ds
@@ -312,7 +328,7 @@ real_to_prot:
movl %cr0, %eax
orb $CR0_PE, %al
movl %eax, %cr0
- data32 ljmp $VIRTUAL_CS, $r2p_pmode
+ data32 ljmp $VIRTUAL_CS, $VIRTUAL(r2p_pmode)
.section ".text.real_to_prot", "ax", @progbits
.code32
r2p_pmode:
@@ -323,15 +339,15 @@ r2p_pmode:
movw %ax, %fs
movw %ax, %gs
movw %ax, %ss
- movl pm_esp, %esp
+ movl VIRTUAL(pm_esp), %esp
/* Load protected-mode interrupt descriptor table */
- lidt idtr
+ lidt VIRTUAL(idtr)
/* Record real-mode %ss:sp (after removal of data) */
- movw %bp, rm_ss
+ movw %bp, VIRTUAL(rm_ss)
addl %ecx, %edx
- movw %dx, rm_sp
+ movw %dx, VIRTUAL(rm_sp)
/* Move data from RM stack to PM stack */
subl %ecx, %esp
@@ -365,7 +381,8 @@ r2p_pmode:
.code32
prot_to_real:
/* Copy real-mode global descriptor table register to RM code segment */
- movl text16, %edi
+ movl VIRTUAL(text16), %edi
+.if64 ; subl VIRTUAL(virt_offset), %edi ; .endif
leal rm_gdtr(%edi), %edi
movsw
movsl
@@ -377,20 +394,20 @@ prot_to_real:
addl $4, %ecx
/* Real-mode %ss:sp => %ebp:edx and virtual address => %edi */
- movzwl rm_ss, %ebp
- movzwl rm_sp, %edx
+ movzwl VIRTUAL(rm_ss), %ebp
+ movzwl VIRTUAL(rm_sp), %edx
subl %ecx, %edx
movl %ebp, %eax
shll $4, %eax
leal (%eax,%edx), %edi
- subl virt_offset, %edi
+ subl VIRTUAL(virt_offset), %edi
/* Move data from PM stack to RM stack */
movl %esp, %esi
rep movsb
/* Record protected-mode %esp (after removal of data) */
- movl %esi, pm_esp
+ movl %esi, VIRTUAL(pm_esp)
/* Load real-mode segment limits */
movw $P2R_DS, %ax
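The prot_to_real hunks show the "minor adjustment" mentioned in the commit message: in a 64-bit build "text16" now holds a physical address, while this code still runs with 32-bit virtual addressing, hence the extra subtraction. A C model (illustrative only; both values hypothetical):

    #include <assert.h>
    #include <stdint.h>

    int main ( void ) {
            uint32_t text16 = 0x0009f000;      /* hypothetical physical .text16 */
            uint32_t virt_offset = 0xe5000000; /* hypothetical low dword */

            /* "movl VIRTUAL(text16), %edi" loads a physical address;
             * ".if64 ; subl VIRTUAL(virt_offset), %edi ; .endif"
             * converts it to a 32-bit virtual address (modulo 2^32,
             * matching the CPU's 32-bit arithmetic).
             */
            uint32_t text16_virt = ( text16 - virt_offset );

            /* Adding virt_offset back recovers the physical address,
             * consistent with the definition of virt_offset.
             */
            assert ( ( uint32_t ) ( text16_virt + virt_offset ) == text16 );

            return 0;
    }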
@@ -512,7 +529,7 @@ prot_call:
/* Switch to protected mode and move register dump to PM stack */
movl $PC_OFFSET_END, %ecx
- pushl $pc_pmode
+ pushl $VIRTUAL(pc_pmode)
jmp real_to_prot
.section ".text.prot_call", "ax", @progbits
.code32
@@ -589,7 +606,7 @@ real_call:
/* Switch to real mode and move register dump to RM stack */
movl $( RC_OFFSET_REGS_END + 4 /* function pointer copy */ ), %ecx
pushl $rc_rmode
- movl $rm_default_gdtr_idtr, %esi
+ movl $VIRTUAL(rm_default_gdtr_idtr), %esi
jmp prot_to_real
.section ".text16.real_call", "ax", @progbits
.code16
@@ -605,7 +622,7 @@ rc_rmode:
/* Switch to protected mode and move register dump back to PM stack */
movl $RC_OFFSET_REGS_END, %ecx
- pushl $rc_pmode
+ pushl $VIRTUAL(rc_pmode)
jmp real_to_prot
.section ".text.real_call", "ax", @progbits
.code32
@@ -665,6 +682,8 @@ flatten_dummy:
* May be entered with either physical or virtual stack segment.
****************************************************************************
*/
+ .section ".text.interrupt_wrapper", "ax", @progbits
+ .code32
.globl interrupt_wrapper
interrupt_wrapper:
/* Preserve segment registers and original %esp */
diff --git a/src/arch/x86_64/Makefile b/src/arch/x86_64/Makefile
index 48c0aa1a..246905cd 100644
--- a/src/arch/x86_64/Makefile
+++ b/src/arch/x86_64/Makefile
@@ -7,10 +7,6 @@ CFLAGS += -fstrength-reduce -fomit-frame-pointer
#
CFLAGS += -falign-jumps=1 -falign-loops=1 -falign-functions=1
-# Use %rip-relative addressing wherever possible.
-#
-CFLAGS += -fpie
-
# Force 64-bit code
#
CFLAGS += -m64
diff --git a/src/arch/x86_64/Makefile.efi b/src/arch/x86_64/Makefile.efi
index 12408f86..0041bb8f 100644
--- a/src/arch/x86_64/Makefile.efi
+++ b/src/arch/x86_64/Makefile.efi
@@ -1,5 +1,9 @@
# -*- makefile -*- : Force emacs to use Makefile mode
+# Use %rip-relative addressing wherever possible.
+#
+CFLAGS += -fpie
+
# EFI probably doesn't guarantee us a red zone, so let's not rely on it.
#
CFLAGS += -mno-red-zone
diff --git a/src/arch/x86_64/Makefile.pcbios b/src/arch/x86_64/Makefile.pcbios
index dfb8db0a..ba4c8d8d 100644
--- a/src/arch/x86_64/Makefile.pcbios
+++ b/src/arch/x86_64/Makefile.pcbios
@@ -1,5 +1,14 @@
# -*- makefile -*- : Force emacs to use Makefile mode
+# Place .textdata in negative 2GB of address space
+#
+CFLAGS += -mcmodel=kernel
+LDFLAGS += --section-start=.textdata=0xffffffffeb000000
+
+# Assembly code does not respect a red zone.
+#
+CFLAGS += -mno-red-zone
+
# Include generic BIOS Makefile
#
MAKEDEPS += arch/x86/Makefile.pcbios
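The -mcmodel=kernel requirement can be sanity-checked against the chosen link address: 0xffffffffeb000000 must lie within the negative 2GB, i.e. be representable as a sign-extended 32-bit value. A quick check (illustrative only, not part of this commit):

    #include <assert.h>
    #include <stdint.h>

    int main ( void ) {
            /* Link-time base of .textdata from the LDFLAGS above */
            int64_t base = ( int64_t ) 0xffffffffeb000000ULL;

            /* Kernel code model: symbols live in [-2GB, 0), so they
             * are representable as sign-extended 32-bit values
             * (R_X86_64_32S).
             */
            assert ( ( base < 0 ) && ( base >= -0x80000000LL ) );

            /* Truncating the base to 32 bits yields 0xeb000000, the
             * physical alias within the identity-mapped 32-bit
             * address space.
             */
            assert ( ( uint32_t ) base == 0xeb000000UL );

            return 0;
    }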