Remove old aarch64 asm code

Jeremy Soller 2024-10-29 07:59:12 -06:00
parent ea0356b26a
commit e4e55103ad
No known key found for this signature in database
GPG Key ID: D02FD439211AF56F
8 changed files with 0 additions and 620 deletions

View File

@@ -48,14 +48,6 @@ fn main() {
match arch_str {
"aarch64" => {
// Build pre kstart init asm code for aarch64
/*TODO: do we need any of this?
println!("cargo:rerun-if-changed=src/arch/aarch64/init/pre_kstart/early_init.S");
cc::Build::new()
.file("src/arch/aarch64/init/pre_kstart/early_init.S")
.target("aarch64-unknown-redox")
.compile("early_init");
*/
println!("cargo:rustc-cfg=dtb"); println!("cargo:rustc-cfg=dtb");
} }
"x86" => { "x86" => {

View File

@@ -24,8 +24,6 @@ SECTIONS {
.text : AT(ADDR(.text) - KERNEL_OFFSET) {
__text_start = .;
*(.early_init.text*)
. = ALIGN(4096);
*(.text*)
__usercopy_start = .;
*(.usercopy-fns)

View File

@@ -1,51 +0,0 @@
// Early initialisation for AArch64 systems.
//
// This code is responsible for taking over control of the boot CPU from
// the bootloader and setting up enough of the CPU so Rust code can take
// over (in kstart).
//
// Readers are recommended to refer to the Arm Architecture Reference Manual
// when studying this code. The latest version of the Arm ARM can be found at:
//
// https://developer.arm.com/products/architecture/cpu-architecture/a-profile/docs
//
// The code is structured such that different phases/functionality are
// in separate files included by this central one.
//
// This is hopefully easier to grok and study than one gigantic file.
//
// The emphasis is on clarity and not optimisation. Clarity is hard without
// a decent understanding of the Arm architecture.
//
// Optimisation is not too much of a concern given that this is boot code.
// That said, future revisions will aim to optimise.
#include "helpers/consts.h"
#include "helpers/pre_mmu_enabled.S"
#include "helpers/build_page_tables.S"
#include "helpers/post_mmu_enabled.S"
#include "helpers/vectors.S"
// Entry point for the boot CPU. We assume that x0 contains the physical address of a DTB image
// passed in by the bootloader.
//
// Note that the kernel linker script arranges for this code to lie at the start of the kernel
// image.
.text
.align 2
.pushsection ".early_init.text", "ax"
.globl early_init
early_init:
bl early_setup
bl disable_mmu
bl create_page_tables
bl enable_mmu
b mmu_on_trampoline // With the mmu now on, this returns below to
// mmu_on using Virtual Addressing
mmu_on:
bl setup_kstart_context // Setup environment for kstart
b kstart // Let the show begin! :)
.popsection

View File

@@ -1,249 +0,0 @@
// Creates the following MMU mappings:
//
// 1. Identity mapping for the kernel (VA == PA) to be able to switch on the MMU
// 2. Mapping for the kernel with high VAs from KERNEL_OFFSET onwards
// 3. Mapping for the kernel stack
// 4. Mapping for the DTB Image
// 5. Optional Mapping for a diagnostic UART
create_page_tables:
mov x22, x30 // Stash the link register (the bl calls below clobber x30)
adr x0, addr_marker // x0: Physical address of addr_marker
ldr x1, [x0] // x1: Virtual address of addr_marker
ldr x2, =KERNEL_OFFSET // x2: Virtual address of kernel base
sub x3, x1, x2 // x3: 'Distance' of addr_marker from kernel base
sub x0, x0, x3 // x0: Physical address of kernel base
mov x11, x0 // x11: Stash away the Physical address of the kernel image base
ldr x1, =KERNEL_OFFSET // x1: Virtual address of kernel start addr
ldr x2, =__end // x2: Virtual address of kernel end addr
sub x12, x2, x1 // x12: Size of the kernel image
add x12, x12, #(0x200000) // x12: Align to 2MB (Add 2MB, then clear low bits if any)
and x3, x12, #0xffffffffffe00000
cmp x12, #0x200, lsl #12
csel x12, x3, x12, hi
add x13, x1, x12 // x13: Stack top vaddr (kbase.vaddr + ksize)
mov x14, #(EARLY_KSTACK_SIZE) // x14: Stack size
ldr x15, =KERNEL_OFFSET // x15: Kernel base vaddr
// From this point on, the following registers are not to be modified for convenience:
// x11: PA of kernel image base
// x12: Kernel image size (2MB aligned)
// x13: VA of stack top
// x14: Stack size
// x15: VA of kernel Base
// Zero out all the tables
zero_tables:
adr x0, identkmap_l0_ptable
mov x1, #(PAGE_SIZE)
mov x2, #(NUM_TABLES) // There are 14 tables to clear (2 L0, 4 L1, 7 L2, 1 env)
mul x1, x1, x2
lsr x1, x1, #3
mov x2, xzr
zero_loop:
str xzr, [x0, x2]
add x2, x2, #8
cmp x1, x2
b.ne zero_loop
// Identity map the kernel
mov x0, x11 // x0: Paddr of kernel image base
mov x1, x11 // x1: Paddr of kernel image base
mov x2, x12 // x2: Kernel image size
mov x3, #(NORMAL_UNCACHED_MEM) // x3: Attributes to apply
adr x4, identkmap_l0_ptable // x4: Ptr to L0 table for identity mapping the kernel
adr x5, identkmap_l1_ptable // x5: Ptr to L1 table for identity mapping the kernel
adr x6, identkmap_l2_ptable // x6: Ptr to L2 table for identity mapping the kernel
bl build_map
// Map the kernel
ldr x0, =KERNEL_OFFSET // x0: Vaddr of kernel base
mov x1, x11 // x1: Paddr of kernel base
mov x2, x12 // x2: Kernel image size
mov x3, #(NORMAL_CACHED_MEM) // x3: Attributes to apply
adr x4, kernmap_l0_ptable // x4: Ptr to L0 table for mapping the kernel
adr x5, kernmap_l1_ptable // x5: Ptr to L1 table for mapping the kernel
adr x6, kernmap_l2_ptable // x6: Ptr to L2 table for mapping the kernel
bl build_map
// Map the kernel stack
ldr x0, =KERNEL_OFFSET // x0: Vaddr of kernel stack top
add x0, x0, x12
sub x1, x11, x14 // x1: Paddr of kernel stack top (kbase.paddr - kstack size)
mov x2, #(EARLY_KSTACK_SIZE) // x2: Size of kernel stack
mov x3, #(NORMAL_CACHED_MEM) // x3: Attributes to apply
adr x4, kernmap_l0_ptable // x4: Ptr to the kernel L0 table
adr x5, kstack_l1_ptable // x5: Ptr to L1 table for mapping the kernel stack
adr x6, kstack_l2_ptable // x6: Ptr to L2 table for mapping the kernel stack
bl build_map
// Map first GIGABYTE at PHYS_OFFSET
mov x1, #0 // x1: Physical address
adr x6, physmap_1gb_l2_ptable // x6: Ptr to L2 table
bl build_physmap
// Map second GIGABYTE at PHYS_OFFSET + GIGABYTE
mov x1, #(GIGABYTE) // x1: Physical address
adr x6, physmap_2gb_l2_ptable // x6: Ptr to L2 table
bl build_physmap
// Map third GIGABYTE at PHYS_OFFSET + 2*GIGABYTE
mov x1, #(2*GIGABYTE) // x1: Physical address
adr x6, physmap_3gb_l2_ptable // x6: Ptr to L2 table
bl build_physmap
// Map fourth GIGABYTE at PHYS_OFFSET + 3*GIGABYTE
mov x1, #(3*GIGABYTE) // x1: Physical address
adr x6, physmap_4gb_l2_ptable // x6: Ptr to L2 table
bl build_physmap
// Set up recursive paging for TTBR1
adr x0, kernmap_l0_ptable
add x1, x0, #(511 * 8)
orr x0, x0, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x0, x0, #(ACCESS_FLAG_BIT)
str x0, [x1]
// Set up recursive paging for TTBR0
adr x0, identkmap_l0_ptable
add x1, x0, #(511 * 8)
orr x0, x0, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x0, x0, #(ACCESS_FLAG_BIT)
str x0, [x1]
mov x30, x22 // Restore the link register
ret
// Add a physmap entry
// x1: physical address, a multiple of GIGABYTE
// x6: address of l2 page table
build_physmap:
ldr x0, =DEVMAP_VBASE // x0: Virtual address
add x0, x0, x1
mov x2, #(GIGABYTE - 1) // x2: Size (minus one to work around errors)
mov x3, #(DEVICE_MEM) // x3: Attributes to apply
adr x4, kernmap_l0_ptable // x4: Ptr to L0 table
adr x5, physmap_l1_ptable // x5: Ptr to L1 table
b build_map
// Generic routine to build mappings. Requires the following inputs:
//
// x0: Vaddr to map to Paddr
// x1: Paddr to map Vaddr to
// x2: Length (in bytes) of region to map
// x3: Region attributes
// x4: Paddr of L0 table to use for mapping
// x5: Paddr of L1 table to use for mapping
// x6: Paddr of L2 table to use for mapping
//
// To keep things simple everything is mapped using 2MB blocks. This implies that the length
// is explicitly aligned to 2MB to prevent any translation aliases. Since block translations
// at L2 cover 2MB blocks, that suits us nicely so everything uses 2MB L2 blocks. Wasteful
// perhaps but at this stage it's convenient and in any case will get ripped out and
// reprogrammed in kstart.
build_map:
lsr x8, x0, #39 // First group of 9 bits of VA
and x8, x8, #0x1ff
lsl x8, x8, #3 // x8: Index into L0 table
ldr x9, [x4, x8]
cbnz x9, l1_idx_prefilled
mov x9, x5 // Get L1 base
bfm w9, wzr, #0, #11 // Clear bits [11:0]: table addresses are page aligned
orr x9, x9, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x9, x9, #(ACCESS_FLAG_BIT)
str x9, [x4, x8] // L0[Index]: L1
l1_idx_prefilled:
lsr x8, x0, #30 // Second group of 9 bits of VA
and x8, x8, #0x1ff
lsl x8, x8, #3 // x8: Index into L1 table
ldr x9, [x5, x8]
cbnz x9, l2_idx_prefilled
build_map_l2:
mov x9, x6 // Get L2 base
bfm w9, wzr, #0, #11 // Clear bits [11:0]: table addresses are page aligned
orr x9, x9, #((DESC_TYPE_TABLE << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x9, x9, #(ACCESS_FLAG_BIT)
lsl x4, x3, #2
orr x9, x9, x4
str x9, [x5, x8] // L1[Index]: Base of L2 table
l2_idx_prefilled:
lsr x2, x2, #21 // Number of 2MB blocks needed
add x2, x2, #1 //TODO: remove this and remove workarounds
lsr x8, x0, #21 // Third group of 9 bits of VA
and x8, x8, #0x1ff
lsl x8, x8, #3 // x8: Index into L2 table
ldr x9, [x6, x8]
cbnz x9, build_map_error
build_map_l2_loop:
mov x9, x1
bfm w9, wzr, #0, #11 // Clear bits [11:0] of the block's physical address
orr x9, x9, #((DESC_TYPE_BLOCK << DESC_TYPE_BIT) | (DESC_VALID << DESC_VALID_BIT))
orr x9, x9, #(ACCESS_FLAG_BIT)
lsl x4, x3, #2
orr x9, x9, x4
ldr x10, [x6, x8]
mov x7, #(DESC_VALID << DESC_VALID_BIT)
and x10, x10, x7
cmp x10, x7
b.eq build_map_error
str x9, [x6, x8] // L2[Index]: PA of 2MB region to map to
mov x9, #1
add x1, x1, x9, lsl #21
add x8, x8, #8
sub x2, x2, #1
cbnz x2, build_map_l2_loop
ret
build_map_error:
wfi
b build_map_error
// Statically allocated tables consumed by build_map.
.align 12
identkmap_l0_ptable:
.space PAGE_SIZE
identkmap_l1_ptable:
.space PAGE_SIZE
identkmap_l2_ptable:
.space PAGE_SIZE
kernmap_l0_ptable:
.space PAGE_SIZE
kernmap_l1_ptable:
.space PAGE_SIZE
kernmap_l2_ptable:
.space PAGE_SIZE
kstack_l1_ptable:
.space PAGE_SIZE
kstack_l2_ptable:
.space PAGE_SIZE
physmap_l1_ptable:
.space PAGE_SIZE
physmap_1gb_l2_ptable:
.space PAGE_SIZE
physmap_2gb_l2_ptable:
.space PAGE_SIZE
physmap_3gb_l2_ptable:
.space PAGE_SIZE
physmap_4gb_l2_ptable:
.space PAGE_SIZE
env_region:
.space PAGE_SIZE
// Misc scratch memory used by this file
addr_marker:
.quad addr_marker
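For readers studying the removed build_map routine: it splits the virtual address into 9-bit table indices at bits [47:39] (L0), [38:30] (L1) and [29:21] (L2), then fills consecutive L2 slots with 2MB block descriptors. A minimal Rust sketch of the same arithmetic, assuming the 4KB translation granule this code uses (function names are illustrative, not kernel API):

    /// Table indices build_map derives from a 48-bit virtual address.
    fn table_indices(va: u64) -> (usize, usize, usize) {
        let l0 = ((va >> 39) & 0x1ff) as usize; // first group of 9 bits
        let l1 = ((va >> 30) & 0x1ff) as usize; // second group of 9 bits
        let l2 = ((va >> 21) & 0x1ff) as usize; // third group of 9 bits
        (l0, l1, l2)
    }

    /// Number of 2MB L2 blocks programmed for a region, mirroring the
    /// asm's unconditional `add x2, x2, #1` workaround (see the TODO above).
    fn blocks_2mb(len: u64) -> u64 {
        (len >> 21) + 1
    }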

View File

@@ -1,26 +0,0 @@
#define PAGE_SIZE 4096
#define GIGABYTE 0x40000000
#define VIRT_BITS 48
#define NUM_TABLES 14
#define EARLY_KSTACK_SIZE (PAGE_SIZE) // Initial stack
#define DEVMAP_VBASE 0xfffffe0000000000
#define SCTLR_M 0x00000001 // SCTLR_M bit used to control MMU on/off
#define DEVICE_MEM 0 // Memory type specifiers
#define NORMAL_UNCACHED_MEM 1
#define NORMAL_CACHED_MEM 2
#define DESC_VALID_BIT 0 // Descriptor validity setting
#define DESC_VALID 1
#define DESC_INVALID 0
#define DESC_TYPE_BIT 1 // Descriptor type
#define DESC_TYPE_TABLE 1
#define DESC_TYPE_PAGE 1
#define DESC_TYPE_BLOCK 0
#define BLOCK_DESC_MASK (~((0xffff << 48) | (0xffff))) // Convenience mask for block descriptors
#define ACCESS_FLAG_BIT (1 << 10)
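Taken together, these constants describe how build_map composes a level-2 block descriptor: the block's physical address with its low bits cleared, the memory-attribute index (AttrIndx) in bits [4:2] (the asm's `lsl x4, x3, #2`), the access flag, and the type/valid bits at the bottom. A hedged Rust sketch of that bit pattern (helper name is illustrative):

    const DESC_VALID: u64 = 1;      // bit 0: descriptor is valid
    const DESC_TYPE_BLOCK: u64 = 0; // bit 1: block (not table)
    const ACCESS_FLAG_BIT: u64 = 1 << 10;

    // Builds the value build_map stores into an L2 slot for a 2MB block.
    fn block_descriptor(pa: u64, attr_index: u64) -> u64 {
        (pa & !0xfff)                // asm clears bits [11:0]; pa is 2MB aligned anyway
            | (attr_index << 2)      // AttrIndx field, bits [4:2]
            | ACCESS_FLAG_BIT        // AF set, avoiding access-flag faults
            | (DESC_TYPE_BLOCK << 1) // DESC_TYPE_BIT
            | DESC_VALID             // DESC_VALID_BIT
    }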

View File

@@ -1,95 +0,0 @@
// Populates misc arguments, sets up the stack, clears all other registers.
setup_kstart_context:
adr x0, args.kernel_base // Physical address of kernel base
str x11, [x0]
adr x0, args.kernel_size // Size of kernel image
str x12, [x0]
adr x0, args.stack_base // Virtual address of kernel stack base
ldr x1, =KERNEL_OFFSET
add x1, x1, x12
str x1, [x0]
adr x0, args.stack_size // Size of kernel stack
mov x1, #(EARLY_KSTACK_SIZE)
str x1, [x0]
adr x0, args.env_base // Virtual address of environment base
adr x1, env_region_marker
ldr x1, [x1]
str x1, [x0]
adr x0, args.env_size // Size of environment (populated later in kstart)
ldr x1, =PAGE_SIZE
str x1, [x0]
adr x0, args.dtb_base // Physical address of DTB Image's base
str x19, [x0]
adr x0, args.dtb_size // Size of DTB image
mov w1, w21
str w1, [x0]
add x1, x15, x12 // Initialize the stack pointer, everything is 2MB aligned
add x1, x1, x14 // sp = (kbase.vaddr + ksize + stksize) - 16
sub x1, x1, #16 // Keep sp 16-byte aligned
mov sp, x1
adr x0, tmp_zero // Store a zero at tmp_zero
str xzr, [x0] // Note: x0 points to tmp_zero so we use it below as-is
ldp x2, x3, [x0, #0]! // Zero x2:x29 (x1 is set to zero below)
ldp x4, x5, [x0, #0]!
ldp x6, x7, [x0, #0]!
ldp x8, x9, [x0, #0]!
ldp x10, x11, [x0, #0]!
ldp x12, x13, [x0, #0]!
ldp x14, x15, [x0, #0]!
ldp x16, x17, [x0, #0]!
ldp x18, x19, [x0, #0]!
ldp x20, x21, [x0, #0]!
ldp x22, x23, [x0, #0]!
ldp x24, x25, [x0, #0]!
ldp x26, x27, [x0, #0]!
ldp x28, x29, [x0, #0]!
ldr x0, =args.kernel_base // x0 = Start of argument block
mov x1, #0
ret
mmu_on_trampoline:
adr x0, mmu_on_marker // x0: paddr of mmu_on_marker
ldr x0, [x0] // x0: vaddr of mmu_on
br x0 // MMU now on. Jump to mmu_on using its vaddr
// Statically allocated space to hold misc arguments for kstart.
.align 3
args.kernel_base:
.space 8
args.kernel_size:
.space 8
args.stack_base:
.space 8
args.stack_size:
.space 8
args.env_base:
.space 8
args.env_size:
.space 8
args.dtb_base:
.space 8
args.dtb_size:
.space 8
// Misc scratch memory used by this file
env_region_marker:
.quad env_region
mmu_on_marker:
.quad mmu_on
tmp_zero:
.quad tmp_zero
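The args.* block above is handed to kstart as a single record whose start address ends up in x0. Assuming the Rust side views it as a C-layout struct (field names taken from the labels here; the actual kstart signature is not shown in this diff), it corresponds to roughly:

    #[repr(C)]
    struct KernelArgs {
        kernel_base: u64, // physical address of the kernel image
        kernel_size: u64, // image size, 2MB aligned
        stack_base: u64,  // virtual address of the kernel stack base
        stack_size: u64,  // EARLY_KSTACK_SIZE
        env_base: u64,    // virtual address of the environment page
        env_size: u64,    // PAGE_SIZE here; refined later in kstart
        dtb_base: u64,    // physical address of the DTB image
        dtb_size: u64,    // size read from the DTB header
    }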

View File

@@ -1,66 +0,0 @@
// Stashes DTB size for use later
// Sets up the exception vectors
early_setup:
mov x19, x0 // Store paddr of DTB in x19
ldr w21, [x0, #4] // x0[4] has the DTB size in Big Endian Format
rev w21, w21 // Swizzle to little endian
msr contextidr_el1, xzr // Set contextID reg
dsb sy
ldr x0, =exception_vector_base
msr vbar_el1, x0
ret
disable_mmu:
mrs x0, sctlr_el1
bic x0, x0, SCTLR_M
msr sctlr_el1, x0
isb
ret
// Programs the TTBR registers, MAIR registers, TCR and SCTLR registers.
enable_mmu:
dsb sy
adr x0, identkmap_l0_ptable // Setup TTBRx_EL1
msr ttbr0_el1, x0 // ttbr0_el1: Lower vaddrs
adr x1, kernmap_l0_ptable
msr ttbr1_el1, x1 // ttbr1_el1: Higher vaddrs
isb
tlbi vmalle1is // Invalidate the TLB
ldr x2, mair // Setup MAIR
msr mair_el1, x2
ldr x2, tcr // Setup TCR (IPS field taken from ID_AA64MMFR0_EL1)
mrs x3, id_aa64mmfr0_el1
bfi x2, x3, #32, #3
msr tcr_el1, x2
isb
ldr x2, sctlr_set_bits // Setup SCTLR
ldr x3, sctlr_clr_bits
mrs x1, sctlr_el1
bic x1, x1, x3
orr x1, x1, x2
msr sctlr_el1, x1
isb
mrs x1, sctlr_el1
ret
// Magic config runes (Too much detail to enumerate here: grep the ARM ARM for details)
.align 3
mair:
.quad 0xff4400 // MAIR: Arrange for Device, Normal Non-Cache, Normal Write-Back access types
tcr:
.quad 0x1085100510 // Setup TCR: (TxSZ, ASID_16, TG1_4K, Cache Attrs, SMP Attrs)
sctlr_set_bits:
.quad 0x3485d13d // Set SCTLR bits: (LSMAOE, nTLSMD, UCI, SPAN, nTWW, nTWI, UCT, DZE, I, SED, SA0, SA, C, M, CP15BEN)
sctlr_clr_bits:
.quad 0x32802c2 // Clear SCTLR bits: (EE, EOE, IESB, WXN, UMA, ITD, THEE, A)
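Two details above are easy to miss: the flattened device tree stores its total size big-endian at byte offset 4 (hence the rev), and enable_mmu copies the low three bits of ID_AA64MMFR0_EL1 (its PARange field) into TCR_EL1 bits [34:32] via bfi. The same operations in a hedged Rust sketch (system-register reads stubbed out as parameters, names illustrative):

    // Total size lives at offset 4 of the DTB header, big-endian.
    fn dtb_total_size(dtb: &[u8]) -> u32 {
        u32::from_be_bytes([dtb[4], dtb[5], dtb[6], dtb[7]])
    }

    // Mirrors `bfi x2, x3, #32, #3`: insert the low 3 bits of mmfr0
    // into bits [34:32] of the TCR value.
    fn tcr_with_ips(tcr: u64, mmfr0: u64) -> u64 {
        (tcr & !(0b111u64 << 32)) | ((mmfr0 & 0b111) << 32)
    }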

View File

@@ -1,123 +0,0 @@
// Exception vector stubs
//
// The hex values in x18 are to aid debugging
// Unhandled exceptions spin in a wfi loop for the moment
// This can be macro-ified
.align 11
exception_vector_base:
// Synchronous
.align 7
__vec_00:
mov x18, #0xb0b0
b synchronous_exception_at_el1_with_sp0
b __vec_00
// IRQ
.align 7
__vec_01:
mov x18, #0xb0b1
b irq_at_el1
b __vec_01
// FIQ
.align 7
__vec_02:
mov x18, #0xb0b2
b unhandled_exception
b __vec_02
// SError
.align 7
__vec_03:
mov x18, #0xb0b3
b unhandled_exception
b __vec_03
// Synchronous
.align 7
__vec_04:
mov x18, #0xb0b4
b synchronous_exception_at_el1_with_spx
b __vec_04
// IRQ
.align 7
__vec_05:
mov x18, #0xb0b5
b irq_at_el1
b __vec_05
// FIQ
.align 7
__vec_06:
mov x18, #0xb0b6
b unhandled_exception
b __vec_06
// SError
.align 7
__vec_07:
mov x18, #0xb0b7
b unhandled_exception
b __vec_07
// Synchronous
.align 7
__vec_08:
mov x18, #0xb0b8
b synchronous_exception_at_el0
b __vec_08
// IRQ
.align 7
__vec_09:
mov x18, #0xb0b9
b irq_at_el0
b __vec_09
// FIQ
.align 7
__vec_10:
mov x18, #0xb0ba
b unhandled_exception
b __vec_10
// SError
.align 7
__vec_11:
mov x18, #0xb0bb
b unhandled_exception
b __vec_11
// Synchronous
.align 7
__vec_12:
mov x18, #0xb0bc
b unhandled_exception
b __vec_12
// IRQ
.align 7
__vec_13:
mov x18, #0xb0bd
b unhandled_exception
b __vec_13
// FIQ
.align 7
__vec_14:
mov x18, #0xb0be
b unhandled_exception
b __vec_14
// SError
.align 7
__vec_15:
mov x18, #0xb0bf
b unhandled_exception
b __vec_15
.align 7
exception_vector_end:
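The table above follows the fixed AArch64 vector layout: four groups (current EL with SP0, current EL with SPx, lower EL using AArch64, lower EL using AArch32), each containing Synchronous, IRQ, FIQ and SError entries, with the base aligned to 2KB (.align 11) and each stub padded to 128 bytes (.align 7). A small sketch of the offset arithmetic, handy for mapping the 0xb0bN debug codes in x18 back to a slot (assumed helper, not kernel code):

    // group: 0..=3 as listed above; kind: 0 = Sync, 1 = IRQ, 2 = FIQ, 3 = SError.
    fn vector_offset(group: u64, kind: u64) -> u64 {
        (group * 4 + kind) * 0x80 // each stub occupies 0x80 bytes
    }
    // e.g. __vec_09 (IRQ from lower EL, AArch64) sits at (2*4 + 1) * 0x80 = 0x480.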