/* (removed: unrelated web-page text accidentally captured with this file) */
/*
* Copyright (C) 2018, bzt (bztsrc@github), https://github.com/bztsrc/raspi3-tutorial
* Copyright (c) 2018, Sergey Matyukevich <https://github.com/s-matyukevich/raspberry-pi-os>
* (c) 2020, Santiago Pagani <santiagopagani@gmail.com>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#include <raspi/sysregs.h>
#include <raspi/mm.h>
#include <raspi/mmu.h>
#include <uk/config.h>
#include <uk/arch/lcpu.h>
#include <arm/arm64/pagetable.S>
.section ".text.boot"
.global _start
/*
 * Kernel boot entry point. The Raspberry Pi firmware enters here
 * (typically at EL2) with x0 = physical address of the device tree
 * blob. x25 carries the DTB pointer through to jump_to_C.
 */
_start:
mov x25, x0 /* preserve dtb addr */
// save assembly entry time for debugging messages
// these values are used as arguments for function `_libraspiplat_entry`
// address of system-timer
// BCM283x system timer MMIO base (physical address; MMU is still off).
ldr x0, =0x3F003000
// Sample the free-running counter low/high words (CLO at +4, CHI at +8)
// twice, so the C entry code can detect a CLO->CHI carry between reads.
// NOTE(review): w10-w13 must remain live until jump_to_C copies them
// into w0-w3 — nothing on the boot path below may clobber them.
ldr w10, [x0, #4]
ldr w11, [x0, #8]
ldr w12, [x0, #4]
ldr w13, [x0, #8]
// read cpu id, stop slave cores
mrs x1, mpidr_el1
and x1, x1, #3          // Aff0 = core number (0-3 on RPi 3)
cbz x1, master
// If the cpu id is > 0, hang here
// Secondary cores park in a low-power wait-for-event loop.
hang: wfe
b hang
// Continue if cpu id == 0
// Continue if cpu id == 0
// Primary-core path: program the EL2/EL1 system registers, then use
// eret to drop from EL2 to EL1 at el1_entry.
master:
// disable mmu and cache
ldr x0, =SCTLR_EL1_VALUE_MMU_DISABLED
msr sctlr_el1, x0
ldr x0, =SCTLR_EL2_VALUE
msr sctlr_el2, x0
isb
// Disable coprocessor traps
ldr x0, =CPACR_EL1_VALUE
msr cpacr_el1, x0
// define register width, el1: aarch64, el0: by code. other bits are 0.
ldr x0, =HCR_EL2_VALUE
msr hcr_el2, x0
// Set the SPSR state to restore when returning from EL2 to EL1
ldr x0, =SPSR_EL2_VALUE
msr spsr_el2, x0
// set return address when returning from EL2 to EL1
adr x0, el1_entry
msr elr_el2, x0
// core0 timers interrupt control, for enable or disable
// BCM2836/7 ARM-local peripherals: core 0 timers interrupt control
// register lives at 0x4000_0040.
ldr x0, =0x40000040
ldr w14, [x0]
// irq enable
// NOTE(review): bit 3 appears to be the virtual timer (nCNTVIRQ) IRQ
// enable — confirm against the BCM2836 local-peripherals datasheet.
orr w14, w14, #8
str w14, [x0]
// Leave EL2: restores spsr_el2 into PSTATE and branches to elr_el2.
eret
// Execution resumes here at EL1 after the eret above.
el1_entry:
/*
 * We will disable MMU and cache before the pagetables are ready.
 * This means we will change memory with cache disabled, so we need to
 * invalidate the cache to ensure there is no stale data in it.
 * But it would be expensive to invalidate the whole cache.
 * In this case, just need to invalidate what we are going to use:
 * DTB, TEXT, DATA, BSS, and bootstack.
 */
// x0 = range start (VA_START), x1 = length in bytes (_end - VA_START).
// NOTE(review): assumes clean_and_invalidate_dcache_range takes
// (base, size) and preserves x25/w10-w13 — confirm its definition.
ldr x0, =VA_START
ldr x2, =_end
sub x1, x2, x0
bl clean_and_invalidate_dcache_range
dsb sy
// Install the EL1 exception vector table before enabling the MMU.
ldr x0, =vectors_el1
msr vbar_el1, x0
/* Enable the mmu */
bl start_mmu
#if CONFIG_RASPI_WATERMARK_STACK
// Pre-fill 0x10000 quad-words (512 KiB) starting at VA_START with a
// descending counter so peak stack usage can be measured afterwards
// by scanning for the first overwritten watermark value.
watermark_stack_start:
ldr x1, =VA_START
ldr w2, =0x10000        // quad-words remaining (writing w2 zeroes x2's upper half)
watermark_stack_loop:
cbz w2, watermark_stack_done
str x2, [x1], #8        // store current counter, post-increment pointer by 8
sub w2, w2, #1
cbnz w2, watermark_stack_loop
watermark_stack_done:
#endif
clear_bss_start:
// Clear bss
// NOTE(review): the loop stores one 64-bit zero per iteration while
// decrementing the counter by 1, i.e. it zeroes __bss_size * 8 bytes.
// This assumes the linker script defines __bss_size in quad-words,
// not bytes — confirm against the platform linker script.
ldr x1, =__bss_start
ldr w2, =__bss_size
clear_bss_loop:
cbz w2, clear_bss_done
str xzr, [x1], #8
sub w2, w2, #1
cbnz w2, clear_bss_loop
clear_bss_done:
// Set the stack before our code, use SP_ELx for exception level ELx
msr SPSel, #1           // select SP_EL1 (not SP_EL0) while at EL1
ldr x1, =_start
// stack space: _start ~ 0x0
// The boot stack grows downward from the image base (_start) toward 0.
mov sp, x1
jump_to_C:
/* Load dtb address to x4 as a parameter */
mov x4, x25
// Recover initial timer value
// w0-w3 = the two CLO/CHI system-timer samples captured at _start,
// handed to _libraspiplat_entry for boot-time measurement.
mov w0, w10
mov w1, w11
mov w2, w12
mov w3, w13
bl _libraspiplat_entry
// As a failsafe, we also hang the main core
b hang
/*
 * create_pgd_entry tbl, virt, tmp1, tmp2
 * Populate the top two translation levels (PGD, then PUD) for the
 * virtual address \virt. Each create_table_entry advances \tbl by one
 * PAGE_SIZE, so on exit \tbl points at the next-level (PMD) page that
 * create_block_map will fill.
 * Clobbers: \tmp1, \tmp2; \tbl is advanced by 2 * PAGE_SIZE.
 */
.macro create_pgd_entry, tbl, virt, tmp1, tmp2
create_table_entry \tbl, \virt, PGD_SHIFT, \tmp1, \tmp2
create_table_entry \tbl, \virt, PUD_SHIFT, \tmp1, \tmp2
.endm
/*
 * create_table_entry tbl, virt, shift, tmp1, tmp2
 * Write one table descriptor: slot ((\virt >> \shift) & mask) of the
 * table page at \tbl is made to point at the page immediately after it
 * (\tbl + PAGE_SIZE), tagged as a table entry. Advances \tbl to that
 * next-level table page.
 * Clobbers: \tmp1, \tmp2.
 */
.macro create_table_entry, tbl, virt, shift, tmp1, tmp2
lsr \tmp1, \virt, #\shift
// table index
and \tmp1, \tmp1, #PTRS_PER_TABLE - 1
// next-level table occupies the page right after the current one
add \tmp2, \tbl, #PAGE_SIZE
orr \tmp2, \tmp2, #PTE_TYPE_TABLE
str \tmp2, [\tbl, \tmp1, lsl #3]        // 8 bytes per descriptor
// next level table page
add \tbl, \tbl, #PAGE_SIZE
.endm
/*
 * create_block_map tbl, phys, start, end, flags, tmp1
 * Fill block descriptors in the table at \tbl, mapping the virtual
 * range [\start, \end] (inclusive — loop uses b.ls) onto the physical
 * range beginning at \phys, one SECTION_SIZE block per entry, with
 * attribute bits \flags.
 * Clobbers: \phys, \start, \end, \tmp1; \tbl is preserved.
 */
.macro create_block_map, tbl, phys, start, end, flags, tmp1
lsr \start, \start, #SECTION_SHIFT
// table index
and \start, \start, #PTRS_PER_TABLE - 1
lsr \end, \end, #SECTION_SHIFT
// table end index
and \end, \end, #PTRS_PER_TABLE - 1
lsr \phys, \phys, #SECTION_SHIFT
ldr \tmp1, =\flags
// table entry
// \phys now holds a complete descriptor: output address | flags.
orr \phys, \tmp1, \phys, lsl #SECTION_SHIFT
// store the entry
9999: str \phys, [\tbl, \start, lsl #3]
// next entry
add \start, \start, #1
// next block
add \phys, \phys, #SECTION_SIZE
cmp \start, \end
b.ls 9999b              // unsigned "lower or same" => end index inclusive
.endm
/*
 * create_page_tables — build the initial boot translation tables.
 * Maps normal RAM [VA_START, VA_START + DEVICE_BASE) as cacheable RWX
 * blocks, and the peripheral window [VA_START + DEVICE_BASE,
 * VA_START + PHYS_MEMORY_SIZE) as Device-nGnRnE.
 * Clobbers: x0-x4, x29.
 */
.global create_page_tables
create_page_tables:
// save return address
// NOTE(review): no stack exists yet, so x30 is stashed in x29 rather
// than pushed; memzero must therefore not clobber x29.
mov x29, x30
adrp x0, _pagetables
mov x1, #PG_DIR_SIZE
bl memzero
adrp x0, _pagetables
mov x1, #VA_START
// Build PGD + PUD levels; leaves x0 pointing at the block (PMD) page.
create_pgd_entry x0, x1, x2, x3
/* Mapping kernel and init stack*/
// start mapping from physical offset 0
mov x1, xzr
// first virtual address
mov x2, #VA_START
// last virtual address
ldr x3, =(VA_START + DEVICE_BASE - SECTION_SIZE)
create_block_map x0, x1, x2, x3, PTE_BLOCK_NORMAL_RWX, x4
/* Mapping device memory*/
// start mapping from device base address
mov x1, #DEVICE_BASE
// first virtual address
ldr x2, =(VA_START + DEVICE_BASE)
// last virtual address
ldr x3, =(VA_START + PHYS_MEMORY_SIZE - SECTION_SIZE)
create_block_map x0, x1, x2, x3, PTE_BLOCK_DEVICE_nGnRnE, x4
// restore return address
mov x30, x29
ret
/* (removed: unrelated web-page text accidentally captured with this file) */