mirror of https://github.com/azure-rtos/threadx synced 2025-01-16 07:42:57 +08:00

Fixed the issue where the data/bss section cannot be read from ARM FVP d… (#301)

* Fixed the issue where the data/bss sections cannot be read by the ARM FVP debug tool in the Cortex-A7 GNU port.

https://msazure.visualstudio.com/One/_workitems/edit/24597276/

* remove untracked files.
Yajun Xia 2023-09-15 10:46:20 +08:00 committed by GitHub
parent cd9007712b
commit 6aeefea8e6
40 changed files with 5677 additions and 348 deletions

View File

@@ -79,6 +79,14 @@ _mainCRTStartup:
#endif
#endif
.global _fini
.type _fini,function
_fini:
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
MOV pc, lr // Return to caller
#endif
/* Workspace for Angel calls. */
.data

View File

@@ -109,7 +109,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(256) + (. & (256 - 1));
. = 0x2E000000;
.data :
{
*(.data)
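
The one-line change above replaces the GNU ld page-alignment idiom with a fixed location counter, so .data (and, presumably, the .bss section that follows it) is linked at 0x2E000000, a region that, per the commit message, the ARM FVP debug tool can read. Below is a minimal C sketch of how the placement could be checked at run time; the symbol names __data_start__, __bss_start__ and __bss_end__ and the function name are illustrative assumptions, not symbols defined by this linker script:

#include <stdint.h>

extern char __data_start__[];   /* assumed symbol: start of .data */
extern char __bss_start__[];    /* assumed symbol: start of .bss  */
extern char __bss_end__[];      /* assumed symbol: end of .bss    */

/* Returns non-zero when .data/.bss landed at or above the fixed load address. */
int data_bss_in_debug_window(void)
{
    const uintptr_t base = 0x2E000000u;

    return ((uintptr_t)__data_start__ >= base) &&
           ((uintptr_t)__bss_start__  >= base) &&
           ((uintptr_t)__bss_end__    >= (uintptr_t)__bss_start__);
}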

View File

@@ -0,0 +1,155 @@
// ------------------------------------------------------------
// v7-A Cache, TLB and Branch Prediction Maintenance Operations
// Header File
//
// Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _ARMV7A_GENERIC_H
#define _ARMV7A_GENERIC_H
// ------------------------------------------------------------
// Memory barrier mnemonics
enum MemBarOpt {
RESERVED_0 = 0, RESERVED_1 = 1, OSHST = 2, OSH = 3,
RESERVED_4 = 4, RESERVED_5 = 5, NSHST = 6, NSH = 7,
RESERVED_8 = 8, RESERVED_9 = 9, ISHST = 10, ISH = 11,
RESERVED_12 = 12, RESERVED_13 = 13, ST = 14, SY = 15
};
//
// Note:
// *_IS() stands for "inner shareable"
// DO NOT USE THESE FUNCTIONS ON A CORTEX-A8
//
// ------------------------------------------------------------
// Interrupts
// Enable/disables IRQs (not FIQs)
void enableInterrupts(void);
void disableInterrupts(void);
// ------------------------------------------------------------
// Caches
void invalidateCaches_IS(void);
void cleanInvalidateDCache(void);
void enableCaches(void);
void disableCaches(void);
void invalidateCaches(void);
void cleanDCache(void);
// ------------------------------------------------------------
// TLBs
void invalidateUnifiedTLB(void);
void invalidateUnifiedTLB_IS(void);
// ------------------------------------------------------------
// Branch prediction
void flushBranchTargetCache(void);
void flushBranchTargetCache_IS(void);
// ------------------------------------------------------------
// High Vecs
void enableHighVecs(void);
void disableHighVecs(void);
// ------------------------------------------------------------
// ID Registers
unsigned int getMIDR(void);
#define MIDR_IMPL_SHIFT 24
#define MIDR_IMPL_MASK 0xFF
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK 0xF
#define MIDR_ARCH_SHIFT 16
#define MIDR_ARCH_MASK 0xF
#define MIDR_PART_SHIFT 4
#define MIDR_PART_MASK 0xFFF
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK 0xF
// tmp = get_MIDR();
// implementor = (tmp >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
// variant = (tmp >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK;
// architecture= (tmp >> MIDR_ARCH_SHIFT) & MIDR_ARCH_MASK;
// part_number = (tmp >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
// revision = tmp & MIDR_REV_MASK;
#define MIDR_PART_CA5 0xC05
#define MIDR_PART_CA8 0xC08
#define MIDR_PART_CA9 0xC09
unsigned int getMPIDR(void);
#define MPIDR_FORMAT_SHIFT 31
#define MPIDR_FORMAT_MASK 0x1
#define MPIDR_UBIT_SHIFT 30
#define MPIDR_UBIT_MASK 0x1
#define MPIDR_CLUSTER_SHIFT 7
#define MPIDR_CLUSTER_MASK 0xF
#define MPIDR_CPUID_SHIFT 0
#define MPIDR_CPUID_MASK 0x3
#define MPIDR_CPUID_CPU0 0x0
#define MPIDR_CPUID_CPU1 0x1
#define MPIDR_CPUID_CPU2 0x2
#define MPIDR_CPUID_CPU3 0x3
#define MPIDR_UNIPROCESSPR 0x1
#define MPDIR_NEW_FORMAT 0x1
// ------------------------------------------------------------
// Context ID
unsigned int getContextID(void);
void setContextID(unsigned int);
#define CONTEXTID_ASID_SHIFT 0
#define CONTEXTID_ASID_MASK 0xFF
#define CONTEXTID_PROCID_SHIFT 8
#define CONTEXTID_PROCID_MASK 0x00FFFFFF
// tmp = getContextID();
// ASID = tmp & CONTEXTID_ASID_MASK;
// PROCID = (tmp >> CONTEXTID_PROCID_SHIFT) & CONTEXTID_PROCID_MASK;
// ------------------------------------------------------------
// SMP related for Armv7-A MPCore processors
//
// DO NOT CALL THESE FUNCTIONS ON A CORTEX-A8
// Returns the base address of the private peripheral memory space
unsigned int getBaseAddr(void);
// Returns the CPU ID (0 to 3) of the CPU executed on
#define MP_CPU0 (0)
#define MP_CPU1 (1)
#define MP_CPU2 (2)
#define MP_CPU3 (3)
unsigned int getCPUID(void);
// Set this core as participating in SMP
void joinSMP(void);
// Set this core as NOT participating in SMP
void leaveSMP(void);
// Go to sleep, never returns
void goToSleep(void);
#endif
// ------------------------------------------------------------
// End of v7.h
// ------------------------------------------------------------
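
The header above only declares the maintenance helpers and the ID-register bit fields; the short C sketch below shows one way they might be combined during early bring-up. It is illustrative only (the function name example_cpu_bringup and the call ordering are not taken from this port), and it respects the header's warning that the *_IS and SMP helpers must not be used on a Cortex-A8:

#include "v7.h"   /* the header shown above, named per its trailing comment */

void example_cpu_bringup(void)
{
    unsigned int part = (getMIDR() >> MIDR_PART_SHIFT) & MIDR_PART_MASK;

    /* Caches may hold stale lines out of reset, so invalidate before enabling. */
    invalidateCaches();
    invalidateUnifiedTLB();
    flushBranchTargetCache();
    enableCaches();

    if (part != MIDR_PART_CA8)
    {
        joinSMP();   /* per the header, never call the SMP helpers on a Cortex-A8 */
    }

    if (getCPUID() == MP_CPU0)
    {
        /* one-time, core-0-only setup could go here */
    }
}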

View File

@@ -0,0 +1,476 @@
// ------------------------------------------------------------
// v7-A Cache and Branch Prediction Maintenance Operations
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.arm
// ------------------------------------------------------------
// Interrupt enable/disable
// ------------------------------------------------------------
// Could use intrinsic instead of these
.global enableInterrupts
.type enableInterrupts,function
// void enableInterrupts(void)//
enableInterrupts:
CPSIE i
BX lr
.global disableInterrupts
.type disableInterrupts,function
// void disableInterrupts(void)//
disableInterrupts:
CPSID i
BX lr
// ------------------------------------------------------------
// Cache Maintenance
// ------------------------------------------------------------
.global enableCaches
.type enableCaches,function
// void enableCaches(void)//
enableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(1 << 2) // Set C bit
ORR r0, r0, #(1 << 12) // Set I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global disableCaches
.type disableCaches,function
// void disableCaches(void)
disableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(1 << 2) // Clear C bit
BIC r0, r0, #(1 << 12) // Clear I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global cleanDCache
.type cleanDCache,function
// void cleanDCache(void)//
cleanDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_dcache_finished
MOV r10, #0
clean_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c10, 2 // DCCSW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_dcache_loop2
clean_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_dcache_loop1
clean_dcache_finished:
POP {r4-r12}
BX lr
.global cleanInvalidateDCache
.type cleanInvalidateDCache,function
// void cleanInvalidateDCache(void)//
cleanInvalidateDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_invalidate_dcache_finished
MOV r10, #0
clean_invalidate_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_invalidate_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_invalidate_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_invalidate_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c14, 2 // DCCISW - clean and invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_invalidate_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_invalidate_dcache_loop2
clean_invalidate_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_invalidate_dcache_loop1
clean_invalidate_dcache_finished:
POP {r4-r12}
BX lr
.global invalidateCaches
.type invalidateCaches,function
// void invalidateCaches(void)//
invalidateCaches:
PUSH {r4-r12}
//
// Based on code example given in section B2.2.4/11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MOV r0, #0
MCR p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate entire I Cache, and flushes branch target cache
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_finished
MOV r10, #0
invalidate_caches_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_loop2
invalidate_caches_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_loop1
invalidate_caches_finished:
POP {r4-r12}
BX lr
.global invalidateCaches_IS
.type invalidateCaches_IS,function
// void invalidateCaches_IS(void)//
invalidateCaches_IS:
PUSH {r4-r12}
MOV r0, #0
MCR p15, 0, r0, c7, c1, 0 // ICIALLUIS - Invalidate entire I Cache inner shareable
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_is_finished
MOV r10, #0
invalidate_caches_is_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_is_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_is_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_is_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_is_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_is_loop2
invalidate_caches_is_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_is_loop1
invalidate_caches_is_finished:
POP {r4-r12}
BX lr
// ------------------------------------------------------------
// TLB
// ------------------------------------------------------------
.global invalidateUnifiedTLB
.type invalidateUnifiedTLB,function
// void invalidateUnifiedTLB(void)//
invalidateUnifiedTLB:
MOV r0, #0
MCR p15, 0, r0, c8, c7, 0 // TLBIALL - Invalidate entire unified TLB
BX lr
.global invalidateUnifiedTLB_IS
.type invalidateUnifiedTLB_IS,function
// void invalidateUnifiedTLB_IS(void)//
invalidateUnifiedTLB_IS:
MOV r0, #1
MCR p15, 0, r0, c8, c3, 0 // TLBIALLIS - Invalidate entire unified TLB Inner Shareable
BX lr
// ------------------------------------------------------------
// Branch Prediction
// ------------------------------------------------------------
.global flushBranchTargetCache
.type flushBranchTargetCache,function
// void flushBranchTargetCache(void)
flushBranchTargetCache:
MOV r0, #0
MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
BX lr
.global flushBranchTargetCache_IS
.type flushBranchTargetCache_IS,function
// void flushBranchTargetCache_IS(void)
flushBranchTargetCache_IS:
MOV r0, #0
MCR p15, 0, r0, c7, c1, 6 // BPIALLIS - Invalidate entire branch predictor array Inner Shareable
BX lr
// ------------------------------------------------------------
// High Vecs
// ------------------------------------------------------------
.global enableHighVecs
.type enableHighVecs,function
// void enableHighVecs(void)//
enableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
ORR r0, r0, #(1 << 13) // Set the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.global disableHighVecs
.type disableHighVecs,function
// void disableHighVecs(void)//
disableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
BIC r0, r0, #(1 << 13) // Clear the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
// ------------------------------------------------------------
// Context ID
// ------------------------------------------------------------
.global getContextID
.type getContextID,function
// uint32_t getContextID(void)//
getContextID:
MRC p15, 0, r0, c13, c0, 1 // Read Context ID Register
BX lr
.global setContextID
.type setContextID,function
// void setContextID(uint32_t)//
setContextID:
MCR p15, 0, r0, c13, c0, 1 // Write Context ID Register
BX lr
// ------------------------------------------------------------
// ID registers
// ------------------------------------------------------------
.global getMIDR
.type getMIDR,function
// uint32_t getMIDR(void)//
getMIDR:
MRC p15, 0, r0, c0, c0, 0 // Read Main ID Register (MIDR)
BX lr
.global getMPIDR
.type getMPIDR,function
// uint32_t getMPIDR(void)//
getMPIDR:
MRC p15, 0, r0, c0, c0, 5 // Read Multiprocessor ID register (MPIDR)
BX lr
// ------------------------------------------------------------
// CP15 SMP related
// ------------------------------------------------------------
.global getBaseAddr
.type getBaseAddr,function
// uint32_t getBaseAddr(void)
// Returns the value CBAR (base address of the private peripheral memory space)
getBaseAddr:
MRC p15, 4, r0, c15, c0, 0 // Read peripheral base address
BX lr
// ------------------------------------------------------------
.global getCPUID
.type getCPUID,function
// uint32_t getCPUID(void)
// Returns the CPU ID (0 to 3) of the CPU executed on
getCPUID:
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
BX lr
// ------------------------------------------------------------
.global goToSleep
.type goToSleep,function
// void goToSleep(void)
goToSleep:
DSB // Clear all pending data accesses
WFI // Go into standby
B goToSleep // Catch in case of rogue events
BX lr
// ------------------------------------------------------------
.global joinSMP
.type joinSMP,function
// void joinSMP(void)
// Sets the ACTRL.SMP bit
joinSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
MOV r1, r0
ORR r0, r0, #0x040 // Set bit 6
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
.global leaveSMP
.type leaveSMP,function
// void leaveSMP(void)
// Clear the ACTRL.SMP bit
leaveSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
BIC r0, r0, #0x040 // Clear bit 6
MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
// End of v7.s
// ------------------------------------------------------------
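
The three nested loops in cleanDCache, cleanInvalidateDCache and the invalidate routines above all follow the same CLIDR/CCSIDR walk from the Armv7-A Architecture Reference Manual example that the listing cites. The C sketch below restates that walk for the clean-by-set/way case to make the register arithmetic easier to follow; the function and accessor names are illustrative, and the accessors are small GCC inline-asm wrappers around the same MRC/MCR encodings used in the listing:

#include <stdint.h>

static inline uint32_t read_clidr(void)         { uint32_t v; __asm__ volatile("mrc p15, 1, %0, c0, c0, 1" : "=r"(v)); return v; }
static inline uint32_t read_ccsidr(void)        { uint32_t v; __asm__ volatile("mrc p15, 1, %0, c0, c0, 0" : "=r"(v)); return v; }
static inline void     write_csselr(uint32_t v) { __asm__ volatile("mcr p15, 2, %0, c0, c0, 0" : : "r"(v)); __asm__ volatile("isb"); }
static inline void     dccsw(uint32_t v)        { __asm__ volatile("mcr p15, 0, %0, c7, c10, 2" : : "r"(v)); }

void clean_dcache_by_set_way(void)
{
    uint32_t clidr = read_clidr();
    uint32_t loc2  = (clidr & 0x7000000u) >> 23;                   /* 2 x Level of Coherency */

    for (uint32_t level2 = 0u; level2 < loc2; level2 += 2u)
    {
        uint32_t ctype = (clidr >> (level2 + (level2 >> 1))) & 7u; /* 3 type bits per level */
        if (ctype < 2u)
            continue;                                              /* no cache, or I-cache only */

        write_csselr(level2);                                      /* select this data/unified cache */
        uint32_t ccsidr    = read_ccsidr();
        uint32_t line_log2 = (ccsidr & 7u) + 4u;                   /* log2(line length in bytes) */
        uint32_t max_way   = (ccsidr >> 3) & 0x3FFu;
        uint32_t max_set   = (ccsidr >> 13) & 0x7FFFu;
        uint32_t way_shift = max_way ? (uint32_t)__builtin_clz(max_way) : 0u;

        for (uint32_t set = 0u; set <= max_set; set++)
            for (uint32_t way = 0u; way <= max_way; way++)
                dccsw(level2 | (way << way_shift) | (set << line_log2));   /* DCCSW */
    }
}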

View File

@ -79,6 +79,14 @@ _mainCRTStartup:
#endif
#endif
.global _fini
.type _fini,function
_fini:
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
MOV pc, lr // Return to caller
#endif
/* Workspace for Angel calls. */
.data

View File

@ -109,7 +109,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(256) + (. & (256 - 1));
. = 0x2E000000;
.data :
{
*(.data)

View File

@ -0,0 +1,155 @@
// ------------------------------------------------------------
// v7-A Cache, TLB and Branch Prediction Maintenance Operations
// Header File
//
// Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _ARMV7A_GENERIC_H
#define _ARMV7A_GENERIC_H
// ------------------------------------------------------------
// Memory barrier mnemonics
enum MemBarOpt {
RESERVED_0 = 0, RESERVED_1 = 1, OSHST = 2, OSH = 3,
RESERVED_4 = 4, RESERVED_5 = 5, NSHST = 6, NSH = 7,
RESERVED_8 = 8, RESERVED_9 = 9, ISHST = 10, ISH = 11,
RESERVED_12 = 12, RESERVED_13 = 13, ST = 14, SY = 15
};
//
// Note:
// *_IS() stands for "inner shareable"
// DO NOT USE THESE FUNCTIONS ON A CORTEX-A8
//
// ------------------------------------------------------------
// Interrupts
// Enable/disables IRQs (not FIQs)
void enableInterrupts(void);
void disableInterrupts(void);
// ------------------------------------------------------------
// Caches
void invalidateCaches_IS(void);
void cleanInvalidateDCache(void);
void invalidateCaches_IS(void);
void enableCaches(void);
void disableCaches(void);
void invalidateCaches(void);
void cleanDCache(void);
// ------------------------------------------------------------
// TLBs
void invalidateUnifiedTLB(void);
void invalidateUnifiedTLB_IS(void);
// ------------------------------------------------------------
// Branch prediction
void flushBranchTargetCache(void);
void flushBranchTargetCache_IS(void);
// ------------------------------------------------------------
// High Vecs
void enableHighVecs(void);
void disableHighVecs(void);
// ------------------------------------------------------------
// ID Registers
unsigned int getMIDR(void);
#define MIDR_IMPL_SHIFT 24
#define MIDR_IMPL_MASK 0xFF
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK 0xF
#define MIDR_ARCH_SHIFT 16
#define MIDR_ARCH_MASK 0xF
#define MIDR_PART_SHIFT 4
#define MIDR_PART_MASK 0xFFF
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK 0xF
// tmp = get_MIDR();
// implementor = (tmp >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
// variant = (tmp >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK;
// architecture= (tmp >> MIDR_ARCH_SHIFT) & MIDR_ARCH_MASK;
// part_number = (tmp >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
// revision = tmp & MIDR_REV_MASK;
#define MIDR_PART_CA5 0xC05
#define MIDR_PART_CA8 0xC08
#define MIDR_PART_CA9 0xC09
unsigned int getMPIDR(void);
#define MPIDR_FORMAT_SHIFT 31
#define MPIDR_FORMAT_MASK 0x1
#define MPIDR_UBIT_SHIFT 30
#define MPIDR_UBIT_MASK 0x1
#define MPIDR_CLUSTER_SHIFT 7
#define MPIDR_CLUSTER_MASK 0xF
#define MPIDR_CPUID_SHIFT 0
#define MPIDR_CPUID_MASK 0x3
#define MPIDR_CPUID_CPU0 0x0
#define MPIDR_CPUID_CPU1 0x1
#define MPIDR_CPUID_CPU2 0x2
#define MPIDR_CPUID_CPU3 0x3
#define MPIDR_UNIPROCESSPR 0x1
#define MPDIR_NEW_FORMAT 0x1
// ------------------------------------------------------------
// Context ID
unsigned int getContextID(void);
void setContextID(unsigned int);
#define CONTEXTID_ASID_SHIFT 0
#define CONTEXTID_ASID_MASK 0xFF
#define CONTEXTID_PROCID_SHIFT 8
#define CONTEXTID_PROCID_MASK 0x00FFFFFF
// tmp = getContextID();
// ASID = tmp & CONTEXTID_ASID_MASK;
// PROCID = (tmp >> CONTEXTID_PROCID_SHIFT) & CONTEXTID_PROCID_MASK;
// ------------------------------------------------------------
// SMP related for Armv7-A MPCore processors
//
// DO NOT CALL THESE FUNCTIONS ON A CORTEX-A8
// Returns the base address of the private peripheral memory space
unsigned int getBaseAddr(void);
// Returns the CPU ID (0 to 3) of the CPU executed on
#define MP_CPU0 (0)
#define MP_CPU1 (1)
#define MP_CPU2 (2)
#define MP_CPU3 (3)
unsigned int getCPUID(void);
// Set this core as participating in SMP
void joinSMP(void);
// Set this core as NOT participating in SMP
void leaveSMP(void);
// Go to sleep, never returns
void goToSleep(void);
#endif
// ------------------------------------------------------------
// End of v7.h
// ------------------------------------------------------------

View File

@ -0,0 +1,476 @@
// ------------------------------------------------------------
// v7-A Cache and Branch Prediction Maintenance Operations
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.arm
// ------------------------------------------------------------
// Interrupt enable/disable
// ------------------------------------------------------------
// Could use intrinsic instead of these
.global enableInterrupts
.type enableInterrupts,function
// void enableInterrupts(void)//
enableInterrupts:
CPSIE i
BX lr
.global disableInterrupts
.type disableInterrupts,function
// void disableInterrupts(void)//
disableInterrupts:
CPSID i
BX lr
// ------------------------------------------------------------
// Cache Maintenance
// ------------------------------------------------------------
.global enableCaches
.type enableCaches,function
// void enableCaches(void)//
enableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(1 << 2) // Set C bit
ORR r0, r0, #(1 << 12) // Set I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global disableCaches
.type disableCaches,function
// void disableCaches(void)
disableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(1 << 2) // Clear C bit
BIC r0, r0, #(1 << 12) // Clear I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global cleanDCache
.type cleanDCache,function
// void cleanDCache(void)//
cleanDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_dcache_finished
MOV r10, #0
clean_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c10, 2 // DCCSW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_dcache_loop2
clean_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_dcache_loop1
clean_dcache_finished:
POP {r4-r12}
BX lr
.global cleanInvalidateDCache
.type cleanInvalidateDCache,function
// void cleanInvalidateDCache(void)//
cleanInvalidateDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_invalidate_dcache_finished
MOV r10, #0
clean_invalidate_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_invalidate_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_invalidate_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_invalidate_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c14, 2 // DCCISW - clean and invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_invalidate_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_invalidate_dcache_loop2
clean_invalidate_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_invalidate_dcache_loop1
clean_invalidate_dcache_finished:
POP {r4-r12}
BX lr
.global invalidateCaches
.type invalidateCaches,function
// void invalidateCaches(void)//
invalidateCaches:
PUSH {r4-r12}
//
// Based on code example given in section B2.2.4/11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MOV r0, #0
MCR p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate entire I Cache, and flushes branch target cache
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_finished
MOV r10, #0
invalidate_caches_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_loop2
invalidate_caches_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_loop1
invalidate_caches_finished:
POP {r4-r12}
BX lr
.global invalidateCaches_IS
.type invalidateCaches_IS,function
// void invalidateCaches_IS(void)//
invalidateCaches_IS:
PUSH {r4-r12}
MOV r0, #0
MCR p15, 0, r0, c7, c1, 0 // ICIALLUIS - Invalidate entire I Cache inner shareable
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_is_finished
MOV r10, #0
invalidate_caches_is_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_is_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_is_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_is_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_is_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_is_loop2
invalidate_caches_is_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_is_loop1
invalidate_caches_is_finished:
POP {r4-r12}
BX lr
// ------------------------------------------------------------
// TLB
// ------------------------------------------------------------
.global invalidateUnifiedTLB
.type invalidateUnifiedTLB,function
// void invalidateUnifiedTLB(void)//
invalidateUnifiedTLB:
MOV r0, #0
MCR p15, 0, r0, c8, c7, 0 // TLBIALL - Invalidate entire unified TLB
BX lr
.global invalidateUnifiedTLB_IS
.type invalidateUnifiedTLB_IS,function
// void invalidateUnifiedTLB_IS(void)//
invalidateUnifiedTLB_IS:
MOV r0, #1
MCR p15, 0, r0, c8, c3, 0 // TLBIALLIS - Invalidate entire unified TLB Inner Shareable
BX lr
// ------------------------------------------------------------
// Branch Prediction
// ------------------------------------------------------------
.global flushBranchTargetCache
.type flushBranchTargetCache,function
// void flushBranchTargetCache(void)
flushBranchTargetCache:
MOV r0, #0
MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
BX lr
.global flushBranchTargetCache_IS
.type flushBranchTargetCache_IS,function
// void flushBranchTargetCache_IS(void)
flushBranchTargetCache_IS:
MOV r0, #0
MCR p15, 0, r0, c7, c1, 6 // BPIALLIS - Invalidate entire branch predictor array Inner Shareable
BX lr
// ------------------------------------------------------------
// High Vecs
// ------------------------------------------------------------
.global enableHighVecs
.type enableHighVecs,function
// void enableHighVecs(void)//
enableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
ORR r0, r0, #(1 << 13) // Set the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.global disableHighVecs
.type disableHighVecs,function
// void disable_highvecs(void)//
disableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
BIC r0, r0, #(1 << 13) // Clear the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
// ------------------------------------------------------------
// Context ID
// ------------------------------------------------------------
.global getContextID
.type getContextID,function
// uint32_t getContextIDd(void)//
getContextID:
MRC p15, 0, r0, c13, c0, 1 // Read Context ID Register
BX lr
.global setContextID
.type setContextID,function
// void setContextID(uint32_t)//
setContextID:
MCR p15, 0, r0, c13, c0, 1 // Write Context ID Register
BX lr
// ------------------------------------------------------------
// ID registers
// ------------------------------------------------------------
.global getMIDR
.type getMIDR,function
// uint32_t getMIDR(void)//
getMIDR:
MRC p15, 0, r0, c0, c0, 0 // Read Main ID Register (MIDR)
BX lr
.global getMPIDR
.type getMPIDR,function
// uint32_t getMPIDR(void)//
getMPIDR:
MRC p15, 0, r0, c0 ,c0, 5// Read Multiprocessor ID register (MPIDR)
BX lr
// ------------------------------------------------------------
// CP15 SMP related
// ------------------------------------------------------------
.global getBaseAddr
.type getBaseAddr,function
// uint32_t getBaseAddr(void)
// Returns the value CBAR (base address of the private peripheral memory space)
getBaseAddr:
MRC p15, 4, r0, c15, c0, 0 // Read peripheral base address
BX lr
// ------------------------------------------------------------
.global getCPUID
.type getCPUID,function
// uint32_t getCPUID(void)
// Returns the CPU ID (0 to 3) of the CPU executed on
getCPUID:
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
BX lr
// ------------------------------------------------------------
.global goToSleep
.type goToSleep,function
// void goToSleep(void)
goToSleep:
DSB // Clear all pending data accesses
WFI // Go into standby
B goToSleep // Catch in case of rogue events
BX lr
// ------------------------------------------------------------
.global joinSMP
.type joinSMP,function
// void joinSMP(void)
// Sets the ACTRL.SMP bit
joinSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
MOV r1, r0
ORR r0, r0, #0x040 // Set bit 6
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
.global leaveSMP
.type leaveSMP,function
// void leaveSMP(void)
// Clear the ACTRL.SMP bit
leaveSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
BIC r0, r0, #0x040 // Clear bit 6
MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
// End of v7.s
// ------------------------------------------------------------

View File

@ -79,6 +79,14 @@ _mainCRTStartup:
#endif
#endif
.global _fini
.type _fini,function
_fini:
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
MOV pc, lr // Return to caller
#endif
/* Workspace for Angel calls. */
.data

View File

@ -109,7 +109,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(256) + (. & (256 - 1));
. = 0x2E000000;
.data :
{
*(.data)

View File

@ -0,0 +1,155 @@
// ------------------------------------------------------------
// v7-A Cache, TLB and Branch Prediction Maintenance Operations
// Header File
//
// Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _ARMV7A_GENERIC_H
#define _ARMV7A_GENERIC_H
// ------------------------------------------------------------
// Memory barrier mnemonics
enum MemBarOpt {
RESERVED_0 = 0, RESERVED_1 = 1, OSHST = 2, OSH = 3,
RESERVED_4 = 4, RESERVED_5 = 5, NSHST = 6, NSH = 7,
RESERVED_8 = 8, RESERVED_9 = 9, ISHST = 10, ISH = 11,
RESERVED_12 = 12, RESERVED_13 = 13, ST = 14, SY = 15
};
//
// Note:
// *_IS() stands for "inner shareable"
// DO NOT USE THESE FUNCTIONS ON A CORTEX-A8
//
// ------------------------------------------------------------
// Interrupts
// Enable/disables IRQs (not FIQs)
void enableInterrupts(void);
void disableInterrupts(void);
// ------------------------------------------------------------
// Caches
void invalidateCaches_IS(void);
void cleanInvalidateDCache(void);
void invalidateCaches_IS(void);
void enableCaches(void);
void disableCaches(void);
void invalidateCaches(void);
void cleanDCache(void);
// ------------------------------------------------------------
// TLBs
void invalidateUnifiedTLB(void);
void invalidateUnifiedTLB_IS(void);
// ------------------------------------------------------------
// Branch prediction
void flushBranchTargetCache(void);
void flushBranchTargetCache_IS(void);
// ------------------------------------------------------------
// High Vecs
void enableHighVecs(void);
void disableHighVecs(void);
// ------------------------------------------------------------
// ID Registers
unsigned int getMIDR(void);
#define MIDR_IMPL_SHIFT 24
#define MIDR_IMPL_MASK 0xFF
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK 0xF
#define MIDR_ARCH_SHIFT 16
#define MIDR_ARCH_MASK 0xF
#define MIDR_PART_SHIFT 4
#define MIDR_PART_MASK 0xFFF
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK 0xF
// tmp = get_MIDR();
// implementor = (tmp >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
// variant = (tmp >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK;
// architecture= (tmp >> MIDR_ARCH_SHIFT) & MIDR_ARCH_MASK;
// part_number = (tmp >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
// revision = tmp & MIDR_REV_MASK;
#define MIDR_PART_CA5 0xC05
#define MIDR_PART_CA8 0xC08
#define MIDR_PART_CA9 0xC09
unsigned int getMPIDR(void);
#define MPIDR_FORMAT_SHIFT 31
#define MPIDR_FORMAT_MASK 0x1
#define MPIDR_UBIT_SHIFT 30
#define MPIDR_UBIT_MASK 0x1
#define MPIDR_CLUSTER_SHIFT 7
#define MPIDR_CLUSTER_MASK 0xF
#define MPIDR_CPUID_SHIFT 0
#define MPIDR_CPUID_MASK 0x3
#define MPIDR_CPUID_CPU0 0x0
#define MPIDR_CPUID_CPU1 0x1
#define MPIDR_CPUID_CPU2 0x2
#define MPIDR_CPUID_CPU3 0x3
#define MPIDR_UNIPROCESSPR 0x1
#define MPDIR_NEW_FORMAT 0x1
// ------------------------------------------------------------
// Context ID
unsigned int getContextID(void);
void setContextID(unsigned int);
#define CONTEXTID_ASID_SHIFT 0
#define CONTEXTID_ASID_MASK 0xFF
#define CONTEXTID_PROCID_SHIFT 8
#define CONTEXTID_PROCID_MASK 0x00FFFFFF
// tmp = getContextID();
// ASID = tmp & CONTEXTID_ASID_MASK;
// PROCID = (tmp >> CONTEXTID_PROCID_SHIFT) & CONTEXTID_PROCID_MASK;
// ------------------------------------------------------------
// SMP related for Armv7-A MPCore processors
//
// DO NOT CALL THESE FUNCTIONS ON A CORTEX-A8
// Returns the base address of the private peripheral memory space
unsigned int getBaseAddr(void);
// Returns the CPU ID (0 to 3) of the CPU executed on
#define MP_CPU0 (0)
#define MP_CPU1 (1)
#define MP_CPU2 (2)
#define MP_CPU3 (3)
unsigned int getCPUID(void);
// Set this core as participating in SMP
void joinSMP(void);
// Set this core as NOT participating in SMP
void leaveSMP(void);
// Go to sleep, never returns
void goToSleep(void);
#endif
// ------------------------------------------------------------
// End of v7.h
// ------------------------------------------------------------

View File

@ -0,0 +1,476 @@
// ------------------------------------------------------------
// v7-A Cache and Branch Prediction Maintenance Operations
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.arm
// ------------------------------------------------------------
// Interrupt enable/disable
// ------------------------------------------------------------
// Could use intrinsic instead of these
.global enableInterrupts
.type enableInterrupts,function
// void enableInterrupts(void)//
enableInterrupts:
CPSIE i
BX lr
.global disableInterrupts
.type disableInterrupts,function
// void disableInterrupts(void)//
disableInterrupts:
CPSID i
BX lr
// ------------------------------------------------------------
// Cache Maintenance
// ------------------------------------------------------------
.global enableCaches
.type enableCaches,function
// void enableCaches(void)//
enableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(1 << 2) // Set C bit
ORR r0, r0, #(1 << 12) // Set I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global disableCaches
.type disableCaches,function
// void disableCaches(void)
disableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(1 << 2) // Clear C bit
BIC r0, r0, #(1 << 12) // Clear I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global cleanDCache
.type cleanDCache,function
// void cleanDCache(void)//
cleanDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_dcache_finished
MOV r10, #0
clean_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c10, 2 // DCCSW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_dcache_loop2
clean_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_dcache_loop1
clean_dcache_finished:
POP {r4-r12}
BX lr
.global cleanInvalidateDCache
.type cleanInvalidateDCache,function
// void cleanInvalidateDCache(void)//
cleanInvalidateDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_invalidate_dcache_finished
MOV r10, #0
clean_invalidate_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_invalidate_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_invalidate_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_invalidate_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c14, 2 // DCCISW - clean and invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_invalidate_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_invalidate_dcache_loop2
clean_invalidate_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_invalidate_dcache_loop1
clean_invalidate_dcache_finished:
POP {r4-r12}
BX lr
.global invalidateCaches
.type invalidateCaches,function
// void invalidateCaches(void)//
invalidateCaches:
PUSH {r4-r12}
//
// Based on code example given in section B2.2.4/11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MOV r0, #0
MCR p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate entire I Cache, and flushes branch target cache
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_finished
MOV r10, #0
invalidate_caches_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_loop2
invalidate_caches_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_loop1
invalidate_caches_finished:
POP {r4-r12}
BX lr
.global invalidateCaches_IS
.type invalidateCaches_IS,function
// void invalidateCaches_IS(void)//
invalidateCaches_IS:
PUSH {r4-r12}
MOV r0, #0
MCR p15, 0, r0, c7, c1, 0 // ICIALLUIS - Invalidate entire I Cache inner shareable
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_is_finished
MOV r10, #0
invalidate_caches_is_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_is_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_is_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_is_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_is_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_is_loop2
invalidate_caches_is_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_is_loop1
invalidate_caches_is_finished:
POP {r4-r12}
BX lr
// ------------------------------------------------------------
// TLB
// ------------------------------------------------------------
.global invalidateUnifiedTLB
.type invalidateUnifiedTLB,function
// void invalidateUnifiedTLB(void)//
invalidateUnifiedTLB:
MOV r0, #0
MCR p15, 0, r0, c8, c7, 0 // TLBIALL - Invalidate entire unified TLB
BX lr
.global invalidateUnifiedTLB_IS
.type invalidateUnifiedTLB_IS,function
// void invalidateUnifiedTLB_IS(void)//
invalidateUnifiedTLB_IS:
MOV r0, #1
MCR p15, 0, r0, c8, c3, 0 // TLBIALLIS - Invalidate entire unified TLB Inner Shareable
BX lr
// ------------------------------------------------------------
// Branch Prediction
// ------------------------------------------------------------
.global flushBranchTargetCache
.type flushBranchTargetCache,function
// void flushBranchTargetCache(void)
flushBranchTargetCache:
MOV r0, #0
MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
BX lr
.global flushBranchTargetCache_IS
.type flushBranchTargetCache_IS,function
// void flushBranchTargetCache_IS(void)
flushBranchTargetCache_IS:
MOV r0, #0
MCR p15, 0, r0, c7, c1, 6 // BPIALLIS - Invalidate entire branch predictor array Inner Shareable
BX lr
// ------------------------------------------------------------
// High Vecs
// ------------------------------------------------------------
.global enableHighVecs
.type enableHighVecs,function
// void enableHighVecs(void)//
enableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
ORR r0, r0, #(1 << 13) // Set the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.global disableHighVecs
.type disableHighVecs,function
// void disable_highvecs(void)//
disableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
BIC r0, r0, #(1 << 13) // Clear the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
// ------------------------------------------------------------
// Context ID
// ------------------------------------------------------------
.global getContextID
.type getContextID,function
// uint32_t getContextIDd(void)//
getContextID:
MRC p15, 0, r0, c13, c0, 1 // Read Context ID Register
BX lr
.global setContextID
.type setContextID,function
// void setContextID(uint32_t)//
setContextID:
MCR p15, 0, r0, c13, c0, 1 // Write Context ID Register
BX lr
// ------------------------------------------------------------
// ID registers
// ------------------------------------------------------------
.global getMIDR
.type getMIDR,function
// uint32_t getMIDR(void)//
getMIDR:
MRC p15, 0, r0, c0, c0, 0 // Read Main ID Register (MIDR)
BX lr
.global getMPIDR
.type getMPIDR,function
// uint32_t getMPIDR(void)//
getMPIDR:
MRC p15, 0, r0, c0 ,c0, 5// Read Multiprocessor ID register (MPIDR)
BX lr
// ------------------------------------------------------------
// CP15 SMP related
// ------------------------------------------------------------
.global getBaseAddr
.type getBaseAddr,function
// uint32_t getBaseAddr(void)
// Returns the value CBAR (base address of the private peripheral memory space)
getBaseAddr:
MRC p15, 4, r0, c15, c0, 0 // Read peripheral base address
BX lr
// ------------------------------------------------------------
.global getCPUID
.type getCPUID,function
// uint32_t getCPUID(void)
// Returns the CPU ID (0 to 3) of the CPU executed on
getCPUID:
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
BX lr
// ------------------------------------------------------------
.global goToSleep
.type goToSleep,function
// void goToSleep(void)
goToSleep:
DSB // Clear all pending data accesses
WFI // Go into standby
B goToSleep // Catch in case of rogue events
BX lr
// ------------------------------------------------------------
.global joinSMP
.type joinSMP,function
// void joinSMP(void)
// Sets the ACTRL.SMP bit
joinSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
MOV r1, r0
ORR r0, r0, #0x040 // Set bit 6
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
.global leaveSMP
.type leaveSMP,function
// void leaveSMP(void)
// Clear the ACTRL.SMP bit
leaveSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
BIC r0, r0, #0x040 // Clear bit 6
MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
// End of v7.s
// ------------------------------------------------------------

View File

@ -79,6 +79,14 @@ _mainCRTStartup:
#endif
#endif
.global _fini
.type _fini,function
_fini:
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
MOV pc, lr // Return to caller
#endif
/* Workspace for Angel calls. */
.data

View File

@ -109,7 +109,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(256) + (. & (256 - 1));
. = 0x2E000000;
.data :
{
*(.data)

View File

@ -0,0 +1,155 @@
// ------------------------------------------------------------
// v7-A Cache, TLB and Branch Prediction Maintenance Operations
// Header File
//
// Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _ARMV7A_GENERIC_H
#define _ARMV7A_GENERIC_H
// ------------------------------------------------------------
// Memory barrier mnemonics
enum MemBarOpt {
RESERVED_0 = 0, RESERVED_1 = 1, OSHST = 2, OSH = 3,
RESERVED_4 = 4, RESERVED_5 = 5, NSHST = 6, NSH = 7,
RESERVED_8 = 8, RESERVED_9 = 9, ISHST = 10, ISH = 11,
RESERVED_12 = 12, RESERVED_13 = 13, ST = 14, SY = 15
};
//
// Note:
// *_IS() stands for "inner shareable"
// DO NOT USE THESE FUNCTIONS ON A CORTEX-A8
//
// ------------------------------------------------------------
// Interrupts
// Enables/disables IRQs (not FIQs)
void enableInterrupts(void);
void disableInterrupts(void);
// ------------------------------------------------------------
// Caches
void invalidateCaches_IS(void);
void cleanInvalidateDCache(void);
void invalidateCaches_IS(void);
void enableCaches(void);
void disableCaches(void);
void invalidateCaches(void);
void cleanDCache(void);
// ------------------------------------------------------------
// TLBs
void invalidateUnifiedTLB(void);
void invalidateUnifiedTLB_IS(void);
// ------------------------------------------------------------
// Branch prediction
void flushBranchTargetCache(void);
void flushBranchTargetCache_IS(void);
// ------------------------------------------------------------
// High Vecs
void enableHighVecs(void);
void disableHighVecs(void);
// ------------------------------------------------------------
// ID Registers
unsigned int getMIDR(void);
#define MIDR_IMPL_SHIFT 24
#define MIDR_IMPL_MASK 0xFF
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK 0xF
#define MIDR_ARCH_SHIFT 16
#define MIDR_ARCH_MASK 0xF
#define MIDR_PART_SHIFT 4
#define MIDR_PART_MASK 0xFFF
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK 0xF
// tmp = getMIDR();
// implementor = (tmp >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
// variant = (tmp >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK;
// architecture= (tmp >> MIDR_ARCH_SHIFT) & MIDR_ARCH_MASK;
// part_number = (tmp >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
// revision = tmp & MIDR_REV_MASK;
#define MIDR_PART_CA5 0xC05
#define MIDR_PART_CA8 0xC08
#define MIDR_PART_CA9 0xC09
unsigned int getMPIDR(void);
#define MPIDR_FORMAT_SHIFT 31
#define MPIDR_FORMAT_MASK 0x1
#define MPIDR_UBIT_SHIFT 30
#define MPIDR_UBIT_MASK 0x1
#define MPIDR_CLUSTER_SHIFT 7
#define MPIDR_CLUSTER_MASK 0xF
#define MPIDR_CPUID_SHIFT 0
#define MPIDR_CPUID_MASK 0x3
#define MPIDR_CPUID_CPU0 0x0
#define MPIDR_CPUID_CPU1 0x1
#define MPIDR_CPUID_CPU2 0x2
#define MPIDR_CPUID_CPU3 0x3
#define MPIDR_UNIPROCESSPR 0x1
#define MPDIR_NEW_FORMAT 0x1
// ------------------------------------------------------------
// Context ID
unsigned int getContextID(void);
void setContextID(unsigned int);
#define CONTEXTID_ASID_SHIFT 0
#define CONTEXTID_ASID_MASK 0xFF
#define CONTEXTID_PROCID_SHIFT 8
#define CONTEXTID_PROCID_MASK 0x00FFFFFF
// tmp = getContextID();
// ASID = tmp & CONTEXTID_ASID_MASK;
// PROCID = (tmp >> CONTEXTID_PROCID_SHIFT) & CONTEXTID_PROCID_MASK;
// ------------------------------------------------------------
// SMP related for Armv7-A MPCore processors
//
// DO NOT CALL THESE FUNCTIONS ON A CORTEX-A8
// Returns the base address of the private peripheral memory space
unsigned int getBaseAddr(void);
// Returns the CPU ID (0 to 3) of the CPU executed on
#define MP_CPU0 (0)
#define MP_CPU1 (1)
#define MP_CPU2 (2)
#define MP_CPU3 (3)
unsigned int getCPUID(void);
// Set this core as participating in SMP
void joinSMP(void);
// Set this core as NOT participating in SMP
void leaveSMP(void);
// Go to sleep, never returns
void goToSleep(void);
#endif
// ------------------------------------------------------------
// End of v7.h
// ------------------------------------------------------------
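As a worked use of the decode comments in this header, a short C example; the helper name and the printf formatting are illustrative only:

#include <stdio.h>
#include "v7.h"

static void print_core_id(void)         /* hypothetical helper */
{
    unsigned int midr  = getMIDR();
    unsigned int part  = (midr >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
    unsigned int rev   = midr & MIDR_REV_MASK;
    unsigned int cpuid = getMPIDR() & MPIDR_CPUID_MASK;

    if (part == MIDR_PART_CA9)
        printf("Cortex-A9 rev %u on CPU%u\n", rev, cpuid);
    else
        printf("part 0x%03X rev %u on CPU%u\n", part, rev, cpuid);
}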

View File

@ -0,0 +1,476 @@
// ------------------------------------------------------------
// v7-A Cache and Branch Prediction Maintenance Operations
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.arm
// ------------------------------------------------------------
// Interrupt enable/disable
// ------------------------------------------------------------
// Could use intrinsic instead of these
.global enableInterrupts
.type enableInterrupts,function
// void enableInterrupts(void)//
enableInterrupts:
CPSIE i
BX lr
.global disableInterrupts
.type disableInterrupts,function
// void disableInterrupts(void)//
disableInterrupts:
CPSID i
BX lr
// ------------------------------------------------------------
// Cache Maintenance
// ------------------------------------------------------------
.global enableCaches
.type enableCaches,function
// void enableCaches(void)//
enableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(1 << 2) // Set C bit
ORR r0, r0, #(1 << 12) // Set I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global disableCaches
.type disableCaches,function
// void disableCaches(void)
disableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(1 << 2) // Clear C bit
BIC r0, r0, #(1 << 12) // Clear I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global cleanDCache
.type cleanDCache,function
// void cleanDCache(void)//
cleanDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_dcache_finished
MOV r10, #0
clean_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c10, 2 // DCCSW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_dcache_loop2
clean_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_dcache_loop1
clean_dcache_finished:
POP {r4-r12}
BX lr
.global cleanInvalidateDCache
.type cleanInvalidateDCache,function
// void cleanInvalidateDCache(void)//
cleanInvalidateDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_invalidate_dcache_finished
MOV r10, #0
clean_invalidate_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_invalidate_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_invalidate_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_invalidate_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c14, 2 // DCCISW - clean and invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_invalidate_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_invalidate_dcache_loop2
clean_invalidate_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_invalidate_dcache_loop1
clean_invalidate_dcache_finished:
POP {r4-r12}
BX lr
.global invalidateCaches
.type invalidateCaches,function
// void invalidateCaches(void)//
invalidateCaches:
PUSH {r4-r12}
//
// Based on code example given in section B2.2.4/11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MOV r0, #0
MCR p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate entire I Cache, and flushes branch target cache
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_finished
MOV r10, #0
invalidate_caches_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_loop2
invalidate_caches_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_loop1
invalidate_caches_finished:
POP {r4-r12}
BX lr
.global invalidateCaches_IS
.type invalidateCaches_IS,function
// void invalidateCaches_IS(void)//
invalidateCaches_IS:
PUSH {r4-r12}
MOV r0, #0
MCR p15, 0, r0, c7, c1, 0 // ICIALLUIS - Invalidate entire I Cache inner shareable
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_is_finished
MOV r10, #0
invalidate_caches_is_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_is_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_is_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_is_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_is_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_is_loop2
invalidate_caches_is_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_is_loop1
invalidate_caches_is_finished:
POP {r4-r12}
BX lr
// ------------------------------------------------------------
// TLB
// ------------------------------------------------------------
.global invalidateUnifiedTLB
.type invalidateUnifiedTLB,function
// void invalidateUnifiedTLB(void)//
invalidateUnifiedTLB:
MOV r0, #0
MCR p15, 0, r0, c8, c7, 0 // TLBIALL - Invalidate entire unified TLB
BX lr
.global invalidateUnifiedTLB_IS
.type invalidateUnifiedTLB_IS,function
// void invalidateUnifiedTLB_IS(void)//
invalidateUnifiedTLB_IS:
MOV r0, #1
MCR p15, 0, r0, c8, c3, 0 // TLBIALLIS - Invalidate entire unified TLB Inner Shareable
BX lr
// ------------------------------------------------------------
// Branch Prediction
// ------------------------------------------------------------
.global flushBranchTargetCache
.type flushBranchTargetCache,function
// void flushBranchTargetCache(void)
flushBranchTargetCache:
MOV r0, #0
MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
BX lr
.global flushBranchTargetCache_IS
.type flushBranchTargetCache_IS,function
// void flushBranchTargetCache_IS(void)
flushBranchTargetCache_IS:
MOV r0, #0
MCR p15, 0, r0, c7, c1, 6 // BPIALLIS - Invalidate entire branch predictor array Inner Shareable
BX lr
// ------------------------------------------------------------
// High Vecs
// ------------------------------------------------------------
.global enableHighVecs
.type enableHighVecs,function
// void enableHighVecs(void)//
enableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
ORR r0, r0, #(1 << 13) // Set the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.global disableHighVecs
.type disableHighVecs,function
// void disableHighVecs(void)//
disableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
BIC r0, r0, #(1 << 13) // Clear the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
// ------------------------------------------------------------
// Context ID
// ------------------------------------------------------------
.global getContextID
.type getContextID,function
// uint32_t getContextID(void)//
getContextID:
MRC p15, 0, r0, c13, c0, 1 // Read Context ID Register
BX lr
.global setContextID
.type setContextID,function
// void setContextID(uint32_t)//
setContextID:
MCR p15, 0, r0, c13, c0, 1 // Write Context ID Register
BX lr
// ------------------------------------------------------------
// ID registers
// ------------------------------------------------------------
.global getMIDR
.type getMIDR,function
// uint32_t getMIDR(void)//
getMIDR:
MRC p15, 0, r0, c0, c0, 0 // Read Main ID Register (MIDR)
BX lr
.global getMPIDR
.type getMPIDR,function
// uint32_t getMPIDR(void)//
getMPIDR:
MRC p15, 0, r0, c0, c0, 5 // Read Multiprocessor ID register (MPIDR)
BX lr
// ------------------------------------------------------------
// CP15 SMP related
// ------------------------------------------------------------
.global getBaseAddr
.type getBaseAddr,function
// uint32_t getBaseAddr(void)
// Returns the value CBAR (base address of the private peripheral memory space)
getBaseAddr:
MRC p15, 4, r0, c15, c0, 0 // Read peripheral base address
BX lr
// ------------------------------------------------------------
.global getCPUID
.type getCPUID,function
// uint32_t getCPUID(void)
// Returns the CPU ID (0 to 3) of the CPU executed on
getCPUID:
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
BX lr
// ------------------------------------------------------------
.global goToSleep
.type goToSleep,function
// void goToSleep(void)
goToSleep:
DSB // Clear all pending data accesses
WFI // Go into standby
B goToSleep // Catch in case of rogue events
BX lr
// ------------------------------------------------------------
.global joinSMP
.type joinSMP,function
// void joinSMP(void)
// Sets the ACTRL.SMP bit
joinSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
MOV r1, r0
ORR r0, r0, #0x040 // Set bit 6
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
.global leaveSMP
.type leaveSMP,function
// void leaveSMP(void)
// Clear the ACTRL.SMP bit
leaveSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
BIC r0, r0, #0x040 // Clear bit 6
MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
// End of v7.s
// ------------------------------------------------------------

View File

@ -0,0 +1,74 @@
// ------------------------------------------------------------
// Cortex-A7 MPCore - Interrupt Controller functions
// Header File
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _CORTEXA_GIC_
#define _CORTEXA_GIC_
// ------------------------------------------------------------
// GIC
// ------------------------------------------------------------
// Typical calls to enable interrupt ID X:
// enableIntID(X) <-- Enable that ID
// setIntPriority(X, 0) <-- Set the priority of X to 0 (the max priority)
// setPriorityMask(0x1F) <-- Set the core's priority mask to 0x1F (the lowest priority)
// enableGIC() <-- Enable the GIC (global)
// enableGICProcessorInterface() <-- Enable the CPU interface (local to the core)
//
// Global enable of the Interrupt Distributor
void enableGIC(void);
// Global disable of the Interrupt Distributor
void disableGIC(void);
// Enables the interrupt source number ID
void enableIntID(unsigned int ID);
// Disables the interrupt source number ID
void disableIntID(unsigned int ID);
// Sets the priority of the specified ID
void setIntPriority(unsigned int ID, unsigned int priority);
// Enables the processor interface
// Must be done on each core separately
void enableGICProcessorInterface(void);
// Disables the processor interface
// Must be done on each core separately
void disableGICProcessorInterface(void);
// Sets the Priority mask register for the core run on
// The reset value masks ALL interrupts!
void setPriorityMask(unsigned int priority);
// Sets the Binary Point Register for the core run on
void setBinaryPoint(unsigned int priority);
// Returns the value of the Interrupt Acknowledge Register
unsigned int readIntAck(void);
// Writes ID to the End Of Interrupt register
void writeEOI(unsigned int ID);
// ------------------------------------------------------------
// SGI
// ------------------------------------------------------------
// Send a software generate interrupt
void sendSGI(unsigned int ID, unsigned int core_list, unsigned int filter_list);
#endif
// ------------------------------------------------------------
// End of MP_GIC.h
// ------------------------------------------------------------
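In C, the typical sequence described at the top of this header, applied to the private timer interrupt (ID 29) as the tx_initialize_low_level.S changes later in this commit do; the helper name and macro added here are illustrative:

#include "MP_GIC.h"

#define TIMER_IRQ_ID 29u                    /* private timer PPI used by the example */

static void gic_setup_timer_irq(void)       /* hypothetical helper */
{
    setPriorityMask(0x1F);                  /* per-core: unmask all priorities */
    enableIntID(TIMER_IRQ_ID);              /* distributor: enable the source */
    setIntPriority(TIMER_IRQ_ID, 0);        /* distributor: highest priority */
    enableGIC();                            /* distributor: global enable */
    enableGICProcessorInterface();          /* per-core CPU interface enable */
}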

View File

@ -0,0 +1,294 @@
//----------------------------------------------------------------
// Copyright (c) 2005-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//
// Cortex-A7MP example - Startup Code
//----------------------------------------------------------------
.text
//----------------------------------------------------------------
// GIC. Generic Interrupt Controller Architecture Specification
//----------------------------------------------------------------
// CPU Interface offset from base of private peripheral space --> 0x2000
// Interrupt Distributor offset from base of private peripheral space --> 0x1000
// Typical calls to enable interrupt ID X:
// enableIntID(X) <-- Enable that ID
// setIntPriority(X, 0) <-- Set the priority of X to 0 (the max priority)
// setPriorityMask(0x1F) <-- Set CPU's priority mask to 0x1F (the lowest priority)
// enableGIC() <-- Enable the GIC (global)
// enableGICProcessorInterface() <-- Enable the CPU interface (local to the CPU)
// void enableGIC(void)
// Global enable of the Interrupt Distributor
.global enableGIC
.type enableGIC,function
enableGIC:
// Get base address of private peripheral space
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
ADD r0, r0, #0x1000 // Add the GIC offset
LDR r1, [r0] // Read the GIC Enable Register (ICDDCR)
ORR r1, r1, #0x01 // Set bit 0, the enable bit
STR r1, [r0] // Write the GIC Enable Register (ICDDCR)
BX lr
// ------------------------------------------------------------
.global disableGIC
.type disableGIC,function
// void disableGIC(void)
// Global disable of the Interrupt Distributor
disableGIC:
// Get base address of private peripheral space
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
ADD r0, r0, #0x1000 // Add the GIC offset
LDR r1, [r0] // Read the GIC Enable Register (ICDDCR)
BIC r1, r1, #0x01 // Clear bit 0, the enable bit
STR r1, [r0] // Write the GIC Enable Register (ICDDCR)
BX lr
// ------------------------------------------------------------
.global enableIntID
.type enableIntID,function
// void enableIntID(unsigned int ID)
// Enables the interrupt source number ID
enableIntID:
// Get base address of private peripheral space
MOV r1, r0 // Back up passed in ID value
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
// Each interrupt source has an enable bit in the GIC. These
// are grouped into registers, with 32 sources per register
// First, we need to identify which 32-bit block the interrupt lives in
MOV r2, r1 // Make working copy of ID in r2
MOV r2, r2, LSR #5 // LSR by 5 places, effective divide by 32
// r2 now contains the 32-bit block this ID lives in
MOV r2, r2, LSL #2 // Now multiply by 4, to convert offset into an address offset (four bytes per reg)
// Now work out which bit within the 32-bit block the ID is
AND r1, r1, #0x1F // Mask off to give offset within 32-bit block
MOV r3, #1 // Move enable value into r3
MOV r3, r3, LSL r1 // Shift it left to position of ID
ADD r2, r2, #0x1100 // Add the base offset of the Enable Set registers to the offset for the ID
STR r3, [r0, r2] // Store out (ICDISER)
BX lr
// ------------------------------------------------------------
.global disableIntID
.type disableIntID,function
// void disableIntID(unsigned int ID)
// Disables the interrupt source number ID
disableIntID:
// Get base address of private peripheral space
MOV r1, r0 // Back up passed in ID value
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
// First, we need to identify which 32-bit block the interrupt lives in
MOV r2, r1 // Make working copy of ID in r2
MOV r2, r2, LSR #5 // LSR by 5 places, effective divide by 32
// r2 now contains the 32-bit block this ID lives in
MOV r2, r2, LSL #2 // Now multiply by 4, to convert offset into an address offset (four bytes per reg)
// Now work out which bit within the 32-bit block the ID is
AND r1, r1, #0x1F // Mask off to give offset within 32-bit block
MOV r3, #1 // Move enable value into r3
MOV r3, r3, LSL r1 // Shift it left to position of ID in 32-bit block
ADD r2, r2, #0x1180 // Add the base offset of the Enable Clear registers to the offset for the ID
STR r3, [r0, r2] // Store out (ICDICER)
BX lr
// ------------------------------------------------------------
.global setIntPriority
.type setIntPriority,function
// void setIntPriority(unsigned int ID, unsigned int priority)
// Sets the priority of the specified ID
// r0 = ID
// r1 = priority
setIntPriority:
// Get base address of private peripheral space
MOV r2, r0 // Back up passed in ID value
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
// r0 = base addr
// r1 = priority
// r2 = ID
// Make sure that priority value is only 5 bits, and convert to expected format
AND r1, r1, #0x1F
MOV r1, r1, LSL #3
// Find which register this ID lives in
BIC r3, r2, #0x03 // Make a copy of the ID, clearing off the bottom two bits
// There are four IDs per reg, by clearing the bottom two bits we get an address offset
ADD r3, r3, #0x1400 // Now add the offset of the Priority Level registers from the base of the private peripheral space
ADD r0, r0, r3 // Now add in the base address of the private peripheral space, giving us the absolute address
// Now work out which ID in the register it is
AND r2, r2, #0x03 // Clear all but the bottom two bits, leaves which ID in the reg it is (which byte)
MOV r2, r2, LSL #3 // Multiply by 8, this gives a bit offset
// Read -> Modify -> Write
MOV r12, #0xFF // 8 bit field mask
MOV r12, r12, LSL r2 // Move mask into correct bit position
MOV r1, r1, LSL r2 // Also, move passed in priority value into correct bit position
LDR r3, [r0] // Read current value of the Priority Level register (ICDIPR)
BIC r3, r3, r12 // Clear appropriate field
ORR r3, r3, r1 // Now OR in the priority value
STR r3, [r0] // And store it back again (ICDIPR)
BX lr
// ------------------------------------------------------------
.global enableGICProcessorInterface
.type enableGICProcessorInterface,function
// void enableGICProcessorInterface(void)
// Enables the processor interface
// Must be done on each core separately
enableGICProcessorInterface:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
ADD r0, r0, #0x2000
LDR r1, [r0, #0x0] // Read the Processor Interface Control register (ICCICR/ICPICR)
ORR r1, r1, #0x03 // Bit 0: Enables secure interrupts, Bit 1: Enables Non-Secure interrupts
BIC r1, r1, #0x08 // Bit 3: Ensure Group 0 interrupts are signalled using IRQ, not FIQ
STR r1, [r0, #0x0] // Write the Processor Interface Control register (ICCICR/ICPICR)
BX lr
// ------------------------------------------------------------
.global disableGICProcessorInterface
.type disableGICProcessorInterface,function
// void disableGICProcessorInterface(void)
// Disables the processor interface
// Must be done on each core separately
disableGICProcessorInterface:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
ADD r0, r0, #0x2000
LDR r1, [r0, #0x0] // Read the Processor Interface Control register (ICCICR/ICPICR)
BIC r1, r1, #0x03 // Bit 0: Enables secure interrupts, Bit 1: Enables Non-Secure interrupts
STR r1, [r0, #0x0] // Write the Processor Interface Control register (ICCICR/ICPICR)
BX lr
// ------------------------------------------------------------
.global setPriorityMask
.type setPriorityMask,function
// void setPriorityMask(unsigned int priority)
// Sets the Priority mask register for the CPU run on
// The reset value masks ALL interrupts!
setPriorityMask:
// Get base address of private peripheral space
MRC p15, 4, r1, c15, c0, 0 // Read periph base address
ADD r1, r1, #0x2000
STR r0, [r1, #0x4] // Write the Priority Mask register (ICCPMR/ICCIPMR)
BX lr
// ------------------------------------------------------------
.global setBinaryPoint
.type setBinaryPoint,function
// void setBinaryPoint(unsigned int priority)
// Sets the Binary Point Register for the CPU run on
setBinaryPoint:
// Get base address of private peripheral space
MRC p15, 4, r1, c15, c0, 0 // Read periph base address
ADD r1, r1, #0x2000
STR r0, [r1, #0x8] // Write the Binary Point register (ICCBPR)
BX lr
// ------------------------------------------------------------
.global readIntAck
.type readIntAck,function
// unsigned int readIntAck(void)
// Returns the value of the Interrupt Acknowledge Register
readIntAck:
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
ADD r0, r0, #0x2000
LDR r0, [r0, #0xC] // Read the Interrupt Acknowledge Register (ICCIAR)
BX lr
// ------------------------------------------------------------
.global writeEOI
.type writeEOI,function
// void writeEOI(unsigned int ID)
// Writes ID to the End Of Interrupt register
writeEOI:
// Get base address of private peripheral space
MRC p15, 4, r1, c15, c0, 0 // Read periph base address
ADD r1, r1, #0x2000
STR r0, [r1, #0x10] // Write ID to the End of Interrupt register (ICCEOIR)
BX lr
//----------------------------------------------------------------
// SGI
//----------------------------------------------------------------
.global sendSGI
.type sendSGI,function
// void sendSGI(unsigned int ID, unsigned int target_list, unsigned int filter_list)//
// Send a software generate interrupt
sendSGI:
AND r3, r0, #0x0F // Mask off unused bits of ID, and move to r3
AND r1, r1, #0x0F // Mask off unused bits of target_list
AND r2, r2, #0x0F // Mask off unused bits of filter_list
ORR r3, r3, r1, LSL #16 // Combine ID and target_list
ORR r3, r3, r2, LSL #24 // and now the filter list
// Get the address of the GIC
MRC p15, 4, r0, c15, c0, 0 // Read periph base address
ADD r0, r0, #0x1F00 // Add offset of the sgi_trigger reg
STR r3, [r0] // Write to the Software Generated Interrupt Register (ICDSGIR)
BX lr
//----------------------------------------------------------------
// End of MP_GIC.s
//----------------------------------------------------------------
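The enable/disable routines above compute the ICDISER/ICDICER word and bit from the interrupt ID; restated as C (the pointer parameter and helper name are assumptions, the offsets 0x1100/0x1180 are the ones used in the assembly):

static void gic_set_enable(volatile unsigned char *periph_base,   /* CBAR value */
                           unsigned int id, int enable)
{
    unsigned int offset = (enable ? 0x1100u : 0x1180u) + (id / 32u) * 4u;
    volatile unsigned int *reg = (volatile unsigned int *)(periph_base + offset);
    *reg = 1u << (id % 32u);    /* set-enable / clear-enable registers are write-1 */
}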

View File

@ -0,0 +1,84 @@
// ------------------------------------------------------------
// Cortex-A MPCore - Private timer functions
//
// Copyright ARM Ltd 2009. All rights reserved.
// ------------------------------------------------------------
.text
.align 3
// PPI ID 29
// Typical set of calls to enable Timer:
// init_private_timer(0xXXXX, 0) <-- Counter down value of 0xXXXX, with auto-reload
// start_private_timer()
// Timer offset from base of private peripheral space --> 0x600
// ------------------------------------------------------------
// void init_private_timer(unsigned int load_value, unsigned int auto_reload)
// Sets up the private timer
// r0: initial load value
// r1: IF 0 (AutoReload) ELSE (SingleShot); AutoReload is not supported on Cortex-A7
.global init_private_timer
.type init_private_timer,function
init_private_timer:
// Setup timeout value (CNTP_TVAL)
MCR p15, 0, r0, c14, c2, 0
BX lr
// ------------------------------------------------------------
// void start_private_timer(void)
// Starts the private timer
.global start_private_timer
.type start_private_timer,function
start_private_timer:
MOV r0, #0x1
// Enable timer (CNTP_CTL)
MCR p15, 0, r0, c14, c2, 1
BX lr
// ------------------------------------------------------------
// void stop_private_timer(void)
// Stops the private timer
.global stop_private_timer
.type stop_private_timer,function
stop_private_timer:
BX lr
// ------------------------------------------------------------
// unsigned int get_private_timer_count(void)
// Reads the current value of the timer count register
.global get_private_timer_count
.type get_private_timer_count,function
get_private_timer_count:
BX lr
// ------------------------------------------------------------
// void clear_private_timer_irq(void)
// Clears the private timer interrupt
.global clear_private_timer_irq
.type clear_private_timer_irq,function
clear_private_timer_irq:
BX lr
// ------------------------------------------------------------
// End of code
// ------------------------------------------------------------
// ------------------------------------------------------------
// End of MP_PrivateTimer.s
// ------------------------------------------------------------

View File

@ -0,0 +1,36 @@
// ------------------------------------------------------------
// Cortex-A MPCore - Private timer functions
// Header File
//
// Copyright ARM Ltd 2009. All rights reserved.
// ------------------------------------------------------------
#ifndef _CORTEXA_PRIVATE_TIMER_
#define _CORTEXA_PRIVATE_TIMER_
// Typical set of calls to enable Timer:
// init_private_timer(0xXXXX, 0) <-- Counter down value of 0xXXXX, with auto-reload
// start_private_timer()
// Sets up the private timer
// r0: initial load value
// r1: IF 0 (AutoReload) ELSE (SingleShot)
void init_private_timer(unsigned int load_value, unsigned int auto_reload);
// Starts the private timer
void start_private_timer(void);
// Stops the private timer
void stop_private_timer(void);
// Reads the current value of the timer count register
unsigned int get_private_timer_count(void);
// Clears the private timer interrupt
void clear_private_timer_irq(void);
#endif
// ------------------------------------------------------------
// End of MP_PrivateTimer.h
// ------------------------------------------------------------
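Putting this header together with the Cortex-A7 implementation above (which programs the Generic Timer via CNTP_TVAL/CNTP_CTL), a hedged usage sketch; the reload value 0xF0000 is the one the startup code in this commit uses, and the helper name is illustrative:

#include "MP_PrivateTimer.h"

#define TIMER_RELOAD 0xF0000u             /* countdown value used by the example */

static void timer_setup(void)             /* hypothetical helper */
{
    init_private_timer(TIMER_RELOAD, 0);  /* writes CNTP_TVAL on this port */
    start_private_timer();                /* sets CNTP_CTL.ENABLE */
}

Because CNTP_TVAL is a one-shot countdown on this port, the interrupt handler re-arms the timer by calling init_private_timer(TIMER_RELOAD, 0) again, which is what the tx_initialize_low_level.S changes in this commit do.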

View File

@ -1,8 +1,8 @@
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 reset.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 crt0.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 tx_initialize_low_level.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 MP_GIC.s
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 MP_PrivateTimer.s
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 V7.s
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 -I../../../../common/inc -I../inc sample_threadx.c
arm-none-eabi-gcc -g -mcpu=cortex-a7 -T sample_threadx.ld --specs=nosys.specs -o sample_threadx.out -Wl,-Map=sample_threadx.map tx_initialize_low_level.o sample_threadx.o tx.a
arm-none-eabi-gcc -g -nostartfiles -mcpu=cortex-a7 -T sample_threadx.ld --specs=nosys.specs -o sample_threadx.out -Wl,-Map=sample_threadx.map MP_GIC.o MP_PrivateTimer.o V7.o crt0.o reset.o tx_initialize_low_level.o sample_threadx.o tx.a

View File

@ -79,6 +79,14 @@ _mainCRTStartup:
#endif
#endif
.global _fini
.type _fini,function
_fini:
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
MOV pc, lr // Return to caller
#endif
/* Workspace for Angel calls. */
.data

View File

@ -109,7 +109,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(256) + (. & (256 - 1));
. = 0x2E000000;
.data :
{
*(.data)

View File

@ -41,8 +41,13 @@ SYS_STACK_SIZE = 1024 // System stack size
.global _end
.global _sp
.global _stack_bottom
.global __vectors
.global disableHighVecs
.global enableGIC
.global enableGICProcessorInterface
.global enableCaches
.global init_private_timer
.global start_private_timer
/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
applications calling this function from 16-bit Thumb mode. */
@ -160,6 +165,42 @@ _stack_error_loop:
ADD r1, r1, #8 // Increment to next free word
STR r1, [r2] // Save first free memory address
PUSH {lr}
/* Setup the vector table. */
LDR r0, =__vectors // Get address of vector table
MCR p15, 0, r0, c12, c0, 0 // Write vector table address to CP15
BL disableHighVecs // Disable high vectors
//
// GIC Init
// ---------
BL enableGIC
BL enableGICProcessorInterface
//
// Enable Private Timer for periodic IRQ
// --------------------------------------
MOV r0, #0x1F
BL setPriorityMask // Set priority mask (local)
// Enable the Private Timer Interrupt Source
MOV r0, #29
MOV r1, #0
BL enableIntID
// Set the priority
MOV r0, #29
MOV r1, #0
BL setIntPriority
// Configure Timer
MOV r0, #0xF0000
MOV r1, #0x0
BL init_private_timer
BL start_private_timer
POP {lr}
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
@ -202,16 +243,18 @@ __tx_irq_processing_return:
if nested IRQ interrupts are desired. Interrupts may be re-enabled over
small code sequences where lr is saved before enabling interrupts and
restored after interrupts are again disabled. */
PUSH {r4, r5} // Save some preserved registers (r5 is saved just for 8-byte alignment)
BL readIntAck
MOV r4, r0
/* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
from IRQ mode with interrupts disabled. This routine switches to the
system mode and returns with IRQ interrupts enabled.
CMP r0, #29 // If not Private Timer interrupt (ID 29), bypass it
BNE by_pass_timer_interrupt
NOTE: It is very important to ensure all IRQ interrupts are cleared
prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
MOV r0, #0xF0000
MOV r1, #0x0
BL init_private_timer
DSB
/* For debug purpose, execute the timer interrupt processing here. In
a real system, some kind of status indication would have to be checked
@ -219,13 +262,10 @@ __tx_irq_processing_return:
BL _tx_timer_interrupt // Timer interrupt handler
/* If interrupt nesting was started earlier, the end of interrupt nesting
service must be called before returning to _tx_thread_context_restore.
This routine returns to processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
by_pass_timer_interrupt:
MOV r0, r4
BL writeEOI
POP {r4, r5} // Recover preserved registers
/* Jump to context restore to restore system context. */
B _tx_thread_context_restore
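For readability, a C-style restatement of the IRQ path assembled above; this is explanatory only (the real handler remains in assembly) and the nesting calls guarded by TX_ENABLE_IRQ_NESTING are omitted:

#include "MP_GIC.h"                       /* readIntAck(), writeEOI() */
#include "MP_PrivateTimer.h"              /* init_private_timer() */

extern void _tx_timer_interrupt(void);    /* ThreadX timer processing */

static void irq_dispatch(void)            /* illustrative name */
{
    unsigned int id = readIntAck();       /* ICCIAR: which interrupt is active */

    if (id == 29u)                        /* private timer PPI */
    {
        init_private_timer(0xF0000, 0);   /* re-arm CNTP_TVAL for the next tick */
        _tx_timer_interrupt();            /* drive the ThreadX timer */
    }

    writeEOI(id);                         /* ICCEOIR: end of interrupt */
}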

View File

@ -0,0 +1,155 @@
// ------------------------------------------------------------
// v7-A Cache, TLB and Branch Prediction Maintenance Operations
// Header File
//
// Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _ARMV7A_GENERIC_H
#define _ARMV7A_GENERIC_H
// ------------------------------------------------------------
// Memory barrier mnemonics
enum MemBarOpt {
RESERVED_0 = 0, RESERVED_1 = 1, OSHST = 2, OSH = 3,
RESERVED_4 = 4, RESERVED_5 = 5, NSHST = 6, NSH = 7,
RESERVED_8 = 8, RESERVED_9 = 9, ISHST = 10, ISH = 11,
RESERVED_12 = 12, RESERVED_13 = 13, ST = 14, SY = 15
};
//
// Note:
// *_IS() stands for "inner shareable"
// DO NOT USE THESE FUNCTIONS ON A CORTEX-A8
//
// ------------------------------------------------------------
// Interrupts
// Enables/disables IRQs (not FIQs)
void enableInterrupts(void);
void disableInterrupts(void);
// ------------------------------------------------------------
// Caches
void invalidateCaches_IS(void);
void cleanInvalidateDCache(void);
void invalidateCaches_IS(void);
void enableCaches(void);
void disableCaches(void);
void invalidateCaches(void);
void cleanDCache(void);
// ------------------------------------------------------------
// TLBs
void invalidateUnifiedTLB(void);
void invalidateUnifiedTLB_IS(void);
// ------------------------------------------------------------
// Branch prediction
void flushBranchTargetCache(void);
void flushBranchTargetCache_IS(void);
// ------------------------------------------------------------
// High Vecs
void enableHighVecs(void);
void disableHighVecs(void);
// ------------------------------------------------------------
// ID Registers
unsigned int getMIDR(void);
#define MIDR_IMPL_SHIFT 24
#define MIDR_IMPL_MASK 0xFF
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK 0xF
#define MIDR_ARCH_SHIFT 16
#define MIDR_ARCH_MASK 0xF
#define MIDR_PART_SHIFT 4
#define MIDR_PART_MASK 0xFFF
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK 0xF
// tmp = getMIDR();
// implementor = (tmp >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
// variant = (tmp >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK;
// architecture= (tmp >> MIDR_ARCH_SHIFT) & MIDR_ARCH_MASK;
// part_number = (tmp >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
// revision = tmp & MIDR_REV_MASK;
#define MIDR_PART_CA5 0xC05
#define MIDR_PART_CA8 0xC08
#define MIDR_PART_CA9 0xC09
unsigned int getMPIDR(void);
#define MPIDR_FORMAT_SHIFT 31
#define MPIDR_FORMAT_MASK 0x1
#define MPIDR_UBIT_SHIFT 30
#define MPIDR_UBIT_MASK 0x1
#define MPIDR_CLUSTER_SHIFT 7
#define MPIDR_CLUSTER_MASK 0xF
#define MPIDR_CPUID_SHIFT 0
#define MPIDR_CPUID_MASK 0x3
#define MPIDR_CPUID_CPU0 0x0
#define MPIDR_CPUID_CPU1 0x1
#define MPIDR_CPUID_CPU2 0x2
#define MPIDR_CPUID_CPU3 0x3
#define MPIDR_UNIPROCESSPR 0x1
#define MPDIR_NEW_FORMAT 0x1
// ------------------------------------------------------------
// Context ID
unsigned int getContextID(void);
void setContextID(unsigned int);
#define CONTEXTID_ASID_SHIFT 0
#define CONTEXTID_ASID_MASK 0xFF
#define CONTEXTID_PROCID_SHIFT 8
#define CONTEXTID_PROCID_MASK 0x00FFFFFF
// tmp = getContextID();
// ASID = tmp & CONTEXTID_ASID_MASK;
// PROCID = (tmp >> CONTEXTID_PROCID_SHIFT) & CONTEXTID_PROCID_MASK;
// ------------------------------------------------------------
// SMP related for Armv7-A MPCore processors
//
// DO NOT CALL THESE FUNCTIONS ON A CORTEX-A8
// Returns the base address of the private peripheral memory space
unsigned int getBaseAddr(void);
// Returns the CPU ID (0 to 3) of the CPU executed on
#define MP_CPU0 (0)
#define MP_CPU1 (1)
#define MP_CPU2 (2)
#define MP_CPU3 (3)
unsigned int getCPUID(void);
// Set this core as participating in SMP
void joinSMP(void);
// Set this core as NOT participating in SMP
void leaveSMP(void);
// Go to sleep, never returns
void goToSleep(void);
#endif
// ------------------------------------------------------------
// End of v7.h
// ------------------------------------------------------------

View File

@ -0,0 +1,476 @@
// ------------------------------------------------------------
// v7-A Cache and Branch Prediction Maintenance Operations
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.arm
// ------------------------------------------------------------
// Interrupt enable/disable
// ------------------------------------------------------------
// Could use intrinsic instead of these
.global enableInterrupts
.type enableInterrupts,function
// void enableInterrupts(void)//
enableInterrupts:
CPSIE i
BX lr
.global disableInterrupts
.type disableInterrupts,function
// void disableInterrupts(void)//
disableInterrupts:
CPSID i
BX lr
// ------------------------------------------------------------
// Cache Maintenance
// ------------------------------------------------------------
.global enableCaches
.type enableCaches,function
// void enableCaches(void)//
enableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(1 << 2) // Set C bit
ORR r0, r0, #(1 << 12) // Set I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global disableCaches
.type disableCaches,function
// void disableCaches(void)
disableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(1 << 2) // Clear C bit
BIC r0, r0, #(1 << 12) // Clear I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global cleanDCache
.type cleanDCache,function
// void cleanDCache(void)//
cleanDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_dcache_finished
MOV r10, #0
clean_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c10, 2 // DCCSW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_dcache_loop2
clean_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_dcache_loop1
clean_dcache_finished:
POP {r4-r12}
BX lr
.global cleanInvalidateDCache
.type cleanInvalidateDCache,function
// void cleanInvalidateDCache(void)//
cleanInvalidateDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_invalidate_dcache_finished
MOV r10, #0
clean_invalidate_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_invalidate_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_invalidate_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_invalidate_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c14, 2 // DCCISW - clean and invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_invalidate_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_invalidate_dcache_loop2
clean_invalidate_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_invalidate_dcache_loop1
clean_invalidate_dcache_finished:
POP {r4-r12}
BX lr
.global invalidateCaches
.type invalidateCaches,function
// void invalidateCaches(void)//
invalidateCaches:
PUSH {r4-r12}
//
// Based on code example given in section B2.2.4/11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MOV r0, #0
MCR p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate entire I Cache, and flushes branch target cache
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_finished
MOV r10, #0
invalidate_caches_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_loop2
invalidate_caches_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_loop1
invalidate_caches_finished:
POP {r4-r12}
BX lr
.global invalidateCaches_IS
.type invalidateCaches_IS,function
// void invalidateCaches_IS(void)//
invalidateCaches_IS:
PUSH {r4-r12}
MOV r0, #0
MCR p15, 0, r0, c7, c1, 0 // ICIALLUIS - Invalidate entire I Cache inner shareable
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_is_finished
MOV r10, #0
invalidate_caches_is_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_is_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_is_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_is_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_is_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_is_loop2
invalidate_caches_is_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_is_loop1
invalidate_caches_is_finished:
POP {r4-r12}
BX lr
// ------------------------------------------------------------
// TLB
// ------------------------------------------------------------
.global invalidateUnifiedTLB
.type invalidateUnifiedTLB,function
// void invalidateUnifiedTLB(void)//
invalidateUnifiedTLB:
MOV r0, #0
MCR p15, 0, r0, c8, c7, 0 // TLBIALL - Invalidate entire unified TLB
BX lr
.global invalidateUnifiedTLB_IS
.type invalidateUnifiedTLB_IS,function
// void invalidateUnifiedTLB_IS(void)//
invalidateUnifiedTLB_IS:
MOV r0, #1
MCR p15, 0, r0, c8, c3, 0 // TLBIALLIS - Invalidate entire unified TLB Inner Shareable
BX lr
// ------------------------------------------------------------
// Branch Prediction
// ------------------------------------------------------------
.global flushBranchTargetCache
.type flushBranchTargetCache,function
// void flushBranchTargetCache(void)
flushBranchTargetCache:
MOV r0, #0
MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
BX lr
.global flushBranchTargetCache_IS
.type flushBranchTargetCache_IS,function
// void flushBranchTargetCache_IS(void)
flushBranchTargetCache_IS:
MOV r0, #0
MCR p15, 0, r0, c7, c1, 6 // BPIALLIS - Invalidate entire branch predictor array Inner Shareable
BX lr
// ------------------------------------------------------------
// High Vecs
// ------------------------------------------------------------
.global enableHighVecs
.type enableHighVecs,function
// void enableHighVecs(void)//
enableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
ORR r0, r0, #(1 << 13) // Set the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.global disableHighVecs
.type disableHighVecs,function
// void disableHighVecs(void)//
disableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
BIC r0, r0, #(1 << 13) // Clear the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
// ------------------------------------------------------------
// Context ID
// ------------------------------------------------------------
.global getContextID
.type getContextID,function
// uint32_t getContextID(void)//
getContextID:
MRC p15, 0, r0, c13, c0, 1 // Read Context ID Register
BX lr
.global setContextID
.type setContextID,function
// void setContextID(uint32_t)//
setContextID:
MCR p15, 0, r0, c13, c0, 1 // Write Context ID Register
BX lr
// ------------------------------------------------------------
// ID registers
// ------------------------------------------------------------
.global getMIDR
.type getMIDR,function
// uint32_t getMIDR(void)//
getMIDR:
MRC p15, 0, r0, c0, c0, 0 // Read Main ID Register (MIDR)
BX lr
.global getMPIDR
.type getMPIDR,function
// uint32_t getMPIDR(void)//
getMPIDR:
MRC p15, 0, r0, c0, c0, 5 // Read Multiprocessor ID register (MPIDR)
BX lr
// ------------------------------------------------------------
// CP15 SMP related
// ------------------------------------------------------------
.global getBaseAddr
.type getBaseAddr,function
// uint32_t getBaseAddr(void)
// Returns the value CBAR (base address of the private peripheral memory space)
getBaseAddr:
MRC p15, 4, r0, c15, c0, 0 // Read peripheral base address
BX lr
// ------------------------------------------------------------
.global getCPUID
.type getCPUID,function
// uint32_t getCPUID(void)
// Returns the CPU ID (0 to 3) of the CPU executed on
getCPUID:
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
BX lr
// ------------------------------------------------------------
.global goToSleep
.type goToSleep,function
// void goToSleep(void)
goToSleep:
DSB // Clear all pending data accesses
WFI // Go into standby
B goToSleep // Catch in case of rogue events
BX lr
// ------------------------------------------------------------
.global joinSMP
.type joinSMP,function
// void joinSMP(void)
// Sets the ACTRL.SMP bit
joinSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
MOV r1, r0
ORR r0, r0, #0x040 // Set bit 6
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
.global leaveSMP
.type leaveSMP,function
// void leaveSMP(void)
// Clear the ACTRL.SMP bit
leaveSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
BIC r0, r0, #0x040 // Clear bit 6
MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
// End of v7.s
// ------------------------------------------------------------

View File

@ -79,6 +79,14 @@ _mainCRTStartup:
#endif
#endif
.global _fini
.type _fini,function
_fini:
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
MOV pc, lr // Return to caller
#endif
/* Workspace for Angel calls. */
.data

View File

@ -109,7 +109,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(256) + (. & (256 - 1));
. = 0x2E000000;
.data :
{
*(.data)

View File

@ -0,0 +1,155 @@
// ------------------------------------------------------------
// v7-A Cache, TLB and Branch Prediction Maintenance Operations
// Header File
//
// Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _ARMV7A_GENERIC_H
#define _ARMV7A_GENERIC_H
// ------------------------------------------------------------
// Memory barrier mnemonics
enum MemBarOpt {
RESERVED_0 = 0, RESERVED_1 = 1, OSHST = 2, OSH = 3,
RESERVED_4 = 4, RESERVED_5 = 5, NSHST = 6, NSH = 7,
RESERVED_8 = 8, RESERVED_9 = 9, ISHST = 10, ISH = 11,
RESERVED_12 = 12, RESERVED_13 = 13, ST = 14, SY = 15
};
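//
// The enumerators above mirror the 4-bit option field of the DMB/DSB barrier
// instructions (ISH = inner shareable, SY = full system, and so on).
// Illustrative use from C with GCC-style inline assembly (not part of this API):
//   __asm__ volatile("dmb ish" ::: "memory");  // inner-shareable data memory barrier
//   __asm__ volatile("dsb sy"  ::: "memory");  // full-system data synchronization barrier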
//
// Note:
// *_IS() stands for "inner shareable"
// DO NOT USE THESE FUNCTIONS ON A CORTEX-A8
//
// ------------------------------------------------------------
// Interrupts
// Enables/disables IRQs (not FIQs)
void enableInterrupts(void);
void disableInterrupts(void);
// ------------------------------------------------------------
// Caches
void invalidateCaches_IS(void);
void cleanInvalidateDCache(void);
void invalidateCaches_IS(void);
void enableCaches(void);
void disableCaches(void);
void invalidateCaches(void);
void cleanDCache(void);
// ------------------------------------------------------------
// TLBs
void invalidateUnifiedTLB(void);
void invalidateUnifiedTLB_IS(void);
// ------------------------------------------------------------
// Branch prediction
void flushBranchTargetCache(void);
void flushBranchTargetCache_IS(void);
// ------------------------------------------------------------
// High Vecs
void enableHighVecs(void);
void disableHighVecs(void);
// ------------------------------------------------------------
// ID Registers
unsigned int getMIDR(void);
#define MIDR_IMPL_SHIFT 24
#define MIDR_IMPL_MASK 0xFF
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK 0xF
#define MIDR_ARCH_SHIFT 16
#define MIDR_ARCH_MASK 0xF
#define MIDR_PART_SHIFT 4
#define MIDR_PART_MASK 0xFFF
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK 0xF
// tmp = getMIDR();
// implementor = (tmp >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
// variant = (tmp >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK;
// architecture= (tmp >> MIDR_ARCH_SHIFT) & MIDR_ARCH_MASK;
// part_number = (tmp >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
// revision = tmp & MIDR_REV_MASK;
#define MIDR_PART_CA5 0xC05
#define MIDR_PART_CA8 0xC08
#define MIDR_PART_CA9 0xC09
unsigned int getMPIDR(void);
#define MPIDR_FORMAT_SHIFT 31
#define MPIDR_FORMAT_MASK 0x1
#define MPIDR_UBIT_SHIFT 30
#define MPIDR_UBIT_MASK 0x1
#define MPIDR_CLUSTER_SHIFT 7
#define MPIDR_CLUSTER_MASK 0xF
#define MPIDR_CPUID_SHIFT 0
#define MPIDR_CPUID_MASK 0x3
#define MPIDR_CPUID_CPU0 0x0
#define MPIDR_CPUID_CPU1 0x1
#define MPIDR_CPUID_CPU2 0x2
#define MPIDR_CPUID_CPU3 0x3
#define MPIDR_UNIPROCESSPR 0x1
#define MPDIR_NEW_FORMAT 0x1
// ------------------------------------------------------------
// Context ID
unsigned int getContextID(void);
void setContextID(unsigned int);
#define CONTEXTID_ASID_SHIFT 0
#define CONTEXTID_ASID_MASK 0xFF
#define CONTEXTID_PROCID_SHIFT 8
#define CONTEXTID_PROCID_MASK 0x00FFFFFF
// tmp = getContextID();
// ASID = tmp & CONTEXTID_ASID_MASK;
// PROCID = (tmp >> CONTEXTID_PROCID_SHIFT) & CONTEXTID_PROCID_MASK;
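// Illustrative only (not part of this API) - composing a value with the same
// masks, assuming the caller manages its own ASID/PROCID allocation:
//   setContextID(((procid & CONTEXTID_PROCID_MASK) << CONTEXTID_PROCID_SHIFT) |
//                ((asid << CONTEXTID_ASID_SHIFT) & CONTEXTID_ASID_MASK));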
// ------------------------------------------------------------
// SMP related for Armv7-A MPCore processors
//
// DO NOT CALL THESE FUNCTIONS ON A CORTEX-A8
// Returns the base address of the private peripheral memory space
unsigned int getBaseAddr(void);
// Returns the CPU ID (0 to 3) of the CPU executed on
#define MP_CPU0 (0)
#define MP_CPU1 (1)
#define MP_CPU2 (2)
#define MP_CPU3 (3)
unsigned int getCPUID(void);
// Set this core as participating in SMP
void joinSMP(void);
// Set this core as NOT participating in SMP
void leaveSMP(void);
// Go to sleep, never returns
void goToSleep(void);
#endif
// ------------------------------------------------------------
// End of v7.h
// ------------------------------------------------------------
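As a quick illustration of the ID-register helpers declared in this header, the sketch below decodes getMIDR() with the MIDR_* macros and checks for a Cortex-A9 part. It is a hypothetical helper, not part of this commit; the include name v7.h and the function name is_cortex_a9 are assumptions.

#include "v7.h"                              /* assumed include name for the header above */

/* Hypothetical helper: non-zero when running on a Cortex-A9. */
static int is_cortex_a9(void)
{
    unsigned int midr = getMIDR();
    unsigned int part = (midr >> MIDR_PART_SHIFT) & MIDR_PART_MASK;

    return part == MIDR_PART_CA9;            /* 0xC09 per the table above */
}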

View File

@ -0,0 +1,476 @@
// ------------------------------------------------------------
// v7-A Cache and Branch Prediction Maintenance Operations
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.arm
// ------------------------------------------------------------
// Interrupt enable/disable
// ------------------------------------------------------------
// Could use intrinsic instead of these
.global enableInterrupts
.type enableInterrupts,function
// void enableInterrupts(void)//
enableInterrupts:
CPSIE i
BX lr
.global disableInterrupts
.type disableInterrupts,function
// void disableInterrupts(void)//
disableInterrupts:
CPSID i
BX lr
// ------------------------------------------------------------
// Cache Maintenance
// ------------------------------------------------------------
.global enableCaches
.type enableCaches,function
// void enableCaches(void)//
enableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(1 << 2) // Set C bit
ORR r0, r0, #(1 << 12) // Set I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global disableCaches
.type disableCaches,function
// void disableCaches(void)
disableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(1 << 2) // Clear C bit
BIC r0, r0, #(1 << 12) // Clear I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global cleanDCache
.type cleanDCache,function
// void cleanDCache(void)//
cleanDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
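// Summary of the walk below: CLIDR gives the level of coherency; for every
// level that holds a data or unified cache, CSSELR selects that level, CCSIDR
// supplies the line length plus the way and set counts, and a DCCSW (clean by
// set/way) is issued for every set/way combination at that level.
//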
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_dcache_finished
MOV r10, #0
clean_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c10, 2 // DCCSW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_dcache_loop2
clean_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_dcache_loop1
clean_dcache_finished:
POP {r4-r12}
BX lr
.global cleanInvalidateDCache
.type cleanInvalidateDCache,function
// void cleanInvalidateDCache(void)//
cleanInvalidateDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_invalidate_dcache_finished
MOV r10, #0
clean_invalidate_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_invalidate_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_invalidate_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_invalidate_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c14, 2 // DCCISW - clean and invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_invalidate_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_invalidate_dcache_loop2
clean_invalidate_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_invalidate_dcache_loop1
clean_invalidate_dcache_finished:
POP {r4-r12}
BX lr
.global invalidateCaches
.type invalidateCaches,function
// void invalidateCaches(void)//
invalidateCaches:
PUSH {r4-r12}
//
// Based on code example given in section B2.2.4/11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MOV r0, #0
MCR p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate entire I Cache, and flushes branch target cache
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_finished
MOV r10, #0
invalidate_caches_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_loop2
invalidate_caches_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_loop1
invalidate_caches_finished:
POP {r4-r12}
BX lr
.global invalidateCaches_IS
.type invalidateCaches_IS,function
// void invalidateCaches_IS(void)//
invalidateCaches_IS:
PUSH {r4-r12}
MOV r0, #0
MCR p15, 0, r0, c7, c1, 0 // ICIALLUIS - Invalidate entire I Cache inner shareable
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_is_finished
MOV r10, #0
invalidate_caches_is_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_is_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_is_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_is_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_is_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_is_loop2
invalidate_caches_is_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_is_loop1
invalidate_caches_is_finished:
POP {r4-r12}
BX lr
// ------------------------------------------------------------
// TLB
// ------------------------------------------------------------
.global invalidateUnifiedTLB
.type invalidateUnifiedTLB,function
// void invalidateUnifiedTLB(void)//
invalidateUnifiedTLB:
MOV r0, #0
MCR p15, 0, r0, c8, c7, 0 // TLBIALL - Invalidate entire unified TLB
BX lr
.global invalidateUnifiedTLB_IS
.type invalidateUnifiedTLB_IS,function
// void invalidateUnifiedTLB_IS(void)//
invalidateUnifiedTLB_IS:
MOV r0, #1
MCR p15, 0, r0, c8, c3, 0 // TLBIALLIS - Invalidate entire unified TLB Inner Shareable
BX lr
// ------------------------------------------------------------
// Branch Prediction
// ------------------------------------------------------------
.global flushBranchTargetCache
.type flushBranchTargetCache,function
// void flushBranchTargetCache(void)
flushBranchTargetCache:
MOV r0, #0
MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
BX lr
.global flushBranchTargetCache_IS
.type flushBranchTargetCache_IS,function
// void flushBranchTargetCache_IS(void)
flushBranchTargetCache_IS:
MOV r0, #0
MCR p15, 0, r0, c7, c1, 6 // BPIALLIS - Invalidate entire branch predictor array Inner Shareable
BX lr
// ------------------------------------------------------------
// High Vecs
// ------------------------------------------------------------
.global enableHighVecs
.type enableHighVecs,function
// void enableHighVecs(void)//
enableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
ORR r0, r0, #(1 << 13) // Set the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.global disableHighVecs
.type disableHighVecs,function
// void disableHighVecs(void)//
disableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
BIC r0, r0, #(1 << 13) // Clear the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
// ------------------------------------------------------------
// Context ID
// ------------------------------------------------------------
.global getContextID
.type getContextID,function
// uint32_t getContextID(void)//
getContextID:
MRC p15, 0, r0, c13, c0, 1 // Read Context ID Register
BX lr
.global setContextID
.type setContextID,function
// void setContextID(uint32_t)//
setContextID:
MCR p15, 0, r0, c13, c0, 1 // Write Context ID Register
BX lr
// ------------------------------------------------------------
// ID registers
// ------------------------------------------------------------
.global getMIDR
.type getMIDR,function
// uint32_t getMIDR(void)//
getMIDR:
MRC p15, 0, r0, c0, c0, 0 // Read Main ID Register (MIDR)
BX lr
.global getMPIDR
.type getMPIDR,function
// uint32_t getMPIDR(void)//
getMPIDR:
MRC p15, 0, r0, c0, c0, 5 // Read Multiprocessor ID register (MPIDR)
BX lr
// ------------------------------------------------------------
// CP15 SMP related
// ------------------------------------------------------------
.global getBaseAddr
.type getBaseAddr,function
// uint32_t getBaseAddr(void)
// Returns the value CBAR (base address of the private peripheral memory space)
getBaseAddr:
MRC p15, 4, r0, c15, c0, 0 // Read peripheral base address
BX lr
// ------------------------------------------------------------
.global getCPUID
.type getCPUID,function
// uint32_t getCPUID(void)
// Returns the CPU ID (0 to 3) of the CPU executed on
getCPUID:
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
BX lr
// ------------------------------------------------------------
.global goToSleep
.type goToSleep,function
// void goToSleep(void)
goToSleep:
DSB // Clear all pending data accesses
WFI // Go into standby
B goToSleep // Catch in case of rogue events
BX lr
// ------------------------------------------------------------
.global joinSMP
.type joinSMP,function
// void joinSMP(void)
// Sets the ACTRL.SMP bit
joinSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
MOV r1, r0
ORR r0, r0, #0x040 // Set bit 6
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
.global leaveSMP
.type leaveSMP,function
// void leaveSMP(void)
// Clear the ACTRL.SMP bit
leaveSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
BIC r0, r0, #0x040 // Clear bit 6
MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
// End of v7.s
// ------------------------------------------------------------
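For context, the routines in this file are normally driven from C during per-core start-up. The sketch below shows one possible ordering for a secondary Armv7-A MPCore core; the function names come from v7.h, while the ordering, the secondary_core_init name, and the assumption that MMU/translation-table setup happens elsewhere are illustrative rather than part of this commit.

#include "v7.h"                     /* assumed include name for the companion header */

/* Hypothetical per-core bring-up sketch for a secondary Armv7-A MPCore core. */
void secondary_core_init(void)
{
    disableInterrupts();            /* keep IRQs masked while reconfiguring        */
    invalidateCaches();             /* local L1 contents are unknown out of reset  */
    invalidateUnifiedTLB();
    flushBranchTargetCache();
    joinSMP();                      /* set ACTLR.SMP before the caches are enabled */
    enableCaches();                 /* set the SCTLR C and I bits                  */
    enableInterrupts();
    /* ... continue with scheduler/application start-up for CPU getCPUID() ... */
}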

View File

@ -79,6 +79,14 @@ _mainCRTStartup:
#endif
#endif
.global _fini
.type _fini,function
_fini:
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
MOV pc, lr // Return to caller
#endif
/* Workspace for Angel calls. */
.data

View File

@ -109,7 +109,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(256) + (. & (256 - 1));
. = 0x2E000000;
.data :
{
*(.data)

View File

@ -0,0 +1,155 @@
// ------------------------------------------------------------
// v7-A Cache, TLB and Branch Prediction Maintenance Operations
// Header File
//
// Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _ARMV7A_GENERIC_H
#define _ARMV7A_GENERIC_H
// ------------------------------------------------------------
// Memory barrier mnemonics
enum MemBarOpt {
RESERVED_0 = 0, RESERVED_1 = 1, OSHST = 2, OSH = 3,
RESERVED_4 = 4, RESERVED_5 = 5, NSHST = 6, NSH = 7,
RESERVED_8 = 8, RESERVED_9 = 9, ISHST = 10, ISH = 11,
RESERVED_12 = 12, RESERVED_13 = 13, ST = 14, SY = 15
};
//
// Note:
// *_IS() stands for "inner shareable"
// DO NOT USE THESE FUNCTIONS ON A CORTEX-A8
//
// ------------------------------------------------------------
// Interrupts
// Enables/disables IRQs (not FIQs)
void enableInterrupts(void);
void disableInterrupts(void);
// ------------------------------------------------------------
// Caches
void invalidateCaches_IS(void);
void cleanInvalidateDCache(void);
void invalidateCaches_IS(void);
void enableCaches(void);
void disableCaches(void);
void invalidateCaches(void);
void cleanDCache(void);
// ------------------------------------------------------------
// TLBs
void invalidateUnifiedTLB(void);
void invalidateUnifiedTLB_IS(void);
// ------------------------------------------------------------
// Branch prediction
void flushBranchTargetCache(void);
void flushBranchTargetCache_IS(void);
// ------------------------------------------------------------
// High Vecs
void enableHighVecs(void);
void disableHighVecs(void);
// ------------------------------------------------------------
// ID Registers
unsigned int getMIDR(void);
#define MIDR_IMPL_SHIFT 24
#define MIDR_IMPL_MASK 0xFF
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK 0xF
#define MIDR_ARCH_SHIFT 16
#define MIDR_ARCH_MASK 0xF
#define MIDR_PART_SHIFT 4
#define MIDR_PART_MASK 0xFFF
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK 0xF
// tmp = getMIDR();
// implementor = (tmp >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
// variant = (tmp >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK;
// architecture= (tmp >> MIDR_ARCH_SHIFT) & MIDR_ARCH_MASK;
// part_number = (tmp >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
// revision = tmp & MIDR_REV_MASK;
#define MIDR_PART_CA5 0xC05
#define MIDR_PART_CA8 0xC08
#define MIDR_PART_CA9 0xC09
unsigned int getMPIDR(void);
#define MPIDR_FORMAT_SHIFT 31
#define MPIDR_FORMAT_MASK 0x1
#define MPIDR_UBIT_SHIFT 30
#define MPIDR_UBIT_MASK 0x1
#define MPIDR_CLUSTER_SHIFT 7
#define MPIDR_CLUSTER_MASK 0xF
#define MPIDR_CPUID_SHIFT 0
#define MPIDR_CPUID_MASK 0x3
#define MPIDR_CPUID_CPU0 0x0
#define MPIDR_CPUID_CPU1 0x1
#define MPIDR_CPUID_CPU2 0x2
#define MPIDR_CPUID_CPU3 0x3
#define MPIDR_UNIPROCESSPR 0x1
#define MPDIR_NEW_FORMAT 0x1
// ------------------------------------------------------------
// Context ID
unsigned int getContextID(void);
void setContextID(unsigned int);
#define CONTEXTID_ASID_SHIFT 0
#define CONTEXTID_ASID_MASK 0xFF
#define CONTEXTID_PROCID_SHIFT 8
#define CONTEXTID_PROCID_MASK 0x00FFFFFF
// tmp = getContextID();
// ASID = tmp & CONTEXTID_ASID_MASK;
// PROCID = (tmp >> CONTEXTID_PROCID_SHIFT) & CONTEXTID_PROCID_MASK;
// ------------------------------------------------------------
// SMP related for Armv7-A MPCore processors
//
// DO NOT CALL THESE FUNCTIONS ON A CORTEX-A8
// Returns the base address of the private peripheral memory space
unsigned int getBaseAddr(void);
// Returns the CPU ID (0 to 3) of the CPU executed on
#define MP_CPU0 (0)
#define MP_CPU1 (1)
#define MP_CPU2 (2)
#define MP_CPU3 (3)
unsigned int getCPUID(void);
// Set this core as participating in SMP
void joinSMP(void);
// Set this core as NOT participating in SMP
void leaveSMP(void);
// Go to sleep, never returns
void goToSleep(void);
#endif
// ------------------------------------------------------------
// End of v7.h
// ------------------------------------------------------------

View File

@ -0,0 +1,476 @@
// ------------------------------------------------------------
// v7-A Cache and Branch Prediction Maintenance Operations
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.arm
// ------------------------------------------------------------
// Interrupt enable/disable
// ------------------------------------------------------------
// Could use intrinsic instead of these
.global enableInterrupts
.type enableInterrupts,function
// void enableInterrupts(void)//
enableInterrupts:
CPSIE i
BX lr
.global disableInterrupts
.type disableInterrupts,function
// void disableInterrupts(void)//
disableInterrupts:
CPSID i
BX lr
// ------------------------------------------------------------
// Cache Maintenance
// ------------------------------------------------------------
.global enableCaches
.type enableCaches,function
// void enableCaches(void)//
enableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(1 << 2) // Set C bit
ORR r0, r0, #(1 << 12) // Set I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global disableCaches
.type disableCaches,function
// void disableCaches(void)
disableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(1 << 2) // Clear C bit
BIC r0, r0, #(1 << 12) // Clear I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global cleanDCache
.type cleanDCache,function
// void cleanDCache(void)//
cleanDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_dcache_finished
MOV r10, #0
clean_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c10, 2 // DCCSW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_dcache_loop2
clean_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_dcache_loop1
clean_dcache_finished:
POP {r4-r12}
BX lr
.global cleanInvalidateDCache
.type cleanInvalidateDCache,function
// void cleanInvalidateDCache(void)//
cleanInvalidateDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_invalidate_dcache_finished
MOV r10, #0
clean_invalidate_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_invalidate_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_invalidate_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_invalidate_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c14, 2 // DCCISW - clean and invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_invalidate_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_invalidate_dcache_loop2
clean_invalidate_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_invalidate_dcache_loop1
clean_invalidate_dcache_finished:
POP {r4-r12}
BX lr
.global invalidateCaches
.type invalidateCaches,function
// void invalidateCaches(void)//
invalidateCaches:
PUSH {r4-r12}
//
// Based on code example given in section B2.2.4/11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MOV r0, #0
MCR p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate entire I Cache, and flushes branch target cache
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_finished
MOV r10, #0
invalidate_caches_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_loop2
invalidate_caches_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_loop1
invalidate_caches_finished:
POP {r4-r12}
BX lr
.global invalidateCaches_IS
.type invalidateCaches_IS,function
// void invalidateCaches_IS(void)//
invalidateCaches_IS:
PUSH {r4-r12}
MOV r0, #0
MCR p15, 0, r0, c7, c1, 0 // ICIALLUIS - Invalidate entire I Cache inner shareable
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_is_finished
MOV r10, #0
invalidate_caches_is_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_is_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_is_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_is_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_is_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_is_loop2
invalidate_caches_is_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_is_loop1
invalidate_caches_is_finished:
POP {r4-r12}
BX lr
// ------------------------------------------------------------
// TLB
// ------------------------------------------------------------
.global invalidateUnifiedTLB
.type invalidateUnifiedTLB,function
// void invalidateUnifiedTLB(void)//
invalidateUnifiedTLB:
MOV r0, #0
MCR p15, 0, r0, c8, c7, 0 // TLBIALL - Invalidate entire unified TLB
BX lr
.global invalidateUnifiedTLB_IS
.type invalidateUnifiedTLB_IS,function
// void invalidateUnifiedTLB_IS(void)//
invalidateUnifiedTLB_IS:
MOV r0, #1
MCR p15, 0, r0, c8, c3, 0 // TLBIALLIS - Invalidate entire unified TLB Inner Shareable
BX lr
// ------------------------------------------------------------
// Branch Prediction
// ------------------------------------------------------------
.global flushBranchTargetCache
.type flushBranchTargetCache,function
// void flushBranchTargetCache(void)
flushBranchTargetCache:
MOV r0, #0
MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
BX lr
.global flushBranchTargetCache_IS
.type flushBranchTargetCache_IS,function
// void flushBranchTargetCache_IS(void)
flushBranchTargetCache_IS:
MOV r0, #0
MCR p15, 0, r0, c7, c1, 6 // BPIALLIS - Invalidate entire branch predictor array Inner Shareable
BX lr
// ------------------------------------------------------------
// High Vecs
// ------------------------------------------------------------
.global enableHighVecs
.type enableHighVecs,function
// void enableHighVecs(void)//
enableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
ORR r0, r0, #(1 << 13) // Set the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.global disableHighVecs
.type disableHighVecs,function
// void disableHighVecs(void)//
disableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
BIC r0, r0, #(1 << 13) // Clear the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
// ------------------------------------------------------------
// Context ID
// ------------------------------------------------------------
.global getContextID
.type getContextID,function
// uint32_t getContextID(void)//
getContextID:
MRC p15, 0, r0, c13, c0, 1 // Read Context ID Register
BX lr
.global setContextID
.type setContextID,function
// void setContextID(uint32_t)//
setContextID:
MCR p15, 0, r0, c13, c0, 1 // Write Context ID Register
BX lr
// ------------------------------------------------------------
// ID registers
// ------------------------------------------------------------
.global getMIDR
.type getMIDR,function
// uint32_t getMIDR(void)//
getMIDR:
MRC p15, 0, r0, c0, c0, 0 // Read Main ID Register (MIDR)
BX lr
.global getMPIDR
.type getMPIDR,function
// uint32_t getMPIDR(void)//
getMPIDR:
MRC p15, 0, r0, c0, c0, 5 // Read Multiprocessor ID register (MPIDR)
BX lr
// ------------------------------------------------------------
// CP15 SMP related
// ------------------------------------------------------------
.global getBaseAddr
.type getBaseAddr,function
// uint32_t getBaseAddr(void)
// Returns the value CBAR (base address of the private peripheral memory space)
getBaseAddr:
MRC p15, 4, r0, c15, c0, 0 // Read peripheral base address
BX lr
// ------------------------------------------------------------
.global getCPUID
.type getCPUID,function
// uint32_t getCPUID(void)
// Returns the CPU ID (0 to 3) of the CPU executed on
getCPUID:
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
BX lr
// ------------------------------------------------------------
.global goToSleep
.type goToSleep,function
// void goToSleep(void)
goToSleep:
DSB // Clear all pending data accesses
WFI // Go into standby
B goToSleep // Catch in case of rogue events
BX lr
// ------------------------------------------------------------
.global joinSMP
.type joinSMP,function
// void joinSMP(void)
// Sets the ACTRL.SMP bit
joinSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
MOV r1, r0
ORR r0, r0, #0x040 // Set bit 6
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
.global leaveSMP
.type leaveSMP,function
// void leaveSMP(void)
// Clear the ACTRL.SMP bit
leaveSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
BIC r0, r0, #0x040 // Clear bit 6
MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
// End of v7.s
// ------------------------------------------------------------

View File

@ -79,6 +79,14 @@ _mainCRTStartup:
#endif
#endif
.global _fini
.type _fini,function
_fini:
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
MOV pc, lr // Return to caller
#endif
/* Workspace for Angel calls. */
.data

View File

@ -109,7 +109,7 @@ SECTIONS
.eh_frame_hdr : { *(.eh_frame_hdr) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(256) + (. & (256 - 1));
. = 0x2E000000;
.data :
{
*(.data)

View File

@ -1,311 +0,0 @@
/**************************************************************************/
/* */
/* Copyright (c) Microsoft Corporation. All rights reserved. */
/* */
/* This software is licensed under the Microsoft Software License */
/* Terms for Microsoft Azure RTOS. Full text of the license can be */
/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
/* and in the root directory of this software. */
/* */
/**************************************************************************/
/**************************************************************************/
/**************************************************************************/
/** */
/** ThreadX Component */
/** */
/** Initialize */
/** */
/**************************************************************************/
/**************************************************************************/
#ifdef TX_INCLUDE_USER_DEFINE_FILE
#include "tx_user.h"
#endif
.arm
SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
FIQ_STACK_SIZE = 512 // FIQ stack size
IRQ_STACK_SIZE = 1024 // IRQ stack size
SYS_STACK_SIZE = 1024 // System stack size
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
.global _tx_thread_context_restore
.global _tx_timer_interrupt
.global _end
.global _sp
.global _stack_bottom
/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
applications calling this function from 16-bit Thumb mode. */
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
BX pc // Switch to 32-bit mode
NOP //
.arm
STMFD sp!, {lr} // Save return address
BL _tx_initialize_low_level // Call _tx_initialize_low_level function
LDMFD sp!, {lr} // Recover saved return address
BX lr // Return to 16-bit caller
.text
.align 2
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_initialize_low_level ARMv7-A */
/* 6.x */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
/* */
/* This function is responsible for any low-level processor */
/* initialization, including setting up interrupt vectors, setting */
/* up a periodic timer interrupt source, saving the system stack */
/* pointer for use in ISR processing later, and finding the first */
/* available RAM memory address for tx_application_define. */
/* */
/* INPUT */
/* */
/* None */
/* */
/* OUTPUT */
/* */
/* None */
/* */
/* CALLS */
/* */
/* None */
/* */
/* CALLED BY */
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-25-2022 Zhen Kong Updated comments, */
/* resulting in version 6.1.11 */
/* xx-xx-xxxx Tiejun Zhou Modified comment(s), added */
/* #include tx_user.h, */
/* resulting in version 6.x */
/* */
/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
/* We must be in SVC mode at this point! */
/* Setup various stack pointers. */
LDR r1, =_sp // Get pointer to stack area
#ifdef TX_ENABLE_IRQ_NESTING
/* Setup the system mode stack for nested interrupt support */
LDR r2, =SYS_STACK_SIZE // Pickup stack size
MOV r3, #SYS_MODE // Build SYS mode CPSR
MSR CPSR_c, r3 // Enter SYS mode
SUB r1, r1, #1 // Backup 1 byte
BIC r1, r1, #7 // Ensure 8-byte alignment
MOV sp, r1 // Setup SYS stack pointer
SUB r1, r1, r2 // Calculate start of next stack
#endif
LDR r2, =FIQ_STACK_SIZE // Pickup stack size
MOV r0, #FIQ_MODE // Build FIQ mode CPSR
MSR CPSR, r0 // Enter FIQ mode
SUB r1, r1, #1 // Backup 1 byte
BIC r1, r1, #7 // Ensure 8-byte alignment
MOV sp, r1 // Setup FIQ stack pointer
SUB r1, r1, r2 // Calculate start of next stack
LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
MOV r0, #IRQ_MODE // Build IRQ mode CPSR
MSR CPSR, r0 // Enter IRQ mode
SUB r1, r1, #1 // Backup 1 byte
BIC r1, r1, #7 // Ensure 8-byte alignment
MOV sp, r1 // Setup IRQ stack pointer
SUB r3, r1, r2 // Calculate end of IRQ stack
MOV r0, #SVC_MODE // Build SVC mode CPSR
MSR CPSR, r0 // Enter SVC mode
LDR r2, =_stack_bottom // Pickup stack bottom
CMP r3, r2 // Compare the current stack end with the bottom
_stack_error_loop:
BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
STR r1, [r2] // Save the system stack
LDR r1, =_end // Get end of non-initialized RAM area
LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
ADD r1, r1, #8 // Increment to next free word
STR r1, [r2] // Save first free memory address
#ifdef __THUMB_INTERWORK
BX lr // Return to caller
#else
MOV pc, lr // Return to caller
#endif
/* Define shells for each of the interrupt vectors. */
.global __tx_undefined
__tx_undefined:
B __tx_undefined // Undefined handler
.global __tx_swi_interrupt
__tx_swi_interrupt:
B __tx_swi_interrupt // Software interrupt handler
.global __tx_prefetch_handler
__tx_prefetch_handler:
B __tx_prefetch_handler // Prefetch exception handler
.global __tx_abort_handler
__tx_abort_handler:
B __tx_abort_handler // Abort exception handler
.global __tx_reserved_handler
__tx_reserved_handler:
B __tx_reserved_handler // Reserved exception handler
.global __tx_irq_handler
.global __tx_irq_processing_return
__tx_irq_handler:
/* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
//
/* At this point execution is still in the IRQ mode. The CPSR, point of
interrupt, and all C scratch registers are available for use. In
addition, IRQ interrupts may be re-enabled - with certain restrictions -
if nested IRQ interrupts are desired. Interrupts may be re-enabled over
small code sequences where lr is saved before enabling interrupts and
restored after interrupts are again disabled. */
/* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
from IRQ mode with interrupts disabled. This routine switches to the
system mode and returns with IRQ interrupts enabled.
NOTE: It is very important to ensure all IRQ interrupts are cleared
prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
/* For debug purpose, execute the timer interrupt processing here. In
a real system, some kind of status indication would have to be checked
before the timer interrupt handler could be called. */
BL _tx_timer_interrupt // Timer interrupt handler
/* If interrupt nesting was started earlier, the end of interrupt nesting
service must be called before returning to _tx_thread_context_restore.
This routine returns with the processor in IRQ mode and interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
/* Jump to context restore to restore system context. */
B _tx_thread_context_restore
/* This is an example of a vectored IRQ handler. */
/* Save initial context and call context save to prepare for
vectored ISR execution. */
/* At this point execution is still in the IRQ mode. The CPSR, point of
interrupt, and all C scratch registers are available for use. In
addition, IRQ interrupts may be re-enabled - with certain restrictions -
if nested IRQ interrupts are desired. Interrupts may be re-enabled over
small code sequences where lr is saved before enabling interrupts and
restored after interrupts are again disabled. */
/* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
from IRQ mode with interrupts disabled. This routine switches to the
system mode and returns with IRQ interrupts enabled.
NOTE: It is very important to ensure all IRQ interrupts are cleared
prior to enabling nested IRQ interrupts. */
/* Application IRQ handlers can be called here! */
/* If interrupt nesting was started earlier, the end of interrupt nesting
service must be called before returning to _tx_thread_context_restore.
This routine returns with the processor in IRQ mode and interrupts disabled. */
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
/* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
/* At this point execution is still in the FIQ mode. The CPSR, point of
interrupt, and all C scratch registers are available for use. */
/* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
from FIQ mode with interrupts disabled. This routine switches to the
system mode and returns with FIQ interrupts enabled.
NOTE: It is very important to ensure all FIQ interrupts are cleared
prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
/* Application FIQ handlers can be called here! */
/* If interrupt nesting was started earlier, the end of interrupt nesting
service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
/* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
#else
.global __tx_fiq_handler
__tx_fiq_handler:
B __tx_fiq_handler // FIQ interrupt handler
#endif
BUILD_OPTIONS:
.word _tx_build_options // Reference to bring in
VERSION_ID:
.word _tx_version_id // Reference to bring in

View File

@ -0,0 +1,155 @@
// ------------------------------------------------------------
// v7-A Cache, TLB and Branch Prediction Maintenance Operations
// Header File
//
// Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
#ifndef _ARMV7A_GENERIC_H
#define _ARMV7A_GENERIC_H
// ------------------------------------------------------------
// Memory barrier mnemonics
enum MemBarOpt {
RESERVED_0 = 0, RESERVED_1 = 1, OSHST = 2, OSH = 3,
RESERVED_4 = 4, RESERVED_5 = 5, NSHST = 6, NSH = 7,
RESERVED_8 = 8, RESERVED_9 = 9, ISHST = 10, ISH = 11,
RESERVED_12 = 12, RESERVED_13 = 13, ST = 14, SY = 15
};
//
// Note:
// *_IS() stands for "inner shareable"
// DO NOT USE THESE FUNCTIONS ON A CORTEX-A8
//
// ------------------------------------------------------------
// Interrupts
// Enables/disables IRQs (not FIQs)
void enableInterrupts(void);
void disableInterrupts(void);
// ------------------------------------------------------------
// Caches
void invalidateCaches_IS(void);
void cleanInvalidateDCache(void);
void invalidateCaches_IS(void);
void enableCaches(void);
void disableCaches(void);
void invalidateCaches(void);
void cleanDCache(void);
// ------------------------------------------------------------
// TLBs
void invalidateUnifiedTLB(void);
void invalidateUnifiedTLB_IS(void);
// ------------------------------------------------------------
// Branch prediction
void flushBranchTargetCache(void);
void flushBranchTargetCache_IS(void);
// ------------------------------------------------------------
// High Vecs
void enableHighVecs(void);
void disableHighVecs(void);
// ------------------------------------------------------------
// ID Registers
unsigned int getMIDR(void);
#define MIDR_IMPL_SHIFT 24
#define MIDR_IMPL_MASK 0xFF
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK 0xF
#define MIDR_ARCH_SHIFT 16
#define MIDR_ARCH_MASK 0xF
#define MIDR_PART_SHIFT 4
#define MIDR_PART_MASK 0xFFF
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK 0xF
// tmp = getMIDR();
// implementor = (tmp >> MIDR_IMPL_SHIFT) & MIDR_IMPL_MASK;
// variant = (tmp >> MIDR_VAR_SHIFT) & MIDR_VAR_MASK;
// architecture= (tmp >> MIDR_ARCH_SHIFT) & MIDR_ARCH_MASK;
// part_number = (tmp >> MIDR_PART_SHIFT) & MIDR_PART_MASK;
// revision = tmp & MIDR_REV_MASK;
#define MIDR_PART_CA5 0xC05
#define MIDR_PART_CA8 0xC08
#define MIDR_PART_CA9 0xC09
unsigned int getMPIDR(void);
#define MPIDR_FORMAT_SHIFT 31
#define MPIDR_FORMAT_MASK 0x1
#define MPIDR_UBIT_SHIFT 30
#define MPIDR_UBIT_MASK 0x1
#define MPIDR_CLUSTER_SHIFT 7
#define MPIDR_CLUSTER_MASK 0xF
#define MPIDR_CPUID_SHIFT 0
#define MPIDR_CPUID_MASK 0x3
#define MPIDR_CPUID_CPU0 0x0
#define MPIDR_CPUID_CPU1 0x1
#define MPIDR_CPUID_CPU2 0x2
#define MPIDR_CPUID_CPU3 0x3
#define MPIDR_UNIPROCESSPR 0x1
#define MPDIR_NEW_FORMAT 0x1
// ------------------------------------------------------------
// Context ID
unsigned int getContextID(void);
void setContextID(unsigned int);
#define CONTEXTID_ASID_SHIFT 0
#define CONTEXTID_ASID_MASK 0xFF
#define CONTEXTID_PROCID_SHIFT 8
#define CONTEXTID_PROCID_MASK 0x00FFFFFF
// tmp = getContextID();
// ASID = tmp & CONTEXTID_ASID_MASK;
// PROCID = (tmp >> CONTEXTID_PROCID_SHIFT) & CONTEXTID_PROCID_MASK;
// ------------------------------------------------------------
// SMP related for Armv7-A MPCore processors
//
// DO NOT CALL THESE FUNCTIONS ON A CORTEX-A8
// Returns the base address of the private peripheral memory space
unsigned int getBaseAddr(void);
// Returns the CPU ID (0 to 3) of the CPU executed on
#define MP_CPU0 (0)
#define MP_CPU1 (1)
#define MP_CPU2 (2)
#define MP_CPU3 (3)
unsigned int getCPUID(void);
// Set this core as participating in SMP
void joinSMP(void);
// Set this core as NOT participating in SMP
void leaveSMP(void);
// Go to sleep, never returns
void goToSleep(void);
#endif
// ------------------------------------------------------------
// End of v7.h
// ------------------------------------------------------------

View File

@ -0,0 +1,476 @@
// ------------------------------------------------------------
// v7-A Cache and Branch Prediction Maintenance Operations
//
// Copyright (c) 2011-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
// ------------------------------------------------------------
.arm
// ------------------------------------------------------------
// Interrupt enable/disable
// ------------------------------------------------------------
// Could use intrinsic instead of these
.global enableInterrupts
.type enableInterrupts,function
// void enableInterrupts(void)//
enableInterrupts:
CPSIE i
BX lr
.global disableInterrupts
.type disableInterrupts,function
// void disableInterrupts(void)//
disableInterrupts:
CPSID i
BX lr
// ------------------------------------------------------------
// Cache Maintenance
// ------------------------------------------------------------
.global enableCaches
.type enableCaches,function
// void enableCaches(void)//
enableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(1 << 2) // Set C bit
ORR r0, r0, #(1 << 12) // Set I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global disableCaches
.type disableCaches,function
// void disableCaches(void)
disableCaches:
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(1 << 2) // Clear C bit
BIC r0, r0, #(1 << 12) // Clear I bit
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
BX lr
.global cleanDCache
.type cleanDCache,function
// void cleanDCache(void)//
cleanDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_dcache_finished
MOV r10, #0
clean_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c10, 2 // DCCSW - clean by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_dcache_loop2
clean_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_dcache_loop1
clean_dcache_finished:
POP {r4-r12}
BX lr
.global cleanInvalidateDCache
.type cleanInvalidateDCache,function
// void cleanInvalidateDCache(void)//
cleanInvalidateDCache:
PUSH {r4-r12}
//
// Based on code example given in section 11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ clean_invalidate_dcache_finished
MOV r10, #0
clean_invalidate_dcache_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT clean_invalidate_dcache_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
clean_invalidate_dcache_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
clean_invalidate_dcache_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c14, 2 // DCCISW - clean and invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE clean_invalidate_dcache_loop3
SUBS r7, r7, #1 // decrement the index
BGE clean_invalidate_dcache_loop2
clean_invalidate_dcache_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT clean_invalidate_dcache_loop1
clean_invalidate_dcache_finished:
POP {r4-r12}
BX lr
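// cleanInvalidateDCache is the variant normally used when a core is about to
// leave coherency or power down. A typical ordering, given as a hedged sketch
// (common Armv7-A practice rather than anything mandated by this port):
//
//   disableInterrupts();
//   disableCaches();            // clear SCTLR.C/I first so no new lines are allocated
//   cleanInvalidateDCache();    // push dirty lines to memory, then invalidate
//   leaveSMP();                 // finally drop out of the coherency domain (ACTLR.SMP)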
.global invalidateCaches
.type invalidateCaches,function
// void invalidateCaches(void)//
invalidateCaches:
PUSH {r4-r12}
//
// Based on code example given in section B2.2.4/11.2.4 of Armv7-A/R Architecture Reference Manual (DDI 0406B)
//
MOV r0, #0
MCR p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate entire instruction cache and flush the branch target cache
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_finished
MOV r10, #0
invalidate_caches_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number of the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_loop2
invalidate_caches_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_loop1
invalidate_caches_finished:
POP {r4-r12}
BX lr
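// invalidateCaches is normally run once from the reset handler, before
// enableCaches(), because cache and branch-predictor contents are
// architecturally unknown out of reset. A minimal boot-time sketch
// (the ordering reflects common practice, not something this file enforces):
//
//   invalidateCaches();
//   invalidateUnifiedTLB();
//   enableCaches();             // only once the MMU/translation tables are in place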
.global invalidateCaches_IS
.type invalidateCaches_IS,function
// void invalidateCaches_IS(void)//
invalidateCaches_IS:
PUSH {r4-r12}
MOV r0, #0
MCR p15, 0, r0, c7, c1, 0 // ICIALLUIS - Invalidate entire I Cache inner shareable
MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
ANDS r3, r0, #0x7000000
MOV r3, r3, LSR #23 // Cache level value (naturally aligned)
BEQ invalidate_caches_is_finished
MOV r10, #0
invalidate_caches_is_loop1:
ADD r2, r10, r10, LSR #1 // Work out 3xcachelevel
MOV r1, r0, LSR r2 // bottom 3 bits are the Cache type for this level
AND r1, r1, #7 // get those 3 bits alone
CMP r1, #2
BLT invalidate_caches_is_skip // no cache or only instruction cache at this level
MCR p15, 2, r10, c0, c0, 0 // write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, r1, c0, c0, 0 // reads current Cache Size ID register
AND r2, r1, #7 // extract the line length field
ADD r2, r2, #4 // add 4 for the line length offset (log2 16 bytes)
LDR r4, =0x3FF
ANDS r4, r4, r1, LSR #3 // R4 is the max number of the way size (right aligned)
CLZ r5, r4 // R5 is the bit position of the way size increment
LDR r7, =0x00007FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
invalidate_caches_is_loop2:
MOV r9, R4 // R9 working copy of the max way size (right aligned)
invalidate_caches_is_loop3:
ORR r11, r10, r9, LSL r5 // factor in the way number and cache number into R11
ORR r11, r11, r7, LSL r2 // factor in the index number
MCR p15, 0, r11, c7, c6, 2 // DCISW - invalidate by set/way
SUBS r9, r9, #1 // decrement the way number
BGE invalidate_caches_is_loop3
SUBS r7, r7, #1 // decrement the index
BGE invalidate_caches_is_loop2
invalidate_caches_is_skip:
ADD r10, r10, #2 // increment the cache number
CMP r3, r10
BGT invalidate_caches_is_loop1
invalidate_caches_is_finished:
POP {r4-r12}
BX lr
// ------------------------------------------------------------
// TLB
// ------------------------------------------------------------
.global invalidateUnifiedTLB
.type invalidateUnifiedTLB,function
// void invalidateUnifiedTLB(void)//
invalidateUnifiedTLB:
MOV r0, #0
MCR p15, 0, r0, c8, c7, 0 // TLBIALL - Invalidate entire unified TLB
BX lr
.global invalidateUnifiedTLB_IS
.type invalidateUnifiedTLB_IS,function
// void invalidateUnifiedTLB_IS(void)//
invalidateUnifiedTLB_IS:
MOV r0, #1
MCR p15, 0, r0, c8, c3, 0 // TLBIALLIS - Invalidate entire unified TLB Inner Shareable
BX lr
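// A common use of the TLB invalidates is after the translation tables have
// been rewritten. One conventional sequence, as a hedged sketch
// (update_page_tables() is a hypothetical helper owned by the caller, and a
// clean by VA of just the modified entries would be cheaper than the whole
// set/way clean available in this file):
//
//   update_page_tables();
//   cleanDCache();              // make the table writes visible to the table walker
//   invalidateUnifiedTLB();     // drop any stale translations
//   flushBranchTargetCache();   // and any stale branch predictions
//   // ...followed by DSB and ISB before relying on the new mappings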
// ------------------------------------------------------------
// Branch Prediction
// ------------------------------------------------------------
.global flushBranchTargetCache
.type flushBranchTargetCache,function
// void flushBranchTargetCache(void)
flushBranchTargetCache:
MOV r0, #0
MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
BX lr
.global flushBranchTargetCache_IS
.type flushBranchTargetCache_IS,function
// void flushBranchTargetCache_IS(void)
flushBranchTargetCache_IS:
MOV r0, #0
MCR p15, 0, r0, c7, c1, 6 // BPIALLIS - Invalidate entire branch predictor array Inner Shareable
BX lr
// ------------------------------------------------------------
// High Vecs
// ------------------------------------------------------------
.global enableHighVecs
.type enableHighVecs,function
// void enableHighVecs(void)//
enableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
ORR r0, r0, #(1 << 13) // Set the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
.global disableHighVecs
.type disableHighVecs,function
// void disableHighVecs(void)//
disableHighVecs:
MRC p15, 0, r0, c1, c0, 0 // Read Control Register
BIC r0, r0, #(1 << 13) // Clear the V bit (bit 13)
MCR p15, 0, r0, c1, c0, 0 // Write Control Register
ISB
BX lr
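// SCTLR.V only selects where the exception vectors are fetched from: with the
// bit set the core uses the high vectors at 0xFFFF0000, with it clear it uses
// 0x00000000 (or VBAR on cores with the Security Extensions). A hedged usage
// sketch for a port that keeps its vector table high (copy_vectors_to is a
// hypothetical startup helper, not part of this file):
//
//   copy_vectors_to(0xFFFF0000);
//   enableHighVecs();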
// ------------------------------------------------------------
// Context ID
// ------------------------------------------------------------
.global getContextID
.type getContextID,function
// uint32_t getContextID(void)//
getContextID:
MRC p15, 0, r0, c13, c0, 1 // Read Context ID Register
BX lr
.global setContextID
.type setContextID,function
// void setContextID(uint32_t)//
setContextID:
MCR p15, 0, r0, c13, c0, 1 // Write Context ID Register
BX lr
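// In CONTEXTIDR (short-descriptor translation scheme) bits [7:0] hold the ASID
// and bits [31:8] the PROCID. An illustrative way for a caller to compose the
// value passed to setContextID (the procid/asid variables are assumptions of
// the caller, not of this file):
//
//   uint32_t ctx = (procid << 8) | (asid & 0xFFu);
//   setContextID(ctx);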
// ------------------------------------------------------------
// ID registers
// ------------------------------------------------------------
.global getMIDR
.type getMIDR,function
// uint32_t getMIDR(void)//
getMIDR:
MRC p15, 0, r0, c0, c0, 0 // Read Main ID Register (MIDR)
BX lr
.global getMPIDR
.type getMPIDR,function
// uint32_t getMPIDR(void)//
getMPIDR:
MRC p15, 0, r0, c0, c0, 5 // Read Multiprocessor ID register (MPIDR)
BX lr
// ------------------------------------------------------------
// CP15 SMP related
// ------------------------------------------------------------
.global getBaseAddr
.type getBaseAddr,function
// uint32_t getBaseAddr(void)
// Returns the value of CBAR (the base address of the private peripheral memory space)
getBaseAddr:
MRC p15, 4, r0, c15, c0, 0 // Read peripheral base address
BX lr
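// On Cortex-A7 the value returned here (CBAR/PERIPHBASE) is the base of the
// private peripheral region containing the GIC. The offsets below follow the
// usual Cortex-A7 layout but should be confirmed against the TRM for the
// target part (they differ on, for example, Cortex-A9):
//
//   uint32_t periph_base = getBaseAddr();
//   uint32_t gicd_base   = periph_base + 0x1000;   // GIC distributor
//   uint32_t gicc_base   = periph_base + 0x2000;   // GIC CPU interface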
// ------------------------------------------------------------
.global getCPUID
.type getCPUID,function
// uint32_t getCPUID(void)
// Returns the CPU ID (0 to 3) of the CPU it is executing on
getCPUID:
MRC p15, 0, r0, c0, c0, 5 // Read CPU ID register
AND r0, r0, #0x03 // Mask off, leaving the CPU ID field
BX lr
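// getCPUID is convenient for steering per-core start-up code. A minimal
// sketch (the split between primary and secondary work, and both helper
// names, are the caller's choice and are hypothetical here):
//
//   if (getCPUID() == 0)
//       primary_core_init();     // e.g. clocks, translation tables, data init
//   else
//       secondary_core_init();   // e.g. join SMP, then wait for work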
// ------------------------------------------------------------
.global goToSleep
.type goToSleep,function
// void goToSleep(void)
goToSleep:
DSB // Complete all pending data accesses
WFI // Go into standby
B goToSleep // Catch in case of rogue events
BX lr
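// Note that the branch back to goToSleep makes this routine loop indefinitely:
// a waking interrupt is expected to be taken and handled out of the WFI, after
// which the core simply re-enters standby, so the trailing BX lr is never
// normally reached.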
// ------------------------------------------------------------
.global joinSMP
.type joinSMP,function
// void joinSMP(void)
// Sets the ACTLR.SMP bit
joinSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
MOV r1, r0
ORR r0, r0, #0x040 // Set bit 6
CMP r0, r1
MCRNE p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
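// ACTLR.SMP needs to be set before the data cache is enabled on a core that
// is meant to take part in coherency. A typical secondary-core bring-up
// sketch (the ordering reflects common practice, not a requirement stated in
// this file):
//
//   invalidateCaches();          // core-local caches are undefined out of reset
//   joinSMP();                   // join the coherency domain first
//   enableCaches();              // then turn the caches on (typically together with the MMU)
//   enableInterrupts();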
// ------------------------------------------------------------
.global leaveSMP
.type leaveSMP,function
// void leaveSMP(void)
// Clears the ACTLR.SMP bit
leaveSMP:
// SMP status is controlled by bit 6 of the CP15 Aux Ctrl Reg
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
BIC r0, r0, #0x040 // Clear bit 6
MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
ISB
BX lr
// ------------------------------------------------------------
// End of v7.s
// ------------------------------------------------------------

View File

@@ -1,18 +1,18 @@
>output thread_0_counter
2913840557
10
>output thread_1_counter
2913840557
299267
>output thread_2_counter
2913840557
299268
>output thread_3_counter
2913840557
23
>output thread_4_counter
2913840557
23
>output thread_5_counter
2913840557
9
>output thread_6_counter
2913840557
23
>output thread_7_counter
2913840557
23
>log file
Stopped duplicating logging output