FOC: Enable caches

This commit is contained in:
Sebastian Sumpf
2013-02-14 14:33:20 +01:00
parent a5c603b666
commit dad53fb148
4 changed files with 53 additions and 32 deletions

View File

@@ -14,8 +14,8 @@ Platform_control::boot_ap_cpus(Address phys_tramp_mp_addr)
{
// Write start address to iRam base (0x2020000). This is checked by the app
// cpus within a wfe (wait-for-event) loop.
printf("START CPUs\n");
Io::write<Mword>(phys_tramp_mp_addr, Kmem::Devices5_map_base + 0x20000);
Io::write<Mword>(phys_tramp_mp_addr, Kmem::Devices5_map_base + 0x20000);
// wake-up cpus
asm volatile("dsb; sev" : : : "memory");
}

View File

@@ -46,10 +46,10 @@ void Timer::init(unsigned)
{
unsigned cpu_id = Proc::cpu_id();
if (!Cpu::boot_cpu()->phys_id() == cpu_id)
if (Cpu::boot_cpu()->phys_id() == cpu_id)
{
// prescaler to one
Io::write<Mword>(0x1, CFG0);
Io::write<Mword>(0x101, CFG0);
// divider to 1
Io::write<Mword>(0x0, CFG1);
}

View File

@@ -45,7 +45,7 @@ public:
enum
{
Cache_enabled = false,
Cache_enabled = true,
};
static const char char_micro;

View File

@@ -5,35 +5,56 @@
.p2align 12
#ifdef CONFIG_ARM_V7
/* See cache_func_gen.cpp */
.global v7_invalidate_l1
invalidate_l1_v7:
/* Directly taken from the ARMv7 Architecture Reference Manual, section
'Performing cache maintenance operations' */
.global v7_invalidate_dcache
v7_invalidate_dcache:
mrc p15, 1, r0, c0, c0, 1 @Read CLIDR into R0
ands r3, r0, #0x07000000
mov r3, r3, lsr #23 @ Cache level value (naturally aligned)
beq finished
mov r10, #0
loop1:
add r2, r10, r10, lsr #1 @ Work out 3 x cachelevel
mov r1, r0, lsr r2 @ bottom 3 bits are the Cache type for this level
and r1, r1, #7 @ get those 3 bits alone
cmp r1, #2
blt skip @ no cache or only instruction cache at this level
mcr p15, 2, r10, c0, c0, 0 @ write csselr from r10
isb @ ISB to sync the change to the CCSIDR
mrc p15, 1, r1, c0, c0, 0 @ read current CCSIDR to R1
and r2, r1, #7 @ extract the line length field
add r2, r2, #4 @ add 4 for the line length offset (log2 16 bytes)
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 @ R4 is the max number on the way size (right aligned)
clz r5, r4 @ R5 is the bit position of the way size increment
mov r9, r4 @ R9 working copy of the max way size (right aligned)
loop2:
ldr r7, =0x00007fff
ands r7, r7, r1, lsr #13 @ R7 is the max number of the index size (right aligned)
loop3:
orr r11, r10, r9, lsl r5 @ factor in the way number and cache number into R11
orr r11, r11, r7, lsl r2 @ factor in the index number
mcr p15, 0, r11, c7, c14, 2 @ dccsw, clean/invalidate by set/way
subs r7, r7, #1 @ decrement the index
bge loop3
subs r9, r9, #1 @ decrement the way number
bge loop2
skip:
add r10, r10, #2 @ increment the cache number
cmp r3, r10
bgt loop1
mov r3, #0
mcr p15, 2, r3, c0, c0, 0
mrc p15, 1, r2, c0, c0, 0
mov r3, r2, lsr #3
mov r0, r2, lsr #13
mov r3, r3, asl #22
mov r3, r3, lsr #22
and r2, r2, #7
mov r0, r0, asl #17
add ip, r2, #4
mov r0, r0, lsr #17
mov r2, r3
clz r3, r3
.L3:
mov r4, r2, asl r3
mov r1, r0
.L2:
orr r5, r4, r1, asl ip
mcr p15, 0, r5, c7, c6, 2
subs r1, r1, #1
bcs .L2
subs r2, r2, #1
bcs .L3
isb
dsb
mov pc, lr
isb
finished:
mov pc, lr
#endif
.global _tramp_mp_entry
@@ -53,7 +74,7 @@ _tramp_mp_entry:
#endif
#ifdef CONFIG_ARM_V7
bl invalidate_l1_v7
bl v7_invalidate_dcache
#endif
mcr p15, 0, r0, c7, c5, 0 // ICIALLU