bcm: fix /dev/reboot text/data corruption (thanks richard miller)

- clean dcache before turning off caches and mmu (rebootcode.s)
- use WFE and inter-core mailboxes for cpu startup (rebootcode.s)
- disable SMP during dcache invalidation before enabling caches and mmu (in armv7.s)
This commit is contained in:
cinap_lenrek 2018-10-31 19:48:16 +01:00
parent 913be4e74a
commit 5608be398e
5 changed files with 103 additions and 86 deletions

View file

@@ -145,7 +145,6 @@ getncpus(void)
{ {
int n, max; int n, max;
char *p; char *p;
n = 4; n = 4;
if(n > MAXMACH) if(n > MAXMACH)
n = MAXMACH; n = MAXMACH;

View file

@@ -46,34 +46,34 @@ TEXT armstart(SB), 1, $-4
BARRIERS BARRIERS
/* /*
- * turn SMP on
- * invalidate tlb
+ * turn SMP off
*/ */
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
-ORR $CpACsmp, R1 /* turn SMP on */
+BIC $CpACsmp, R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
BARRIERS BARRIERS
-MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
-BARRIERS
/* /*
* clear mach and page tables * clear mach and page tables
*/ */
MOVW $PADDR(MACHADDR), R1 MOVW $PADDR(MACHADDR), R1
MOVW $PADDR(KTZERO), R2 MOVW $PADDR(KTZERO), R2
MOVW $0, R0
_ramZ: _ramZ:
MOVW R0, (R1) MOVW R0, (R1)
ADD $4, R1 ADD $4, R1
CMP R1, R2 CMP R1, R2
BNE _ramZ BNE _ramZ
/* /*
* start stack at top of mach (physical addr) * start stack at top of mach (physical addr)
* set up page tables for kernel * set up page tables for kernel
*/ */
MOVW $PADDR(MACHADDR+MACHSIZE-4), R13 MOVW $PADDR(MACHADDR+MACHSIZE-4), R13
MOVW $PADDR(L1), R0 MOVW $PADDR(L1), R0
BL mmuinit(SB) BL mmuinit(SB)
BL mmuinvalidate(SB)
/* /*
* set up domain access control and page table base * set up domain access control and page table base
@@ -93,6 +93,14 @@ _ramZ:
BL l2cacheuinv(SB) BL l2cacheuinv(SB)
BARRIERS BARRIERS
/*
* turn SMP on
*/
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
ORR $CpACsmp, R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
BARRIERS
/* /*
* enable caches, mmu, and high vectors * enable caches, mmu, and high vectors
*/ */
@@ -133,12 +141,10 @@ TEXT cpureset(SB), 1, $-4
reset: reset:
/* /*
* load physical base for SB addressing while mmu is off * load physical base for SB addressing while mmu is off
- * keep a handy zero in R0 until first function call
*/ */
MOVW $setR12(SB), R12 MOVW $setR12(SB), R12
SUB $KZERO, R12 SUB $KZERO, R12
ADD $PHYSDRAM, R12 ADD $PHYSDRAM, R12
-MOVW $0, R0
/* /*
* SVC mode, interrupts disabled * SVC mode, interrupts disabled
@@ -156,15 +162,12 @@ reset:
BARRIERS BARRIERS
/* /*
- * turn SMP on
- * invalidate tlb
+ * turn SMP off
*/ */
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
-ORR $CpACsmp, R1 /* turn SMP on */
+BIC $CpACsmp, R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
BARRIERS BARRIERS
-MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
-BARRIERS
/* /*
* find Mach for this cpu * find Mach for this cpu
@@ -173,6 +176,8 @@ reset:
AND $(MAXMACH-1), R2 /* mask out non-cpu-id bits */ AND $(MAXMACH-1), R2 /* mask out non-cpu-id bits */
SLL $2, R2 /* convert to word index */ SLL $2, R2 /* convert to word index */
MOVW $machaddr(SB), R0 MOVW $machaddr(SB), R0
BIC $KSEGM, R0
ORR $PHYSDRAM, R0
ADD R2, R0 /* R0 = &machaddr[cpuid] */ ADD R2, R0 /* R0 = &machaddr[cpuid] */
MOVW (R0), R0 /* R0 = machaddr[cpuid] */ MOVW (R0), R0 /* R0 = machaddr[cpuid] */
CMP $0, R0 CMP $0, R0
@@ -184,6 +189,8 @@ reset:
*/ */
ADD $(MACHSIZE-4), R(MACH), R13 ADD $(MACHSIZE-4), R(MACH), R13
BL mmuinvalidate(SB)
/* /*
* set up domain access control and page table base * set up domain access control and page table base
*/ */
@@ -202,6 +209,14 @@ reset:
BL cacheiinv(SB) BL cacheiinv(SB)
BARRIERS BARRIERS
/*
* turn SMP on
*/
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
ORR $CpACsmp, R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
BARRIERS
/* /*
* enable caches, mmu, and high vectors * enable caches, mmu, and high vectors
*/ */

View file

@@ -242,7 +242,7 @@ launchinit(void)
} }
cachedwbse(machaddr, sizeof machaddr); cachedwbse(machaddr, sizeof machaddr);
if((mach = startcpus(conf.nmach)) < conf.nmach) if((mach = startcpus(conf.nmach)) < conf.nmach)
print("only %d cpu%s started\n", mach, mach == 1? "" : "s"); print("only %d cpu%s started\n", mach, mach == 1? "" : "s");
} }
static void static void
@@ -551,10 +551,9 @@ confinit(void)
} }
static void static void
-rebootjump(ulong entry, ulong code, ulong size)
+rebootjump(void *entry, void *code, ulong size)
{ {
-static void (*f)(ulong, ulong, ulong);
-static Lock lk;
+void (*f)(void*, void*, ulong);
intrsoff(); intrsoff();
intrcpushutdown(); intrcpushutdown();
@@ -562,17 +561,10 @@ rebootjump(ulong entry, ulong code, ulong size)
/* redo identity map */ /* redo identity map */
mmuinit1(1); mmuinit1(1);
-lock(&lk);
-if(f == nil){
-/* setup reboot trampoline function */
-f = (void*)REBOOTADDR;
-memmove(f, rebootcode, sizeof(rebootcode));
-cachedwbse(f, sizeof(rebootcode));
-}
-unlock(&lk);
+/* setup reboot trampoline function */
+f = (void*)REBOOTADDR;
+memmove(f, rebootcode, sizeof(rebootcode));
cacheuwbinv();
-l2cacheuwbinv();
(*f)(entry, code, size); (*f)(entry, code, size);
@@ -587,9 +579,9 @@ exit(int)
{ {
cpushutdown(); cpushutdown();
splfhi(); splfhi();
-if(m->machno != 0)
-rebootjump(0, 0, 0);
-archreboot();
+if(m->machno == 0)
+archreboot();
+rebootjump(0, 0, 0);
} }
/* /*
@@ -609,13 +601,13 @@ void
reboot(void *entry, void *code, ulong size) reboot(void *entry, void *code, ulong size)
{ {
writeconf(); writeconf();
-if (m->machno != 0) {
+while(m->machno != 0){
procwired(up, 0); procwired(up, 0);
sched(); sched();
} }
cpushutdown(); cpushutdown();
-delay(1000);
+delay(2000);
splfhi(); splfhi();
@@ -630,7 +622,7 @@ reboot(void *entry, void *code, ulong size)
wdogoff(); wdogoff();
/* off we go - never to return */ /* off we go - never to return */
-rebootjump(PADDR(entry), PADDR(code), size);
+rebootjump(entry, code, size);
} }
void void

View file

@@ -70,8 +70,6 @@ LIB=\
/$objtype/lib/libmp.a\ /$objtype/lib/libmp.a\
/$objtype/lib/libc.a\ /$objtype/lib/libc.a\
9:V: $p$CONF s$p$CONF
$p$CONF:DQ: $CONF.c $OBJ $LIB mkfile $p$CONF:DQ: $CONF.c $OBJ $LIB mkfile
$CC $CFLAGS '-DKERNDATE='`{date -n} $CONF.c $CC $CFLAGS '-DKERNDATE='`{date -n} $CONF.c
echo '# linking raw kernel' # H6: no headers, data segment aligned echo '# linking raw kernel' # H6: no headers, data segment aligned
@@ -123,8 +121,8 @@ init.h:D: ../port/initcode.c init9.s
reboot.h:D: rebootcode.s arm.s arm.h mem.h reboot.h:D: rebootcode.s arm.s arm.h mem.h
$AS rebootcode.s $AS rebootcode.s
-# -lc is only for memmove. -T arg is REBOOTADDR
-$LD -l -s -T0x1c00 -R4 -o reboot.out rebootcode.$O -lc
+# -T arg is REBOOTADDR
+$LD -l -s -T0x1c00 -R4 -o reboot.out rebootcode.$O
{echo 'uchar rebootcode[]={' {echo 'uchar rebootcode[]={'
xd -1x reboot.out | xd -1x reboot.out |
sed -e '1,2d' -e 's/^[0-9a-f]+ //' -e 's/ ([0-9a-f][0-9a-f])/0x\1,/g' sed -e '1,2d' -e 's/^[0-9a-f]+ //' -e 's/ ([0-9a-f][0-9a-f])/0x\1,/g'

View file

@@ -6,25 +6,46 @@
#define WFI WORD $0xe320f003 /* wait for interrupt */ #define WFI WORD $0xe320f003 /* wait for interrupt */
#define WFE WORD $0xe320f002 /* wait for event */ #define WFE WORD $0xe320f002 /* wait for event */
/*
* Turn off MMU, then copy the new kernel to its correct location
* in physical memory. Then jump to the start of the kernel.
*/
/* main(PADDR(entry), PADDR(code), size); */
TEXT main(SB), 1, $-4 TEXT main(SB), 1, $-4
MOVW $setR12(SB), R12 MOVW $setR12(SB), R12
-/* copy in arguments before stack gets unmapped */
-MOVW R0, R8 /* entry point */
-MOVW p2+4(FP), R9 /* source */
-MOVW n+8(FP), R6 /* byte count */
-/* SVC mode, interrupts disabled */
-MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R1
-MOVW R1, CPSR
-/* turn caches off */
+MOVW R0, entry+0(FP)
+CMP $0, R0
+BEQ shutdown
+MOVW entry+0(FP), R8
+MOVW code+4(FP), R9
+MOVW size+8(FP), R6
+/* round to words */
BIC $3, R8
BIC $3, R9
ADD $3, R6
BIC $3, R6
memloop:
MOVM.IA.W (R9), [R1]
MOVM.IA.W [R1], (R8)
SUB.S $4, R6
BNE memloop
shutdown:
/* clean dcache using appropriate code for armv6 or armv7 */
MRC CpSC, 0, R1, C(CpID), C(CpIDfeat), 7 /* Memory Model Feature Register 3 */
TST $0xF, R1 /* hierarchical cache maintenance? */
BNE l2wb
DSB
MOVW $0, R0
MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEall
B l2wbx
l2wb:
BL cachedwb(SB)
BL l2cacheuwb(SB)
l2wbx:
/* load entry before turning off mmu */
MOVW entry+0(FP), R8
/* disable caches */
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
BIC $(CpCdcache|CpCicache|CpCpredict), R1 BIC $(CpCdcache|CpCicache|CpCpredict), R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
@@ -39,42 +60,34 @@ TEXT main(SB), 1, $-4
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
BIC $CpCmmu, R1 BIC $CpCmmu, R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
/* continue with reboot only on cpu0 */
CPUID(R2)
BEQ bootcpu
/* other cpus wait for inter processor interrupt from cpu0 */
/* turn icache back on */
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
ORR $(CpCicache), R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
BARRIERS BARRIERS
/* turn SMP off */
MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
BIC $CpACsmp, R1
MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpAuxctl
ISB
DSB
/* have entry? */
CMP $0, R8
BNE bootcpu
/* other cpus wait for inter processor interrupt */
CPUID(R2)
dowfi: dowfi:
-WFI
-MOVW $0x40000060, R1
-ADD R2<<2, R1
-MOVW 0(R1), R0
-AND $0x10, R0
-BEQ dowfi
-MOVW $0x8000, R1
-BL (R1)
-B dowfi
+WFE /* wait for event signal */
+MOVW $0x400000CC, R1 /* inter-core .startcpu mailboxes */
+ADD R2<<4, R1 /* mailbox for this core */
+MOVW 0(R1), R8 /* content of mailbox */
+CMP $0, R8
+BEQ dowfi /* if zero, wait again */
bootcpu:
-/* set up a tiny stack for local vars and memmove args */
-MOVW R8, SP /* stack top just before kernel dest */
-SUB $20, SP /* allocate stack frame */
-/* copy the kernel to final destination */
-MOVW R8, 16(SP) /* save dest (entry point) */
-MOVW R8, R0 /* first arg is dest */
-MOVW R9, 8(SP) /* push src */
-MOVW R6, 12(SP) /* push size */
-BL memmove(SB)
-MOVW 16(SP), R8 /* restore entry point */
-/* jump to kernel physical entry point */
-ORR R8,R8
-B (R8)
-B 0(PC)
+BIC $KSEGM, R8 /* entry to physical */
+ORR $PHYSDRAM, R8
+BL (R8)
+B dowfi
+#define ICACHELINESZ 32
+#include "cache.v7.s"