pc64: fix kmap() and invlpg()

flushing the tlb once the index wraps around is not enough,
as in-use pte's can be speculatively loaded. so instead,
use invlpg() and explicitly invalidate the tlb entry of
the page just mapped.

this fixes wired mount cache corruption for reads approaching
2MB, which is the size of the KMAP window.

invlpg() was broken, using the wrong operand.
cinap_lenrek 2018-01-29 08:26:42 +01:00
parent b5362dc722
commit 83d8a24215
2 changed files with 5 additions and 10 deletions
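
For context on where the corruption showed up: kmap() hands out consecutive page-sized slots in the KMAP window, and portable kernel code uses it in matched pairs with kunmap(). The sketch below follows the portable copypage(), with the usual Plan 9 macros VA() and BY2PG assumed; a large wired read maps page after page this way, so the window index wraps once roughly 2MB of pages have been mapped.

	/* sketch after the portable copypage(); VA() yields a KMap's
	 * virtual address and BY2PG is the page size (both assumed) */
	void
	copypage(Page *f, Page *t)
	{
		KMap *ks, *kd;

		ks = kmap(f);		/* takes the next KMAP window slot */
		kd = kmap(t);		/* and the slot after it */
		memmove((void*)VA(kd), (void*)VA(ks), BY2PG);
		kunmap(kd);
		kunmap(ks);
	}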

sys/src/9/pc64/l.s

@@ -449,11 +449,8 @@ TEXT _wrmsrinst(SB), $0
 	MOVQ	BP, AX			/* BP set to -1 if traped */
 	RET
-TEXT invlpg(SB), 1, $-4		/* INVLPG va+0(FP) */
-	MOVQ	RARG, va+0(FP)
-	INVLPG	va+0(FP)
+TEXT invlpg(SB), 1, $-4
+	INVLPG	(RARG)
 	RET
 TEXT wbinvd(SB), 1, $-4
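
A note on the operand fix above: INVLPG invalidates the TLB entry that translates the effective address of its memory operand. The old sequence spilled RARG into the argument slot and pointed INVLPG at that slot, so it invalidated the page holding the stack frame rather than the page at va; INVLPG (RARG) uses the caller's address directly. The C-side declaration would be along these lines (assumed, not quoted from fns.h):

	/* assumed declaration; the routine invalidates exactly the one
	 * TLB entry covering va, leaving the rest of the TLB intact */
	void	invlpg(uintptr va);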

sys/src/9/pc64/mmu.c

@@ -485,15 +485,13 @@ kmap(Page *page)
 		return (KMap*)KADDR(pa);
 
 	x = splhi();
-	va = KMAP + ((uintptr)up->kmapindex << PGSHIFT);
+	va = KMAP + (((uintptr)up->kmapindex++ << PGSHIFT) & (KMAPSIZE-1));
 	pte = mmuwalk(m->pml4, va, 0, 1);
-	if(pte == 0 || *pte & PTEVALID)
+	if(pte == 0 || (*pte & PTEVALID) != 0)
 		panic("kmap: pa=%#p va=%#p", pa, va);
 	*pte = pa | PTEWRITE|PTEVALID;
-	up->kmapindex = (up->kmapindex + 1) % (1<<PTSHIFT);
-	if(up->kmapindex == 0)
-		mmuflushtlb();
 	splx(x);
+	invlpg(va);
 	return (KMap*)va;
 }
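
The new index arithmetic is worth spelling out: the byte offset is masked with KMAPSIZE-1, so va stays inside the 2MB window while kmapindex simply keeps incrementing, and the old modulo wrap plus wrap-time mmuflushtlb() disappear. A minimal sketch of the calculation, with PGSHIFT and KMAPSIZE given their usual pc64 values (assumed here):

	/* minimal sketch of the window arithmetic; PGSHIFT and KMAPSIZE
	 * are the kernel's constants, usual pc64 values assumed */
	enum {
		PGSHIFT	 = 12,			/* 4K pages */
		KMAPSIZE = 2*1024*1024,		/* 2MB window: one page table, 512 slots */
	};

	uintptr
	kmapva(uintptr kmapbase, unsigned index)
	{
		/* the offset wraps inside the window, so the index never
		 * needs resetting; index 512 lands back on slot 0 */
		return kmapbase + (((uintptr)index << PGSHIFT) & (KMAPSIZE-1));
	}

Because a slot is reused as soon as the offset wraps, each kmap() must invalidate its own slot immediately; that is what the invlpg(va) after splx() does.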