pc, pc64: new MTRR code supporting AMD TOM2 MSR and fixed mtrr ranges

The new MTRR code handles overlapping ranges
and supports AMD specific TOM2 MSR.

The format in /dev/archctl now shows only the
effective cache ranges, without exposing the
low-level registers.
This commit is contained in:
cinap_lenrek 2020-11-21 16:26:46 +01:00
parent e6684dbfda
commit 2594b99629

View file

@ -1,11 +1,3 @@
/*
* memory-type region registers.
*
* due to the possibility of extended addresses (for PAE)
* as large as 36 bits coming from the e820 memory map and the like,
* we'll use vlongs to hold addresses and lengths, even though we don't
* implement PAE in Plan 9.
*/
#include "u.h" #include "u.h"
#include "../port/lib.h" #include "../port/lib.h"
#include "mem.h" #include "mem.h"
@ -20,25 +12,59 @@ enum {
*/ */
MTRRPhysBase0 = 0x200, MTRRPhysBase0 = 0x200,
MTRRPhysMask0 = 0x201, MTRRPhysMask0 = 0x201,
MTRRDefaultType = 0x2FF,
MTRRCap = 0xFE,
Nmtrr = 8,
/* cpuid extended function codes */ MTRRDefaultType = 0x2FF,
Exthighfunc = 1ul << 31, Deftype = 0xFF, /* default MTRR type */
Extprocsigamd, Deffixena = 1<<10, /* fixed-range MTRR enable */
Extprocname0, Defena = 1<<11, /* MTRR enable */
Extprocname1,
Extprocname2, MTRRCap = 0xFE,
Exttlbl1, Capvcnt = 0xFF, /* mask: # of variable-range MTRRs we have */
Extl2, Capwc = 1<<8, /* flag: have write combining? */
Extapm, Capfix = 1<<10, /* flag: have fixed MTRRs? */
Extaddrsz,
AMDK8SysCfg = 0xC0010010,
Tom2Enabled = 1<<21,
Tom2ForceMemTypeWB = 1<<22,
AMDK8TopMem2 = 0xC001001D,
}; };
enum { enum {
CR4PageGlobalEnable = 1 << 7, Nvarreg = 8,
CR0CacheDisable = 1 << 30, Nfixreg = 11*8,
Nranges = Nfixreg+Nvarreg*2+1,
};
typedef struct Varreg Varreg;
struct Varreg {
vlong base;
vlong mask;
};
typedef struct Fixreg Fixreg;
struct Fixreg {
int msr;
ulong base;
ulong size;
};
typedef struct State State;
struct State {
uvlong mask;
vlong cap;
vlong def;
vlong tom2;
int nvarreg;
Varreg varreg[Nvarreg];
vlong fixreg[Nfixreg/8];
};
typedef struct Range Range;
struct Range {
uvlong base;
uvlong size;
int type;
}; };
enum { enum {
@ -51,37 +77,16 @@ enum {
Writeback = 6, Writeback = 6,
}; };
enum {
Capvcnt = 0xff, /* mask: # of variable-range MTRRs we have */
Capwc = 1<<8, /* flag: have write combining? */
Capfix = 1<<10, /* flag: have fixed MTRRs? */
Deftype = 0xff, /* default MTRR type */
Deffixena = 1<<10, /* fixed-range MTRR enable */
Defena = 1<<11, /* MTRR enable */
};
typedef struct Mtrreg Mtrreg;
typedef struct Mtrrop Mtrrop;
struct Mtrreg {
vlong base;
vlong mask;
};
static char *types[] = { static char *types[] = {
[Uncacheable] "uc", [Uncacheable] "uc",
[Writecomb] "wc", [Writecomb] "wc",
[Unknown1] "uk1", [Unknown1] "uk1",
[Unknown2] "uk2", [Unknown2] "uk2",
[Writethru] "wt", [Writethru] "wt",
[Writeprot] "wp", [Writeprot] "wp",
[Writeback] "wb", [Writeback] "wb",
nil
}; };
static int dosync;
static Mtrreg mtrreg[Nmtrr];
static char * static char *
type2str(int type) type2str(int type)
{ {
@ -93,137 +98,635 @@ type2str(int type)
static int static int
str2type(char *str) str2type(char *str)
{ {
char **p; int type;
for(p = types; *p != nil; p++) for(type = 0; type < nelem(types); type++){
if (strcmp(str, *p) == 0) if(strcmp(str, types[type]) == 0)
return p - types; return type;
}
return -1; return -1;
} }
/*
 * decode variable-range MTRR pair index from state s into *rp.
 * returns 0 when the register is disabled, 1 when *rp is valid.
 */
static int
getvarreg(State *s, Range *rp, int index)
{
	Varreg *reg = &s->varreg[index];

	/* bit 11 of the PhysMask MSR is the valid bit */
	if((reg->mask & (1<<11)) == 0)
		return 0;
	rp->base = reg->base & ~0xFFFULL;
	rp->type = reg->base & 0xFF;	/* low byte of PhysBase holds the memory type */
	/* mask extends to the physical address width; xor recovers the range size */
	rp->size = (s->mask ^ (reg->mask & ~0xFFFULL)) + 1;
	return 1;
}
/*
 * encode *rp into variable-range MTRR pair index of state s.
 * a nil or zero-size range disables (clears) the register.
 * assumes rp->size is a power of two and rp->base is aligned to it.
 */
static void
setvarreg(State *s, Range *rp, int index)
{
	Varreg *reg = &s->varreg[index];

	if(rp == nil || rp->size == 0){
		reg->base = 0;
		reg->mask = 0;
		return;
	}
	reg->base = rp->base | (rp->type & 0xFF);
	reg->mask = (s->mask & ~(rp->size-1)) | 1<<11;	/* 1<<11: valid bit */
}
/*
 * fixed-range MTRR MSRs: each MSR packs 8 byte-wide type fields,
 * one per consecutive sub-range of the given size starting at base.
 * together they cover the first megabyte of physical address space.
 */
static Fixreg fixreg[Nfixreg/8] = {
	0x250, 0x00000, 0x10000,	/* 8 x 64K: 0-512K */
	0x258, 0x80000, 0x04000,	/* 8 x 16K: 512K-640K */
	0x259, 0xA0000, 0x04000,	/* 8 x 16K: 640K-768K */
	0x268, 0xC0000, 0x01000,	/* 8 x 4K ranges from here on */
	0x269, 0xC8000, 0x01000,
	0x26A, 0xD0000, 0x01000,
	0x26B, 0xD8000, 0x01000,
	0x26C, 0xE0000, 0x01000,
	0x26D, 0xE8000, 0x01000,
	0x26E, 0xF0000, 0x01000,
	0x26F, 0xF8000, 0x01000,	/* last 4K range ends at 1MB */
};
/*
 * decode fixed-range slot index (0..Nfixreg-1) from state s into *rp.
 * always returns 1: fixed-range slots have no valid bit.
 */
static int
getfixreg(State *s, Range *rp, int index)
{
	Fixreg *reg;
	int msr, slot;

	msr = index >> 3;	/* which MSR */
	slot = index & 7;	/* which byte within the MSR */
	reg = &fixreg[msr];
	rp->size = reg->size;
	rp->base = reg->base + slot * reg->size;
	rp->type = ((uvlong)s->fixreg[msr] >> 8*slot) & 0xFF;
	return 1;
}
/*
 * set the memory type of fixed-range slot index in the cached
 * MSR image in state s. a nil or zero-size range resets the
 * slot to Uncacheable.
 */
static void
setfixreg(State *s, Range *rp, int index)
{
	Fixreg *reg = &fixreg[index >> 3];
	int type;

	index &= 7;	/* byte slot within the MSR */
	if(rp == nil || rp->size == 0)
		type = Uncacheable;
	else
		type = rp->type & 0xFF;
	/* replace the slot's byte in the 64-bit MSR image */
	s->fixreg[reg - fixreg] &= ~(0xFFULL << 8*index);
	s->fixreg[reg - fixreg] |= (uvlong)type << 8*index;
}
/*
 * combined effective type of two overlapping variable MTRR ranges:
 * identical types combine to themselves, UC dominates everything,
 * WT+WB yields WT; any other overlap is undefined (-1).
 */
static int
preftype(int a, int b)
{
	if(a == b)
		return a;
	if(a == Uncacheable || b == Uncacheable)
		return Uncacheable;
	if((a == Writethru && b == Writeback)
	|| (b == Writethru && a == Writeback))
		return Writethru;
	return -1;	/* undefined combination */
}
/*
 * effective memory type at physical address pa under state s.
 * when new is non-nil it is overlaid as a pending range that
 * takes precedence over all registers.
 */
static int
gettype(State *s, uvlong pa, Range *new)
{
	int i, type;
	Range r;

	if(new != nil && pa >= new->base && pa < new->base + new->size)
		return new->type;
	/* MTRRs disabled: everything is uncacheable */
	if((s->def & Defena) == 0)
		return Uncacheable;
	/* fixed ranges cover the first megabyte and take precedence */
	if(pa < 0x100000 && (s->def & Deffixena) != 0){
		for(i = 0; i < Nfixreg; i++){
			if(getfixreg(s, &r, i) && pa < r.base + r.size && pa >= r.base)
				return r.type;
		}
	}
	/* AMD TOM2: [4GB, tom2) is forced write-back */
	if(pa >= 0x100000000ULL && pa < s->tom2)
		return Writeback;
	type = -1;
	for(i = 0; i < s->nvarreg; i++){
		if(!getvarreg(s, &r, i))
			continue;
		/* overlapping variable ranges combine via preftype() */
		if((pa & -r.size) == r.base)
			type = (type == -1) ? r.type : preftype(r.type, type);
	}
	if(type == -1)
		type = s->def & Deftype;	/* fall back to default type */
	return type;
}
/*
 * first physical address above pa where the effective memory
 * type could change under state s; used to walk the address
 * space as a sequence of uniform ranges. when new is non-nil
 * its boundaries are inserted as additional break points.
 */
static uvlong
getnext(State *s, uvlong pa, Range *new)
{
	uvlong end;
	Range r;
	int i;

	if(new != nil){
		/* clip the hardware-derived boundary at the edges of the new range */
		end = getnext(s, pa, nil);
		if(pa < new->base && end > new->base)
			return new->base;
		if(pa < new->base + new->size && end > new->base + new->size)
			return new->base + new->size;
		return end;
	}
	end = s->mask+1;	/* end of physical address space */
	if((s->def & Defena) == 0)
		return end;
	/* inside a fixed range, the type is uniform to its end */
	if(pa < 0x100000 && (s->def & Deffixena) != 0){
		for(i = 0; i < Nfixreg; i++){
			if(getfixreg(s, &r, i) && pa < r.base + r.size && pa >= r.base)
				return r.base + r.size;
		}
	}
	/* inside the TOM2 region, uniform write-back up to tom2 */
	if(pa >= 0x100000000ULL && pa < s->tom2)
		return s->tom2;
	for(i = 0; i < s->nvarreg; i++){
		if(!getvarreg(s, &r, i))
			continue;
		if((pa & -r.size) == r.base)
			r.base += r.size;	/* pa inside: next break at range end */
		else if(r.base <= pa)
			continue;		/* range entirely below pa */
		if(r.base < end)
			end = r.base;
	}
	/* the 4GB boundary (TOM2) is always a potential break point */
	if(pa < 0x100000000ULL && end > 0x100000000ULL)
		end = 0x100000000ULL;
	return end;
}
/* cpuid extended function codes */
enum {
	Exthighfunc = 1ul << 31,	/* highest supported extended function */
	Extprocsigamd,
	Extprocname0,
	Extprocname1,
	Extprocname2,
	Exttlbl1,
	Extl2,
	Extapm,
	Extaddrsz,			/* reports physical address width in al */
};
static uvlong static uvlong
physmask(void) physmask(void)
{ {
ulong regs[4]; ulong regs[4];
static vlong mask = -1; uvlong mask;
if (mask != -1)
return mask;
cpuid(Exthighfunc, regs); cpuid(Exthighfunc, regs);
if(regs[0] >= Extaddrsz) { /* ax */ if(regs[0] >= Extaddrsz) { /* ax */
cpuid(Extaddrsz, regs); cpuid(Extaddrsz, regs);
mask = (1LL << (regs[0] & 0xFF)) - 1; /* ax */ mask = (1ULL << (regs[0] & 0xFF)) - 1; /* ax */
} else { } else {
mask &= (1LL << 36) - 1; mask = (1ULL << 36) - 1;
} }
return mask; return mask;
} }
static int static int
ispow2(uvlong ul) getstate(State *s)
{ {
return (ul & (ul - 1)) == 0; vlong v;
int i;
s->mask = physmask();
if(rdmsr(MTRRDefaultType, &s->def) < 0)
return -1;
if(rdmsr(MTRRCap, &s->cap) < 0)
return -1;
if(s->cap & Capfix){
for(i = 0; i < nelem(fixreg); i++){
if(rdmsr(fixreg[i].msr, &s->fixreg[i]) < 0)
return -1;
}
} else {
s->def &= ~(vlong)Deffixena;
}
s->nvarreg = s->cap & Capvcnt;
if(s->nvarreg > Nvarreg)
s->nvarreg = Nvarreg;
for(i = 0; i < s->nvarreg; i++){
if(rdmsr(MTRRPhysBase0 + 2*i, &s->varreg[i].base) < 0)
return -1;
if(rdmsr(MTRRPhysMask0 + 2*i, &s->varreg[i].mask) < 0)
return -1;
}
if(strcmp(m->cpuidid, "AuthenticAMD") != 0
|| m->cpuidfamily < 15
|| rdmsr(AMDK8SysCfg, &v) < 0
|| (v & (Tom2Enabled|Tom2ForceMemTypeWB)) != (Tom2Enabled|Tom2ForceMemTypeWB)
|| rdmsr(AMDK8TopMem2, &s->tom2) < 0)
s->tom2 = 0;
else {
s->tom2 &= s->mask;
s->tom2 &= -0x800000LL;
}
return 0;
} }
/* true if mtrr is valid */ enum {
static int CR4PageGlobalEnable = 1 << 7,
mtrrdec(Mtrreg *mtrr, uvlong *ptr, uvlong *size, int *type) CR0CacheDisable = 1 << 30,
{ };
*ptr = mtrr->base & ~(BY2PG-1);
*type = mtrr->base & 0xff;
*size = (physmask() ^ (mtrr->mask & ~(BY2PG-1))) + 1;
return (mtrr->mask >> 11) & 1;
}
static void static void
mtrrenc(Mtrreg *mtrr, uvlong ptr, uvlong size, int type, int ok) putstate(State *s)
{ {
mtrr->base = ptr | (type & 0xff);
mtrr->mask = (physmask() & ~(size - 1)) | (ok? 1<<11: 0);
}
/*
* i is the index of the MTRR, and is multiplied by 2 because
* mask and base offsets are interleaved.
*/
static void
mtrrget(Mtrreg *mtrr, uint i)
{
rdmsr(MTRRPhysBase0 + 2*i, &mtrr->base);
rdmsr(MTRRPhysMask0 + 2*i, &mtrr->mask);
}
static void
mtrrput(Mtrreg *mtrr, uint i)
{
wrmsr(MTRRPhysBase0 + 2*i, mtrr->base);
wrmsr(MTRRPhysMask0 + 2*i, mtrr->mask);
}
static int
mtrrvcnt(void)
{
vlong cap;
int vcnt;
rdmsr(MTRRCap, &cap);
vcnt = cap & Capvcnt;
if(vcnt > Nmtrr)
vcnt = Nmtrr;
return vcnt;
}
static int
mtrrgetall(void)
{
int i, vcnt;
vcnt = mtrrvcnt();
for(i = 0; i < vcnt; i++)
mtrrget(&mtrreg[i], i);
return vcnt;
}
static void
mtrrputall(void)
{
int s, i, vcnt;
ulong cr0, cr4; ulong cr0, cr4;
vlong def; int i, x;
s = splhi(); x = splhi();
cr4 = getcr4(); /* disable cache */
putcr4(cr4 & ~CR4PageGlobalEnable);
cr0 = getcr0(); cr0 = getcr0();
wbinvd();
putcr0(cr0 | CR0CacheDisable); putcr0(cr0 | CR0CacheDisable);
wbinvd(); wbinvd();
rdmsr(MTRRDefaultType, &def);
wrmsr(MTRRDefaultType, def & ~(vlong)Defena);
vcnt = mtrrvcnt(); /* disable PGE */
for(i=0; i<vcnt; i++) cr4 = getcr4();
mtrrput(&mtrreg[i], i); putcr4(cr4 & ~CR4PageGlobalEnable);
/* flush tlb */
putcr3(getcr3());
/* disable MTRRs */
wrmsr(MTRRDefaultType, s->def & ~(vlong)(Defena|Deffixena|Deftype));
wbinvd(); wbinvd();
wrmsr(MTRRDefaultType, def);
/* write all registers */
if(s->cap & Capfix){
for(i = 0; i < nelem(fixreg); i++)
wrmsr(fixreg[i].msr, s->fixreg[i]);
}
for(i = 0; i < s->nvarreg; i++){
wrmsr(MTRRPhysBase0 + 2*i, s->varreg[i].base);
wrmsr(MTRRPhysMask0 + 2*i, s->varreg[i].mask);
}
/* flush tlb */
putcr3(getcr3());
/* enable MTRRs */
wrmsr(MTRRDefaultType, s->def);
/* reenable cache */
putcr0(cr0); putcr0(cr0);
/* reenable PGE */
putcr4(cr4); putcr4(cr4);
splx(s); splx(x);
} }
/*
 * bit index of the least-significant set bit of x;
 * 64 when x is zero.
 */
static int
fls64(uvlong x)
{
	int n;

	n = 0;
	while(n < 64 && (x & (1ULL<<n)) == 0)
		n++;
	return n;
}
/*
 * bit index of the most-significant set bit of x;
 * 0 when x is zero (same as when only bit 0 is set —
 * callers never pass zero where the distinction matters).
 */
static int
fms64(uvlong x)
{
	int n;

	if(x == 0)
		return 0;
	n = 63;
	while((x & (1ULL<<n)) == 0)
		n--;
	return n;
}
/*
 * allocate variable MTRRs for range r, starting at register
 * index. returns the next free index, or -1 when the range
 * cannot be represented. when doit is zero only counts the
 * registers needed without writing any state.
 */
static int
range2varreg(State *s, Range r, int index, int doit)
{
	uvlong len;

	if(index < 0)
		return -1;	/* propagate earlier failure */
	/*
	 * below 1MB the fixed registers take precedence, so the
	 * variable range can be extended down to 0 for free —
	 * this improves power-of-two alignment of base.
	 */
	if(r.base <= 0x100000 && (s->def & Deffixena) != 0){
		r.size += r.base;
		r.base = 0;
	}
	/* ranges fully inside the TOM2 window need no registers, but must be WB */
	if(r.base >= 0x100000000ULL && r.base <= s->tom2){
		if(r.base + r.size <= s->tom2){
			if(r.type != Writeback)
				return -1;
			return index;
		}
	}
	/*
	 * split the range into naturally aligned power-of-two
	 * chunks, one variable register each.
	 */
	len = r.size;
	while(len){
		if(index >= s->nvarreg)
			return -1;	/* out of registers */
		if(fls64(r.base) > fms64(len))
			r.size = 1ULL << fms64(len);	/* limited by remaining length */
		else
			r.size = 1ULL << fls64(r.base);	/* limited by base alignment */
		if(doit)
			setvarreg(s, &r, index);
		index++;
		len -= r.size;
		r.base += r.size;
	}
	return index;
}
static int ranges2varregs(State*, Range*, int, int, int);
/*
 * try to combine same-type ranges that are split by
 * higher-precedence ranges: cover the whole span with one
 * register of the lower-precedence type, then overlay the
 * higher-precedence pieces on top. returns the next free
 * register index or -1 when this strategy does not apply.
 */
static int
ranges2varregscomb(State *s, Range *rp, int nr, int index, int doit)
{
	Range rr;
	int i, j;

	if(nr < 2 || rp[0].type == rp[1].type)
		return -1;
	rr = rp[0];
	/* pick the lower-precedence of the first two types as background */
	if(preftype(rr.type, rp[1].type) == rr.type)
		rr.type = rp[1].type;
	for(j = 1; j < nr; j++){
		/* every range must either match or override the background type */
		if(rp[j].type != rr.type
		&& preftype(rp[j].type, rr.type) != rp[j].type)
			return -1;
		rr.size += rp[j].size;
	}
	/* one register (set) for the combined background range */
	i = ranges2varregs(s, &rr, 1, index, doit);
	/* then overlay the ranges that differ from the background */
	for(j = 0; j < nr && i >= index; j++){
		if(rp[j].type != rr.type)
			i = range2varreg(s, rp[j], i, doit);
	}
	return i;
}
/*
 * map nr ranges onto variable MTRRs starting at register index,
 * searching recursively for the split that uses the fewest
 * registers. returns the next free index or -1 on failure.
 * when doit is zero the search only counts registers; the best
 * solution is replayed with doit set.
 */
static int
ranges2varregs(State *s, Range *rp, int nr, int index, int doit)
{
	int i, j, k;

	if(nr == 1){
		/* a range matching the default type needs no register at all */
		if(rp->type == (s->def & Deftype))
			return index;
		return range2varreg(s, *rp, index, doit);
	}
	/* try combining */
	i = ranges2varregscomb(s, rp, nr, index, doit);
	/*
	 * now see if we can find a better solution using
	 * different splittings.
	 */
	for(k = 1; k < nr; k++){
		/* cost the split [0,k) + [k,nr) without writing state */
		j = ranges2varregs(s, rp+k, nr-k,
			ranges2varregs(s, rp, k, index, 0), 0);
		if(j < 0)
			continue;
		if(i < 0 || j < i)
			/* better split found; replay it for real when doit is set */
			i = doit ? ranges2varregs(s, rp+k, nr-k,
				ranges2varregs(s, rp, k, index, 1), 1) : j;
	}
	return i;
}
/*
 * set all fixed-range slots overlapped by r to r's type.
 * returns -1 when r only partially covers a slot (fixed
 * slots cannot be split), 0 on success.
 */
static int
range2fixreg(State *s, Range r)
{
	Range rr;
	int i;

	for(i = 0; i < Nfixreg; i++){
		if(!getfixreg(s, &rr, i) || rr.base + rr.size <= r.base)
			continue;	/* slot entirely below r */
		if(rr.base >= r.base + r.size)
			break;		/* slots are ordered; past the end of r */
		/* r must cover the whole slot */
		if(r.base > rr.base || r.base + r.size < rr.base + rr.size)
			return -1;
		rr.type = r.type;
		setfixreg(s, &rr, i);
	}
	return 0;
}
/*
 * program state s so that it realizes the nr effective cache
 * ranges in rp (which must cover the whole address space in
 * ascending order). returns -1 when the ranges cannot be
 * represented with the available registers.
 */
static int
setranges(State *s, Range *rp, int nr)
{
	int i, j;

	if(nr < 1 || nr > Nranges)
		return -1;
	s->def &= ~(vlong)(Defena|Deffixena|Deftype);
	i = 0;
	/* a single all-UC range means MTRRs can stay disabled entirely */
	if(rp[0].size != s->mask+1 || rp[0].type != Uncacheable){
		s->def |= Defena;
		/* first handle ranges below 1MB using fixed registers */
		if(rp[0].size < 0x100000 && (s->cap & Capfix) != 0){
			s->def |= Deffixena;
			for(i = 0; i < Nfixreg; i++)
				setfixreg(s, nil, i);	/* reset all slots to UC */
			while(nr > 0 && rp->base < 0x100000){
				if(range2fixreg(s, *rp) < 0)
					return -1;
				/* range straddles 1MB: keep it for the variable registers too */
				if(rp->base + rp->size > 0x100000)
					break;
				rp++;
				nr--;
			}
		}
		/* remaining ranges go to variable registers */
		if(nr > 0){
			/* make sure the recursive search doesn't explode */
			if(nr > Nvarreg+1)
				return -1;
			/* try with UC default type */
			s->def = (s->def & ~(vlong)Deftype) | Uncacheable;
			i = ranges2varregs(s, rp, nr, 0, 1);
			/* try with WB default type, don't commit it yet */
			s->def = (s->def & ~(vlong)Deftype) | Writeback;
			j = ranges2varregs(s, rp, nr, 0, 0);
			if(j < 0 || (i >= 0 && i <= j)){
				/* WB failed or is no better: keep the UC solution */
				s->def = (s->def & ~(vlong)Deftype) | Uncacheable;
			} else {
				/* WB default is better, do it! */
				i = ranges2varregs(s, rp, nr, 0, 1);
			}
			if(i < 0)
				return -1;
		}
	}
	/* clear unused variable registers */
	for(; i < s->nvarreg; i++)
		setvarreg(s, nil, i);
	return 0;
}
/*
 * verify that state s realizes exactly the nr ranges in rp:
 * walk every uniform sub-range and compare the effective type.
 * returns 0 when all match, -1 otherwise.
 */
static int
checkranges(State *s, Range *rp, int nr)
{
	Range *r, *re;
	uvlong pa, top;

	for(r = rp, re = rp + nr; r < re; r++){
		top = r->base + r->size;
		for(pa = r->base; pa < top; pa = getnext(s, pa, nil))
			if(gettype(s, pa, nil) != r->type)
				return -1;
	}
	return 0;
}
/*
 * decode state s into a list of effective cache ranges in rp
 * (capacity nr), merging adjacent same-type spans. when new is
 * non-nil it is overlaid as a pending range. returns the number
 * of ranges produced, or -1 when rp is too small.
 */
static int
getranges(State *s, Range *rp, int nr, Range *new)
{
	uvlong base, next;
	Range *rs, *re;
	int type;

	rs = rp;
	re = rp + nr;
	for(base = 0; base <= s->mask; base = next) {
		if(rp >= re)
			return -1;	/* out of space */
		type = gettype(s, base, new);
		next = getnext(s, base, new);
		/* coalesce consecutive sub-ranges of the same type */
		while(next <= s->mask && (gettype(s, next, new) == type))
			next = getnext(s, next, new);
		rp->base = base;
		rp->size = next - base;
		rp->type = type;
		rp++;
	}
	return rp - rs;
}
static int dosync;		/* flag: CPUs should load cpu0state on the next clock tick */
static QLock mtrrlk;		/* serializes mtrr() and mtrrprint() */
static State cpu0state;		/* canonical MTRR state; mask==0 means MTRRs unsupported */
static Range ranges[Nranges];	/* scratch range list, protected by mtrrlk */
/*
 * public interface: request cache type tstr for [base, base+size).
 * recomputes the effective range list with the new range overlaid,
 * reprograms the cached state and triggers all CPUs to load it via
 * mtrrclock(). returns nil on success or a static error string.
 */
char*
mtrr(uvlong base, uvlong size, char *tstr)
{
	static State newstate;	/* protected by mtrrlk */
	Range new;
	int nr;

	if(cpu0state.mask == 0)
		return "mtrr not supported";
	if(size < 0x1000)
		return "size too small";
	if((base | size) & 0xFFF)
		return "base or size not page aligned";
	if(base & ~cpu0state.mask)
		return "base out of range";
	if(base + size > cpu0state.mask+1)
		return "size out of range";
	new.base = base;
	new.size = size;
	if((new.type = str2type(tstr)) < 0)
		return "bad cache type";
	qlock(&mtrrlk);
	/* work on a copy so failure leaves cpu0state untouched */
	newstate = cpu0state;
	nr = getranges(&newstate, ranges, Nranges, &new);
	/* program the copy, then verify it reproduces the requested ranges */
	if(setranges(&newstate, ranges, nr) < 0
	|| checkranges(&newstate, ranges, nr) < 0){
		qunlock(&mtrrlk);
		return "cache range not satisfiable";
	}
	cpu0state = newstate;
	coherence();
	dosync = 1;		/* tell other CPUs to resync */
	mtrrclock();		/* join the barrier ourselves */
	qunlock(&mtrrlk);
	return nil;
}
/*
 * format the effective cache ranges into buf for /dev/archctl.
 * returns the number of bytes written (0 when MTRRs are
 * unsupported).
 */
int
mtrrprint(char *buf, long bufsize)
{
	char *cp, *ep;
	int i, nr;

	if(cpu0state.mask == 0)
		return 0;
	cp = buf;
	ep = buf + bufsize;
	qlock(&mtrrlk);
	nr = getranges(&cpu0state, ranges, Nranges, nil);
	for(i = 0; i < nr; i++){
		cp = seprint(cp, ep, "cache %#.16llux %15llud %s\n",
			ranges[i].base,
			ranges[i].size,
			type2str(ranges[i].type));
	}
	qunlock(&mtrrlk);
	return cp - buf;
}
/* called from clock interrupt */
void void
mtrrclock(void) /* called from clock interrupt */ mtrrclock(void)
{ {
static Ref bar1, bar2; static Ref bar1, bar2;
int s; int x;
if(dosync == 0) if(dosync == 0 || cpu0state.mask == 0)
return; return;
s = splhi(); x = splhi();
/* /*
* wait for all CPUs to sync here, so that the MTRR setup gets * wait for all CPUs to sync here, so that the MTRR setup gets
@ -233,7 +736,7 @@ mtrrclock(void) /* called from clock interrupt */
while(bar1.ref < conf.nmach) while(bar1.ref < conf.nmach)
microdelay(10); microdelay(10);
mtrrputall(); putstate(&cpu0state);
/* /*
* wait for all CPUs to sync up again, so that we don't continue * wait for all CPUs to sync up again, so that we don't continue
@ -248,128 +751,21 @@ mtrrclock(void) /* called from clock interrupt */
decref(&bar2); decref(&bar2);
dosync = 0; dosync = 0;
splx(s); splx(x);
}
static char*
mtrr0(uvlong base, uvlong size, char *tstr)
{
int i, vcnt, slot, type, mtype, mok;
vlong def, cap;
uvlong mp, msize;
if(!(m->cpuiddx & Mtrr))
return "mtrrs not supported";
if(base & (BY2PG-1) || size & (BY2PG-1) || size == 0)
return "mtrr base or size not 4k aligned or zero size";
if(!ispow2(size))
return "mtrr size not power of 2";
if(base & (size - 1))
return "mtrr base not naturally aligned";
if((type = str2type(tstr)) == -1)
return "mtrr bad type";
rdmsr(MTRRCap, &cap);
rdmsr(MTRRDefaultType, &def);
switch(type){
default:
return "mtrr unknown type";
case Writecomb:
if(!(cap & Capwc))
return "mtrr type wc (write combining) unsupported";
/* fallthrough */
case Uncacheable:
case Writethru:
case Writeprot:
case Writeback:
break;
}
vcnt = mtrrgetall();
slot = -1;
for(i = 0; i < vcnt; i++){
mok = mtrrdec(&mtrreg[i], &mp, &msize, &mtype);
if(slot == -1 && !mok)
slot = i; /* good, but look further for exact match */
if(mok && mp == base && msize == size){
slot = i;
break;
}
}
if(slot == -1)
return "no free mtrr slots";
mtrrenc(&mtrreg[slot], base, size, type, 1);
coherence();
dosync = 1;
mtrrclock();
return nil;
}
char*
mtrr(uvlong base, uvlong size, char *tstr)
{
static QLock mtrrlk;
char *err;
qlock(&mtrrlk);
err = mtrr0(base, size, tstr);
qunlock(&mtrrlk);
return err;
}
int
mtrrprint(char *buf, long bufsize)
{
int i, n, vcnt, type;
uvlong base, size;
Mtrreg mtrr;
vlong def;
if(!(m->cpuiddx & Mtrr))
return 0;
rdmsr(MTRRDefaultType, &def);
n = snprint(buf, bufsize, "cache default %s\n",
type2str(def & Deftype));
vcnt = mtrrvcnt();
for(i = 0; i < vcnt; i++){
mtrrget(&mtrr, i);
if (mtrrdec(&mtrr, &base, &size, &type))
n += snprint(buf+n, bufsize-n,
"cache 0x%llux %llud %s\n",
base, size, type2str(type));
}
return n;
} }
/* called from cpuidentify() */
void void
mtrrsync(void) mtrrsync(void)
{ {
static vlong cap0, def0; State s;
vlong cap, def;
rdmsr(MTRRCap, &cap); if(getstate(&s) < 0)
rdmsr(MTRRDefaultType, &def); return;
if(cpu0state.mask == 0){
if(m->machno == 0){ cpu0state = s;
cap0 = cap; coherence();
def0 = def;
mtrrgetall();
return; return;
} }
putstate(&cpu0state);
if(cap0 != cap)
print("mtrrcap%d: %lluX %lluX\n",
m->machno, cap0, cap);
if(def0 != def)
print("mtrrdef%d: %lluX %lluX\n",
m->machno, def0, def);
mtrrputall();
} }