From dfbf774bbba142ef4286c4e230475e5eedcf46d6 Mon Sep 17 00:00:00 2001 From: Sigrid Date: Thu, 15 Oct 2020 10:30:40 +0200 Subject: [PATCH 1/5] games/nes: workaround for truncated chr --- sys/src/games/nes/nes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/src/games/nes/nes.c b/sys/src/games/nes/nes.c index 92c470f21..ba79c0bbc 100644 --- a/sys/src/games/nes/nes.c +++ b/sys/src/games/nes/nes.c @@ -85,7 +85,7 @@ loadrom(char *file, int sflag) chr = malloc(nchr * CHRSZ); if(chr == nil) sysfatal("malloc: %r"); - if(readn(fd, chr, nchr * CHRSZ) < nchr * CHRSZ) + if(readn(fd, chr, nchr * CHRSZ) < 1) sysfatal("read: %r"); }else{ nchr = 1; From bf187247380252d3f79ad7089251600b7535815e Mon Sep 17 00:00:00 2001 From: cinap_lenrek Date: Sat, 17 Oct 2020 21:28:25 +0200 Subject: [PATCH 2/5] ndb/dns: mark ns record authoritative when in our area for delegation I have the problem that i need to delegate a subdomain to another name server that is confused about its own zone (and its own name) returning unusable ns records. With this, one can make up a nameserver entry in ndb that is authoritative and owned by us for that nameserver, and then put it in the soa=delegated ns entry. This promotes the ns record in the soa=delegated to Authoritative, which avoids overriding the ns rr's from the confused server for the delegated zone. 
--- sys/src/cmd/ndb/dn.c | 3 ++- sys/src/cmd/ndb/dnarea.c | 50 ++++++++++++++++++++++------------------ 2 files changed, 30 insertions(+), 23 deletions(-) diff --git a/sys/src/cmd/ndb/dn.c b/sys/src/cmd/ndb/dn.c index 71768e7e3..5a789925f 100644 --- a/sys/src/cmd/ndb/dn.c +++ b/sys/src/cmd/ndb/dn.c @@ -641,7 +641,8 @@ dnauthdb(void) if(rp->ttl < minttl) rp->ttl = minttl; rp->auth = 1; - } + } else if(rp->type == Tns && inmyarea(rp->host->name)) + rp->auth = 1; } l = &rp->next; } diff --git a/sys/src/cmd/ndb/dnarea.c b/sys/src/cmd/ndb/dnarea.c index 3418d4686..5d459b328 100644 --- a/sys/src/cmd/ndb/dnarea.c +++ b/sys/src/cmd/ndb/dnarea.c @@ -7,36 +7,36 @@ Area *owned, *delegated; +static Area* +nameinarea(char *name, Area *s) +{ + int len; + + for(len = strlen(name); s != nil; s = s->next){ + if(s->len > len) + continue; + if(cistrcmp(s->soarr->owner->name, name + len - s->len) == 0) + if(len == s->len || name[len - s->len - 1] == '.') + return s; + } + return nil; +} + /* * true if a name is in our area */ Area* inmyarea(char *name) { - int len; Area *s, *d; - len = strlen(name); - for(s = owned; s; s = s->next){ - if(s->len > len) - continue; - if(cistrcmp(s->soarr->owner->name, name + len - s->len) == 0) - if(len == s->len || name[len - s->len - 1] == '.') - break; - } + s = nameinarea(name, owned); if(s == nil) return nil; - - /* name is in area `s' */ - for(d = delegated; d; d = d->next){ - if(d->len > len) - continue; - if(cistrcmp(d->soarr->owner->name, name + len - d->len) == 0) - if(len == d->len || name[len - d->len - 1] == '.') - return nil; /* name is in a delegated subarea */ - } - - return s; /* name is in area `s' and not in a delegated subarea */ + d = nameinarea(name, delegated); + if(d && d->len > s->len) + return nil; + return s; /* name is in owned area `s' and not in a delegated subarea */ } /* @@ -48,6 +48,9 @@ addarea(DN *dp, RR *rp, Ndbtuple *t) { Area *s; Area **l; + int len; + + len = strlen(dp->name); lock(&dnlock); if(t->val[0]) @@ -55,11 
+58,14 @@ addarea(DN *dp, RR *rp, Ndbtuple *t) else l = &owned; - for (s = *l; s != nil; s = s->next) + for (s = *l; s != nil; l = &s->next, s = s->next){ + if(s->len < len) + break; if(s->soarr->owner == dp) { unlock(&dnlock); return; /* we've already got one */ } + } /* * The area contains a copy of the soa rr that created it. @@ -67,7 +73,7 @@ addarea(DN *dp, RR *rp, Ndbtuple *t) * as the area does. */ s = emalloc(sizeof(*s)); - s->len = strlen(dp->name); + s->len = len; rrcopy(rp, &s->soarr); s->soarr->owner = dp; s->soarr->db = 1; From 0b094303f3e30007fd9fccc3df81f44cf2c49003 Mon Sep 17 00:00:00 2001 From: cinap_lenrek Date: Sat, 17 Oct 2020 21:28:56 +0200 Subject: [PATCH 3/5] ndb/dnsdebug: add -c flag to debug caching dns server behaviour --- sys/man/8/ndb | 5 ++++- sys/src/cmd/ndb/dnsdebug.c | 16 +++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/sys/man/8/ndb b/sys/man/8/ndb index af4df056d..f6da3bbb8 100644 --- a/sys/man/8/ndb +++ b/sys/man/8/ndb @@ -97,7 +97,7 @@ query, ipquery, mkhash, mkdb, mkhosts, cs, csquery, dns, dnstcp, dnsquery, dnsde .br .B ndb/dnsdebug [ -.B -rx +.B -rxc ] [ .B -f .I dbfile @@ -758,6 +758,9 @@ to use the interface and .B /lib/ndb/external database file. +The +.B -c +option enables caching which is handy for debugging the dns code. 
.PP .I Ndb/dnsgetip resolves and prints A and AAAA records without consulting diff --git a/sys/src/cmd/ndb/dnsdebug.c b/sys/src/cmd/ndb/dnsdebug.c index 092e0c7b3..650e705d9 100644 --- a/sys/src/cmd/ndb/dnsdebug.c +++ b/sys/src/cmd/ndb/dnsdebug.c @@ -56,9 +56,16 @@ main(int argc, char *argv[]) case 'f': dbfile = EARGF(usage()); break; + case 'c': + cfg.cachedb = 1; + break; case 'r': cfg.resolver = 1; break; + case 'd': + debug = 1; + traceactivity = 1; + break; case 'x': dbfile = "/lib/ndb/external"; strcpy(mntpt, "/net.alt"); @@ -73,6 +80,7 @@ main(int argc, char *argv[]) fmtinstall('R', prettyrrfmt); opendatabase(); srand(truerand()); + db2cache(1); if(cfg.resolver) squirrelserveraddrs(); @@ -89,7 +97,6 @@ main(int argc, char *argv[]) p[Blinelen(&in)-1] = 0; n = tokenize(p, f, 3); if(n>=1) { - dnpurge(); /* flush the cache */ docmd(n, f); } } @@ -457,6 +464,12 @@ docmd(int n, char **f) name = type = nil; tmpsrv = 0; + if(strcmp(f[0], "refresh") == 0){ + db2cache(1); + dnageall(0); + return; + } + if(*f[0] == '@') { if(setserver(f[0]+1) < 0) return; @@ -483,6 +496,7 @@ docmd(int n, char **f) if(name == nil) return; + if(!cfg.cachedb) dnpurge(); /* flush the cache */ doquery(name, type); if(tmpsrv) From cf8ff0e71369afdf600e36924c2d046ecf783cbf Mon Sep 17 00:00:00 2001 From: cinap_lenrek Date: Sun, 18 Oct 2020 02:51:32 +0200 Subject: [PATCH 4/5] sdnvme: handle machines with more cpu's than submit queues (thanks mischief) We used to assume a 1:1 pairing of processors to submit queues. With recent machines, we now got more cpu cores than what some nvme drives support so we need to distribute the queues across these cpu's which requires locking on command submission. There is a feature get/set command to probe the number of submit and completion queues, but we decided to just handle the submission queue create command error gracefully as it is simpler and has less chance of regression with existing setups. Thanks to mischief for investigating and writing the code. 
--- sys/src/9/pc/sdnvme.c | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/sys/src/9/pc/sdnvme.c b/sys/src/9/pc/sdnvme.c index 152dba187..1909eabdb 100644 --- a/sys/src/9/pc/sdnvme.c +++ b/sys/src/9/pc/sdnvme.c @@ -41,6 +41,7 @@ struct SQ u32int *base; WS **wait; Ctlr *ctlr; + Lock; }; struct Ctlr @@ -63,6 +64,8 @@ struct Ctlr u32int mpsshift; u32int dstrd; + u32int nsq; + CQ cq[1+1]; SQ sq[1+MAXMACH]; @@ -99,7 +102,9 @@ qcmd(WS *ws, Ctlr *ctlr, int adm, u32int opc, u32int nsid, void *mptr, void *dat if(!adm){ Retry: splhi(); - sq = &ctlr->sq[1+m->machno]; + sq = &ctlr->sq[1+(m->machno % ctlr->nsq)]; + if(conf.nmach > ctlr->nsq) + lock(sq); } else { qlock(ctlr); sq = &ctlr->sq[0]; @@ -207,7 +212,9 @@ wcmd(WS *ws) coherence(); ctlr->reg[DBell + ((sq-ctlr->sq)*2+0 << ctlr->dstrd)] = sq->tail & sq->mask; if(sq > ctlr->sq) { - assert(sq == &ctlr->sq[1+m->machno]); + assert(sq == &ctlr->sq[1+(m->machno % ctlr->nsq)]); + if(conf.nmach > ctlr->nsq) + unlock(sq); spllo(); } else qunlock(sq->ctlr); @@ -381,7 +388,7 @@ sqalloc(Ctlr *ctlr, SQ *sq, u32int lgsize) static void setupqueues(Ctlr *ctlr) { - u32int lgsize, *e; + u32int lgsize, st, *e; CQ *cq; SQ *sq; WS ws; @@ -400,6 +407,8 @@ setupqueues(Ctlr *ctlr) e[11] = 3; /* IEN | PC */ checkstatus(wcmd(&ws), "create completion queue"); + st = 0; + /* SQID[1..nmach]: submission queue per cpu */ for(i=1; i<=conf.nmach; i++){ sq = &ctlr->sq[i]; @@ -407,8 +416,19 @@ setupqueues(Ctlr *ctlr) e = qcmd(&ws, ctlr, 1, 0x01, 0, nil, sq->base, 0x1000); e[10] = i | sq->mask<<16; e[11] = (cq - ctlr->cq)<<16 | 1; /* CQID<<16 | PC */ - checkstatus(wcmd(&ws), "create submission queue"); + + st = wcmd(&ws); + if(st != 0){ + free(sq->base); + free(sq->wait); + memset(sq, 0, sizeof(*sq)); + break; + } } + + ctlr->nsq = i - 1; + if(ctlr->nsq < 1) + checkstatus(st, "create submission queues"); ilock(&ctlr->intr); ctlr->ints |= 1<<(cq - ctlr->cq); @@ -544,7 +564,7 @@ nvmeenable(SDev *sd) Ready: 
identify(ctlr); setupqueues(ctlr); - + print("%s: using %d submit queues\n", name, ctlr->nsq); poperror(); return 1; From 3cfa8326b878e3eacce5595f6b839b4650c61621 Mon Sep 17 00:00:00 2001 From: cinap_lenrek Date: Sun, 18 Oct 2020 03:05:35 +0200 Subject: [PATCH 5/5] etheriwl: don't break controller on command flush timeout ori and echoline are reporting regression on some 6000 cards; which sometimes time out on crystal calibration command; which is expected by the driver. but the new code used to force a device reset on any command timeout. reverting to old behaviour for now until we have a chance to investigate. --- sys/src/9/pc/etheriwl.c | 1 - 1 file changed, 1 deletion(-) diff --git a/sys/src/9/pc/etheriwl.c b/sys/src/9/pc/etheriwl.c index f7b353242..93285850e 100644 --- a/sys/src/9/pc/etheriwl.c +++ b/sys/src/9/pc/etheriwl.c @@ -3513,7 +3513,6 @@ cmd(Ctlr *ctlr, uint code, uchar *data, int size) if((err = qcmd(ctlr, 4, code, data, size, nil)) != nil || (err = flushq(ctlr, 4)) != nil){ print("#l%d: cmd %ud: %s\n", ctlr->edev->ctlrno, code, err); - ctlr->broken = 1; return err; } return nil;