Plan 9 from Bell Labs’s /usr/web/sources/extra/9hist/bitsy/mmu.c

Copyright © 2021 Plan 9 Foundation.
Distributed under the MIT License.


## diffname bitsy/mmu.c 2000/0902
## diff -e /dev/null /n/emeliedump/2000/0902/sys/src/9/bitsy/mmu.c
0a
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"io.h"
#include	"ureg.h"
#include	"../port/error.h"

void
putmmu(ulong va, ulong pa, Page*)
{
	USED(va, pa);
}

void
mmurelease(Proc* proc)
{
	USED(proc);
}

void
mmuswitch(Proc* proc)
{
	USED(proc);
}
.
## diffname bitsy/mmu.c 2000/0905
## diff -e /n/emeliedump/2000/0902/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0905/sys/src/9/bitsy/mmu.c
10a
mmuinit(void)
{
}

void
.
## diffname bitsy/mmu.c 2000/0906
## diff -e /n/emeliedump/2000/0905/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0906/sys/src/9/bitsy/mmu.c
12a
	ulong a, e;

	/* set up the domain register to cause all domains to obey pte access bits */
	putdac(0x55555555);

	/* get a prototype level 1 page */
	l1page = xspanalloc(BY2PG, 16*1024, 0);
	memset(l1page, 0, BY2PG);

	/* map DRAM */
	e = PHYSDRAM0 + BY2PG*con
	for(
	/* map zeros */
	/* map flash */
.
9a
/* real protection bits */
enum
{
	Small_Page=	(2<<0),
	Large_Page=	(1<<0),
	Cached=		(1<<3),
	Buffered=	(1<<2),
	UserRO=		(0xAA<<4),
	UserRW=		(0xFF<<4),
	KernelRW=	(0x55<<4),
};


/*
 *  table to map fault.c bits to physical bits
 */
static ulong phystrans[8] =
{
	[PTEVALID]			Small_Page|Cached|Buffered|UserRO,
	[PTEVALID|PTEWRITE]		Small_Page|Cached|Buffered|UserRW,
	[PTEVALID|UNCACHED]		Small_Page|UserRO,
	[PTEVALID|UNCACHED|PTEWRITE]	Small_Page|UserRW,
};

ulong *l1page;

/*
 *  We map all of memory, flash, and the zeros area with sections.
 *  Special use space is mapped on the fly with regmap.
 */
.
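
The phystrans table above is the whole PTE story at this stage: the portable fault code hands down a few soft bits, and this layer looks up the matching SA-1100 small-page descriptor bits. A minimal user-space sketch of the same lookup, with the soft-bit values assumed for illustration (they live in the portable kernel headers, not in this file; standard C99 initializer syntax is used here, where the Plan 9 compiler omits the =):

	#include <stdio.h>

	/* hardware small-page bits, copied from the enum above */
	#define Small_Page	(2<<0)
	#define Cached		(1<<3)
	#define Buffered	(1<<2)
	#define UserRO		(0xAA<<4)
	#define UserRW		(0xFF<<4)

	/* soft bits: illustrative values assumed for this sketch */
	#define PTEVALID	(1<<0)
	#define PTEWRITE	(1<<1)
	#define UNCACHED	(1<<2)

	static unsigned long phystrans[8] = {
		[PTEVALID]			= Small_Page|Cached|Buffered|UserRO,
		[PTEVALID|PTEWRITE]		= Small_Page|Cached|Buffered|UserRW,
		[PTEVALID|UNCACHED]		= Small_Page|UserRO,
		[PTEVALID|UNCACHED|PTEWRITE]	= Small_Page|UserRW,
	};

	int
	main(void)
	{
		/* a valid, writable, cached user page */
		printf("0x%lx\n", phystrans[PTEVALID|PTEWRITE]);
		return 0;
	}
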
## diffname bitsy/mmu.c 2000/0907
## diff -e /n/emeliedump/2000/0906/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0907/sys/src/9/bitsy/mmu.c
52,56c
	/* direct map DRAM */
	e = conf.base1 + BY2PG*conf.npage2;
	for(a = PHYSDRAM0; a < e; a += OneMeg)
		l1table[a>>20] = L1Section | L1KernelRW |
				L1Cached | L1Buffered | (a&L1SectBaseMask);

	/* direct map zeros area */
	for(a = PHYSNULL0; a < PHYSNULL0 + 128 * OneMeg; a += OneMeg)
		l1table[a>>20] = L1Section | L1KernelRW |
				L1Cached | L1Buffered | (a&L1SectBaseMask);

	/* direct map flash */
	for(a = PHYFLASH0; a < PHYFLASH0 + 128 * OneMeg; a += OneMeg)
		l1table[a>>20] = L1Section | L1KernelRW |
				L1Cached | L1Buffered | (a&L1SectBaseMask);

	/* map the uart so that we can continue using iprint */
	uart3regs = mapspecial(UART3REGS, 64);
}

/*
 *  map special use space 
 */
ulong*
mapspecial(ulong addr, int len)
{
	ulong *t;
	ulong a, i;

	/* first see if we've mapped it somewhere, the first hole means we're done */
	for(a = REGZERO; a < REGTOP; a += OneMeg){
		if((l1table[a>>20] & L1TypeMask) != L1PageTable){
			/* create a page table and break */
			t = xspanalloc(BY2PG, 1024, 0);
			memzero(t, BY2PG, 0);
			l1table[a>>20] = L1PageTable | L1Domain0 | (((ulong)t) & L1PTBaseMask);
			break;
		}
		t = (ulong*)(l1table[a>>20] & L1PTBaseMask);
		for(i = 0; i < OneMeg; i += BY2PG){
			if((t[a>>20] & L2TypeMask) != L2SmallPage)
				break;
		}
		if(i < OneMeg){
			a += i;
			break;
		}
	}
.
49,50c
	l1table = xspanalloc(BY2PG, 16*1024, 0);
	memset(l1table, 0, BY2PG);
.
34c
ulong *l1table;
.
28,31c
	[PTEVALID]				L2SmallPage|L2Cached|L2Buffered|L2UserRO,
	[PTEVALID|PTEWRITE]			L2SmallPage|L2Cached|L2Buffered|L2UserRW,
	[PTEVALID|PTEUNCACHED]			L2SmallPage|L2UserRO,
	[PTEVALID|PTEUNCACHED|PTEWRITE]		L2SmallPage|L2UserRW,

	[PTEKERNEL|PTEVALID]			L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEWRITE]		L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED]		L2SmallPage|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE]	L2SmallPage|L2KernelRW,
.
26c
static ulong phystrans[16] =
.
22d
13,19c
	/* level 1 descriptor bits */
	L1TypeMask=	(3<<0),
	L1Invalid=	(0<<0),
	L1PageTable=	(1<<0),
	L1Section=	(2<<0),
	L1Cached=	(1<<3),
	L1Buffered=	(1<<2),
	L1Domain0=	(0<<5),
	L1KernelRW=	(0x1<<10),
	L1UserRO=	(0x2<<10),
	L1UserRW=	(0x3<<10),
	L1SectBaseMask=	(0xFFF<<20),
	L1PTBaseMask=	(0x3FFFFF<<10),
	
	/* level 2 descriptor bits */
	L2TypeMask=	(3<<0),
	L2SmallPage=	(2<<0),
	L2LargePage=	(1<<0),
	L2Cached=	(1<<3),
	L2Buffered=	(1<<2),
	L2KernelRW=	(0x55<<4),
	L2UserRO=	(0xAA<<4),
	L2UserRW=	(0xFF<<4),
	L2PageBaseMask=	(0xFFFFF<<12),
.
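
This version maps DRAM, the zeros area, and flash with 1MB sections: each L1 entry carries the section's physical base in its top 12 bits plus type, cache, and permission bits, and the table is indexed by virtual address in 1MB units. A standalone sketch of that arithmetic, with the descriptor bits copied from the enum and an illustrative SA-1100 DRAM base (L1SectBaseMask is written as an unsigned constant to keep the shift well defined):

	#include <stdio.h>

	/* level 1 descriptor bits, copied from the enum above */
	#define L1Section	(2<<0)
	#define L1Cached	(1<<3)
	#define L1Buffered	(1<<2)
	#define L1KernelRW	(0x1<<10)
	#define L1SectBaseMask	0xFFF00000UL	/* 0xFFF<<20 */

	int
	main(void)
	{
		unsigned long a, entry;

		a = 0xC0000000UL;	/* PHYSDRAM0 on the SA-1100; illustrative */
		entry = L1Section | L1KernelRW | L1Cached | L1Buffered
			| (a & L1SectBaseMask);

		/* the table is indexed by virtual address in 1MB units */
		printf("l1table[0x%lx] = 0x%08lx\n", a>>20, entry);
		return 0;
	}
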
## diffname bitsy/mmu.c 2000/0909
## diff -e /n/emeliedump/2000/0907/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0909/sys/src/9/bitsy/mmu.c
120a

	/* we get here if no entry was found mapping this physical address */
	
.
117c
			virtaddr += i;
.
113c
			if((t[virtaddr>>20] & L2TypeMask) != L2SmallPage)
.
111c
		t = (ulong*)(l1table[virtaddr>>20] & L1PTBaseMask);
.
108c
			l1table[virtaddr>>20] = L1PageTable | L1Domain0 | (((ulong)t) & L1PTBaseMask);
.
103,104c
	for(virtaddr = REGZERO; virtaddr < REGTOP; virtaddr += OneMeg){
		if((l1table[virtaddr>>20] & L1TypeMask) != L1PageTable){
.
100c
	ulong virtaddr, i;
.
97c
mapspecial(ulong physaddr, int len)
.
## diffname bitsy/mmu.c 2000/0920
## diff -e /n/emeliedump/2000/0909/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0920/sys/src/9/bitsy/mmu.c
122,123c
	/* we get here if no entry was found mapping this physical range */
.
114a
			
.
113c
			if((t[(virtaddr+i)>>20] & L2TypeMask) != L2SmallPage)
.
94c
 *  map special use space
.
## diffname bitsy/mmu.c 2000/0921
## diff -e /n/emeliedump/2000/0920/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0921/sys/src/9/bitsy/mmu.c
115c

			if(candidate == 0){
				/* look for start of range */
				if((entry & L2PageBaseMask) != base)
					continue;
				candidate = virtaddr+i;
			} else {
				/* look for continued range */
				if((entry & L2PageBaseMask) != base + off)
					candidate = 0;
					continue;
				}
			}

			/* if we're at the end of the range, area is already mapped */
			if((entry & L2PageBaseMask) == end)
				return candidate + (physaddr-base);
.
113c
			entry = t[(virtaddr+i)>>20];

			/* first hole means nothing left, add map */
			if((entry & L2TypeMask) != L2SmallPage)
.
108c
			l1table[virtaddr>>20] = L1PageTable | L1Domain0 |
						(((ulong)t) & L1PTBaseMask);
.
101a
	base = physaddr & ~(BY2PG-1);
	end = (physaddr+len-1) & ~(BY2PG-1);
	if(len > 128*1024)
		usemeg = 1;
	off = 0;
	candidate = 0;

.
100c
	ulong virtaddr, i, base, end, off, entry, candidate;
.
94c
 *  map special space, assume that the space isn't already mapped
.
## diffname bitsy/mmu.c 2000/0923
## diff -e /n/emeliedump/2000/0921/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0923/sys/src/9/bitsy/mmu.c
134c
				if((entry & L2PageBaseMask) != base + off){
.
96a
mapspecmeg(ulong physaddr, int len)
ulong*
.
94c
 *  map special space uncached, assume that the space isn't already mapped
.
## diffname bitsy/mmu.c 2000/0924
## diff -e /n/emeliedump/2000/0923/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0924/sys/src/9/bitsy/mmu.c
152c
	/* didn't fit */
	if(base <= end)
		return nil;

	return rv;
.
146,149d
141,144d
125,139c
			/* found unused entry on level 2 table */
			if((entry & L2TypeMask) != L2SmallPage){
				if(rv == nil)
					rv = (ulong*)(va+i*BY2PG+off);
				t[i>>PGSHIFT] = L2SmallPage | L2KernelRW | 
						(base & L2PageBaseMask);
				base += BY2PG;
				continue;
.
123c
			entry = t[i>>PGSHIFT];
.
121c

		t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
.
119d
117c
			l1table[va>>20] = L1PageTable | L1Domain0 |
.
111,114c
	for(va = REGZERO; va < REGTOP && base >= end; va += OneMeg){
		if((l1table[va>>20] & L1TypeMask) != L1PageTable){

			/* found unused entry on level 1 table */
			if(livelarge){
				if(rv == nil)
					rv = (ulong*)(va+i*BY2PG+off);
				l1table[va>>20] = L1Section | L1KernelRW |
							(base&L1SectBaseMask);
				base += OneMeg;
				continue;
			}

			/* create a page table and keep going */
.
104,109c
	rv = nil;
	livelarge = len >= 128*1024;
	if(livelarge){
		base = pa & ~(OneMeg-1);
		end = (pa+len-1) & ~(OneMeg-1);
	} else {
		base = pa & ~(BY2PG-1);
		end = (pa+len-1) & ~(BY2PG-1);
	}
	off = pa - base;
.
102c
	ulong va, i, base, end, off;
	int livelarge;
	ulong* rv;
.
97,99c
mapspecial(ulong pa, int len)
.
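
The rewrite picks its mapping granularity up front: requests of 128K or more get 1MB sections, smaller ones get 4K small pages, and base/end are rounded to that unit before the scan. Just the rounding, sketched standalone (roundtounit is a hypothetical name; BY2PG's value is assumed):

	#define BY2PG	4096		/* assumed page size */
	#define OneMeg	(1024*1024)

	void
	roundtounit(unsigned long pa, int len, unsigned long *base, unsigned long *end)
	{
		if(len >= 128*1024){		/* big enough for sections */
			*base = pa & ~(unsigned long)(OneMeg-1);
			*end = (pa+len-1) & ~(unsigned long)(OneMeg-1);
		} else {			/* small pages */
			*base = pa & ~(unsigned long)(BY2PG-1);
			*end = (pa+len-1) & ~(unsigned long)(BY2PG-1);
		}
	}
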
## diffname bitsy/mmu.c 2000/0928
## diff -e /n/emeliedump/2000/0924/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0928/sys/src/9/bitsy/mmu.c
130c
			memset(t, 0, BY2PG);
.
100c
	ulong va, i, base, end, off, entry;
.
90c
//	uart3regs = mapspecial(UART3REGS, 64);

	/* set up the domain register to cause all domains to obey pte access bits */
	iprint("setting up domain access\n");
	putdac(0x55555555);

	/* point to map */
	iprint("setting tlb map %lux\n", (ulong)l1table);
	putttb((ulong)l1table);
.
85c
	for(a = PHYSFLASH0; a < PHYSFLASH0 + 128 * OneMeg; a += OneMeg)
.
78a
	/* direct map devs */
	for(a = REGZERO; a < REGTOP; a += OneMeg)
		l1table[a>>20] = L1Section | L1KernelRW | (a&L1SectBaseMask);

.
74c
	e = conf.base1 + BY2PG*conf.npage1;
.
66,68d
## diffname bitsy/mmu.c 2000/0929
## diff -e /n/emeliedump/2000/0928/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/0929/sys/src/9/bitsy/mmu.c
151c
					rv = (ulong*)(va+i+off);
.
143a
		/* here if we're using page maps instead of sections */
.
137,141c
				/* create an L2 page table and keep going */
				t = xspanalloc(BY2PG, 1024, 0);
				memset(t, 0, BY2PG);
				l1table[va>>20] = L1PageTable | L1Domain0 |
							(((ulong)t) & L1PTBaseMask);
			}
			break;
		case L1Section:
			continue;
		case L1PageTable:
			if(livelarge)
				continue;
			break;
.
135c
			} else {
.
132c
							(base & L1SectBaseMask);
.
130c
					rv = (ulong*)(va+off);
.
124,126c
	for(va = REGZERO; va < REGTOP && base <= end; va += OneMeg){
		switch(l1table[va>>20] & L1TypeMask){
		default:
.
99a

	/* map the uart so that we can continue using iprint */
	uart3regs = (Uartregs*)mapspecial(UART3REGS, 64);

	/* enable mmu, and make 0xFFFF0000 the virtual address of the exception vecs */
	mmuenable();

	iprint("uart3regs now at %lux\n", uart3regs);
.
95c
	putdac(0xFFFFFFFF);
.
90,91c
	/* map first page of DRAM also into 0xFFFF0000 for the interrupt vectors */
	t = xspanalloc(BY2PG, 16*1024, 0);
	memset(t, 0, BY2PG);
	l1table[0xFFFF0000>>20] = L1PageTable | L1Domain0 | (((ulong)t) & L1PTBaseMask);
	t[0xF0000>>PGSHIFT] = L2SmallPage | L2KernelRW | PHYSDRAM0;
.
87,88c
		l1table[a>>20] = L1Section | L1KernelRW | (a&L1SectBaseMask) |
				L1Cached | L1Buffered;
.
82,83c
		l1table[a>>20] = L1Section | L1KernelRW | (a&L1SectBaseMask);
.
76,79d
73,74c
		l1table[a>>20] = L1Section | L1KernelRW | (a&L1SectBaseMask) |
				L1Cached | L1Buffered;
.
64a
	ulong *t;
.
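
The new block double maps the first page of DRAM at 0xFFFF0000, which mmuenable then makes the virtual address of the exception vectors (per the comment above). Only one 4K page is needed, so a single L2 small-page entry in a fresh page table covers it; the index arithmetic is sketched standalone below (PGSHIFT of 12 is assumed):

	#include <stdio.h>

	#define PGSHIFT	12	/* log2(BY2PG); assumed */

	int
	main(void)
	{
		unsigned long va;

		va = 0xFFFF0000UL;	/* exception vector base */

		/* the L1 slot and L2 slot the code above fills */
		printf("l1 index 0x%lx, l2 index 0x%lx\n",
			va>>20, (va & 0xFFFFFUL)>>PGSHIFT);
		return 0;
	}
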
## diffname bitsy/mmu.c 2000/1002
## diff -e /n/emeliedump/2000/0929/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1002/sys/src/9/bitsy/mmu.c
112c
void*
.
101c
	uart3regs = mapspecial(UART3REGS, 64);
.
## diffname bitsy/mmu.c 2000/1006
## diff -e /n/emeliedump/2000/1002/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1006/sys/src/9/bitsy/mmu.c
105,106d
100,102d
## diffname bitsy/mmu.c 2000/1007
## diff -e /n/emeliedump/2000/1006/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1007/sys/src/9/bitsy/mmu.c
100c
	/* enable mmu */
	wbflush();
	flushcache();
	flushmmu();
.
89,90c
	l1table[a>>20] = L1PageTable | L1Domain0 | (((ulong)t) & L1PTBaseMask);
	t[(a&0xfffff)>>PGSHIFT] = L2SmallPage | L2KernelRW | (PHYSDRAM0 & L2PageBaseMask);
.
86,87c
	/*
	 *  double map start of ram to exception vectors
	 */
	a = EVECTORS;
	t = xspanalloc(BY2PG, 1024, 0);
.
68,69c
	l1table = xspanalloc(16*1024, 16*1024, 0);
	memset(l1table, 0, 16*1024);
.
56a

.
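
The 2000/1007 changes settle the bring-up order: build the tables, program the domain register, point the translation-table base at the map, drain and flush the caches, then enable. A sketch of that sequence as one function (enablemmu is a hypothetical name; the primitives are the assembler helpers this file already calls, declared here so the sketch stands alone):

	extern void putdac(unsigned long);	/* set domain access control */
	extern void putttb(unsigned long);	/* set translation table base */
	extern void wbflush(void);		/* drain the write buffer */
	extern void flushcache(void);		/* write back and invalidate */
	extern void flushmmu(void);		/* drop stale tlb entries */
	extern void mmuenable(void);

	void
	enablemmu(unsigned long *l1table)
	{
		putdac(0xFFFFFFFFUL);	/* all domains manager at this stage;
					 * a later diff tightens this to Dclient */
		putttb((unsigned long)l1table);
		wbflush();
		flushcache();
		flushmmu();
		mmuenable();
	}
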
## diffname bitsy/mmu.c 2000/1011
## diff -e /n/emeliedump/2000/1007/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1011/sys/src/9/bitsy/mmu.c
200c
	/* set pid */
	if(p->pid <= 0)
		p->pid = newtlbpid(p);
	putpid(p->pid<<25);

	/* set domain register to this + the kernel's domains */
	putdac((Dclient<<(2*p->pid)) | Dclient);
.
198c
mmuswitch(Proc* p)
.
194c
	Page *pg;
	int i;

	for(i = 0; i < nelem(p->l1); i++){
		pg = p->l1[i];
		if(pg == nil)
			continue;
		if(--pg->ref)
			panic("mmurelease: pg->ref %d\n", pg->ref);
		pagechainhead(pg);
		p->l1[i] = nil;
	}
.
192c
mmurelease(Proc* p)
.
190a
/*
 *  this is called with palloc locked so the pagechainhead is kosher
 */
.
188c
	ulong pva;
	Page *p;
	ulong *t;

	/* if user memory, offset by pid value */
	if((va & 0xfe000000) == 0)
		pva = va | (up->pid << 25);
	else
		pva = va;

	/* always point L1 entry to L2 page, can't hurt */
	p = up->l1[va>>20];
	if(p == nil){
		p = auxpage();
		if(p == nil)
			pexit("out of memory", 1);
		p->va = VA(kmap(p));
		up->l1[va>>20] = p;
	}
	l1table[pva>>20] = L1PageTable | L1Domain0 | (p->pa & L1PTBaseMask);
	t = (ulong*)p->va;

	/* set L2 entry */
	t[(pva & (OneMeg-1))>>PGSHIFT] = mmubits[pa & (PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE)]
		| (pa & ~(PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE));

	wbflush();
.
184a
/*
 *  find a new pid.  If none exist, flush all pids, mmu, and caches.
 */
static Lock pidlock;

int
newtlbpid(Proc *p)
{
	return p->pid;
}

/*
 *  table to map fault.c bits to physical bits
 */
static ulong mmubits[16] =
{
	[PTEVALID]				L2SmallPage|L2Cached|L2Buffered|L2UserRO,
	[PTEVALID|PTEWRITE]			L2SmallPage|L2Cached|L2Buffered|L2UserRW,
	[PTEVALID|PTEUNCACHED]			L2SmallPage|L2UserRO,
	[PTEVALID|PTEUNCACHED|PTEWRITE]		L2SmallPage|L2UserRW,

	[PTEKERNEL|PTEVALID]			L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEWRITE]		L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED]		L2SmallPage|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE]	L2SmallPage|L2KernelRW,
};

/*
 *  add an entry to the current map
 */
.
153a
			/* if it's already mapped in a one meg area, don't remap */
			entry = l1table[va>>20];
			i = entry & L1SectBaseMask;
			if(pa >= i && (pa+len) <= i + OneMeg)
			if((entry & ~L1SectBaseMask) == (L1Section | L1KernelRW | L1Domain0))
				return (void*)(va + (pa & (OneMeg-1)));
				
.
140c
				l1table[va>>20] = L1Section | L1KernelRW | L1Domain0 |
.
98c
	putdac(Dclient);
.
86a
	/* map peripheral control module regs */
	mapspecial(0x80000000, OneMeg);

	/* map system control module regs */
	mapspecial(0x90000000, OneMeg);

.
82,85c
	/* map flash */
	for(o = 0; o < 128 * OneMeg; o += OneMeg)
		l1table[(FLASHZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((PHYSFLASH0+o)&L1SectBaseMask);
.
78,80c
	/* map zeros area */
	for(o = 0; o < 128 * OneMeg; o += OneMeg)
		l1table[(NULLZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| ((PHYSNULL0+o)&L1SectBaseMask);
.
72,76c
	/* map DRAM */
	for(o = 0; o < DRAMTOP; o += OneMeg)
		l1table[(DRAMZERO+o)>>20] = L1Section | L1KernelRW| L1Domain0 
			| L1Cached | L1Buffered
			| ((PHYSDRAM0+o)&L1SectBaseMask);
.
65c
	ulong a, o;
.
39,52c
	/* domain values */
	Dnoaccess=	0,
	Dclient=	1,
	Dmanager=	3,
.
37d
20c
	L1DomShift=	5,
	L1Domain0=	(0<<L1DomShift),
.
9a
/*
 *  to avoid mmu and cache flushing, we use the pid register in the MMU
 *  to map all user addresses.  Although there are 64 possible pids, we
 *  can only use 31 because there are only 32 protection domains and we
 *  need one for the kernel.  Pid i is thus associated with domain i.
 *  Domain 0 is used for the kernel.
 */

.
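
Two computations carry the whole pid scheme described in the new comment: user addresses, which all sit in the low 32MB, are OR'd with pid<<25 so each process lands in its own window, and the domain register enables that process's domain alongside domain 0 for the kernel. Both are sketched standalone below, straight from the putmmu and mmuswitch code in this diff:

	#include <stdio.h>

	#define Dclient	1	/* from the domain values above */

	int
	main(void)
	{
		unsigned long va, pid, pva, dac;

		va = 0x00008000UL;	/* a low user address; illustrative */
		pid = 3;

		/* same test as putmmu: only low (user) addresses are relocated */
		if((va & 0xfe000000UL) == 0)
			pva = va | (pid << 25);
		else
			pva = va;

		/* this process's domain plus the kernel's (domain 0) */
		dac = (Dclient << (2*pid)) | Dclient;

		printf("pva 0x%08lx dac 0x%08lx\n", pva, dac);
		return 0;
	}
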
## diffname bitsy/mmu.c 2000/1012
## diff -e /n/emeliedump/2000/1011/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1012/sys/src/9/bitsy/mmu.c
289c
	putdac((Dclient<<(2*p->tlbpid)) | Dclient);
.
284,286c
	if(p->tlbpid <= 0)
		p->tlbpid = newtlbpid(p);
iprint("using tlbpid %d\n", p->tlbpid);
	putpid(p->tlbpid<<25);
.
237c
		pva = va | (up->tlbpid << 25);
.
235c
iprint("putmmu(0x%.8lux, 0x%.8lux)\n", va, pa);
	/* if user memory, add pid value */
.
206c
	int i;

	ilock(&pidlock);
	i = ++(m->lastpid);
	if(i >= nelem(m->pid2proc)){
		flushpids();
		i = m->lastpid = 0;
	}
	m->pid2proc[i] = p;
	p->tlbpid = i+1;
	iunlock(&pidlock);
	return p->tlbpid;
.
202a
void
flushpids(void)
{
	memset(l1table, 0, BY2WD*nelem(m->pid2proc)*32);
	memset(m->pid2proc, 0, sizeof(m->pid2proc));
	flushcache();
	flushmmu();
}

.
199c
 *  maintain pids
.
107d
103d
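
newtlbpid is now a real allocator: pids are handed out in order under a lock, and when the supply runs out everything is flushed and numbering restarts. A standalone model of that policy (Npid and the flat array are stand-ins; the kernel keeps this state per processor in m->pid2proc, and flushpids also clears the affected l1 entries and flushes caches):

	#include <stdio.h>
	#include <string.h>

	#define Npid	31	/* 31 usable pids, per the comment above */

	static void *pid2proc[Npid];
	static int lastpid;

	int
	newtlbpid(void *p)
	{
		int i;

		i = ++lastpid;
		if(i >= Npid){
			/* out of pids: flush everything and recycle */
			memset(pid2proc, 0, sizeof pid2proc);
			i = lastpid = 0;
		}
		pid2proc[i] = p;
		return i+1;	/* pids are 1-based; domain 0 is the kernel's */
	}

	int
	main(void)
	{
		int i;
		char procs[35];

		for(i = 0; i < 35; i++)	/* run past Npid to show the recycle */
			printf("proc %d: pid %d\n", i, newtlbpid(&procs[i]));
		return 0;
	}
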
## diffname bitsy/mmu.c 2000/1013
## diff -e /n/emeliedump/2000/1012/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1013/sys/src/9/bitsy/mmu.c
302,309c
//	flushcache();	/* drain and flush the cache */
//	flushmmu();
//	memmove(l1table, p->l1table, sizeof(p->l1table));
.
295c
		p->l1page[i] = nil;
.
288,289c
	for(i = 0; i < nelem(p->l1page); i++){
		pg = p->l1page[i];
.
276c
	flushmmu();
.
274a
iprint("%lux[%lux] = %lux\n", (ulong)t, (va & (OneMeg-1))>>PGSHIFT, t[(va & (OneMeg-1))>>PGSHIFT]);
.
273c
	t[(va & (OneMeg-1))>>PGSHIFT] = mmubits[pa & (PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE)]
.
269c
	l1table[va>>20] = L1PageTable | L1Domain0 | (p->pa & L1PTBaseMask);
iprint("%lux[%lux] = %lux\n", l1table, va>>20, l1table[va>>20]);
	up->l1table[va>>20] = l1table[va>>20];
.
267c
		up->l1page[va>>20] = p;
.
261c
	p = up->l1page[va>>20];
.
254,259d
249d
197,227d
71c
	for(o = 0; o < 128*OneMeg; o += OneMeg)
.
69a
	/* map low mem */
	for(o = 0; o < 1*OneMeg; o += OneMeg)
		l1table[(0+o)>>20] = L1Section | L1KernelRW| L1Domain0 
			| L1Cached | L1Buffered
			| ((0+o)&L1SectBaseMask);

.
## diffname bitsy/mmu.c 2000/1014
## diff -e /n/emeliedump/2000/1013/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1014/sys/src/9/bitsy/mmu.c
273,275c
iprint("switching to proc %d\n", p->pid);
	memmove(l1table, p->l1table, sizeof(p->l1table));
	cleanaddr((ulong)l1table);
	wbflush();
}

void
peekmmu(ulong va)
{
	ulong e;

	e = l1table[va>>20];
	switch(e & L1TypeMask){
	default:
		iprint("l1: %lux invalid\n", e);
		break;
	case L1PageTable:
		iprint("l1: %lux pt\n", e);
		va &= OneMeg-1;
		va >>= PGSHIFT;
		e &= L1PTBaseMask;
		e = ((ulong*)e)[va];
		iprint("l2: %lux\n", e);
		break;
	case L1Section:
		iprint("l1: %lux section\n", e);
		break;
	}
.
247c
	wbflush();
.
245a
	cleanaddr((ulong)&t[(va & (OneMeg-1))>>PGSHIFT]);
.
237a
	cleanaddr((ulong)&l1table[va>>20]);
.
235a
		memset((uchar*)(p->va), 0, BY2PG);
.
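
peekmmu above is a debugging aid that decodes the two-level table by hand, dispatching on the low two type bits of each descriptor. Just that classification, as a tiny standalone function (the descriptor values in main are illustrative):

	#include <stdio.h>

	#define L1TypeMask	(3<<0)
	#define L1PageTable	(1<<0)
	#define L1Section	(2<<0)

	const char*
	l1kind(unsigned long e)
	{
		switch(e & L1TypeMask){
		case L1PageTable:
			return "pt";
		case L1Section:
			return "section";
		default:
			return "invalid";
		}
	}

	int
	main(void)
	{
		printf("%s\n", l1kind(0xC000040EUL));	/* a section entry */
		printf("%s\n", l1kind(0x00000000UL));	/* an empty slot */
		return 0;
	}
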
## diffname bitsy/mmu.c 2000/1015
## diff -e /n/emeliedump/2000/1014/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1015/sys/src/9/bitsy/mmu.c
279a

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
.
278c

	/* make sure map is in memory and drain write buffer */
	cacheflushaddr((ulong)l1table);
.
276c
	if(p->newtlb){
		mmuptefree(p);
		p->newtlb = 0;
	}

	/* write back dirty cache entries before changing map */
	cacheflush();

	/* move in new map */
.
270a
	if(p->mmufree && palloc.r.p)
		wakeup(&palloc.r);
	p->mmufree = nil;

	memset(l1table, 0, sizeof(p->l1table));
.
269d
265a
		p->l1page[i] = nil;
		pg->next = p->mmufree;
		p->mmufree = pg;
	}
	memset(p->l1table, 0, sizeof(p->l1table));
}

/*
 *  this is called with palloc locked so the pagechainhead is kosher
 */
void
mmurelease(Proc* p)
{
	Page *pg, *next;

	/* write back dirty cache entries before changing map */
	cacheflush();

	mmuptefree(p);

	for(pg = p->mmufree; pg; pg = next){
		next = pg->next;
.
262c
	for(i = 0; i < Nmeg; i++){
.
257c
mmuptefree(Proc *p)
.
254c
 *  free up all page tables for this proc
.
247,248c
//iprint("%lux[%lux] = %lux\n", (ulong)t, (va & (OneMeg-1))>>PGSHIFT, t[(va & (OneMeg-1))>>PGSHIFT]);
	cacheflushaddr((ulong)&t[(va & (OneMeg-1))>>PGSHIFT]);
.
239,240c
	cacheflushaddr((ulong)&l1table[va>>20]);
//iprint("%lux[%lux] = %lux\n", l1table, va>>20, l1table[va>>20]);
.
227c
//iprint("putmmu(0x%.8lux, 0x%.8lux)\n", va, pa);
.
118a
	cacheflush();
.
116,117c
	mmuinvalidate();
.
84a
			| L1Cached | L1Buffered
.
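
mmuswitch now has its final shape: free stale page tables if newtlb is set, write back the cache while the old map is still live, copy the process's map into the hardware table, make sure that copy has reached memory, and only then invalidate the TLB. The skeleton of that order (switchmap is a hypothetical name; the helpers are the ones this file already calls):

	extern void cacheflush(void);		/* write back + invalidate */
	extern void cacheflushaddr(unsigned long);	/* make an address visible to memory */
	extern void mmuinvalidate(void);	/* drop stale tlb entries */

	void
	switchmap(unsigned long *hw, unsigned long *proc, int nbytes)
	{
		int i, n;

		n = nbytes/sizeof(unsigned long);
		cacheflush();		/* dirty lines belong to the old map */
		for(i = 0; i < n; i++)	/* move in the new process's map */
			hw[i] = proc[i];
		cacheflushaddr((unsigned long)hw);	/* the table walker reads memory */
		mmuinvalidate();	/* tlb may still hold old translations */
	}
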
## diffname bitsy/mmu.c 2000/1016
## diff -e /n/emeliedump/2000/1015/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1016/sys/src/9/bitsy/mmu.c
302a
	if(m->mmupid == p->pid && p->newtlb == 0)
		return;
	m->mmupid = p->pid;

.
301c
mmuswitch(Proc *p)
.
248c
print("%lux[%lux] = %lux\n", (ulong)t, (va & (OneMeg-1))>>PGSHIFT, t[(va & (OneMeg-1))>>PGSHIFT]);
.
243c
	t = (ulong*)pg->va;
.
241c
print("%lux[%lux] = %lux\n", l1table, va>>20, l1table[va>>20]);
.
239c
	l1table[va>>20] = L1PageTable | L1Domain0 | (pg->pa & L1PTBaseMask);
.
230,237c
	pg = up->l1page[va>>20];
	if(pg == nil){
		pg = up->mmufree;
		if(pg != nil){
			up->mmufree = pg->next;
		} else {
			pg = auxpage();
			if(pg == nil)
				pexit("out of memory", 1);
		}
		pg->va = VA(kmap(pg));
		up->l1page[va>>20] = pg;
		memset((uchar*)(pg->va), 0, BY2PG);
.
228c
print("putmmu(0x%.8lux, 0x%.8lux)\n", va, pa);
.
225c
	Page *pg;
.
## diffname bitsy/mmu.c 2000/1018
## diff -e /n/emeliedump/2000/1016/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1018/sys/src/9/bitsy/mmu.c
323,325c
	/* make sure map is in memory */
	cachewbregion(l1table, sizeof(p->l1table));
.
317,319d
311a
	/* write back dirty cache entries and invalidate all cache entries */
	cacheflush();

.
302a
	cachewbregion(l1table, sizeof(p->l1table));
.
256c
	/*  write back dirty entries - we need this because the pio() in
	 *  fault.c is writing via a different virt addr and won't clean
	 *  its changes out of the dcache.  Page coloring doesn't work
	 *  on this mmu because the virtual cache is set associative
	 *  rather than direct mapped.
	 */
	cachewb();
	if(pg->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text page has been written */
		icacheinvalidate();
		pg->cachectl[0] = PG_NOFLUSH;
	}

	splx(s);
.
253,254d
251c
	l2p = &t[(va & (OneMeg-1))>>PGSHIFT];
	*l2p = mmubits[pa & (PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE)]
.
249a
	/* always point L1 entry to L2 page, can't hurt */
	l1p = &l1table[va>>20];
	*l1p = L1PageTable | L1Domain0 | (l2pg->pa & L1PTBaseMask);
	up->l1table[va>>20] = *l1p;
	t = (ulong*)l2pg->va;

.
244,248d
240,242c
		l2pg->va = VA(kmap(l2pg));
		up->l1page[va>>20] = l2pg;
		memset((uchar*)(l2pg->va), 0, BY2PG);
.
236,237c
			l2pg = auxpage();
			if(l2pg == nil)
.
228,234c
	s = splhi();

	/* clear out the current entry */
	mmuinvalidateaddr(va);

	l2pg = up->l1page[va>>20];
	if(l2pg == nil){
		l2pg = up->mmufree;
		if(l2pg != nil){
			up->mmufree = l2pg->next;
.
225,226c
	Page *l2pg;
	ulong *t, *l1p, *l2p;
	int s;
.
223c
putmmu(ulong va, ulong pa, Page *pg)
.
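
The pa argument to putmmu carries the soft attribute bits in its low bits alongside the page-aligned physical address, so building the L2 descriptor is one table lookup plus a mask. A standalone sketch (soft-bit values assumed as in the earlier sketch; the SOFTBITS macro is this sketch's shorthand; mmubits is abbreviated to one entry):

	#include <stdio.h>

	/* assumed soft bits, as in the earlier sketch */
	#define PTEVALID	(1<<0)
	#define PTEWRITE	(1<<1)
	#define PTEUNCACHED	(1<<2)
	#define PTEKERNEL	(1<<3)
	#define SOFTBITS	(PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE)

	/* hardware bits from the enum above */
	#define L2SmallPage	(2<<0)
	#define L2Cached	(1<<3)
	#define L2Buffered	(1<<2)
	#define L2UserRW	(0xFF<<4)

	static unsigned long mmubits[16] = {
		[PTEVALID|PTEWRITE] = L2SmallPage|L2Cached|L2Buffered|L2UserRW,
		/* ... remaining entries as in the table above ... */
	};

	int
	main(void)
	{
		unsigned long pa, l2;

		/* page-aligned physical address with soft bits in the low bits */
		pa = 0xC0123000UL | PTEVALID | PTEWRITE;
		l2 = mmubits[pa & SOFTBITS] | (pa & ~(unsigned long)SOFTBITS);
		printf("l2 entry 0x%08lx\n", l2);
		return 0;
	}
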
## diffname bitsy/mmu.c 2000/1019
## diff -e /n/emeliedump/2000/1018/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1019/sys/src/9/bitsy/mmu.c
378c
		iprint("l1: %lux[%lux] = %lux section\n", l1table, va>>20, e);
.
374,375c
		d = ((ulong*)e)[va];
		iprint("l2: %lux[%lux] = %lux\n", e, va, d);
.
370c
		iprint("l1: %lux[%lux] = %lux pt\n", l1table, va>>20, e);
.
367c
		iprint("l1: %lux[%lux] = %lux invalid\n", l1table, va>>20, e);
.
362c
	ulong e, d;
.
198a
	cacheflush();
.
181c
		for(i = 0; i < OneMeg && base <= end; i += BY2PG){
.
174c
			if(large)
.
148c
			if(large){
.
134,135c
	large = len >= 128*1024;
	if(large){
.
130c
	int large;
.
## diffname bitsy/mmu.c 2000/1106
## diff -e /n/emeliedump/2000/1019/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1106/sys/src/9/bitsy/mmu.c
343c
	cachewbregion((ulong)l1table, sizeof(p->l1table));
.
321c
	cachewbregion((ulong)l1table, sizeof(p->l1table));
.
## diffname bitsy/mmu.c 2000/1118
## diff -e /n/emeliedump/2000/1106/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1118/sys/src/9/bitsy/mmu.c
201a
}

/* map in i/o registers */
void*
mapspecial(ulong pa, int len)
{
	return _map(pa, len, REGZERO, REGTOP, L1KernelRW, L2KernelRW);
}

/* map add on memory */
void*
mapmem(ulong pa, int len)
{
	return _map(pa, len, EMEMZERO, EMEMTOP, L1KernelRW|L1Cached|L1Buffered,
			L2KernelRW|L2Cached|L2Buffered);
}

/* map a virtual address to a physical one */
ulong
mmu_paddr(ulong va)
{
	ulong entry;
	ulong *t;

	entry = l1table[va>>20];
	switch(entry & L1TypeMask){
	case L1Section:
		return (entry & L1SectBaseMask) | (va & (OneMeg-1));
	case L1PageTable:
		t = (ulong*)(entry & L1PTBaseMask);
		va &= OneMeg-1;
		entry = t[va>>PGSHIFT];
		switch(entry & L1TypeMask){
		case L2SmallPage:
			return (entry & L2PageBaseMask) | (va & (BY2PG-1));
		}
	}
	return 0;
}

/* map a physical address to a virtual one */
static ulong
findva(ulong pa, ulong zero, ulong top)
{
	int i;
	ulong entry, va;
	ulong start, end;
	ulong *t;

	for(va = zero; va < top; va += OneMeg){
		/* search the L1 entry */
		entry = l1table[va>>20];
		switch(entry & L1TypeMask){
		default:
			return 0;	/* no holes */
		case L1Section:
			start = entry & L1SectBaseMask;
			end = start + OneMeg;
			if(pa >= start && pa < end)
				return va | (pa & (OneMeg-1));
			continue;
		case L1PageTable:
			break;
		}

		/* search the L2 entry */
		t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
		for(i = 0; i < OneMeg; i += BY2PG){
			entry = t[i>>PGSHIFT];

			/* found unused entry on level 2 table */
			if((entry & L2TypeMask) != L2SmallPage)
				break;

			start = entry & L2PageBaseMask;
			end = start + BY2PG;
			if(pa >= start && pa < end)
				return va | (BY2PG*i) | (pa & (BY2PG-1));
		}
	}
	return 0;
}
ulong
mmu_kaddr(ulong pa)
{
	ulong va;

	/* try the easy stuff first (the first case is true most of the time) */
	if(pa >= PHYSDRAM0 && pa <= PHYSDRAM0+(DRAMTOP-DRAMZERO))
		return DRAMZERO+(pa-PHYSDRAM0);
	if(pa >= PHYSFLASH0 && pa <= PHYSFLASH0+(FLASHTOP-FLASHZERO))
		return FLASHZERO+(pa-PHYSFLASH0);
	if(pa >= PHYSNULL0 && pa <= PHYSNULL0+(NULLTOP-NULLZERO))
		return NULLZERO+(pa-PHYSNULL0);

	if(!mmuinited)
		return 0;	/* this shouldn't happen */

	/* walk the map for the special regs and extended memory */
	va = findva(pa, EMEMZERO, EMEMTOP);
	if(va != 0)
		return va;
	return findva(pa, REGZERO, REGTOP);
.
188c
				t[i>>PGSHIFT] = L2SmallPage | l2prop | 
.
169c
			if((entry & ~L1SectBaseMask) == (L1Section | l1prop | L1Domain0))
.
151c
				l1table[va>>20] = L1Section | l1prop | L1Domain0 |
.
144c
	for(va = zero; va < top && base <= end; va += OneMeg){
.
125,126c
static void*
_map(ulong pa, int len, ulong zero, ulong top, ulong l1prop, ulong l2prop)
.
123c
 *  map on request
.
119a

	mmuinited = 1;
.
91d
89c
	for(o = 0; o < FLASHTOP-FLASHZERO; o += OneMeg)
.
83c
	for(o = 0; o < NULLTOP-NULLZERO; o += OneMeg)
.
77c
	for(o = 0; o < DRAMTOP-DRAMZERO; o += OneMeg)
.
70c
	/* map low mem (I really don't know why I have to do this -- presotto) */
.
55d
53a
static int mmuinited;
.
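
mmu_paddr and mmu_kaddr above give the kernel both directions of the translation: the fixed windows (DRAM, flash, zeros) are pure arithmetic, and anything else falls back to walking the map with findva. The fast path is just range checks, sketched standalone with illustrative window constants (the real DRAMZERO/DRAMTOP/PHYSDRAM0 values come from mem.h; on the bitsy DRAM is direct mapped, so the windows here coincide):

	#include <stdio.h>

	/* illustrative window constants; the real ones live in mem.h */
	#define DRAMZERO	0xC0000000UL
	#define DRAMTOP		0xC4000000UL
	#define PHYSDRAM0	0xC0000000UL

	unsigned long
	kaddr(unsigned long pa)
	{
		/* the easy case: physical DRAM maps 1:1 into its window */
		if(pa >= PHYSDRAM0 && pa <= PHYSDRAM0+(DRAMTOP-DRAMZERO))
			return DRAMZERO + (pa - PHYSDRAM0);
		return 0;	/* otherwise the kernel walks the map */
	}

	unsigned long
	paddr(unsigned long va)
	{
		if(va >= DRAMZERO && va < DRAMTOP)
			return PHYSDRAM0 + (va - DRAMZERO);
		return 0;
	}

	int
	main(void)
	{
		unsigned long pa = 0xC0008000UL;

		/* round trip: pa -> va -> pa */
		printf("0x%08lx\n", paddr(kaddr(pa)));
		return 0;
	}
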
## diffname bitsy/mmu.c 2000/1121
## diff -e /n/emeliedump/2000/1118/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1121/sys/src/9/bitsy/mmu.c
216,217c
	ulong l1, l2;

	if(cached){
		l1 = L1KernelRW|L1Cached|L1Buffered;
		l2 = L2KernelRW|L2Cached|L2Buffered;
	} else {
		l1 = L1KernelRW;
		l2 = L2KernelRW;
	}
	return _map(pa, len, EMEMZERO, EMEMTOP, l1, l2);
.
214c
mapmem(ulong pa, int len, int cached)
.
## diffname bitsy/mmu.c 2000/1130
## diff -e /n/emeliedump/2000/1121/sys/src/9/bitsy/mmu.c /n/emeliedump/2000/1130/sys/src/9/bitsy/mmu.c
81a
	/* uncached DRAM */
	for(o = 0; o < UCDRAMTOP-UCDRAMZERO; o += OneMeg)
		l1table[(UCDRAMZERO+o)>>20] = L1Section | L1KernelRW| L1Domain0 
			| ((PHYSDRAM0+o)&L1SectBaseMask);

.
## diffname bitsy/mmu.c 2001/0810
## diff -e /n/emeliedump/2000/1130/sys/src/9/bitsy/mmu.c /n/emeliedump/2001/0810/sys/src/9/bitsy/mmu.c
124,125d
112a
	mmurestart();

	mmuinited = 1;
}

void
mmurestart(void) {
.
## diffname bitsy/mmu.c 2001/0813
## diff -e /n/emeliedump/2001/0810/sys/src/9/bitsy/mmu.c /n/emeliedump/2001/0813/sys/src/9/bitsy/mmu.c
127a
	icacheinvalidate();	/* you never know ... (sape) */
.
## diffname bitsy/mmu.c 2001/0814
## diff -e /n/emeliedump/2001/0813/sys/src/9/bitsy/mmu.c /n/emeliedump/2001/0814/sys/src/9/bitsy/mmu.c
128d
120a

.
## diffname bitsy/mmu.c 2001/0815
## diff -e /n/emeliedump/2001/0814/sys/src/9/bitsy/mmu.c /n/emeliedump/2001/0815/sys/src/9/bitsy/mmu.c
263c
ulong
.
