Add an MD page flag for tracking if a page is cacheable or not, so that

we don't flush all mappings of a physical page in order to make it
virtually cacheable again, if it is already cacheable.
This commit is contained in:
Jake Burkholder 2002-05-29 06:12:13 +00:00
parent 468303c500
commit 20bd6675fb
2 changed files with 7 additions and 1 deletion

View file

@ -52,6 +52,8 @@
#define PMAP_CONTEXT_MAX 8192
/* md_page flag: page currently has its mappings marked uncacheable. */
#define PG_UNCACHEABLE (1<<0)
#define pmap_resident_count(pm) (pm->pm_stats.resident_count)
typedef struct pmap *pmap_t;
@ -59,6 +61,7 @@ typedef struct pmap *pmap_t;
/*
 * Machine-dependent per-page data tracked by the pmap
 * (NOTE(review): TTEs and dcache colors suggest sparc64 — confirm).
 */
struct md_page {
STAILQ_HEAD(, tte) tte_list;	/* list of TTEs mapping this page (linked via tte_link) */
int colors[DCACHE_COLORS];	/* count of mappings per dcache color */
int flags;			/* PG_UNCACHEABLE: caching disabled for this page */
};
struct pmap {

View file

@ -517,6 +517,7 @@ pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
m = &vm_page_array[i];
STAILQ_INIT(&m->md.tte_list);
m->md.flags = 0;
}
for (i = 0; i < translations_size; i++) {
@ -605,6 +606,7 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
TTE_GET_VA(tp));
}
dcache_page_inval(VM_PAGE_TO_PHYS(m));
m->md.flags |= PG_UNCACHEABLE;
return (0);
}
@ -624,13 +626,14 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
if (m->md.colors[i] != 0)
c++;
}
if (c != 1)
if (c != 1 || (m->md.flags & PG_UNCACHEABLE) == 0)
return;
STAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
tp->tte_data |= TD_CV;
tlb_page_demap(TLB_DTLB | TLB_ITLB, TTE_GET_PMAP(tp),
TTE_GET_VA(tp));
}
m->md.flags &= ~PG_UNCACHEABLE;
}
/*