mirror of
https://github.com/opnsense/src.git
synced 2026-03-01 21:01:24 -05:00
Improve hash coverage for kernel page table entries by modifying the kernel
ESID -> VSID map function. This makes ZFS run stably on PowerPC under heavy loads (repeated simultaneous SVN checkouts and updates).
This commit is contained in:
parent
19291ab3de
commit
3b4b38304e
3 changed files with 10 additions and 16 deletions
|
|
@@ -838,7 +838,7 @@ moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
|
|||
}
|
||||
|
||||
entry.slbe = slbe;
|
||||
entry.slbv = KERNEL_VSID(esid, large) << SLBV_VSID_SHIFT;
|
||||
entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
|
||||
if (large)
|
||||
entry.slbv |= SLBV_L;
|
||||
|
||||
|
|
|
|||
|
|
@@ -104,17 +104,10 @@ uint64_t
|
|||
va_to_vsid(pmap_t pm, vm_offset_t va)
|
||||
{
|
||||
struct slb entry;
|
||||
int large;
|
||||
|
||||
/* Shortcut kernel case */
|
||||
if (pm == kernel_pmap) {
|
||||
large = 0;
|
||||
if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS &&
|
||||
mem_valid(va, 0) == 0)
|
||||
large = 1;
|
||||
|
||||
return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT, large));
|
||||
}
|
||||
if (pm == kernel_pmap)
|
||||
return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));
|
||||
|
||||
/*
|
||||
* If there is no vsid for this VA, we need to add a new entry
|
||||
|
|
|
|||
|
|
@@ -47,14 +47,15 @@
|
|||
#define SLBV_VSID_MASK 0xfffffffffffff000UL /* Virtual segment ID mask */
|
||||
#define SLBV_VSID_SHIFT 12
|
||||
|
||||
#define KERNEL_VSID_BIT 0x0000001000000000UL /* Bit set in all kernel VSIDs */
|
||||
|
||||
/*
|
||||
* Shift large-page VSIDs one place left. At present, they are only used in the
|
||||
* kernel direct map, and we already assume in the placement of KVA that the
|
||||
* CPU cannot address more than 63 bits of memory.
|
||||
* Make a predictable 1:1 map from ESIDs to VSIDs for the kernel. Hash table
|
||||
* coverage is increased by swizzling the ESID and multiplying by a prime
|
||||
* number (0x13bb).
|
||||
*/
|
||||
#define KERNEL_VSID(esid, large) (((uint64_t)(esid) << (large ? 1 : 0)) | KERNEL_VSID_BIT)
|
||||
#define KERNEL_VSID_BIT 0x0000001000000000UL /* Bit set in all kernel VSIDs */
|
||||
#define KERNEL_VSID(esid) ((((((uint64_t)esid << 8) | ((uint64_t)esid >> 28)) \
|
||||
* 0x13bbUL) & (KERNEL_VSID_BIT - 1)) | \
|
||||
KERNEL_VSID_BIT)
|
||||
|
||||
#define SLBE_VALID 0x0000000008000000UL /* SLB entry valid */
|
||||
#define SLBE_INDEX_MASK 0x0000000000000fffUL /* SLB index mask*/
|
||||
|
|
|
|||
Loading…
Reference in a new issue