Merge svn+ssh://svn.freebsd.org/base/head@205807

This commit is contained in:
Marcel Moolenaar 2010-03-28 22:30:14 +00:00
commit 8892b97b4b
149 changed files with 3112 additions and 2834 deletions

View file

@ -841,11 +841,7 @@ SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
* Reset registers to default values on exec.
*/
void
exec_setregs(td, entry, stack, ps_strings)
struct thread *td;
u_long entry;
u_long stack;
u_long ps_strings;
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
struct trapframe *regs = td->td_frame;
struct pcb *pcb = td->td_pcb;
@ -863,7 +859,7 @@ exec_setregs(td, entry, stack, ps_strings)
pcb->pcb_full_iret = 1;
bzero((char *)regs, sizeof(struct trapframe));
regs->tf_rip = entry;
regs->tf_rip = imgp->entry_addr;
regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
regs->tf_rdi = stack; /* argv */
regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);

View file

@ -880,9 +880,12 @@ pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
load_cr4(cr4 & ~CR4_PGE);
/*
* Although preemption at this point could be detrimental to
* performance, it would not lead to an error.
* performance, it would not lead to an error. PG_G is simply
* ignored if CR4.PGE is clear. Moreover, in case this block
* is re-entered, the load_cr4() either above or below will
* modify CR4.PGE flushing the TLB.
*/
load_cr4(cr4);
load_cr4(cr4 | CR4_PGE);
}
}
#ifdef SMP

View file

@ -701,11 +701,7 @@ freebsd32_sigreturn(td, uap)
* Clear registers on exec
*/
void
ia32_setregs(td, entry, stack, ps_strings)
struct thread *td;
u_long entry;
u_long stack;
u_long ps_strings;
ia32_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
struct trapframe *regs = td->td_frame;
struct pcb *pcb = td->td_pcb;
@ -721,12 +717,12 @@ ia32_setregs(td, entry, stack, ps_strings)
pcb->pcb_initial_fpucw = __INITIAL_FPUCW_I386__;
bzero((char *)regs, sizeof(struct trapframe));
regs->tf_rip = entry;
regs->tf_rip = imgp->entry_addr;
regs->tf_rsp = stack;
regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
regs->tf_ss = _udatasel;
regs->tf_cs = _ucode32sel;
regs->tf_rbx = ps_strings;
regs->tf_rbx = imgp->ps_strings;
regs->tf_ds = _udatasel;
regs->tf_es = _udatasel;
regs->tf_fs = _ufssel;

View file

@ -203,9 +203,9 @@ struct l_newstat {
l_ulong st_size;
l_ulong st_blksize;
l_ulong st_blocks;
struct l_timespec st_atimespec;
struct l_timespec st_mtimespec;
struct l_timespec st_ctimespec;
struct l_timespec st_atim;
struct l_timespec st_mtim;
struct l_timespec st_ctim;
l_ulong __unused4;
l_ulong __unused5;
} __packed;
@ -219,9 +219,9 @@ struct l_stat {
l_ushort st_gid;
l_ushort st_rdev;
l_long st_size;
struct l_timespec st_atimespec;
struct l_timespec st_mtimespec;
struct l_timespec st_ctimespec;
struct l_timespec st_atim;
struct l_timespec st_mtim;
struct l_timespec st_ctim;
l_long st_blksize;
l_long st_blocks;
l_ulong st_flags;
@ -242,9 +242,9 @@ struct l_stat64 {
l_ulong st_blksize;
l_ulong st_blocks;
l_ulong __pad4;
struct l_timespec st_atimespec;
struct l_timespec st_mtimespec;
struct l_timespec st_ctimespec;
struct l_timespec st_atim;
struct l_timespec st_mtim;
struct l_timespec st_ctim;
l_ulonglong st_ino;
} __packed;

View file

@ -124,8 +124,8 @@ static register_t *linux_copyout_strings(struct image_params *imgp);
static void linux_prepsyscall(struct trapframe *tf, int *args, u_int *code,
caddr_t *params);
static void linux_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask);
static void exec_linux_setregs(struct thread *td, u_long entry,
u_long stack, u_long ps_strings);
static void exec_linux_setregs(struct thread *td,
struct image_params *imgp, u_long stack);
static void linux32_fixlimit(struct rlimit *rl, int which);
static boolean_t linux32_trans_osrel(const Elf_Note *note, int32_t *osrel);
@ -828,11 +828,7 @@ exec_linux_imgact_try(struct image_params *imgp)
* XXX copied from ia32_signal.c.
*/
static void
exec_linux_setregs(td, entry, stack, ps_strings)
struct thread *td;
u_long entry;
u_long stack;
u_long ps_strings;
exec_linux_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
struct trapframe *regs = td->td_frame;
struct pcb *pcb = td->td_pcb;
@ -852,7 +848,7 @@ exec_linux_setregs(td, entry, stack, ps_strings)
pcb->pcb_initial_fpucw = __LINUX_NPXCW__;
bzero((char *)regs, sizeof(struct trapframe));
regs->tf_rip = entry;
regs->tf_rip = imgp->entry_addr;
regs->tf_rsp = stack;
regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
regs->tf_gs = _ugssel;
@ -862,7 +858,7 @@ exec_linux_setregs(td, entry, stack, ps_strings)
regs->tf_ss = _udatasel;
regs->tf_flags = TF_HASSEGS;
regs->tf_cs = _ucode32sel;
regs->tf_rbx = ps_strings;
regs->tf_rbx = imgp->ps_strings;
td->td_pcb->pcb_full_iret = 1;
load_cr0(rcr0() | CR0_MP | CR0_TS);
fpstate_drop(td);

View file

@ -516,15 +516,15 @@ spinlock_exit(void)
* Clear registers on exec
*/
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
struct trapframe *tf = td->td_frame;
memset(tf, 0, sizeof(*tf));
tf->tf_usr_sp = stack;
tf->tf_usr_lr = entry;
tf->tf_usr_lr = imgp->entry_addr;
tf->tf_svc_lr = 0x77777777;
tf->tf_pc = entry;
tf->tf_pc = imgp->entry_addr;
tf->tf_spsr = PSR_USR32_MODE;
}

View file

@ -74,7 +74,7 @@ fled_attach(device_t dev)
sc->sc_led = led_create(fled_cb, dev, "front");
fled_cb(sc, 1); /* Turn on LED */
fled_cb(dev, 1); /* Turn on LED */
return 0;
}

View file

@ -283,7 +283,7 @@ main(void)
for (;;) {
if (!autoboot || !OPT_CHECK(RBX_QUIET))
printf("\nFreeBSD/i386 boot\n"
printf("\nFreeBSD/x86 boot\n"
"Default: %u:%s(%u,%c)%s\n"
"boot: ",
dsk.drive & DRV_MASK, dev_nm[dsk.type], dsk.unit,

View file

@ -281,7 +281,7 @@ main(void)
for (;;) {
if (!autoboot || !OPT_CHECK(RBX_QUIET))
printf("\nFreeBSD/i386 boot\n"
printf("\nFreeBSD/x86 boot\n"
"Default: %u:%s(%up%u)%s\n"
"boot: ",
dsk.drive & DRV_MASK, dev_nm[dsk.type], dsk.unit,

View file

@ -730,7 +730,7 @@ main(void)
for (;;) {
if (!autoboot || !OPT_CHECK(RBX_QUIET))
printf("\nFreeBSD/i386 boot\n"
printf("\nFreeBSD/x86 boot\n"
"Default: %s:%s\n"
"boot: ",
spa->spa_name, kname);

View file

@ -143,15 +143,15 @@ struct stat32 {
uid_t st_uid;
gid_t st_gid;
dev_t st_rdev;
struct timespec32 st_atimespec;
struct timespec32 st_mtimespec;
struct timespec32 st_ctimespec;
struct timespec32 st_atim;
struct timespec32 st_mtim;
struct timespec32 st_ctim;
off_t st_size;
int64_t st_blocks;
u_int32_t st_blksize;
u_int32_t st_flags;
u_int32_t st_gen;
struct timespec32 st_birthtimespec;
struct timespec32 st_birthtim;
unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec32));
unsigned int :(8 / 2) * (16 - (int)sizeof(struct timespec32));
};

View file

@ -1634,9 +1634,9 @@ copy_stat( struct stat *in, struct stat32 *out)
CP(*in, *out, st_uid);
CP(*in, *out, st_gid);
CP(*in, *out, st_rdev);
TS_CP(*in, *out, st_atimespec);
TS_CP(*in, *out, st_mtimespec);
TS_CP(*in, *out, st_ctimespec);
TS_CP(*in, *out, st_atim);
TS_CP(*in, *out, st_mtim);
TS_CP(*in, *out, st_ctim);
CP(*in, *out, st_size);
CP(*in, *out, st_blocks);
CP(*in, *out, st_blksize);

View file

@ -185,5 +185,5 @@ extern char freebsd4_ia32_sigcode[];
extern int sz_ia32_sigcode;
extern int sz_freebsd4_ia32_sigcode;
extern void ia32_sendsig(sig_t, struct ksiginfo *, sigset_t *);
extern void ia32_setregs(struct thread *td, u_long entry, u_long stack,
u_long ps_strings);
extern void ia32_setregs(struct thread *td, struct image_params *imgp,
u_long stack);

View file

@ -1227,6 +1227,24 @@ linprocfs_docmdline(PFS_FILL_ARGS)
return (0);
}
/*
* Filler function for proc/filesystems
*/
static int
linprocfs_dofilesystems(PFS_FILL_ARGS)
{
struct vfsconf *vfsp;
mtx_lock(&Giant);
TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
if (vfsp->vfc_flags & VFCF_SYNTHETIC)
sbuf_printf(sb, "nodev");
sbuf_printf(sb, "\t%s\n", vfsp->vfc_name);
}
mtx_unlock(&Giant);
return(0);
}
#if 0
/*
* Filler function for proc/modules
@ -1276,6 +1294,8 @@ linprocfs_init(PFS_INIT_ARGS)
NULL, NULL, NULL, PFS_RD);
pfs_create_file(root, "devices", &linprocfs_dodevices,
NULL, NULL, NULL, PFS_RD);
pfs_create_file(root, "filesystems", &linprocfs_dofilesystems,
NULL, NULL, NULL, PFS_RD);
pfs_create_file(root, "loadavg", &linprocfs_doloadavg,
NULL, NULL, NULL, PFS_RD);
pfs_create_file(root, "meminfo", &linprocfs_domeminfo,

View file

@ -2711,7 +2711,7 @@ linux_v4l_clip_copy(void *lvc, struct video_clip **ppvc)
/* XXX: If there can be no concurrency: s/M_NOWAIT/M_WAITOK/ */
if ((*ppvc = malloc(sizeof(**ppvc), M_LINUX, M_NOWAIT)) == NULL)
return (ENOMEM); /* XXX: linux has no ENOMEM here */
memcpy(&vclip, *ppvc, sizeof(vclip));
memcpy(*ppvc, &vclip, sizeof(vclip));
(*ppvc)->next = NULL;
return (0);
}
@ -2726,6 +2726,8 @@ linux_v4l_cliplist_free(struct video_window *vw)
ppvc_next = &((*ppvc)->next);
free(*ppvc, M_LINUX);
}
vw->clips = NULL;
return (0);
}

View file

@ -173,9 +173,12 @@ newstat_copyout(struct stat *buf, void *ubuf)
tbuf.st_gid = buf->st_gid;
tbuf.st_rdev = buf->st_rdev;
tbuf.st_size = buf->st_size;
tbuf.st_atime = buf->st_atime;
tbuf.st_mtime = buf->st_mtime;
tbuf.st_ctime = buf->st_ctime;
tbuf.st_atim.tv_sec = buf->st_atim.tv_sec;
tbuf.st_atim.tv_nsec = buf->st_atim.tv_nsec;
tbuf.st_mtim.tv_sec = buf->st_mtim.tv_sec;
tbuf.st_mtim.tv_nsec = buf->st_mtim.tv_nsec;
tbuf.st_ctim.tv_sec = buf->st_ctim.tv_sec;
tbuf.st_ctim.tv_nsec = buf->st_ctim.tv_nsec;
tbuf.st_blksize = buf->st_blksize;
tbuf.st_blocks = buf->st_blocks;
@ -260,9 +263,12 @@ stat_copyout(struct stat *buf, void *ubuf)
lbuf.st_size = buf->st_size;
else
lbuf.st_size = -2;
lbuf.st_atime = buf->st_atime;
lbuf.st_mtime = buf->st_mtime;
lbuf.st_ctime = buf->st_ctime;
lbuf.st_atim.tv_sec = buf->st_atim.tv_sec;
lbuf.st_atim.tv_nsec = buf->st_atim.tv_nsec;
lbuf.st_mtim.tv_sec = buf->st_mtim.tv_sec;
lbuf.st_mtim.tv_nsec = buf->st_mtim.tv_nsec;
lbuf.st_ctim.tv_sec = buf->st_ctim.tv_sec;
lbuf.st_ctim.tv_nsec = buf->st_ctim.tv_nsec;
lbuf.st_blksize = buf->st_blksize;
lbuf.st_blocks = buf->st_blocks;
lbuf.st_flags = buf->st_flags;
@ -498,9 +504,12 @@ stat64_copyout(struct stat *buf, void *ubuf)
lbuf.st_gid = buf->st_gid;
lbuf.st_rdev = buf->st_rdev;
lbuf.st_size = buf->st_size;
lbuf.st_atime = buf->st_atime;
lbuf.st_mtime = buf->st_mtime;
lbuf.st_ctime = buf->st_ctime;
lbuf.st_atim.tv_sec = buf->st_atim.tv_sec;
lbuf.st_atim.tv_nsec = buf->st_atim.tv_nsec;
lbuf.st_mtim.tv_sec = buf->st_mtim.tv_sec;
lbuf.st_mtim.tv_nsec = buf->st_mtim.tv_nsec;
lbuf.st_ctim.tv_sec = buf->st_ctim.tv_sec;
lbuf.st_ctim.tv_nsec = buf->st_ctim.tv_nsec;
lbuf.st_blksize = buf->st_blksize;
lbuf.st_blocks = buf->st_blocks;

View file

@ -106,9 +106,9 @@ bsd_to_svr4_stat(st, st4)
st4->st_gid = st->st_gid;
st4->st_rdev = bsd_to_svr4_odev_t(st->st_rdev);
st4->st_size = st->st_size;
st4->st_atim = st->st_atimespec.tv_sec;
st4->st_mtim = st->st_mtimespec.tv_sec;
st4->st_ctim = st->st_ctimespec.tv_sec;
st4->st_atim = st->st_atim.tv_sec;
st4->st_mtim = st->st_mtim.tv_sec;
st4->st_ctim = st->st_ctim.tv_sec;
}
#endif
@ -127,9 +127,9 @@ bsd_to_svr4_xstat(st, st4)
st4->st_gid = st->st_gid;
st4->st_rdev = bsd_to_svr4_dev_t(st->st_rdev);
st4->st_size = st->st_size;
st4->st_atim = st->st_atimespec;
st4->st_mtim = st->st_mtimespec;
st4->st_ctim = st->st_ctimespec;
st4->st_atim = st->st_atim;
st4->st_mtim = st->st_mtim;
st4->st_ctim = st->st_ctim;
st4->st_blksize = st->st_blksize;
st4->st_blocks = st->st_blocks;
strcpy(st4->st_fstype, "unknown");
@ -150,9 +150,9 @@ bsd_to_svr4_stat64(st, st4)
st4->st_gid = st->st_gid;
st4->st_rdev = bsd_to_svr4_dev_t(st->st_rdev);
st4->st_size = st->st_size;
st4->st_atim = st->st_atimespec;
st4->st_mtim = st->st_mtimespec;
st4->st_ctim = st->st_ctimespec;
st4->st_atim = st->st_atim;
st4->st_mtim = st->st_mtim;
st4->st_ctim = st->st_ctim;
st4->st_blksize = st->st_blksize;
st4->st_blocks = st->st_blocks;
strcpy(st4->st_fstype, "unknown");

View file

@ -56,8 +56,7 @@ __FBSDID("$FreeBSD$");
#define X86BIOS_IVT_SIZE 0x00000500 /* 1K + 256 (BDA) */
#define X86BIOS_SEG_SIZE 0x00010000 /* 64K */
#define X86BIOS_MEM_SIZE (0x00100000 + X86BIOS_SEG_SIZE)
/* 1M + 64K (high memory) */
#define X86BIOS_MEM_SIZE 0x00100000 /* 1M */
#define X86BIOS_IVT_BASE 0x00000000
#define X86BIOS_RAM_BASE 0x00001000
@ -69,7 +68,6 @@ __FBSDID("$FreeBSD$");
#define X86BIOS_R_DS _pad1
#define X86BIOS_R_SS _pad2
#define X86BIOS_R_SP _pad3.I16_reg.x_reg
static struct x86emu x86bios_emu;
@ -113,15 +111,16 @@ x86bios_set_fault(struct x86emu *emu, uint32_t addr)
static void *
x86bios_get_pages(uint32_t offset, size_t size)
{
int i;
vm_offset_t page;
if (offset + size > X86BIOS_MEM_SIZE)
if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
return (NULL);
i = offset / X86BIOS_PAGE_SIZE;
if (x86bios_map[i] != 0)
return ((void *)(x86bios_map[i] + offset -
i * X86BIOS_PAGE_SIZE));
if (offset >= X86BIOS_MEM_SIZE)
offset -= X86BIOS_MEM_SIZE;
page = x86bios_map[offset / X86BIOS_PAGE_SIZE];
if (page != 0)
return ((void *)(page + offset % X86BIOS_PAGE_SIZE));
return (NULL);
}
@ -306,8 +305,8 @@ x86bios_emu_get_intr(struct x86emu *emu, int intno)
sp[2] = htole16(emu->x86.R_FLG);
iv = x86bios_get_intr(intno);
emu->x86.R_IP = iv & 0x000f;
emu->x86.R_CS = (iv >> 12) & 0xffff;
emu->x86.R_IP = iv & 0xffff;
emu->x86.R_CS = (iv >> 16) & 0xffff;
emu->x86.R_FLG &= ~(F_IF | F_TF);
}
@ -354,7 +353,6 @@ x86bios_init_regs(struct x86regs *regs)
bzero(regs, sizeof(*regs));
regs->X86BIOS_R_DS = 0x40;
regs->X86BIOS_R_SS = x86bios_seg_phys >> 4;
regs->X86BIOS_R_SP = 0xfffe;
}
void
@ -526,13 +524,6 @@ x86bios_map_mem(void)
return (1);
}
#endif
/* Change attribute for high memory. */
if (pmap_change_attr((vm_offset_t)x86bios_rom + X86BIOS_ROM_SIZE -
X86BIOS_SEG_SIZE, X86BIOS_SEG_SIZE, PAT_WRITE_BACK) != 0) {
pmap_unmapdev((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
return (1);
}
x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
@ -556,10 +547,6 @@ x86bios_map_mem(void)
X86BIOS_ROM_BASE, X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
(void *)((vm_offset_t)x86bios_rom + X86BIOS_ROM_BASE -
(vm_offset_t)x86bios_rom_phys));
printf("x86bios: HIMEM 0x%06x-0x%06x at %p\n",
X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE, X86BIOS_MEM_SIZE - 1,
(void *)((vm_offset_t)x86bios_rom + X86BIOS_ROM_SIZE -
X86BIOS_SEG_SIZE));
}
return (0);

View file

@ -15,6 +15,10 @@ MKMODULESENV+= DESTDIR="${DESTDIR}"
SYSDIR?= ${S:C;^[^/];${.CURDIR}/&;}
MKMODULESENV+= KERNBUILDDIR="${.CURDIR}" SYSDIR="${SYSDIR}"
.if defined(CONF_CFLAGS)
MKMODULESENV+= CONF_CFLAGS="${CONF_CFLAGS}"
.endif
.MAIN: all
.for target in all clean cleandepend cleandir clobber depend install \

View file

@ -328,6 +328,9 @@ ${_src}:
.endfor
.endif
# Respect configuration-specific C flags.
CFLAGS+= ${CONF_CFLAGS}
MFILES?= dev/acpica/acpi_if.m dev/acpi_support/acpi_wmi_if.m \
dev/agp/agp_if.m dev/ata/ata_if.m dev/eisa/eisa_if.m \
dev/iicbus/iicbb_if.m dev/iicbus/iicbus_if.m \

View file

@ -437,6 +437,7 @@ SCTP_MBCNT_LOGGING opt_sctp.h # Log to KTR mbcnt activity
SCTP_PACKET_LOGGING opt_sctp.h # Log to a packet buffer last N packets
SCTP_LTRACE_CHUNKS opt_sctp.h # Log to KTR chunks processed
SCTP_LTRACE_ERRORS opt_sctp.h # Log to KTR error returns.
SCTP_USE_PERCPU_STAT opt_sctp.h # Use per cpu stats.
#
#
#

View file

@ -2568,8 +2568,15 @@ ata_raid_intel_read_meta(device_t dev, struct ar_softc **raidp)
if (meta->generation >= raid->generation) {
for (disk = 0; disk < raid->total_disks; disk++) {
struct ata_device *atadev = device_get_softc(parent);
int len;
if (!strncmp(raid->disks[disk].serial, atadev->param.serial,
for (len = 0; len < sizeof(atadev->param.serial); len++) {
if (atadev->param.serial[len] < 0x20)
break;
}
len = (len > sizeof(raid->disks[disk].serial)) ?
len - sizeof(raid->disks[disk].serial) : 0;
if (!strncmp(raid->disks[disk].serial, atadev->param.serial + len,
sizeof(raid->disks[disk].serial))) {
raid->disks[disk].dev = parent;
raid->disks[disk].flags |= (AR_DF_PRESENT | AR_DF_ONLINE);
@ -2639,8 +2646,15 @@ ata_raid_intel_write_meta(struct ar_softc *rdp)
device_get_softc(device_get_parent(rdp->disks[disk].dev));
struct ata_device *atadev =
device_get_softc(rdp->disks[disk].dev);
int len;
bcopy(atadev->param.serial, meta->disk[disk].serial,
for (len = 0; len < sizeof(atadev->param.serial); len++) {
if (atadev->param.serial[len] < 0x20)
break;
}
len = (len > sizeof(rdp->disks[disk].serial)) ?
len - sizeof(rdp->disks[disk].serial) : 0;
bcopy(atadev->param.serial + len, meta->disk[disk].serial,
sizeof(rdp->disks[disk].serial));
meta->disk[disk].sectors = rdp->disks[disk].sectors;
meta->disk[disk].id = (ch->unit << 16) | atadev->unit;

View file

@ -421,7 +421,6 @@ static uint32_t bge_readreg_ind(struct bge_softc *, int);
#endif
static void bge_writemem_direct(struct bge_softc *, int, int);
static void bge_writereg_ind(struct bge_softc *, int, int);
static void bge_set_max_readrq(struct bge_softc *);
static int bge_miibus_readreg(device_t, int, int);
static int bge_miibus_writereg(device_t, int, int, int);
@ -561,32 +560,6 @@ bge_writemem_ind(struct bge_softc *sc, int off, int val)
pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4);
}
/*
* PCI Express only
*/
static void
bge_set_max_readrq(struct bge_softc *sc)
{
device_t dev;
uint16_t val;
dev = sc->bge_dev;
val = pci_read_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL, 2);
if ((val & PCIM_EXP_CTL_MAX_READ_REQUEST) !=
BGE_PCIE_DEVCTL_MAX_READRQ_4096) {
if (bootverbose)
device_printf(dev, "adjust device control 0x%04x ",
val);
val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST;
val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096;
pci_write_config(dev, sc->bge_expcap + PCIR_EXPRESS_DEVICE_CTL,
val, 2);
if (bootverbose)
printf("-> 0x%04x\n", val);
}
}
#ifdef notdef
static uint32_t
bge_readreg_ind(struct bge_softc *sc, int off)
@ -2695,7 +2668,8 @@ bge_attach(device_t dev)
*/
sc->bge_flags |= BGE_FLAG_PCIE;
sc->bge_expcap = reg;
bge_set_max_readrq(sc);
if (pci_get_max_read_req(dev) != 4096)
pci_set_max_read_req(dev, 4096);
} else {
/*
* Check if the device is in PCI-X Mode.

View file

@ -89,9 +89,9 @@
* EEProm stuff
*/
struct eeProm {
short offset;
short count;
u_char bytes[ 256 ];
short offset;
short count;
unsigned char bytes[ 256 ];
};
@ -147,7 +147,7 @@ struct eeProm {
* b23-b16: i2c addr (write)
* b31-b24: 1 = write, 0 = read
*/
#define BT848_I2CWR _IOWR('x', 57, u_long) /* i2c read-write */
#define BT848_I2CWR _IOWR('x', 57, unsigned long) /* i2c read-write */
struct bktr_msp_control {
unsigned char function;
@ -192,10 +192,10 @@ typedef enum { METEOR_PIXTYPE_RGB, METEOR_PIXTYPE_YUV,
struct meteor_pixfmt {
u_int index; /* Index in supported pixfmt list */
unsigned int index; /* Index in supported pixfmt list */
METEOR_PIXTYPE type; /* What's the board gonna feed us */
u_int Bpp; /* Bytes per pixel */
u_long masks[3]; /* R,G,B or Y,U,V masks, respectively */
unsigned int Bpp; /* Bytes per pixel */
unsigned long masks[3]; /* R,G,B or Y,U,V masks, respectively */
unsigned swap_bytes :1; /* Bytes swapped within shorts */
unsigned swap_shorts:1; /* Shorts swapped within longs */
};

View file

@ -50,27 +50,27 @@ struct meteor_capframe {
/* structure for METEOR[GS]ETGEO - get/set geometry */
struct meteor_geomet {
u_short rows;
u_short columns;
u_short frames;
u_long oformat;
unsigned short rows;
unsigned short columns;
unsigned short frames;
unsigned long oformat;
} ;
/* structure for METEORGCOUNT-get count of frames, fifo errors and dma errors */
struct meteor_counts {
u_long fifo_errors; /* count of fifo errors since open */
u_long dma_errors; /* count of dma errors since open */
u_long frames_captured; /* count of frames captured since open */
u_long even_fields_captured; /* count of even fields captured */
u_long odd_fields_captured; /* count of odd fields captured */
unsigned long fifo_errors; /* count of fifo errors since open */
unsigned long dma_errors; /* count of dma errors since open */
unsigned long frames_captured; /* count of frames captured since open */
unsigned long even_fields_captured; /* count of even fields captured */
unsigned long odd_fields_captured; /* count of odd fields captured */
} ;
/* structure for getting and setting direct transfers to vram */
struct meteor_video {
u_long addr; /* Address of location to dma to */
u_long width; /* Width of memory area */
u_long banksize; /* Size of Vram bank */
u_long ramsize; /* Size of Vram */
unsigned long addr; /* Address of location to dma to */
unsigned long width; /* Width of memory area */
unsigned long banksize; /* Size of Vram bank */
unsigned long ramsize; /* Size of Vram */
};
#define METEORCAPTUR _IOW('x', 1, int) /* capture a frame */

View file

@ -1,5 +1,6 @@
/*-
* Copyright (c) 1998 Kazutaka YOKOTA and Michael Smith
* Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -188,7 +189,7 @@ static int vesa_bios_load_palette2(int start, int colors, u_char *r, u_char *g,
#define STATE_ALL (STATE_HW | STATE_DATA | STATE_DAC | STATE_REG)
static ssize_t vesa_bios_state_buf_size(void);
static int vesa_bios_save_restore(int code, void *p, size_t size);
#if 0
#ifdef MODE_TABLE_BROKEN
static int vesa_bios_get_line_length(void);
#endif
static int vesa_bios_set_line_length(int pixel, int *bytes, int *lines);
@ -199,6 +200,7 @@ static int vesa_bios_set_start(int x, int y);
static int vesa_map_gen_mode_num(int type, int color, int mode);
static int vesa_translate_flags(u_int16_t vflags);
static int vesa_translate_mmodel(u_int8_t vmodel);
static int vesa_get_bpscanline(struct vesa_mode *vmode);
static int vesa_bios_init(void);
static void vesa_clear_modes(video_info_t *info, int color);
@ -558,7 +560,7 @@ vesa_bios_save_restore(int code, void *p, size_t size)
return (regs.R_AX != 0x004f);
}
#if 0
#ifdef MODE_TABLE_BROKEN
static int
vesa_bios_get_line_length(void)
{
@ -709,6 +711,43 @@ vesa_translate_mmodel(u_int8_t vmodel)
return (V_INFO_MM_OTHER);
}
static int
vesa_get_bpscanline(struct vesa_mode *vmode)
{
int bpsl;
if ((vmode->v_modeattr & V_MODEGRAPHICS) != 0) {
/* Find the minimum length. */
switch (vmode->v_bpp / vmode->v_planes) {
case 1:
bpsl = vmode->v_width / 8;
break;
case 2:
bpsl = vmode->v_width / 4;
break;
case 4:
bpsl = vmode->v_width / 2;
break;
default:
bpsl = vmode->v_width * ((vmode->v_bpp + 7) / 8);
bpsl /= vmode->v_planes;
break;
}
/* Use VBE 3.0 information if it looks sane. */
if ((vmode->v_modeattr & V_MODELFB) != 0 &&
vesa_adp_info->v_version >= 0x0300 &&
vmode->v_linbpscanline > bpsl)
return (vmode->v_linbpscanline);
/* Return the minimum if the mode table looks absurd. */
if (vmode->v_bpscanline < bpsl)
return (bpsl);
}
return (vmode->v_bpscanline);
}
#define VESA_MAXSTR 256
#define VESA_STRCPY(dst, src) do { \
@ -733,7 +772,6 @@ vesa_bios_init(void)
void *vmbuf;
uint32_t offs;
uint16_t vers;
int bpsl;
int is_via_cle266;
int modes;
int i;
@ -858,9 +896,7 @@ vesa_bios_init(void)
}
#endif
bpsl = (vmode.v_modeattr & V_MODELFB) != 0 && vers >= 0x0300 ?
vmode.v_linbpscanline : vmode.v_bpscanline;
bsize = bpsl * vmode.v_height;
bsize = vesa_get_bpscanline(&vmode) * vmode.v_height;
if ((vmode.v_modeattr & V_MODEGRAPHICS) != 0)
bsize *= vmode.v_planes;
@ -1209,7 +1245,7 @@ vesa_set_mode(video_adapter_t *adp, int mode)
int10_set_mode(adp->va_initial_bios_mode);
if (adp->va_info.vi_flags & V_INFO_LINEAR)
pmap_unmapdev(adp->va_buffer,
adp->va_buffer_size);
vesa_adp_info->v_memsize * 64 * 1024);
/*
* Once (*prevvidsw->get_info)() succeeded,
* (*prevvidsw->set_mode)() below won't fail...
@ -1241,12 +1277,12 @@ vesa_set_mode(video_adapter_t *adp, int mode)
if ((vesa_adp_info->v_flags & V_DAC8) != 0 &&
(info.vi_flags & V_INFO_GRAPHICS) != 0 &&
(info.vi_flags & V_INFO_NONVGA) != 0 &&
vesa_bios_set_dac(8) > 6)
adp->va_flags |= V_ADP_DAC8;
if (adp->va_info.vi_flags & V_INFO_LINEAR)
pmap_unmapdev(adp->va_buffer, adp->va_buffer_size);
pmap_unmapdev(adp->va_buffer,
vesa_adp_info->v_memsize * 64 * 1024);
#if VESA_DEBUG > 0
printf("VESA: mode set!\n");
@ -1257,13 +1293,31 @@ vesa_set_mode(video_adapter_t *adp, int mode)
(info.vi_flags & V_INFO_COLOR) ? V_ADP_COLOR : 0;
vesa_adp->va_crtc_addr =
(vesa_adp->va_flags & V_ADP_COLOR) ? COLOR_CRTC : MONO_CRTC;
vesa_adp->va_line_width = info.vi_buffer_size / info.vi_height;
if ((info.vi_flags & V_INFO_GRAPHICS) != 0)
vesa_adp->va_line_width /= info.vi_planes;
#ifdef MODE_TABLE_BROKEN
/* If VBE function returns bigger bytes per scan line, use it. */
{
int bpsl = vesa_bios_get_line_length();
if (bpsl > vesa_adp->va_line_width) {
vesa_adp->va_line_width = bpsl;
info.vi_buffer_size = bpsl * info.vi_height;
if ((info.vi_flags & V_INFO_GRAPHICS) != 0)
info.vi_buffer_size *= info.vi_planes;
}
}
#endif
if (info.vi_flags & V_INFO_LINEAR) {
#if VESA_DEBUG > 1
printf("VESA: setting up LFB\n");
#endif
vesa_adp->va_buffer =
(vm_offset_t)pmap_mapdev_attr(info.vi_buffer,
info.vi_buffer_size, PAT_WRITE_COMBINING);
vesa_adp_info->v_memsize * 64 * 1024, PAT_WRITE_COMBINING);
vesa_adp->va_window = vesa_adp->va_buffer;
vesa_adp->va_window_size = info.vi_buffer_size / info.vi_planes;
vesa_adp->va_window_gran = info.vi_buffer_size / info.vi_planes;
@ -1275,9 +1329,6 @@ vesa_set_mode(video_adapter_t *adp, int mode)
}
vesa_adp->va_buffer_size = info.vi_buffer_size;
vesa_adp->va_window_orig = 0;
vesa_adp->va_line_width = info.vi_buffer_size / info.vi_height;
if ((info.vi_flags & V_INFO_GRAPHICS) != 0)
vesa_adp->va_line_width /= info.vi_planes;
vesa_adp->va_disp_start.x = 0;
vesa_adp->va_disp_start.y = 0;
#if VESA_DEBUG > 0
@ -1322,10 +1373,10 @@ vesa_save_palette(video_adapter_t *adp, u_char *palette)
{
int bits;
if (adp == vesa_adp && VESA_MODE(adp->va_mode) &&
(adp->va_info.vi_flags & V_INFO_NONVGA) != 0) {
if (adp == vesa_adp && VESA_MODE(adp->va_mode)) {
bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 8 : 6;
return (vesa_bios_save_palette(0, 256, palette, bits));
if (vesa_bios_save_palette(0, 256, palette, bits) == 0)
return (0);
}
return ((*prevvidsw->save_palette)(adp, palette));
@ -1336,10 +1387,10 @@ vesa_load_palette(video_adapter_t *adp, u_char *palette)
{
int bits;
if (adp == vesa_adp && VESA_MODE(adp->va_mode) &&
(adp->va_info.vi_flags & V_INFO_NONVGA) != 0) {
if (adp == vesa_adp && VESA_MODE(adp->va_mode)) {
bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 8 : 6;
return (vesa_bios_load_palette(0, 256, palette, bits));
if (vesa_bios_load_palette(0, 256, palette, bits) == 0)
return (0);
}
return ((*prevvidsw->load_palette)(adp, palette));
@ -1544,8 +1595,6 @@ get_palette(video_adapter_t *adp, int base, int count,
return (1);
if (!VESA_MODE(adp->va_mode))
return (1);
if ((adp->va_info.vi_flags & V_INFO_NONVGA) == 0)
return (1);
bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 8 : 6;
r = malloc(count * 3, M_DEVBUF, M_WAITOK);
@ -1582,8 +1631,6 @@ set_palette(video_adapter_t *adp, int base, int count,
return (1);
if (!VESA_MODE(adp->va_mode))
return (1);
if ((adp->va_info.vi_flags & V_INFO_NONVGA) == 0)
return (1);
bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 8 : 6;
r = malloc(count * 3, M_DEVBUF, M_WAITOK);

View file

@ -1979,6 +1979,7 @@ vga_show_font(video_adapter_t *adp, int page)
static int
vga_save_palette(video_adapter_t *adp, u_char *palette)
{
int bits;
int i;
prologue(adp, V_ADP_PALETTE, ENODEV);
@ -1988,8 +1989,9 @@ vga_save_palette(video_adapter_t *adp, u_char *palette)
* VGA has 6 bit DAC .
*/
outb(PALRADR, 0x00);
bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 0 : 2;
for (i = 0; i < 256*3; ++i)
palette[i] = inb(PALDATA) << 2;
palette[i] = inb(PALDATA) << bits;
inb(adp->va_crtc_addr + 6); /* reset flip/flop */
return 0;
}
@ -1998,15 +2000,17 @@ static int
vga_save_palette2(video_adapter_t *adp, int base, int count,
u_char *r, u_char *g, u_char *b)
{
int bits;
int i;
prologue(adp, V_ADP_PALETTE, ENODEV);
outb(PALRADR, base);
bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 0 : 2;
for (i = 0; i < count; ++i) {
r[i] = inb(PALDATA) << 2;
g[i] = inb(PALDATA) << 2;
b[i] = inb(PALDATA) << 2;
r[i] = inb(PALDATA) << bits;
g[i] = inb(PALDATA) << bits;
b[i] = inb(PALDATA) << bits;
}
inb(adp->va_crtc_addr + 6); /* reset flip/flop */
return 0;
@ -2021,14 +2025,16 @@ vga_save_palette2(video_adapter_t *adp, int base, int count,
static int
vga_load_palette(video_adapter_t *adp, u_char *palette)
{
int bits;
int i;
prologue(adp, V_ADP_PALETTE, ENODEV);
outb(PIXMASK, 0xff); /* no pixelmask */
outb(PALWADR, 0x00);
bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 0 : 2;
for (i = 0; i < 256*3; ++i)
outb(PALDATA, palette[i] >> 2);
outb(PALDATA, palette[i] >> bits);
inb(adp->va_crtc_addr + 6); /* reset flip/flop */
outb(ATC, 0x20); /* enable palette */
return 0;
@ -2038,16 +2044,18 @@ static int
vga_load_palette2(video_adapter_t *adp, int base, int count,
u_char *r, u_char *g, u_char *b)
{
int bits;
int i;
prologue(adp, V_ADP_PALETTE, ENODEV);
outb(PIXMASK, 0xff); /* no pixelmask */
outb(PALWADR, base);
bits = (adp->va_flags & V_ADP_DAC8) != 0 ? 0 : 2;
for (i = 0; i < count; ++i) {
outb(PALDATA, r[i] >> 2);
outb(PALDATA, g[i] >> 2);
outb(PALDATA, b[i] >> 2);
outb(PALDATA, r[i] >> bits);
outb(PALDATA, g[i] >> bits);
outb(PALDATA, b[i] >> bits);
}
inb(adp->va_crtc_addr + 6); /* reset flip/flop */
outb(ATC, 0x20); /* enable palette */

View file

@ -298,7 +298,6 @@ pmclog_loop(void *arg)
mtx_unlock(&pmc_kthread_mtx);
sigpipe_retry:
/* process the request */
PMCDBG(LOG,WRI,2, "po=%p base=%p ptr=%p", po,
lb->plb_base, lb->plb_ptr);
@ -322,9 +321,6 @@ sigpipe_retry:
if (error) {
/* XXX some errors are recoverable */
if (error == EPIPE)
goto sigpipe_retry;
/* send a SIGIO to the owner and exit */
PROC_LOCK(p);
psignal(p, SIGIO);

View file

@ -74,14 +74,9 @@ __FBSDID("$FreeBSD$");
*/
static const char fconf[] = "Chan %d PortDB[%d] changed:\n current =(0x%x@0x%06x 0x%08x%08x 0x%08x%08x)\n database=(0x%x@0x%06x 0x%08x%08x 0x%08x%08x)";
static const char notresp[] = "Not RESPONSE in RESPONSE Queue (type 0x%x) @ idx %d (next %d) nlooked %d";
static const char xact1[] = "HBA attempted queued transaction with disconnect not set for %d.%d.%d";
static const char xact2[] = "HBA attempted queued transaction to target routine %d on target %d bus %d";
static const char xact3[] = "HBA attempted queued cmd for %d.%d.%d when queueing disabled";
static const char pskip[] = "SCSI phase skipped for target %d.%d.%d";
static const char topology[] = "Chan %d WWPN 0x%08x%08x PortID 0x%06x N-Port Handle %d, Connection '%s'";
static const char finmsg[] = "%d.%d.%d: FIN dl%d resid %ld STS 0x%x SKEY %c XS_ERR=0x%x";
static const char sc4[] = "NVRAM";
static const char bun[] = "bad underrun for %d.%d (count %d, resid %d, status %s)";
static const char bun[] = "bad underrun (count %d, resid %d, status %s)";
static const char lipd[] = "Chan %d LIP destroyed %d active commands";
static const char sacq[] = "unable to acquire scratch area";
@ -107,6 +102,7 @@ static const uint8_t alpa_map[] = {
/*
* Local function prototypes.
*/
static void isp_prt_endcmd(ispsoftc_t *, XS_T *);
static int isp_parse_async(ispsoftc_t *, uint16_t);
static int isp_parse_async_fc(ispsoftc_t *, uint16_t);
static int isp_handle_other_response(ispsoftc_t *, int, isphdr_t *, uint32_t *);
@ -1431,10 +1427,8 @@ isp_scsi_channel_init(ispsoftc_t *isp, int chan)
(sdp->isp_devparam[tgt].goal_offset << 8) |
(sdp->isp_devparam[tgt].goal_period);
}
isp_prt(isp, ISP_LOGDEBUG0,
"Initial Settings bus%d tgt%d flags 0x%x off 0x%x per 0x%x",
chan, tgt, mbs.param[2], mbs.param[3] >> 8,
mbs.param[3] & 0xff);
isp_prt(isp, ISP_LOGDEBUG0, "Initial Settings bus%d tgt%d flags 0x%x off 0x%x per 0x%x",
chan, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff);
isp_mboxcmd(isp, &mbs);
if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
sdf = DPARM_SAFE_DFLT;
@ -1705,8 +1699,7 @@ isp_fibre_init(ispsoftc_t *isp)
isp_prt(isp, ISP_LOGERR, sacq);
return;
}
isp_prt(isp, ISP_LOGDEBUG0,
"isp_fibre_init: fwopt 0x%x xfwopt 0x%x zfwopt 0x%x",
isp_prt(isp, ISP_LOGDEBUG0, "isp_fibre_init: fwopt 0x%x xfwopt 0x%x zfwopt 0x%x",
icbp->icb_fwoptions, icbp->icb_xfwoptions, icbp->icb_zfwoptions);
isp_put_icb(isp, icbp, (isp_icb_t *)fcp->isp_scratch);
@ -4435,7 +4428,7 @@ isp_start(XS_T *xs)
*/
return (dmaresult);
}
isp_prt(isp, ISP_LOGDEBUG0, "START cmd for %d.%d.%d cmd 0x%x datalen %ld", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
isp_xs_prt(isp, xs, ISP_LOGDEBUG0, "START cmd cdb[0]=0x%x datalen %ld", XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
isp->isp_nactive++;
return (CMD_QUEUED);
}
@ -5248,7 +5241,7 @@ again:
} else {
ptr = rnames[resp[FCP_RSPNS_CODE_OFFSET]];
}
isp_prt(isp, ISP_LOGWARN, "%d.%d.%d FCP RESPONSE, LENGTH %u: %s CDB0=0x%02x", XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), rlen, ptr, XS_CDBP(xs)[0] & 0xff);
isp_xs_prt(isp, xs, ISP_LOGWARN, "FCP RESPONSE, LENGTH %u: %s CDB0=0x%02x", rlen, ptr, XS_CDBP(xs)[0] & 0xff);
if (resp[FCP_RSPNS_CODE_OFFSET] != 0) {
XS_SETERR(xs, HBA_BOTCH);
}
@ -5325,25 +5318,9 @@ again:
isp_destroy_handle(isp, sp->req_handle);
if (((isp->isp_dblev & (ISP_LOGDEBUG1|ISP_LOGDEBUG2|ISP_LOGDEBUG3))) ||
((isp->isp_dblev & ISP_LOGDEBUG0) && ((!XS_NOERR(xs)) ||
(*XS_STSP(xs) != SCSI_GOOD)))) {
char skey;
if (req_state_flags & RQSF_GOT_SENSE) {
skey = XS_SNSKEY(xs) & 0xf;
if (skey < 10)
skey += '0';
else
skey += 'a' - 10;
} else if (*XS_STSP(xs) == SCSI_CHECK) {
skey = '?';
} else {
skey = '.';
}
isp_prt(isp, ISP_LOGALL, finmsg, XS_CHANNEL(xs),
XS_TGT(xs), XS_LUN(xs), XS_XFRLEN(xs), (long) XS_GET_RESID(xs),
*XS_STSP(xs), skey, XS_ERR(xs));
((isp->isp_dblev & (ISP_LOGDEBUG0|ISP_LOG_CWARN) && ((!XS_NOERR(xs)) || (*XS_STSP(xs) != SCSI_GOOD))))) {
isp_prt_endcmd(isp, xs);
}
if (isp->isp_nactive > 0) {
isp->isp_nactive--;
}
@ -5393,6 +5370,25 @@ out:
* Support routines.
*/
static void
isp_prt_endcmd(ispsoftc_t *isp, XS_T *xs)
{
char cdbstr[16 * 5 + 1];
int i, lim;
lim = XS_CDBLEN(xs) > 16? 16 : XS_CDBLEN(xs);
ISP_SNPRINTF(cdbstr, sizeof (cdbstr), "0x%02x ", XS_CDBP(xs)[0]);
for (i = 1; i < lim; i++) {
ISP_SNPRINTF(cdbstr, sizeof (cdbstr), "%s0x%02x ", cdbstr, XS_CDBP(xs)[i]);
}
if (XS_SENSE_VALID(xs)) {
isp_xs_prt(isp, xs, ISP_LOGALL, "FIN dl%d resid %ld CDB=%s KEY/ASC/ASCQ=0x%02x/0x%02x/0x%02x",
XS_XFRLEN(xs), (long) XS_GET_RESID(xs), cdbstr, XS_SNSKEY(xs), XS_SNSASC(xs), XS_SNSASCQ(xs));
} else {
isp_xs_prt(isp, xs, ISP_LOGALL, "FIN dl%d resid %ld CDB=%s STS 0x%x XS_ERR=0x%x", XS_XFRLEN(xs), (long) XS_GET_RESID(xs), cdbstr, *XS_STSP(xs), XS_ERR(xs));
}
}
/*
* Parse an ASYNC mailbox complete
*
@ -5937,8 +5933,7 @@ isp_parse_async_fc(ispsoftc_t *isp, uint16_t mbox)
*/
static int
isp_handle_other_response(ispsoftc_t *isp, int type,
isphdr_t *hp, uint32_t *optrp)
isp_handle_other_response(ispsoftc_t *isp, int type, isphdr_t *hp, uint32_t *optrp)
{
switch (type) {
case RQSTYPE_STATUS_CONT:
@ -6010,24 +6005,18 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
case RQCS_INCOMPLETE:
if ((sp->req_state_flags & RQSF_GOT_TARGET) == 0) {
isp_prt(isp, ISP_LOGDEBUG1,
"Selection Timeout for %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGDEBUG1, "Selection Timeout");
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_SELTIMEOUT);
*rp = XS_XFRLEN(xs);
}
return;
}
isp_prt(isp, ISP_LOGERR,
"command incomplete for %d.%d.%d, state 0x%x",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
sp->req_state_flags);
isp_xs_prt(isp, xs, ISP_LOGERR, "Command Incomplete, state 0x%x", sp->req_state_flags);
break;
case RQCS_DMA_ERROR:
isp_prt(isp, ISP_LOGERR, "DMA error for command on %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "DMA Error");
*rp = XS_XFRLEN(xs);
break;
@ -6081,18 +6070,14 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
if (sp->req_status_flags & RQSTF_NEGOTIATION) {
ISP_SNPRINTF(buf, sizeof (buf), "%s Negotiation", buf);
}
isp_prt(isp, ISP_LOGERR, "%s", buf);
isp_prt(isp, ISP_LOGERR, "transport error for %d.%d.%d:\n%s",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), buf);
isp_xs_prt(isp, xs, ISP_LOGERR, "Transport Error: %s", buf);
*rp = XS_XFRLEN(xs);
break;
}
case RQCS_RESET_OCCURRED:
{
int chan;
isp_prt(isp, ISP_LOGWARN,
"bus reset destroyed command for %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGWARN, "Bus Reset destroyed command");
for (chan = 0; chan < isp->isp_nchan; chan++) {
FCPARAM(isp, chan)->sendmarker = 1;
}
@ -6103,8 +6088,7 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
return;
}
case RQCS_ABORTED:
isp_prt(isp, ISP_LOGERR, "command aborted for %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "Command Aborted");
ISP_SET_SENDMARKER(isp, XS_CHANNEL(xs), 1);
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_ABORTED);
@ -6112,8 +6096,7 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
return;
case RQCS_TIMEOUT:
isp_prt(isp, ISP_LOGWARN, "command timed out for %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGWARN, "Command timed out");
/*
* XXX: Check to see if we logged out of the device.
*/
@ -6124,83 +6107,62 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
case RQCS_DATA_OVERRUN:
XS_SET_RESID(xs, sp->req_resid);
isp_prt(isp, ISP_LOGERR, "data overrun (%ld) for command on %d.%d.%d",
(long) XS_GET_RESID(xs), XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "data overrun (%ld)", (long) XS_GET_RESID(xs));
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_DATAOVR);
}
return;
case RQCS_COMMAND_OVERRUN:
isp_prt(isp, ISP_LOGERR,
"command overrun for command on %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "command overrun");
break;
case RQCS_STATUS_OVERRUN:
isp_prt(isp, ISP_LOGERR,
"status overrun for command on %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "status overrun");
break;
case RQCS_BAD_MESSAGE:
isp_prt(isp, ISP_LOGERR,
"msg not COMMAND COMPLETE after status %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "msg not COMMAND COMPLETE after status");
break;
case RQCS_NO_MESSAGE_OUT:
isp_prt(isp, ISP_LOGERR,
"No MESSAGE OUT phase after selection on %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "No MESSAGE OUT phase after selection");
break;
case RQCS_EXT_ID_FAILED:
isp_prt(isp, ISP_LOGERR, "EXTENDED IDENTIFY failed %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "EXTENDED IDENTIFY failed");
break;
case RQCS_IDE_MSG_FAILED:
isp_prt(isp, ISP_LOGERR,
"INITIATOR DETECTED ERROR rejected by %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "INITIATOR DETECTED ERROR rejected");
break;
case RQCS_ABORT_MSG_FAILED:
isp_prt(isp, ISP_LOGERR, "ABORT OPERATION rejected by %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "ABORT OPERATION rejected");
break;
case RQCS_REJECT_MSG_FAILED:
isp_prt(isp, ISP_LOGERR, "MESSAGE REJECT rejected by %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "MESSAGE REJECT rejected");
break;
case RQCS_NOP_MSG_FAILED:
isp_prt(isp, ISP_LOGERR, "NOP rejected by %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "NOP rejected");
break;
case RQCS_PARITY_ERROR_MSG_FAILED:
isp_prt(isp, ISP_LOGERR,
"MESSAGE PARITY ERROR rejected by %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "MESSAGE PARITY ERROR rejected");
break;
case RQCS_DEVICE_RESET_MSG_FAILED:
isp_prt(isp, ISP_LOGWARN,
"BUS DEVICE RESET rejected by %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGWARN, "BUS DEVICE RESET rejected");
break;
case RQCS_ID_MSG_FAILED:
isp_prt(isp, ISP_LOGERR, "IDENTIFY rejected by %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "IDENTIFY rejected");
break;
case RQCS_UNEXP_BUS_FREE:
isp_prt(isp, ISP_LOGERR, "%d.%d.%d had an unexpected bus free",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "Unexpected Bus Free");
break;
case RQCS_DATA_UNDERRUN:
@ -6208,9 +6170,7 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
if (IS_FC(isp)) {
int ru_marked = (sp->req_scsi_status & RQCS_RU) != 0;
if (!ru_marked || sp->req_resid > XS_XFRLEN(xs)) {
isp_prt(isp, ISP_LOGWARN, bun, XS_TGT(xs),
XS_LUN(xs), XS_XFRLEN(xs), sp->req_resid,
(ru_marked)? "marked" : "not marked");
isp_xs_prt(isp, xs, ISP_LOGWARN, bun, XS_XFRLEN(xs), sp->req_resid, (ru_marked)? "marked" : "not marked");
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_BOTCH);
}
@ -6225,18 +6185,15 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
}
case RQCS_XACT_ERR1:
isp_prt(isp, ISP_LOGERR, xact1, XS_CHANNEL(xs),
XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued transaction with disconnect not set");
break;
case RQCS_XACT_ERR2:
isp_prt(isp, ISP_LOGERR, xact2,
XS_LUN(xs), XS_TGT(xs), XS_CHANNEL(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued transaction to target routine %d", XS_LUN(xs));
break;
case RQCS_XACT_ERR3:
isp_prt(isp, ISP_LOGERR, xact3,
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "HBA attempted queued cmd when queueing disabled");
break;
case RQCS_BAD_ENTRY:
@ -6244,9 +6201,7 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
break;
case RQCS_QUEUE_FULL:
isp_prt(isp, ISP_LOGDEBUG0,
"internal queues full for %d.%d.%d status 0x%x",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), *XS_STSP(xs));
isp_xs_prt(isp, xs, ISP_LOGDEBUG0, "internal queues full status 0x%x", *XS_STSP(xs));
/*
* If QFULL or some other status byte is set, then this
@ -6270,23 +6225,18 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
return;
case RQCS_PHASE_SKIPPED:
isp_prt(isp, ISP_LOGERR, pskip, XS_CHANNEL(xs),
XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "SCSI phase skipped");
break;
case RQCS_ARQS_FAILED:
isp_prt(isp, ISP_LOGERR,
"Auto Request Sense failed for %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "Auto Request Sense Failed");
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_ARQFAIL);
}
return;
case RQCS_WIDE_FAILED:
isp_prt(isp, ISP_LOGERR,
"Wide Negotiation failed for %d.%d.%d",
XS_TGT(xs), XS_LUN(xs), XS_CHANNEL(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "Wide Negotiation Failed");
if (IS_SCSI(isp)) {
sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs));
sdp->isp_devparam[XS_TGT(xs)].goal_flags &= ~DPARM_WIDE;
@ -6299,9 +6249,7 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
return;
case RQCS_SYNCXFER_FAILED:
isp_prt(isp, ISP_LOGERR,
"SDTR Message failed for target %d.%d.%d",
XS_TGT(xs), XS_LUN(xs), XS_CHANNEL(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "SDTR Message Failed");
if (IS_SCSI(isp)) {
sdparam *sdp = SDPARAM(isp, XS_CHANNEL(xs));
sdp += XS_CHANNEL(xs);
@ -6312,9 +6260,7 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
break;
case RQCS_LVD_BUSERR:
isp_prt(isp, ISP_LOGERR,
"Bad LVD condition while talking to %d.%d.%d",
XS_TGT(xs), XS_LUN(xs), XS_CHANNEL(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "Bad LVD condition");
break;
case RQCS_PORT_UNAVAILABLE:
@ -6384,8 +6330,7 @@ isp_parse_status(ispsoftc_t *isp, ispstatusreq_t *sp, XS_T *xs, long *rp)
}
static void
isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp,
XS_T *xs, long *rp)
isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp, XS_T *xs, long *rp)
{
int ru_marked, sv_marked;
int chan = XS_CHANNEL(xs);
@ -6398,19 +6343,15 @@ isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp,
return;
case RQCS_DMA_ERROR:
isp_prt(isp, ISP_LOGERR, "DMA error for command on %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "DMA error");
break;
case RQCS_TRANSPORT_ERROR:
isp_prt(isp, ISP_LOGERR, "transport error for %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "Transport Error");
break;
case RQCS_RESET_OCCURRED:
isp_prt(isp, ISP_LOGWARN,
"reset destroyed command for %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGWARN, "reset destroyed command");
FCPARAM(isp, chan)->sendmarker = 1;
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_BUSRESET);
@ -6418,8 +6359,7 @@ isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp,
return;
case RQCS_ABORTED:
isp_prt(isp, ISP_LOGERR, "command aborted for %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "Command Aborted");
FCPARAM(isp, chan)->sendmarker = 1;
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_ABORTED);
@ -6427,8 +6367,7 @@ isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp,
return;
case RQCS_TIMEOUT:
isp_prt(isp, ISP_LOGWARN, "command timed out for %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGWARN, "Command Timed Out");
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_CMDTIMEOUT);
}
@ -6436,9 +6375,7 @@ isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp,
case RQCS_DATA_OVERRUN:
XS_SET_RESID(xs, sp->req_resid);
isp_prt(isp, ISP_LOGERR,
"data overrun for command on %d.%d.%d",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs));
isp_xs_prt(isp, xs, ISP_LOGERR, "Data Overrun");
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_DATAOVR);
}
@ -6471,19 +6408,14 @@ isp_parse_status_24xx(ispsoftc_t *isp, isp24xx_statusreq_t *sp,
sv_marked = (sp->req_scsi_status & (RQCS_SV|RQCS_RV)) != 0;
if ((ru_marked == 0 && sv_marked == 0) ||
(sp->req_resid > XS_XFRLEN(xs))) {
isp_prt(isp, ISP_LOGWARN, bun, XS_TGT(xs),
XS_LUN(xs), XS_XFRLEN(xs), sp->req_resid,
(ru_marked)? "marked" : "not marked");
isp_xs_prt(isp, xs, ISP_LOGWARN, bun, XS_XFRLEN(xs), sp->req_resid, (ru_marked)? "marked" : "not marked");
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_BOTCH);
}
return;
}
XS_SET_RESID(xs, sp->req_resid);
isp_prt(isp, ISP_LOGDEBUG0,
"%d.%d.%d data underrun (%d) for command 0x%x",
XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
sp->req_resid, XS_CDBP(xs)[0] & 0xff);
isp_xs_prt(isp, xs, ISP_LOGDEBUG0, "Data Underrun (%d) for command 0x%x", sp->req_resid, XS_CDBP(xs)[0] & 0xff);
if (XS_NOERR(xs)) {
XS_SETERR(xs, HBA_NOERROR);
}
@ -7384,8 +7316,7 @@ isp_spi_update(ispsoftc_t *isp, int chan)
if (sdp->isp_devparam[tgt].dev_enable == 0) {
sdp->isp_devparam[tgt].dev_update = 0;
sdp->isp_devparam[tgt].dev_refresh = 0;
isp_prt(isp, ISP_LOGDEBUG0,
"skipping target %d bus %d update", tgt, chan);
isp_prt(isp, ISP_LOGDEBUG0, "skipping target %d bus %d update", tgt, chan);
continue;
}
/*
@ -7441,10 +7372,8 @@ isp_spi_update(ispsoftc_t *isp, int chan)
sdp->isp_devparam[tgt].actv_flags &= ~DPARM_TQING;
sdp->isp_devparam[tgt].actv_flags |=
(sdp->isp_devparam[tgt].goal_flags & DPARM_TQING);
isp_prt(isp, ISP_LOGDEBUG0,
"bus %d set tgt %d flags 0x%x off 0x%x period 0x%x",
chan, tgt, mbs.param[2], mbs.param[3] >> 8,
mbs.param[3] & 0xff);
isp_prt(isp, ISP_LOGDEBUG0, "bus %d set tgt %d flags 0x%x off 0x%x period 0x%x",
chan, tgt, mbs.param[2], mbs.param[3] >> 8, mbs.param[3] & 0xff);
get = 0;
} else {
continue;
@ -7778,8 +7707,7 @@ isp_read_nvram(ispsoftc_t *isp, int bus)
nvram_data[2] != 'P') {
if (isp->isp_bustype != ISP_BT_SBUS) {
isp_prt(isp, ISP_LOGWARN, "invalid NVRAM header");
isp_prt(isp, ISP_LOGDEBUG0, "%x %x %x",
nvram_data[0], nvram_data[1], nvram_data[2]);
isp_prt(isp, ISP_LOGDEBUG0, "%x %x %x", nvram_data[0], nvram_data[1], nvram_data[2]);
}
retval = -1;
goto out;
@ -8294,8 +8222,7 @@ isp_parse_nvram_2100(ispsoftc_t *isp, uint8_t *nvram_data)
ISP2100_NVRAM_TOV(nvram_data));
fcp->isp_xfwoptions = ISP2100_XFW_OPTIONS(nvram_data);
fcp->isp_zfwoptions = ISP2100_ZFW_OPTIONS(nvram_data);
isp_prt(isp, ISP_LOGDEBUG0,
"xfwoptions 0x%x zfw options 0x%x",
isp_prt(isp, ISP_LOGDEBUG0, "xfwoptions 0x%x zfw options 0x%x",
ISP2100_XFW_OPTIONS(nvram_data), ISP2100_ZFW_OPTIONS(nvram_data));
}

View file

@ -5432,6 +5432,20 @@ isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
printf("\n");
}
void
isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...)
{
va_list ap;
if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
return;
}
xpt_print_path(xs->ccb_h.path);
va_start(ap, fmt);
vprintf(fmt, ap);
va_end(ap);
printf("\n");
}
uint64_t
isp_nanotime_sub(struct timespec *b, struct timespec *a)
{

View file

@ -424,6 +424,8 @@ default: \
imin((sizeof((ccb)->sense_data)), ccb->sense_len)
#define XS_SNSKEY(ccb) ((ccb)->sense_data.flags & 0xf)
#define XS_SNSASC(ccb) ((ccb)->sense_data.add_sense_code)
#define XS_SNSASCQ(ccb) ((ccb)->sense_data.add_sense_code_qual)
#define XS_TAG_P(ccb) \
(((ccb)->ccb_h.flags & CAM_TAG_ACTION_VALID) && \
(ccb)->tag_action != CAM_TAG_ACTION_NONE)
@ -461,7 +463,7 @@ default: \
(xs)->ccb_h.status |= CAM_AUTOSNS_VALID; \
memcpy(&(xs)->sense_data, sense_ptr, imin(XS_SNSLEN(xs), sense_len))
#define XS_SET_STATE_STAT(a, b, c)
#define XS_SENSE_VALID(xs) (((xs)->ccb_h.status & CAM_AUTOSNS_VALID) != 0)
#define DEFAULT_FRAMESIZE(isp) isp->isp_osinfo.framesize
#define DEFAULT_EXEC_THROTTLE(isp) isp->isp_osinfo.exec_throttle
@ -593,6 +595,7 @@ extern int isp_autoconfig;
* Platform Library Functions
*/
void isp_prt(ispsoftc_t *, int level, const char *, ...) __printflike(3, 4);
void isp_xs_prt(ispsoftc_t *, XS_T *, int level, const char *, ...) __printflike(4, 5);
uint64_t isp_nanotime_sub(struct timespec *, struct timespec *);
int isp_mbox_acquire(ispsoftc_t *);
void isp_mbox_wait_complete(ispsoftc_t *, mbreg_t *);

View file

@ -294,10 +294,10 @@ uint32_t
isp_handle_index(ispsoftc_t *isp, uint32_t handle)
{
if (!ISP_VALID_HANDLE(isp, handle)) {
return (handle & ISP_HANDLE_CMD_MASK);
} else {
isp_prt(isp, ISP_LOGERR, "%s: bad handle 0x%x", __func__, handle);
return (ISP_BAD_HANDLE_INDEX);
} else {
return (handle & ISP_HANDLE_CMD_MASK);
}
}

View file

@ -954,12 +954,13 @@ void isp_async(ispsoftc_t *, ispasync_t, ...);
/*
* Platform Dependent Error and Debug Printout
*
* Generally this is:
* Two required functions for each platform must be provided:
*
* void isp_prt(ispsoftc_t *, int level, const char *, ...)
* void isp_xs_prt(ispsoftc_t *, XS_T *, int level, const char *, ...)
*
* but due to compiler differences on different platforms this won't be
* formally done here. Instead, it goes in each platform definition file.
* formally defined here. Instead, they go in each platform definition file.
*/
#define ISP_LOGALL 0x0 /* log always */
@ -972,6 +973,7 @@ void isp_async(ispsoftc_t *, ispasync_t, ...);
#define ISP_LOGDEBUG2 0x40 /* log most debug messages */
#define ISP_LOGDEBUG3 0x80 /* log high frequency debug messages */
#define ISP_LOGSANCFG 0x100 /* log SAN configuration */
#define ISP_LOG_CWARN 0x200 /* log SCSI command "warnings" (e.g., check conditions) */
#define ISP_LOGTINFO 0x1000 /* log informational messages (target mode) */
#define ISP_LOGTDEBUG0 0x2000 /* log simple debug messages (target mode) */
#define ISP_LOGTDEBUG1 0x4000 /* log intermediate debug messages (target) */
@ -1045,6 +1047,8 @@ void isp_async(ispsoftc_t *, ispasync_t, ...);
* XS_SNSP(xs) gets a pointer to the associate sense data
* XS_SNSLEN(xs) gets the length of sense data storage
* XS_SNSKEY(xs) dereferences XS_SNSP to get the current stored Sense Key
* XS_SNSASC(xs) dereferences XS_SNSP to get the current stored Additional Sense Code
* XS_SNSASCQ(xs) dereferences XS_SNSP to get the current stored Additional Sense Code Qualifier
* XS_TAG_P(xs) predicate of whether this command should be tagged
* XS_TAG_TYPE(xs) which type of tag to use
* XS_SETERR(xs) set error state
@ -1065,6 +1069,8 @@ void isp_async(ispsoftc_t *, ispasync_t, ...);
*
* XS_SAVE_SENSE(xs, sp, len) save sense data
*
* XS_SENSE_VALID(xs) indicates whether sense is valid
*
* DEFAULT_FRAMESIZE(ispsoftc_t *) Default Frame Size
* DEFAULT_EXEC_THROTTLE(ispsoftc_t *) Default Execution Throttle
*

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -176,7 +176,7 @@
#define MSIX_82599_BAR 4
#define IXGBE_TSO_SIZE 65535
#define IXGBE_TX_BUFFER_SIZE ((u32) 1514)
#define IXGBE_RX_HDR 256
#define IXGBE_RX_HDR 128
#define IXGBE_VFTA_SIZE 128
#define IXGBE_BR_SIZE 4096
#define CSUM_OFFLOAD 7 /* Bits in csum flags */
@ -231,6 +231,7 @@ struct ixgbe_tx_buf {
struct ixgbe_rx_buf {
struct mbuf *m_head;
struct mbuf *m_pack;
struct mbuf *fmp;
bus_dmamap_t map;
};
@ -248,20 +249,34 @@ struct ixgbe_dma_alloc {
};
/*
* The transmit ring, one per tx queue
** Driver queue struct: this is the interrupt container
** for the associated tx and rx ring.
*/
struct ix_queue {
struct adapter *adapter;
u32 msix; /* This queue's MSIX vector */
u32 eims; /* This queue's EIMS bit */
u32 eitr_setting;
struct resource *res;
void *tag;
struct tx_ring *txr;
struct rx_ring *rxr;
struct task que_task;
struct taskqueue *tq;
u64 irqs;
};
/*
* The transmit ring, one per queue
*/
struct tx_ring {
struct adapter *adapter;
struct mtx tx_mtx;
u32 me;
u32 msix;
bool watchdog_check;
int watchdog_time;
union ixgbe_adv_tx_desc *tx_base;
volatile u32 tx_hwb;
struct ixgbe_dma_alloc txdma;
struct task tx_task;
struct taskqueue *tq;
u32 next_avail_desc;
u32 next_to_clean;
struct ixgbe_tx_buf *tx_buffers;
@ -272,17 +287,14 @@ struct tx_ring {
#if __FreeBSD_version >= 800000
struct buf_ring *br;
#endif
/* Interrupt resources */
void *tag;
struct resource *res;
#ifdef IXGBE_FDIR
u16 atr_sample;
u16 atr_count;
#endif
u32 bytes; /* used for AIM */
u32 packets;
/* Soft Stats */
u32 no_tx_desc_avail;
u32 no_tx_desc_late;
u64 tx_irq;
u64 no_desc_avail;
u64 total_packets;
};
@ -294,35 +306,29 @@ struct rx_ring {
struct adapter *adapter;
struct mtx rx_mtx;
u32 me;
u32 msix;
u32 payload;
struct task rx_task;
struct taskqueue *tq;
union ixgbe_adv_rx_desc *rx_base;
struct ixgbe_dma_alloc rxdma;
struct lro_ctrl lro;
bool lro_enabled;
bool hdr_split;
bool hw_rsc;
unsigned int last_refreshed;
unsigned int next_to_check;
bool discard;
u32 next_to_refresh;
u32 next_to_check;
char mtx_name[16];
struct ixgbe_rx_buf *rx_buffers;
bus_dma_tag_t rxtag;
bus_dmamap_t spare_map;
char mtx_name[16];
u32 bytes; /* Used for AIM calc */
u32 eitr_setting;
/* Interrupt resources */
void *tag;
struct resource *res;
u32 packets;
/* Soft stats */
u64 rx_irq;
u64 rx_split_packets;
u64 rx_packets;
u64 rx_bytes;
u64 rx_discarded;
u64 rsc_num;
#ifdef IXGBE_FDIR
u64 flm;
@ -331,94 +337,94 @@ struct rx_ring {
/* Our adapter structure */
struct adapter {
struct ifnet *ifp;
struct ixgbe_hw hw;
struct ifnet *ifp;
struct ixgbe_hw hw;
struct ixgbe_osdep osdep;
struct device *dev;
struct device *dev;
struct resource *pci_mem;
struct resource *msix_mem;
struct resource *pci_mem;
struct resource *msix_mem;
/*
* Interrupt resources: this set is
* either used for legacy, or for Link
* when doing MSIX
*/
void *tag;
struct resource *res;
void *tag;
struct resource *res;
struct ifmedia media;
struct callout timer;
int msix;
int if_flags;
struct ifmedia media;
struct callout timer;
int msix;
int if_flags;
struct mtx core_mtx;
struct mtx core_mtx;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
eventhandler_tag vlan_attach;
eventhandler_tag vlan_detach;
u16 num_vlans;
u16 num_queues;
u16 num_vlans;
u16 num_queues;
/* Info about the board itself */
u32 optics;
bool link_active;
u16 max_frame_size;
u32 link_speed;
bool link_up;
u32 linkvec;
u32 optics;
bool link_active;
u16 max_frame_size;
u32 link_speed;
bool link_up;
u32 linkvec;
/* Mbuf cluster size */
u32 rx_mbuf_sz;
u32 rx_mbuf_sz;
/* Support for pluggable optics */
bool sfp_probe;
struct task link_task; /* Link tasklet */
struct task mod_task; /* SFP tasklet */
struct task msf_task; /* Multispeed Fiber tasklet */
bool sfp_probe;
struct task link_task; /* Link tasklet */
struct task mod_task; /* SFP tasklet */
struct task msf_task; /* Multispeed Fiber */
#ifdef IXGBE_FDIR
int fdir_reinit;
struct task fdir_task;
#endif
struct taskqueue *tq;
/*
** Queues:
** This is the irq holder, it has
** and RX/TX pair or rings associated
** with it.
*/
struct ix_queue *queues;
/*
* Transmit rings:
* Allocated at run time, an array of rings.
*/
struct tx_ring *tx_rings;
int num_tx_desc;
struct tx_ring *tx_rings;
int num_tx_desc;
/*
* Receive rings:
* Allocated at run time, an array of rings.
*/
struct rx_ring *rx_rings;
int num_rx_desc;
u64 rx_mask;
u32 rx_process_limit;
#ifdef IXGBE_IEEE1588
/* IEEE 1588 precision time support */
struct cyclecounter cycles;
struct nettimer clock;
struct nettime_compare compare;
struct hwtstamp_ctrl hwtstamp;
#endif
struct rx_ring *rx_rings;
int num_rx_desc;
u64 que_mask;
u32 rx_process_limit;
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long tso_tx;
unsigned long link_irq;
unsigned long dropped_pkts;
unsigned long mbuf_defrag_failed;
unsigned long mbuf_header_failed;
unsigned long mbuf_packet_failed;
unsigned long no_tx_map_avail;
unsigned long no_tx_dma_setup;
unsigned long watchdog_events;
unsigned long tso_tx;
unsigned long link_irq;
struct ixgbe_hw_stats stats;
struct ixgbe_hw_stats stats;
};
/* Precision Time Sync (IEEE 1588) defines */
@ -452,8 +458,8 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
case ixgbe_phy_tw_tyco:
case ixgbe_phy_tw_unknown:
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
return TRUE;
default:
return FALSE;

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -59,6 +59,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
@ -164,6 +165,7 @@ s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
/* MAC */
mac->ops.start_hw = &ixgbe_start_hw_82598;
mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
mac->ops.reset_hw = &ixgbe_reset_hw_82598;
mac->ops.get_media_type = &ixgbe_get_media_type_82598;
mac->ops.get_supported_physical_layer =
@ -273,7 +275,8 @@ out:
* @hw: pointer to hardware structure
*
* Starts the hardware using the generic start_hw function.
* Then set pcie completion timeout
* Disables relaxed ordering Then set pcie completion timeout
*
**/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
@ -287,17 +290,17 @@ s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
/* Disable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@ -439,15 +442,23 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
DEBUGFUNC("ixgbe_fc_enable_82598");
/*
* On 82598 backplane having FC on causes resets while doing
* KX, so turn off here.
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
* more details see 82598 Specification update.
*/
hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
if (link_up &&
link_speed == IXGBE_LINK_SPEED_1GB_FULL &&
hw->mac.ops.get_media_type(hw) == ixgbe_media_type_backplane) {
hw->fc.disable_fc_autoneg = TRUE;
hw->fc.requested_mode = ixgbe_fc_none;
if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
switch (hw->fc.requested_mode) {
case ixgbe_fc_full:
hw->fc.requested_mode = ixgbe_fc_tx_pause;
break;
case ixgbe_fc_rx_pause:
hw->fc.requested_mode = ixgbe_fc_none;
break;
default:
/* no change */
break;
}
}
/* Negotiate the fc mode to use */
@ -842,12 +853,9 @@ no_phy_reset:
* Prevent the PCI-E bus from from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
status = ixgbe_disable_pcie_master(hw);
if (status != IXGBE_SUCCESS) {
status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
DEBUGOUT("PCI-E Master disable polling has failed.\n");
}
ixgbe_disable_pcie_master(hw);
mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
@ -868,6 +876,19 @@ no_phy_reset:
DEBUGOUT("Reset polling failed to complete.\n");
}
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
* for any pending HW events to complete. We use 1usec since that is
* what is needed for ixgbe_disable_pcie_master(). The second reset
* then clears out any effects of those events.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
usec_delay(1);
goto mac_reset_top;
}
msec_delay(50);
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
@ -1299,3 +1320,32 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
/**
* ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
* @hw: pointer to hardware structure
*
**/
void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
{
u32 regval;
u32 i;
DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
/* Enable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
}
for (i = 0; ((i < hw->mac.max_rx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
}

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -64,6 +64,7 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
@ -267,6 +268,8 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_get_link_capabilities_82599");
/*
* Determine link capabilities based on the stored value of AUTOC,
* which represents EEPROM defaults. If AUTOC value has not
@ -878,7 +881,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
u32 ctrl, ctrl_ext;
u32 ctrl;
u32 i;
u32 autoc;
u32 autoc2;
@ -913,12 +916,9 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
* Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests before reset
*/
status = ixgbe_disable_pcie_master(hw);
if (status != IXGBE_SUCCESS) {
status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
DEBUGOUT("PCI-E Master disable polling has failed.\n");
}
ixgbe_disable_pcie_master(hw);
mac_reset_top:
/*
* Issue global reset to the MAC. This needs to be a SW reset.
* If link reset is used, it might reset the MAC when mng is using it
@ -938,10 +938,19 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
status = IXGBE_ERR_RESET_FAILED;
DEBUGOUT("Reset polling failed to complete.\n");
}
/* Clear PF Reset Done bit so PF/VF Mail Ops can work */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
/*
* Double resets are required for recovery from certain error
* conditions. Between resets, it is necessary to stall to allow time
* for any pending HW events to complete. We use 1usec since that is
* what is needed for ixgbe_disable_pcie_master(). The second reset
* then clears out any effects of those events.
*/
if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
usec_delay(1);
goto mac_reset_top;
}
msec_delay(50);
@ -981,8 +990,6 @@ s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
/* Store the permanent SAN mac address */
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
@ -1207,6 +1214,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Send interrupt when 64 filters are left */
fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
/* Initialize the drop queue to Rx queue 127 */
fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
switch (pballoc) {
case IXGBE_FDIR_PBALLOC_64K:
/* 2k - 1 perfect filters */
@ -1886,23 +1896,26 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
* @input_masks: masks for the input bitstream
* @soft_id: software index for the filters
* @queue: queue index to direct traffic to
*
* Note that the caller to this function must lock before calling, since the
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
u16 soft_id,
u8 queue)
struct ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *input_masks,
u16 soft_id, u8 queue)
{
u32 fdircmd = 0;
u32 fdirhash;
u32 src_ipv4, dst_ipv4;
u32 src_ipv4 = 0, dst_ipv4 = 0;
u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
u16 src_port, dst_port, vlan_id, flex_bytes;
u16 bucket_hash;
u8 l4type;
u8 fdirm = 0;
DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
@ -1959,7 +1972,6 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* IPv4 */
ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
}
ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
@ -1968,7 +1980,78 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
(flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
(dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
(dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
/*
* Program the relevant mask registers. If src/dst_port or src/dst_addr
* are zero, then assume a full mask for that field. Also assume that
* a VLAN of 0 is unspecified, so mask that out as well. L4type
* cannot be masked out in this implementation.
*
* This also assumes IPv4 only. IPv6 masking isn't supported at this
* point in time.
*/
if (src_ipv4 == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
if (dst_ipv4 == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
case IXGBE_ATR_L4TYPE_TCP:
if (src_port == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
input_masks->src_port_mask);
if (dst_port == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
(0xffff << 16)));
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
(input_masks->dst_port_mask << 16)));
break;
case IXGBE_ATR_L4TYPE_UDP:
if (src_port == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
input_masks->src_port_mask);
if (dst_port == 0)
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
(0xffff << 16)));
else
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
(input_masks->src_port_mask << 16)));
break;
default:
/* this already would have failed above */
break;
}
/* Program the last mask register, FDIRM */
if (input_masks->vlan_id_mask || !vlan_id)
/* Mask both VLAN and VLANP - bits 0 and 1 */
fdirm |= (IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP);
if (input_masks->data_mask || !flex_bytes)
/* Flex bytes need masking, so mask the whole thing - bit 4 */
fdirm |= IXGBE_FDIRM_FLEX;
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
fdirm |= (IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6);
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
@ -2063,7 +2146,7 @@ s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
for (i = 0; i < hw->mac.max_rx_queues; i++) {
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
}
@ -2192,10 +2275,14 @@ sfp_check:
goto out;
switch (hw->phy.type) {
case ixgbe_phy_tw_tyco:
case ixgbe_phy_tw_unknown:
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
break;
case ixgbe_phy_sfp_ftl_active:
case ixgbe_phy_sfp_active_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
break;
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_ftl:
case ixgbe_phy_sfp_intel:
@ -2328,3 +2415,30 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
fw_version_out:
return status;
}
/**
 * ixgbe_enable_relaxed_ordering_82599 - Enable relaxed ordering
 * @hw: pointer to hardware structure
 *
 * Sets the relaxed-ordering enable bits in the 82599 per-queue DCA TX
 * and RX control registers for all TX and RX queues.
 **/
void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw)
{
	u32 ctrl;
	u32 q;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_82599");

	/* Relax ordering of TX descriptor write-backs */
	for (q = 0; q < hw->mac.max_tx_queues; q++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
		ctrl |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), ctrl);
	}

	/* Relax ordering of RX descriptor and header write-backs */
	for (q = 0; q < hw->mac.max_rx_queues; q++) {
		ctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(q));
		ctrl |= IXGBE_DCA_RXCTRL_DESC_WRO_EN |
			IXGBE_DCA_RXCTRL_DESC_HSRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(q), ctrl);
	}
}

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -111,6 +111,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_CX4:
case IXGBE_DEV_ID_82599_T3_LOM:
hw->mac.type = ixgbe_mac_82599EB;
break;
default:
@ -167,6 +168,20 @@ s32 ixgbe_start_hw(struct ixgbe_hw *hw)
IXGBE_NOT_IMPLEMENTED);
}
/**
 * ixgbe_enable_relaxed_ordering - Enables tx relaxed ordering,
 * which is disabled by default in ixgbe_start_hw();
 *
 * @hw: pointer to hardware structure
 *
 * Dispatches to the MAC-specific implementation when one is installed;
 * does nothing otherwise.
 **/
void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw)
{
	/* Optional op: silently skip MACs that don't provide it */
	if (!hw->mac.ops.enable_relaxed_ordering)
		return;
	hw->mac.ops.enable_relaxed_ordering(hw);
}
/**
* ixgbe_clear_hw_cntrs - Clear hardware counters
* @hw: pointer to hardware structure

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -43,6 +43,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
s32 ixgbe_init_hw(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
s32 ixgbe_start_hw(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
@ -122,6 +123,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
u8 queue);
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
struct ixgbe_atr_input_masks *masks,
u16 soft_id,
u8 queue);
u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *input, u32 key);

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -474,8 +474,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
* Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests
*/
if (ixgbe_disable_pcie_master(hw) != IXGBE_SUCCESS)
DEBUGOUT("PCI-E Master disable polling has failed.\n");
ixgbe_disable_pcie_master(hw);
return IXGBE_SUCCESS;
}
@ -2198,10 +2197,14 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
u32 i;
u32 reg_val;
u32 number_of_queues;
s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
s32 status = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_disable_pcie_master");
/* Just jump out if bus mastering is already disabled */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
/* Disable the receive unit by stopping each queue */
number_of_queues = hw->mac.max_rx_queues;
for (i = 0; i < number_of_queues; i++) {
@ -2217,13 +2220,42 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) {
status = IXGBE_SUCCESS;
break;
}
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
usec_delay(100);
}
DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
/*
* The GIO Master Disable bit didn't clear. There are multiple reasons
* for this listed in the datasheet 5.2.5.3.2 Master Disable, and they
* all require a double reset to recover from. Before proceeding, we
* first wait a little more to try to ensure that, at a minimum, the
* PCIe block has no transactions pending.
*/
for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
break;
usec_delay(100);
}
if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
/*
* Two consecutive resets are required via CTRL.RST per datasheet
* 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
* of this need. The first reset prevents new master requests from
* being issued by our device. We then must wait 1usec for any
* remaining completions from the PCIe bus to trickle in, and then reset
* again to clear out any effects they may have had on our device.
*/
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
out:
return status;
}
@ -2695,6 +2727,10 @@ s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
u32 first_empty_slot = 0;
s32 regindex;
/* short cut the special case */
if (vlan == 0)
return 0;
/*
* Search for the vlan id in the VLVF entries. Save off the first empty
* slot found along the way
@ -2717,7 +2753,7 @@ s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
regindex = first_empty_slot;
else {
DEBUGOUT("No space in VLVF.\n");
regindex = -1;
regindex = IXGBE_ERR_NO_SPACE;
}
}
@ -2738,8 +2774,11 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
{
s32 regindex;
u32 bitindex;
u32 vfta;
u32 bits;
u32 vt;
u32 targetbit;
bool vfta_changed = FALSE;
DEBUGFUNC("ixgbe_set_vfta_generic");
@ -2749,6 +2788,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/*
* this is a 2 part operation - first the VFTA, then the
* VLVF and VLVFB if VT Mode is set
* We don't write the VFTA until we know the VLVF part succeeded.
*/
/* Part 1
@ -2759,13 +2799,20 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*/
regindex = (vlan >> 5) & 0x7F;
bitindex = vlan & 0x1F;
bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
if (vlan_on)
bits |= (1 << bitindex);
else
bits &= ~(1 << bitindex);
IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
targetbit = (1 << bitindex);
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
if (vlan_on) {
if (!(vfta & targetbit)) {
vfta |= targetbit;
vfta_changed = TRUE;
}
} else {
if ((vfta & targetbit)) {
vfta &= ~targetbit;
vfta_changed = TRUE;
}
}
/* Part 2
* If VT Mode is set
@ -2777,61 +2824,84 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*/
vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (vt & IXGBE_VT_CTL_VT_ENABLE) {
if (vlan == 0) {
regindex = 0;
} else {
regindex = ixgbe_find_vlvf_slot(hw, vlan);
if (regindex < 0)
goto out;
}
s32 vlvf_index;
vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
if (vlvf_index < 0)
return vlvf_index;
if (vlan_on) {
/* set the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
IXGBE_VLVFB(regindex*2));
IXGBE_VLVFB(vlvf_index*2));
bits |= (1 << vind);
IXGBE_WRITE_REG(hw,
IXGBE_VLVFB(regindex*2),
IXGBE_VLVFB(vlvf_index*2),
bits);
} else {
bits = IXGBE_READ_REG(hw,
IXGBE_VLVFB((regindex*2)+1));
bits |= (1 << vind);
IXGBE_VLVFB((vlvf_index*2)+1));
bits |= (1 << (vind-32));
IXGBE_WRITE_REG(hw,
IXGBE_VLVFB((regindex*2)+1),
IXGBE_VLVFB((vlvf_index*2)+1),
bits);
}
} else {
/* clear the pool bit */
if (vind < 32) {
bits = IXGBE_READ_REG(hw,
IXGBE_VLVFB(regindex*2));
IXGBE_VLVFB(vlvf_index*2));
bits &= ~(1 << vind);
IXGBE_WRITE_REG(hw,
IXGBE_VLVFB(regindex*2),
IXGBE_VLVFB(vlvf_index*2),
bits);
bits |= IXGBE_READ_REG(hw,
IXGBE_VLVFB((regindex*2)+1));
IXGBE_VLVFB((vlvf_index*2)+1));
} else {
bits = IXGBE_READ_REG(hw,
IXGBE_VLVFB((regindex*2)+1));
bits &= ~(1 << vind);
IXGBE_VLVFB((vlvf_index*2)+1));
bits &= ~(1 << (vind-32));
IXGBE_WRITE_REG(hw,
IXGBE_VLVFB((regindex*2)+1),
IXGBE_VLVFB((vlvf_index*2)+1),
bits);
bits |= IXGBE_READ_REG(hw,
IXGBE_VLVFB(regindex*2));
IXGBE_VLVFB(vlvf_index*2));
}
}
if (bits)
IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
/*
* If there are still bits set in the VLVFB registers
* for the VLAN ID indicated we need to see if the
* caller is requesting that we clear the VFTA entry bit.
* If the caller has requested that we clear the VFTA
* entry bit but there are still pools/VFs using this VLAN
* ID entry then ignore the request. We're not worried
* about the case where we're turning the VFTA VLAN ID
* entry bit on, only when requested to turn it off as
* there may be multiple pools and/or VFs using the
* VLAN ID entry. In that case we cannot clear the
* VFTA bit until all pools/VFs using that VLAN ID have also
* been cleared. This will be indicated by "bits" being
* zero.
*/
if (bits) {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
(IXGBE_VLVF_VIEN | vlan));
if (!vlan_on) {
/* someone wants to clear the vfta entry
* but some pools/VFs are still using it.
* Ignore it. */
vfta_changed = FALSE;
}
}
else
IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
}
out:
if (vfta_changed)
IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
return IXGBE_SUCCESS;
}
@ -2869,14 +2939,23 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
* Reads the links register to determine if link is up and the current speed
**/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
bool *link_up, bool link_up_wait_to_complete)
{
u32 links_reg;
u32 links_reg, links_orig;
u32 i;
DEBUGFUNC("ixgbe_check_mac_link_generic");
/* clear the old state */
links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
if (links_orig != links_reg) {
DEBUGOUT2("LINKS changed from %08X to %08X\n",
links_orig, links_reg);
}
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if (links_reg & IXGBE_LINKS_UP) {

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -77,7 +77,8 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
phy->sfp_type = ixgbe_sfp_type_unknown;
phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
phy->ops.set_low_power_state = &ixgbe_tn_set_low_power_state;
return IXGBE_SUCCESS;
}
@ -241,13 +242,19 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
IXGBE_MDIO_PHY_XS_DEV_TYPE,
IXGBE_MDIO_PHY_XS_RESET);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 500; i++) {
msec_delay(1);
/*
* Poll for reset bit to self-clear indicating reset is complete.
* Some PHYs could take up to 3 seconds to complete and need about
* 1.7 usec delay after the reset is complete.
*/
for (i = 0; i < 30; i++) {
msec_delay(100);
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET))
if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
usec_delay(2);
break;
}
}
if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
@ -922,6 +929,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
u8 comp_codes_10g = 0;
u8 oui_bytes[3] = {0, 0, 0};
u8 cable_tech = 0;
u8 cable_spec = 0;
u16 enforce_sfp = 0;
DEBUGFUNC("ixgbe_identify_sfp_module_generic");
@ -968,6 +976,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 4 SFP_DA_CORE1 - 82599-specific
* 5 SFP_SR/LR_CORE0 - 82599-specific
* 6 SFP_SR/LR_CORE1 - 82599-specific
* 7 SFP_act_lmt_DA_CORE0 - 82599-specific
* 8 SFP_act_lmt_DA_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@ -979,29 +989,40 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_da_cu_core1;
else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
hw->phy.ops.read_i2c_eeprom(
hw, IXGBE_SFF_CABLE_SPEC_COMP,
&cable_spec);
if (cable_spec &
IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_da_act_lmt_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_da_act_lmt_core1;
} else
hw->phy.sfp_type =
ixgbe_sfp_type_unknown;
} else if (comp_codes_10g &
(IXGBE_SFF_10GBASESR_CAPABLE |
IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core1;
else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
ixgbe_sfp_type_srlr_core1;
else
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
}
if (hw->phy.sfp_type != stored_sfp_type)
@ -1036,10 +1057,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
switch (vendor_oui) {
case IXGBE_SFF_VENDOR_OUI_TYCO:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type = ixgbe_phy_tw_tyco;
hw->phy.type =
ixgbe_phy_sfp_passive_tyco;
break;
case IXGBE_SFF_VENDOR_OUI_FTL:
hw->phy.type = ixgbe_phy_sfp_ftl;
if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
hw->phy.type = ixgbe_phy_sfp_ftl_active;
else
hw->phy.type = ixgbe_phy_sfp_ftl;
break;
case IXGBE_SFF_VENDOR_OUI_AVAGO:
hw->phy.type = ixgbe_phy_sfp_avago;
@ -1049,15 +1074,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
break;
default:
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
hw->phy.type = ixgbe_phy_tw_unknown;
hw->phy.type =
ixgbe_phy_sfp_passive_unknown;
else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
hw->phy.type =
ixgbe_phy_sfp_active_unknown;
else
hw->phy.type = ixgbe_phy_sfp_unknown;
break;
}
}
/* All passive DA cables are supported */
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
/* Allow any DA cable vendor */
if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
IXGBE_SFF_DA_ACTIVE_CABLE)) {
status = IXGBE_SUCCESS;
goto out;
}
@ -1108,6 +1138,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *data_offset)
{
u16 sfp_id;
u16 sfp_type = hw->phy.sfp_type;
DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets");
@ -1121,6 +1152,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
(hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
return IXGBE_ERR_SFP_NOT_SUPPORTED;
/* Limiting active cables must be initialized as SR modules */
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
@ -1137,7 +1174,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
while (sfp_id != IXGBE_PHY_INIT_END_NL) {
if (sfp_id == hw->phy.sfp_type) {
if (sfp_id == sfp_type) {
(*list_offset)++;
hw->eeprom.ops.read(hw, *list_offset, data_offset);
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
@ -1722,3 +1759,56 @@ void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
/* Put the i2c bus back to default state */
ixgbe_i2c_stop(hw);
}
/**
 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
 * @hw: pointer to hardware structure
 *
 * Checks if the LASI temp alarm status was triggered due to overtemp.
 * Returns IXGBE_ERR_OVERTEMP when the alarm bit is set; IXGBE_SUCCESS
 * otherwise (including for devices other than the 82599 T3 LOM, which
 * are skipped entirely).
 **/
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 phy_data = 0;

	DEBUGFUNC("ixgbe_tn_check_overtemp");

	/* Only the 82599 T3 LOM has the Teranetics PHY this check targets */
	if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
		goto out;

	/* Check that the LASI temp alarm status was triggered */
	hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
	                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);

	if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
		goto out;

	status = IXGBE_ERR_OVERTEMP;
out:
	return status;
}
/**
* ixgbe_set_tn_low_power_state - Sets the teranetics phy into low power state
* @hw: pointer to hardware structure
*
* Sets the phy into low power mode when LASI temp alarm status is triggered
**/
s32 ixgbe_tn_set_low_power_state(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
u16 phy_data = 0;
DEBUGFUNC("ixgbe_set_tn_low_power_state");
/* Set the phy into low power mode */
hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_CONTROL_ADDR,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
phy_data |= IXGBE_MDIO_PHY_LOW_POWER_MODE;
hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_PMD_CONTROL_ADDR,
IXGBE_MDIO_PMA_PMD_DEV_TYPE, phy_data);
return status;
}

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -47,9 +47,12 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
@ -84,6 +87,9 @@
#define IXGBE_I2C_T_SU_STO 4
#define IXGBE_I2C_T_BUF 5
#define IXGBE_TN_LASI_STATUS_REG 0x9005
#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
@ -119,6 +125,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_tn_set_low_power_state(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,

View file

@ -1,6 +1,6 @@
/******************************************************************************
Copyright (c) 2001-2009, Intel Corporation
Copyright (c) 2001-2010, Intel Corporation
All rights reserved.
Redistribution and use in source and binary forms, with or without
@ -57,9 +57,11 @@
#define IXGBE_DEV_ID_82599_KX4 0x10F7
#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
#define IXGBE_DEV_ID_82599_CX4 0x10F9
#define IXGBE_DEV_ID_82599_SFP 0x10FB
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
/* General Registers */
#define IXGBE_CTRL 0x00000
@ -89,7 +91,7 @@
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
#define IXGBE_GRC_APME 0x00000002 /* Advanced Power Management Enable */
#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
#define IXGBE_VPDDIAG0 0x10204
#define IXGBE_VPDDIAG1 0x10208
@ -198,6 +200,7 @@
#define IXGBE_RFCTL 0x05008
#define IXGBE_DRECCCTL 0x02F08
#define IXGBE_DRECCCTL_DISABLE 0
/* Multicast Table Array - 128 entries */
#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
@ -334,7 +337,7 @@
/* Wake Up Control */
#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */
#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
#define IXGBE_WUC_ADVD3WUC 0x00000010 /* D3Cold wake up cap. enable*/
#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */
/* Wake Up Filter Control */
#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
@ -736,6 +739,12 @@
#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IXGBE_GCR_CAP_VER2 0x00040000
#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
IXGBE_GCR_EXT_VT_MODE_64)
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@ -889,6 +898,8 @@
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@ -1020,7 +1031,9 @@
#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */
#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */
#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */
#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */
@ -1369,10 +1382,12 @@
* EAPOL 802.1x (0x888e): Filter 0
* FCoE (0x8906): Filter 2
* 1588 (0x88f7): Filter 3
* FIP (0x8914): Filter 4
*/
#define IXGBE_ETQF_FILTER_EAPOL 0
#define IXGBE_ETQF_FILTER_FCOE 2
#define IXGBE_ETQF_FILTER_1588 3
#define IXGBE_ETQF_FILTER_FIP 4
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
@ -1476,6 +1491,7 @@
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
/* LINKS Bit Masks */
#define IXGBE_LINKS_KX_AN_COMP 0x80000000
#define IXGBE_LINKS_UP 0x40000000
@ -1655,6 +1671,8 @@
#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
/* PCI Bus Info */
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
@ -1787,6 +1805,7 @@
#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */
#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
/* Receive Descriptor bit definitions */
@ -2000,10 +2019,9 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRM_VLANID 0x00000001
#define IXGBE_FDIRM_VLANP 0x00000002
#define IXGBE_FDIRM_POOL 0x00000004
#define IXGBE_FDIRM_L3P 0x00000008
#define IXGBE_FDIRM_L4P 0x00000010
#define IXGBE_FDIRM_FLEX 0x00000020
#define IXGBE_FDIRM_DIPv6 0x00000040
#define IXGBE_FDIRM_L4P 0x00000008
#define IXGBE_FDIRM_FLEX 0x00000010
#define IXGBE_FDIRM_DIPv6 0x00000020
#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
#define IXGBE_FDIRFREE_FREE_SHIFT 0
@ -2218,6 +2236,8 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
@ -2258,6 +2278,15 @@ struct ixgbe_atr_input {
u8 byte_stream[42];
};
struct ixgbe_atr_input_masks {
u32 src_ip_mask;
u32 dst_ip_mask;
u16 src_port_mask;
u16 dst_port_mask;
u16 vlan_id_mask;
u16 data_mask;
};
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@ -2281,10 +2310,12 @@ enum ixgbe_phy_type {
ixgbe_phy_qt,
ixgbe_phy_xaui,
ixgbe_phy_nl,
ixgbe_phy_tw_tyco,
ixgbe_phy_tw_unknown,
ixgbe_phy_sfp_passive_tyco,
ixgbe_phy_sfp_passive_unknown,
ixgbe_phy_sfp_active_unknown,
ixgbe_phy_sfp_avago,
ixgbe_phy_sfp_ftl,
ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
@ -2312,6 +2343,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_cu_core1 = 4,
ixgbe_sfp_type_srlr_core0 = 5,
ixgbe_sfp_type_srlr_core1 = 6,
ixgbe_sfp_type_da_act_lmt_core0 = 7,
ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@ -2354,25 +2387,25 @@ enum ixgbe_bus_type {
/* PCI bus speeds */
enum ixgbe_bus_speed {
ixgbe_bus_speed_unknown = 0,
ixgbe_bus_speed_33,
ixgbe_bus_speed_66,
ixgbe_bus_speed_100,
ixgbe_bus_speed_120,
ixgbe_bus_speed_133,
ixgbe_bus_speed_2500,
ixgbe_bus_speed_5000,
ixgbe_bus_speed_33 = 33,
ixgbe_bus_speed_66 = 66,
ixgbe_bus_speed_100 = 100,
ixgbe_bus_speed_120 = 120,
ixgbe_bus_speed_133 = 133,
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_reserved
};
/* PCI bus widths */
enum ixgbe_bus_width {
ixgbe_bus_width_unknown = 0,
ixgbe_bus_width_pcie_x1,
ixgbe_bus_width_pcie_x2,
ixgbe_bus_width_pcie_x1 = 1,
ixgbe_bus_width_pcie_x2 = 2,
ixgbe_bus_width_pcie_x4 = 4,
ixgbe_bus_width_pcie_x8 = 8,
ixgbe_bus_width_32,
ixgbe_bus_width_64,
ixgbe_bus_width_32 = 32,
ixgbe_bus_width_64 = 64,
ixgbe_bus_width_reserved
};
@ -2503,6 +2536,7 @@ struct ixgbe_mac_operations {
s32 (*reset_hw)(struct ixgbe_hw *);
s32 (*start_hw)(struct ixgbe_hw *);
s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
void (*enable_relaxed_ordering)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
@ -2570,6 +2604,8 @@ struct ixgbe_phy_operations {
s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
void (*i2c_bus_clear)(struct ixgbe_hw *);
s32 (*check_overtemp)(struct ixgbe_hw *);
s32 (*set_low_power_state)(struct ixgbe_hw *);
};
struct ixgbe_eeprom_info {
@ -2580,6 +2616,7 @@ struct ixgbe_eeprom_info {
u16 address_bits;
};
#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
struct ixgbe_mac_info {
struct ixgbe_mac_operations ops;
enum ixgbe_mac_type type;
@ -2603,6 +2640,7 @@ struct ixgbe_mac_info {
u32 orig_autoc2;
bool orig_link_settings_stored;
bool autotry_restart;
u8 flags;
};
struct ixgbe_phy_info {
@ -2668,6 +2706,8 @@ struct ixgbe_hw {
#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
#define IXGBE_ERR_FDIR_REINIT_FAILED -23
#define IXGBE_ERR_EEPROM_VERSION -24
#define IXGBE_ERR_NO_SPACE -25
#define IXGBE_ERR_OVERTEMP -26
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF

View file

@ -165,7 +165,7 @@ ofw_std_init(ofw_t ofw, void *openfirm)
static int
ofw_std_test(ofw_t ofw, const char *name)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -187,7 +187,7 @@ static int
ofw_std_interpret(ofw_t ofw, const char *cmd, int nreturns,
unsigned long *returns)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -217,7 +217,7 @@ ofw_std_interpret(ofw_t ofw, const char *cmd, int nreturns,
static phandle_t
ofw_std_peer(ofw_t ofw, phandle_t node)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -239,7 +239,7 @@ ofw_std_peer(ofw_t ofw, phandle_t node)
static phandle_t
ofw_std_child(ofw_t ofw, phandle_t node)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -261,7 +261,7 @@ ofw_std_child(ofw_t ofw, phandle_t node)
static phandle_t
ofw_std_parent(ofw_t ofw, phandle_t node)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -283,7 +283,7 @@ ofw_std_parent(ofw_t ofw, phandle_t node)
static phandle_t
ofw_std_instance_to_package(ofw_t ofw, ihandle_t instance)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -305,7 +305,7 @@ ofw_std_instance_to_package(ofw_t ofw, ihandle_t instance)
static ssize_t
ofw_std_getproplen(ofw_t ofw, phandle_t package, const char *propname)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -330,7 +330,7 @@ static ssize_t
ofw_std_getprop(ofw_t ofw, phandle_t package, const char *propname, void *buf,
size_t buflen)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -359,7 +359,7 @@ static int
ofw_std_nextprop(ofw_t ofw, phandle_t package, const char *previous, char *buf,
size_t size)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -387,7 +387,7 @@ static int
ofw_std_setprop(ofw_t ofw, phandle_t package, const char *propname,
const void *buf, size_t len)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -415,7 +415,7 @@ ofw_std_setprop(ofw_t ofw, phandle_t package, const char *propname,
static ssize_t
ofw_std_canon(ofw_t ofw, const char *device, char *buf, size_t len)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -441,7 +441,7 @@ ofw_std_canon(ofw_t ofw, const char *device, char *buf, size_t len)
static phandle_t
ofw_std_finddevice(ofw_t ofw, const char *device)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -463,7 +463,7 @@ ofw_std_finddevice(ofw_t ofw, const char *device)
static ssize_t
ofw_std_instance_to_path(ofw_t ofw, ihandle_t instance, char *buf, size_t len)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -489,7 +489,7 @@ ofw_std_instance_to_path(ofw_t ofw, ihandle_t instance, char *buf, size_t len)
static ssize_t
ofw_std_package_to_path(ofw_t ofw, phandle_t package, char *buf, size_t len)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -516,7 +516,7 @@ static int
ofw_std_call_method(ofw_t ofw, ihandle_t instance, const char *method,
int nargs, int nreturns, unsigned long *args_and_returns)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -559,7 +559,7 @@ ofw_std_call_method(ofw_t ofw, ihandle_t instance, const char *method,
static ihandle_t
ofw_std_open(ofw_t ofw, const char *device)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -581,7 +581,7 @@ ofw_std_open(ofw_t ofw, const char *device)
static void
ofw_std_close(ofw_t ofw, ihandle_t instance)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -599,7 +599,7 @@ ofw_std_close(ofw_t ofw, ihandle_t instance)
static ssize_t
ofw_std_read(ofw_t ofw, ihandle_t instance, void *addr, size_t len)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -626,7 +626,7 @@ ofw_std_read(ofw_t ofw, ihandle_t instance, void *addr, size_t len)
static ssize_t
ofw_std_write(ofw_t ofw, ihandle_t instance, const void *addr, size_t len)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -652,7 +652,7 @@ ofw_std_write(ofw_t ofw, ihandle_t instance, const void *addr, size_t len)
static int
ofw_std_seek(ofw_t ofw, ihandle_t instance, uint64_t pos)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -682,7 +682,7 @@ ofw_std_seek(ofw_t ofw, ihandle_t instance, uint64_t pos)
static caddr_t
ofw_std_claim(ofw_t ofw, void *virt, size_t size, u_int align)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -708,7 +708,7 @@ ofw_std_claim(ofw_t ofw, void *virt, size_t size, u_int align)
static void
ofw_std_release(ofw_t ofw, void *virt, size_t size)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -732,7 +732,7 @@ ofw_std_release(ofw_t ofw, void *virt, size_t size)
static void
ofw_std_enter(ofw_t ofw)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
@ -748,7 +748,7 @@ ofw_std_enter(ofw_t ofw)
static void
ofw_std_exit(ofw_t ofw)
{
static struct {
struct {
cell_t name;
cell_t nargs;
cell_t nreturns;

View file

@ -89,6 +89,7 @@ static struct pci_id pci_ids[] = {
{ 0x84031415, "Oxford Semiconductor OX12PCI840 Parallel port", 0x10 },
{ 0x95131415, "Oxford Semiconductor OX16PCI954 Parallel port", 0x10 },
{ 0x98059710, "NetMos NM9805 1284 Printer port", 0x10 },
{ 0x99019710, "MosChip MCS9901 PCIe to Peripheral Controller", 0x10 },
{ 0xffff }
};

View file

@ -1071,6 +1071,7 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
if (IOCGROUP(cmd) == 'M') {
if (cmd == OSS_GETVERSION) {
*arg_i = SOUND_VERSION;
PCM_GIANT_EXIT(d);
return (0);
}
ret = dsp_ioctl_channel(i_dev, PCM_VOLCH(i_dev), cmd, arg);

View file

@ -181,9 +181,17 @@ static u_short mouse_or_mask[16] = {
#define vga_drawpxl(pos, color) \
switch (scp->sc->adp->va_info.vi_depth) { \
case 32: \
case 24: \
writel(pos, vga_palette32[color]); \
break; \
case 24: \
if (((pos) & 1) == 0) { \
writew(pos, vga_palette32[color]); \
writeb(pos + 2, vga_palette32[color] >> 16);\
} else { \
writeb(pos, vga_palette32[color]); \
writew(pos + 1, vga_palette32[color] >> 8);\
} \
break; \
case 16: \
if (scp->sc->adp->va_info.vi_pixel_fsizes[1] == 5)\
writew(pos, vga_palette15[color]); \

View file

@ -208,6 +208,8 @@ ehci_pci_match(device_t self)
return "NVIDIA nForce3 250 USB 2.0 controller";
case 0x005b10de:
return "NVIDIA nForce4 USB 2.0 controller";
case 0x036d10de:
return "NVIDIA nForce MCP55 USB 2.0 controller";
case 0x03f210de:
return "NVIDIA nForce MCP61 USB 2.0 controller";
case 0x0aa610de:

View file

@ -166,6 +166,8 @@ ohci_pci_match(device_t self)
case 0x00d710de:
return ("nVidia nForce3 USB Controller");
case 0x036c10de:
return ("nVidia nForce MCP55 USB Controller");
case 0x03f110de:
return ("nVidia nForce MCP61 USB Controller");
case 0x0aa510de:

View file

@ -896,8 +896,7 @@ ukbd_attach(device_t dev)
hid_input, 0, &sc->sc_loc_apple_fn, &flags,
&temp_id)) {
if (flags & HIO_VARIABLE)
sc->sc_flags |= UKBD_FLAG_APPLE_FN |
UKBD_FLAG_APPLE_SWAP;
sc->sc_flags |= UKBD_FLAG_APPLE_FN;
DPRINTFN(1, "Found Apple FN-key\n");
apple_keys = 1;
sc->sc_kbd_id = temp_id;

View file

@ -227,6 +227,7 @@ static struct usb_quirk_entry usb_quirks[USB_DEV_QUIRKS_MAX] = {
USB_QUIRK(IOMEGA, ZIP100, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI,
UQ_MSC_NO_TEST_UNIT_READY), /* XXX ZIP drives can also use ATAPI */
USB_QUIRK(JMICRON, JM20336, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
USB_QUIRK(JMICRON, JM20337, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI,
UQ_MSC_NO_SYNC_CACHE),
@ -317,6 +318,8 @@ static struct usb_quirk_entry usb_quirks[USB_DEV_QUIRKS_MAX] = {
USB_QUIRK(PNY, ATTACHE2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE,
UQ_MSC_NO_START_STOP),
USB_QUIRK(PROLIFIC, PL2506, 0x0000, 0xffff,
UQ_MSC_NO_SYNC_CACHE),
USB_QUIRK_VP(USB_VENDOR_SAMSUNG_TECHWIN,
USB_PRODUCT_SAMSUNG_TECHWIN_DIGIMAX_410, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
@ -442,6 +445,7 @@ static struct usb_quirk_entry usb_quirks[USB_DEV_QUIRKS_MAX] = {
USB_QUIRK(ACTIONS, MP4, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_SYNC_CACHE),
USB_QUIRK(ASUS, GMSC, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
USB_QUIRK(UNKNOWN4, USBMEMSTICK, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
};
#undef USB_QUIRK_VP
#undef USB_QUIRK

View file

@ -435,7 +435,7 @@ usb_submit_urb(struct urb *urb, uint16_t mem_flags)
uhe->bsd_xfer[1]) {
/* we are ready! */
TAILQ_INSERT_HEAD(&uhe->bsd_urb_list, urb, bsd_urb_list);
TAILQ_INSERT_TAIL(&uhe->bsd_urb_list, urb, bsd_urb_list);
urb->status = -EINPROGRESS;
@ -908,6 +908,7 @@ usb_linux_create_usb_device(struct usb_device *udev, device_t dev)
if (p_uhe) {
bcopy(ed, &p_uhe->desc, sizeof(p_uhe->desc));
p_uhe->bsd_iface_index = iface_index - 1;
TAILQ_INIT(&p_uhe->bsd_urb_list);
p_uhe++;
}
if (p_uhi) {

View file

@ -58,6 +58,7 @@ $FreeBSD$
vendor UNKNOWN1 0x0053 Unknown vendor
vendor UNKNOWN2 0x0105 Unknown vendor
vendor EGALAX2 0x0123 eGalax, Inc.
vendor UNKNOWN4 0x0204 Unknown vendor
vendor HUMAX 0x02ad HUMAX
vendor LTS 0x0386 LTS
vendor BWCT 0x03da Bernd Walter Computer Technology
@ -1098,6 +1099,7 @@ product BROADCOM BCM2033 0x2033 BCM2033 Bluetooth USB dongle
/* Brother Industries products */
product BROTHER HL1050 0x0002 HL-1050 laser printer
product BROTHER MFC8600_9650 0x0100 MFC8600/9650 multifunction device
/* Behavior Technology Computer products */
product BTC BTC7932 0x6782 Keyboard with mouse port
@ -1812,6 +1814,7 @@ product JABLOTRON PC60B 0x0001 PC-60B
product JATON EDA 0x5704 Ethernet
/* JMicron products */
product JMICRON JM20336 0x2336 USB to SATA Bridge
product JMICRON JM20337 0x2338 USB to ATA/ATAPI Bridge
/* JVC products */
@ -2127,6 +2130,7 @@ product MUSTEK 1200UB 0x0006 1200 UB scanner
product MUSTEK 1200USBPLUS 0x0007 1200 USB Plus scanner
product MUSTEK 1200CUPLUS 0x0008 1200 CU Plus scanner
product MUSTEK BEARPAW1200F 0x0010 BearPaw 1200F scanner
product MUSTEK BEARPAW2400TA 0x0218 BearPaw 2400TA scanner
product MUSTEK BEARPAW1200TA 0x021e BearPaw 1200TA scanner
product MUSTEK 600USB 0x0873 600 USB scanner
product MUSTEK MDC800 0xa800 MDC-800 digital camera
@ -2412,6 +2416,7 @@ product PROLIFIC PL2303 0x2303 PL2303 Serial (ATEN/IOGEAR UC232A)
product PROLIFIC PL2305 0x2305 Parallel printer
product PROLIFIC ATAPI4 0x2307 ATAPI-4 Controller
product PROLIFIC PL2501 0x2501 PL2501 Host-Host interface
product PROLIFIC PL2506 0x2506 PL2506 USB to IDE Bridge
product PROLIFIC PHAROS 0xaaa0 Prolific Pharos
product PROLIFIC RSAQ3 0xaaa2 PL2303 Serial Adapter (IODATA USB-RSAQ3)
product PROLIFIC2 WSIM 0x2001 Willcom WSIM
@ -2512,12 +2517,14 @@ product QUALCOMMINC E2003 0x2003 3G modem
/* Quanta products */
/* Quanta products */
product QUANTA RW6815_1 0x00ce HP iPAQ rw6815
product QUANTA RT3070 0x0304 RT3070
product QUANTA Q101 0xea02 HSDPA modem
product QUANTA Q111 0xea03 HSDPA modem
product QUANTA GLX 0xea04 HSDPA modem
product QUANTA GKE 0xea05 HSDPA modem
product QUANTA GLE 0xea06 HSDPA modem
product QUANTA RW6815_2 0xf003 HP iPAQ rw6815
/* Qtronix products */
product QTRONIX 980N 0x2011 Scorpion-980N keyboard
@ -2725,6 +2732,7 @@ product SIERRA AIRCARD875 0x6820 Aircard 875 HSDPA
product SIERRA TRUINSTALL 0x0fff Aircard Tru Installer
/* Sigmatel products */
product SIGMATEL WBT_3052 0x4200 WBT-3052 IrDA/USB Bridge
product SIGMATEL I_BEAD100 0x8008 i-Bead 100 MP3 Player
/* SIIG products */
@ -3023,6 +3031,9 @@ product UMEDIA AR5523_2_NF 0x3206 AR5523 (no firmware)
/* Universal Access products */
product UNIACCESS PANACHE 0x0101 Panache Surf USB ISDN Adapter
/* Unknown vendors */
product UNKNOWN4 USBMEMSTICK 0x6025 Flash Disk CBM
/* U.S. Robotics products */
product USR USR5423 0x0121 USR5423 WLAN

View file

@ -117,7 +117,7 @@ struct mtx nfs_slock_mutex;
/* local functions */
static int nfssvc_call(struct thread *, struct nfssvc_args *, struct ucred *);
#if defined(__i386__)
#ifdef __NO_STRICT_ALIGNMENT
/*
* These architectures don't need re-alignment, so just return.
*/
@ -127,7 +127,7 @@ newnfs_realign(struct mbuf **pm)
return;
}
#else
#else /* !__NO_STRICT_ALIGNMENT */
/*
* newnfs_realign:
*
@ -185,7 +185,7 @@ newnfs_realign(struct mbuf **pm)
pm = &m->m_next;
}
}
#endif /* !__i386__ */
#endif /* __NO_STRICT_ALIGNMENT */
#ifdef notdef
static void

View file

@ -2443,6 +2443,9 @@ nfsvno_fhtovp(struct mount *mp, fhandle_t *fhp, struct sockaddr *nam,
*credp = NULL;
exp->nes_numsecflavor = 0;
error = VFS_FHTOVP(mp, &fhp->fh_fid, vpp);
if (error != 0)
/* Make sure the server replies ESTALE to the client. */
error = ESTALE;
if (nam && !error) {
error = VFS_CHECKEXP(mp, nam, &exp->nes_exflag, credp,
&exp->nes_numsecflavor, &secflavors);

View file

@ -443,7 +443,10 @@ g_io_request(struct bio *bp, struct g_consumer *cp)
("Bio already on queue bp=%p", bp));
bp->bio_flags |= BIO_ONQUEUE;
binuptime(&bp->bio_t0);
if (g_collectstats)
binuptime(&bp->bio_t0);
else
getbinuptime(&bp->bio_t0);
/*
* The statistics collection is lockless, as such, but we

View file

@ -1461,11 +1461,7 @@ SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
* Reset registers to default values on exec.
*/
void
exec_setregs(td, entry, stack, ps_strings)
struct thread *td;
u_long entry;
u_long stack;
u_long ps_strings;
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
struct trapframe *regs = td->td_frame;
struct pcb *pcb = td->td_pcb;
@ -1481,7 +1477,7 @@ exec_setregs(td, entry, stack, ps_strings)
mtx_unlock_spin(&dt_lock);
bzero((char *)regs, sizeof(struct trapframe));
regs->tf_eip = entry;
regs->tf_eip = imgp->entry_addr;
regs->tf_esp = stack;
regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
regs->tf_ss = _udatasel;
@ -1491,7 +1487,7 @@ exec_setregs(td, entry, stack, ps_strings)
regs->tf_cs = _ucodesel;
/* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */
regs->tf_ebx = ps_strings;
regs->tf_ebx = imgp->ps_strings;
/*
* Reset the hardware debug registers if they were in use.

View file

@ -60,11 +60,20 @@ static int mca_count; /* Number of records stored. */
SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL, "Machine Check Architecture");
static int mca_enabled = 0;
static int mca_enabled = 1;
TUNABLE_INT("hw.mca.enabled", &mca_enabled);
SYSCTL_INT(_hw_mca, OID_AUTO, enabled, CTLFLAG_RDTUN, &mca_enabled, 0,
"Administrative toggle for machine check support");
static int amd10h_L1TP = 1;
TUNABLE_INT("hw.mca.amd10h_L1TP", &amd10h_L1TP);
SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
"Administrative toggle for logging of level one TLB parity (L1TP) errors");
int workaround_erratum383;
SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RD, &workaround_erratum383, 0,
"Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");
static STAILQ_HEAD(, mca_internal) mca_records;
static struct callout mca_timer;
static int mca_ticks = 3600; /* Check hourly by default. */
@ -527,7 +536,7 @@ void
mca_init(void)
{
uint64_t mcg_cap;
uint64_t ctl;
uint64_t ctl, mask;
int skip;
int i;
@ -535,6 +544,15 @@ mca_init(void)
if (!mca_enabled || !(cpu_feature & CPUID_MCE))
return;
/*
* On AMD Family 10h processors, unless logging of level one TLB
* parity (L1TP) errors is disabled, enable the recommended workaround
* for Erratum 383.
*/
if (cpu_vendor_id == CPU_VENDOR_AMD &&
CPUID_TO_FAMILY(cpu_id) == 0x10 && amd10h_L1TP)
workaround_erratum383 = 1;
if (cpu_feature & CPUID_MCA) {
if (PCPU_GET(cpuid) == 0)
mca_setup();
@ -545,6 +563,19 @@ mca_init(void)
/* Enable MCA features. */
wrmsr(MSR_MCG_CTL, MCG_CTL_ENABLE);
/*
* Disable logging of level one TLB parity (L1TP) errors by
* the data cache as an alternative workaround for AMD Family
* 10h Erratum 383. Unlike the recommended workaround, there
* is no performance penalty to this workaround. However,
* L1TP errors will go unreported.
*/
if (cpu_vendor_id == CPU_VENDOR_AMD &&
CPUID_TO_FAMILY(cpu_id) == 0x10 && !amd10h_L1TP) {
mask = rdmsr(MSR_MC0_CTL_MASK);
if ((mask & (1UL << 5)) == 0)
wrmsr(MSR_MC0_CTL_MASK, mask | (1UL << 5));
}
for (i = 0; i < (mcg_cap & MCG_CAP_COUNT); i++) {
/* By default enable logging of all errors. */
ctl = 0xffffffffffffffffUL;

View file

@ -5,7 +5,7 @@
* All rights reserved.
* Copyright (c) 1994 David Greenman
* All rights reserved.
* Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
* Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
@ -207,8 +207,8 @@ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
int pgeflag = 0; /* PG_G or-in */
int pseflag = 0; /* PG_PS or-in */
static int nkpt;
vm_offset_t kernel_vm_end;
static int nkpt = NKPT;
vm_offset_t kernel_vm_end = KERNBASE + NKPT * NBPDR;
extern u_int32_t KERNend;
extern u_int32_t KPTphys;
@ -297,6 +297,7 @@ static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
@ -315,6 +316,9 @@ static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m);
static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
pd_entry_t newpde);
static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
@ -380,11 +384,17 @@ pmap_bootstrap(vm_paddr_t firstaddr)
kernel_pmap->pm_active = -1; /* don't allow deactivation */
TAILQ_INIT(&kernel_pmap->pm_pvchunk);
LIST_INIT(&allpmaps);
/*
* Request a spin mutex so that changes to allpmaps cannot be
* preempted by smp_rendezvous_cpus(). Otherwise,
* pmap_update_pde_kernel() could access allpmaps while it is
* being changed.
*/
mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
mtx_lock_spin(&allpmaps_lock);
LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
mtx_unlock_spin(&allpmaps_lock);
nkpt = NKPT;
/*
* Reserve some special page table entries/VA space for temporary
@ -692,19 +702,21 @@ pmap_init(void)
pv_entry_high_water = 9 * (pv_entry_max / 10);
/*
* Disable large page mappings by default if the kernel is running in
* a virtual machine on an AMD Family 10h processor. This is a work-
* around for Erratum 383.
* If the kernel is running in a virtual machine on an AMD Family 10h
* processor, then it must assume that MCA is enabled by the virtual
* machine monitor.
*/
if (vm_guest == VM_GUEST_VM && cpu_vendor_id == CPU_VENDOR_AMD &&
CPUID_TO_FAMILY(cpu_id) == 0x10)
pg_ps_enabled = 0;
workaround_erratum383 = 1;
/*
* Are large page mappings enabled?
* Are large page mappings supported and enabled?
*/
TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
if (pg_ps_enabled) {
if (pseflag == 0)
pg_ps_enabled = 0;
else if (pg_ps_enabled) {
KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
("pmap_init: can't assign to pagesizes[1]"));
pagesizes[1] = NBPDR;
@ -850,6 +862,69 @@ pmap_cache_bits(int mode, boolean_t is_pde)
cache_bits |= PG_NC_PWT;
return (cache_bits);
}
/*
* The caller is responsible for maintaining TLB consistency.
*/
static void
pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde)
{
pd_entry_t *pde;
pmap_t pmap;
boolean_t PTD_updated;
PTD_updated = FALSE;
mtx_lock_spin(&allpmaps_lock);
LIST_FOREACH(pmap, &allpmaps, pm_list) {
if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] &
PG_FRAME))
PTD_updated = TRUE;
pde = pmap_pde(pmap, va);
pde_store(pde, newpde);
}
mtx_unlock_spin(&allpmaps_lock);
KASSERT(PTD_updated,
("pmap_kenter_pde: current page table is not in allpmaps"));
}
/*
* After changing the page size for the specified virtual address in the page
* table, flush the corresponding entries from the processor's TLB. Only the
* calling processor's TLB is affected.
*
* The calling thread must be pinned to a processor.
*/
static void
pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde)
{
u_long cr4;
if ((newpde & PG_PS) == 0)
/* Demotion: flush a specific 2MB page mapping. */
invlpg(va);
else if ((newpde & PG_G) == 0)
/*
* Promotion: flush every 4KB page mapping from the TLB
* because there are too many to flush individually.
*/
invltlb();
else {
/*
* Promotion: flush every 4KB page mapping from the TLB,
* including any global (PG_G) mappings.
*/
cr4 = rcr4();
load_cr4(cr4 & ~CR4_PGE);
/*
* Although preemption at this point could be detrimental to
* performance, it would not lead to an error. PG_G is simply
* ignored if CR4.PGE is clear. Moreover, in case this block
* is re-entered, the load_cr4() either above or below will
* modify CR4.PGE flushing the TLB.
*/
load_cr4(cr4 | CR4_PGE);
}
}
#ifdef SMP
/*
* For SMP, these functions have to use the IPI mechanism for coherence.
@ -946,6 +1021,92 @@ pmap_invalidate_cache(void)
smp_cache_flush();
sched_unpin();
}
struct pde_action {
cpumask_t store; /* processor that updates the PDE */
cpumask_t invalidate; /* processors that invalidate their TLB */
vm_offset_t va;
pd_entry_t *pde;
pd_entry_t newpde;
};
static void
pmap_update_pde_kernel(void *arg)
{
struct pde_action *act = arg;
pd_entry_t *pde;
pmap_t pmap;
if (act->store == PCPU_GET(cpumask))
/*
* Elsewhere, this operation requires allpmaps_lock for
* synchronization. Here, it does not because it is being
* performed in the context of an all_cpus rendezvous.
*/
LIST_FOREACH(pmap, &allpmaps, pm_list) {
pde = pmap_pde(pmap, act->va);
pde_store(pde, act->newpde);
}
}
static void
pmap_update_pde_user(void *arg)
{
struct pde_action *act = arg;
if (act->store == PCPU_GET(cpumask))
pde_store(act->pde, act->newpde);
}
static void
pmap_update_pde_teardown(void *arg)
{
struct pde_action *act = arg;
if ((act->invalidate & PCPU_GET(cpumask)) != 0)
pmap_update_pde_invalidate(act->va, act->newpde);
}
/*
* Change the page size for the specified virtual address in a way that
* prevents any possibility of the TLB ever having two entries that map the
* same virtual address using different page sizes. This is the recommended
* workaround for Erratum 383 on AMD Family 10h processors. It prevents a
* machine check exception for a TLB state that is improperly diagnosed as a
* hardware error.
*/
static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
struct pde_action act;
cpumask_t active, cpumask;
sched_pin();
cpumask = PCPU_GET(cpumask);
if (pmap == kernel_pmap)
active = all_cpus;
else
active = pmap->pm_active;
if ((active & PCPU_GET(other_cpus)) != 0) {
act.store = cpumask;
act.invalidate = active;
act.va = va;
act.pde = pde;
act.newpde = newpde;
smp_rendezvous_cpus(cpumask | active,
smp_no_rendevous_barrier, pmap == kernel_pmap ?
pmap_update_pde_kernel : pmap_update_pde_user,
pmap_update_pde_teardown, &act);
} else {
if (pmap == kernel_pmap)
pmap_kenter_pde(va, newpde);
else
pde_store(pde, newpde);
if ((active & cpumask) != 0)
pmap_update_pde_invalidate(va, newpde);
}
sched_unpin();
}
#else /* !SMP */
/*
* Normal, non-SMP, 486+ invalidation functions.
@ -983,6 +1144,18 @@ pmap_invalidate_cache(void)
wbinvd();
}
static void
pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
{
if (pmap == kernel_pmap)
pmap_kenter_pde(va, newpde);
else
pde_store(pde, newpde);
if (pmap == kernel_pmap || pmap->pm_active)
pmap_update_pde_invalidate(va, newpde);
}
#endif /* !SMP */
void
@ -1856,32 +2029,17 @@ SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
void
pmap_growkernel(vm_offset_t addr)
{
struct pmap *pmap;
vm_paddr_t ptppaddr;
vm_page_t nkpg;
pd_entry_t newpdir;
pt_entry_t *pde;
boolean_t updated_PTD;
mtx_assert(&kernel_map->system_mtx, MA_OWNED);
if (kernel_vm_end == 0) {
kernel_vm_end = KERNBASE;
nkpt = 0;
while (pdir_pde(PTD, kernel_vm_end)) {
kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
nkpt++;
if (kernel_vm_end - 1 >= kernel_map->max_offset) {
kernel_vm_end = kernel_map->max_offset;
break;
}
}
}
addr = roundup2(addr, PAGE_SIZE * NPTEPG);
addr = roundup2(addr, NBPDR);
if (addr - 1 >= kernel_map->max_offset)
addr = kernel_map->max_offset;
while (kernel_vm_end < addr) {
if (pdir_pde(PTD, kernel_vm_end)) {
kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
if (kernel_vm_end - 1 >= kernel_map->max_offset) {
kernel_vm_end = kernel_map->max_offset;
break;
@ -1903,19 +2061,8 @@ pmap_growkernel(vm_offset_t addr)
newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
pdir_pde(KPTD, kernel_vm_end) = pgeflag | newpdir;
updated_PTD = FALSE;
mtx_lock_spin(&allpmaps_lock);
LIST_FOREACH(pmap, &allpmaps, pm_list) {
if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] &
PG_FRAME))
updated_PTD = TRUE;
pde = pmap_pde(pmap, kernel_vm_end);
pde_store(pde, newpdir);
}
mtx_unlock_spin(&allpmaps_lock);
KASSERT(updated_PTD,
("pmap_growkernel: current page table is not in allpmaps"));
kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
pmap_kenter_pde(kernel_vm_end, newpdir);
kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
if (kernel_vm_end - 1 >= kernel_map->max_offset) {
kernel_vm_end = kernel_map->max_offset;
break;
@ -2358,7 +2505,6 @@ static boolean_t
pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
{
pd_entry_t newpde, oldpde;
pmap_t allpmaps_entry;
pt_entry_t *firstpte, newpte;
vm_paddr_t mptepa;
vm_page_t free, mpte;
@ -2464,25 +2610,11 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
* processor changing the setting of PG_A and/or PG_M between
* the read above and the store below.
*/
if (pmap == kernel_pmap) {
/*
* A harmless race exists between this loop and the bcopy()
* in pmap_pinit() that initializes the kernel segment of
* the new page table directory. Specifically, that bcopy()
* may copy the new PDE from the PTD to the new page table
* before this loop updates that new page table.
*/
mtx_lock_spin(&allpmaps_lock);
LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
pde = pmap_pde(allpmaps_entry, va);
KASSERT(*pde == newpde || (*pde & PG_PTE_PROMOTE) ==
(oldpde & PG_PTE_PROMOTE),
("pmap_demote_pde: pde was %#jx, expected %#jx",
(uintmax_t)*pde, (uintmax_t)oldpde));
pde_store(pde, newpde);
}
mtx_unlock_spin(&allpmaps_lock);
} else
if (workaround_erratum383)
pmap_update_pde(pmap, va, pde, newpde);
else if (pmap == kernel_pmap)
pmap_kenter_pde(va, newpde);
else
pde_store(pde, newpde);
if (firstpte == PADDR2)
mtx_unlock(&PMAP2mutex);
@ -3001,7 +3133,6 @@ static void
pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
{
pd_entry_t newpde;
pmap_t allpmaps_entry;
pt_entry_t *firstpte, oldpte, pa, *pte;
vm_offset_t oldpteva;
vm_page_t mpte;
@ -3013,7 +3144,7 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
* either invalid, unused, or does not map the first 4KB physical page
* within a 2- or 4MB page.
*/
firstpte = vtopte(trunc_4mpage(va));
firstpte = pmap_pte_quick(pmap, trunc_4mpage(va));
setpde:
newpde = *firstpte;
if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
@ -3105,14 +3236,11 @@ setpte:
/*
* Map the superpage.
*/
if (pmap == kernel_pmap) {
mtx_lock_spin(&allpmaps_lock);
LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
pde = pmap_pde(allpmaps_entry, va);
pde_store(pde, PG_PS | newpde);
}
mtx_unlock_spin(&allpmaps_lock);
} else
if (workaround_erratum383)
pmap_update_pde(pmap, va, pde, PG_PS | newpde);
else if (pmap == kernel_pmap)
pmap_kenter_pde(va, PG_PS | newpde);
else
pde_store(pde, PG_PS | newpde);
pmap_pde_promotions++;

View file

@ -72,9 +72,9 @@ bsd_stat2ibcs_stat(st, st4)
st4->st_size = (ibcs2_off_t)st->st_size;
else
st4->st_size = -2;
st4->st_atim = (ibcs2_time_t)st->st_atime;
st4->st_mtim = (ibcs2_time_t)st->st_mtime;
st4->st_ctim = (ibcs2_time_t)st->st_ctime;
st4->st_atim = (ibcs2_time_t)st->st_atim.tv_sec;
st4->st_mtim = (ibcs2_time_t)st->st_mtim.tv_sec;
st4->st_ctim = (ibcs2_time_t)st->st_ctim.tv_sec;
}
static int

View file

@ -73,6 +73,7 @@ extern int szosigcode;
#endif
extern uint32_t *vm_page_dump;
extern int vm_page_dump_size;
extern int workaround_erratum383;
typedef void alias_for_inthand_t(u_int cs, u_int ef, u_int esp, u_int ss);
struct thread;

View file

@ -551,6 +551,7 @@
/* AMD64 MSR's */
#define MSR_EFER 0xc0000080 /* extended features */
#define MSR_K8_UCODE_UPDATE 0xc0010020 /* update microcode */
#define MSR_MC0_CTL_MASK 0xc0010044
/* VIA ACE crypto featureset: for via_feature_rng */
#define VIA_HAS_RNG 1 /* cpu has RNG */

View file

@ -178,9 +178,9 @@ struct l_newstat {
l_ulong st_size;
l_ulong st_blksize;
l_ulong st_blocks;
struct l_timespec st_atimespec;
struct l_timespec st_mtimespec;
struct l_timespec st_ctimespec;
struct l_timespec st_atim;
struct l_timespec st_mtim;
struct l_timespec st_ctim;
l_ulong __unused4;
l_ulong __unused5;
};
@ -194,9 +194,9 @@ struct l_stat {
l_ushort st_gid;
l_ushort st_rdev;
l_long st_size;
struct l_timespec st_atimespec;
struct l_timespec st_mtimespec;
struct l_timespec st_ctimespec;
struct l_timespec st_atim;
struct l_timespec st_mtim;
struct l_timespec st_ctim;
l_long st_blksize;
l_long st_blocks;
l_ulong st_flags;
@ -217,9 +217,9 @@ struct l_stat64 {
l_ulong st_blksize;
l_ulong st_blocks;
l_ulong __pad4;
struct l_timespec st_atimespec;
struct l_timespec st_mtimespec;
struct l_timespec st_ctimespec;
struct l_timespec st_atim;
struct l_timespec st_mtim;
struct l_timespec st_ctim;
l_ulonglong st_ino;
};

View file

@ -105,8 +105,8 @@ static int elf_linux_fixup(register_t **stack_base,
static void linux_prepsyscall(struct trapframe *tf, int *args, u_int *code,
caddr_t *params);
static void linux_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask);
static void exec_linux_setregs(struct thread *td, u_long entry,
u_long stack, u_long ps_strings);
static void exec_linux_setregs(struct thread *td,
struct image_params *imgp, u_long stack);
static register_t *linux_copyout_strings(struct image_params *imgp);
static boolean_t linux_trans_osrel(const Elf_Note *note, int32_t *osrel);
@ -927,12 +927,11 @@ exec_linux_imgact_try(struct image_params *imgp)
* override the exec_setregs default(s) here.
*/
static void
exec_linux_setregs(struct thread *td, u_long entry,
u_long stack, u_long ps_strings)
exec_linux_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
struct pcb *pcb = td->td_pcb;
exec_setregs(td, entry, stack, ps_strings);
exec_setregs(td, imgp, stack);
/* Linux sets %gs to 0, we default to _udatasel */
pcb->pcb_gs = 0;

View file

@ -20,20 +20,21 @@
#
# $FreeBSD$
cpu ITANIUM
cpu ITANIUM2
ident GENERIC
makeoptions DEBUG=-g # Build kernel with debug information.
options AUDIT # Security event auditing
options CD9660 # ISO 9660 Filesystem
options COMPAT_FREEBSD6 # Compatible with FreeBSD6
options COMPAT_FREEBSD7 # Compatible with FreeBSD7
options DDB # Support DDB
options DEADLKRES # Enable the deadlock resolver
options FFS # Berkeley Fast Filesystem
options FLOWTABLE # per-cpu routing cache
options GDB # Support remote GDB
options GEOM_LABEL # Provides labelization
options INCLUDE_CONFIG_FILE # Include this file in kernel
options INET # InterNETworking
options INET6 # IPv6 communications protocols
options INVARIANTS # Enable calls of extra sanity checking
@ -44,9 +45,11 @@ options MAC # TrustedBSD MAC Framework
options MD_ROOT # MD usable as root device
options MSDOSFS # MSDOS Filesystem
options NFSCLIENT # Network Filesystem Client
options NFSSERVER # Network Filesystem Server
options NFSLOCKD # Network Lock Manager
options NFSSERVER # Network Filesystem Server
options NFS_ROOT # NFS usable as root device
options P1003_1B_SEMAPHORES # POSIX-style semaphores
options PREEMPTION # Enable kernel thread preemption
options PRINTF_BUFR_SIZE=128 # Printf buffering to limit interspersion
options PROCFS # Process filesystem (/proc)
options PSEUDOFS # Pseudo-filesystem framework
@ -59,15 +62,12 @@ options STACK # stack(9) support
options SYSVMSG # SYSV-style message queues
options SYSVSEM # SYSV-style semaphores
options SYSVSHM # SYSV-style shared memory
options P1003_1B_SEMAPHORES # POSIX-style semaphores
options UFS_ACL # Support for access control lists
options UFS_DIRHASH # Hash-based directory lookup scheme
options UFS_GJOURNAL # Enable gjournal-based UFS journaling
options WITNESS # Enable checks to detect deadlocks and cycles
options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed
options _KPOSIX_PRIORITY_SCHEDULING # Posix P1003_1B RT extensions
options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4)
options INCLUDE_CONFIG_FILE # Include this file in kernel
# Various "busses"
device firewire # FireWire bus code
@ -81,20 +81,25 @@ device ata # ATA controller
device atadisk # ATA disk drives
device atapicd # ATAPI CDROM drives
device atapifd # ATAPI floppy drives
device atapist # ATAPI tape drives
device ataraid # ATA RAID drives
# SCSI Controllers
device ahc # AHA2940 and AIC7xxx devices
device ahd # AHA39320/29320 and AIC79xx devices
device hptiop # Highpoint RocketRaid 3xxx series
device isp # Qlogic family
device mpt # LSI-Logic MPT-Fusion
device sym # NCR/Symbios Logic
# RAID controllers interfaced to the SCSI subsystem
device amr # AMI MegaRAID
device ciss # Compaq Smart RAID 5*
device dpt # DPT Smartcache III, IV
device iir # Intel Integrated RAID
device ips # IBM (Adaptec) ServeRAID
device mly # Mylex AcceleRAID/eXtremeRAID
device twa # 3ware 9000 series PATA/SATA RAID
# SCSI peripherals
device cd # CD-ROM, DVD-ROM etc.
@ -107,7 +112,6 @@ device ses # Environmental Services (and SAF-TE)
# RAID controllers
device aac # Adaptec FSA RAID
device aacp # SCSI passthrough for aac (requires CAM)
device amr # AMI MegaRAID
device ida # Compaq Smart RAID
device mlx # Mylex DAC960 family
@ -123,34 +127,58 @@ device ums # Mouse
# PCI Ethernet NICs.
device de # DEC/Intel DC21x4x (``Tulip'')
device em # Intel PRO/1000 adapter Gigabit Ethernet Card
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device em # Intel PRO/1000 Gigabit Ethernet Family
device igb # Intel PRO/1000 PCIE Server Gigabit Family
device ixgbe # Intel PRO/10GbE PCIE Ethernet Family
device txp # 3Com 3cR990 (``Typhoon'')
device vx # 3Com 3c590, 3c595 (``Vortex'')
# PCI Ethernet NICs that use the common MII bus controller code.
device ae # Attansic/Atheros L2 FastEthernet
device age # Attansic/Atheros L1 Gigabit Ethernet
device alc # Atheros AR8131/AR8132 Ethernet
device ale # Atheros AR8121/AR8113/AR8114 Ethernet
device bce # Broadcom BCM5706/BCM5708 Gigabit Ethernet
device bfe # Broadcom BCM440x 10/100 Ethernet
device bge # Broadcom BCM570xx Gigabit Ethernet
device dc # DEC/Intel 21143 and various workalikes
device et # Agere ET1310 10/100/Gigabit Ethernet
device jme # JMicron JMC250 Gigabit/JMC260 Fast Ethernet
device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet
device nge # NatSemi DP83820 gigabit Ethernet
device fxp # Intel EtherExpress PRO/100B (82557, 82558)
device pcn # AMD Am79C97x PCI 10/100 (precedence over 'le')
device re # RealTek 8139C+/8169/8169S/8110S
device rl # RealTek 8129/8139
device sf # Adaptec AIC-6915 (``Starfire'')
device sis # Silicon Integrated Systems SiS 900/SiS 7016
device sk # SysKonnect SK-984x & SK-982x gigabit Ethernet
device ste # Sundance ST201 (D-Link DFE-550TX)
device stge # Sundance/Tamarack TC9021 gigabit Ethernet
device tx # SMC EtherPower II (83c170 ``EPIC'')
device vge # VIA VT612x gigabit Ethernet
device xl # 3Com 3c90x ("Boomerang", "Cyclone")
# USB Ethernet
device aue # ADMtek USB Ethernet
device axe # ASIX Electronics USB Ethernet
device cdce # Generic USB over Ethernet
device cue # CATC USB Ethernet
device kue # Kawasaki LSI USB Ethernet
device rue # RealTek RTL8150 USB Ethernet
device udav # Davicom DM9601E USB
# USB Serial
device uark # Technologies ARK3116 based serial adapters
device ubsa # Belkin F5U103 and compatible serial adapters
device uftdi # For FTDI usb serial adapters
device uipaq # Some WinCE based devices
device uplcom # Prolific PL-2303 serial adapters
device uslcom # SI Labs CP2101/CP2102 serial adapters
device uvisor # Visor and Palm devices
device uvscom # USB serial support for DDI pocket's PHS
# FireWire support
device fwip # IP over FireWire (RFC 2734,3146)
device sbp # SCSI over FireWire (need scbus & da)
# Various (pseudo) devices
device ether # Ethernet support
device vlan # 802.1Q VLAN support
device faith # IPv6-to-IPv4 relaying (translation)
device gif # IPv6 and IPv4 tunneling
device loop # Network loopback
@ -160,6 +188,7 @@ device puc # Multi I/O cards and multi-channel UARTs
device random # Entropy device
device tun # Packet tunnel.
device uart # Serial port (UART)
device vlan # 802.1Q VLAN support
device firmware # firmware assist module
# The `bpf' device enables the Berkeley Packet Filter.

View file

@ -120,7 +120,7 @@ freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
void
ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
ia32_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
struct trapframe *tf = td->td_frame;
vm_offset_t gdt, ldt;
@ -129,7 +129,7 @@ ia32_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
struct segment_descriptor desc;
struct vmspace *vmspace = td->td_proc->p_vmspace;
exec_setregs(td, entry, stack, ps_strings);
exec_setregs(td, imgp, stack);
/* Non-syscall frames are cleared by exec_setregs() */
if (tf->tf_flags & FRAME_SYSCALL) {

View file

@ -39,15 +39,9 @@
#include <sys/bus.h>
#include <sys/cons.h>
#include <machine/md_var.h>
#include <machine/bootinfo.h>
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <machine/intr.h>
#include <machine/md_var.h>
static void configure_first(void *);
static void configure(void *);
@ -97,12 +91,9 @@ static void
configure_final(void *dummy)
{
/*
* Now we're ready to handle (pending) interrupts.
* XXX this is slightly misplaced.
*/
enable_intr();
cninit_finish();
ia64_enable_intr();
cold = 0;
}

View file

@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <machine/intr.h>
#include <machine/intrcnt.h>
#include <machine/md_var.h>
#include <machine/smp.h>
SYSCTL_NODE(_debug, OID_AUTO, clock, CTLFLAG_RW, 0, "clock statistics");
@ -91,60 +92,63 @@ ia64_ih_clock(struct thread *td, u_int xiv, struct trapframe *tf)
int count;
PCPU_INC(md.stats.pcs_nclks);
intrcnt[INTRCNT_CLOCK]++;
itc = ia64_get_itc();
if (PCPU_GET(cpuid) == 0) {
/*
* Clock processing on the BSP.
*/
intrcnt[INTRCNT_CLOCK]++;
adj = PCPU_GET(md.clockadj);
clk = PCPU_GET(md.clock);
itc = ia64_get_itc();
delta = itc - clk;
count = 0;
while (delta >= ia64_clock_reload) {
/* Only the BSP runs the real clock */
if (PCPU_GET(cpuid) == 0)
adj = PCPU_GET(md.clockadj);
clk = PCPU_GET(md.clock);
delta = itc - clk;
count = 0;
while (delta >= ia64_clock_reload) {
#ifdef SMP
ipi_all_but_self(ia64_clock_xiv);
#endif
hardclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
else
hardclock_cpu(TRAPF_USERMODE(tf));
if (profprocs != 0)
profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
statclock(TRAPF_USERMODE(tf));
delta -= ia64_clock_reload;
clk += ia64_clock_reload;
if (adj != 0)
adjust_ticks++;
count++;
}
ia64_set_itm(ia64_get_itc() + ia64_clock_reload - adj);
ia64_srlz_d();
if (count > 0) {
adjust_lost += count - 1;
if (delta > (ia64_clock_reload >> 3)) {
if (adj == 0)
adjust_edges++;
adj = ia64_clock_reload >> 4;
} else
adj = 0;
} else {
adj = 0;
adjust_excess++;
}
PCPU_SET(md.clock, clk);
PCPU_SET(md.clockadj, adj);
} else {
/*
* Clock processing on the BSP.
*/
hardclock_cpu(TRAPF_USERMODE(tf));
if (profprocs != 0)
profclock(TRAPF_USERMODE(tf), TRAPF_PC(tf));
statclock(TRAPF_USERMODE(tf));
delta -= ia64_clock_reload;
clk += ia64_clock_reload;
if (adj != 0)
adjust_ticks++;
count++;
}
ia64_set_itm(ia64_get_itc() + ia64_clock_reload - adj);
ia64_srlz_d();
if (count > 0) {
adjust_lost += count - 1;
if (delta > (ia64_clock_reload >> 3)) {
if (adj == 0)
adjust_edges++;
adj = ia64_clock_reload >> 4;
} else
adj = 0;
} else {
adj = 0;
adjust_excess++;
}
PCPU_SET(md.clock, clk);
PCPU_SET(md.clockadj, adj);
return (0);
}
/*
 * pcpu_initclock: arm the interval timer on the current CPU.
 *
 * Clears the per-CPU clock adjustment accumulator, records the current
 * ITC value as the per-CPU clock base, programs the ITM match register
 * one clock reload period into the future, routes interval-timer
 * interrupts to the clock XIV and issues a data serialize so the
 * control register writes take effect.
 */
void
pcpu_initclock(void)
{
PCPU_SET(md.clockadj, 0);	/* no pending clock adjustment */
PCPU_SET(md.clock, ia64_get_itc());	/* clock base = "now" */
ia64_set_itm(PCPU_GET(md.clock) + ia64_clock_reload);
ia64_set_itv(ia64_clock_xiv);	/* deliver on the clock vector */
ia64_srlz_d();	/* serialize the CR writes */
}
/*
* Start the real-time and statistics clocks. We use ar.itc and cr.itm
* to implement a 1000hz clock.
@ -154,7 +158,7 @@ cpu_initclocks()
{
u_long itc_freq;
ia64_clock_xiv = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IRQ,
ia64_clock_xiv = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI,
ia64_ih_clock);
if (ia64_clock_xiv == 0)
panic("No XIV for clock interrupts");
@ -169,7 +173,11 @@ cpu_initclocks()
tc_init(&ia64_timecounter);
#endif
pcpu_initclock();
PCPU_SET(md.clockadj, 0);
PCPU_SET(md.clock, ia64_get_itc());
ia64_set_itm(PCPU_GET(md.clock) + ia64_clock_reload);
ia64_set_itv(ia64_clock_xiv);
ia64_srlz_d();
}
void

View file

@ -122,7 +122,7 @@ ia64_xiv_reserve(u_int xiv, enum ia64_xiv_use what, ia64_ihtype ih)
return (EBUSY);
ia64_xiv[xiv] = what;
ia64_handler[xiv] = (ih == NULL) ? ia64_ih_invalid: ih;
if (1 || bootverbose)
if (bootverbose)
printf("XIV %u: use=%u, IH=%p\n", xiv, what, ih);
return (0);
}
@ -139,7 +139,7 @@ ia64_xiv_alloc(u_int prio, enum ia64_xiv_use what, ia64_ihtype ih)
xiv0 = IA64_NXIVS - (hwprio + 1) * 16;
KASSERT(xiv0 > IA64_MIN_XIV, ("%s: min XIV", __func__));
KASSERT(xiv0 >= IA64_MIN_XIV, ("%s: min XIV", __func__));
KASSERT(xiv0 < IA64_NXIVS, ("%s: max XIV", __func__));
xiv = xiv0;
@ -280,6 +280,27 @@ ia64_teardown_intr(void *cookie)
return (intr_event_remove_handler(cookie));
}
/*
 * ia64_bind_intr: distribute all device (IRQ-class) interrupt vectors
 * across the awake CPUs.
 *
 * Walks the XIV table from high to low and, for every XIV in use for a
 * device interrupt, re-routes the corresponding interrupt to the next
 * awake CPU, cycling through CPU ids in descending round-robin order
 * via sapic_bind_intr().
 *
 * NOTE(review): the loop condition "xiv >= IA64_MIN_XIV" with unsigned
 * xiv assumes IA64_MIN_XIV > 0; if it were 0 the decrement would wrap
 * and the loop would not terminate — confirm against the definition.
 */
void
ia64_bind_intr(void)
{
struct ia64_intr *i;
struct pcpu *pc;
u_int xiv;
int cpu;
cpu = MAXCPU;	/* first pick below becomes MAXCPU - 1 */
for (xiv = IA64_NXIVS - 1; xiv >= IA64_MIN_XIV; xiv--) {
if (ia64_xiv[xiv] != IA64_XIV_IRQ)
continue;	/* not a device interrupt */
i = ia64_intrs[xiv];
/* Find the next lower-numbered CPU that is awake (wraps around). */
do {
cpu = (cpu == 0) ? MAXCPU - 1 : cpu - 1;
pc = cpuid_to_pcpu[cpu];
} while (pc == NULL || !pc->pc_md.awake);
sapic_bind_intr(i->irq, pc);
}
}
/*
* Interrupt handlers.
*/
@ -318,9 +339,9 @@ ia64_handle_intr(struct trapframe *tf)
out:
if (TRAPF_USERMODE(tf)) {
while (td->td_flags & (TDF_ASTPENDING|TDF_NEEDRESCHED)) {
enable_intr();
ia64_enable_intr();
ast(tf);
disable_intr();
ia64_disable_intr();
}
}
}

View file

@ -1328,7 +1328,7 @@ set_mcontext(struct thread *td, const mcontext_t *mc)
* Clear registers on exec.
*/
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
struct trapframe *tf;
uint64_t *ksttop, *kst;
@ -1366,7 +1366,7 @@ exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
*kst-- = 0;
if (((uintptr_t)kst & 0x1ff) == 0x1f8)
*kst-- = 0;
*kst-- = ps_strings;
*kst-- = imgp->ps_strings;
if (((uintptr_t)kst & 0x1ff) == 0x1f8)
*kst-- = 0;
*kst = stack;
@ -1381,11 +1381,11 @@ exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
* Assumes that (bspstore & 0x1f8) < 0x1e0.
*/
suword((caddr_t)tf->tf_special.bspstore - 24, stack);
suword((caddr_t)tf->tf_special.bspstore - 16, ps_strings);
suword((caddr_t)tf->tf_special.bspstore - 16, imgp->ps_strings);
suword((caddr_t)tf->tf_special.bspstore - 8, 0);
}
tf->tf_special.iip = entry;
tf->tf_special.iip = imgp->entry_addr;
tf->tf_special.sp = (stack & ~15) - 16;
tf->tf_special.rsc = 0xf;
tf->tf_special.fpsr = IA64_FPSR_DEFAULT;

View file

@ -206,12 +206,11 @@ ia64_ap_startup(void)
CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));
/* kick off the clock on this AP */
pcpu_initclock();
/* Mask interval timer interrupts on APs. */
ia64_set_itv(0x10000);
ia64_set_tpr(0);
ia64_srlz_d();
enable_intr();
ia64_enable_intr();
sched_throw(NULL);
/* NOTREACHED */
@ -383,6 +382,12 @@ cpu_mp_unleash(void *dummy)
smp_active = 1;
smp_started = 1;
/*
* Now that all CPUs are up and running, bind interrupts to each of
* them.
*/
ia64_bind_intr();
}
/*

View file

@ -50,13 +50,13 @@
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/efi.h>
#include <machine/intr.h>
#include <machine/nexusvar.h>
#include <machine/pmap.h>
#include <machine/resource.h>
#include <machine/vmparam.h>
@ -73,7 +73,6 @@
static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device");
struct nexus_device {
struct resource_list nx_resources;
int nx_pcibus;
};
#define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev))
@ -87,8 +86,6 @@ static device_t nexus_add_child(device_t bus, int order, const char *name,
int unit);
static struct resource *nexus_alloc_resource(device_t, device_t, int, int *,
u_long, u_long, u_long, u_int);
static int nexus_read_ivar(device_t, device_t, int, uintptr_t *);
static int nexus_write_ivar(device_t, device_t, int, uintptr_t);
static int nexus_activate_resource(device_t, device_t, int, int,
struct resource *);
static int nexus_deactivate_resource(device_t, device_t, int, int,
@ -105,6 +102,7 @@ static int nexus_set_resource(device_t, device_t, int, int, u_long, u_long);
static int nexus_get_resource(device_t, device_t, int, int, u_long *,
u_long *);
static void nexus_delete_resource(device_t, device_t, int, int);
static int nexus_bind_intr(device_t, device_t, struct resource *, int);
static int nexus_config_intr(device_t, int, enum intr_trigger,
enum intr_polarity);
@ -123,8 +121,6 @@ static device_method_t nexus_methods[] = {
/* Bus interface */
DEVMETHOD(bus_print_child, nexus_print_child),
DEVMETHOD(bus_add_child, nexus_add_child),
DEVMETHOD(bus_read_ivar, nexus_read_ivar),
DEVMETHOD(bus_write_ivar, nexus_write_ivar),
DEVMETHOD(bus_alloc_resource, nexus_alloc_resource),
DEVMETHOD(bus_release_resource, nexus_release_resource),
DEVMETHOD(bus_activate_resource, nexus_activate_resource),
@ -135,6 +131,7 @@ static device_method_t nexus_methods[] = {
DEVMETHOD(bus_set_resource, nexus_set_resource),
DEVMETHOD(bus_get_resource, nexus_get_resource),
DEVMETHOD(bus_delete_resource, nexus_delete_resource),
DEVMETHOD(bus_bind_intr, nexus_bind_intr),
DEVMETHOD(bus_config_intr, nexus_config_intr),
/* Clock interface */
@ -215,8 +212,6 @@ nexus_print_child(device_t bus, device_t child)
retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx");
retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
if (ndev->nx_pcibus != -1)
retval += printf(" pcibus %d", ndev->nx_pcibus);
if (device_get_flags(child))
retval += printf(" flags %#x", device_get_flags(child));
retval += printf(" on motherboard\n"); /* XXX "motherboard", ick */
@ -234,7 +229,6 @@ nexus_add_child(device_t bus, int order, const char *name, int unit)
if (!ndev)
return(0);
resource_list_init(&ndev->nx_resources);
ndev->nx_pcibus = -1;
child = device_add_child_ordered(bus, order, name, unit);
@ -244,37 +238,6 @@ nexus_add_child(device_t bus, int order, const char *name, int unit)
return(child);
}
/*
 * bus_read_ivar() method: report an instance variable of a nexus child.
 * Only NEXUS_IVAR_PCIBUS is known; any other ivar yields ENOENT.
 */
static int
nexus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct nexus_device *ndev = DEVTONX(child);

	if (which != NEXUS_IVAR_PCIBUS)
		return ENOENT;
	*result = ndev->nx_pcibus;
	return 0;
}
/*
 * bus_write_ivar() method: set an instance variable of a nexus child.
 * Only NEXUS_IVAR_PCIBUS is writable; any other ivar yields ENOENT.
 */
static int
nexus_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	struct nexus_device *ndev = DEVTONX(child);

	if (which != NEXUS_IVAR_PCIBUS)
		return ENOENT;
	ndev->nx_pcibus = value;
	return 0;
}
/*
* Allocate a resource on behalf of child. NB: child is usually going to be a
@ -501,6 +464,17 @@ nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
return (sapic_config_intr(irq, trig, pol));
}
/*
 * nexus_bind_intr: bus_bind_intr() method — bind an interrupt resource
 * to a specific CPU.  Translates the cpu id to its pcpu structure and
 * re-routes the IRQ (the resource's start value) via sapic_bind_intr().
 * Returns EINVAL for a cpu id with no pcpu.
 */
static int
nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu)
{
struct pcpu *pc;
pc = cpuid_to_pcpu[cpu];
if (pc == NULL)
return (EINVAL);
return (sapic_bind_intr(rman_get_start(irq), pc));
}
static int
nexus_gettime(device_t dev, struct timespec *ts)
{

View file

@ -35,6 +35,7 @@
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/sysctl.h>
#include <machine/intr.h>
@ -171,6 +172,26 @@ sapic_lookup(u_int irq, u_int *vecp)
}
/*
 * sapic_bind_intr: redirect interrupt "irq" to the CPU described by pc.
 *
 * Looks up the SAPIC that serves the IRQ, then rewrites its redirection
 * table entry under the SAPIC spin lock: the destination id/eid fields
 * are taken from the target CPU's lid value and the delivery mode is
 * forced to "fixed".  Returns EINVAL if no SAPIC serves the IRQ,
 * 0 on success.
 */
int
sapic_bind_intr(u_int irq, struct pcpu *pc)
{
struct sapic_rte rte;
struct sapic *sa;
sa = sapic_lookup(irq, NULL);
if (sa == NULL)
return (EINVAL);
mtx_lock_spin(&sa->sa_mtx);
sapic_read_rte(sa, irq - sa->sa_base, &rte);
/* Destination is encoded as id:eid taken from pc->pc_md.lid. */
rte.rte_destination_id = (pc->pc_md.lid >> 24) & 255;
rte.rte_destination_eid = (pc->pc_md.lid >> 16) & 255;
rte.rte_delivery_mode = SAPIC_DELMODE_FIXED;
sapic_write_rte(sa, irq - sa->sa_base, &rte);
mtx_unlock_spin(&sa->sa_mtx);
return (0);
}
int
sapic_config_intr(u_int irq, enum intr_trigger trig, enum intr_polarity pol)
{

View file

@ -334,11 +334,11 @@ int
do_ast(struct trapframe *tf)
{
disable_intr();
ia64_disable_intr();
while (curthread->td_flags & (TDF_ASTPENDING|TDF_NEEDRESCHED)) {
enable_intr();
ia64_enable_intr();
ast(tf);
disable_intr();
ia64_disable_intr();
}
/*
* Keep interrupts disabled. We return r10 as a favor to the EPC

View file

@ -56,8 +56,8 @@
#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() disable_intr()
#define ACPI_ENABLE_IRQS() enable_intr()
#define ACPI_DISABLE_IRQS() ia64_disable_intr()
#define ACPI_ENABLE_IRQS() ia64_enable_intr()
#define ACPI_FLUSH_CPU_CACHE() /* XXX ia64_fc()? */

View file

@ -56,13 +56,13 @@ breakpoint(void)
static __inline void
disable_intr(void)
ia64_disable_intr(void)
{
__asm __volatile ("rsm psr.i");
}
static __inline void
enable_intr(void)
ia64_enable_intr(void)
{
__asm __volatile ("ssm psr.i;; srlz.d");
}
@ -71,8 +71,9 @@ static __inline register_t
intr_disable(void)
{
register_t psr;
__asm __volatile ("mov %0=psr;;" : "=r"(psr));
disable_intr();
ia64_disable_intr();
return ((psr & IA64_PSR_I) ? 1 : 0);
}
@ -80,7 +81,7 @@ static __inline void
intr_restore(register_t ie)
{
if (ie)
enable_intr();
ia64_enable_intr();
}
#endif /* __GNUCLIKE_ASM */

View file

@ -35,6 +35,7 @@
#define IA64_MAX_HWPRIO 14
struct pcpu;
struct sapic;
struct thread;
struct trapframe;
@ -65,6 +66,7 @@ typedef u_int (ia64_ihtype)(struct thread *, u_int, struct trapframe *);
extern struct ia64_pib *ia64_pib;
void ia64_bind_intr(void);
void ia64_handle_intr(struct trapframe *);
int ia64_setup_intr(const char *, int, driver_filter_t, driver_intr_t,
void *, enum intr_type, void **);
@ -75,6 +77,7 @@ u_int ia64_xiv_alloc(u_int, enum ia64_xiv_use, ia64_ihtype);
int ia64_xiv_free(u_int, enum ia64_xiv_use);
int ia64_xiv_reserve(u_int, enum ia64_xiv_use, ia64_ihtype);
int sapic_bind_intr(u_int, struct pcpu *);
int sapic_config_intr(u_int, enum intr_trigger, enum intr_polarity);
struct sapic *sapic_create(u_int, u_int, uint64_t);
int sapic_enable(struct sapic *, u_int, u_int);

View file

@ -1,43 +0,0 @@
/*-
* Copyright (c) 2000 Peter Wemm <peter@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_NEXUSVAR_H_
#define _MACHINE_NEXUSVAR_H_ 1
enum nexus_device_ivars {
NEXUS_IVAR_PCIBUS
};
#define NEXUS_ACCESSOR(var, ivar, type) \
__BUS_ACCESSOR(nexus, var, NEXUS, ivar, type)
NEXUS_ACCESSOR(pcibus, PCIBUS, u_int32_t)
#undef NEXUS_ACCESSOR
#endif /* !_MACHINE_NEXUSVAR_H_ */

View file

@ -91,8 +91,6 @@ __curthread(void)
#define PCPU_PTR(member) (&pcpup->pc_ ## member)
#define PCPU_SET(member,value) (pcpup->pc_ ## member = (value))
void pcpu_initclock(void);
#endif /* _KERNEL */
#endif /* !_MACHINE_PCPU_H_ */

View file

@ -832,13 +832,8 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
phdr[i].p_vaddr + et_dyn_addr - seg_addr);
/*
* Is this .text or .data? We can't use
* VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
* alpha terribly and possibly does other bad
* things so we stick to the old way of figuring
* it out: If the segment contains the program
* entry point, it's a text segment, otherwise it
* is a data segment.
* Make the largest executable segment the official
* text segment and all others data.
*
* Note that obreak() assumes that data_addr +
* data_size == end of data load area, and the ELF
@ -846,12 +841,10 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
* address. If multiple data segments exist, the
* last one will be used.
*/
if (hdr->e_entry >= phdr[i].p_vaddr &&
hdr->e_entry < (phdr[i].p_vaddr +
phdr[i].p_memsz)) {
if (phdr[i].p_flags & PF_X && text_size < seg_size) {
text_size = seg_size;
text_addr = seg_addr;
entry = (u_long)hdr->e_entry + et_dyn_addr;
} else {
data_size = seg_size;
data_addr = seg_addr;
@ -871,6 +864,8 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
data_size = text_size;
}
entry = (u_long)hdr->e_entry + et_dyn_addr;
/*
* Check limits. It should be safe to check the
* limits after loading the segments since we do
@ -948,6 +943,7 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
imgp->auxargs = elf_auxargs;
imgp->interpreted = 0;
imgp->reloc_base = addr;
imgp->proc->p_osrel = osrel;
return (error);

View file

@ -372,6 +372,7 @@ do_execve(td, args, mac_p)
imgp->execlabel = NULL;
imgp->attr = &attr;
imgp->entry_addr = 0;
imgp->reloc_base = 0;
imgp->vmspace_destroyed = 0;
imgp->interpreted = 0;
imgp->opened = 0;
@ -799,11 +800,10 @@ interpret:
/* Set values passed into the program in registers. */
if (p->p_sysent->sv_setregs)
(*p->p_sysent->sv_setregs)(td, imgp->entry_addr,
(u_long)(uintptr_t)stack_base, imgp->ps_strings);
(*p->p_sysent->sv_setregs)(td, imgp,
(u_long)(uintptr_t)stack_base);
else
exec_setregs(td, imgp->entry_addr,
(u_long)(uintptr_t)stack_base, imgp->ps_strings);
exec_setregs(td, imgp, (u_long)(uintptr_t)stack_base);
vfs_mark_atime(imgp->vp, td->td_ucred);
@ -1260,7 +1260,7 @@ exec_copyout_strings(imgp)
* Fill in "ps_strings" struct for ps, w, etc.
*/
suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
suword(&arginfo->ps_nargvstr, argc);
suword32(&arginfo->ps_nargvstr, argc);
/*
* Fill in argument portion of vector table.
@ -1276,7 +1276,7 @@ exec_copyout_strings(imgp)
suword(vectp++, 0);
suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
suword(&arginfo->ps_nenvstr, envc);
suword32(&arginfo->ps_nenvstr, envc);
/*
* Fill in environment portion of vector table.

View file

@ -199,8 +199,8 @@ void
rw_destroy(struct rwlock *rw)
{
KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock not unlocked"));
KASSERT(rw->rw_recurse == 0, ("rw lock still recursed"));
KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw));
KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw));
rw->rw_lock = RW_DESTROYED;
lock_destroy(&rw->lock_object);
}

View file

@ -1428,9 +1428,9 @@ pipe_stat(fp, ub, active_cred, td)
else
ub->st_size = pipe->pipe_buffer.cnt;
ub->st_blocks = (ub->st_size + ub->st_blksize - 1) / ub->st_blksize;
ub->st_atimespec = pipe->pipe_atime;
ub->st_mtimespec = pipe->pipe_mtime;
ub->st_ctimespec = pipe->pipe_ctime;
ub->st_atim = pipe->pipe_atime;
ub->st_mtim = pipe->pipe_mtime;
ub->st_ctim = pipe->pipe_ctime;
ub->st_uid = fp->f_cred->cr_uid;
ub->st_gid = fp->f_cred->cr_gid;
/*

View file

@ -556,9 +556,9 @@ ptsdev_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
#endif /* PTS_EXTERNAL */
sb->st_ino = sb->st_rdev = tty_udev(tp);
sb->st_atimespec = dev->si_atime;
sb->st_ctimespec = dev->si_ctime;
sb->st_mtimespec = dev->si_mtime;
sb->st_atim = dev->si_atime;
sb->st_ctim = dev->si_ctime;
sb->st_mtim = dev->si_mtime;
sb->st_uid = dev->si_uid;
sb->st_gid = dev->si_gid;
sb->st_mode = dev->si_mode | S_IFCHR;

View file

@ -2447,10 +2447,10 @@ mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
struct mqfs_node *pn = fp->f_data;
bzero(st, sizeof *st);
st->st_atimespec = pn->mn_atime;
st->st_mtimespec = pn->mn_mtime;
st->st_ctimespec = pn->mn_ctime;
st->st_birthtimespec = pn->mn_birth;
st->st_atim = pn->mn_atime;
st->st_mtim = pn->mn_mtime;
st->st_ctim = pn->mn_ctime;
st->st_birthtim = pn->mn_birth;
st->st_uid = pn->mn_uid;
st->st_gid = pn->mn_gid;
st->st_mode = S_IFIFO | pn->mn_mode;

View file

@ -219,10 +219,10 @@ ksem_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
bzero(sb, sizeof(*sb));
sb->st_mode = S_IFREG | ks->ks_mode; /* XXX */
sb->st_atimespec = ks->ks_atime;
sb->st_ctimespec = ks->ks_ctime;
sb->st_mtimespec = ks->ks_mtime;
sb->st_birthtimespec = ks->ks_birthtime;
sb->st_atim = ks->ks_atime;
sb->st_ctim = ks->ks_ctime;
sb->st_mtim = ks->ks_mtime;
sb->st_birthtim = ks->ks_birthtime;
sb->st_uid = ks->ks_uid;
sb->st_gid = ks->ks_gid;

View file

@ -219,10 +219,10 @@ shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
sb->st_blksize = PAGE_SIZE;
sb->st_size = shmfd->shm_size;
sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
sb->st_atimespec = shmfd->shm_atime;
sb->st_ctimespec = shmfd->shm_ctime;
sb->st_mtimespec = shmfd->shm_mtime;
sb->st_birthtimespec = shmfd->shm_birthtime;
sb->st_atim = shmfd->shm_atime;
sb->st_ctim = shmfd->shm_ctime;
sb->st_mtim = shmfd->shm_mtime;
sb->st_birthtim = shmfd->shm_birthtime;
sb->st_uid = shmfd->shm_uid;
sb->st_gid = shmfd->shm_gid;

View file

@ -948,19 +948,17 @@ relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
#endif
/*
* Check for degenerate name (e.g. / or "")
* which is a way of talking about a directory,
* e.g. like "/." or ".".
* Check for "" which represents the root directory after slash
* removal.
*/
if (cnp->cn_nameptr[0] == '\0') {
if (cnp->cn_nameiop != LOOKUP || wantparent) {
error = EISDIR;
goto bad;
}
if (dp->v_type != VDIR) {
error = ENOTDIR;
goto bad;
}
/*
* Support only LOOKUP for "/" because lookup()
* can't succeed for CREATE, DELETE and RENAME.
*/
KASSERT(cnp->cn_nameiop == LOOKUP, ("nameiop must be LOOKUP"));
KASSERT(dp->v_type == VDIR, ("dp is not a directory"));
if (!(cnp->cn_flags & LOCKLEAF))
VOP_UNLOCK(dp, 0);
*vpp = dp;

View file

@ -2269,9 +2269,9 @@ cvtstat(st, ost)
ost->st_size = st->st_size;
else
ost->st_size = -2;
ost->st_atime = st->st_atime;
ost->st_mtime = st->st_mtime;
ost->st_ctime = st->st_ctime;
ost->st_atim = st->st_atim;
ost->st_mtim = st->st_mtim;
ost->st_ctim = st->st_ctim;
ost->st_blksize = st->st_blksize;
ost->st_blocks = st->st_blocks;
ost->st_flags = st->st_flags;
@ -2431,15 +2431,15 @@ cvtnstat(sb, nsb)
nsb->st_uid = sb->st_uid;
nsb->st_gid = sb->st_gid;
nsb->st_rdev = sb->st_rdev;
nsb->st_atimespec = sb->st_atimespec;
nsb->st_mtimespec = sb->st_mtimespec;
nsb->st_ctimespec = sb->st_ctimespec;
nsb->st_atim = sb->st_atim;
nsb->st_mtim = sb->st_mtim;
nsb->st_ctim = sb->st_ctim;
nsb->st_size = sb->st_size;
nsb->st_blocks = sb->st_blocks;
nsb->st_blksize = sb->st_blksize;
nsb->st_flags = sb->st_flags;
nsb->st_gen = sb->st_gen;
nsb->st_birthtimespec = sb->st_birthtimespec;
nsb->st_birthtim = sb->st_birthtim;
}
#ifndef _SYS_SYSPROTO_H_

View file

@ -782,10 +782,10 @@ vn_stat(vp, sb, active_cred, file_cred, td)
if (vap->va_size > OFF_MAX)
return (EOVERFLOW);
sb->st_size = vap->va_size;
sb->st_atimespec = vap->va_atime;
sb->st_mtimespec = vap->va_mtime;
sb->st_ctimespec = vap->va_ctime;
sb->st_birthtimespec = vap->va_birthtime;
sb->st_atim = vap->va_atime;
sb->st_mtim = vap->va_mtime;
sb->st_ctim = vap->va_ctime;
sb->st_birthtim = vap->va_birthtime;
/*
* According to www.opengroup.org, the meaning of st_blksize is

View file

@ -283,6 +283,35 @@ breakpoint(void)
__asm __volatile ("break");
}
#if defined(__GNUC__) && !defined(__mips_o32)
/*
 * Read a 64-bit value from *va.  On an LP64 build a plain dereference
 * suffices; otherwise an explicit "ld" instruction performs the access
 * as a single doubleword load.
 */
static inline uint64_t
mips3_ld(const volatile uint64_t *va)
{
#if defined(_LP64)
	return (*va);
#else
	uint64_t rv;

	__asm volatile("ld %0,0(%1)" : "=d"(rv) : "r"(va));
	return (rv);
#endif
}
/*
 * Store the 64-bit value v at *va.  On an LP64 build a plain assignment
 * suffices; otherwise an explicit "sd" instruction performs the access
 * as a single doubleword store.
 */
static inline void
mips3_sd(volatile uint64_t *va, uint64_t v)
{
#if defined(_LP64)
*va = v;
#else
__asm volatile("sd %0,0(%1)" :: "r"(v), "r"(va));
#endif
}
#else
uint64_t mips3_ld(volatile uint64_t *va);
void mips3_sd(volatile uint64_t *, uint64_t);
#endif /* __GNUC__ */
#endif /* _KERNEL */
#define readb(va) (*(volatile uint8_t *) (va))

View file

@ -472,7 +472,7 @@ set_fpregs(struct thread *td, struct fpreg *fpregs)
* code by the MIPS elf abi).
*/
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
bzero((caddr_t)td->td_frame, sizeof(struct trapframe));
@ -481,8 +481,8 @@ exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
* Make sp 64-bit aligned.
*/
td->td_frame->sp = ((register_t) stack) & ~(sizeof(__int64_t) - 1);
td->td_frame->pc = entry & ~3;
td->td_frame->t9 = entry & ~3; /* abicall req */
td->td_frame->pc = imgp->entry_addr & ~3;
td->td_frame->t9 = imgp->entry_addr & ~3; /* abicall req */
#if 0
// td->td_frame->sr = SR_KSU_USER | SR_EXL | SR_INT_ENAB;
//? td->td_frame->sr |= idle_mask & ALL_INT_MASK;
@ -511,7 +511,7 @@ exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
td->td_frame->a0 = (register_t) stack;
td->td_frame->a1 = 0;
td->td_frame->a2 = 0;
td->td_frame->a3 = (register_t)ps_strings;
td->td_frame->a3 = (register_t)imgp->ps_strings;
td->td_md.md_flags &= ~MDTD_FPUSED;
if (PCPU_GET(fpcurthread) == td)

View file

@ -50,6 +50,38 @@
* $FreeBSD$
*/
/*
* Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Jonathan R. Stone for
* the NetBSD Project.
* 4. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Contains code that is the first executed at boot time plus
* assembly language support routines.
@ -61,6 +93,7 @@
#include <machine/asm.h>
#include <machine/cpu.h>
#include <machine/regnum.h>
#include <machine/cpuregs.h>
#include "assym.s"
@ -1586,3 +1619,78 @@ LEAF(octeon_get_control)
.set mips0
END(octeon_get_control)
#endif
LEAF(mips3_ld)
.set push
.set noreorder
.set mips64
#if defined(__mips_o32)
mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
and t1, t0, ~(MIPS_SR_INT_IE)
mtc0 t1, MIPS_COP_0_STATUS
COP0_SYNC
nop
nop
nop
ld v0, 0(a0)
#if _BYTE_ORDER == _BIG_ENDIAN
dsll v1, v0, 32
dsra v1, v1, 32 # low word in v1
dsra v0, v0, 32 # high word in v0
#else
dsra v1, v0, 32 # high word in v1
dsll v0, v0, 32
dsra v0, v0, 32 # low word in v0
#endif
mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
COP0_SYNC
nop
#else /* !__mips_o32 */
ld v0, 0(a0)
#endif /* !__mips_o32 */
jr ra
nop
.set pop
END(mips3_ld)
LEAF(mips3_sd)
.set push
.set mips64
.set noreorder
#if defined(__mips_o32)
mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
and t1, t0, ~(MIPS_SR_INT_IE)
mtc0 t1, MIPS_COP_0_STATUS
COP0_SYNC
nop
nop
nop
# NOTE: a1 is padding!
#if _BYTE_ORDER == _BIG_ENDIAN
dsll a2, a2, 32 # high word in a2
dsll a3, a3, 32 # low word in a3
dsrl a3, a3, 32
#else
dsll a2, a2, 32 # low word in a2
dsrl a2, a2, 32
dsll a3, a3, 32 # high word in a3
#endif
or a1, a2, a3
sd a1, 0(a0)
mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
COP0_SYNC
nop
#else /* !__mips_o32 */
sd a1, 0(a0)
#endif /* !__mips_o32 */
jr ra
nop
.set pop
END(mips3_sd)

View file

@ -63,17 +63,14 @@ static uint64_t cycles_per_hz, cycles_per_stathz, cycles_per_profhz;
static u_int32_t counter_upper = 0;
static u_int32_t counter_lower_last = 0;
struct clk_ticks
{
struct clk_ticks {
u_long hard_ticks;
u_long stat_ticks;
u_long prof_ticks;
/*
* pad for cache line alignment of pcpu info
* cache-line-size - number of used bytes
*/
char pad[32-(3*sizeof (u_long))];
} static pcpu_ticks[MAXCPU];
uint32_t compare_ticks;
} __aligned(CACHE_LINE_SIZE);
static struct clk_ticks pcpu_ticks[MAXCPU];
/*
* Device methods
@ -260,25 +257,47 @@ clock_intr(void *arg)
{
struct clk_ticks *cpu_ticks;
struct trapframe *tf;
uint32_t ltick;
uint32_t count, compare, delta;
cpu_ticks = &pcpu_ticks[PCPU_GET(cpuid)];
/*
* Set next clock edge.
*/
ltick = mips_rd_count();
mips_wr_compare(ltick + cycles_per_tick);
cpu_ticks = &pcpu_ticks[PCPU_GET(cpuid)];
count = mips_rd_count();
compare = cpu_ticks->compare_ticks;
cpu_ticks->compare_ticks = count + cycles_per_tick;
mips_wr_compare(cpu_ticks->compare_ticks);
critical_enter();
if (ltick < counter_lower_last) {
if (count < counter_lower_last) {
counter_upper++;
counter_lower_last = ltick;
counter_lower_last = count;
}
/*
* Magic. Setting up with an arg of NULL means we get passed tf.
*/
tf = (struct trapframe *)arg;
delta = cycles_per_tick;
/*
* Account for the "lost time" between when the timer interrupt fired
* and when 'clock_intr' actually started executing.
*/
delta += count - compare;
/*
* If the COUNT and COMPARE registers are no longer in sync then make
* up some reasonable value for the 'delta'.
*
* This could happen, for e.g., after we resume normal operations after
* exiting the debugger.
*/
if (delta > cycles_per_hz)
delta = cycles_per_hz;
/* Fire hardclock at hz. */
cpu_ticks->hard_ticks += cycles_per_tick;
cpu_ticks->hard_ticks += delta;
if (cpu_ticks->hard_ticks >= cycles_per_hz) {
cpu_ticks->hard_ticks -= cycles_per_hz;
if (PCPU_GET(cpuid) == 0)
@ -288,14 +307,14 @@ clock_intr(void *arg)
}
/* Fire statclock at stathz. */
cpu_ticks->stat_ticks += cycles_per_tick;
cpu_ticks->stat_ticks += delta;
if (cpu_ticks->stat_ticks >= cycles_per_stathz) {
cpu_ticks->stat_ticks -= cycles_per_stathz;
statclock(USERMODE(tf->sr));
}
/* Fire profclock at profhz, but only when needed. */
cpu_ticks->prof_ticks += cycles_per_tick;
cpu_ticks->prof_ticks += delta;
if (cpu_ticks->prof_ticks >= cycles_per_profhz) {
cpu_ticks->prof_ticks -= cycles_per_profhz;
if (profprocs != 0)

View file

@ -28,61 +28,11 @@
#include <machine/asm.h>
#include <machine/cpuregs.h>
#include <machine/endian.h>
/*
* We compile a 32-bit kernel to run on the SB-1 processor which is a 64-bit
* processor. It has some registers that must be accessed using 64-bit load
* and store instructions.
*
* So we have to resort to assembly because the compiler does not emit the
* 'ld' and 'sd' instructions since it thinks that it is compiling for a
* 32-bit mips processor.
*/
.set mips64
.set noat
.set noreorder
/*
* Parameters: uint32_t ptr
* Return value: *(uint64_t *)ptr
*/
LEAF(sb_load64)
ld v1, 0(a0) /* result = *(uint64_t *)ptr */
move v0, v1
#if _BYTE_ORDER == _BIG_ENDIAN
dsll32 v1, v1, 0
dsra32 v1, v1, 0 /* v1 = lower_uint32(result) */
jr ra
dsra32 v0, v0, 0 /* v0 = upper_uint32(result) */
#else
dsll32 v0, v0, 0
dsra32 v0, v0, 0 /* v0 = lower_uint32(result) */
jr ra
dsra32 v1, v1, 0 /* v1 = upper_uint32(result) */
#endif
END(sb_load64)
/*
* Parameters: uint32_t ptr, uint64_t val
* Return value: void
*/
LEAF(sb_store64)
#if _BYTE_ORDER == _BIG_ENDIAN
dsll32 a2, a2, 0 /* a2 = upper_uint32(val) */
dsll32 a3, a3, 0 /* a3 = lower_uint32(val) */
dsrl32 a3, a3, 0
#else
dsll32 a3, a3, 0 /* a3 = upper_uint32(val) */
dsll32 a2, a2, 0 /* a2 = lower_uint32(val) */
dsrl32 a2, a2, 0
#endif
or t0, a2, a3
jr ra
sd t0, 0(a0)
END(sb_store64)
#ifdef SMP
/*
* This function must be implemented in assembly because it is called early

View file

@ -38,8 +38,15 @@ __FBSDID("$FreeBSD$");
#include "sb_scd.h"
extern void sb_store64(uint32_t addr, uint64_t val);
extern uint64_t sb_load64(uint32_t addr);
/*
* We compile a 32-bit kernel to run on the SB-1 processor which is a 64-bit
* processor. It has some registers that must be accessed using 64-bit load
* and store instructions.
*
* We use the mips_ld() and mips_sd() functions to do this for us.
*/
#define sb_store64(addr, val) mips3_sd((uint64_t *)(addr), (val))
#define sb_load64(addr) mips3_ld((uint64_t *)(addr))
/*
* System Control and Debug (SCD) unit on the Sibyte ZBbus.

View file

@ -565,10 +565,7 @@ _zfs= zfs
.endif
.if ${MACHINE_ARCH} == "ia64"
# Modules not enabled on ia64 (as compared to i386) include:
# aac acpi aout apm atspeaker drm ibcs2 linprocfs linux ncv
# nsp s3 sbni stg vesa
# acpi is not enabled because it is broken as a module on ia64
_aac= aac
_aic= aic
_an= an
_arcnet= arcnet
@ -581,12 +578,17 @@ _cm= cm
_cmx= cmx
_coff= coff
_cpufreq= cpufreq
_dpt= dpt
_em= em
_ep= ep
_et= et
_exca= exca
_fe= fe
_hptiop= hptiop
_ida= ida
_igb= igb
_iir= iir
_ips= ips
_mly= mly
_pccard= pccard
_scsi_low= scsi_low
@ -595,6 +597,7 @@ _sound= sound
_splash= splash
_sppp= sppp
_streams= streams
_twa= twa
_wi= wi
_xe= xe
.endif

View file

@ -5,6 +5,8 @@
.PATH: ${.CURDIR}/../../netinet/ipfw
KMOD= dummynet
SRCS= ip_dummynet.c
SRCS+= ip_dn_glue.c ip_dn_io.c
SRCS+= dn_heap.c dn_sched_fifo.c dn_sched_qfq.c dn_sched_rr.c dn_sched_wf2q.c
SRCS+= opt_inet6.h
.if !defined(KERNBUILDDIR)

View file

@ -6,7 +6,7 @@ SRCS += ixgbe.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c
SRCS += ixgbe_82599.c ixgbe_82598.c
CFLAGS+= -I${.CURDIR}/../../dev/ixgbe -DSMP
CFLAGS+= -I${.CURDIR}/../../dev/ixgbe -DSMP -DIXGBE_FDIR
clean:
rm -f device_if.h bus_if.h pci_if.h setdef* *_StripErr

Some files were not shown because too many files have changed in this diff Show more