author    Pasha <pasha@member.fsf.org>    2024-02-20 18:49:50 +0000
committer Pasha <pasha@member.fsf.org>    2024-02-20 18:49:50 +0000
commit    5e0b8d508ed51004bd836384293be00950ee62c9 (patch)
tree      e3f16b1aa8b7177032ce3ec429fbad2b1d92a876 /device
init gnumach copy
Diffstat (limited to 'device')
40 files changed, 9664 insertions, 0 deletions
diff --git a/device/blkio.c b/device/blkio.c new file mode 100644 index 0000000..0dfa33c --- /dev/null +++ b/device/blkio.c @@ -0,0 +1,66 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/89 + * + * Block IO driven from generic kernel IO interface. + */ +#include <mach/kern_return.h> + +#include <device/blkio.h> +#include <device/buf.h> +#include <device/param.h> +#include <device/device_types.h> +#include <device/io_req.h> +#include <device/ds_routines.h> + + +/* + * 'standard' max_count routine. VM continuations mean that this + * code can cope with arbitrarily-sized write operations (they won't be + * atomic, but any caller that cares will do the op synchronously). + */ +#define MAX_PHYS (256 * 1024) + +void minphys(io_req_t ior) +{ + if ((ior->io_op & (IO_WRITE | IO_READ | IO_OPEN)) == IO_WRITE) + return; + + if (ior->io_count > MAX_PHYS) + ior->io_count = MAX_PHYS; +} + +/* + * Dummy routine placed in device switch entries to indicate that + * block device may be mapped. + */ +vm_offset_t block_io_mmap(dev_t dev, vm_offset_t off, int prot) +{ + return (0); +} + diff --git a/device/blkio.h b/device/blkio.h new file mode 100644 index 0000000..b188f38 --- /dev/null +++ b/device/blkio.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2013 Free Software Foundation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef _DEVICE_BLKIO_H_ +#define _DEVICE_BLKIO_H_ + +#include <sys/types.h> + +extern vm_offset_t block_io_mmap(dev_t dev, vm_offset_t off, int prot); + +#endif /* _DEVICE_BLKIO_H_ */ diff --git a/device/buf.h b/device/buf.h new file mode 100644 index 0000000..7c8a436 --- /dev/null +++ b/device/buf.h @@ -0,0 +1,96 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 3/90 + * + * Definitions to make new IO structures look like old ones + */ + +#ifndef _DEVICE_BUF_H_ +#define _DEVICE_BUF_H_ + +/* + * io_req and fields + */ +#include <device/io_req.h> + +#define buf io_req + +/* + * Redefine fields for drivers using old names + */ +#define b_flags io_op +#define b_bcount io_count +#define b_error io_error +#define b_dev io_unit +#define b_blkno io_recnum +#define b_resid io_residual +#define b_un io_un +#define b_addr data +#define av_forw io_next +#define av_back io_prev +#define b_physblock io_physrec +#define b_blocktotal io_rectotal + +/* + * Redefine fields for driver request list heads, using old names. + */ +#define b_actf io_next +#define b_actl io_prev +#define b_forw io_link +#define b_back io_rlink +#define b_active io_count +#define b_errcnt io_residual +#define b_bufsize io_alloc_size + +/* + * Redefine flags + */ +#define B_WRITE IO_WRITE +#define B_READ IO_READ +#define B_OPEN IO_OPEN +#define B_DONE IO_DONE +#define B_ERROR IO_ERROR +#define B_BUSY IO_BUSY +#define B_WANTED IO_WANTED +#define B_BAD IO_BAD +#define B_CALL IO_CALL + +#define B_MD1 IO_SPARE_START + +/* + * Export standard minphys routine. + */ +extern void minphys(io_req_t); + +/* + * Alternate name for iodone + */ +#define biodone iodone +#define biowait iowait + +#endif /* _DEVICE_BUF_H_ */ diff --git a/device/chario.c b/device/chario.c new file mode 100644 index 0000000..3fe93cc --- /dev/null +++ b/device/chario.c @@ -0,0 +1,1060 @@ +/* + * Mach Operating System + * Copyright (c) 1993-1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. 
Golub, Carnegie Mellon University + * Date: 8/88 + * + * TTY io. + * Compatibility with old TTY device drivers. + */ + +#include <mach/kern_return.h> +#include <mach/mig_errors.h> +#include <mach/vm_param.h> +#include <machine/machspl.h> /* spl definitions */ + +#include <ipc/ipc_port.h> + +#include <kern/lock.h> +#include <kern/queue.h> + +#include <vm/vm_map.h> +#include <vm/vm_kern.h> +#include <vm/vm_user.h> + +#include <device/device_types.h> +#include <device/io_req.h> +#include <device/ds_routines.h> +#include <device/device_reply.user.h> +#include <device/chario.h> + +#include <device/tty.h> + +/* If you change these, check that tty_outq_size and tty_inq_size + * is greater than largest tthiwat entry. + */ +short tthiwat[NSPEEDS] = + { 100,100,100,100,100,100,100,200,200,400,400,400,650,650,1300,2000, + 2000,2000 }; +short ttlowat[NSPEEDS] = + { 30, 30, 30, 30, 30, 30, 30, 50, 50,120,120,120,125,125, 125, 125, + 125,125 }; + +/* + * Fake 'line discipline' switch for the benefit of old code + * that wants to call through it. + */ +struct ldisc_switch linesw[] = { + { + char_read, + char_write, + ttyinput, + ttymodem, + tty_output + } +}; + +/* + * Sizes for input and output circular buffers. + */ +const unsigned int tty_inq_size = 4096; /* big nuf */ +const unsigned int tty_outq_size = 2048; /* Must be bigger that tthiwat */ +boolean_t pdma_default = TRUE; /* turn pseudo dma on by default */ + +/* + * compute pseudo-dma tables + */ + +int pdma_timeouts[NSPEEDS]; /* how many ticks in timeout */ +int pdma_water_mark[NSPEEDS]; + + +void chario_init(void) +{ + /* the basic idea with the timeouts is two allow enough + time for a character to show up if data is coming in at full data rate + plus a little slack. 2 ticks is considered slack + Below 300 baud we just glob a character at a time */ +#define _PR(x) ((hz/x) + 2) + + int i; + + for (i = B0; i < B300; i++) + pdma_timeouts[i] = 0; + + pdma_timeouts[B300] = _PR(30); + pdma_timeouts[B600] = _PR(60); + pdma_timeouts[B1200] = _PR(120); + pdma_timeouts[B1800] = _PR(180); + pdma_timeouts[B2400] = _PR(240); + pdma_timeouts[B4800] = _PR(480); + pdma_timeouts[B9600] = _PR(960); + pdma_timeouts[EXTA] = _PR(1440); /* >14400 baud */ + pdma_timeouts[EXTB] = _PR(1920); /* >19200 baud */ + pdma_timeouts[B57600] = _PR(5760); + pdma_timeouts[B115200] = _PR(11520); + + for (i = B0; i < B300; i++) + pdma_water_mark[i] = 0; + + /* for the slow speeds, we try to buffer 0.02 of the baud rate + (20% of the character rate). For the faster lines, + we try to buffer 1/2 the input queue size */ + +#undef _PR +#define _PR(x) (0.20 * x) + + pdma_water_mark[B300] = _PR(120); + pdma_water_mark[B600] = _PR(120); + pdma_water_mark[B1200] = _PR(120); + pdma_water_mark[B1800] = _PR(180); + pdma_water_mark[B2400] = _PR(240); + pdma_water_mark[B4800] = _PR(480); + i = tty_inq_size/2; + pdma_water_mark[B9600] = i; + pdma_water_mark[EXTA] = i; /* >14400 baud */ + pdma_water_mark[EXTB] = i; /* >19200 baud */ + pdma_water_mark[B57600] = i; + pdma_water_mark[B115200] = i; + + return; +} + +/* + * Open TTY, waiting for CARR_ON. + * No locks may be held. + * May run on any CPU. + */ +io_return_t char_open( + int dev, + struct tty * tp, + dev_mode_t mode, + io_req_t ior) +{ + spl_t s; + io_return_t rc = D_SUCCESS; + + s = simple_lock_irq(&tp->t_lock); + + tp->t_dev = dev; + + if (tp->t_mctl) + (*tp->t_mctl)(tp, TM_DTR, DMSET); + + if (pdma_default) + tp->t_state |= TS_MIN; + + if ((tp->t_state & TS_CARR_ON) == 0) { + /* + * No carrier. 
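
[Note: an aside on the pseudo-DMA tables built by chario_init above. Each timeout is one character time in clock ticks plus two ticks of slack, and the slow-speed water marks hold roughly a fifth of a second of input, while 9600 baud and up simply use half of tty_inq_size. A worked example of the arithmetic, assuming a hypothetical clock rate of hz = 100; the value of hz is not taken from this file.]

/* Worked example of the chario_init arithmetic above.
 * hz = 100 is an assumed clock rate, not a value from this commit. */
#include <stdio.h>

int main(void)
{
    int hz = 100;                     /* ticks per second (assumption) */

    /* _PR(x) == (hz/x) + 2: ticks per character at x chars/sec,
     * plus two ticks of slack. */
    int t300  = (hz / 30)  + 2;       /* 300 baud ~ 30 cps  -> 5 ticks */
    int t9600 = (hz / 960) + 2;       /* 9600 baud ~ 960 cps -> 2 ticks */

    /* Water mark at 2400 baud: 20% of the character rate. */
    int w2400 = (int)(0.20 * 240);    /* -> 48 characters */

    /* At 9600 baud and above the mark is tty_inq_size/2 = 2048. */
    printf("B300 %d ticks, B9600 %d ticks, B2400 mark %d chars\n",
           t300, t9600, w2400);
    return 0;
}
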
+ */ + if (mode & D_NODELAY) { + tp->t_state |= TS_ONDELAY; + } + else { + /* + * Don`t return from open until carrier detected. + */ + tp->t_state |= TS_WOPEN; + + ior->io_dev_ptr = (char *)tp; + + queue_delayed_reply(&tp->t_delayed_open, ior, char_open_done); + rc = D_IO_QUEUED; + goto out; + } + } + tp->t_state |= TS_ISOPEN; + if (tp->t_mctl) + (*tp->t_mctl)(tp, TM_RTS, DMBIS); +out: + simple_unlock_irq(s, &tp->t_lock); + return rc; +} + +/* + * Retry wait for CARR_ON for open. + * No locks may be held. + * May run on any CPU. + */ +boolean_t char_open_done( + io_req_t ior) +{ + struct tty *tp = (struct tty *)ior->io_dev_ptr; + spl_t s; + + s = simple_lock_irq(&tp->t_lock); + if ((tp->t_state & TS_ISOPEN) == 0) { + queue_delayed_reply(&tp->t_delayed_open, ior, char_open_done); + simple_unlock_irq(s, &tp->t_lock); + return FALSE; + } + + tp->t_state |= TS_ISOPEN; + tp->t_state &= ~TS_WOPEN; + + if (tp->t_mctl) + (*tp->t_mctl)(tp, TM_RTS, DMBIS); + + simple_unlock_irq(s, &tp->t_lock); + + ior->io_error = D_SUCCESS; + (void) ds_open_done(ior); + return TRUE; +} + +static boolean_t tty_close_open_reply( + io_req_t ior) +{ + ior->io_error = D_DEVICE_DOWN; + (void) ds_open_done(ior); + return TRUE; +} + +/* + * Write to TTY. + * No locks may be held. + * Calls device start routine; must already be on master if + * device needs to run on master. + */ +io_return_t char_write( + struct tty * tp, + io_req_t ior) +{ + spl_t s; + int count; + char *data; + vm_offset_t addr = 0; + io_return_t rc = D_SUCCESS; + + data = ior->io_data; + count = ior->io_count; + if (count == 0) + return rc; + + if (!(ior->io_op & IO_INBAND)) { + /* + * Copy out-of-line data into kernel address space. + * Since data is copied as page list, it will be + * accessible. + */ + vm_map_copy_t copy = (vm_map_copy_t) data; + kern_return_t kr; + + kr = vm_map_copyout(device_io_map, &addr, copy); + if (kr != KERN_SUCCESS) + return kr; + data = (char *) addr; + } + + /* + * Check for tty operating. + */ + s = simple_lock_irq(&tp->t_lock); + + if ((tp->t_state & TS_CARR_ON) == 0) { + + if ((tp->t_state & TS_ONDELAY) == 0) { + /* + * No delayed writes - tell caller that device is down + */ + rc = D_IO_ERROR; + goto out; + } + + if (ior->io_mode & D_NOWAIT) { + rc = D_WOULD_BLOCK; + goto out; + } + } + + /* + * Copy data into the output buffer. + * Report the amount not copied. + */ + + ior->io_residual = b_to_q(data, count, &tp->t_outq); + + /* + * Start hardware output. + */ + + tp->t_state &= ~TS_TTSTOP; + tty_output(tp); + + if (tp->t_outq.c_cc > TTHIWAT(tp) || + (tp->t_state & TS_CARR_ON) == 0) { + + /* + * Do not send reply until some characters have been sent. + */ + ior->io_dev_ptr = (char *)tp; + queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done); + + rc = D_IO_QUEUED; + } +out: + simple_unlock_irq(s, &tp->t_lock); + + if (!(ior->io_op & IO_INBAND)) + (void) vm_deallocate(device_io_map, addr, ior->io_count); + return rc; +} + +/* + * Retry wait for output queue emptied, for write. + * No locks may be held. + * May run on any CPU. + */ +boolean_t char_write_done( + io_req_t ior) +{ + struct tty *tp = (struct tty *)ior->io_dev_ptr; + spl_t s; + + s = simple_lock_irq(&tp->t_lock); + if (tp->t_outq.c_cc > TTHIWAT(tp) || + (tp->t_state & TS_CARR_ON) == 0) { + + queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done); + simple_unlock_irq(s, &tp->t_lock); + return FALSE; + } + simple_unlock_irq(s, &tp->t_lock); + + if (IP_VALID(ior->io_reply_port)) { + (void) (*((ior->io_op & IO_INBAND) ? 
+ ds_device_write_reply_inband : + ds_device_write_reply))(ior->io_reply_port, + ior->io_reply_port_type, + ior->io_error, + (int) (ior->io_total - + ior->io_residual)); + } + mach_device_deallocate(ior->io_device); + return TRUE; +} + +static boolean_t tty_close_write_reply( + io_req_t ior) +{ + ior->io_residual = ior->io_count; + ior->io_error = D_DEVICE_DOWN; + (void) ds_write_done(ior); + return TRUE; +} + +/* + * Read from TTY. + * No locks may be held. + * May run on any CPU - does not talk to device driver. + */ +io_return_t char_read( + struct tty *tp, + io_req_t ior) +{ + spl_t s; + kern_return_t rc; + + /* + * Allocate memory for read buffer. + */ + rc = device_read_alloc(ior, (vm_size_t)ior->io_count); + if (rc != KERN_SUCCESS) + return rc; + + s = simple_lock_irq(&tp->t_lock); + if ((tp->t_state & TS_CARR_ON) == 0) { + + if ((tp->t_state & TS_ONDELAY) == 0) { + /* + * No delayed writes - tell caller that device is down + */ + rc = D_IO_ERROR; + goto out; + } + + if (ior->io_mode & D_NOWAIT) { + rc = D_WOULD_BLOCK; + goto out; + } + + } + + if (tp->t_inq.c_cc <= 0 || + (tp->t_state & TS_CARR_ON) == 0) { + + ior->io_dev_ptr = (char *)tp; + queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done); + rc = D_IO_QUEUED; + goto out; + } + + ior->io_residual = ior->io_count - q_to_b(&tp->t_inq, + ior->io_data, + (int)ior->io_count); + if (tp->t_state & TS_RTS_DOWN) { + (*tp->t_mctl)(tp, TM_RTS, DMBIS); + tp->t_state &= ~TS_RTS_DOWN; + } + + out: + simple_unlock_irq(s, &tp->t_lock); + return rc; +} + +/* + * Retry wait for characters, for read. + * No locks may be held. + * May run on any CPU - does not talk to device driver. + */ +boolean_t char_read_done( + io_req_t ior) +{ + struct tty *tp = (struct tty *)ior->io_dev_ptr; + spl_t s; + + s = simple_lock_irq(&tp->t_lock); + + if (tp->t_inq.c_cc <= 0 || + (tp->t_state & TS_CARR_ON) == 0) { + + queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done); + simple_unlock_irq(s, &tp->t_lock); + return FALSE; + } + + ior->io_residual = ior->io_count - q_to_b(&tp->t_inq, + ior->io_data, + (int)ior->io_count); + if (tp->t_state & TS_RTS_DOWN) { + (*tp->t_mctl)(tp, TM_RTS, DMBIS); + tp->t_state &= ~TS_RTS_DOWN; + } + + simple_unlock_irq(s, &tp->t_lock); + + (void) ds_read_done(ior); + return TRUE; +} + +static boolean_t tty_close_read_reply( + io_req_t ior) +{ + ior->io_residual = ior->io_count; + ior->io_error = D_DEVICE_DOWN; + (void) ds_read_done(ior); + return TRUE; +} + +/* + * Close the tty. + * Tty must be locked (at spltty). + * Iff modem control should run on master. + */ +void ttyclose( + struct tty *tp) +{ + io_req_t ior; + + /* + * Flush the read and write queues. Signal + * the open queue so that those waiting for open + * to complete will see that the tty is closed. + */ + while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_read)) != 0) { + ior->io_done = tty_close_read_reply; + iodone(ior); + } + while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_write)) != 0) { + ior->io_done = tty_close_write_reply; + iodone(ior); + } + while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_open)) != 0) { + ior->io_done = tty_close_open_reply; + iodone(ior); + } + + /* Close down modem */ + if (tp->t_mctl) { + (*tp->t_mctl)(tp, TM_BRK|TM_RTS, DMBIC); + if ((tp->t_state&(TS_HUPCLS|TS_WOPEN)) || (tp->t_state&TS_ISOPEN)==0) + (*tp->t_mctl)(tp, TM_HUP, DMSET); + } + + /* only save buffering bit, and carrier */ + tp->t_state = tp->t_state & (TS_MIN|TS_CARR_ON); +} + +/* + * Port-death routine to clean up reply messages. 
+ */ +static boolean_t +tty_queue_clean( + queue_t q, + const ipc_port_t port, + boolean_t (*routine)(io_req_t) ) +{ + io_req_t ior; + + ior = (io_req_t)queue_first(q); + while (!queue_end(q, (queue_entry_t)ior)) { + if (ior->io_reply_port == port) { + remqueue(q, (queue_entry_t)ior); + ior->io_done = routine; + iodone(ior); + return TRUE; + } + ior = ior->io_next; + } + return FALSE; +} + +/* + * Handle port-death (dead reply port) for tty. + * No locks may be held. + * May run on any CPU. + */ +boolean_t +tty_portdeath( + struct tty * tp, + const ipc_port_t port) +{ + spl_t spl; + boolean_t result; + + spl = simple_lock_irq(&tp->t_lock); + + /* + * The queues may never have been initialized + */ + if (tp->t_delayed_read.next == 0) { + result = FALSE; + } + else { + result = + tty_queue_clean(&tp->t_delayed_read, port, + tty_close_read_reply) + || tty_queue_clean(&tp->t_delayed_write, port, + tty_close_write_reply) + || tty_queue_clean(&tp->t_delayed_open, port, + tty_close_open_reply); + } + simple_unlock_irq(spl, &tp->t_lock); + + return result; +} + +/* + * Get TTY status. + * No locks may be held. + * May run on any CPU. + */ +io_return_t tty_get_status( + struct tty *tp, + dev_flavor_t flavor, + int * data, /* pointer to OUT array */ + natural_t *count) /* out */ +{ + spl_t s; + + switch (flavor) { + case TTY_STATUS: + { + struct tty_status *tsp = + (struct tty_status *) data; + + if (*count < TTY_STATUS_COUNT) + return (D_INVALID_OPERATION); + + s = simple_lock_irq(&tp->t_lock); + + tsp->tt_ispeed = tp->t_ispeed; + tsp->tt_ospeed = tp->t_ospeed; + tsp->tt_breakc = tp->t_breakc; + tsp->tt_flags = tp->t_flags; + if (tp->t_state & TS_HUPCLS) + tsp->tt_flags |= TF_HUPCLS; + + simple_unlock_irq(s, &tp->t_lock); + + *count = TTY_STATUS_COUNT; + break; + + } + default: + return D_INVALID_OPERATION; + } + return D_SUCCESS; +} + +/* + * Set TTY status. + * No locks may be held. + * Calls device start or stop routines; must already be on master if + * device needs to run on master. 
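
[Note: before the set-status handler that follows, a sketch of how a user task might drive these flavors through the standard Mach device_set_status() RPC. Obtaining the device port is elided and error handling is abbreviated; this is an illustration, not code from this commit.]

/* Hypothetical user-side caller of the flavors handled below. */
#include <mach.h>
#include <device/device.h>        /* MIG-generated user stubs */
#include <device/device_types.h>
#include <device/tty_status.h>

void tty_pause_flush_resume(mach_port_t tty_port)
{
    int flags = D_READ | D_WRITE;

    /* Stop output: this flavor takes no status data. */
    (void) device_set_status(tty_port, TTY_STOP, (dev_status_t)0, 0);

    /* Discard buffered input and output. */
    (void) device_set_status(tty_port, TTY_FLUSH,
                             (dev_status_t)&flags, TTY_FLUSH_COUNT);

    /* Start output again. */
    (void) device_set_status(tty_port, TTY_START, (dev_status_t)0, 0);
}
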
+ */ +io_return_t tty_set_status( + struct tty *tp, + dev_flavor_t flavor, + int * data, + natural_t count) +{ + int s; + + switch (flavor) { + case TTY_FLUSH: + { + int flags; + if (count < TTY_FLUSH_COUNT) + return D_INVALID_OPERATION; + + flags = *data; + if (flags == 0) + flags = D_READ | D_WRITE; + + s = simple_lock_irq(&tp->t_lock); + tty_flush(tp, flags); + simple_unlock_irq(s, &tp->t_lock); + + break; + } + case TTY_STOP: + /* stop output */ + s = simple_lock_irq(&tp->t_lock); + if ((tp->t_state & TS_TTSTOP) == 0) { + tp->t_state |= TS_TTSTOP; + (*tp->t_stop)(tp, 0); + } + simple_unlock_irq(s, &tp->t_lock); + break; + + case TTY_START: + /* start output */ + s = simple_lock_irq(&tp->t_lock); + if (tp->t_state & TS_TTSTOP) { + tp->t_state &= ~TS_TTSTOP; + tty_output(tp); + } + simple_unlock_irq(s, &tp->t_lock); + break; + + case TTY_STATUS: + /* set special characters and speed */ + { + struct tty_status *tsp; + + if (count < TTY_STATUS_COUNT) + return D_INVALID_OPERATION; + + tsp = (struct tty_status *)data; + + if (tsp->tt_ispeed < 0 || + tsp->tt_ispeed >= NSPEEDS || + tsp->tt_ospeed < 0 || + tsp->tt_ospeed >= NSPEEDS) + { + return D_INVALID_OPERATION; + } + + s = simple_lock_irq(&tp->t_lock); + + tp->t_ispeed = tsp->tt_ispeed; + tp->t_ospeed = tsp->tt_ospeed; + tp->t_breakc = tsp->tt_breakc; + tp->t_flags = tsp->tt_flags & ~TF_HUPCLS; + if (tsp->tt_flags & TF_HUPCLS) + tp->t_state |= TS_HUPCLS; + + simple_unlock_irq(s, &tp->t_lock); + break; + } + default: + return D_INVALID_OPERATION; + } + return D_SUCCESS; +} + + +/* + * [internal] + * Queue IOR on reply queue, to wait for TTY operation. + * TTY must be locked (at spltty). + */ +void queue_delayed_reply( + queue_t qh, + io_req_t ior, + boolean_t (*io_done)(io_req_t) ) +{ + ior->io_done = io_done; + enqueue_tail(qh, (queue_entry_t)ior); +} + +/* + * Retry delayed IO operations for TTY. + * TTY containing queue must be locked (at spltty). + */ +void tty_queue_completion( + queue_t qh) +{ + io_req_t ior; + + while ((ior = (io_req_t)dequeue_head(qh)) != 0) { + iodone(ior); + } +} + +/* + * Set the default special characters. + * Since this routine is called whenever a tty has never been opened, + * we can initialize the queues here. + */ +void ttychars( + struct tty *tp) +{ + if ((tp->t_flags & TS_INIT) == 0) { + /* + * Initialize queues + */ + queue_init(&tp->t_delayed_open); + queue_init(&tp->t_delayed_read); + queue_init(&tp->t_delayed_write); + + /* + * Initialize character buffers + */ + cb_alloc(&tp->t_inq, tty_inq_size); + + /* if we might do modem flow control */ + if (tp->t_mctl && tp->t_inq.c_hog > 30) + tp->t_inq.c_hog -= 30; + + cb_alloc(&tp->t_outq, tty_outq_size); + + /* + * Mark initialized + */ + tp->t_state |= TS_INIT; + } + + tp->t_breakc = 0; +} + +/* + * Flush all TTY queues. + * Called at spltty, tty already locked. + * Calls device STOP routine; must already be on master if + * device needs to run on master. + */ +void tty_flush( + struct tty *tp, + int rw) +{ + if (rw & D_READ) { + cb_clear(&tp->t_inq); + tty_queue_completion(&tp->t_delayed_read); + } + if (rw & D_WRITE) { + tp->t_state &= ~TS_TTSTOP; + (*tp->t_stop)(tp, rw); + cb_clear(&tp->t_outq); + tty_queue_completion(&tp->t_delayed_write); + } +} + +/* + * Restart character output after a delay timeout. + * Calls device start routine - must be on master CPU. + * + * Timeout routines are called only on master CPU. + * What if device runs on a different CPU? 
+ */ +void ttrstrt( + struct tty *tp) +{ + spl_t s; + + s = simple_lock_irq(&tp->t_lock); + + tp->t_state &= ~TS_TIMEOUT; + ttstart (tp); + + simple_unlock_irq(s, &tp->t_lock); +} + +/* + * Start output on the typewriter. It is used from the top half + * after some characters have been put on the output queue, + * from the interrupt routine to transmit the next + * character, and after a timeout has finished. + * + * Called at spltty, tty already locked. + * Must be on master CPU if device runs on master. + */ +void ttstart(struct tty *tp) +{ + if ((tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) == 0) { + /* + * Start up the hardware again + */ + (*tp->t_start)(tp); + + /* + * Wake up those waiting for write completion. + */ + if (tp->t_outq.c_cc <= TTLOWAT(tp)) + tty_queue_completion(&tp->t_delayed_write); + } +} + +/* + * Start character output, if the device is not busy or + * stopped or waiting for a timeout. + * + * Called at spltty, tty already locked. + * Must be on master CPU if device runs on master. + */ +void tty_output( + struct tty *tp) +{ + if ((tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) == 0) { + /* + * Not busy. Start output. + */ + (*tp->t_start)(tp); + + /* + * Wake up those waiting for write completion. + */ + if (tp->t_outq.c_cc <= TTLOWAT(tp)) + tty_queue_completion(&tp->t_delayed_write); + } +} + +/* + * Send any buffered recvd chars up to user + */ +static void ttypush(void * _tp) +{ + struct tty *tp = _tp; + spl_t s; + int state; + + s = simple_lock_irq(&tp->t_lock); + + /* + The pdma timeout has gone off. + If no character has been received since the timeout + was set, push any pending characters up. + If any characters were received in the last interval + then just reset the timeout and the character received bit. + */ + + state = tp->t_state; + + if (state & TS_MIN_TO) + { + if (state & TS_MIN_TO_RCV) + { /* a character was received */ + tp->t_state = state & ~TS_MIN_TO_RCV; + timeout(ttypush, tp, pdma_timeouts[tp->t_ispeed]); + } + else + { + tp->t_state = state & ~TS_MIN_TO; + if (tp->t_inq.c_cc) /* pending characters */ + tty_queue_completion(&tp->t_delayed_read); + } + } + else + { + tp->t_state = state & ~TS_MIN_TO_RCV;/* sanity */ + } + + simple_unlock_irq(s, &tp->t_lock); +} + +/* + * Put input character on input queue. + * + * Called at spltty, tty already locked. + */ +void ttyinput( + unsigned int c, + struct tty *tp) +{ + if (tp->t_inq.c_cc >= tp->t_inq.c_hog) { + /* + * Do not want to overflow input queue + */ + if (tp->t_mctl) { + (*tp->t_mctl)(tp, TM_RTS, DMBIC); + tp->t_state |= TS_RTS_DOWN; + } + tty_queue_completion(&tp->t_delayed_read); + return; + + } + + c &= 0xff; + + (void) putc(c, &tp->t_inq); + if ((tp->t_state & TS_MIN) == 0 || + tp->t_inq.c_cc > pdma_water_mark[tp->t_ispeed]) + { + /* + * No input buffering, or input minimum exceeded. + * Grab a request from input queue and queue it + * to io_done thread. + */ + if (tp->t_state & TS_MIN_TO) { + tp->t_state &= ~(TS_MIN_TO|TS_MIN_TO_RCV); + untimeout(ttypush, tp); + } + tty_queue_completion(&tp->t_delayed_read); + } + else { + /* + * Not enough characters. + * If no timeout is set, initiate the timeout + * Otherwise set the character received during timeout interval + * flag. + * One alternative approach would be just to reset the timeout + * into the future, but this involves making a timeout/untimeout + * call on every character. 
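
[Note: the two flags reduce the cost of buffering to one timer call per quiet interval instead of one per character. The same coalescing pattern, distilled out of the tty code; arm_timeout() and deliver_pending() are stand-ins, not Mach calls.]

/* Distilled TS_MIN_TO / TS_MIN_TO_RCV pattern: arm one timeout and let
 * the expiry handler re-arm while events keep arriving, instead of a
 * timeout/untimeout pair for every event. */
#define MIN_TO      0x1          /* timeout armed */
#define MIN_TO_RCV  0x2          /* event arrived while armed */

extern void arm_timeout(void (*fn)(void), int ticks);  /* stand-in */
extern void deliver_pending(void);                     /* stand-in */

static unsigned state;
static void push_handler(void);

void on_event(void)              /* e.g. one character received */
{
    if ((state & MIN_TO) == 0) {
        state |= MIN_TO;
        arm_timeout(push_handler, 2);
    } else
        state |= MIN_TO_RCV;     /* cheap: a flag, no timer call */
}

static void push_handler(void)   /* runs at timeout expiry */
{
    if (state & MIN_TO_RCV) {    /* line still active: re-arm once */
        state &= ~MIN_TO_RCV;
        arm_timeout(push_handler, 2);
    } else {
        state &= ~MIN_TO;        /* quiet interval: push data up */
        deliver_pending();
    }
}
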
+ */ + int ptime = pdma_timeouts[tp->t_ispeed]; + if (ptime > 0) + { + if ((tp->t_state & TS_MIN_TO) == 0) + { + tp->t_state |= TS_MIN_TO; + timeout(ttypush, tp, ptime); + } + else + { + tp->t_state |= TS_MIN_TO_RCV; + } + } + } +} + +/* + * Put many characters on input queue. + * + * Called at spltty, tty already locked. + */ +void ttyinput_many( + struct tty *tp, + char *chars, + int count) +{ + /* + * Do not want to overflow input queue + */ + if (tp->t_inq.c_cc < tp->t_inq.c_hog) + count -= b_to_q(chars, count, &tp->t_inq); + + tty_queue_completion(&tp->t_delayed_read); +} + + +/* + * Handle modem control transition on a tty. + * Flag indicates new state of carrier. + * Returns FALSE if the line should be turned off. + * + * Called at spltty, tty already locked. + */ +boolean_t ttymodem( + struct tty * tp, + boolean_t carrier_up) +{ + if ((tp->t_state&TS_WOPEN) == 0 && (tp->t_flags & MDMBUF)) { + /* + * Flow control by carrier. Carrier down stops + * output; carrier up restarts output. + */ + if (carrier_up) { + tp->t_state &= ~TS_TTSTOP; + tty_output(tp); + } + else if ((tp->t_state&TS_TTSTOP) == 0) { + tp->t_state |= TS_TTSTOP; + (*tp->t_stop)(tp, 0); + } + } + else if (carrier_up) { + /* + * Carrier now on. + */ + tp->t_state |= TS_CARR_ON; + tt_open_wakeup(tp); + } + else { + /* + * Lost carrier. + */ + tp->t_state &= ~TS_CARR_ON; + if (tp->t_state & TS_ISOPEN && + (tp->t_flags & NOHANG) == 0) + { + /* + * Hang up TTY if carrier drops. + * Need to alert users, somehow... + */ + tty_flush(tp, D_READ|D_WRITE); + return FALSE; + } + } + return TRUE; +} + +/* + * Similarly, handle transitions on the ClearToSend + * signal. Nowadays, it is used by many modems as + * a flow-control device: they turn it down to stop + * us from sending more chars. We do the same with + * the RequestToSend signal. [Yes, that is exactly + * why those signals are defined in the standard.] + * + * Tty must be locked and on master. + */ +void +tty_cts( + struct tty * tp, + boolean_t cts_up) +{ + if (tp->t_state & TS_ISOPEN){ + if (cts_up) { + tp->t_state &= ~(TS_TTSTOP|TS_BUSY); + tty_output(tp); + } else { + tp->t_state |= (TS_TTSTOP|TS_BUSY); + (*tp->t_stop)(tp, D_WRITE); + } + } +} diff --git a/device/chario.h b/device/chario.h new file mode 100644 index 0000000..52105a2 --- /dev/null +++ b/device/chario.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2013 Free Software Foundation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#ifndef _DEVICE_CHARIO_H_ +#define _DEVICE_CHARIO_H_ + +#include <device/tty.h> + +extern void chario_init(void); + +void queue_delayed_reply( + queue_t qh, + io_req_t ior, + boolean_t (*io_done)(io_req_t)); + +void tty_output(struct tty *tp); + +boolean_t char_open_done(io_req_t); +boolean_t char_read_done(io_req_t); +boolean_t char_write_done(io_req_t); + +#endif /* _DEVICE_CHARIO_H_ */ diff --git a/device/cirbuf.c b/device/cirbuf.c new file mode 100644 index 0000000..ed09f3d --- /dev/null +++ b/device/cirbuf.c @@ -0,0 +1,277 @@ +/* + * Mach Operating System + * Copyright (c) 1992,1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + * + * Circular buffers for TTY + */ + +#include <string.h> +#include <device/cirbuf.h> +#include <kern/debug.h> +#include <kern/kalloc.h> + + + +/* read at c_cf, write at c_cl */ +/* if c_cf == c_cl, buffer is empty */ +/* if c_cl == c_cf - 1, buffer is full */ + +#if DEBUG +#include <mach/boolean.h> + +boolean_t cb_check_enable = FALSE; +#define CB_CHECK(cb) if (cb_check_enable) cb_check(cb) + +void +cb_check(struct cirbuf *cb) +{ + if (!(cb->c_cf >= cb->c_start && cb->c_cf < cb->c_end)) + panic("cf %p out of range [%p..%p)", + cb->c_cf, cb->c_start, cb->c_end); + if (!(cb->c_cl >= cb->c_start && cb->c_cl < cb->c_end)) + panic("cl %p out of range [%p..%p)", + cb->c_cl, cb->c_start, cb->c_end); + if (cb->c_cf <= cb->c_cl) { + if (!(cb->c_cc == cb->c_cl - cb->c_cf)) + panic("cc %x should be %x", + cb->c_cc, + cb->c_cl - cb->c_cf); + } + else { + if (!(cb->c_cc == cb->c_end - cb->c_cf + + cb->c_cl - cb->c_start)) + panic("cc %x should be %x", + cb->c_cc, + cb->c_end - cb->c_cf + + cb->c_cl - cb->c_start); + } +} +#else /* DEBUG */ +#define CB_CHECK(cb) +#endif /* DEBUG */ + +/* + * Put one character in circular buffer. + */ +int putc( + int c, + struct cirbuf *cb) +{ + char *ow, *nw; + + ow = cb->c_cl; + nw = ow+1; + if (nw == cb->c_end) + nw = cb->c_start; + if (nw == cb->c_cf) + return 1; /* not entered */ + *ow = c; + cb->c_cl = nw; + + cb->c_cc++; + + CB_CHECK(cb); + + return 0; +} + +/* + * Get one character from circular buffer. + */ +int getc(struct cirbuf *cb) +{ + unsigned char *nr; + int c; + + nr = (unsigned char *)cb->c_cf; + if (nr == (unsigned char *)cb->c_cl) { + CB_CHECK(cb); + return -1; /* empty */ + } + c = *nr; + nr++; + if (nr == (unsigned char *)cb->c_end) + nr = (unsigned char *)cb->c_start; + cb->c_cf = (char *)nr; + + cb->c_cc--; + + CB_CHECK(cb); + + return c; +} + +/* + * Get lots of characters. + * Return number moved. 
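
[Note: a usage sketch for these circular-buffer routines, ahead of the bulk-copy implementations. Per the invariants noted above (empty when c_cf == c_cl, full when c_cl == c_cf - 1), a buffer of N bytes holds at most N-1 characters, which is also why cb_alloc sets c_hog to buf_size - 1.]

/* Usage sketch for the cirbuf routines.  A buffer of size N stores at
 * most N-1 characters, since c_cl == c_cf - 1 means "full". */
#include <device/cirbuf.h>

void cirbuf_demo(void)
{
    struct cirbuf cb;
    char out[8];
    int i, moved;

    cb_alloc(&cb, 8);            /* 8-byte buffer: capacity 7, c_hog 7 */

    for (i = 0; i < 10; i++)
        if (putc('a' + i, &cb))
            break;               /* putc returns 1 when full: i == 7 */

    moved = q_to_b(&cb, out, (int) sizeof out);
    /* moved == 7 and the buffer is empty again (c_cf == c_cl) */

    cb_free(&cb);
}
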
+ */ +int +q_to_b( struct cirbuf *cb, + char *cp, + int count) +{ + char *ocp = cp; + int i; + + while (count != 0) { + if (cb->c_cl == cb->c_cf) + break; /* empty */ + if (cb->c_cl < cb->c_cf) + i = cb->c_end - cb->c_cf; + else + i = cb->c_cl - cb->c_cf; + if (i > count) + i = count; + memcpy(cp, cb->c_cf, i); + cp += i; + count -= i; + cb->c_cf += i; + cb->c_cc -= i; + if (cb->c_cf == cb->c_end) + cb->c_cf = cb->c_start; + + CB_CHECK(cb); + } + CB_CHECK(cb); + + return cp - ocp; +} + +/* + * Add character array to buffer and return number of characters + * NOT entered. + */ +int +b_to_q( char *cp, + int count, + struct cirbuf *cb) +{ + int i; + char *lim; + + while (count != 0) { + lim = cb->c_cf - 1; + if (lim < cb->c_start) + lim = cb->c_end - 1; + + if (cb->c_cl == lim) + break; + if (cb->c_cl < lim) + i = lim - cb->c_cl; + else + i = cb->c_end - cb->c_cl; + + if (i > count) + i = count; + memcpy(cb->c_cl, cp, i); + cp += i; + count -= i; + cb->c_cc += i; + cb->c_cl += i; + if (cb->c_cl == cb->c_end) + cb->c_cl = cb->c_start; + + CB_CHECK(cb); + } + CB_CHECK(cb); + return count; +} + +/* + * Flush characters from circular buffer. + */ +void +ndflush(struct cirbuf *cb, + int count) +{ + int i; + + while (count != 0) { + if (cb->c_cl == cb->c_cf) + break; /* empty */ + if (cb->c_cl < cb->c_cf) + i = cb->c_end - cb->c_cf; + else + i = cb->c_cl - cb->c_cf; + if (i > count) + i = count; + count -= i; + cb->c_cf += i; + cb->c_cc -= i; + if (cb->c_cf == cb->c_end) + cb->c_cf = cb->c_start; + CB_CHECK(cb); + } + + CB_CHECK(cb); +} + +/* + * Empty a circular buffer. + */ +void cb_clear(struct cirbuf *cb) +{ + cb->c_cf = cb->c_start; + cb->c_cl = cb->c_start; + cb->c_cc = 0; +} + +/* + * Allocate character space for a circular buffer. + */ +void +cb_alloc( + struct cirbuf *cb, + vm_size_t buf_size) +{ + char *buf; + + buf = (char *)kalloc(buf_size); + + cb->c_start = buf; + cb->c_end = buf + buf_size; + cb->c_cf = buf; + cb->c_cl = buf; + cb->c_cc = 0; + cb->c_hog = buf_size - 1; + + CB_CHECK(cb); +} + +/* + * Free character space for a circular buffer. + */ +void +cb_free(struct cirbuf *cb) +{ + vm_size_t size; + + size = cb->c_end - cb->c_start; + kfree((vm_offset_t)cb->c_start, size); +} + diff --git a/device/cirbuf.h b/device/cirbuf.h new file mode 100644 index 0000000..64771ce --- /dev/null +++ b/device/cirbuf.h @@ -0,0 +1,61 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. 
Golub, Carnegie Mellon University + * Date: 7/90 + */ + +#ifndef _DEVICE_CIRBUF_H_ +#define _DEVICE_CIRBUF_H_ + +/* + * Circular buffers for TTY + */ + +struct cirbuf { + char * c_start; /* start of buffer */ + char * c_end; /* end of buffer + 1*/ + char * c_cf; /* read pointer */ + char * c_cl; /* write pointer */ + short c_cc; /* current number of characters + (compatibility) */ + short c_hog; /* max ever */ +}; + +/* + * Exported routines + */ +extern int putc(int, struct cirbuf *); +extern int getc(struct cirbuf *); +extern int q_to_b(struct cirbuf *, char *, int); +extern int b_to_q(char *, int, struct cirbuf *); +extern void ndflush(struct cirbuf *, int); +extern void cb_clear(struct cirbuf *); + +extern void cb_alloc(struct cirbuf *, vm_size_t); +extern void cb_free(struct cirbuf *); + +#endif /* _DEVICE_CIRBUF_H_ */ diff --git a/device/conf.h b/device/conf.h new file mode 100644 index 0000000..8177966 --- /dev/null +++ b/device/conf.h @@ -0,0 +1,127 @@ +/* + * Mach Operating System + * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/88 + */ + +#ifndef _DEVICE_CONF_H_ +#define _DEVICE_CONF_H_ + +#include <mach/machine/vm_types.h> +#include <sys/types.h> +#include <mach/port.h> +#include <mach/vm_prot.h> +#include <device/device_types.h> +#include <device/net_status.h> + +struct io_req; +typedef struct io_req *io_req_t; + +typedef int io_return_t; + +/* + * Operations list for major device types. + */ +struct dev_ops { + char * d_name; /* name for major device */ + int (*d_open)(dev_t, int, io_req_t);/* open device */ + void (*d_close)(dev_t, int); /* close device */ + int (*d_read)(dev_t, io_req_t); /* read */ + int (*d_write)(dev_t, io_req_t); /* write */ + int (*d_getstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t *); /* get status/control */ + int (*d_setstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t); /* set status/control */ + vm_offset_t (*d_mmap)(dev_t, vm_offset_t, vm_prot_t); /* map memory */ + int (*d_async_in)(dev_t, const ipc_port_t, int, filter_t*, unsigned int); /* asynchronous input setup */ + int (*d_reset)(dev_t); /* reset device */ + int (*d_port_death)(dev_t, mach_port_t); + /* clean up reply ports */ + int d_subdev; /* number of sub-devices per + unit */ + int (*d_dev_info)(dev_t, int, int*); /* driver info for kernel */ +}; +typedef struct dev_ops *dev_ops_t; + +/* + * Routines for null entries. 
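
[Note: these stubs let a driver fill every slot of its switch entry even when it implements only a few operations. A hypothetical entry for a write-only logging device, built from the null routines declared just below; the name "dlog" and the dlog_* functions are invented for illustration.]

/* Hypothetical dev_ops entry: a log device supporting only open,
 * close and write; every other slot takes a null/error stub. */
extern int  dlog_open(dev_t, int, io_req_t);
extern void dlog_close(dev_t, int);
extern int  dlog_write(dev_t, io_req_t);

struct dev_ops dlog_ops = {
    "dlog",              /* d_name */
    dlog_open,           /* d_open */
    dlog_close,          /* d_close */
    nulldev_read,        /* d_read: accepts and ignores */
    dlog_write,          /* d_write */
    nulldev_getstat,     /* d_getstat */
    nulldev_setstat,     /* d_setstat */
    nomap,               /* d_mmap: not mappable */
    nodev_async_in,      /* d_async_in: error */
    nulldev_reset,       /* d_reset */
    nulldev_portdeath,   /* d_port_death */
    0,                   /* d_subdev */
    nodev_info,          /* d_dev_info: error */
};
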
+ */ +extern int nulldev_reset(dev_t dev); +extern int nulldev_open(dev_t dev, int flag, io_req_t ior); +extern void nulldev_close(dev_t dev, int flags); +extern int nulldev_read(dev_t dev, io_req_t ior); +extern int nulldev_write(dev_t dev, io_req_t ior); +extern io_return_t nulldev_getstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count); +extern io_return_t nulldev_setstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count); +extern io_return_t nulldev_portdeath(dev_t dev, mach_port_t port); +extern int nodev_async_in(dev_t, const ipc_port_t, int, filter_t*, unsigned int); /* no operation - error */ +extern int nodev_info(dev_t, int, int*); /* no operation - error */ +extern vm_offset_t nomap(dev_t dev, vm_offset_t off, int prot); /* no operation - error */ + +/* + * Flavor constants for d_dev_info routine + */ +#define D_INFO_BLOCK_SIZE 1 + +/* + * Head of list of attached devices + */ +extern struct dev_ops dev_name_list[]; +extern int dev_name_count; + +/* + * Macro to search device list + */ +#define dev_search(dp) \ + for (dp = dev_name_list; \ + dp < &dev_name_list[dev_name_count]; \ + dp++) + +/* + * Indirection vectors for certain devices. + */ +struct dev_indirect { + char * d_name; /* name for device */ + dev_ops_t d_ops; /* operations (major device) */ + int d_unit; /* and unit number */ +}; +typedef struct dev_indirect *dev_indirect_t; + +/* + * List of indirect devices. + */ +extern struct dev_indirect dev_indirect_list[]; +extern int dev_indirect_count; + +/* + * Macro to search indirect list + */ +#define dev_indirect_search(di) \ + for (di = dev_indirect_list; \ + di < &dev_indirect_list[dev_indirect_count]; \ + di++) + +#endif /* _DEVICE_CONF_H_ */ + diff --git a/device/cons.c b/device/cons.c new file mode 100644 index 0000000..3f7cb9d --- /dev/null +++ b/device/cons.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 1988-1994, The University of Utah and + * the Computer Systems Laboratory (CSL). All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Utah $Hdr: cons.c 1.14 94/12/14$ + */ + +#include <string.h> +#include <kern/debug.h> +#include <sys/types.h> +#include <device/conf.h> +#include <mach/boolean.h> +#include <device/cons.h> + +#ifdef MACH_KMSG +#include <device/io_req.h> +#include <device/kmsg.h> +#endif /* MACH_KMSG */ + +static boolean_t cn_inited = FALSE; +static struct consdev *cn_tab = 0; /* physical console device info */ + +/* + * ROM getc/putc primitives. + * On some architectures, the boot ROM provides basic character input/output + * routines that can be used before devices are configured or virtual memory + * is enabled. 
This can be useful to debug (or catch panics from) code early + * in the bootstrap procedure. + */ +int (*romgetc)(char c) = 0; +void (*romputc)(char c) = 0; + +#if CONSBUFSIZE > 0 +/* + * Temporary buffer to store console output before a console is selected. + * This is statically allocated so it can be called before malloc/kmem_alloc + * have been initialized. It is initialized so it won't be clobbered as + * part of the zeroing of BSS (on PA/Mach). + */ +static char consbuf[CONSBUFSIZE] = { 0 }; +static char *consbp = consbuf; +static boolean_t consbufused = FALSE; +#endif /* CONSBUFSIZE > 0 */ + +void +cninit(void) +{ + struct consdev *cp; + dev_ops_t cn_ops; + int x; + + if (cn_inited) + return; + + /* + * Collect information about all possible consoles + * and find the one with highest priority + */ + for (cp = constab; cp->cn_probe; cp++) { + (*cp->cn_probe)(cp); + if (cp->cn_pri > CN_DEAD && + (cn_tab == NULL || cp->cn_pri > cn_tab->cn_pri)) + cn_tab = cp; + } + + /* + * Found a console, initialize it. + */ + if ((cp = cn_tab)) { + /* + * Initialize as console + */ + (*cp->cn_init)(cp); + /* + * Look up its dev_ops pointer in the device table and + * place it in the device indirection table. + */ + if (dev_name_lookup(cp->cn_name, &cn_ops, &x) == FALSE) + panic("cninit: dev_name_lookup failed"); + dev_set_indirection("console", cn_ops, minor(cp->cn_dev)); +#if CONSBUFSIZE > 0 + /* + * Now that the console is initialized, dump any chars in + * the temporary console buffer. + */ + if (consbufused) { + char *cbp = consbp; + do { + if (*cbp) + cnputc(*cbp); + if (++cbp == &consbuf[CONSBUFSIZE]) + cbp = consbuf; + } while (cbp != consbp); + consbufused = FALSE; + } +#endif /* CONSBUFSIZE > 0 */ + cn_inited = TRUE; + return; + } + /* + * No console device found, not a problem for BSD, fatal for Mach + */ + panic("can't find a console device"); +} + + +int +cngetc(void) +{ + if (cn_tab) + return ((*cn_tab->cn_getc)(cn_tab->cn_dev, 1)); + if (romgetc) + return ((*romgetc)(1)); + return (0); +} + +int +cnmaygetc(void) +{ + if (cn_tab) + return((*cn_tab->cn_getc)(cn_tab->cn_dev, 0)); + if (romgetc) + return ((*romgetc)(0)); + return (0); +} + +void +cnputc(char c) +{ + if (c == 0) + return; + +#ifdef MACH_KMSG + /* XXX: Assume that All output routines always use cnputc. */ + kmsg_putchar (c); +#endif + +#if defined(MACH_HYP) && 0 + { + /* Also output on hypervisor's emergency console, for + * debugging */ + unsigned char d = c; + hyp_console_write(&d, 1); + } +#endif /* MACH_HYP */ + + if (cn_tab) { + (*cn_tab->cn_putc)(cn_tab->cn_dev, c); + if (c == '\n') + (*cn_tab->cn_putc)(cn_tab->cn_dev, '\r'); + } else if (romputc) { + (*romputc)(c); + if (c == '\n') + (*romputc)('\r'); + } +#if CONSBUFSIZE > 0 + else { + if (consbufused == FALSE) { + consbp = consbuf; + consbufused = TRUE; + memset(consbuf, 0, CONSBUFSIZE); + } + *consbp++ = c; + if (consbp >= &consbuf[CONSBUFSIZE]) + consbp = consbuf; + } +#endif /* CONSBUFSIZE > 0 */ +} diff --git a/device/cons.h b/device/cons.h new file mode 100644 index 0000000..34f3bc5 --- /dev/null +++ b/device/cons.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 1988-1994, The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. 
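
[Note: stepping back to cons.c above. Before any console is selected, cnputc stores characters into the static consbuf ring, and cninit later replays them in arrival order. That replay idea, distilled into a self-contained one-shot sketch; emit() stands in for the real console output path.]

/* Buffer console output in a fixed ring until a real console exists,
 * then replay it oldest-first, as cninit does above. */
#include <stdio.h>

#define BUFSZ 16
static char ring[BUFSZ];
static char *bp = ring;          /* next write position */
static int used = 0;

static void emit(char c) { putchar(c); }  /* stand-in for the device */

void early_putc(char c)          /* no console yet: remember the char */
{
    used = 1;
    *bp++ = c;
    if (bp >= &ring[BUFSZ])
        bp = ring;               /* wrap: oldest chars get overwritten */
}

void console_ready(void)         /* replay, starting at the oldest char */
{
    char *p = bp;
    if (!used)
        return;
    do {
        if (*p)                  /* skip never-written (NUL) slots */
            emit(*p);
        if (++p == &ring[BUFSZ])
            p = ring;
    } while (p != bp);
    used = 0;
}
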
+ * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Utah $Hdr: cons.h 1.10 94/12/14$ + */ + +#ifndef _DEVICE_CONS_H +#define _DEVICE_CONS_H +#include <sys/types.h> + +struct consdev { + char *cn_name; /* name of device in dev_name_list */ + int (*cn_probe)(struct consdev *cp); /* probe hardware and fill in consdev info */ + int (*cn_init)(struct consdev *cp); /* turn on as console */ + int (*cn_getc)(dev_t dev, int wait); /* kernel getchar interface */ + int (*cn_putc)(dev_t dev, int c); /* kernel putchar interface */ + dev_t cn_dev; /* major/minor of device */ + short cn_pri; /* pecking order; the higher the better */ +}; + +/* values for cn_pri - reflect our policy for console selection */ +#define CN_DEAD 0 /* device doesn't exist */ +#define CN_NORMAL 1 /* device exists but is nothing special */ +#define CN_INTERNAL 2 /* "internal" bit-mapped display */ +#define CN_REMOTE 3 /* serial interface with remote bit set */ + +#define CONSBUFSIZE 1024 + +#ifdef KERNEL +extern struct consdev constab[]; +#endif + +extern void cninit(void); + +extern int cngetc(void); + +extern int cnmaygetc(void); + +extern void cnputc(char); + +/* + * ROM getc/putc primitives. + * On some architectures, the boot ROM provides basic character input/output + * routines that can be used before devices are configured or virtual memory + * is enabled. This can be useful to debug (or catch panics from) code early + * in the bootstrap procedure. + */ +extern int (*romgetc)(char c); +extern void (*romputc)(char c); + +#endif /* _DEVICE_CONS_H */ diff --git a/device/dev_hdr.h b/device/dev_hdr.h new file mode 100644 index 0000000..ac6ce7e --- /dev/null +++ b/device/dev_hdr.h @@ -0,0 +1,160 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 3/89 + */ + +/* + * Mach device emulation definitions (i386at version). + * + * Copyright (c) 1996 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +#ifndef _DEVICE_DEV_HDR_H_ +#define _DEVICE_DEV_HDR_H_ + +#include <ipc/ipc_types.h> +#include <mach/port.h> +#include <kern/lock.h> +#include <kern/queue.h> + +typedef struct dev_ops *dev_ops_t; + +/* This structure is associated with each open device port. + The port representing the device points to this structure. */ +struct device +{ + struct device_emulation_ops *emul_ops; + void *emul_data; +}; + +typedef struct device *device_t; + +#define DEVICE_NULL ((device_t) 0) + +/* + * Generic device header. May be allocated with the device, + * or built when the device is opened. 
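
[Note: the header that follows carries the open-state machine (DEV_STATE_*) and an exclusive-open flag. A hypothetical helper showing how they might combine under device_lock; D_ALREADY_OPEN is the standard Mach device error for this case, but the helper itself is not from this commit.]

/* Hypothetical helper over the mach_device defined just below:
 * refuse a second open of a device marked exclusive-open. */
io_return_t try_open_count(mach_device_t device)
{
    io_return_t rc = D_SUCCESS;

    device_lock(device);
    if ((device->flag & D_EXCL_OPEN) && device->open_count > 0)
        rc = D_ALREADY_OPEN;     /* exclusive device already open */
    else
        device->open_count++;
    device_unlock(device);
    return rc;
}
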
+ */ +struct mach_device { + decl_simple_lock_data(,ref_lock)/* lock for reference count */ + int ref_count; /* reference count */ + decl_simple_lock_data(, lock) /* lock for rest of state */ + short state; /* state: */ +#define DEV_STATE_INIT 0 /* not open */ +#define DEV_STATE_OPENING 1 /* being opened */ +#define DEV_STATE_OPEN 2 /* open */ +#define DEV_STATE_CLOSING 3 /* being closed */ + short flag; /* random flags: */ +#define D_EXCL_OPEN 0x0001 /* open only once */ + short open_count; /* number of times open */ + short io_in_progress; /* number of IOs in progress */ + boolean_t io_wait; /* someone waiting for IO to finish */ + + struct ipc_port *port; /* open port */ + queue_chain_t number_chain; /* chain for lookup by number */ + int dev_number; /* device number */ + int bsize; /* replacement for DEV_BSIZE */ + struct dev_ops *dev_ops; /* and operations vector */ + struct device dev; /* the real device structure */ +}; +typedef struct mach_device *mach_device_t; +#define MACH_DEVICE_NULL ((mach_device_t)0) + +/* + * To find and remove device entries + */ +mach_device_t device_lookup(const char *); /* by name */ + +void mach_device_reference(mach_device_t); +void mach_device_deallocate(mach_device_t); + +/* + * To find and remove port-to-device mappings + */ +device_t dev_port_lookup(ipc_port_t); +void dev_port_enter(mach_device_t); +void dev_port_remove(mach_device_t); + +typedef boolean_t (*dev_map_fn)(mach_device_t, mach_port_t); + +/* + * To call a routine on each device + */ +boolean_t dev_map(dev_map_fn, mach_port_t); + +/* + * To lock and unlock state and open-count + */ +#define device_lock(device) simple_lock(&(device)->lock) +#define device_unlock(device) simple_unlock(&(device)->lock) + +/* + * device name lookup + */ +extern boolean_t dev_name_lookup( + const char * name, + dev_ops_t *ops, /* out */ + int *unit); /* out */ + +/* + * Change an entry in the indirection list. + */ +extern void dev_set_indirection( + const char *name, + dev_ops_t ops, + int unit); + +/* + * compare device name + */ +extern boolean_t __attribute__ ((pure)) +name_equal( + const char *src, + int len, + const char *target); + +#endif /* _DEVICE_DEV_HDR_H_ */ diff --git a/device/dev_lookup.c b/device/dev_lookup.c new file mode 100644 index 0000000..c9c39f8 --- /dev/null +++ b/device/dev_lookup.c @@ -0,0 +1,364 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. 
Golub, Carnegie Mellon University + * Date: 3/89 + */ + +#include <mach/port.h> +#include <mach/vm_param.h> + +#include <kern/queue.h> +#include <kern/slab.h> + +#include <device/device_types.h> +#include <device/dev_hdr.h> +#include <device/conf.h> +#include <device/param.h> /* DEV_BSIZE, as default */ + +#include <ipc/ipc_port.h> +#include <kern/ipc_kobject.h> + +#include <device/device_emul.h> +#include <device/ds_routines.h> + +/* + * Device structure routines: reference counting, port->device. + */ + +/* + * Lookup/enter by device number. + */ +#define NDEVHASH 8 +#define DEV_NUMBER_HASH(dev) ((dev) & (NDEVHASH-1)) +queue_head_t dev_number_hash_table[NDEVHASH]; + +/* + * Lock for device-number to device lookup. + * Must be held before device-ref_count lock. + */ +def_simple_lock_data(static, dev_number_lock) + +struct kmem_cache dev_hdr_cache; + +/* + * Enter device in the number lookup table. + * The number table lock must be held. + */ +static void +dev_number_enter(const mach_device_t device) +{ + queue_t q; + + q = &dev_number_hash_table[DEV_NUMBER_HASH(device->dev_number)]; + queue_enter(q, device, mach_device_t, number_chain); +} + +/* + * Remove device from the device-number lookup table. + * The device-number table lock must be held. + */ +static void +dev_number_remove(const mach_device_t device) +{ + queue_t q; + + q = &dev_number_hash_table[DEV_NUMBER_HASH(device->dev_number)]; + queue_remove(q, device, mach_device_t, number_chain); +} + +/* + * Lookup a device by device operations and minor number. + * The number table lock must be held. + */ +static mach_device_t +dev_number_lookup(const dev_ops_t ops, int devnum) +{ + queue_t q; + mach_device_t device; + + q = &dev_number_hash_table[DEV_NUMBER_HASH(devnum)]; + queue_iterate(q, device, mach_device_t, number_chain) { + if (device->dev_ops == ops && device->dev_number == devnum) { + return (device); + } + } + return (MACH_DEVICE_NULL); +} + +/* + * Look up a device by name, and create the device structure + * if it does not exist. Enter it in the dev_number lookup + * table. + */ +mach_device_t +device_lookup(const char *name) +{ + dev_ops_t dev_ops; + int dev_minor; + mach_device_t device; + mach_device_t new_device; + + /* + * Get the device and unit number from the name. + */ + if (!dev_name_lookup(name, &dev_ops, &dev_minor)) + return (MACH_DEVICE_NULL); + + /* + * Look up the device in the hash table. If it is + * not there, enter it. + */ + new_device = MACH_DEVICE_NULL; + simple_lock(&dev_number_lock); + while ((device = dev_number_lookup(dev_ops, dev_minor)) + == MACH_DEVICE_NULL) { + /* + * Must unlock to allocate the structure. If + * the structure has appeared after we have allocated, + * release the new structure. + */ + if (new_device != MACH_DEVICE_NULL) + break; /* allocated */ + + simple_unlock(&dev_number_lock); + + new_device = (mach_device_t) kmem_cache_alloc(&dev_hdr_cache); + simple_lock_init(&new_device->ref_lock); + new_device->ref_count = 1; + simple_lock_init(&new_device->lock); + new_device->state = DEV_STATE_INIT; + new_device->flag = 0; + new_device->open_count = 0; + new_device->io_in_progress = 0; + new_device->io_wait = FALSE; + new_device->port = IP_NULL; + new_device->dev_ops = dev_ops; + new_device->dev_number = dev_minor; + new_device->bsize = DEV_BSIZE; /* change later */ + + simple_lock(&dev_number_lock); + } + + if (device == MACH_DEVICE_NULL) { + /* + * No existing device structure. Insert the + * new one. 
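
[Note: device_lookup above allocates its candidate structure with the lock dropped, then re-checks the table before committing. The same race-safe pattern, reduced to its skeleton; every name here is illustrative.]

/* Allocate outside the lock, re-check under the lock, and discard the
 * candidate if another thread inserted first. */
struct obj { int key; struct obj *next; };
extern struct obj *table_find(int key);   /* caller holds lock */
extern void table_insert(struct obj *);   /* caller holds lock */
extern struct obj *obj_alloc(int key);    /* may block */
extern void obj_free(struct obj *);
extern void lock(void), unlock(void);

struct obj *lookup_or_create(int key)
{
    struct obj *o, *cand = 0;

    lock();
    while ((o = table_find(key)) == 0) {
        if (cand != 0)
            break;               /* candidate ready: leave loop, insert */
        unlock();
        cand = obj_alloc(key);   /* sleep-safe: lock not held */
        lock();
    }
    if (o == 0) {                /* still absent: our candidate wins */
        o = cand;
        table_insert(o);
    } else if (cand != 0)
        obj_free(cand);          /* lost the race: discard candidate */
    unlock();
    return o;
}
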
+ */ + assert(new_device != MACH_DEVICE_NULL); + device = new_device; + + dev_number_enter(device); + simple_unlock(&dev_number_lock); + } + else { + /* + * Have existing device. + */ + mach_device_reference(device); + simple_unlock(&dev_number_lock); + + if (new_device != MACH_DEVICE_NULL) + kmem_cache_free(&dev_hdr_cache, (vm_offset_t)new_device); + } + + return (device); +} + +/* + * Add a reference to the device. + */ +void +mach_device_reference(mach_device_t device) +{ + simple_lock(&device->ref_lock); + device->ref_count++; + simple_unlock(&device->ref_lock); +} + +/* + * Remove a reference to the device, and deallocate the + * structure if no references are left. + */ +void +mach_device_deallocate(mach_device_t device) +{ + simple_lock(&device->ref_lock); + if (--device->ref_count > 0) { + simple_unlock(&device->ref_lock); + return; + } + device->ref_count = 1; + simple_unlock(&device->ref_lock); + + simple_lock(&dev_number_lock); + simple_lock(&device->ref_lock); + if (--device->ref_count > 0) { + simple_unlock(&device->ref_lock); + simple_unlock(&dev_number_lock); + return; + } + + dev_number_remove(device); + simple_unlock(&device->ref_lock); + simple_unlock(&dev_number_lock); + + kmem_cache_free(&dev_hdr_cache, (vm_offset_t)device); +} + +/* + + */ +/* + * port-to-device lookup routines. + */ + +/* + * Enter a port-to-device mapping. + */ +void +dev_port_enter(mach_device_t device) +{ + mach_device_reference(device); + + ipc_kobject_set(device->port, + (ipc_kobject_t) &device->dev, IKOT_DEVICE); + device->dev.emul_data = device; + { + extern struct device_emulation_ops mach_device_emulation_ops; + + device->dev.emul_ops = &mach_device_emulation_ops; + } +} + +/* + * Remove a port-to-device mapping. + */ +void +dev_port_remove(mach_device_t device) +{ + ipc_kobject_set(device->port, IKO_NULL, IKOT_NONE); + mach_device_deallocate(device); +} + +/* + * Lookup a device by its port. + * Doesn't consume the naked send right; produces a device reference. + */ +device_t +dev_port_lookup(ipc_port_t port) +{ + device_t device; + + if (!IP_VALID(port)) + return (DEVICE_NULL); + + ip_lock(port); + if (ip_active(port) && (ip_kotype(port) == IKOT_DEVICE)) { + device = (device_t) port->ip_kobject; + if (device->emul_ops->reference) + (*device->emul_ops->reference)(device->emul_data); + } + else + device = DEVICE_NULL; + + ip_unlock(port); + return (device); +} + +/* + * Get the port for a device. + * Consumes a device reference; produces a naked send right. + */ +ipc_port_t +convert_device_to_port(const device_t device) +{ + if (device == DEVICE_NULL) + return IP_NULL; + + return (*device->emul_ops->dev_to_port) (device->emul_data); +} + +/* + * Call a supplied routine on each device, passing it + * the port as an argument. If the routine returns TRUE, + * stop the search and return TRUE. If none returns TRUE, + * return FALSE. 
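+ *
+ * A reference is taken on each device before the table lock is
+ * dropped around the callback, so the routine may block without
+ * the hash chain changing underneath it.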
+ */
+boolean_t
+dev_map(
+ dev_map_fn routine,
+ mach_port_t port)
+{
+ int i;
+ queue_t q;
+ mach_device_t dev, prev_dev;
+
+ for (i = 0, q = &dev_number_hash_table[0];
+ i < NDEVHASH;
+ i++, q++) {
+ prev_dev = MACH_DEVICE_NULL;
+ simple_lock(&dev_number_lock);
+ queue_iterate(q, dev, mach_device_t, number_chain) {
+ mach_device_reference(dev);
+ simple_unlock(&dev_number_lock);
+ if (prev_dev != MACH_DEVICE_NULL)
+ mach_device_deallocate(prev_dev);
+
+ if ((*routine)(dev, port)) {
+ /*
+ * Done
+ */
+ mach_device_deallocate(dev);
+ return (TRUE);
+ }
+
+ simple_lock(&dev_number_lock);
+ prev_dev = dev;
+ }
+ simple_unlock(&dev_number_lock);
+ if (prev_dev != MACH_DEVICE_NULL)
+ mach_device_deallocate(prev_dev);
+ }
+ return (FALSE);
+}
+
+/*
+ * Initialization
+ */
+void
+dev_lookup_init(void)
+{
+ int i;
+
+ simple_lock_init(&dev_number_lock);
+
+ for (i = 0; i < NDEVHASH; i++)
+ queue_init(&dev_number_hash_table[i]);
+
+ kmem_cache_init(&dev_hdr_cache, "mach_device",
+ sizeof(struct mach_device), 0, NULL, 0);
+}
diff --git a/device/dev_master.h b/device/dev_master.h
new file mode 100644
index 0000000..70d4c63
--- /dev/null
+++ b/device/dev_master.h
@@ -0,0 +1,65 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 11/89
+ *
+ * Bind an IO operation to the master CPU.
+ */
+
+#ifndef _DEVICE_DEV_MASTER_H_
+#define _DEVICE_DEV_MASTER_H_
+
+#include <cpus.h>
+
+#if NCPUS > 1
+
+#include <kern/macros.h>
+#include <kern/cpu_number.h>
+#include <kern/sched_prim.h>
+#include <kern/thread.h>
+#include <kern/processor.h>
+
+#define io_grab_master() \
+ MACRO_BEGIN \
+ thread_bind(current_thread(), master_processor); \
+ if (current_processor() != master_processor) \
+ thread_block((void (*)()) 0); \
+ MACRO_END
+
+#define io_release_master() \
+ MACRO_BEGIN \
+ thread_bind(current_thread(), PROCESSOR_NULL); \
+ MACRO_END
+
+#else /* NCPUS > 1 */
+
+#define io_grab_master()
+
+#define io_release_master()
+
+#endif /* NCPUS > 1 */
+
+#endif /* _DEVICE_DEV_MASTER_H_ */
diff --git a/device/dev_name.c b/device/dev_name.c
new file mode 100644
index 0000000..abd525c
--- /dev/null
+++ b/device/dev_name.c
@@ -0,0 +1,242 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/89 + */ + +#include <kern/printf.h> +#include <string.h> +#include <device/device_types.h> +#include <device/dev_hdr.h> +#include <device/conf.h> + + + +/* + * Routines placed in empty entries in the device tables + */ +int nulldev_reset(dev_t dev) +{ + return (D_SUCCESS); +} + +int nulldev_open(dev_t dev, int flags, io_req_t ior) +{ + return (D_SUCCESS); +} + +void nulldev_close(dev_t dev, int flags) +{ +} + +int nulldev_read(dev_t dev, io_req_t ior) +{ + return (D_SUCCESS); +} + +int nulldev_write(dev_t dev, io_req_t ior) +{ + return (D_SUCCESS); +} + +io_return_t nulldev_getstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count) +{ + return (D_SUCCESS); +} + +io_return_t nulldev_setstat(dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t count) +{ + return (D_SUCCESS); +} + +int nulldev_portdeath(dev_t dev, mach_port_t port) +{ + return (D_SUCCESS); +} + +int nodev_async_in(dev_t dev, const ipc_port_t port, int x, filter_t* filter, unsigned int j) +{ + return (D_INVALID_OPERATION); +} + +int nodev_info(dev_t dev, int a, int* b) +{ + return (D_INVALID_OPERATION); +} + +vm_offset_t +nomap(dev_t dev, vm_offset_t off, int prot) +{ + return -1; +} + +/* + * Name comparison routine. + * Compares first 'len' characters of 'src' + * with 'target', which is zero-terminated. + * Returns TRUE if strings are equal: + * src and target are equal in first 'len' characters + * next character of target is 0 (end of string). + */ +boolean_t __attribute__ ((pure)) +name_equal(const char *src, + int len, + const char *target) +{ + while (--len >= 0) + if (*src++ != *target++) + return FALSE; + return *target == 0; +} + +/* + * device name lookup + */ +boolean_t dev_name_lookup( + const char *name, + dev_ops_t *ops, /* out */ + int *unit) /* out */ +{ + /* + * Assume that block device names are of the form + * + * <device_name><unit_number>[[<slice num>]<partition>] + * + * where + * <device_name> is the name in the device table + * <unit_number> is an integer + * <slice num> * is 's' followed by a number (disks only!) + * <partition> is a letter in [a-h] (disks only?) 
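+ *
+ * For instance (illustrative name, not necessarily an entry in
+ * the device table), "hd0s2a" would parse as device "hd", unit 0,
+ * slice 2, partition 'a', giving minor number
+ * 0 * d_subdev + (2 << 4) + ('a' - 'a' + 1) = 33.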
+ */ + + const char *cp = name; + int len; + int j = 0; + int c; + dev_ops_t dev; + boolean_t found; + + int slice_num = 0; + + /* + * Find device type name (characters before digit) + */ + while ((c = *cp) != '\0' && + !(c >= '0' && c <= '9')) + cp++; + + len = cp - name; + if (c != '\0') { + /* + * Find unit number + */ + while ((c = *cp) != '\0' && + c >= '0' && c <= '9') { + j = j * 10 + (c - '0'); + cp++; + } + } + + found = FALSE; + dev_search(dev) { + if (name_equal(name, len, dev->d_name)) { + found = TRUE; + break; + } + } + if (!found) { + /* name not found - try indirection list */ + dev_indirect_t di; + + dev_indirect_search(di) { + if (name_equal(name, len, di->d_name)) { + /* + * Return device and unit from indirect vector. + */ + *ops = di->d_ops; + *unit = di->d_unit; + return (TRUE); + } + } + /* Not found in either list. */ + return (FALSE); + } + + *ops = dev; + *unit = j; + + /* + * Find sub-device number + */ + + j = dev->d_subdev; + if (j > 0) { + /* if no slice string, slice num = 0 */ + + /* <subdev_count>*unit + <slice_number>*16 -- I know it's bad */ + *unit *= j; + + /* find slice ? */ + if (c == 's') { + cp++; + while ((c = *cp) != '\0' && + c >= '0' && c <= '9') { + slice_num = slice_num * 10 + (c - '0'); + cp++; + } + } + + *unit += (slice_num << 4); + /* if slice==0, it is either compatibility or whole device */ + + if (c >= 'a' && c < 'a' + j) { /* note: w/o this -> whole slice */ + /* + * Minor number is <subdev_count>*unit + letter. + * NOW it is slice result + letter + */ + *unit += (c - 'a' +1); + } + } + return (TRUE); +} + +/* + * Change an entry in the indirection list. + */ +void +dev_set_indirection(const char *name, dev_ops_t ops, int unit) +{ + dev_indirect_t di; + + dev_indirect_search(di) { + if (!strcmp(di->d_name, name)) { + di->d_ops = ops; + di->d_unit = unit; + break; + } + } +} diff --git a/device/dev_pager.c b/device/dev_pager.c new file mode 100644 index 0000000..1cd7406 --- /dev/null +++ b/device/dev_pager.c @@ -0,0 +1,662 @@ +/* + * Mach Operating System + * Copyright (c) 1993-1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 3/89 + * + * Device pager. 
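+ *
+ * Backs memory objects for mappable devices: page faults on such
+ * an object are resolved through the device's d_mmap routine
+ * rather than through any backing store.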
+ */
+
+#include <string.h>
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/std_types.h>
+#include <mach/mach_types.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <kern/queue.h>
+#include <kern/slab.h>
+
+#include <vm/vm_page.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+
+#include <device/device_pager.server.h>
+#include <device/device_types.h>
+#include <device/ds_routines.h>
+#include <device/dev_hdr.h>
+#include <device/io_req.h>
+#include <device/memory_object_reply.user.h>
+#include <device/dev_pager.h>
+#include <device/blkio.h>
+#include <device/conf.h>
+
+/*
+ * The device pager routines are called directly from the message
+ * system (via mach_msg), and thus run in the kernel-internal
+ * environment. All ports are in internal form (ipc_port_t),
+ * and must be correctly reference-counted in order to be saved
+ * in other data structures. Kernel routines may be called
+ * directly. Kernel types are used for data objects (tasks,
+ * memory objects, ports). The only IPC routines that may be
+ * called are ones that masquerade as the kernel task (via
+ * msg_send_from_kernel).
+ *
+ * Port rights and references are maintained as follows:
+ * Memory object port:
+ * The device_pager task has all rights.
+ * Memory object control port:
+ * The device_pager task has only send rights.
+ * Memory object name port:
+ * The device_pager task has only send rights.
+ * The name port is not even recorded.
+ * Regardless of how the object is created, the control and name
+ * ports are created by the kernel and passed through the memory
+ * management interface.
+ *
+ * The device_pager assumes that access to its memory objects
+ * will not be propagated to more than one host, and therefore
+ * provides no consistency guarantees beyond those made by the
+ * kernel.
+ *
+ * In the event that more than one host attempts to use a device
+ * memory object, the device_pager will only record the last set
+ * of port names. [This can happen with only one host if a new
+ * mapping is being established while termination of all previous
+ * mappings is taking place.] Currently, the device_pager assumes
+ * that its clients adhere to the initialization and termination
+ * protocols in the memory management interface; otherwise, port
+ * rights or out-of-line memory from erroneous messages may be
+ * allowed to accumulate.
+ *
+ * [The phrase "currently" has been used above to denote aspects of
+ * the implementation that could be altered without changing the rest
+ * of the basic documentation.]
+ */
+
+/*
+ * Basic device pager structure.
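+ *
+ * One record exists per mapped (device, offset) pair; each record
+ * is entered in two hash tables, keyed by pager port and by
+ * device + offset respectively.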
+ */ +struct dev_pager { + decl_simple_lock_data(, lock) /* lock for reference count */ + int ref_count; /* reference count */ + int client_count; /* How many memory_object_create + * calls have we received */ + ipc_port_t pager; /* pager port */ + ipc_port_t pager_request; /* Known request port */ + ipc_port_t pager_name; /* Known name port */ + mach_device_t device; /* Device handle */ + vm_offset_t offset; /* offset within the pager, in bytes*/ + int type; /* to distinguish */ +#define DEV_PAGER_TYPE 0 +#define CHAR_PAGER_TYPE 1 + /* char pager specifics */ + int prot; + vm_size_t size; +}; +typedef struct dev_pager *dev_pager_t; +#define DEV_PAGER_NULL ((dev_pager_t)0) + + +struct kmem_cache dev_pager_cache; + +static void dev_pager_reference(dev_pager_t ds) +{ + simple_lock(&ds->lock); + ds->ref_count++; + simple_unlock(&ds->lock); +} + +static void dev_pager_deallocate(dev_pager_t ds) +{ + simple_lock(&ds->lock); + if (--ds->ref_count > 0) { + simple_unlock(&ds->lock); + return; + } + + simple_unlock(&ds->lock); + kmem_cache_free(&dev_pager_cache, (vm_offset_t)ds); +} + +/* + * A hash table of ports for device_pager backed objects. + */ + +#define DEV_HASH_COUNT 127 + +struct dev_pager_entry { + queue_chain_t links; + ipc_port_t name; + dev_pager_t pager_rec; +}; +typedef struct dev_pager_entry *dev_pager_entry_t; + +/* + * Indexed by port name, each element contains a queue of all dev_pager_entry_t + * which name shares the same hash + */ +queue_head_t dev_pager_hashtable[DEV_HASH_COUNT]; +struct kmem_cache dev_pager_hash_cache; +def_simple_lock_data(static, dev_pager_hash_lock) + +struct dev_device_entry { + queue_chain_t links; + mach_device_t device; + vm_offset_t offset; + dev_pager_t pager_rec; +}; +typedef struct dev_device_entry *dev_device_entry_t; + +/* + * Indexed by device + offset, each element contains a queue of all + * dev_device_entry_t which device + offset shares the same hash + */ +queue_head_t dev_device_hashtable[DEV_HASH_COUNT]; +struct kmem_cache dev_device_hash_cache; +def_simple_lock_data(static, dev_device_hash_lock) + +#define dev_hash(name_port) \ + (((vm_offset_t)(name_port) & 0xffffff) % DEV_HASH_COUNT) + +static void dev_pager_hash_init(void) +{ + int i; + vm_size_t size; + + size = sizeof(struct dev_pager_entry); + kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry", size, 0, + NULL, 0); + for (i = 0; i < DEV_HASH_COUNT; i++) + queue_init(&dev_pager_hashtable[i]); + simple_lock_init(&dev_pager_hash_lock); +} + +static void dev_pager_hash_insert( + const ipc_port_t name_port, + const dev_pager_t rec) +{ + dev_pager_entry_t new_entry; + + new_entry = (dev_pager_entry_t) kmem_cache_alloc(&dev_pager_hash_cache); + new_entry->name = name_port; + new_entry->pager_rec = rec; + + simple_lock(&dev_pager_hash_lock); + queue_enter(&dev_pager_hashtable[dev_hash(name_port)], + new_entry, dev_pager_entry_t, links); + simple_unlock(&dev_pager_hash_lock); +} + +static void dev_pager_hash_delete(const ipc_port_t name_port) +{ + queue_t bucket; + dev_pager_entry_t entry; + + bucket = &dev_pager_hashtable[dev_hash(name_port)]; + + simple_lock(&dev_pager_hash_lock); + for (entry = (dev_pager_entry_t)queue_first(bucket); + !queue_end(bucket, &entry->links); + entry = (dev_pager_entry_t)queue_next(&entry->links)) { + if (entry->name == name_port) { + queue_remove(bucket, entry, dev_pager_entry_t, links); + break; + } + } + simple_unlock(&dev_pager_hash_lock); + if (!queue_end(bucket, &entry->links)) + kmem_cache_free(&dev_pager_hash_cache, (vm_offset_t)entry); +} + 
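+/*
+ * Find the pager record for a memory-object port, taking a
+ * reference that the caller must release with dev_pager_deallocate().
+ * Typical caller (sketch):
+ *
+ * ds = dev_pager_hash_lookup(pager);
+ * if (ds == DEV_PAGER_NULL)
+ * ...no pager is registered for this port...
+ * ...use ds...
+ * dev_pager_deallocate(ds);
+ */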
+static dev_pager_t dev_pager_hash_lookup(const ipc_port_t name_port) +{ + queue_t bucket; + dev_pager_entry_t entry; + dev_pager_t pager; + + bucket = &dev_pager_hashtable[dev_hash(name_port)]; + + simple_lock(&dev_pager_hash_lock); + for (entry = (dev_pager_entry_t)queue_first(bucket); + !queue_end(bucket, &entry->links); + entry = (dev_pager_entry_t)queue_next(&entry->links)) { + if (entry->name == name_port) { + pager = entry->pager_rec; + dev_pager_reference(pager); + simple_unlock(&dev_pager_hash_lock); + return (pager); + } + } + simple_unlock(&dev_pager_hash_lock); + return (DEV_PAGER_NULL); +} + +static void dev_device_hash_init(void) +{ + int i; + vm_size_t size; + + size = sizeof(struct dev_device_entry); + kmem_cache_init(&dev_device_hash_cache, "dev_device_entry", size, 0, + NULL, 0); + for (i = 0; i < DEV_HASH_COUNT; i++) { + queue_init(&dev_device_hashtable[i]); + } + simple_lock_init(&dev_device_hash_lock); +} + +static void dev_device_hash_insert( + const mach_device_t device, + const vm_offset_t offset, + const dev_pager_t rec) +{ + dev_device_entry_t new_entry; + + new_entry = (dev_device_entry_t) kmem_cache_alloc(&dev_device_hash_cache); + new_entry->device = device; + new_entry->offset = offset; + new_entry->pager_rec = rec; + + simple_lock(&dev_device_hash_lock); + queue_enter(&dev_device_hashtable[dev_hash(device + offset)], + new_entry, dev_device_entry_t, links); + simple_unlock(&dev_device_hash_lock); +} + +static void dev_device_hash_delete( + const mach_device_t device, + const vm_offset_t offset) +{ + queue_t bucket; + dev_device_entry_t entry; + + bucket = &dev_device_hashtable[dev_hash(device + offset)]; + + simple_lock(&dev_device_hash_lock); + for (entry = (dev_device_entry_t)queue_first(bucket); + !queue_end(bucket, &entry->links); + entry = (dev_device_entry_t)queue_next(&entry->links)) { + if (entry->device == device && entry->offset == offset) { + queue_remove(bucket, entry, dev_device_entry_t, links); + break; + } + } + simple_unlock(&dev_device_hash_lock); + if (!queue_end(bucket, &entry->links)) + kmem_cache_free(&dev_device_hash_cache, (vm_offset_t)entry); +} + +static dev_pager_t dev_device_hash_lookup( + const mach_device_t device, + const vm_offset_t offset) +{ + queue_t bucket; + dev_device_entry_t entry; + dev_pager_t pager; + + bucket = &dev_device_hashtable[dev_hash(device + offset)]; + + simple_lock(&dev_device_hash_lock); + for (entry = (dev_device_entry_t)queue_first(bucket); + !queue_end(bucket, &entry->links); + entry = (dev_device_entry_t)queue_next(&entry->links)) { + if (entry->device == device && entry->offset == offset) { + pager = entry->pager_rec; + dev_pager_reference(pager); + simple_unlock(&dev_device_hash_lock); + return (pager); + } + } + simple_unlock(&dev_device_hash_lock); + return (DEV_PAGER_NULL); +} + +kern_return_t device_pager_setup( + const mach_device_t device, + int prot, + vm_offset_t offset, + vm_size_t size, + mach_port_t *pager) +{ + dev_pager_t d; + + /* + * Verify the device is indeed mappable + */ + if (!device->dev_ops->d_mmap || (device->dev_ops->d_mmap == nomap)) + return (D_INVALID_OPERATION); + + /* + * Allocate a structure to hold the arguments + * and port to represent this object. 
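+ *
+ * If a pager already exists for this (device, offset) pair, it is
+ * reused and only a fresh send right to its port is made.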
+ */ + + d = dev_device_hash_lookup(device, offset); + if (d != DEV_PAGER_NULL) { + *pager = (mach_port_t) ipc_port_make_send(d->pager); + dev_pager_deallocate(d); + return (D_SUCCESS); + } + + d = (dev_pager_t) kmem_cache_alloc(&dev_pager_cache); + if (d == DEV_PAGER_NULL) + return (KERN_RESOURCE_SHORTAGE); + + simple_lock_init(&d->lock); + d->ref_count = 1; + + /* + * Allocate the pager port. + */ + d->pager = ipc_port_alloc_kernel(); + if (d->pager == IP_NULL) { + dev_pager_deallocate(d); + return (KERN_RESOURCE_SHORTAGE); + } + + d->client_count = 0; + d->pager_request = IP_NULL; + d->pager_name = IP_NULL; + d->device = device; + mach_device_reference(device); + d->offset = offset; + d->prot = prot; + d->size = round_page(size); + if (device->dev_ops->d_mmap == block_io_mmap) { + d->type = DEV_PAGER_TYPE; + } else { + d->type = CHAR_PAGER_TYPE; + } + + dev_pager_hash_insert(d->pager, d); + dev_device_hash_insert(d->device, d->offset, d); + + *pager = (mach_port_t) ipc_port_make_send(d->pager); + return (KERN_SUCCESS); +} + +boolean_t device_pager_debug = FALSE; + +kern_return_t device_pager_data_request( + const ipc_port_t pager, + const ipc_port_t pager_request, + vm_offset_t offset, + vm_size_t length, + vm_prot_t protection_required) +{ + dev_pager_t ds; + kern_return_t ret; + + if (device_pager_debug) + printf("(device_pager)data_request: pager=%p, offset=0x%lx, length=0x%lx\n", + pager, (unsigned long) offset, (unsigned long) length); + + ds = dev_pager_hash_lookup(pager); + if (ds == DEV_PAGER_NULL) + panic("(device_pager)data_request: lookup failed"); + + if (ds->pager_request != pager_request) + panic("(device_pager)data_request: bad pager_request"); + + if (ds->type == CHAR_PAGER_TYPE) { + vm_object_t object; + + object = vm_object_lookup(pager_request); + if (object == VM_OBJECT_NULL) { + (void) r_memory_object_data_error(pager_request, + offset, length, + KERN_FAILURE); + dev_pager_deallocate(ds); + return (KERN_SUCCESS); + } + + ret = vm_object_page_map(object, + offset, length, + device_map_page, (void *)ds); + + if (ret != KERN_SUCCESS) { + (void) r_memory_object_data_error(pager_request, + offset, length, + ret); + vm_object_deallocate(object); + dev_pager_deallocate(ds); + return (KERN_SUCCESS); + } + + vm_object_deallocate(object); + } + else { + panic("(device_pager)data_request: dev pager"); + } + + dev_pager_deallocate(ds); + + return (KERN_SUCCESS); +} + +kern_return_t device_pager_copy( + const ipc_port_t pager, + const ipc_port_t pager_request, + vm_offset_t offset, + vm_size_t length, + const ipc_port_t new_pager) +{ + panic("(device_pager)copy: called"); +} + +kern_return_t +device_pager_supply_completed( + const ipc_port_t pager, + const ipc_port_t pager_request, + vm_offset_t offset, + vm_size_t length, + kern_return_t result, + vm_offset_t error_offset) +{ + panic("(device_pager)supply_completed: called"); +} + +kern_return_t +device_pager_data_return( + const ipc_port_t pager, + const ipc_port_t pager_request, + vm_offset_t offset, + pointer_t addr, + mach_msg_type_number_t data_cnt, + boolean_t dirty, + boolean_t kernel_copy) +{ + panic("(device_pager)data_return: called"); +} + +kern_return_t +device_pager_change_completed( + const ipc_port_t pager, + boolean_t may_cache, + memory_object_copy_strategy_t copy_strategy) +{ + panic("(device_pager)change_completed: called"); +} + +/* + * The mapping function takes a byte offset, but returns + * a machine-dependent page frame number. 
We convert + * that into something that the pmap module will + * accept later. + */ +phys_addr_t device_map_page( + void *dsp, + vm_offset_t offset) +{ + dev_pager_t ds = (dev_pager_t) dsp; + vm_offset_t pagenum = + (*(ds->device->dev_ops->d_mmap)) + (ds->device->dev_number, + ds->offset + offset, + ds->prot); + + if (pagenum == -1) + return vm_page_fictitious_addr; + + return pmap_phys_address(pagenum); +} + +kern_return_t device_pager_init_pager( + const ipc_port_t pager, + const ipc_port_t pager_request, + const ipc_port_t pager_name, + vm_size_t pager_page_size) +{ + dev_pager_t ds; + + if (device_pager_debug) + printf("(device_pager)init: pager=%p, request=%p, name=%p\n", + pager, pager_request, pager_name); + + assert(pager_page_size == PAGE_SIZE); + assert(IP_VALID(pager_request)); + assert(IP_VALID(pager_name)); + + ds = dev_pager_hash_lookup(pager); + assert(ds != DEV_PAGER_NULL); + + assert(ds->client_count == 0); + assert(ds->pager_request == IP_NULL); + assert(ds->pager_name == IP_NULL); + + ds->client_count = 1; + + /* + * We save the send rights for the request and name ports. + */ + + ds->pager_request = pager_request; + ds->pager_name = pager_name; + + if (ds->type == CHAR_PAGER_TYPE) { + /* + * Reply that the object is ready + */ + (void) r_memory_object_ready(pager_request, + FALSE, /* do not cache */ + MEMORY_OBJECT_COPY_NONE); + } else { + (void) r_memory_object_ready(pager_request, + TRUE, /* cache */ + MEMORY_OBJECT_COPY_DELAY); + } + + dev_pager_deallocate(ds); + return (KERN_SUCCESS); +} + +kern_return_t device_pager_terminate( + const ipc_port_t pager, + const ipc_port_t pager_request, + const ipc_port_t pager_name) +{ + dev_pager_t ds; + + assert(IP_VALID(pager_request)); + assert(IP_VALID(pager_name)); + + ds = dev_pager_hash_lookup(pager); + assert(ds != DEV_PAGER_NULL); + + assert(ds->client_count == 1); + assert(ds->pager_request == pager_request); + assert(ds->pager_name == pager_name); + + dev_pager_hash_delete(ds->pager); + dev_device_hash_delete(ds->device, ds->offset); + mach_device_deallocate(ds->device); + + /* release the send rights we have saved from the init call */ + + ipc_port_release_send(pager_request); + ipc_port_release_send(pager_name); + + /* release the naked receive rights we just acquired */ + + ipc_port_release_receive(pager_request); + ipc_port_release_receive(pager_name); + + /* release the kernel's receive right for the pager port */ + + ipc_port_dealloc_kernel(pager); + + /* once for ref from lookup, once to make it go away */ + dev_pager_deallocate(ds); + dev_pager_deallocate(ds); + + return (KERN_SUCCESS); +} + +kern_return_t device_pager_data_unlock( + const ipc_port_t memory_object, + const ipc_port_t memory_control_port, + vm_offset_t offset, + vm_size_t length, + vm_prot_t desired_access) +{ + panic("(device_pager)data_unlock: called"); + return (KERN_FAILURE); +} + +kern_return_t device_pager_lock_completed( + const ipc_port_t memory_object, + const ipc_port_t pager_request_port, + vm_offset_t offset, + vm_size_t length) +{ + panic("(device_pager)lock_completed: called"); + return (KERN_FAILURE); +} + +void device_pager_init(void) +{ + vm_size_t size; + + /* + * Initialize cache of paging structures. + */ + size = sizeof(struct dev_pager); + kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0, + NULL, 0); + + /* + * Initialize the name port hashing stuff. 
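+ * This covers both the pager-port table and the
+ * device + offset table.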
+ */ + dev_pager_hash_init(); + dev_device_hash_init(); +} diff --git a/device/dev_pager.h b/device/dev_pager.h new file mode 100644 index 0000000..dc4b202 --- /dev/null +++ b/device/dev_pager.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2013 Free Software Foundation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef _DEVICE_DEV_PAGER_H_ +#define _DEVICE_DEV_PAGER_H_ + +phys_addr_t device_map_page(void *dsp, vm_offset_t offset); + +boolean_t device_pager_data_request_done(io_req_t ior); + +#endif /* _DEVICE_DEV_PAGER_H_ */ diff --git a/device/device.srv b/device/device.srv new file mode 100644 index 0000000..f63813f --- /dev/null +++ b/device/device.srv @@ -0,0 +1,27 @@ +/* + * Copyright (c) 1994 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Bryan Ford, University of Utah CSL + */ +/* This is a server presentation file. */ + +#define KERNEL_SERVER 1 + +#include <device/device.defs> diff --git a/device/device_emul.h b/device/device_emul.h new file mode 100644 index 0000000..873d7f5 --- /dev/null +++ b/device/device_emul.h @@ -0,0 +1,64 @@ +/* + * Mach device emulation definitions (i386at version). + * + * Copyright (c) 1996 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. 
+ * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +#ifndef _I386AT_DEVICE_EMUL_H_ +#define _I386AT_DEVICE_EMUL_H_ + +#include <mach/notify.h> +#include <device/net_status.h> + +/* Each emulation layer provides these operations. */ +struct device_emulation_ops +{ + void (*reference) (void *); + void (*dealloc) (void *); + ipc_port_t (*dev_to_port) (void *); + io_return_t (*open) (ipc_port_t, mach_msg_type_name_t, + dev_mode_t, const char *, device_t *); + io_return_t (*close) (void *); + io_return_t (*write) (void *, ipc_port_t, mach_msg_type_name_t, + dev_mode_t, recnum_t, io_buf_ptr_t, unsigned, int *); + io_return_t (*write_inband) (void *, ipc_port_t, mach_msg_type_name_t, + dev_mode_t, recnum_t, const io_buf_ptr_inband_t, + unsigned, int *); + io_return_t (*read) (void *, ipc_port_t, mach_msg_type_name_t, + dev_mode_t, recnum_t, int, io_buf_ptr_t *, unsigned *); + io_return_t (*read_inband) (void *, ipc_port_t, mach_msg_type_name_t, + dev_mode_t, recnum_t, int, char *, unsigned *); + io_return_t (*set_status) (void *, dev_flavor_t, dev_status_t, + mach_msg_type_number_t); + io_return_t (*get_status) (void *, dev_flavor_t, dev_status_t, + mach_msg_type_number_t *); + io_return_t (*set_filter) (void *, ipc_port_t, int, filter_t [], unsigned); + io_return_t (*map) (void *, vm_prot_t, vm_offset_t, + vm_size_t, ipc_port_t *, boolean_t); + void (*no_senders) (mach_no_senders_notification_t *); + io_return_t (*write_trap) (void *, dev_mode_t, + rpc_recnum_t, rpc_vm_offset_t, rpc_vm_size_t); + io_return_t (*writev_trap) (void *, dev_mode_t, + rpc_recnum_t, rpc_io_buf_vec_t *, rpc_vm_size_t); +}; + +#endif /* _I386AT_DEVICE_EMUL_H_ */ diff --git a/device/device_init.c b/device/device_init.c new file mode 100644 index 0000000..287d0a2 --- /dev/null +++ b/device/device_init.c @@ -0,0 +1,67 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/89 + * + * Initialize device service as part of kernel task. + */ +#include <ipc/ipc_port.h> +#include <ipc/ipc_space.h> +#include <kern/debug.h> +#include <kern/task.h> +#include <xen/xen.h> + +#include <device/device_types.h> +#include <device/device_port.h> +#include <device/tty.h> +#include <device/device_init.h> +#include <device/ds_routines.h> +#include <device/net_io.h> +#include <device/chario.h> + + +ipc_port_t master_device_port; + +void +device_service_create(void) +{ + master_device_port = ipc_port_alloc_kernel(); + if (master_device_port == IP_NULL) + panic("can't allocate master device port"); + + mach_device_init(); +#ifdef MACH_HYP + hyp_dev_init(); +#endif + dev_lookup_init(); + net_io_init(); + device_pager_init(); + chario_init(); + + (void) kernel_thread(kernel_task, io_done_thread, 0); + (void) kernel_thread(kernel_task, net_thread, 0); +} diff --git a/device/device_init.h b/device/device_init.h new file mode 100644 index 0000000..175b34d --- /dev/null +++ b/device/device_init.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2013 Free Software Foundation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef _DEVICE_DEVICE_INIT_H_ +#define _DEVICE_DEVICE_INIT_H_ + +extern void device_service_create(void); + +#endif /* _DEVICE_DEVICE_INIT_H_ */ diff --git a/device/device_pager.srv b/device/device_pager.srv new file mode 100644 index 0000000..410323d --- /dev/null +++ b/device/device_pager.srv @@ -0,0 +1,43 @@ +/* + * Copyright (c) 1994 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Bryan Ford, University of Utah CSL + */ +/* This is a server presentation file. */ + +#define KERNEL_SERVER 1 + +#define memory_object device_pager + +/* + * Rename all of the functions in the pager interface, to avoid + * confusing them with the kernel interface. + */ +#define memory_object_init device_pager_init_pager +#define memory_object_terminate device_pager_terminate +#define memory_object_copy device_pager_copy +#define memory_object_data_request device_pager_data_request +#define memory_object_data_unlock device_pager_data_unlock +#define memory_object_lock_completed device_pager_lock_completed +#define memory_object_supply_completed device_pager_supply_completed +#define memory_object_data_return device_pager_data_return +#define memory_object_change_completed device_pager_change_completed + +#include <mach/memory_object.defs> diff --git a/device/device_port.h b/device/device_port.h new file mode 100644 index 0000000..8f8aaaa --- /dev/null +++ b/device/device_port.h @@ -0,0 +1,41 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/89 + */ + +#ifndef _DEVICE_DEVICE_PORT_H_ +#define _DEVICE_DEVICE_PORT_H_ + +#include <ipc/ipc_port.h> + +/* + * Master privileged port for this host's device service + */ +extern ipc_port_t master_device_port; + +#endif /* _DEVICE_DEVICE_PORT_H_ */ diff --git a/device/device_reply.cli b/device/device_reply.cli new file mode 100644 index 0000000..956540c --- /dev/null +++ b/device/device_reply.cli @@ -0,0 +1,27 @@ +/* + * Copyright (c) 1994 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. 
THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Bryan Ford, University of Utah CSL + */ +/* This is a client presentation file. */ + +#define KERNEL_USER 1 + +#include <device/device_reply.defs> diff --git a/device/device_types_kernel.h b/device/device_types_kernel.h new file mode 100644 index 0000000..e17055c --- /dev/null +++ b/device/device_types_kernel.h @@ -0,0 +1,43 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/89 + */ + +#ifndef _DEVICE_DEVICE_TYPES_KERNEL_H_ +#define _DEVICE_DEVICE_TYPES_KERNEL_H_ + +/* + * Kernel-only type definitions for device server. + */ + +#include <mach/port.h> +#include <device/dev_hdr.h> + +extern ipc_port_t convert_device_to_port(device_t); + +#endif /* _DEVICE_DEVICE_TYPES_KERNEL_H_ */ diff --git a/device/ds_routines.c b/device/ds_routines.c new file mode 100644 index 0000000..d97d229 --- /dev/null +++ b/device/ds_routines.c @@ -0,0 +1,1901 @@ +/* + * Mach Operating System + * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 3/89 + */ + +/* + * Mach device server routines (i386at version). + * + * Copyright (c) 1996 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). 
+ * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +#include <kern/printf.h> +#include <string.h> + +#include <mach/boolean.h> +#include <mach/kern_return.h> +#include <mach/mig_errors.h> +#include <mach/port.h> +#include <mach/vm_param.h> +#include <mach/notify.h> +#include <machine/locore.h> +#include <machine/machspl.h> /* spl definitions */ + +#include <ipc/ipc_port.h> +#include <ipc/ipc_space.h> + +#include <kern/ast.h> +#include <kern/counters.h> +#include <kern/debug.h> +#include <kern/printf.h> +#include <kern/queue.h> +#include <kern/slab.h> +#include <kern/thread.h> +#include <kern/task.h> +#include <kern/sched_prim.h> + +#include <vm/memory_object.h> +#include <vm/vm_map.h> +#include <vm/vm_kern.h> +#include <vm/vm_user.h> + +#include <device/device_types.h> +#include <device/device.server.h> +#include <device/dev_hdr.h> +#include <device/conf.h> +#include <device/io_req.h> +#include <device/ds_routines.h> +#include <device/net_status.h> +#include <device/device_port.h> +#include <device/device_reply.user.h> +#include <device/device_emul.h> +#include <device/intr.h> + +#include <machine/machspl.h> + +#ifdef LINUX_DEV +extern struct device_emulation_ops linux_block_emulation_ops; +#ifdef CONFIG_INET +extern struct device_emulation_ops linux_net_emulation_ops; +extern void free_skbuffs (void); +#ifdef CONFIG_PCMCIA +extern struct device_emulation_ops linux_pcmcia_emulation_ops; +#endif /* CONFIG_PCMCIA */ +#endif /* CONFIG_INET */ +#endif /* LINUX_DEV */ +#ifdef MACH_HYP +extern struct device_emulation_ops hyp_block_emulation_ops; +extern struct device_emulation_ops hyp_net_emulation_ops; +#endif /* MACH_HYP */ +extern struct device_emulation_ops mach_device_emulation_ops; + +/* List of emulations. 
*/ +static struct device_emulation_ops *emulation_list[] = +{ +#ifdef LINUX_DEV + &linux_block_emulation_ops, +#ifdef CONFIG_INET + &linux_net_emulation_ops, +#ifdef CONFIG_PCMCIA + &linux_pcmcia_emulation_ops, +#endif /* CONFIG_PCMCIA */ +#endif /* CONFIG_INET */ +#endif /* LINUX_DEV */ +#ifdef MACH_HYP + &hyp_block_emulation_ops, + &hyp_net_emulation_ops, +#endif /* MACH_HYP */ + &mach_device_emulation_ops, +}; + +static struct vm_map device_io_map_store; +vm_map_t device_io_map = &device_io_map_store; +struct kmem_cache io_inband_cache; + +#define NUM_EMULATION (sizeof (emulation_list) / sizeof (emulation_list[0])) + +io_return_t +ds_device_open (ipc_port_t open_port, ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + const_dev_name_t name, device_t *devp) +{ + unsigned i; + io_return_t err; + + /* Open must be called on the master device port. */ + if (open_port != master_device_port) + return D_INVALID_OPERATION; + + /* There must be a reply port. */ + if (! IP_VALID (reply_port)) + { + printf ("ds_* invalid reply port\n"); + SoftDebugger ("ds_* reply_port"); + return MIG_NO_REPLY; + } + + /* Call each emulation's open routine to find the device. */ + for (i = 0; i < NUM_EMULATION; i++) + { + err = (*emulation_list[i]->open) (reply_port, reply_port_type, + mode, name, devp); + if (err != D_NO_SUCH_DEVICE) + break; + } + + return err; +} + +io_return_t +ds_device_open_new (ipc_port_t open_port, ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + const_dev_name_t name, device_t *devp) +{ + return ds_device_open (open_port, reply_port, reply_port_type, mode, name, devp); +} + +io_return_t +ds_device_close (device_t dev) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + return (dev->emul_ops->close + ? (*dev->emul_ops->close) (dev->emul_data) + : D_SUCCESS); +} + +io_return_t +ds_device_write (device_t dev, ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + recnum_t recnum, io_buf_ptr_t data, unsigned int count, + int *bytes_written) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (data == 0) + return D_INVALID_SIZE; + + if (! dev->emul_ops->write) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->write) (dev->emul_data, reply_port, + reply_port_type, mode, recnum, + data, count, bytes_written); +} + +io_return_t +ds_device_write_inband (device_t dev, ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, + dev_mode_t mode, recnum_t recnum, + const io_buf_ptr_inband_t data, unsigned count, + int *bytes_written) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (data == 0) + return D_INVALID_SIZE; + + if (! dev->emul_ops->write_inband) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->write_inband) (dev->emul_data, reply_port, + reply_port_type, mode, recnum, + data, count, bytes_written); +} + +io_return_t +ds_device_read (device_t dev, ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + recnum_t recnum, int count, io_buf_ptr_t *data, + unsigned *bytes_read) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (! 
dev->emul_ops->read) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->read) (dev->emul_data, reply_port, + reply_port_type, mode, recnum, + count, data, bytes_read); +} + +io_return_t +ds_device_read_inband (device_t dev, ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + recnum_t recnum, int count, io_buf_ptr_inband_t data, + unsigned *bytes_read) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (! dev->emul_ops->read_inband) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->read_inband) (dev->emul_data, reply_port, + reply_port_type, mode, recnum, + count, data, bytes_read); +} + +io_return_t +ds_device_set_status (device_t dev, dev_flavor_t flavor, + dev_status_t status, mach_msg_type_number_t status_count) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (! dev->emul_ops->set_status) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->set_status) (dev->emul_data, flavor, status, + status_count); +} + +io_return_t +ds_device_get_status (device_t dev, dev_flavor_t flavor, dev_status_t status, + mach_msg_type_number_t *status_count) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (! dev->emul_ops->get_status) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->get_status) (dev->emul_data, flavor, status, + status_count); +} + +io_return_t +ds_device_set_filter (device_t dev, ipc_port_t receive_port, int priority, + filter_t *filter, unsigned filter_count) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (! dev->emul_ops->set_filter) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->set_filter) (dev->emul_data, receive_port, + priority, filter, filter_count); +} + +io_return_t +ds_device_map (device_t dev, vm_prot_t prot, vm_offset_t offset, + vm_size_t size, ipc_port_t *pager, boolean_t unmap) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (! dev->emul_ops->map) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->map) (dev->emul_data, prot, + offset, size, pager, unmap); +} + +/* TODO: missing deregister support */ +io_return_t +ds_device_intr_register (device_t dev, int id, + int flags, ipc_port_t receive_port) +{ +#if defined(MACH_XEN) + return D_INVALID_OPERATION; +#else /* MACH_XEN */ + kern_return_t err; + mach_device_t mdev; + + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + mdev = dev->emul_data; + + /* No flag is defined for now */ + if (flags != 0) + return D_INVALID_OPERATION; + + /* Must be called on the irq device only */ + if (! name_equal(mdev->dev_ops->d_name, 3, "irq")) + return D_INVALID_OPERATION; + + user_intr_t *e = insert_intr_entry (&irqtab, id, receive_port); + if (!e) + return D_NO_MEMORY; + + // TODO detect when the port get destroyed because the driver crashes and + // restart, to replace it when the same device driver calls it again. + err = install_user_intr_handler (&irqtab, id, flags, e); + if (err == D_SUCCESS) + { + /* If the port is installed successfully, increase its reference by 1. + * Thus, the port won't be destroyed after its task is terminated. 
*/ + ip_reference (receive_port); + } + return err; +#endif /* MACH_XEN */ +} + +kern_return_t +ds_device_intr_ack (device_t dev, ipc_port_t receive_port) +{ +#if defined(MACH_XEN) + return D_INVALID_OPERATION; +#else /* MACH_XEN */ + mach_device_t mdev; + kern_return_t ret; + + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + mdev = dev->emul_data; + + /* Must be called on the irq device only */ + if (! name_equal(mdev->dev_ops->d_name, 3, "irq")) + return D_INVALID_OPERATION; + + ret = irq_acknowledge(receive_port); + + if (ret == D_SUCCESS) + ipc_port_release_send(receive_port); + + return ret; +#endif /* MACH_XEN */ +} + +boolean_t +ds_notify (mach_msg_header_t *msg) +{ + if (msg->msgh_id == MACH_NOTIFY_NO_SENDERS) + { + device_t dev; + mach_no_senders_notification_t *ns; + + ns = (mach_no_senders_notification_t *) msg; + dev = dev_port_lookup((ipc_port_t) ns->not_header.msgh_remote_port); + assert(dev); + if (dev->emul_ops->no_senders) + (*dev->emul_ops->no_senders) (ns); + return TRUE; + } + + printf ("ds_notify: strange notification %d\n", msg->msgh_id); + return FALSE; +} + +io_return_t +ds_device_write_trap (device_t dev, dev_mode_t mode, + rpc_recnum_t recnum, rpc_vm_offset_t data, rpc_vm_size_t count) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (! dev->emul_ops->write_trap) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->write_trap) (dev->emul_data, + mode, recnum, data, count); +} + +io_return_t +ds_device_writev_trap (device_t dev, dev_mode_t mode, + rpc_recnum_t recnum, rpc_io_buf_vec_t *iovec, rpc_vm_size_t count) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return D_NO_SUCH_DEVICE; + + if (! dev->emul_ops->writev_trap) + return D_INVALID_OPERATION; + + return (*dev->emul_ops->writev_trap) (dev->emul_data, + mode, recnum, iovec, count); +} + +void +device_reference (device_t dev) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return; + + if (dev->emul_ops->reference) + (*dev->emul_ops->reference) (dev->emul_data); +} + +void +device_deallocate (device_t dev) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == DEVICE_NULL) + return; + + if (dev->emul_ops->dealloc) + (*dev->emul_ops->dealloc) (dev->emul_data); +} + +/* + * What follows is the interface for the native Mach devices. + */ + +static ipc_port_t +mach_convert_device_to_port (mach_device_t device) +{ + ipc_port_t port; + + if (! device) + return IP_NULL; + + device_lock(device); + + if (device->state == DEV_STATE_OPEN) + port = ipc_port_make_send(device->port); + else + port = IP_NULL; + + device_unlock(device); + + mach_device_deallocate(device); + + return port; +} + +static io_return_t +device_open(const ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, + dev_mode_t mode, + const char * name, + device_t *device_p) +{ + mach_device_t device; + kern_return_t result; + io_req_t ior; + ipc_port_t notify; + + /* + * Find the device. + */ + device = device_lookup(name); + if (device == MACH_DEVICE_NULL) + return (D_NO_SUCH_DEVICE); + + /* + * If the device is being opened or closed, + * wait for that operation to finish. 
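+ * thread_sleep() releases the device lock while waiting, so the
+ * lock must be retaken before the state is examined again.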
+ */ + device_lock(device); + while (device->state == DEV_STATE_OPENING || + device->state == DEV_STATE_CLOSING) { + device->io_wait = TRUE; + thread_sleep((event_t)device, simple_lock_addr(device->lock), TRUE); + device_lock(device); + } + + /* + * If the device is already open, increment the open count + * and return. + */ + if (device->state == DEV_STATE_OPEN) { + + if (device->flag & D_EXCL_OPEN) { + /* + * Cannot open a second time. + */ + device_unlock(device); + mach_device_deallocate(device); + return (D_ALREADY_OPEN); + } + + device->open_count++; + device_unlock(device); + *device_p = &device->dev; + return (D_SUCCESS); + /* + * Return deallocates device reference while acquiring + * port. + */ + } + + /* + * Allocate the device port and register the device before + * opening it. + */ + device->state = DEV_STATE_OPENING; + device_unlock(device); + + /* + * Allocate port, keeping a reference for it. + */ + device->port = ipc_port_alloc_kernel(); + if (device->port == IP_NULL) { + device_lock(device); + device->state = DEV_STATE_INIT; + device->port = IP_NULL; + if (device->io_wait) { + device->io_wait = FALSE; + thread_wakeup((event_t)device); + } + device_unlock(device); + mach_device_deallocate(device); + return (KERN_RESOURCE_SHORTAGE); + } + + dev_port_enter(device); + + /* + * Request no-senders notifications on device port. + */ + notify = ipc_port_make_sonce(device->port); + ip_lock(device->port); + ipc_port_nsrequest(device->port, 1, notify, ¬ify); + assert(notify == IP_NULL); + + /* + * Open the device. + */ + io_req_alloc(ior, 0); + + ior->io_device = device; + ior->io_unit = device->dev_number; + ior->io_op = IO_OPEN | IO_CALL; + ior->io_mode = mode; + ior->io_error = 0; + ior->io_done = ds_open_done; + ior->io_reply_port = reply_port; + ior->io_reply_port_type = reply_port_type; + + result = (*device->dev_ops->d_open)(device->dev_number, (int)mode, ior); + if (result == D_IO_QUEUED) + return (MIG_NO_REPLY); + + /* + * Return result via ds_open_done. + */ + ior->io_error = result; + (void) ds_open_done(ior); + + io_req_free(ior); + + return (MIG_NO_REPLY); /* reply already sent */ +} + +boolean_t +ds_open_done(const io_req_t ior) +{ + kern_return_t result; + mach_device_t device; + + device = ior->io_device; + result = ior->io_error; + + if (result != D_SUCCESS) { + /* + * Open failed. Deallocate port and device. + */ + dev_port_remove(device); + ipc_port_dealloc_kernel(device->port); + device->port = IP_NULL; + + device_lock(device); + device->state = DEV_STATE_INIT; + if (device->io_wait) { + device->io_wait = FALSE; + thread_wakeup((event_t)device); + } + device_unlock(device); + + mach_device_deallocate(device); + device = MACH_DEVICE_NULL; + } + else { + /* + * Open succeeded. + */ + device_lock(device); + device->state = DEV_STATE_OPEN; + device->open_count = 1; + if (device->io_wait) { + device->io_wait = FALSE; + thread_wakeup((event_t)device); + } + device_unlock(device); + + /* donate device reference to get port */ + } + /* + * Must explicitly convert device to port, since + * device_reply interface is built as 'user' side + * (thus cannot get translation). + */ + if (IP_VALID(ior->io_reply_port)) { + (void) ds_device_open_reply(ior->io_reply_port, + ior->io_reply_port_type, + result, + mach_convert_device_to_port(device)); + } else if (device) + mach_device_deallocate(device); + + return (TRUE); +} + +static io_return_t +device_close(void *dev) +{ + mach_device_t device = dev; + + device_lock(device); + + /* + * If device will remain open, do nothing. 
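+	 * (Opens are counted: a device opened N times is really shut
+	 * down only by the Nth close.  For example, after two
+	 * device_open calls on the same device, open_count is 2 and
+	 * the first device_close merely drops it back to 1.)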
+ */
+	if (--device->open_count > 0) {
+	    device_unlock(device);
+	    return (D_SUCCESS);
+	}
+
+	/*
+	 * If device is being closed, do nothing.
+	 */
+	if (device->state == DEV_STATE_CLOSING) {
+	    device_unlock(device);
+	    return (D_SUCCESS);
+	}
+
+	/*
+	 * Mark device as closing, to prevent new IO.
+	 * Outstanding IO will still be in progress.
+	 */
+	device->state = DEV_STATE_CLOSING;
+	device_unlock(device);
+
+	/*
+	 * ? wait for IO to end ?
+	 *   only if device wants to
+	 */
+
+	/*
+	 * Remove the device-port association.
+	 */
+	dev_port_remove(device);
+	ipc_port_dealloc_kernel(device->port);
+
+	/*
+	 * Close the device
+	 */
+	(*device->dev_ops->d_close)(device->dev_number, 0);
+
+	/*
+	 * Finally mark it closed.  If someone else is trying
+	 * to open it, the open can now proceed.
+	 */
+	device_lock(device);
+	device->state = DEV_STATE_INIT;
+	if (device->io_wait) {
+	    device->io_wait = FALSE;
+	    thread_wakeup((event_t)device);
+	}
+	device_unlock(device);
+
+	return (D_SUCCESS);
+}
+
+/*
+ * Write to a device.
+ */
+static io_return_t
+device_write(void *dev,
+	     const ipc_port_t reply_port,
+	     mach_msg_type_name_t reply_port_type,
+	     dev_mode_t mode,
+	     recnum_t recnum,
+	     const io_buf_ptr_t data,
+	     unsigned int data_count,
+	     int *bytes_written)
+{
+	mach_device_t device = dev;
+	io_req_t ior;
+	io_return_t result;
+
+	if (device->state != DEV_STATE_OPEN)
+	    return (D_NO_SUCH_DEVICE);
+
+	/*
+	 * XXX Need logic to reject ridiculously big requests.
+	 */
+
+	/* XXX note that a CLOSE may proceed at any point */
+
+	/*
+	 * Package the write request for the device driver
+	 */
+	io_req_alloc(ior, data_count);
+
+	ior->io_device = device;
+	ior->io_unit = device->dev_number;
+	ior->io_op = IO_WRITE | IO_CALL;
+	ior->io_mode = mode;
+	ior->io_recnum = recnum;
+	ior->io_data = data;
+	ior->io_count = data_count;
+	ior->io_total = data_count;
+	ior->io_alloc_size = 0;
+	ior->io_residual = 0;
+	ior->io_error = 0;
+	ior->io_done = ds_write_done;
+	ior->io_reply_port = reply_port;
+	ior->io_reply_port_type = reply_port_type;
+	ior->io_copy = VM_MAP_COPY_NULL;
+
+	/*
+	 * The ior keeps an extra reference for the device.
+	 */
+	mach_device_reference(device);
+
+	/*
+	 * And do the write ...
+	 *
+	 * device_write_dealloc returns false if there's more
+	 * to do; it has updated the ior appropriately and expects
+	 * its caller to reinvoke it on the device.
+	 */
+
+	do {
+
+	    result = (*device->dev_ops->d_write)(device->dev_number, ior);
+
+	    /*
+	     * If the IO was queued, delay reply until it is finished.
+	     */
+	    if (result == D_IO_QUEUED)
+		return (MIG_NO_REPLY);
+
+	    /*
+	     * Discard the local mapping of the data.
+	     */
+
+	} while (!device_write_dealloc(ior));
+
+	/*
+	 * Return the number of bytes actually written.
+	 */
+	*bytes_written = ior->io_total - ior->io_residual;
+
+	/*
+	 * Remove the extra reference.
+	 */
+	mach_device_deallocate(device);
+
+	io_req_free(ior);
+	return (result);
+}
+
+/*
+ * Write to a device, but memory is in message.
+ */
+static io_return_t
+device_write_inband(void *dev,
+		    const ipc_port_t reply_port,
+		    mach_msg_type_name_t reply_port_type,
+		    dev_mode_t mode,
+		    recnum_t recnum,
+		    const io_buf_ptr_inband_t data,
+		    unsigned int data_count,
+		    int *bytes_written)
+{
+	mach_device_t device = dev;
+	io_req_t ior;
+	io_return_t result;
+
+	if (device->state != DEV_STATE_OPEN)
+	    return (D_NO_SUCH_DEVICE);
+
+	/* XXX note that a CLOSE may proceed at any point */
+
+	/*
+	 * Package the write request for the device driver.
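+	 *
+	 * (Unlike device_write above, the data arrived inline in the
+	 * IPC message, so there is no vm_map_copy object and no
+	 * deallocation loop.  Illustrative client-side call, buffer
+	 * hypothetical:
+	 *
+	 *	char buf[] = "hello";
+	 *	int written;
+	 *	device_write_inband(dev, 0, 0, buf, sizeof buf, &written);
+	 * )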
+ */ + io_req_alloc(ior, 0); + + ior->io_device = device; + ior->io_unit = device->dev_number; + ior->io_op = IO_WRITE | IO_CALL | IO_INBAND; + ior->io_mode = mode; + ior->io_recnum = recnum; + ior->io_data = (io_buf_ptr_t)data; + ior->io_count = data_count; + ior->io_total = data_count; + ior->io_alloc_size = 0; + ior->io_residual = 0; + ior->io_error = 0; + ior->io_done = ds_write_done; + ior->io_reply_port = reply_port; + ior->io_reply_port_type = reply_port_type; + + /* + * The ior keeps an extra reference for the device. + */ + mach_device_reference(device); + + /* + * And do the write. + */ + result = (*device->dev_ops->d_write)(device->dev_number, ior); + + /* + * If the IO was queued, delay reply until it is finished. + */ + if (result == D_IO_QUEUED) + return (MIG_NO_REPLY); + + /* + * Return the number of bytes actually written. + */ + *bytes_written = ior->io_total - ior->io_residual; + + /* + * Remove the extra reference. + */ + mach_device_deallocate(device); + + io_req_free(ior); + return (result); +} + +/* + * Wire down incoming memory to give to device. + */ +kern_return_t +device_write_get( + io_req_t ior, + boolean_t *wait) +{ + vm_map_copy_t io_copy; + vm_offset_t new_addr; + kern_return_t result; + int bsize; + vm_size_t min_size; + + /* + * By default, caller does not have to wait. + */ + *wait = FALSE; + + /* + * Nothing to do if no data. + */ + if (ior->io_count == 0) + return (KERN_SUCCESS); + + /* + * Loaned iors already have valid data. + */ + if (ior->io_op & IO_LOANED) + return (KERN_SUCCESS); + + /* + * Inband case. + */ + if (ior->io_op & IO_INBAND) { + assert(ior->io_count <= sizeof (io_buf_ptr_inband_t)); + new_addr = kmem_cache_alloc(&io_inband_cache); + memcpy((void*)new_addr, ior->io_data, ior->io_count); + ior->io_data = (io_buf_ptr_t)new_addr; + ior->io_alloc_size = sizeof (io_buf_ptr_inband_t); + + return (KERN_SUCCESS); + } + + /* + * Figure out how much data to move this time. If the device + * won't return a block size, then we have to do the whole + * request in one shot (ditto if this is a block fragment), + * otherwise, move at least one block's worth. + */ + result = (*ior->io_device->dev_ops->d_dev_info)( + ior->io_device->dev_number, + D_INFO_BLOCK_SIZE, + &bsize); + + if (result != KERN_SUCCESS || ior->io_count < (vm_size_t) bsize) + min_size = (vm_size_t) ior->io_count; + else + min_size = (vm_size_t) bsize; + + /* + * Map the pages from this page list into memory. + * io_data records location of data. + * io_alloc_size is the vm size of the region to deallocate. + */ + io_copy = (vm_map_copy_t) ior->io_data; + result = kmem_io_map_copyout(device_io_map, + (vm_offset_t*)&ior->io_data, &new_addr, + &ior->io_alloc_size, io_copy, min_size); + if (result != KERN_SUCCESS) + return (result); + + if ((ior->io_data + ior->io_count) > + (((char *)new_addr) + ior->io_alloc_size)) { + + /* + * Operation has to be split. Reset io_count for how + * much we can do this time. + */ + assert(vm_map_copy_has_cont(io_copy)); + assert(ior->io_count == io_copy->size); + ior->io_count = ior->io_alloc_size - + (ior->io_data - ((char *)new_addr)); + + /* + * Caller must wait synchronously. + */ + ior->io_op &= ~IO_CALL; + *wait = TRUE; + } + + ior->io_copy = io_copy; /* vm_map_copy to discard */ + return (KERN_SUCCESS); +} + +/* + * Clean up memory allocated for IO. 
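+ *
+ * (Large out-of-band writes arrive as a vm_map_copy page list with a
+ * continuation, so only part of the data may be mapped at a time; in
+ * that case this routine returns FALSE, meaning "the ior has been
+ * reset to the next portion of the request -- invoke d_write again",
+ * which is how the loops in device_write and ds_write_done use it.)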
+ */ +boolean_t +device_write_dealloc(io_req_t ior) +{ + vm_map_copy_t new_copy = VM_MAP_COPY_NULL; + vm_map_copy_t io_copy; + kern_return_t result; + vm_offset_t size_to_do; + int bsize; + + if (ior->io_alloc_size == 0) + return (TRUE); + + /* + * Inband case. + */ + if (ior->io_op & IO_INBAND) { + kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data); + + return (TRUE); + } + + if ((io_copy = ior->io_copy) == VM_MAP_COPY_NULL) + return (TRUE); + + /* + * To prevent a possible deadlock with the default pager, + * we have to release space in the device_io_map before + * we allocate any memory. (Which vm_map_copy_invoke_cont + * might do.) See the discussion in mach_device_init. + */ + + kmem_io_map_deallocate(device_io_map, + trunc_page(ior->io_data), + ior->io_alloc_size); + + if (vm_map_copy_has_cont(io_copy)) { + + /* + * Remember how much is left, then + * invoke or abort the continuation. + */ + size_to_do = io_copy->size - ior->io_count; + if (ior->io_error == 0) { + vm_map_copy_invoke_cont(io_copy, &new_copy, &result); + } + else { + vm_map_copy_abort_cont(io_copy); + result = KERN_FAILURE; + } + + if (result == KERN_SUCCESS && new_copy != VM_MAP_COPY_NULL) { + int res; + + /* + * We have a new continuation, reset the ior to + * represent the remainder of the request. Must + * adjust the recnum because drivers assume + * that the residual is zero. + */ + ior->io_op &= ~IO_DONE; + ior->io_op |= IO_CALL; + + res = (*ior->io_device->dev_ops->d_dev_info)( + ior->io_device->dev_number, + D_INFO_BLOCK_SIZE, + &bsize); + + if (res != D_SUCCESS) + panic("device_write_dealloc: No block size"); + + ior->io_recnum += ior->io_count/bsize; + ior->io_count = new_copy->size; + } + else { + + /* + * No continuation. Add amount we didn't get + * to into residual. + */ + ior->io_residual += size_to_do; + } + } + + /* + * Clean up the state for the IO that just completed. + */ + vm_map_copy_discard(ior->io_copy); + ior->io_copy = VM_MAP_COPY_NULL; + ior->io_data = (char *) new_copy; + + /* + * Return FALSE if there's more IO to do. + */ + + return(new_copy == VM_MAP_COPY_NULL); +} + +/* + * Send write completion message to client, and discard the data. + */ +boolean_t +ds_write_done(const io_req_t ior) +{ + /* + * device_write_dealloc discards the data that has been + * written, but may decide that there is more to write. + */ + while (!device_write_dealloc(ior)) { + io_return_t result; + mach_device_t device; + + /* + * More IO to do -- invoke it. + */ + device = ior->io_device; + result = (*device->dev_ops->d_write)(device->dev_number, ior); + + /* + * If the IO was queued, return FALSE -- not done yet. + */ + if (result == D_IO_QUEUED) + return (FALSE); + } + + /* + * Now the write is really complete. Send reply. + */ + + if (IP_VALID(ior->io_reply_port)) { + (void) (*((ior->io_op & IO_INBAND) ? + ds_device_write_reply_inband : + ds_device_write_reply))(ior->io_reply_port, + ior->io_reply_port_type, + ior->io_error, + (int) (ior->io_total - + ior->io_residual)); + } + mach_device_deallocate(ior->io_device); + + return (TRUE); +} + +/* + * Read from a device. 
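+ *
+ * (The data is returned out-of-line as a VM copy object mapped into
+ * the caller's task, which the caller is expected to vm_deallocate.
+ * Illustrative client-side sketch, sizes hypothetical:
+ *
+ *	io_buf_ptr_t data;
+ *	mach_msg_type_number_t count;
+ *	device_read(dev, 0, 0, 1024, &data, &count);
+ *	... use data[0 .. count-1] ...
+ *	vm_deallocate(mach_task_self(), (vm_offset_t) data, count);
+ * )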
+ */ +static io_return_t +device_read(void *dev, + const ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, + dev_mode_t mode, + recnum_t recnum, + int bytes_wanted, + io_buf_ptr_t *data, + unsigned int *data_count) +{ + mach_device_t device = dev; + io_req_t ior; + io_return_t result; + + if (device->state != DEV_STATE_OPEN) + return (D_NO_SUCH_DEVICE); + + /* XXX note that a CLOSE may proceed at any point */ + + /* + * There must be a reply port. + */ + if (!IP_VALID(reply_port)) { + printf("ds_* invalid reply port\n"); + SoftDebugger("ds_* reply_port"); + return (MIG_NO_REPLY); /* no sense in doing anything */ + } + + /* + * Package the read request for the device driver + */ + io_req_alloc(ior, 0); + + ior->io_device = device; + ior->io_unit = device->dev_number; + ior->io_op = IO_READ | IO_CALL; + ior->io_mode = mode; + ior->io_recnum = recnum; + ior->io_data = 0; /* driver must allocate data */ + ior->io_count = bytes_wanted; + ior->io_alloc_size = 0; /* no data allocated yet */ + ior->io_residual = 0; + ior->io_error = 0; + ior->io_done = ds_read_done; + ior->io_reply_port = reply_port; + ior->io_reply_port_type = reply_port_type; + + /* + * The ior keeps an extra reference for the device. + */ + mach_device_reference(device); + + /* + * And do the read. + */ + result = (*device->dev_ops->d_read)(device->dev_number, ior); + + /* + * If the IO was queued, delay reply until it is finished. + */ + if (result == D_IO_QUEUED) + return (MIG_NO_REPLY); + + /* + * Return result via ds_read_done. + */ + ior->io_error = result; + (void) ds_read_done(ior); + io_req_free(ior); + + return (MIG_NO_REPLY); /* reply has already been sent. */ +} + +/* + * Read from a device, but return the data 'inband.' + */ +static io_return_t +device_read_inband(void *dev, + const ipc_port_t reply_port, + mach_msg_type_name_t reply_port_type, + dev_mode_t mode, + recnum_t recnum, + int bytes_wanted, + char *data, + unsigned int *data_count) +{ + mach_device_t device = dev; + io_req_t ior; + io_return_t result; + + if (device->state != DEV_STATE_OPEN) + return (D_NO_SUCH_DEVICE); + + /* XXX note that a CLOSE may proceed at any point */ + + /* + * There must be a reply port. + */ + if (!IP_VALID(reply_port)) { + printf("ds_* invalid reply port\n"); + SoftDebugger("ds_* reply_port"); + return (MIG_NO_REPLY); /* no sense in doing anything */ + } + + /* + * Package the read for the device driver + */ + io_req_alloc(ior, 0); + + ior->io_device = device; + ior->io_unit = device->dev_number; + ior->io_op = IO_READ | IO_CALL | IO_INBAND; + ior->io_mode = mode; + ior->io_recnum = recnum; + ior->io_data = 0; /* driver must allocate data */ + ior->io_count = + ((bytes_wanted < sizeof(io_buf_ptr_inband_t)) ? + bytes_wanted : sizeof(io_buf_ptr_inband_t)); + ior->io_alloc_size = 0; /* no data allocated yet */ + ior->io_residual = 0; + ior->io_error = 0; + ior->io_done = ds_read_done; + ior->io_reply_port = reply_port; + ior->io_reply_port_type = reply_port_type; + + /* + * The ior keeps an extra reference for the device. + */ + mach_device_reference(device); + + /* + * Do the read. + */ + result = (*device->dev_ops->d_read)(device->dev_number, ior); + + /* + * If the io was queued, delay reply until it is finished. + */ + if (result == D_IO_QUEUED) + return (MIG_NO_REPLY); + + /* + * Return result, via ds_read_done. + */ + ior->io_error = result; + (void) ds_read_done(ior); + io_req_free(ior); + + return (MIG_NO_REPLY); /* reply has already been sent. */ +} + +/* + * Allocate wired-down memory for device read. 
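+ *
+ * (A driver's d_read routine typically calls this first and then
+ * fills ior->io_data -- compare kmsgread in device/kmsg.c in this
+ * same change:
+ *
+ *	err = device_read_alloc(ior, ior->io_count);
+ *	if (err != KERN_SUCCESS)
+ *	    return err;
+ *	... copy the data, set ior->io_residual ...
+ *	return D_SUCCESS;
+ * )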
+ */ +kern_return_t device_read_alloc( + io_req_t ior, + vm_size_t size) +{ + vm_offset_t addr; + kern_return_t kr; + + /* + * Nothing to do if no data. + */ + if (ior->io_count == 0) + return (KERN_SUCCESS); + + if (ior->io_op & IO_INBAND) { + ior->io_data = (io_buf_ptr_t) kmem_cache_alloc(&io_inband_cache); + ior->io_alloc_size = sizeof(io_buf_ptr_inband_t); + } else { + size = round_page(size); + kr = kmem_alloc(kernel_map, &addr, size); + if (kr != KERN_SUCCESS) + return (kr); + + ior->io_data = (io_buf_ptr_t) addr; + ior->io_alloc_size = size; + } + + return (KERN_SUCCESS); +} + +boolean_t ds_read_done(const io_req_t ior) +{ + vm_offset_t start_data, end_data; + vm_offset_t start_sent, end_sent; + vm_size_t size_read; + + if (ior->io_error) + size_read = 0; + else + size_read = ior->io_count - ior->io_residual; + + start_data = (vm_offset_t)ior->io_data; + end_data = start_data + size_read; + + start_sent = (ior->io_op & IO_INBAND) ? start_data : + trunc_page(start_data); + end_sent = (ior->io_op & IO_INBAND) ? + start_data + ior->io_alloc_size : round_page(end_data); + + /* + * Zero memory that the device did not fill. + */ + if (start_sent < start_data) + memset((void *)start_sent, 0, start_data - start_sent); + if (end_sent > end_data) + memset((void *)end_data, 0, end_sent - end_data); + + + /* + * Touch the data being returned, to mark it dirty. + * If the pages were filled by DMA, the pmap module + * may think that they are clean. + */ + { + vm_offset_t touch; + int c; + + for (touch = start_sent; touch < end_sent; touch += PAGE_SIZE) { + c = *(volatile char *)touch; + *(volatile char *)touch = c; + } + } + + /* + * Send the data to the reply port - this + * unwires and deallocates it. + */ + if (ior->io_op & IO_INBAND) { + (void)ds_device_read_reply_inband(ior->io_reply_port, + ior->io_reply_port_type, + ior->io_error, + (char *) start_data, + size_read); + } else { + vm_map_copy_t copy; + kern_return_t kr; + + kr = vm_map_copyin_page_list(kernel_map, start_data, + size_read, TRUE, TRUE, + ©, FALSE); + + if (kr != KERN_SUCCESS) + panic("read_done: vm_map_copyin_page_list failed"); + + (void)ds_device_read_reply(ior->io_reply_port, + ior->io_reply_port_type, + ior->io_error, + (char *) copy, + size_read); + } + + /* + * Free any memory that was allocated but not sent. 
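+	 * (Out-of-band replies are page-granular: start_sent and
+	 * end_sent were rounded to page boundaries above, so the whole
+	 * pages between end_sent and the rounded end of the allocation
+	 * are returned to kernel_map here.)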
+ */ + if (ior->io_count != 0) { + if (ior->io_op & IO_INBAND) { + if (ior->io_alloc_size > 0) + kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data); + } else { + vm_offset_t end_alloc; + + end_alloc = start_sent + round_page(ior->io_alloc_size); + if (end_alloc > end_sent) + (void) vm_deallocate(kernel_map, + end_sent, + end_alloc - end_sent); + } + } + + mach_device_deallocate(ior->io_device); + + return (TRUE); +} + +static io_return_t +device_set_status( + void *dev, + dev_flavor_t flavor, + dev_status_t status, + mach_msg_type_number_t status_count) +{ + mach_device_t device = dev; + if (device->state != DEV_STATE_OPEN) + return (D_NO_SUCH_DEVICE); + + /* XXX note that a CLOSE may proceed at any point */ + + return ((*device->dev_ops->d_setstat)(device->dev_number, + flavor, + status, + status_count)); +} + +static io_return_t +mach_device_get_status( + void *dev, + dev_flavor_t flavor, + dev_status_t status, /* pointer to OUT array */ + mach_msg_type_number_t *status_count) /* out */ +{ + mach_device_t device = dev; + if (device->state != DEV_STATE_OPEN) + return (D_NO_SUCH_DEVICE); + + /* XXX note that a CLOSE may proceed at any point */ + + return ((*device->dev_ops->d_getstat)(device->dev_number, + flavor, + status, + status_count)); +} + +static io_return_t +device_set_filter(void *dev, + const ipc_port_t receive_port, + int priority, + filter_t filter[], + unsigned int filter_count) +{ + mach_device_t device = dev; + if (device->state != DEV_STATE_OPEN) + return (D_NO_SUCH_DEVICE); + + /* XXX note that a CLOSE may proceed at any point */ + + /* + * Request is absurd if no receive port is specified. + */ + if (!IP_VALID(receive_port)) + return (D_INVALID_OPERATION); + + return ((*device->dev_ops->d_async_in)(device->dev_number, + receive_port, + priority, + filter, + filter_count)); +} + +static io_return_t +device_map( + void *dev, + vm_prot_t protection, + vm_offset_t offset, + vm_size_t size, + ipc_port_t *pager, /* out */ + boolean_t unmap) /* ? */ +{ + mach_device_t device = dev; + if (protection & ~VM_PROT_ALL) + return (KERN_INVALID_ARGUMENT); + + if (device->state != DEV_STATE_OPEN) + return (D_NO_SUCH_DEVICE); + + /* XXX note that a CLOSE may proceed at any point */ + + return (device_pager_setup(device, protection, offset, size, + (mach_port_t*)pager)); +} + +/* + * Doesn't do anything (yet). + */ +static void +ds_no_senders(mach_no_senders_notification_t *notification) +{ + printf("ds_no_senders called! device_port=0x%zx count=%d\n", + notification->not_header.msgh_remote_port, + notification->not_count); +} + +/* Shall be taken at splio only */ +def_simple_lock_irq_data(static, io_done_list_lock) /* Lock for... */ +queue_head_t io_done_list; + +#define splio splsched /* XXX must block ALL io devices */ + +void iodone(io_req_t ior) +{ + spl_t s; + + /* + * If this ior was loaned to us, return it directly. + */ + if (ior->io_op & IO_LOANED) { + (*ior->io_done)(ior); + return; + } + /* + * If !IO_CALL, some thread is waiting for this. Must lock + * structure to interlock correctly with iowait(). Else can + * toss on queue for io_done thread to call completion. 
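+	 *
+	 * Illustrative driver-side sketches of the two styles:
+	 *
+	 *   asynchronous (IO_CALL set):  an interrupt handler calls
+	 *	iodone(ior);
+	 *   and the io_done thread later runs ior->io_done, freeing
+	 *   the request if that returns TRUE.
+	 *
+	 *   synchronous (IO_CALL clear):  the initiator queues the ior
+	 *   and calls iowait(ior); iodone merely sets IO_DONE and
+	 *   wakes the sleeper.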
+ */ + s = splio(); + if ((ior->io_op & IO_CALL) == 0) { + ior_lock(ior); + ior->io_op |= IO_DONE; + ior->io_op &= ~IO_WANTED; + ior_unlock(ior); + thread_wakeup((event_t)ior); + } else { + ior->io_op |= IO_DONE; + simple_lock_nocheck(&io_done_list_lock.slock); + enqueue_tail(&io_done_list, (queue_entry_t)ior); + thread_wakeup((event_t)&io_done_list); + simple_unlock_nocheck(&io_done_list_lock.slock); + } + splx(s); +} + +static void __attribute__ ((noreturn)) io_done_thread_continue(void) +{ + for (;;) { + spl_t s; + io_req_t ior; + +#if defined (LINUX_DEV) && defined (CONFIG_INET) + free_skbuffs (); +#endif + s = simple_lock_irq(&io_done_list_lock); + while ((ior = (io_req_t)dequeue_head(&io_done_list)) != 0) { + simple_unlock_irq(s, &io_done_list_lock); + + if ((*ior->io_done)(ior)) { + /* + * IO done - free io_req_elt + */ + io_req_free(ior); + } + /* else routine has re-queued it somewhere */ + + s = simple_lock_irq(&io_done_list_lock); + } + + assert_wait(&io_done_list, FALSE); + simple_unlock_irq(s, &io_done_list_lock); + counter(c_io_done_thread_block++); + thread_block(io_done_thread_continue); + } +} + +void io_done_thread(void) +{ + /* + * Set thread privileges and highest priority. + */ + current_thread()->vm_privilege = 1; + stack_privilege(current_thread()); + thread_set_own_priority(0); + + io_done_thread_continue(); + /*NOTREACHED*/ +} + +#define DEVICE_IO_MAP_SIZE (16 * 1024 * 1024) + +static void mach_device_trap_init(void); /* forward */ + +void mach_device_init(void) +{ + vm_offset_t device_io_min, device_io_max; + + queue_init(&io_done_list); + simple_lock_init_irq(&io_done_list_lock); + + kmem_submap(device_io_map, kernel_map, &device_io_min, &device_io_max, + DEVICE_IO_MAP_SIZE); + + /* + * If the kernel receives many device_write requests, the + * device_io_map might run out of space. To prevent + * device_write_get from failing in this case, we enable + * wait_for_space on the map. This causes kmem_io_map_copyout + * to block until there is sufficient space. + * (XXX Large writes may be starved by small writes.) + * + * There is a potential deadlock problem with this solution, + * if a device_write from the default pager has to wait + * for the completion of a device_write which needs to wait + * for memory allocation. Hence, once device_write_get + * allocates space in device_io_map, no blocking memory + * allocations should happen until device_write_dealloc + * frees the space. (XXX A large write might starve + * a small write from the default pager.) + */ + device_io_map->wait_for_space = TRUE; + + kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband", + sizeof(io_buf_ptr_inband_t), 0, NULL, 0); + + mach_device_trap_init(); +} + +void iowait(io_req_t ior) +{ + spl_t s; + + s = splio(); + ior_lock(ior); + while ((ior->io_op&IO_DONE)==0) { + assert_wait((event_t)ior, FALSE); + ior_unlock(ior); + thread_block((void (*)()) 0); + ior_lock(ior); + } + ior_unlock(ior); + splx(s); +} + + +/* + * Device trap support. + */ + +/* + * Memory Management + * + * This currently has a single pool of 2k wired buffers + * since we only handle writes to an ethernet device. + * Should be more general. + */ +#define IOTRAP_REQSIZE 2048 + +struct kmem_cache io_trap_cache; + +/* + * Initialization. Called from mach_device_init(). + */ +static void +mach_device_trap_init(void) +{ + kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0, + NULL, 0); +} + +/* + * Allocate an io_req_t. + * Currently allocates from io_trap_cache. + * + * Could have lists of different size caches. 
+ * Could call a device-specific routine. + */ +static io_req_t +ds_trap_req_alloc(const mach_device_t device, vm_size_t data_size) +{ + return (io_req_t) kmem_cache_alloc(&io_trap_cache); +} + +/* + * Called by iodone to release ior. + */ +static boolean_t +ds_trap_write_done(const io_req_t ior) +{ + mach_device_t dev; + + dev = ior->io_device; + + /* + * Should look at reply port and maybe send a message. + */ + kmem_cache_free(&io_trap_cache, (vm_offset_t) ior); + + /* + * Give up device reference from ds_write_trap. + */ + mach_device_deallocate(dev); + return TRUE; +} + +/* + * Like device_write except that data is in user space. + */ +static io_return_t +device_write_trap (mach_device_t device, dev_mode_t mode, + rpc_recnum_t recnum, rpc_vm_offset_t data, rpc_vm_size_t data_count) +{ + io_req_t ior; + io_return_t result; + + if (device->state != DEV_STATE_OPEN) + return (D_NO_SUCH_DEVICE); + + /* XXX note that a CLOSE may proceed at any point */ + + /* + * Get a buffer to hold the ioreq. + */ + ior = ds_trap_req_alloc(device, data_count); + + /* + * Package the write request for the device driver. + */ + + ior->io_device = device; + ior->io_unit = device->dev_number; + ior->io_op = IO_WRITE | IO_CALL | IO_LOANED; + ior->io_mode = mode; + ior->io_recnum = recnum; + ior->io_data = (io_buf_ptr_t) + (vm_offset_t)ior + sizeof(struct io_req); + ior->io_count = data_count; + ior->io_total = data_count; + ior->io_alloc_size = 0; + ior->io_residual = 0; + ior->io_error = 0; + ior->io_done = ds_trap_write_done; + ior->io_reply_port = IP_NULL; /* XXX */ + ior->io_reply_port_type = 0; /* XXX */ + + /* + * Copy the data from user space. + */ + if (data_count > 0) + copyin((void*)(vm_offset_t)data, ior->io_data, data_count); + + /* + * The ior keeps an extra reference for the device. + */ + mach_device_reference(device); + + /* + * And do the write. + */ + result = (*device->dev_ops->d_write)(device->dev_number, ior); + + /* + * If the IO was queued, delay reply until it is finished. + */ + if (result == D_IO_QUEUED) + return (MIG_NO_REPLY); + + /* + * Remove the extra reference. + */ + mach_device_deallocate(device); + + kmem_cache_free(&io_trap_cache, (vm_offset_t) ior); + return (result); +} + +static io_return_t +device_writev_trap (mach_device_t device, dev_mode_t mode, + rpc_recnum_t recnum, rpc_io_buf_vec_t *iovec, rpc_vm_size_t iocount) +{ + io_req_t ior; + io_return_t result; + io_buf_vec_t stack_iovec[16]; /* XXX */ + vm_size_t data_count; + unsigned i; + + if (device->state != DEV_STATE_OPEN) + return (D_NO_SUCH_DEVICE); + + /* XXX note that a CLOSE may proceed at any point */ + + /* + * Copyin user addresses. + */ + if (iocount > 16) + return KERN_INVALID_VALUE; /* lame */ + + for (data_count = 0, i=0; i<iocount; i++) { + rpc_io_buf_vec_t riov; + if (copyin(iovec + i, &riov, sizeof(riov))) + return KERN_INVALID_ARGUMENT; + stack_iovec[i].data = riov.data; + stack_iovec[i].count = riov.count; + data_count += stack_iovec[i].count; + } + + /* + * Get a buffer to hold the ioreq. + */ + ior = ds_trap_req_alloc(device, data_count); + + /* + * Package the write request for the device driver. 
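+	 *
+	 * (IO_LOANED trap requests are self-contained: the gathered
+	 * user data is copied just past the struct io_req in the same
+	 * 2K io_trap_cache buffer -- see io_data below -- so completion
+	 * frees a single object in ds_trap_write_done and no reply
+	 * message is sent.)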
+ */ + + ior->io_device = device; + ior->io_unit = device->dev_number; + ior->io_op = IO_WRITE | IO_CALL | IO_LOANED; + ior->io_mode = mode; + ior->io_recnum = recnum; + ior->io_data = (io_buf_ptr_t) + (vm_offset_t)ior + sizeof(struct io_req); + ior->io_count = data_count; + ior->io_total = data_count; + ior->io_alloc_size = 0; + ior->io_residual = 0; + ior->io_error = 0; + ior->io_done = ds_trap_write_done; + ior->io_reply_port = IP_NULL; /* XXX */ + ior->io_reply_port_type = 0; /* XXX */ + + /* + * Copy the data from user space. + */ + if (data_count > 0) { + vm_offset_t p; + + p = (vm_offset_t) ior->io_data; + for (i = 0; i < iocount; i++) { + copyin((void *) stack_iovec[i].data, + (void *) p, + stack_iovec[i].count); + p += stack_iovec[i].count; + } + } + + /* + * The ior keeps an extra reference for the device. + */ + mach_device_reference(device); + + /* + * And do the write. + */ + result = (*device->dev_ops->d_write)(device->dev_number, ior); + + /* + * If the IO was queued, delay reply until it is finished. + */ + if (result == D_IO_QUEUED) + return (MIG_NO_REPLY); + + /* + * Remove the extra reference. + */ + mach_device_deallocate(device); + + kmem_cache_free(&io_trap_cache, (vm_offset_t) ior); + return (result); +} + +struct device_emulation_ops mach_device_emulation_ops = +{ + (void*) mach_device_reference, + (void*) mach_device_deallocate, + (void*) mach_convert_device_to_port, + device_open, + device_close, + device_write, + device_write_inband, + device_read, + device_read_inband, + device_set_status, + mach_device_get_status, + device_set_filter, + device_map, + ds_no_senders, + (void*) device_write_trap, + (void*) device_writev_trap +}; diff --git a/device/ds_routines.h b/device/ds_routines.h new file mode 100644 index 0000000..48d85dd --- /dev/null +++ b/device/ds_routines.h @@ -0,0 +1,86 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/89 + * + * Device service utility routines. + */ + +#ifndef DS_ROUTINES_H +#define DS_ROUTINES_H + +#include <vm/vm_map.h> +#include <device/device_types.h> +#include <device/io_req.h> +#include <mach/machine/vm_types.h> + +/* + * Map for device IO memory. 
+ */ +extern vm_map_t device_io_map; + +extern queue_head_t io_done_list; + +kern_return_t device_read_alloc(io_req_t, vm_size_t); +kern_return_t device_write_get(io_req_t, boolean_t *); +boolean_t device_write_dealloc(io_req_t); +void device_reference(device_t); + +boolean_t ds_notify(mach_msg_header_t *msg); +boolean_t ds_open_done(io_req_t); +boolean_t ds_read_done(io_req_t); +boolean_t ds_write_done(io_req_t); + +void iowait (io_req_t ior); + +kern_return_t device_pager_setup( + const mach_device_t device, + int prot, + vm_offset_t offset, + vm_size_t size, + mach_port_t *pager); + +extern void mach_device_init(void); +extern void dev_lookup_init(void); +extern void device_pager_init(void); +extern void io_done_thread(void) __attribute__ ((noreturn)); + +io_return_t ds_device_write_trap( + device_t dev, + dev_mode_t mode, + rpc_recnum_t recnum, + rpc_vm_offset_t data, + rpc_vm_size_t count); + +io_return_t ds_device_writev_trap( + device_t dev, + dev_mode_t mode, + rpc_recnum_t recnum, + rpc_io_buf_vec_t *iovec, + rpc_vm_size_t count); + +#endif /* DS_ROUTINES_H */ diff --git a/device/if_ether.h b/device/if_ether.h new file mode 100644 index 0000000..91d4d9a --- /dev/null +++ b/device/if_ether.h @@ -0,0 +1,52 @@ +/* + * Mach Operating System + * Copyright (c) 1991 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Ethernet definitions. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/89 + */ + +#ifndef _DEVICE_IF_ETHER_H_ +#define _DEVICE_IF_ETHER_H_ + +#include <sys/types.h> + +/* + * Structure of a 10Mb/s Ethernet header. + */ +struct ether_header { + u_char ether_dhost[6]; + u_char ether_shost[6]; + u_short ether_type; +}; + +#ifdef KERNEL +extern char * ether_sprintf(const u_char *); +#endif /* KERNEL */ + +#endif /*_DEVICE_IF_ETHER_H_*/ diff --git a/device/if_hdr.h b/device/if_hdr.h new file mode 100644 index 0000000..e53983b --- /dev/null +++ b/device/if_hdr.h @@ -0,0 +1,165 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Taken from (bsd)net/if.h. Modified for MACH kernel. + */ +/* + * Copyright (c) 1982, 1986 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. The name of the Laboratory may not be used to endorse or promote + * products derived from this software without specific prior written + * permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)if.h 7.3 (Berkeley) 6/27/88 + */ + +#ifndef _IF_HDR_ +#define _IF_HDR_ + +#include <kern/lock.h> +#include <kern/queue.h> + +/* + * Queue for network output and filter input. + */ +struct ifqueue { + queue_head_t ifq_head; /* queue of io_req_t */ + int ifq_len; /* length of queue */ + int ifq_maxlen; /* maximum length of queue */ + int ifq_drops; /* number of packets dropped + because queue full */ + decl_simple_lock_data(, + ifq_lock) /* lock for queue and counters */ +}; + +/* + * Header for network interface drivers. + */ +struct ifnet { + short if_unit; /* unit number */ + short if_flags; /* up/down, broadcast, etc. 
*/ + short if_timer; /* time until if_watchdog called */ + short if_mtu; /* maximum transmission unit */ + short if_header_size; /* length of header */ + short if_header_format; /* format of hardware header */ + short if_address_size; /* length of hardware address */ + short if_alloc_size; /* size of read buffer to allocate */ + char *if_address; /* pointer to hardware address */ + struct ifqueue if_snd; /* output queue */ + queue_head_t if_rcv_port_list; /* input filter list */ + queue_head_t if_snd_port_list; /* output filter list */ + decl_simple_lock_data(, + if_rcv_port_list_lock) /* lock for input filter list */ + decl_simple_lock_data(, + if_snd_port_list_lock) /* lock for output filter list */ +/* statistics */ + int if_ipackets; /* packets received */ + int if_ierrors; /* input errors */ + int if_opackets; /* packets sent */ + int if_oerrors; /* output errors */ + int if_collisions; /* collisions on csma interfaces */ + int if_rcvdrops; /* packets received but dropped */ +}; + +#define IFF_UP 0x0001 /* interface is up */ +#define IFF_BROADCAST 0x0002 /* interface can broadcast */ +#define IFF_DEBUG 0x0004 /* turn on debugging */ +#define IFF_LOOPBACK 0x0008 /* is a loopback net */ +#define IFF_POINTOPOINT 0x0010 /* point-to-point link */ +#define IFF_RUNNING 0x0040 /* resources allocated */ +#define IFF_NOARP 0x0080 /* no address resolution protocol */ +#define IFF_PROMISC 0x0100 /* receive all packets */ +#define IFF_ALLMULTI 0x0200 /* receive all multicast packets */ +#define IFF_BRIDGE 0x0100 /* support token ring routing field */ +#define IFF_SNAP 0x0200 /* support extended sap header */ + +/* internal flags only: */ +#define IFF_CANTCHANGE (IFF_BROADCAST | IFF_POINTOPOINT | IFF_RUNNING) + +/* + * Output queues (ifp->if_snd) + * have queues of messages stored on ifqueue structures. Entries + * are added to and deleted from these structures by these macros, which + * should be called with ipl raised to splimp(). + * XXX locking XXX + */ + +#define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) +#define IF_DROP(ifq) ((ifq)->ifq_drops++) +#define IF_ENQUEUE(ifq, ior) { \ + simple_lock(&(ifq)->ifq_lock); \ + enqueue_tail(&(ifq)->ifq_head, (queue_entry_t)ior); \ + (ifq)->ifq_len++; \ + simple_unlock(&(ifq)->ifq_lock); \ +} +#define IF_PREPEND(ifq, ior) { \ + simple_lock(&(ifq)->ifq_lock); \ + enqueue_head(&(ifq)->ifq_head, (queue_entry_t)ior); \ + (ifq)->ifq_len++; \ + simple_unlock(&(ifq)->ifq_lock); \ +} + +#define IF_DEQUEUE(ifq, ior) { \ + simple_lock(&(ifq)->ifq_lock); \ + if (((ior) = (io_req_t)dequeue_head(&(ifq)->ifq_head)) != 0) \ + (ifq)->ifq_len--; \ + simple_unlock(&(ifq)->ifq_lock); \ +} + +#define IFQ_MAXLEN 50 + +#define IFQ_INIT(ifq) { \ + queue_init(&(ifq)->ifq_head); \ + simple_lock_init(&(ifq)->ifq_lock); \ + (ifq)->ifq_len = 0; \ + (ifq)->ifq_maxlen = IFQ_MAXLEN; \ + (ifq)->ifq_drops = 0; \ +} + +#define IFNET_SLOWHZ 1 /* granularity is 1 second */ + +#endif /* _IF_HDR_ */ diff --git a/device/intr.c b/device/intr.c new file mode 100644 index 0000000..9035c03 --- /dev/null +++ b/device/intr.c @@ -0,0 +1,372 @@ +/* + * Copyright (c) 2010, 2011, 2016, 2019 Free Software Foundation, Inc. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * THE FREE SOFTWARE FOUNDATIONALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + */ + +#include <kern/assert.h> +#include <device/intr.h> +#include <device/device_types.h> +#include <device/device_port.h> +#include <device/notify.h> +#include <kern/printf.h> +#include <machine/spl.h> +#include <machine/irq.h> +#include <ipc/ipc_space.h> + +#ifndef MACH_XEN + +queue_head_t main_intr_queue; +static boolean_t deliver_intr (int id, ipc_port_t dst_port); + +#ifndef LINUX_DEV +#define SA_SHIRQ 0x04000000 + +struct intr_list { + user_intr_t *user_intr; + unsigned long flags; + struct intr_list *next; +}; +static struct intr_list *user_intr_handlers[NINTR]; +#endif + +static user_intr_t * +search_intr (struct irqdev *dev, ipc_port_t dst_port) +{ + user_intr_t *e; + queue_iterate (dev->intr_queue, e, user_intr_t *, chain) + { + if (e->dst_port == dst_port) + return e; + } + return NULL; +} + + +/* + * Interrupt handling logic: + * + * interrupt.S raises spl (thus IF cleared) + * interrupt.S EOI + * interrupt.S calls the handler + * - for pure in-kernel handlers, they do whatever they want with IF cleared. + * - when a userland handler is registered, queue_intr masks the irq. + * interrupt.S lowers spl with splx_cli, thus IF still cleared + * iret, that also sets IF + * + * later on, (irq_acknowledge), userland acks the IRQ, that unmasks the irq + */ +kern_return_t +irq_acknowledge (ipc_port_t receive_port) +{ + user_intr_t *e; + kern_return_t ret = 0; + + spl_t s = splhigh (); + e = search_intr (&irqtab, receive_port); + + if (!e) + { + printf("didn't find user intr for interrupt !?\n"); + ret = KERN_INVALID_ARGUMENT; + } + else + { + if (!e->n_unacked) + ret = D_INVALID_OPERATION; + else + e->n_unacked--; + } + splx (s); + + if (ret) + return ret; + + __enable_irq (irqtab.irq[e->id]); + + return D_SUCCESS; +} + +/* This function can only be used in the interrupt handler. */ +static void +queue_intr (struct irqdev *dev, int id, user_intr_t *e) +{ + /* Until userland has handled the IRQ in the driver, we have to keep it + * disabled. Level-triggered interrupts would keep raising otherwise. */ + __disable_irq (dev->irq[id]); + + spl_t s = splhigh (); + e->n_unacked++; + e->interrupts++; + dev->tot_num_intr++; + splx (s); + + thread_wakeup ((event_t) &intr_thread); +} + +int +deliver_user_intr (struct irqdev *dev, int id, user_intr_t *e) +{ + /* The reference of the port was increased + * when the port was installed. If the reference is 1, it means + * the port was deallocated and we should clean after it. */ + if (!e->dst_port || e->dst_port->ip_references == 1) + { + thread_wakeup ((event_t) &intr_thread); + return 0; + } + else + { + queue_intr (dev, id, e); + return 1; + } +} + +/* insert an interrupt entry in the queue. + * This entry exists in the queue until + * the corresponding interrupt port is removed.*/ +user_intr_t * +insert_intr_entry (struct irqdev *dev, int id, ipc_port_t dst_port) +{ + user_intr_t *e, *new, *ret; + int free = 0; + + new = (user_intr_t *) kalloc (sizeof (*new)); + if (new == NULL) + return NULL; + + /* check whether the intr entry has been in the queue. 
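+ * A given (irq id, delivery port) pair may be registered only once:
+ * on a duplicate we return NULL below, which ds_device_intr_register
+ * turns into D_NO_MEMORY for its caller.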
*/ + spl_t s = splhigh (); + e = search_intr (dev, dst_port); + if (e) + { + printf ("the interrupt entry for irq[%d] and port %p has already been inserted\n", id, dst_port); + free = 1; + ret = NULL; + goto out; + } + printf("irq handler [%d]: new delivery port %p entry %p\n", id, dst_port, new); + ret = new; + new->id = id; + new->dst_port = dst_port; + new->interrupts = 0; + new->n_unacked = 0; + + queue_enter (dev->intr_queue, new, user_intr_t *, chain); +out: + splx (s); + if (free) + kfree ((vm_offset_t) new, sizeof (*new)); + return ret; +} + +#ifndef LINUX_DEV + +static void +user_irq_handler (int id) +{ + struct intr_list *handler; + struct intr_list **prev = &user_intr_handlers[id]; + user_intr_t *e; + spl_t s; + + s = splhigh(); + + for (handler = *prev; handler; handler = handler->next) + { + e = handler->user_intr; + if (!deliver_user_intr(&irqtab, id, e)) + { + /* We failed to deliver this interrupt, remove handler from list */ + *prev = handler->next; + } + prev = &handler->next; + } + splx(s); +} + +int +install_user_intr_handler (struct irqdev *dev, int id, unsigned long flags, + user_intr_t *user_intr) +{ + unsigned int irq = dev->irq[id]; + struct intr_list **head = &user_intr_handlers[id]; + struct intr_list *new, *old = *head; + spl_t s; + + flags |= SA_SHIRQ; + + assert (irq < NINTR); + + /* Don't allow overriding hardclock/kdintr etc */ + if ((ivect[irq] != user_irq_handler) && (ivect[irq] != intnull)) + { + printf("You can't have this interrupt\n"); + return D_ALREADY_OPEN; + } + + if (old) + { + if (!(old->flags & flags & SA_SHIRQ)) + { + printf ("Cannot share irq\n"); + return D_ALREADY_OPEN; + } + } + + new = (struct intr_list *)kalloc (sizeof (struct intr_list)); + new->user_intr = user_intr; + new->flags = flags; + + s = splhigh(); + new->next = *head; + *head = new; + ivect[irq] = user_irq_handler; + iunit[irq] = (int)irq; + unmask_irq (irq); + splx(s); + + return D_SUCCESS; +} +#endif + +void +intr_thread (void) +{ + user_intr_t *e; + int id; + ipc_port_t dst_port; + queue_init (&main_intr_queue); + + for (;;) + { + assert_wait ((event_t) &intr_thread, FALSE); + /* Make sure we wake up from times to times to check for aborted processes */ + thread_set_timeout (hz); + spl_t s = splhigh (); + + /* Now check for interrupts */ + int del; + do + { + del = 0; + + queue_iterate (&main_intr_queue, e, user_intr_t *, chain) + { + /* The reference of the port was increased + * when the port was installed. If the reference is 1, it means + * the port was deallocated and we should clean after it. */ + if (e->dst_port->ip_references == 1) + { + clear_wait (current_thread (), 0, 0); + del = 1; + break; + } + + if (e->interrupts) + { + clear_wait (current_thread (), 0, 0); + id = e->id; + dst_port = e->dst_port; + e->interrupts--; + irqtab.tot_num_intr--; + + splx (s); + deliver_intr (id, dst_port); + s = splhigh (); + } + } + + /* remove the entry without dest port from the queue and free it. */ + if (del) + { + /* + * We clear unacked irqs now, so the Linux handling can trigger, + * and we will cleanup later after the Linux handler is cleared. 
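+ * Re-enabling also matters for level-triggered lines: each unacked
+ * delivery left its line masked by queue_intr, so the loop below
+ * undoes every outstanding mask before the dead entry is dropped
+ * from the queue.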
+ */ + assert (!queue_empty (&main_intr_queue)); + queue_remove (&main_intr_queue, e, user_intr_t *, chain); + + printf ("irq handler [%d]: release a dead delivery port %p entry %p\n", e->id, e->dst_port, e); + ipc_port_release (e->dst_port); + e->dst_port = MACH_PORT_NULL; + + if (e->n_unacked) + printf("irq handler [%d]: still %d unacked irqs in entry %p\n", e->id, e->n_unacked, e); + while (e->n_unacked) + { + __enable_irq (irqtab.irq[e->id]); + e->n_unacked--; + } + +#if 0 +#ifndef LINUX_DEV + // TODO: remove from the action list +#else + // FIXME: with the Linux irq handler we don't actually control the action list +#endif + splx (s); + kfree ((vm_offset_t) e, sizeof (*e)); + s = splhigh (); +#endif + } + } + while (del || irqtab.tot_num_intr); + splx (s); + thread_block (NULL); + } +} + +static boolean_t +deliver_intr (int id, ipc_port_t dst_port) +{ + ipc_kmsg_t kmsg; + device_intr_notification_t *n; + mach_port_t dest = (mach_port_t) dst_port; + + if (dest == MACH_PORT_NULL) + return FALSE; + + kmsg = ikm_alloc(sizeof *n); + if (kmsg == IKM_NULL) + return FALSE; + + ikm_init(kmsg, sizeof *n); + n = (device_intr_notification_t *) &kmsg->ikm_header; + + mach_msg_header_t *m = &n->intr_header; + mach_msg_type_t *t = &n->intr_type; + + m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0); + m->msgh_size = sizeof *n; + m->msgh_seqno = DEVICE_NOTIFY_MSGH_SEQNO; + m->msgh_local_port = MACH_PORT_NULL; + m->msgh_remote_port = MACH_PORT_NULL; + m->msgh_id = DEVICE_INTR_NOTIFY; + + t->msgt_name = MACH_MSG_TYPE_INTEGER_32; + t->msgt_size = 32; + t->msgt_number = 1; + t->msgt_inline = TRUE; + t->msgt_longform = FALSE; + t->msgt_deallocate = FALSE; + t->msgt_unused = 0; + + n->intr_header.msgh_remote_port = dest; + n->id = id; + + ipc_port_copy_send (dst_port); + ipc_mqueue_send_always(kmsg); + + return TRUE; +} + +#endif /* MACH_XEN */ diff --git a/device/intr.h b/device/intr.h new file mode 100644 index 0000000..cd3e0bc --- /dev/null +++ b/device/intr.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2010, 2011, 2019 Free Software Foundation, Inc. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * THE FREE SOFTWARE FOUNDATIONALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. THE FREE SOFTWARE FOUNDATION DISCLAIMS ANY LIABILITY OF ANY KIND + * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ */ + +#ifndef __INTR_H__ +#define __INTR_H__ + +#ifndef MACH_XEN + +#include <mach/kern_return.h> +#include <mach/port.h> +#include <kern/queue.h> +#include <ipc/ipc_port.h> +#include <device/conf.h> + +#define DEVICE_NOTIFY_MSGH_SEQNO 0 + +#include <sys/types.h> + +struct irqdev; +#include <machine/irq.h> + +typedef struct { + queue_chain_t chain; + int interrupts; /* Number of interrupts occurred since last run of intr_thread */ + int n_unacked; /* Number of times irqs were disabled for this */ + ipc_port_t dst_port; /* Notification port */ + int id; /* Mapping to machine dependent irq_t array elem */ +} user_intr_t; + +struct irqdev { + char *name; + void (*irqdev_ack)(struct irqdev *dev, int id); + + queue_head_t *intr_queue; + int tot_num_intr; /* Total number of unprocessed interrupts */ + + /* Machine dependent */ + irq_t irq[NINTR]; +}; + +extern queue_head_t main_intr_queue; +extern int install_user_intr_handler (struct irqdev *dev, int id, unsigned long flags, user_intr_t *e); +extern int deliver_user_intr (struct irqdev *dev, int id, user_intr_t *e); +extern user_intr_t *insert_intr_entry (struct irqdev *dev, int id, ipc_port_t receive_port); + +void intr_thread (void); +kern_return_t irq_acknowledge (ipc_port_t receive_port); + +#endif /* MACH_XEN */ + +#endif diff --git a/device/io_req.h b/device/io_req.h new file mode 100644 index 0000000..fb63696 --- /dev/null +++ b/device/io_req.h @@ -0,0 +1,145 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 10/88 + */ + +#ifndef _IO_REQ_ +#define _IO_REQ_ + +#include <mach/boolean.h> +#include <mach/port.h> +#include <mach/message.h> +#include <mach/vm_param.h> +#include <kern/slab.h> +#include <kern/kalloc.h> +#include <kern/lock.h> +#include <vm/vm_map.h> +#include <vm/vm_page.h> +#include <device/device_types.h> +#include <device/dev_hdr.h> + +#include <kern/macros.h> + +/* + * IO request element, queued on device for delayed replies. + */ +typedef struct io_req *io_req_t; +struct io_req { + struct io_req * io_next; /* next, ... 
*/
+	struct io_req *	io_prev;	/* prev pointers: link in done,
+					   deferred, or in-progress list */
+	mach_device_t	io_device;	/* pointer to open-device structure */
+	char *		io_dev_ptr;	/* pointer to driver structure -
+					   filled in by driver if necessary */
+	int		io_unit;	/* unit number ('minor') of device */
+	int		io_op;		/* IO operation */
+	dev_mode_t	io_mode;	/* operation mode (wait, truncate) */
+	recnum_t	io_recnum;	/* starting record number for
+					   random-access devices */
+
+	union io_un {
+	    io_buf_ptr_t data;		/* data, for IO requests */
+	} io_un;
+#define io_data io_un.data
+
+	long		io_count;	/* amount requested */
+	vm_size_t	io_alloc_size;	/* amount allocated */
+	long		io_residual;	/* amount NOT done */
+	io_return_t	io_error;	/* error code */
+	/* call when done - returns TRUE if IO really finished */
+	boolean_t	(*io_done)(io_req_t);
+	struct ipc_port	*io_reply_port;	/* reply port, for asynchronous
+					   messages */
+	mach_msg_type_name_t io_reply_port_type;
+					/* send or send-once right? */
+	struct io_req *	io_link;	/* forward link (for driver header) */
+	struct io_req *	io_rlink;	/* reverse link (for driver header) */
+	vm_map_copy_t	io_copy;	/* vm_map_copy obj. for this op. */
+	long		io_total;	/* total op size, for write */
+	decl_simple_lock_data(,io_req_lock)
+					/* Lock for this structure */
+	long		io_physrec;	/* mapping to the physical block
+					   number */
+	long		io_rectotal;	/* total number of blocks to move */
+};
+
+/*
+ * LOCKING NOTE: Operations on io_req's are in general single threaded by
+ * the invoking code, obviating the need for a lock.  The usual IO_CALL
+ * path through the code is: Initiating thread hands io_req to device driver,
+ * driver passes it to io_done thread, io_done thread sends reply message.  No
+ * locking is needed in this sequence.  Unfortunately, a synchronous wait
+ * for a buffer requires a lock to avoid problems if the wait and interrupt
+ * happen simultaneously on different processors.
+ *
+ * Shall be taken at splio only
+ */
+
+#define ior_lock(ior)	simple_lock(&(ior)->io_req_lock)
+#define ior_unlock(ior)	simple_unlock(&(ior)->io_req_lock)
+
+/*
+ * Flags and operations
+ */
+
+#define IO_WRITE	0x00000000	/* operation is write */
+#define IO_READ		0x00000001	/* operation is read */
+#define IO_OPEN		0x00000002	/* operation is open */
+#define IO_DONE		0x00000100	/* operation complete */
+#define IO_ERROR	0x00000200	/* error on operation */
+#define IO_BUSY		0x00000400	/* operation in progress */
+#define IO_WANTED	0x00000800	/* wakeup when no longer BUSY */
+#define IO_BAD		0x00001000	/* bad disk block */
+#define IO_CALL		0x00002000	/* call io_done_thread when done */
+#define IO_INBAND	0x00004000	/* mig call was inband */
+#define IO_INTERNAL	0x00008000	/* internal, device-driver specific */
+#define IO_LOANED	0x00010000	/* ior loaned by another module */
+
+#define IO_SPARE_START	0x00020000	/* start of spare flags */
+
+/*
+ * Standard completion routine for io_requests.
+ */
+void iodone(io_req_t);
+
+/*
+ * Macros to allocate and free IORs - will convert to caches later.
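+ *
+ * Typical lifecycle, sketched for orientation (hypothetical caller):
+ *
+ *	io_req_t ior;
+ *	io_req_alloc(ior, 0);
+ *	... fill in io_op, io_done, etc., hand the ior to the driver ...
+ *	... driver completes:  iodone(ior);  ...
+ *	... io_done thread:  if ((*ior->io_done)(ior)) io_req_free(ior);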
+ */ +#define io_req_alloc(ior,size) \ + MACRO_BEGIN \ + (ior) = (io_req_t)kalloc(sizeof(struct io_req)); \ + simple_lock_init(&(ior)->io_req_lock); \ + MACRO_END + +#define io_req_free(ior) \ + (kfree((vm_offset_t)(ior), sizeof(struct io_req))) + + +extern struct kmem_cache io_inband_cache; /* for inband reads */ + +#endif /* _IO_REQ_ */ diff --git a/device/kmsg.c b/device/kmsg.c new file mode 100644 index 0000000..e5b518e --- /dev/null +++ b/device/kmsg.c @@ -0,0 +1,254 @@ +/* GNU Mach Kernel Message Device. + + Copyright (C) 1998, 1999, 2007 Free Software Foundation, Inc. + + Written by OKUJI Yoshinori. + +This is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +This software is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with the software; see the file COPYING. If not, write to +the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ + +/* kmsg provides a stream interface. */ + +#include <sys/types.h> +#include <string.h> + +#include <device/conf.h> +#include <device/ds_routines.h> +#include <device/io_req.h> +#include <mach/boolean.h> +#include <kern/lock.h> +#include <device/kmsg.h> + + +#define KMSGBUFSIZE (4096) /* XXX */ + +/* Simple array for buffering messages */ +static char kmsg_buffer[KMSGBUFSIZE]; +/* Point to the offset to write */ +static int kmsg_write_offset; +/* Point to the offset to read */ +static int kmsg_read_offset; +/* I/O request queue for blocking read */ +static queue_head_t kmsg_read_queue; +/* Used for exclusive access to the device */ +static boolean_t kmsg_in_use; +/* Used for exclusive access to the routines */ +def_simple_lock_irq_data (static, kmsg_lock); +/* If already initialized or not */ +static boolean_t kmsg_init_done = FALSE; + +/* Kernel Message Initializer */ +static void +kmsginit (void) +{ + kmsg_write_offset = 0; + kmsg_read_offset = 0; + queue_init (&kmsg_read_queue); + kmsg_in_use = FALSE; + simple_lock_init_irq (&kmsg_lock); +} + +/* Kernel Message Open Handler */ +io_return_t +kmsgopen (dev_t dev, int flag, const io_req_t ior) +{ + spl_t s = simple_lock_irq (&kmsg_lock); + if (kmsg_in_use) + { + simple_unlock_irq (s, &kmsg_lock); + return D_ALREADY_OPEN; + } + + kmsg_in_use = TRUE; + + simple_unlock_irq (s, &kmsg_lock); + return D_SUCCESS; +} + +/* Kernel Message Close Handler */ +void +kmsgclose (dev_t dev, int flag) +{ + spl_t s = simple_lock_irq (&kmsg_lock); + kmsg_in_use = FALSE; + + simple_unlock_irq (s, &kmsg_lock); +} + +static boolean_t kmsg_read_done (io_req_t ior); + +/* Kernel Message Read Handler */ +io_return_t +kmsgread (dev_t dev, io_req_t ior) +{ + int err; + int amt, len; + + err = device_read_alloc (ior, ior->io_count); + if (err != KERN_SUCCESS) + return err; + + spl_t s = simple_lock_irq (&kmsg_lock); + if (kmsg_read_offset == kmsg_write_offset) + { + /* The queue is empty. 
*/ + if (ior->io_mode & D_NOWAIT) + { + simple_unlock_irq (s, &kmsg_lock); + return D_WOULD_BLOCK; + } + + ior->io_done = kmsg_read_done; + enqueue_tail (&kmsg_read_queue, (queue_entry_t) ior); + simple_unlock_irq (s, &kmsg_lock); + return D_IO_QUEUED; + } + + len = kmsg_write_offset - kmsg_read_offset; + if (len < 0) + len += KMSGBUFSIZE; + + amt = ior->io_count; + if (amt > len) + amt = len; + + if (kmsg_read_offset + amt <= KMSGBUFSIZE) + { + memcpy (ior->io_data, kmsg_buffer + kmsg_read_offset, amt); + } + else + { + int cnt; + + cnt = KMSGBUFSIZE - kmsg_read_offset; + memcpy (ior->io_data, kmsg_buffer + kmsg_read_offset, cnt); + memcpy (ior->io_data + cnt, kmsg_buffer, amt - cnt); + } + + kmsg_read_offset += amt; + if (kmsg_read_offset >= KMSGBUFSIZE) + kmsg_read_offset -= KMSGBUFSIZE; + + ior->io_residual = ior->io_count - amt; + + simple_unlock_irq (s, &kmsg_lock); + return D_SUCCESS; +} + +static boolean_t +kmsg_read_done (io_req_t ior) +{ + int amt, len; + + spl_t s = simple_lock_irq (&kmsg_lock); + if (kmsg_read_offset == kmsg_write_offset) + { + /* The queue is empty. */ + ior->io_done = kmsg_read_done; + enqueue_tail (&kmsg_read_queue, (queue_entry_t) ior); + simple_unlock_irq (s, &kmsg_lock); + return FALSE; + } + + len = kmsg_write_offset - kmsg_read_offset; + if (len < 0) + len += KMSGBUFSIZE; + + amt = ior->io_count; + if (amt > len) + amt = len; + + if (kmsg_read_offset + amt <= KMSGBUFSIZE) + { + memcpy (ior->io_data, kmsg_buffer + kmsg_read_offset, amt); + } + else + { + int cnt; + + cnt = KMSGBUFSIZE - kmsg_read_offset; + memcpy (ior->io_data, kmsg_buffer + kmsg_read_offset, cnt); + memcpy (ior->io_data + cnt, kmsg_buffer, amt - cnt); + } + + kmsg_read_offset += amt; + if (kmsg_read_offset >= KMSGBUFSIZE) + kmsg_read_offset -= KMSGBUFSIZE; + + ior->io_residual = ior->io_count - amt; + + simple_unlock_irq (s, &kmsg_lock); + ds_read_done (ior); + + return TRUE; +} + +io_return_t +kmsggetstat (dev_t dev, dev_flavor_t flavor, dev_status_t data, mach_msg_type_number_t *count) +{ + switch (flavor) + { + case DEV_GET_SIZE: + data[DEV_GET_SIZE_DEVICE_SIZE] = 0; + data[DEV_GET_SIZE_RECORD_SIZE] = 1; + *count = DEV_GET_SIZE_COUNT; + break; + + default: + return D_INVALID_OPERATION; + } + + return D_SUCCESS; +} + +/* Write to Kernel Message Buffer */ +void +kmsg_putchar (int c) +{ + io_req_t ior; + int offset; + spl_t s = -1; + + /* XXX: cninit is not called before cnputc is used. So call kmsginit + here if not initialized yet. */ + if (!kmsg_init_done) + { + kmsginit (); + kmsg_init_done = TRUE; + } + + if (spl_init) + s = simple_lock_irq (&kmsg_lock); + offset = kmsg_write_offset + 1; + if (offset == KMSGBUFSIZE) + offset = 0; + + if (offset == kmsg_read_offset) + { + /* Discard C. 
*/ + if (spl_init) + simple_unlock_irq (s, &kmsg_lock); + return; + } + + kmsg_buffer[kmsg_write_offset++] = c; + if (kmsg_write_offset == KMSGBUFSIZE) + kmsg_write_offset = 0; + + while ((ior = (io_req_t) dequeue_head (&kmsg_read_queue)) != NULL) + iodone (ior); + + if (spl_init) + simple_unlock_irq (s, &kmsg_lock); +} diff --git a/device/kmsg.h b/device/kmsg.h new file mode 100644 index 0000000..00a3505 --- /dev/null +++ b/device/kmsg.h @@ -0,0 +1,18 @@ +#ifndef _DEVICE_KMSG_H_ +#define _DEVICE_KMSG_H_ 1 + + +#include <sys/types.h> + +#include <device/device_types.h> +#include <device/io_req.h> + +io_return_t kmsgopen (dev_t dev, int flag, io_req_t ior); +void kmsgclose (dev_t dev, int flag); +io_return_t kmsgread (dev_t dev, io_req_t ior); +io_return_t kmsggetstat (dev_t dev, dev_flavor_t flavor, + dev_status_t data, mach_msg_type_number_t *count); +void kmsg_putchar (int c); + + +#endif /* !_DEVICE_KMSG_H_ */ diff --git a/device/memory_object_reply.cli b/device/memory_object_reply.cli new file mode 100644 index 0000000..f2cd480 --- /dev/null +++ b/device/memory_object_reply.cli @@ -0,0 +1,27 @@ +/* + * Copyright (c) 1994 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Bryan Ford, University of Utah CSL + */ +/* This is a client presentation file. */ + +#define KERNEL_USER 1 + +#include <mach/mach.defs> diff --git a/device/net_io.c b/device/net_io.c new file mode 100644 index 0000000..ee9435d --- /dev/null +++ b/device/net_io.c @@ -0,0 +1,2153 @@ + /* + * Mach Operating System + * Copyright (c) 1993-1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. 
Golub, Carnegie Mellon University
+ *	Date:	3/98
+ *
+ *	Network IO.
+ *
+ *	Packet filter code taken from vaxif/enet.c written at
+ *	CMU and Stanford.
+ */
+
+/*
+ *	Note:  don't depend on anything in this file.
+ *	It may change a lot real soon.	-cmaeda 11 June 1993
+ */
+
+#include <sys/types.h>
+#include <string.h>
+
+#include <device/net_status.h>
+#include <machine/machspl.h>		/* spl definitions */
+#include <device/net_io.h>
+#include <device/if_hdr.h>
+#include <device/io_req.h>
+#include <device/ds_routines.h>
+
+#include <mach/boolean.h>
+#include <mach/vm_param.h>
+
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_mqueue.h>
+
+#include <kern/counters.h>
+#include <kern/debug.h>
+#include <kern/lock.h>
+#include <kern/printf.h>
+#include <kern/queue.h>
+#include <kern/sched_prim.h>
+#include <kern/slab.h>
+#include <kern/thread.h>
+
+#if	MACH_TTD
+#include <ttd/ttd_stub.h>
+#endif	/* MACH_TTD */
+
+#if	MACH_TTD
+int	kttd_async_counter = 0;
+#endif	/* MACH_TTD */
+
+
+/*
+ * Packet Buffer Management
+ *
+ * This module manages a private pool of kmsg buffers.
+ */
+
+/*
+ * List of net kmsgs queued to be sent to users.
+ * Messages can be high priority or low priority.
+ * The network thread processes high priority messages first.
+ */
+def_simple_lock_data(static, net_queue_lock)
+boolean_t	net_thread_awake = FALSE;
+struct ipc_kmsg_queue	net_queue_high;
+int		net_queue_high_size = 0;
+int		net_queue_high_max = 0;		/* for debugging */
+struct ipc_kmsg_queue	net_queue_low;
+int		net_queue_low_size = 0;
+int		net_queue_low_max = 0;		/* for debugging */
+
+/*
+ * List of net kmsgs that can be touched at interrupt level.
+ * If it is empty, we will also steal low priority messages.
+ */
+def_simple_lock_data(static, net_queue_free_lock)
+struct ipc_kmsg_queue	net_queue_free;
+int		net_queue_free_size = 0;	/* on free list */
+int		net_queue_free_max = 0;		/* for debugging */
+
+/*
+ * This value is critical to network performance.
+ * At least this many buffers should be sitting in net_queue_free.
+ * If this is set too small, we will drop network packets.
+ * Even a low drop rate (<1%) can cause severe network throughput problems.
+ * We add one to net_queue_free_min for every filter.
+ */
+int		net_queue_free_min = 3;
+
+int		net_queue_free_hits = 0;	/* for debugging */
+int		net_queue_free_steals = 0;	/* for debugging */
+int		net_queue_free_misses = 0;	/* for debugging */
+
+int		net_kmsg_send_high_hits = 0;	/* for debugging */
+int		net_kmsg_send_low_hits = 0;	/* for debugging */
+int		net_kmsg_send_high_misses = 0;	/* for debugging */
+int		net_kmsg_send_low_misses = 0;	/* for debugging */
+
+int		net_thread_awaken = 0;		/* for debugging */
+int		net_ast_taken = 0;		/* for debugging */
+
+def_simple_lock_data(static, net_kmsg_total_lock)
+int		net_kmsg_total = 0;		/* total allocated */
+int		net_kmsg_max;			/* initialized below */
+
+vm_size_t	net_kmsg_size;			/* initialized below */
+
+/*
+ * We want more buffers when there aren't enough in the free queue
+ * and the low priority queue.  However, we don't want to allocate
+ * more than net_kmsg_max.
+ */
+
+#define net_kmsg_want_more()		\
+	(((net_queue_free_size + net_queue_low_size) < net_queue_free_min) && \
+	 (net_kmsg_total < net_kmsg_max))
+
+ipc_kmsg_t
+net_kmsg_get(void)
+{
+	ipc_kmsg_t kmsg;
+	spl_t s;
+
+	/*
+	 * First check the list of free buffers.
+ */ + s = splimp(); + simple_lock(&net_queue_free_lock); + kmsg = ipc_kmsg_queue_first(&net_queue_free); + if (kmsg != IKM_NULL) { + ipc_kmsg_rmqueue_first_macro(&net_queue_free, kmsg); + net_queue_free_size--; + net_queue_free_hits++; + } + simple_unlock(&net_queue_free_lock); + + if (kmsg == IKM_NULL) { + /* + * Try to steal from the low priority queue. + */ + simple_lock(&net_queue_lock); + kmsg = ipc_kmsg_queue_first(&net_queue_low); + if (kmsg != IKM_NULL) { + ipc_kmsg_rmqueue_first_macro(&net_queue_low, kmsg); + net_queue_low_size--; + net_queue_free_steals++; + } + simple_unlock(&net_queue_lock); + } + + if (kmsg == IKM_NULL) + net_queue_free_misses++; + (void) splx(s); + + if (net_kmsg_want_more() || (kmsg == IKM_NULL)) { + boolean_t awake; + + s = splimp(); + simple_lock(&net_queue_lock); + awake = net_thread_awake; + net_thread_awake = TRUE; + simple_unlock(&net_queue_lock); + (void) splx(s); + + if (!awake) + thread_wakeup((event_t) &net_thread_awake); + } + + return kmsg; +} + +void +net_kmsg_put(const ipc_kmsg_t kmsg) +{ + spl_t s; + + s = splimp(); + simple_lock(&net_queue_free_lock); + ipc_kmsg_enqueue_macro(&net_queue_free, kmsg); + if (++net_queue_free_size > net_queue_free_max) + net_queue_free_max = net_queue_free_size; + simple_unlock(&net_queue_free_lock); + (void) splx(s); +} + +void +net_kmsg_collect(void) +{ + ipc_kmsg_t kmsg; + spl_t s; + + s = splimp(); + simple_lock(&net_queue_free_lock); + while (net_queue_free_size > net_queue_free_min) { + kmsg = ipc_kmsg_dequeue(&net_queue_free); + net_queue_free_size--; + simple_unlock(&net_queue_free_lock); + (void) splx(s); + + net_kmsg_free(kmsg); + simple_lock(&net_kmsg_total_lock); + net_kmsg_total--; + simple_unlock(&net_kmsg_total_lock); + + s = splimp(); + simple_lock(&net_queue_free_lock); + } + simple_unlock(&net_queue_free_lock); + (void) splx(s); +} + +static void +net_kmsg_more(void) +{ + ipc_kmsg_t kmsg; + + /* + * Replenish net kmsg pool if low. We don't have the locks + * necessary to look at these variables, but that's OK because + * misread values aren't critical. The danger in this code is + * that while we allocate buffers, interrupts are happening + * which take buffers out of the free list. If we are not + * careful, we will sit in the loop and allocate a zillion + * buffers while a burst of packets arrives. So we count + * buffers in the low priority queue as available, because + * net_kmsg_get will make use of them, and we cap the total + * number of buffers we are willing to allocate. + */ + + while (net_kmsg_want_more()) { + simple_lock(&net_kmsg_total_lock); + net_kmsg_total++; + simple_unlock(&net_kmsg_total_lock); + kmsg = net_kmsg_alloc(); + net_kmsg_put(kmsg); + } +} + +/* + * Packet Filter Data Structures + * + * Each network interface has a set of packet filters + * that are run on incoming packets. + * + * Each packet filter may represent a single network + * session or multiple network sessions. For example, + * all application level TCP sessions would be represented + * by a single packet filter data structure. + * + * If a packet filter has a single session, we use a + * struct net_rcv_port to represent it. If the packet + * filter represents multiple sessions, we use a + * struct net_hash_header to represent it. + */ + +/* + * Each interface has a write port and a set of read ports. + * Each read port has one or more filters to determine what packets + * should go to that port. + */ + +/* + * Receive port for net, with packet filter. 
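net_kmsg_get's tail shows the wakeup-avoidance handshake in passing; the same pattern recurs in net_packet below and is explained at length before net_ast. Distilled as a sketch (illustrative; example_poke_net_thread is an invented name, the variables are the ones defined above):

	/*
	 * Illustrative single-wakeup handshake.  net_thread_awake is read
	 * and set under net_queue_lock, so exactly one waker observes the
	 * FALSE -> TRUE transition and calls thread_wakeup; later wakers
	 * know the network thread will drain the queues before sleeping.
	 */
	static void
	example_poke_net_thread(void)
	{
		boolean_t awake;
		spl_t s = splimp();

		simple_lock(&net_queue_lock);
		awake = net_thread_awake;
		net_thread_awake = TRUE;
		simple_unlock(&net_queue_lock);
		(void) splx(s);

		if (!awake)
			thread_wakeup((event_t) &net_thread_awake);
	}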
+ * This data structure by itself represents a packet + * filter for a single session. + */ +struct net_rcv_port { + queue_chain_t input; /* list of input open_descriptors */ + queue_chain_t output; /* list of output open_descriptors */ + ipc_port_t rcv_port; /* port to send packet to */ + int rcv_qlimit; /* port's qlimit */ + int rcv_count; /* number of packets received */ + int priority; /* priority for filter */ + filter_t *filter_end; /* pointer to end of filter */ + filter_t filter[NET_MAX_FILTER]; + /* filter operations */ +}; + +struct kmem_cache net_rcv_cache; /* cache of net_rcv_port structs */ + +#define NET_HASH_SIZE 256 +#define N_NET_HASH 4 +#define N_NET_HASH_KEYS 4 + +/* + * A single hash entry. + */ +struct net_hash_entry { + queue_chain_t chain; /* list of entries with same hval */ +#define he_next chain.next +#define he_prev chain.prev + ipc_port_t rcv_port; /* destination port */ + int rcv_qlimit; /* qlimit for the port */ + unsigned int keys[N_NET_HASH_KEYS]; +}; + +struct kmem_cache net_hash_entry_cache; + +/* + * This structure represents a packet filter with multiple sessions. + * + * For example, all application level TCP sessions might be + * represented by one of these structures. It looks like a + * net_rcv_port struct so that both types can live on the + * same packet filter queues. + */ +struct net_hash_header { + struct net_rcv_port rcv; + int n_keys; /* zero if not used */ + int ref_count; /* reference count */ + net_hash_entry_t table[NET_HASH_SIZE]; +} filter_hash_header[N_NET_HASH]; + +def_simple_lock_data(static,net_hash_header_lock) + +#define HASH_ITERATE(head, elt) (elt) = (net_hash_entry_t) (head); do { +#define HASH_ITERATE_END(head, elt) \ + (elt) = (net_hash_entry_t) queue_next((queue_entry_t) (elt)); \ + } while ((elt) != (head)); + +#define FILTER_ITERATE(if_port_list, fp, nextfp, chain) \ + for ((fp) = (net_rcv_port_t) queue_first(if_port_list); \ + !queue_end(if_port_list, (queue_entry_t)(fp)); \ + (fp) = (nextfp)) { \ + (nextfp) = (net_rcv_port_t) queue_next(chain); +#define FILTER_ITERATE_END } + +/* entry_p must be net_rcv_port_t or net_hash_entry_t */ +#define ENQUEUE_DEAD(dead, entry_p, chain) { \ + (entry_p)->chain.next = (queue_entry_t) (dead); \ + (dead) = (queue_entry_t)(entry_p); \ +} + +/* + * ethernet_priority: + * + * This function properly belongs in the ethernet interfaces; + * it should not be called by this module. (We get packet + * priorities as an argument to net_filter.) It is here + * to avoid massive code duplication. + * + * Returns TRUE for high-priority packets. + */ + +boolean_t ethernet_priority(const ipc_kmsg_t kmsg) +{ + unsigned char *addr = + (unsigned char *) net_kmsg(kmsg)->header; + + /* + * A simplistic check for broadcast packets. + */ + + if ((addr[0] == 0xff) && (addr[1] == 0xff) && + (addr[2] == 0xff) && (addr[3] == 0xff) && + (addr[4] == 0xff) && (addr[5] == 0xff)) + return FALSE; + else + return TRUE; +} + +mach_msg_type_t header_type = { + .msgt_name = MACH_MSG_TYPE_BYTE, + .msgt_size = 8, + .msgt_number = NET_HDW_HDR_MAX, + .msgt_inline = TRUE, + .msgt_longform = FALSE, + .msgt_deallocate = FALSE, + .msgt_unused = 0 +}; + +mach_msg_type_t packet_type = { + .msgt_name = MACH_MSG_TYPE_BYTE, + .msgt_size = 8, + .msgt_number = 0, + .msgt_inline = TRUE, + .msgt_longform = FALSE, + .msgt_deallocate = FALSE, + .msgt_unused = 0 +}; + +/* + * net_deliver: + * + * Called and returns holding net_queue_lock, at splimp. + * Dequeues a message and delivers it at spl0. + * Returns FALSE if no messages. 
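Since both filter flavors must live on the same per-interface queues, net_hash_header embeds a struct net_rcv_port as its first member. A sketch of the discrimination idiom the rest of this file relies on (example_* names are invented; the MACH_PORT_NULL convention is established in net_set_filter below):

	/*
	 * Illustrative: telling the two flavors apart on a shared queue.
	 * A net_hash_header begins with a struct net_rcv_port, so the
	 * cast is safe; the dummy entry carries MACH_PORT_NULL.
	 */
	static boolean_t
	example_is_multi_session(net_rcv_port_t infp)
	{
		return infp->rcv_port == MACH_PORT_NULL;
	}

	static net_hash_header_t
	example_as_hash_header(net_rcv_port_t infp)
	{
		/* Valid only when example_is_multi_session(infp). */
		return (net_hash_header_t) infp;
	}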
+ */ +static boolean_t net_deliver(boolean_t nonblocking) +{ + ipc_kmsg_t kmsg; + boolean_t high_priority; + struct ipc_kmsg_queue send_list; + + /* + * Pick up a pending network message and deliver it. + * Deliver high priority messages before low priority. + */ + + if ((kmsg = ipc_kmsg_dequeue(&net_queue_high)) != IKM_NULL) { + net_queue_high_size--; + high_priority = TRUE; + } else if ((kmsg = ipc_kmsg_dequeue(&net_queue_low)) != IKM_NULL) { + net_queue_low_size--; + high_priority = FALSE; + } else + return FALSE; + simple_unlock(&net_queue_lock); + (void) spl0(); + + /* + * Run the packet through the filters, + * getting back a queue of packets to send. + */ + net_filter(kmsg, &send_list); + + if (!nonblocking) { + /* + * There is a danger of running out of available buffers + * because they all get moved into the high priority queue + * or a port queue. In particular, we might need to + * allocate more buffers as we pull (previously available) + * buffers out of the low priority queue. But we can only + * allocate if we are allowed to block. + */ + net_kmsg_more(); + } + + while ((kmsg = ipc_kmsg_dequeue(&send_list)) != IKM_NULL) { + int count; + + /* + * Fill in the rest of the kmsg. + */ + count = net_kmsg(kmsg)->net_rcv_msg_packet_count; + + ikm_init_special(kmsg, IKM_SIZE_NETWORK); + + kmsg->ikm_header.msgh_bits = + MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0); + /* remember message sizes must be rounded up */ + kmsg->ikm_header.msgh_size = + (mach_msg_size_t) P2ROUND(sizeof(struct net_rcv_msg) + - sizeof net_kmsg(kmsg)->sent + - NET_RCV_MAX + count, + __alignof__ (uintptr_t)); + kmsg->ikm_header.msgh_local_port = MACH_PORT_NULL; + kmsg->ikm_header.msgh_kind = MACH_MSGH_KIND_NORMAL; + kmsg->ikm_header.msgh_id = NET_RCV_MSG_ID; + + net_kmsg(kmsg)->header_type = header_type; + net_kmsg(kmsg)->packet_type = packet_type; + net_kmsg(kmsg)->net_rcv_msg_packet_count = count; + + /* + * Send the packet to the destination port. Drop it + * if the destination port is over its backlog. + */ + + if (ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT, 0) == + MACH_MSG_SUCCESS) { + if (high_priority) + net_kmsg_send_high_hits++; + else + net_kmsg_send_low_hits++; + /* the receiver is responsible for the message now */ + } else { + if (high_priority) + net_kmsg_send_high_misses++; + else + net_kmsg_send_low_misses++; + ipc_kmsg_destroy(kmsg); + } + } + + (void) splimp(); + simple_lock(&net_queue_lock); + return TRUE; +} + +/* + * We want to deliver packets using ASTs, so we can avoid the + * thread_wakeup/thread_block needed to get to the network + * thread. However, we can't allocate memory in the AST handler, + * because memory allocation might block. Hence we have the + * network thread to allocate memory. The network thread also + * delivers packets, so it can be allocating and delivering for a + * burst. net_thread_awake is protected by net_queue_lock + * (instead of net_queue_free_lock) so that net_packet and + * net_ast can safely determine if the network thread is running. + * This prevents a race that might leave a packet sitting without + * being delivered. It is possible for net_kmsg_get to think + * the network thread is awake, and so avoid a wakeup, and then + * have the network thread sleep without allocating. The next + * net_kmsg_get will do a wakeup. + */ + +void net_ast(void) +{ + spl_t s; + + net_ast_taken++; + + /* + * If the network thread is awake, then we would + * rather deliver messages from it, because + * it can also allocate memory. 
+ */ + + s = splimp(); + simple_lock(&net_queue_lock); + while (!net_thread_awake && net_deliver(TRUE)) + continue; + + /* + * Prevent an unnecessary AST. Either the network + * thread will deliver the messages, or there are + * no messages left to deliver. + */ + + simple_unlock(&net_queue_lock); + (void) splsched(); + ast_off(cpu_number(), AST_NETWORK); + (void) splx(s); +} + +static void __attribute__ ((noreturn)) net_thread_continue(void) +{ + for (;;) { + spl_t s; + + net_thread_awaken++; + + /* + * First get more buffers. + */ + net_kmsg_more(); + + s = splimp(); + simple_lock(&net_queue_lock); + while (net_deliver(FALSE)) + continue; + + net_thread_awake = FALSE; + assert_wait(&net_thread_awake, FALSE); + simple_unlock(&net_queue_lock); + (void) splx(s); + counter(c_net_thread_block++); + thread_block(net_thread_continue); + } +} + +void net_thread(void) +{ + spl_t s; + + /* + * We should be very high priority. + */ + + thread_set_own_priority(0); + + /* + * We sleep initially, so that we don't allocate any buffers + * unless the network is really in use and they are needed. + */ + + s = splimp(); + simple_lock(&net_queue_lock); + net_thread_awake = FALSE; + assert_wait(&net_thread_awake, FALSE); + simple_unlock(&net_queue_lock); + (void) splx(s); + counter(c_net_thread_block++); + thread_block(net_thread_continue); + net_thread_continue(); + /*NOTREACHED*/ +} + +static void +reorder_queue( + queue_t first, + queue_t last) +{ + queue_entry_t prev, next; + + prev = first->prev; + next = last->next; + + prev->next = last; + next->prev = first; + + last->prev = prev; + last->next = first; + + first->next = next; + first->prev = last; +} + +/* + * Incoming packet. Header has already been moved to proper place. + * We are already at splimp. + */ +void +net_packet( + struct ifnet *ifp, + ipc_kmsg_t kmsg, + unsigned int count, + boolean_t priority) +{ + boolean_t awake; + +#if MACH_TTD + /* + * Do a quick check to see if it is a kernel TTD packet. + * + * Only check if KernelTTD is enabled, ie. the current + * device driver supports TTD, and the bootp succeeded. + */ + if (kttd_enabled && kttd_handle_async(kmsg)) { + /* + * Packet was a valid ttd packet and + * doesn't need to be passed up to filter. + * The ttd code put the used kmsg buffer + * back onto the free list. + */ + if (kttd_debug) + printf("**%x**", kttd_async_counter++); + return; + } +#endif /* MACH_TTD */ + + kmsg->ikm_header.msgh_remote_port = (mach_port_t) ifp; + net_kmsg(kmsg)->net_rcv_msg_packet_count = count; + + simple_lock(&net_queue_lock); + if (priority) { + ipc_kmsg_enqueue(&net_queue_high, kmsg); + if (++net_queue_high_size > net_queue_high_max) + net_queue_high_max = net_queue_high_size; + } else { + ipc_kmsg_enqueue(&net_queue_low, kmsg); + if (++net_queue_low_size > net_queue_low_max) + net_queue_low_max = net_queue_low_size; + } + /* + * If the network thread is awake, then we don't + * need to take an AST, because the thread will + * deliver the packet. + */ + awake = net_thread_awake; + simple_unlock(&net_queue_lock); + + if (!awake) { + spl_t s = splsched(); + ast_on(cpu_number(), AST_NETWORK); + (void) splx(s); + } +} + +int net_filter_queue_reorder = 0; /* non-zero to enable reordering */ + +/* + * Run a packet through the filters, returning a list of messages. + * We are *not* called at interrupt level. 
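net_packet is the driver-facing entry point of this module. A hypothetical receive path (example_rx and its arguments are invented; the header/packet split and the count convention, which includes the struct packet_header, follow the uses of net_rcv_msg_packet_count in this file):

	/*
	 * Hypothetical driver receive path.  If the pool is empty the
	 * frame is dropped; net_kmsg_get has already nudged the network
	 * thread to replenish the pool.
	 */
	static void
	example_rx(struct ifnet *ifp, const char *frame,
		   unsigned int frame_len, unsigned short type)
	{
		ipc_kmsg_t kmsg = net_kmsg_get();
		struct packet_header *pkt;
		unsigned int datalen = frame_len - ifp->if_header_size;

		if (kmsg == IKM_NULL)
			return;			/* no buffer: drop the frame */

		/* Hardware header into ->header ... */
		memcpy(net_kmsg(kmsg)->header, frame, ifp->if_header_size);

		/* ... payload after the packet_header in ->packet. */
		pkt = (struct packet_header *) net_kmsg(kmsg)->packet;
		memcpy((char *) (pkt + 1), frame + ifp->if_header_size,
		       datalen);
		pkt->type = type;
		pkt->length = datalen + sizeof(struct packet_header);

		/* Called at splimp; header already moved into place. */
		net_packet(ifp, kmsg, pkt->length, ethernet_priority(kmsg));
	}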
+ */ +void +net_filter(const ipc_kmsg_t kmsg, + ipc_kmsg_queue_t send_list) +{ + struct ifnet *ifp; + net_rcv_port_t infp, nextfp; + ipc_kmsg_t new_kmsg; + + net_hash_entry_t entp, *hash_headp; + ipc_port_t dest; + queue_entry_t dead_infp = (queue_entry_t) 0; + queue_entry_t dead_entp = (queue_entry_t) 0; + unsigned int ret_count; + + queue_head_t *if_port_list; + + int count = net_kmsg(kmsg)->net_rcv_msg_packet_count; + ifp = (struct ifnet *) kmsg->ikm_header.msgh_remote_port; + ipc_kmsg_queue_init(send_list); + + if (net_kmsg(kmsg)->sent) + if_port_list = &ifp->if_snd_port_list; + else + if_port_list = &ifp->if_rcv_port_list; + + /* + * Unfortunately we can't allocate or deallocate memory + * while holding these locks. And we can't drop the locks + * while examining the filter lists. + * Both locks are hold in case a filter is removed from both + * queues. + */ + simple_lock(&ifp->if_rcv_port_list_lock); + simple_lock(&ifp->if_snd_port_list_lock); + FILTER_ITERATE(if_port_list, infp, nextfp, + net_kmsg(kmsg)->sent ? &infp->output : &infp->input) + { + entp = (net_hash_entry_t) 0; + if ((infp->filter[0] & NETF_TYPE_MASK) == NETF_BPF) { + ret_count = bpf_do_filter(infp, net_kmsg(kmsg)->packet + + sizeof(struct packet_header), + count - sizeof(struct packet_header), + net_kmsg(kmsg)->header, + ifp->if_header_size, &hash_headp, + &entp); + if (entp == (net_hash_entry_t) 0) + dest = infp->rcv_port; + else + dest = entp->rcv_port; + if (ret_count) + ret_count += sizeof(struct packet_header); + } else { + ret_count = net_do_filter(infp, net_kmsg(kmsg)->packet, count, + net_kmsg(kmsg)->header); + if (ret_count) + ret_count = count; + dest = infp->rcv_port; + } + + if (ret_count) { + + /* + * Make a send right for the destination. + */ + + dest = ipc_port_copy_send(dest); + if (!IP_VALID(dest)) { + /* + * This filter is dead. We remove it from the + * filter list and set it aside for deallocation. + */ + + if (entp == (net_hash_entry_t) 0) { + if (infp->filter[0] & NETF_IN) + queue_remove(&ifp->if_rcv_port_list, infp, + net_rcv_port_t, input); + if (infp->filter[0] & NETF_OUT) + queue_remove(&ifp->if_snd_port_list, infp, + net_rcv_port_t, output); + + /* Use input only for queues of dead filters. */ + ENQUEUE_DEAD(dead_infp, infp, input); + continue; + } else { + hash_ent_remove (ifp, + (net_hash_header_t)infp, + FALSE, /* no longer used */ + hash_headp, + entp, + &dead_entp); + continue; + } + } + + /* + * Deliver copy of packet to this channel. + */ + if (ipc_kmsg_queue_empty(send_list)) { + /* + * Only receiver, so far + */ + new_kmsg = kmsg; + } else { + /* + * Other receivers - must allocate message and copy. + */ + new_kmsg = net_kmsg_get(); + if (new_kmsg == IKM_NULL) { + ipc_port_release_send(dest); + break; + } + + memcpy( + net_kmsg(new_kmsg)->packet, + net_kmsg(kmsg)->packet, + ret_count); + memcpy( + net_kmsg(new_kmsg)->header, + net_kmsg(kmsg)->header, + NET_HDW_HDR_MAX); + } + net_kmsg(new_kmsg)->net_rcv_msg_packet_count = ret_count; + new_kmsg->ikm_header.msgh_remote_port = (mach_port_t) dest; + ipc_kmsg_enqueue(send_list, new_kmsg); + + { + net_rcv_port_t prevfp; + int rcount = ++infp->rcv_count; + + /* + * See if ordering of filters is wrong + */ + if (infp->priority >= NET_HI_PRI) { +#define REORDER_PRIO(chain) \ + prevfp = (net_rcv_port_t) queue_prev(&infp->chain); \ + /* \ + * If infp is not the first element on the queue, \ + * and the previous element is at equal priority \ + * but has a lower count, then promote infp to \ + * be in front of prevfp. 
\ + */ \ + if ((queue_t)prevfp != if_port_list && \ + infp->priority == prevfp->priority) { \ + /* \ + * Threshold difference to prevent thrashing \ + */ \ + if (net_filter_queue_reorder \ + && (100 + prevfp->rcv_count < rcount)) \ + reorder_queue(&prevfp->chain, &infp->chain);\ + } + + REORDER_PRIO(input); + REORDER_PRIO(output); + + /* + * High-priority filter -> no more deliveries + */ + break; + } + } + } + } + FILTER_ITERATE_END + simple_unlock(&ifp->if_snd_port_list_lock); + simple_unlock(&ifp->if_rcv_port_list_lock); + + /* + * Deallocate dead filters. + */ + if (dead_infp != 0) + net_free_dead_infp(dead_infp); + if (dead_entp != 0) + net_free_dead_entp(dead_entp); + + if (ipc_kmsg_queue_empty(send_list)) { + /* Not sent - recycle */ + net_kmsg_put(kmsg); + } +} + +boolean_t +net_do_filter(net_rcv_port_t infp, + const char * data, + unsigned int data_count, + const char * header) +{ + int stack[NET_FILTER_STACK_DEPTH+1]; + int *sp; + filter_t *fp, *fpe; + unsigned int op, arg; + + /* + * The filter accesses the header and data + * as unsigned short words. + */ + data_count /= sizeof(unsigned short); + +#define data_word ((unsigned short *)data) +#define header_word ((unsigned short *)header) + + sp = &stack[NET_FILTER_STACK_DEPTH]; + fp = &infp->filter[1]; /* filter[0] used for flags */ + fpe = infp->filter_end; + + *sp = TRUE; + + while (fp < fpe) { + arg = *fp++; + op = NETF_OP(arg); + arg = NETF_ARG(arg); + + switch (arg) { + case NETF_NOPUSH: + arg = *sp++; + break; + case NETF_PUSHZERO: + arg = 0; + break; + case NETF_PUSHLIT: + arg = *fp++; + break; + case NETF_PUSHIND: + arg = *sp++; + if (arg >= data_count) + return FALSE; + arg = data_word[arg]; + break; + case NETF_PUSHHDRIND: + arg = *sp++; + if (arg >= NET_HDW_HDR_MAX/sizeof(unsigned short)) + return FALSE; + arg = header_word[arg]; + break; + default: + if (arg >= NETF_PUSHSTK) { + arg = sp[arg - NETF_PUSHSTK]; + } + else if (arg >= NETF_PUSHHDR) { + arg = header_word[arg - NETF_PUSHHDR]; + } + else { + arg -= NETF_PUSHWORD; + if (arg >= data_count) + return FALSE; + arg = data_word[arg]; + } + break; + + } + switch (op) { + case NETF_OP(NETF_NOP): + *--sp = arg; + break; + case NETF_OP(NETF_AND): + *sp &= arg; + break; + case NETF_OP(NETF_OR): + *sp |= arg; + break; + case NETF_OP(NETF_XOR): + *sp ^= arg; + break; + case NETF_OP(NETF_EQ): + *sp = (*sp == arg); + break; + case NETF_OP(NETF_NEQ): + *sp = (*sp != arg); + break; + case NETF_OP(NETF_LT): + *sp = (*sp < arg); + break; + case NETF_OP(NETF_LE): + *sp = (*sp <= arg); + break; + case NETF_OP(NETF_GT): + *sp = (*sp > arg); + break; + case NETF_OP(NETF_GE): + *sp = (*sp >= arg); + break; + case NETF_OP(NETF_COR): + if (*sp++ == arg) + return (TRUE); + break; + case NETF_OP(NETF_CAND): + if (*sp++ != arg) + return (FALSE); + break; + case NETF_OP(NETF_CNOR): + if (*sp++ == arg) + return (FALSE); + break; + case NETF_OP(NETF_CNAND): + if (*sp++ != arg) + return (TRUE); + break; + case NETF_OP(NETF_LSH): + *sp <<= arg; + break; + case NETF_OP(NETF_RSH): + *sp >>= arg; + break; + case NETF_OP(NETF_ADD): + *sp += arg; + break; + case NETF_OP(NETF_SUB): + *sp -= arg; + break; + } + } + return ((*sp) ? TRUE : FALSE); + +#undef data_word +#undef header_word +} + +/* + * Check filter for invalid operations or stack over/under-flow. + */ +static boolean_t +parse_net_filter( + filter_t *filter, + unsigned int count) +{ + int sp; + filter_t *fpe = &filter[count]; + filter_t op, arg; + + /* + * count is at least 1, and filter[0] is used for flags. 
+ */ + filter++; + sp = NET_FILTER_STACK_DEPTH; + + for (; filter < fpe; filter++) { + op = NETF_OP(*filter); + arg = NETF_ARG(*filter); + + switch (arg) { + case NETF_NOPUSH: + break; + case NETF_PUSHZERO: + sp--; + break; + case NETF_PUSHLIT: + filter++; + if (filter >= fpe) + return (FALSE); /* literal value not in filter */ + sp--; + break; + case NETF_PUSHIND: + case NETF_PUSHHDRIND: + break; + default: + if (arg >= NETF_PUSHSTK) { + if (arg - NETF_PUSHSTK + sp > NET_FILTER_STACK_DEPTH) + return FALSE; + } + else if (arg >= NETF_PUSHHDR) { + if (arg - NETF_PUSHHDR >= + NET_HDW_HDR_MAX/sizeof(unsigned short)) + return FALSE; + } + /* else... cannot check for packet bounds + without packet */ + sp--; + break; + } + if (sp < 2) { + return (FALSE); /* stack overflow */ + } + if (op == NETF_OP(NETF_NOP)) + continue; + + /* + * all non-NOP operators are binary. + */ + if (sp > NET_MAX_FILTER-2) + return (FALSE); + + sp++; + switch (op) { + case NETF_OP(NETF_AND): + case NETF_OP(NETF_OR): + case NETF_OP(NETF_XOR): + case NETF_OP(NETF_EQ): + case NETF_OP(NETF_NEQ): + case NETF_OP(NETF_LT): + case NETF_OP(NETF_LE): + case NETF_OP(NETF_GT): + case NETF_OP(NETF_GE): + case NETF_OP(NETF_COR): + case NETF_OP(NETF_CAND): + case NETF_OP(NETF_CNOR): + case NETF_OP(NETF_CNAND): + case NETF_OP(NETF_LSH): + case NETF_OP(NETF_RSH): + case NETF_OP(NETF_ADD): + case NETF_OP(NETF_SUB): + break; + default: + return (FALSE); + } + } + return (TRUE); +} + +/* + * Set a filter for a network interface. + * + * We are given a naked send right for the rcv_port. + * If we are successful, we must consume that right. + */ +io_return_t +net_set_filter( + struct ifnet *ifp, + ipc_port_t rcv_port, + int priority, + filter_t *filter, + unsigned int filter_count) +{ + int filter_bytes; + bpf_insn_t match; + net_rcv_port_t infp, my_infp; + net_rcv_port_t nextfp; + net_hash_header_t hhp; + net_hash_entry_t entp; + net_hash_entry_t *head, nextentp; + queue_entry_t dead_infp, dead_entp; + int i; + int ret, is_new_infp; + io_return_t rval; + boolean_t in, out; + + /* Initialize hash_entp to NULL to quiet GCC + * warning about uninitialized variable. hash_entp is only + * used when match != 0; in that case it is properly initialized + * by kmem_cache_alloc(). + */ + net_hash_entry_t hash_entp = NULL; + + /* + * Check the filter syntax. + */ + + filter_bytes = CSPF_BYTES(filter_count); + match = (bpf_insn_t) 0; + + if (filter_count == 0) { + return (D_INVALID_OPERATION); + } else if (!((filter[0] & NETF_IN) || (filter[0] & NETF_OUT))) { + return (D_INVALID_OPERATION); /* NETF_IN or NETF_OUT required */ + } else if ((filter[0] & NETF_TYPE_MASK) == NETF_BPF) { + ret = bpf_validate((bpf_insn_t)filter, filter_bytes, &match); + if (!ret) + return (D_INVALID_OPERATION); + } else if ((filter[0] & NETF_TYPE_MASK) == 0) { + if (!parse_net_filter(filter, filter_count)) + return (D_INVALID_OPERATION); + } else { + return (D_INVALID_OPERATION); + } + + rval = D_SUCCESS; /* default return value */ + dead_infp = dead_entp = 0; + + if (match == (bpf_insn_t) 0) { + /* + * If there is no match instruction, we allocate + * a normal packet filter structure. + */ + my_infp = (net_rcv_port_t) kmem_cache_alloc(&net_rcv_cache); + my_infp->rcv_port = rcv_port; + is_new_infp = TRUE; + } else { + /* + * If there is a match instruction, we assume there will be + * multiple sessions with a common substructure and allocate + * a hash table to deal with them. 
+ */ + my_infp = 0; + hash_entp = (net_hash_entry_t) kmem_cache_alloc(&net_hash_entry_cache); + is_new_infp = FALSE; + } + + /* + * Look for an existing filter on the same reply port. + * Look for filters with dead ports (for GC). + * Look for a filter with the same code except KEY insns. + */ + void check_filter_list(queue_head_t *if_port_list) + { + FILTER_ITERATE(if_port_list, infp, nextfp, + (if_port_list == &ifp->if_rcv_port_list) + ? &infp->input : &infp->output) + { + if (infp->rcv_port == MACH_PORT_NULL) { + if (match != 0 + && infp->priority == priority + && my_infp == 0 + && (infp->filter_end - infp->filter) == filter_count + && bpf_eq((bpf_insn_t)infp->filter, + (bpf_insn_t)filter, filter_bytes)) + my_infp = infp; + + for (i = 0; i < NET_HASH_SIZE; i++) { + head = &((net_hash_header_t) infp)->table[i]; + if (*head == 0) + continue; + + /* + * Check each hash entry to make sure the + * destination port is still valid. Remove + * any invalid entries. + */ + entp = *head; + do { + nextentp = (net_hash_entry_t) entp->he_next; + + /* checked without + ip_lock(entp->rcv_port) */ + if (entp->rcv_port == rcv_port + || !IP_VALID(entp->rcv_port) + || !ip_active(entp->rcv_port)) { + ret = hash_ent_remove (ifp, + (net_hash_header_t)infp, + (my_infp == infp), + head, + entp, + &dead_entp); + if (ret) + goto hash_loop_end; + } + + entp = nextentp; + /* While test checks head since hash_ent_remove + might modify it. + */ + } while (*head != 0 && entp != *head); + } + + hash_loop_end: + ; + } else if (infp->rcv_port == rcv_port + || !IP_VALID(infp->rcv_port) + || !ip_active(infp->rcv_port)) { + + /* Remove the old filter from lists */ + if (infp->filter[0] & NETF_IN) + queue_remove(&ifp->if_rcv_port_list, infp, + net_rcv_port_t, input); + if (infp->filter[0] & NETF_OUT) + queue_remove(&ifp->if_snd_port_list, infp, + net_rcv_port_t, output); + + ENQUEUE_DEAD(dead_infp, infp, input); + } + } + FILTER_ITERATE_END + } + + in = (filter[0] & NETF_IN) != 0; + out = (filter[0] & NETF_OUT) != 0; + + simple_lock(&ifp->if_rcv_port_list_lock); + simple_lock(&ifp->if_snd_port_list_lock); + + if (in) + check_filter_list(&ifp->if_rcv_port_list); + if (out) + check_filter_list(&ifp->if_snd_port_list); + + if (my_infp == 0) { + /* Allocate a dummy infp */ + simple_lock(&net_hash_header_lock); + for (i = 0; i < N_NET_HASH; i++) { + if (filter_hash_header[i].n_keys == 0) + break; + } + if (i == N_NET_HASH) { + simple_unlock(&net_hash_header_lock); + simple_unlock(&ifp->if_snd_port_list_lock); + simple_unlock(&ifp->if_rcv_port_list_lock); + + ipc_port_release_send(rcv_port); + if (match != 0) + kmem_cache_free(&net_hash_entry_cache, + (vm_offset_t)hash_entp); + + rval = D_NO_MEMORY; + goto clean_and_return; + } + + hhp = &filter_hash_header[i]; + hhp->n_keys = match->jt; + simple_unlock(&net_hash_header_lock); + + hhp->ref_count = 0; + for (i = 0; i < NET_HASH_SIZE; i++) + hhp->table[i] = 0; + + my_infp = (net_rcv_port_t)hhp; + my_infp->rcv_port = MACH_PORT_NULL; /* indication of dummy */ + is_new_infp = TRUE; + } + + if (is_new_infp) { + my_infp->priority = priority; + my_infp->rcv_count = 0; + + /* Copy filter program. 
*/ + memcpy (my_infp->filter, filter, filter_bytes); + my_infp->filter_end = + (filter_t *)((char *)my_infp->filter + filter_bytes); + + if (match == 0) { + my_infp->rcv_qlimit = net_add_q_info(rcv_port); + } else { + my_infp->rcv_qlimit = 0; + } + + /* Insert my_infp according to priority */ + if (in) { + queue_iterate(&ifp->if_rcv_port_list, infp, net_rcv_port_t, input) + if (priority > infp->priority) + break; + + queue_enter(&ifp->if_rcv_port_list, my_infp, net_rcv_port_t, input); + } + + if (out) { + queue_iterate(&ifp->if_snd_port_list, infp, net_rcv_port_t, output) + if (priority > infp->priority) + break; + + queue_enter(&ifp->if_snd_port_list, my_infp, net_rcv_port_t, output); + } + } + + if (match != 0) + { /* Insert to hash list */ + net_hash_entry_t *p; + + hash_entp->rcv_port = rcv_port; + for (i = 0; i < match->jt; i++) /* match->jt is n_keys */ + hash_entp->keys[i] = match[i+1].k; + p = &((net_hash_header_t)my_infp)-> + table[bpf_hash(match->jt, hash_entp->keys)]; + + /* Not checking for the same key values */ + if (*p == 0) { + queue_init (&hash_entp->chain); + *p = hash_entp; + } else { + enqueue_tail(&(*p)->chain, &hash_entp->chain); + } + + ((net_hash_header_t)my_infp)->ref_count++; + hash_entp->rcv_qlimit = net_add_q_info(rcv_port); + } + + simple_unlock(&ifp->if_snd_port_list_lock); + simple_unlock(&ifp->if_rcv_port_list_lock); + +clean_and_return: + /* No locks are held at this point. */ + + if (dead_infp != 0) + net_free_dead_infp(dead_infp); + if (dead_entp != 0) + net_free_dead_entp(dead_entp); + + return (rval); +} + +/* + * Other network operations + */ +io_return_t +net_getstat( + struct ifnet *ifp, + dev_flavor_t flavor, + dev_status_t status, /* pointer to OUT array */ + mach_msg_type_number_t *count) /* OUT */ +{ + switch (flavor) { + case NET_STATUS: + { + struct net_status *ns = (struct net_status *)status; + + if (*count < NET_STATUS_COUNT) + return (D_INVALID_OPERATION); + + ns->min_packet_size = ifp->if_header_size; + ns->max_packet_size = ifp->if_header_size + ifp->if_mtu; + ns->header_format = ifp->if_header_format; + ns->header_size = ifp->if_header_size; + ns->address_size = ifp->if_address_size; + ns->flags = ifp->if_flags; + ns->mapped_size = 0; + + *count = NET_STATUS_COUNT; + break; + } + case NET_ADDRESS: + { + int addr_byte_count; + int addr_int_count; + int i; + + addr_byte_count = ifp->if_address_size; + addr_int_count = (addr_byte_count + (sizeof(int)-1)) + / sizeof(int); + + if (*count < addr_int_count) + { +/* XXX debug hack. */ +printf ("net_getstat: count: %d, addr_int_count: %d\n", + *count, addr_int_count); + return (D_INVALID_OPERATION); + } + + memcpy(status, ifp->if_address, addr_byte_count); + if (addr_byte_count < addr_int_count * sizeof(int)) + memset((char *)status + addr_byte_count, 0, + (addr_int_count * sizeof(int) + - addr_byte_count)); + + for (i = 0; i < addr_int_count; i++) { + int word; + + word = status[i]; + status[i] = htonl(word); + } + *count = addr_int_count; + break; + } + default: + return (D_INVALID_OPERATION); + } + return (D_SUCCESS); +} + +io_return_t +net_write( + struct ifnet *ifp, + net_write_start_device_fn start, + io_req_t ior) +{ + spl_t s; + kern_return_t rc; + boolean_t wait; + + /* + * Reject the write if the interface is down. + */ + if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) + return (D_DEVICE_DOWN); + + /* + * Reject the write if the packet is too large or too small. 
+ */ + if (ior->io_count < ifp->if_header_size || + ior->io_count > ifp->if_header_size + ifp->if_mtu) + return (D_INVALID_SIZE); + + /* + * Wire down the memory. + */ + + rc = device_write_get(ior, &wait); + if (rc != KERN_SUCCESS) + return (rc); + + /* + * Network interfaces can't cope with VM continuations. + * If wait is set, just panic. + */ + if (wait) { + panic("net_write: VM continuation"); + } + + /* + * Queue the packet on the output queue, and + * start the device. + */ + s = splimp(); + IF_ENQUEUE(&ifp->if_snd, ior); + (*start)(ifp->if_unit); + splx(s); + + return (D_IO_QUEUED); +} + +/* + * Initialize the whole package. + */ +void +net_io_init(void) +{ + vm_size_t size; + + size = sizeof(struct net_rcv_port); + kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0, + NULL, 0); + + size = sizeof(struct net_hash_entry); + kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0, + NULL, 0); + + size = ikm_plus_overhead(sizeof(struct net_rcv_msg)); + net_kmsg_size = round_page(size); + + /* + * net_kmsg_max caps the number of buffers + * we are willing to allocate. By default, + * we allow for net_queue_free_min plus + * the queue limit for each filter. + * (Added as the filters are added.) + */ + + simple_lock_init(&net_kmsg_total_lock); + if (net_kmsg_max == 0) + net_kmsg_max = net_queue_free_min; + + simple_lock_init(&net_queue_free_lock); + ipc_kmsg_queue_init(&net_queue_free); + + simple_lock_init(&net_queue_lock); + ipc_kmsg_queue_init(&net_queue_high); + ipc_kmsg_queue_init(&net_queue_low); + + simple_lock_init(&net_hash_header_lock); +} + + +/* ======== BPF: Berkeley Packet Filter ======== */ + +/*- + * Copyright (c) 1990-1991 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from the Stanford/CMU enet packet filter, + * (net/enet.c) distributed as part of 4.3BSD, and code contributed + * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence + * Berkeley Laboratory. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
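On the user side, these flavors arrive through the generic device interface. A hedged sketch of the counterpart call (user-space code, not kernel; it assumes the MIG-generated device_get_status stub and an already opened device port):

	/*
	 * Illustrative user-side query of the NET_STATUS flavor handled
	 * by net_getstat above.  Error handling is minimal.
	 */
	int
	example_net_max_packet(mach_port_t net_dev)
	{
		struct net_status ns;
		mach_msg_type_number_t count = NET_STATUS_COUNT;

		if (device_get_status(net_dev, NET_STATUS,
				      (dev_status_t) &ns, &count)
				!= KERN_SUCCESS)
			return -1;

		/* Per net_getstat: header size + MTU. */
		return ns.max_packet_size;
	}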
+ * + * @(#)bpf.c 7.5 (Berkeley) 7/15/91 + */ + +#if defined(sparc) || defined(mips) || defined(ibm032) || defined(alpha) +#define BPF_ALIGN +#endif + +#ifndef BPF_ALIGN +#define EXTRACT_SHORT(p) ((u_short)ntohs(*(u_short *)p)) +#define EXTRACT_LONG(p) (ntohl(*(u_int *)p)) +#else +#define EXTRACT_SHORT(p)\ + ((u_short)\ + ((u_short)*((u_char *)p+0)<<8|\ + (u_short)*((u_char *)p+1)<<0)) +#define EXTRACT_LONG(p)\ + ((u_int)*((u_char *)p+0)<<24|\ + (u_int)*((u_char *)p+1)<<16|\ + (u_int)*((u_char *)p+2)<<8|\ + (u_int)*((u_char *)p+3)<<0) +#endif + +/* + * Execute the filter program starting at pc on the packet p + * wirelen is the length of the original packet + * buflen is the amount of data present + */ + +int +bpf_do_filter( + net_rcv_port_t infp, + char * p, /* packet data */ + unsigned int wirelen, /* data_count (in bytes) */ + char * header, + unsigned int hlen, /* header len (in bytes) */ + net_hash_entry_t **hash_headpp, + net_hash_entry_t *entpp) /* out */ +{ + bpf_insn_t pc, pc_end; + unsigned int buflen; + + unsigned int A, X; + int k; + unsigned int mem[BPF_MEMWORDS]; + + /* Generic pointer to either HEADER or P according to the specified offset. */ + char *data = NULL; + + pc = ((bpf_insn_t) infp->filter) + 1; + /* filter[0].code is (NETF_BPF | flags) */ + pc_end = (bpf_insn_t)infp->filter_end; + buflen = NET_RCV_MAX; + *entpp = 0; /* default */ + + A = 0; + X = 0; + + for (; pc < pc_end; ++pc) { + switch (pc->code) { + + default: +#ifdef KERNEL + return 0; +#else + abort(); +#endif + case BPF_RET|BPF_K: + if (infp->rcv_port == MACH_PORT_NULL && + *entpp == 0) { + return 0; + } + return ((u_int)pc->k <= wirelen) ? + pc->k : wirelen; + + case BPF_RET|BPF_A: + if (infp->rcv_port == MACH_PORT_NULL && + *entpp == 0) { + return 0; + } + return ((u_int)A <= wirelen) ? + A : wirelen; + + case BPF_RET|BPF_MATCH_IMM: + if (bpf_match ((net_hash_header_t)infp, pc->jt, mem, + hash_headpp, entpp)) { + return ((u_int)pc->k <= wirelen) ? 
+ pc->k : wirelen; + } + return 0; + + case BPF_LD|BPF_W|BPF_ABS: + k = pc->k; + + load_word: + if ((u_int)k + sizeof(int) <= hlen) + data = header; + else if ((u_int)k + sizeof(int) <= buflen) { + k -= hlen; + data = p; + } else + return 0; + +#ifdef BPF_ALIGN + if (((int)(data + k) & 3) != 0) + A = EXTRACT_LONG(&data[k]); + else +#endif + A = ntohl(*(int *)(data + k)); + continue; + + case BPF_LD|BPF_H|BPF_ABS: + k = pc->k; + + load_half: + if ((u_int)k + sizeof(short) <= hlen) + data = header; + else if ((u_int)k + sizeof(short) <= buflen) { + k -= hlen; + data = p; + } else + return 0; + + A = EXTRACT_SHORT(&data[k]); + continue; + + case BPF_LD|BPF_B|BPF_ABS: + k = pc->k; + + load_byte: + if ((u_int)k < hlen) + data = header; + else if ((u_int)k < buflen) { + data = p; + k -= hlen; + } else + return 0; + + A = data[k]; + continue; + + case BPF_LD|BPF_W|BPF_LEN: + A = wirelen; + continue; + + case BPF_LDX|BPF_W|BPF_LEN: + X = wirelen; + continue; + + case BPF_LD|BPF_W|BPF_IND: + k = X + pc->k; + goto load_word; + + case BPF_LD|BPF_H|BPF_IND: + k = X + pc->k; + goto load_half; + + case BPF_LD|BPF_B|BPF_IND: + k = X + pc->k; + goto load_byte; + + case BPF_LDX|BPF_MSH|BPF_B: + k = pc->k; + if (k < hlen) + data = header; + else if (k < buflen) { + data = p; + k -= hlen; + } else + return 0; + + X = (data[k] & 0xf) << 2; + continue; + + case BPF_LD|BPF_IMM: + A = pc->k; + continue; + + case BPF_LDX|BPF_IMM: + X = pc->k; + continue; + + case BPF_LD|BPF_MEM: + A = mem[pc->k]; + continue; + + case BPF_LDX|BPF_MEM: + X = mem[pc->k]; + continue; + + case BPF_ST: + mem[pc->k] = A; + continue; + + case BPF_STX: + mem[pc->k] = X; + continue; + + case BPF_JMP|BPF_JA: + pc += pc->k; + continue; + + case BPF_JMP|BPF_JGT|BPF_K: + pc += (A > pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGE|BPF_K: + pc += (A >= pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JEQ|BPF_K: + pc += (A == pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JSET|BPF_K: + pc += (A & pc->k) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGT|BPF_X: + pc += (A > X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JGE|BPF_X: + pc += (A >= X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JEQ|BPF_X: + pc += (A == X) ? pc->jt : pc->jf; + continue; + + case BPF_JMP|BPF_JSET|BPF_X: + pc += (A & X) ? pc->jt : pc->jf; + continue; + + case BPF_ALU|BPF_ADD|BPF_X: + A += X; + continue; + + case BPF_ALU|BPF_SUB|BPF_X: + A -= X; + continue; + + case BPF_ALU|BPF_MUL|BPF_X: + A *= X; + continue; + + case BPF_ALU|BPF_DIV|BPF_X: + if (X == 0) + return 0; + A /= X; + continue; + + case BPF_ALU|BPF_AND|BPF_X: + A &= X; + continue; + + case BPF_ALU|BPF_OR|BPF_X: + A |= X; + continue; + + case BPF_ALU|BPF_LSH|BPF_X: + A <<= X; + continue; + + case BPF_ALU|BPF_RSH|BPF_X: + A >>= X; + continue; + + case BPF_ALU|BPF_ADD|BPF_K: + A += pc->k; + continue; + + case BPF_ALU|BPF_SUB|BPF_K: + A -= pc->k; + continue; + + case BPF_ALU|BPF_MUL|BPF_K: + A *= pc->k; + continue; + + case BPF_ALU|BPF_DIV|BPF_K: + A /= pc->k; + continue; + + case BPF_ALU|BPF_AND|BPF_K: + A &= pc->k; + continue; + + case BPF_ALU|BPF_OR|BPF_K: + A |= pc->k; + continue; + + case BPF_ALU|BPF_LSH|BPF_K: + A <<= pc->k; + continue; + + case BPF_ALU|BPF_RSH|BPF_K: + A >>= pc->k; + continue; + + case BPF_ALU|BPF_NEG: + A = -A; + continue; + + case BPF_MISC|BPF_TAX: + X = A; + continue; + + case BPF_MISC|BPF_TXA: + A = X; + continue; + } + } + + return 0; +} + +/* + * Return 1 if the 'f' is a valid filter program without a MATCH + * instruction. 
Return 2 if it is a valid filter program with a MATCH + * instruction. Otherwise, return 0. + * The constraints are that each jump be forward and to a valid + * code. The code must terminate with either an accept or reject. + * 'valid' is an array for use by the routine (it must be at least + * 'len' bytes long). + * + * The kernel needs to be able to verify an application's filter code. + * Otherwise, a bogus program could easily crash the system. + */ +int +bpf_validate( + bpf_insn_t f, + int bytes, + bpf_insn_t *match) +{ + int i, j, len; + bpf_insn_t p; + + len = BPF_BYTES2LEN(bytes); + + /* + * f[0].code is already checked to be (NETF_BPF | flags). + * So skip f[0]. + */ + + for (i = 1; i < len; ++i) { + /* + * Check that that jumps are forward, and within + * the code block. + */ + p = &f[i]; + if (BPF_CLASS(p->code) == BPF_JMP) { + int from = i + 1; + + if (BPF_OP(p->code) == BPF_JA) { + if (from + p->k >= len) + return 0; + } + else if (from + p->jt >= len || from + p->jf >= len) + return 0; + } + /* + * Check that memory operations use valid addresses. + */ + if ((BPF_CLASS(p->code) == BPF_ST || + (BPF_CLASS(p->code) == BPF_LD && + (p->code & 0xe0) == BPF_MEM)) && + (p->k >= BPF_MEMWORDS || p->k < 0)) + return 0; + /* + * Check for constant division by 0. + */ + if (p->code == (BPF_ALU|BPF_DIV|BPF_K) && p->k == 0) + return 0; + /* + * Check for match instruction. + * Only one match instruction per filter is allowed. + */ + if (p->code == (BPF_RET|BPF_MATCH_IMM)) { + if (*match != 0 || + p->jt == 0 || + p->jt > N_NET_HASH_KEYS) + return 0; + i += p->jt; /* skip keys */ + if (i + 1 > len) + return 0; + + for (j = 1; j <= p->jt; j++) { + if (p[j].code != (BPF_MISC|BPF_KEY)) + return 0; + } + + *match = p; + } + } + if (BPF_CLASS(f[len - 1].code) == BPF_RET) + return ((*match == 0) ? 1 : 2); + else + return 0; +} + +int +bpf_eq( + bpf_insn_t f1, + bpf_insn_t f2, + int bytes) +{ + int count; + + count = BPF_BYTES2LEN(bytes); + for (; count--; f1++, f2++) { + if (!BPF_INSN_EQ(f1, f2)) { + if ( f1->code == (BPF_MISC|BPF_KEY) && + f2->code == (BPF_MISC|BPF_KEY) ) + continue; + return FALSE; + } + }; + return TRUE; +} + +unsigned int +bpf_hash (int n, + const unsigned int *keys) +{ + unsigned int hval = 0; + + while (n--) { + hval += *keys++; + } + return (hval % NET_HASH_SIZE); +} + + +int +bpf_match (net_hash_header_t hash, + int n_keys, + const unsigned int *keys, + net_hash_entry_t **hash_headpp, + net_hash_entry_t *entpp) +{ + net_hash_entry_t head, entp; + int i; + + if (n_keys != hash->n_keys) + return FALSE; + + *hash_headpp = &hash->table[bpf_hash(n_keys, keys)]; + head = **hash_headpp; + + if (head == 0) + return FALSE; + + HASH_ITERATE (head, entp) + { + for (i = 0; i < n_keys; i++) { + if (keys[i] != entp->keys[i]) + break; + } + if (i == n_keys) { + *entpp = entp; + return TRUE; + } + } + HASH_ITERATE_END (head, entp) + return FALSE; +} + + +/* + * Removes a hash entry (ENTP) from its queue (HEAD). + * If the reference count of filter (HP) becomes zero and not USED, + * HP is removed from the corresponding port lists and is freed. 
+ */ + +int +hash_ent_remove( + struct ifnet *ifp, + net_hash_header_t hp, + int used, + net_hash_entry_t *head, + net_hash_entry_t entp, + queue_entry_t *dead_p) +{ + hp->ref_count--; + + if (*head == entp) { + if (queue_empty((queue_t) entp)) { + *head = 0; + ENQUEUE_DEAD(*dead_p, entp, chain); + if (hp->ref_count == 0 && !used) { + if (((net_rcv_port_t)hp)->filter[0] & NETF_IN) + queue_remove(&ifp->if_rcv_port_list, + (net_rcv_port_t)hp, + net_rcv_port_t, input); + if (((net_rcv_port_t)hp)->filter[0] & NETF_OUT) + queue_remove(&ifp->if_snd_port_list, + (net_rcv_port_t)hp, + net_rcv_port_t, output); + hp->n_keys = 0; + return TRUE; + } + return FALSE; + } else { + *head = (net_hash_entry_t)queue_next((queue_t) entp); + } + } + + remqueue((queue_t)*head, (queue_entry_t)entp); + ENQUEUE_DEAD(*dead_p, entp, chain); + return FALSE; +} + +int +net_add_q_info(ipc_port_t rcv_port) +{ + mach_port_msgcount_t qlimit = 0; + + /* + * We use a new port, so increase net_queue_free_min + * and net_kmsg_max to allow for more queued messages. + */ + + if (IP_VALID(rcv_port)) { + ip_lock(rcv_port); + if (ip_active(rcv_port)) + qlimit = rcv_port->ip_qlimit; + ip_unlock(rcv_port); + } + + simple_lock(&net_kmsg_total_lock); + net_queue_free_min++; + net_kmsg_max += qlimit + 1; + simple_unlock(&net_kmsg_total_lock); + + return (int)qlimit; +} + +static void +net_del_q_info(int qlimit) +{ + simple_lock(&net_kmsg_total_lock); + net_queue_free_min--; + net_kmsg_max -= qlimit + 1; + simple_unlock(&net_kmsg_total_lock); +} + + +/* + * net_free_dead_infp (dead_infp) + * queue_entry_t dead_infp; list of dead net_rcv_port_t. + * + * Deallocates dead net_rcv_port_t. + * No locks should be held when called. + */ +void +net_free_dead_infp(queue_entry_t dead_infp) +{ + net_rcv_port_t infp, nextfp; + + for (infp = (net_rcv_port_t) dead_infp; infp != 0; infp = nextfp) + { + nextfp = (net_rcv_port_t) queue_next(&infp->input); + ipc_port_release_send(infp->rcv_port); + net_del_q_info(infp->rcv_qlimit); + kmem_cache_free(&net_rcv_cache, (vm_offset_t) infp); + } +} + +/* + * net_free_dead_entp (dead_entp) + * queue_entry_t dead_entp; list of dead net_hash_entry_t. + * + * Deallocates dead net_hash_entry_t. + * No locks should be held when called. + */ +void +net_free_dead_entp(queue_entry_t dead_entp) +{ + net_hash_entry_t entp, nextentp; + + for (entp = (net_hash_entry_t)dead_entp; entp != 0; entp = nextentp) + { + nextentp = (net_hash_entry_t) queue_next(&entp->chain); + + ipc_port_release_send(entp->rcv_port); + net_del_q_info(entp->rcv_qlimit); + kmem_cache_free(&net_hash_entry_cache, (vm_offset_t) entp); + } +} + diff --git a/device/net_io.h b/device/net_io.h new file mode 100644 index 0000000..c9af85e --- /dev/null +++ b/device/net_io.h @@ -0,0 +1,164 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
+ *  School of Computer Science
+ *  Carnegie Mellon University
+ *  Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ *	Author: David B. Golub, Carnegie Mellon University
+ *	Date: 	11/89
+ */
+
+#ifndef	_DEVICE_NET_IO_H_
+#define	_DEVICE_NET_IO_H_
+
+/*
+ * Utilities for playing with network messages.
+ */
+
+#include <mach/machine/vm_types.h>
+#include <ipc/ipc_kmsg.h>
+
+#include <kern/macros.h>
+#include <kern/lock.h>
+#include <kern/kalloc.h>
+
+#include <device/if_hdr.h>
+#include <device/io_req.h>
+#include <device/net_status.h>
+
+struct net_rcv_port;
+typedef struct net_rcv_port *net_rcv_port_t;
+
+struct net_hash_entry;
+typedef struct net_hash_entry *net_hash_entry_t;
+
+struct net_hash_header;
+typedef struct net_hash_header *net_hash_header_t;
+
+/*
+ * A network packet is wrapped in a kernel message while in
+ * the kernel.
+ */
+
+#define	net_kmsg(kmsg)	((net_rcv_msg_t)&(kmsg)->ikm_header)
+
+/*
+ * Interrupt routines may allocate and free net_kmsgs with these
+ * functions.  net_kmsg_get may return IKM_NULL.
+ */
+
+extern ipc_kmsg_t net_kmsg_get(void);
+extern void net_kmsg_put(ipc_kmsg_t);
+
+/*
+ * Network utility routines.
+ */
+
+extern void net_ast(void);
+extern void net_packet(struct ifnet *, ipc_kmsg_t, unsigned int, boolean_t);
+extern void net_filter(ipc_kmsg_t, ipc_kmsg_queue_t);
+extern io_return_t net_getstat(struct ifnet *, dev_flavor_t, dev_status_t,
+			       mach_msg_type_number_t *);
+
+typedef int (*net_write_start_device_fn)(short);
+extern io_return_t net_write(struct ifnet *, net_write_start_device_fn, io_req_t);
+
+/*
+ * Non-interrupt code may allocate and free net_kmsgs with these functions.
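+ * (They kalloc and kfree a buffer of net_kmsg_size bytes; see the
+ * macros below.)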
+ */ + +extern vm_size_t net_kmsg_size; + +extern void net_kmsg_collect (void); + +extern void net_io_init(void); +extern void net_thread(void) __attribute__ ((noreturn)); + +#define net_kmsg_alloc() ((ipc_kmsg_t) kalloc(net_kmsg_size)) +#define net_kmsg_free(kmsg) kfree((vm_offset_t) (kmsg), net_kmsg_size) + +extern unsigned int ntohl(unsigned int); +extern unsigned short int ntohs(unsigned short int); +extern unsigned int htonl(unsigned int); +extern unsigned short int htons(unsigned short int); + +unsigned int bpf_hash(int n, const unsigned int *keys); + +extern boolean_t +net_do_filter( + net_rcv_port_t infp, + const char * data, + unsigned int data_count, + const char * header); /* CSPF */ + +io_return_t +net_set_filter( + struct ifnet *ifp, + ipc_port_t rcv_port, + int priority, + filter_t *filter, + unsigned int filter_count); + +extern int +bpf_do_filter( + net_rcv_port_t infp, + char * p, + unsigned int wirelen, + char * header, + unsigned int hlen, + net_hash_entry_t **hash_headpp, + net_hash_entry_t *entpp); /* BPF */ + +int hash_ent_remove( + struct ifnet *ifp, + net_hash_header_t hp, + int used, + net_hash_entry_t *head, + net_hash_entry_t entp, + queue_entry_t *dead_p); + +void net_free_dead_infp(queue_entry_t dead_infp); +void net_free_dead_entp (queue_entry_t dead_entp); + +int bpf_validate( + bpf_insn_t f, + int bytes, + bpf_insn_t *match); + +int bpf_eq( + bpf_insn_t f1, + bpf_insn_t f2, + int bytes); + +int net_add_q_info(ipc_port_t rcv_port); + +int bpf_match ( + net_hash_header_t hash, + int n_keys, + const unsigned int *keys, + net_hash_entry_t **hash_headpp, + net_hash_entry_t *entpp); + +boolean_t ethernet_priority(const ipc_kmsg_t kmsg); + +#endif /* _DEVICE_NET_IO_H_ */ diff --git a/device/param.h b/device/param.h new file mode 100644 index 0000000..41b4793 --- /dev/null +++ b/device/param.h @@ -0,0 +1,49 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + */ + +#ifndef _DEVICE_PARAM_H_ +#define _DEVICE_PARAM_H_ + +/* + * Compatibility definitions for disk IO. + */ + +/* + * Disk devices do all IO in 512-byte blocks. + */ +#define DEV_BSIZE 512 + +/* + * Conversion between bytes and disk blocks. 
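+ * (The shift count 9 is log2(DEV_BSIZE): shifting right converts a byte
+ * offset to 512-byte blocks, shifting left converts back.)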
+ */ +#define btodb(byte_offset) ((byte_offset) >> 9) +#define dbtob(block_number) ((block_number) << 9) + +#endif /* _DEVICE_PARAM_H_ */ diff --git a/device/subrs.c b/device/subrs.c new file mode 100644 index 0000000..6e90a81 --- /dev/null +++ b/device/subrs.c @@ -0,0 +1,86 @@ +/* + * Mach Operating System + * Copyright (c) 1993,1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Random device subroutines and stubs. + */ + +#include <kern/debug.h> +#include <kern/printf.h> +#include <vm/vm_kern.h> +#include <vm/vm_user.h> +#include <device/buf.h> +#include <device/if_hdr.h> +#include <device/if_ether.h> +#include <device/subrs.h> + + + +/* + * Convert Ethernet address to printable (loggable) representation. + */ +char * +ether_sprintf(const u_char *ap) +{ + int i; + static char etherbuf[18]; + char *cp = etherbuf; + static char digits[] = "0123456789abcdef"; + + for (i = 0; i < 6; i++) { + *cp++ = digits[*ap >> 4]; + *cp++ = digits[*ap++ & 0xf]; + *cp++ = ':'; + } + *--cp = 0; + return (etherbuf); +} + +/* + * Initialize send and receive queues on an interface. + */ +void if_init_queues(struct ifnet *ifp) +{ + IFQ_INIT(&ifp->if_snd); + queue_init(&ifp->if_rcv_port_list); + queue_init(&ifp->if_snd_port_list); + simple_lock_init(&ifp->if_rcv_port_list_lock); + simple_lock_init(&ifp->if_snd_port_list_lock); +} + + +/* + * Compatibility with BSD device drivers. + */ +void sleep(vm_offset_t channel, int priority) +{ + assert_wait((event_t) channel, FALSE); /* not interruptible XXX */ + thread_block((void (*)()) 0); +} + +void wakeup(vm_offset_t channel) +{ + thread_wakeup((event_t) channel); +} diff --git a/device/subrs.h b/device/subrs.h new file mode 100644 index 0000000..60ea651 --- /dev/null +++ b/device/subrs.h @@ -0,0 +1,37 @@ +/* + * Random device functions + * Copyright (C) 2008 Free Software Foundation, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Barry deFreese. 
+ */ +/* + * Random device functions. + * + */ + +#ifndef _SUBRS_H_ +#define _SUBRS_H_ + +#include <mach/std_types.h> +#include <device/if_hdr.h> + +extern void if_init_queues(struct ifnet *ifp); + +extern void sleep (vm_offset_t channel, int priority); +extern void wakeup (vm_offset_t channel); + +#endif /* _SUBRS_H_ */ diff --git a/device/tty.h b/device/tty.h new file mode 100644 index 0000000..3f8b2f6 --- /dev/null +++ b/device/tty.h @@ -0,0 +1,237 @@ +/* + * Mach Operating System + * Copyright (c) 1993-1990 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 7/90 + * + * Compatibility TTY structure for existing TTY device drivers. + */ + +#ifndef _DEVICE_TTY_H_ +#define _DEVICE_TTY_H_ + +#include <kern/lock.h> +#include <kern/queue.h> +#include <mach/port.h> + +#include <device/device_types.h> +#include <device/tty_status.h> +#include <device/cirbuf.h> +#include <device/io_req.h> + +struct tty { + decl_simple_lock_irq_data(,t_lock) /* Shall be taken at spltty only */ + struct cirbuf t_inq; /* input buffer */ + struct cirbuf t_outq; /* output buffer */ + char * t_addr; /* device pointer */ + int t_dev; /* device number */ + void (*t_start)(struct tty *); + /* routine to start output */ +#define t_oproc t_start + void (*t_stop)(struct tty *, int); + /* routine to stop output */ + int (*t_mctl)(struct tty *, int, int); + /* (optional) routine to control + modem signals */ + unsigned char t_ispeed; /* input speed */ + unsigned char t_ospeed; /* output speed */ + char t_breakc; /* character to deliver when 'break' + condition received */ + int t_flags; /* mode flags */ + int t_state; /* current state */ + int t_line; /* fake line discipline number, + for old drivers - always 0 */ + queue_head_t t_delayed_read; /* pending read requests */ + queue_head_t t_delayed_write;/* pending write requests */ + queue_head_t t_delayed_open; /* pending open requests */ + +/* + * Items beyond this point should be removed to device-specific + * extension structures. 
+ */ + io_return_t (*t_getstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t *); /* routine to get status */ + io_return_t (*t_setstat)(dev_t, dev_flavor_t, dev_status_t, mach_msg_type_number_t); /* routine to set status */ + dev_ops_t t_tops; /* another device to possibly + push through */ +}; +typedef struct tty *tty_t; + +/* + * Common TTY service routines + */ +extern io_return_t char_open( + int dev, + struct tty * tp, + dev_mode_t mode, + io_req_t ior); + +extern io_return_t char_read( + struct tty * tp, + io_req_t ior); + +extern io_return_t char_write( + struct tty * tp, + io_req_t ior); + +extern void ttyinput( + unsigned int c, + struct tty * tp); + +extern void ttyinput_many( + struct tty * tp, + char * chars, + int count); + +extern boolean_t ttymodem( + struct tty * tp, + boolean_t carrier_up); + +extern void tty_cts( + struct tty * tp, + boolean_t cts_up); + +extern void tty_queue_completion( + queue_t queue); +#define tt_open_wakeup(tp) \ + (tty_queue_completion(&(tp)->t_delayed_open)) +#define tt_write_wakeup(tp) \ + (tty_queue_completion(&(tp)->t_delayed_write)) + +extern void ttychars( + struct tty * tp); + +#define TTMINBUF 90 + +extern short tthiwat[NSPEEDS], ttlowat[NSPEEDS]; +#define TTHIWAT(tp) tthiwat[(tp)->t_ospeed] +#define TTLOWAT(tp) ttlowat[(tp)->t_ospeed] + +extern io_return_t tty_get_status( + struct tty * tp, + dev_flavor_t flavor, + int * data, + natural_t * count); + +extern io_return_t tty_set_status( + struct tty * tp, + dev_flavor_t flavor, + int * data, + natural_t count); + +extern void tty_flush( + struct tty * tp, + int rw); + +extern void ttrstrt( + struct tty * tp); + +extern void ttstart( + struct tty * tp); + +extern void ttyclose( + struct tty * tp); + +extern boolean_t tty_portdeath( + struct tty * tp, + ipc_port_t port); + +/* internal state bits */ +#define TS_INIT 0x00000001 /* tty structure initialized */ +#define TS_TIMEOUT 0x00000002 /* delay timeout in progress */ +#define TS_WOPEN 0x00000004 /* waiting for open to complete */ +#define TS_ISOPEN 0x00000008 /* device is open */ +#define TS_FLUSH 0x00000010 /* outq has been flushed during DMA */ +#define TS_CARR_ON 0x00000020 /* software copy of carrier-present */ +#define TS_BUSY 0x00000040 /* output in progress */ +#define TS_ASLEEP 0x00000080 /* wakeup when output done */ + +#define TS_TTSTOP 0x00000100 /* output stopped by ctl-s */ +#define TS_HUPCLS 0x00000200 /* hang up upon last close */ +#define TS_TBLOCK 0x00000400 /* tandem queue blocked */ + +#define TS_NBIO 0x00001000 /* tty in non-blocking mode */ +#define TS_ONDELAY 0x00002000 /* device is open; software copy of + * carrier is not present */ +#define TS_MIN 0x00004000 /* buffer input chars, if possible */ +#define TS_MIN_TO 0x00008000 /* timeout for the above is active */ + +#define TS_OUT 0x00010000 /* tty in use for dialout only */ +#define TS_RTS_DOWN 0x00020000 /* modem pls stop */ + +#define TS_TRANSLATE 0x00100000 /* translation device enabled */ +#define TS_KDB 0x00200000 /* should enter kdb on ALT */ + +#define TS_MIN_TO_RCV 0x00400000 /* character received during + receive timeout interval */ + +/* flags - old names defined in terms of new ones */ + +#define TANDEM TF_TANDEM +#define ODDP TF_ODDP +#define EVENP TF_EVENP +#define ANYP (ODDP|EVENP) +#define MDMBUF TF_MDMBUF +#define LITOUT TF_LITOUT +#define NOHANG TF_NOHANG + +#define ECHO TF_ECHO +#define CRMOD TF_CRMOD +#define XTABS TF_XTABS + +/* these are here only to let old code compile - they are never set */ +#define RAW LITOUT +#define PASS8 
LITOUT
+
+/*
+ * Hardware bits.
+ * SHOULD NOT BE HERE.
+ */
+#define	DONE	0200
+#define	IENABLE	0100
+
+/*
+ * Modem control commands.
+ */
+#define	DMSET		0
+#define	DMBIS		1
+#define	DMBIC		2
+#define	DMGET		3
+
+/*
+ * Fake 'line discipline' switch, for the benefit of old code
+ * that wants to call through it.
+ */
+struct ldisc_switch {
+	int	(*l_read) (struct tty *, io_req_t);	/* read */
+	int	(*l_write)(struct tty *, io_req_t);	/* write */
+	void	(*l_rint) (unsigned int, struct tty *);	/* character input */
+	boolean_t (*l_modem)(struct tty *, boolean_t);	/* modem change */
+	void	(*l_start)(struct tty *);		/* start output */
+};
+
+extern	struct ldisc_switch	linesw[];
+
+#endif	/* _DEVICE_TTY_H_ */
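
For illustration, here is a minimal filter that the bpf_validate() routine above would accept with return value 1 (a valid program with no MATCH instruction). This is a sketch only: it assumes the classic struct bpf_insn layout { code, jt, jf, k } and the NETF_BPF/BPF_* macros from device/net_status.h; the exact encoding of word 0 is an assumption.

/* Sketch: a two-word "accept everything" net filter. */
static struct bpf_insn accept_all[] = {
	{ NETF_BPF,        0, 0,  0 },	/* word 0: filter header; bpf_validate skips f[0] */
	{ BPF_RET | BPF_K, 0, 0, ~0 },	/* terminating RET; nonzero k accepts the packet */
};

/* Hypothetical check:
 *	bpf_insn_t match = 0;
 *	int rc = bpf_validate(accept_all, sizeof accept_all, &match);
 * rc is 1 here: there are no jumps, memory operations, or divisions to
 * reject, the program ends in a BPF_RET, and no BPF_MATCH_IMM occurs. */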
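
In the same spirit, a hypothetical caller of ether_sprintf() from subrs.c (the helper name and address are invented for illustration). Note that the routine formats into a single static buffer, so the returned string is only valid until the next call and the function is not reentrant.

/* Hypothetical logging snippet built on ether_sprintf(). */
static const u_char example_mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

void
log_example_mac(void)
{
	/* Prints "ethernet address: 00:1a:2b:3c:4d:5e". */
	printf("ethernet address: %s\n", ether_sprintf(example_mac));
}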