PERFORCE change 36979 for review

Sam Leffler sam at FreeBSD.org
Tue Aug 26 13:48:20 PDT 2003


http://perforce.freebsd.org/chv.cgi?CH=36979

Change 36979 by sam at sam_ebb on 2003/08/26 13:47:31

	fast swi taskqueue support, for queueing SWIs from a fast ISR
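
	For example, a driver's fast interrupt handler can now hand work
	off to a swi via the new queue.  A hypothetical sketch (the
	"mydev" names and TASK_INIT placement are illustrative, not part
	of this change):

		#include <sys/param.h>
		#include <sys/kernel.h>
		#include <sys/taskqueue.h>

		static struct task mydev_task;

		/*
		 * Runs later in swi context; "pending" counts enqueues
		 * that were coalesced before the task got to run.
		 */
		static void
		mydev_deferred(void *arg, int pending)
		{
			/* do the real work outside the fast ISR */
		}

		/* Fast ISR, registered with INTR_FAST via bus_setup_intr(). */
		static void
		mydev_fast_intr(void *arg)
		{
			/*
			 * Safe from a fast ISR: the fast queue is
			 * protected by a spin mutex, not a sleep mutex.
			 */
			taskqueue_enqueue_fast(taskqueue_fast, &mydev_task);
		}

		/* once, e.g. in the driver's attach routine: */
		/* TASK_INIT(&mydev_task, 0, mydev_deferred, NULL); */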

Affected files ...

.. //depot/projects/netperf/sys/kern/subr_taskqueue.c#2 edit
.. //depot/projects/netperf/sys/sys/interrupt.h#2 edit
.. //depot/projects/netperf/sys/sys/taskqueue.h#2 edit

Differences ...

==== //depot/projects/netperf/sys/kern/subr_taskqueue.c#2 (text+ko) ====

@@ -240,3 +240,132 @@
 TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, 0,
 		 swi_add(NULL, "Giant task queue", taskqueue_swi_giant_run,
 		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih)); 
+
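+/*
+ * Like taskqueue_enqueue, but the queue is protected by a spin
+ * mutex so it may be called from a fast interrupt handler.
+ */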
+int
+taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
+{
+	struct task *ins;
+	struct task *prev;
+
+	mtx_lock_spin(&queue->tq_mutex);
+
+	/*
+	 * Don't allow new tasks on a queue which is being freed.
+	 */
+	if (queue->tq_draining) {
+		mtx_unlock_spin(&queue->tq_mutex);
+		return EPIPE;
+	}
+
+	/*
+	 * Count multiple enqueues.
+	 */
+	if (task->ta_pending) {
+		task->ta_pending++;
+		mtx_unlock_spin(&queue->tq_mutex);
+		return 0;
+	}
+
+	/*
+	 * Optimise the case when all tasks have the same priority.
+	 */
+	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
+	if (!prev || prev->ta_priority >= task->ta_priority) {
+		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
+	} else {
+		prev = 0;
+		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
+		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
+			if (ins->ta_priority < task->ta_priority)
+				break;
+
+		if (prev)
+			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
+		else
+			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
+	}
+
+	task->ta_pending = 1;
+	if (queue->tq_enqueue)
+		queue->tq_enqueue(queue->tq_context);
+
+	mtx_unlock_spin(&queue->tq_mutex);
+
+	return 0;
+}
+
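+/*
+ * Run all pending tasks.  The spin mutex is dropped around each
+ * callback, so a task may re-enqueue itself while it runs.
+ */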
+static void
+taskqueue_run_fast(struct taskqueue *queue)
+{
+	struct task *task;
+	int pending;
+
+	mtx_lock_spin(&queue->tq_mutex);
+	while (STAILQ_FIRST(&queue->tq_queue)) {
+		/*
+		 * Carefully remove the first task from the queue and
+		 * zero its pending count.
+		 */
+		task = STAILQ_FIRST(&queue->tq_queue);
+		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
+		pending = task->ta_pending;
+		task->ta_pending = 0;
+		mtx_unlock_spin(&queue->tq_mutex);
+
+		task->ta_func(task->ta_context, pending);
+
+		mtx_lock_spin(&queue->tq_mutex);
+	}
+	mtx_unlock_spin(&queue->tq_mutex);
+}
+
+struct taskqueue *taskqueue_fast;
+static void	*taskqueue_fast_ih;
+
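+/* Enqueue hook: kick the fast SWI to run the queue. */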
+static void
+taskqueue_fast_schedule(void *context)
+{
+	swi_sched(taskqueue_fast_ih, 0);
+}
+
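+/* SWI handler: run all tasks queued to the fast taskqueue. */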
+static void
+taskqueue_fast_run(void *dummy)
+{
+	taskqueue_run_fast(taskqueue_fast);
+}
+
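+/* Allocate the fast taskqueue at boot and attach its SWI handler. */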
+static void
+taskqueue_define_fast(void *arg)
+{
+	taskqueue_fast = malloc(sizeof(struct taskqueue),
+		M_TASKQUEUE, M_NOWAIT | M_ZERO);
+	if (!taskqueue_fast) {
+		printf("%s: Unable to allocate fast task queue!\n", __func__);
+		return;
+	}
+
+	STAILQ_INIT(&taskqueue_fast->tq_queue);
+	taskqueue_fast->tq_name = "fast";
+	taskqueue_fast->tq_enqueue = taskqueue_fast_schedule;
+	mtx_init(&taskqueue_fast->tq_mutex, "taskqueue", NULL, MTX_SPIN);
+
+	mtx_lock(&taskqueue_queues_mutex);
+	STAILQ_INSERT_TAIL(&taskqueue_queues, taskqueue_fast, tq_link);
+	mtx_unlock(&taskqueue_queues_mutex);
+
+	swi_add(NULL, "Fast task queue", taskqueue_fast_run,
+		NULL, SWI_TQ_FAST, 0, &taskqueue_fast_ih);
+}
+SYSINIT(taskqueue_fast, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
+	taskqueue_define_fast, NULL);

==== //depot/projects/netperf/sys/sys/interrupt.h#2 (text+ko) ====

@@ -94,9 +94,10 @@
 #define	SWI_CAMNET	2
 #define	SWI_CAMBIO	3
 #define	SWI_VM		4
-#define	SWI_TQ_GIANT	5
-#define	SWI_TQ		6
-#define	SWI_CLOCK	7
+#define	SWI_TQ_FAST	5
+#define	SWI_TQ_GIANT	6
+#define	SWI_TQ		7
+#define	SWI_CLOCK	8
 
 extern struct	ithd *tty_ithd;
 extern struct	ithd *clk_ithd;

==== //depot/projects/netperf/sys/sys/taskqueue.h#2 (text+ko) ====

@@ -113,4 +113,13 @@
 TASKQUEUE_DECLARE(swi_giant);
 TASKQUEUE_DECLARE(swi);
 
+/*
+ * Queue for swi handlers dispatched from fast interrupt handlers.
+ * This queue is necessarily distinct from the ones above because
+ * it must be protected by a spin mutex: sleep mutexes cannot be
+ * used from fast interrupt handler context.
+ */
+TASKQUEUE_DECLARE(fast);
+int	taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task);
+
 #endif /* !_SYS_TASKQUEUE_H_ */
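
For reference, TASKQUEUE_DECLARE(fast) presumably just emits an extern
declaration of the queue pointer, along the lines of the existing
TASKQUEUE_DECLARE macro in sys/sys/taskqueue.h (a sketch, not part of
this change):

	#define TASKQUEUE_DECLARE(name)				\
	extern struct taskqueue *taskqueue_##name

Consumers then include <sys/taskqueue.h> and pass the global
taskqueue_fast to taskqueue_enqueue_fast().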

