PERFORCE change 135342 for review
Maxim Zhuravlev
thioretic at FreeBSD.org
Wed Feb 13 12:25:25 PST 2008
http://perforce.freebsd.org/chv.cgi?CH=135342
Change 135342 by thioretic at thioretic on 2008/02/13 20:25:23
The basic NewBus I/O subsystem API is now outlined.
Some additional helper functions remain to be implemented.
Affected files ...
.. //depot/projects/soc2007/thioretic_gidl2/kern/subr_busio.c#10 edit
.. //depot/projects/soc2007/thioretic_gidl2/sys/bus.h#10 edit
Differences ...
==== //depot/projects/soc2007/thioretic_gidl2/kern/subr_busio.c#10 (text+ko) ====
@@ -34,14 +34,14 @@
struct ior {
u_int32_t type; //see sys/ior_types.h
void* data;
-#define IORS_NONE 0
-#define IORS_ENQUEUED 1<<0
-#define IORS_OWNED 1<<1
+#define IORS_NONE (0)
+#define IORS_ENQUEUED (1<<1)
+#define IORS_OWNED (1<<2)
+#define IORS_PAUSED (1<<3)
u_int32_t state;
-#define IORF_DONE 1<<0
-#define IORF_INVALIDATE 1<<1
-#define IORF_RETRY 1<<2
-#define IORF_TODESTROY 1<<3
+#define IORF_DONE (1<<0)
+#define IORF_INVALIDATE (1<<1)
+#define IORF_RETRY (1<<2)
u_int32_t flags;
ior_link_list_t parents;
int children;
@@ -72,6 +72,7 @@
struct mtx guard_mtx;
};
+#define IOR_QUEUES_NUM 1
static ior_queue ior_queues[IOR_QUEUES_NUM];
static struct mtx iors_list_mtx;
@@ -215,7 +216,7 @@
if (qid == IOR_QUEUES_NUM)
return (1);
- if (r->state != IORS_ENQUEUED){
+ if (r->state & IORS_OWNED){
mtx_unlock (&q.guard_mtx);
ior_unlock (r);
goto retry;
@@ -224,7 +225,9 @@
q.todo = TAILQ_NEXT(r, link);
mtx_unlock (&q.guard_mtx);
- r->state = IORS_OWNED;
+ ior_dequeue (r);
+ r->state |= IORS_OWNED;
+
ior_unlock (r);
if (ior_get_flags(r) >= IORF_DONE)
@@ -238,7 +241,7 @@
ior_t
ior_create (device_t origin, int type, void* data,
ior_t* parents, int pcount, char* path,
- int enqueue, int queue_id){
+ /*int enqueue,*/ int prio){
ior_t new_ior;
ior_link_t pil, pils;
int i, error = 0;
@@ -274,10 +277,10 @@
new_ior->children = 0;
new_ior->origin = new_ior->curdev = origin;
+ new_ior->queue_id = -1;
- if (enqueue){
- ior_enqueue_adv (new_ior, queue_id);
- }
+ /*if (enqueue)*/
+ ior_enqueue_adv (new_ior, queue_id);
return (new_ior);
@@ -295,7 +298,7 @@
TAILQ_REMOVE (&r->parents, il, link);
}
- mtx_destroy (r->guard_spin_mtx);
+ mtx_destroy (r->guard_mtx);
free (r);
}
@@ -306,8 +309,6 @@
if (r->children)
return (0);
- ior_add_flags (r, IORF_TODESTROY);
-
ior_lock (r);
while (dev = TAILQ_NEXT(r->curdev)){
@@ -320,7 +321,7 @@
}
int
-ior_abort (ior_t r, u_int32_t flags){
+ior_complete (ior_t r, u_int32_t flags){
if (ior_destroy (r)){
ior_add_flags (r, flags);
return (1);
@@ -328,22 +329,12 @@
return (0);
}
-static void
-ior_set_state (ior_t r, u_int32_t val){
- r->state = val;
-}
-
-static u_int32_t
-ior_get_state (ior_t r){
- return (r->state);
-}
-
void
ior_set_flags (ior_t r, u_int32_t val){
r->flags = val;
}
void
-ior_add_flag (ior_t r, u_int32_t val){
+ior_add_flags (ior_t r, u_int32_t val){
r->flags |= val;
}
@@ -352,7 +343,7 @@
return (r->flags);
}
-int
+static int
ior_set_path (ior_t r, device_t origin, char* path){
int i = 0, error = 0, path_len;
device_t *dev_path = NULL;
@@ -416,13 +407,13 @@
return (0);
}
-void
+static void
ior_enqueue_adv (ior_t r, int queue_id){
ior_queue q = ior_queues[queue_id];
ior_lock (r);
- if (r->state >= IORS_ENQUEUED){
+ if (r->state & IORS_ENQUEUED){
ior_unlock (r);
return ();
}
@@ -435,29 +426,29 @@
mtx_unlock (&q.guard_mtx);
- r->queue_id = queue_id + 1;
- r->state = IORS_ENQUEUED;
+ r->queue_id = queue_id;
+ r->state |= IORS_ENQUEUED;
ior_unlock (r);
wakeup_one (&work_kthreads_to_wait_on);
}
-void
+static void
ior_enqueue (ior_t r) {
- return (ior_enqueue_adv (r, IOR_QUEUE_DEF));
+ return (ior_enqueue_adv (r, r->queue_id != -1 ? r->queue_id : IOR_PRIO_DEF));
}
static int
-ior_dequeue_adv (ior_t r, int queue_id){
+ior_dequeue (ior_t r){
int error = 0;
- ior_queue q = ior_queues[queue_id];
+ ior_queue q = ior_queues[r->queue_id];
ior_lock (r);
- if (r->state != IORS_ENQUEUED || r->children){
- ior_unlock (r);
- return (1);
+ if (!(r->state & IORS_ENQUEUED)){
+ ior_unlock (r);
+ return (1);
}
mtx_lock (&q.guard_mtx);
@@ -468,18 +459,13 @@
mtx_unlock (&q.quard_mtx);
- r->state = IORS_NONE;
+ r->state ^= IORS_ENQUEUED;
ior_unlock (r);
return (error);
}
-int
-ior_dequeue (ior_t r) {
- return (ior_dequeue_adv (r, r->queue_id));
-}
-
static void
ior_lock (ior_t r){
mtx_lock (&r->guard_mtx);
@@ -501,10 +487,15 @@
}
r->curdev = nextdev;
- IOR_DO (r->curdev->device_ptr, r);
+ IOR_DO (r->curdev->device_ptr, r); /*can change r->state to IORS_PAUSED|IORS_OWNED;*/
- if (ior_get_state(r) == IORS_NONE)
+ ior_lock (r);
+ if (r->state & IORS_PAUSED){
+ r->state ^= IORS_OWNED;
+ ior_unlock (r);
return();
+ }
+ ior_unlock (r);
if ((nextdev = TAILQ_NEXT(r->curdev, link)) &&
(device_get_flags(nextdev->device_ptr) & DF_ENQUEUEIORS))
@@ -527,14 +518,39 @@
r->curdev = nextdev;
IOR_DONE (r->curdev->device_ptr, r);
- if (ior_get_flags(r) & IORF_TODESTOY){
- ior_destroy_async (r);
- return;
+ ior_lock (r);
+ if (r->state & IORS_PAUSED){
+ r->state ^= IORS_OWNED;
+ ior_unlock (r);
+ return();
}
-
+ ior_unlock (r);
+
if ((nextdev = TAILQ_PREV(r->curdev, devicelink_list, link)) &&
(device_get_flags(nextdev->device_ptr) & DF_ENQUEUEIORS))
break;
}
ior_enqueue_adv (r, r->queue_id);
}
+
+void
+ior_pause (ior_t r){
+ r->state |= IORS_PAUSED;
+}
+
+void
+ior_resume (ior_t r){
+ ior_lock (r);
+ if (r->state & IORS_OWNED){
+ r->state ^= IORS_PAUSED;
+ ior_unlock (r);
+ } else {
+ ior_unlock (r);
+ ior_enqueue(r);
+ }
+}
+
+void
+ior_set_priority (ior_t r, int prio){
+ r->queue_id = prio;
+}
==== //depot/projects/soc2007/thioretic_gidl2/sys/bus.h#10 (text+ko) ====
@@ -713,7 +713,7 @@
bus_space_write_region_stream_8(rman_get_bustag(r), rman_get_bushandle(r), (o), (d), (c))
/**
- * event support functions
+ * EVENTS SUPPORT API
*/
#define EV_ATTACH 0
#define EV_DETACH 1
@@ -727,11 +727,11 @@
#define REL_CHILD (1<<29)
#define REL_PARENT (1<<30)
#define EVP_ENQALWAYS (1<<31)
-/**
- * event_pref array is an array of event settings.
- * includes:
- * what devices should receive the event (children and/or parents)
- * what enqueued events should be dequeued
+/*
+ event_pref array is an array of event settings.
+ includes:
+ what devices should receive the event (children and/or parents)
+ what enqueued events should be dequeued
*/
uint32_t event_pref[32] =
@@ -743,24 +743,40 @@
REL_CHILD | REL_PARENT, /* EV_SHUTDOWN */ /* TODO */
};
-#define IOR_QUEUES_NUM 1
-#define IOR_QUEUE_DEF 0
+
+/**
+ * BUS IO SUBSYSTEM API
+ */
struct ior;
typedef struct ior* ior_t;
+/*
+ ior_create() and ior_destroy() should be used by the origin device.
+ */
ior_t ior_create (device_t origin, int type, void* data,
- ior_t *parents, int pcount, char* path,
- int enqueue, int queue_id);
+ ior_t *parents, int pcount, char* path, int prio);
int ior_destroy (ior_t r);
void ior_set_flags (ior_t r, u_int32_t val);
void ior_add_flags (ior_t r, u_int32_t val);
u_int32_t ior_get_flags (ior_t r);
-int ior_set_path (ior_t r, device_t origin, char* path);
-int ior_get_path (ior_t r, device_t** dev_path, int* path_len);
-void ior_enqueue_adv (ior_t r, int queue_id);
-void ior_enqueue (ior_t r);
-int ior_dequeue (ior_t r);
+int ior_get_path (ior_t r, device_t** dev_path, int* path_len);
+/*
+ ior_pause() and ior_resume() should be used to withdraw an ior
+ from the bus I/O subsystem for delayed processing, so that a worker
+ thread does not have to sleep while the ior is deferred.
+ ior_pause() must be called ONLY from within the ior_do() and
+ ior_done() functions.
+ */
+void ior_pause (ior_t r);
+void ior_resume (ior_t r);
+/*
+ ior_complete() is to be used by non-endpoint devices to stop processing
+ of an ior. The ior flags are set to a value >= IORF_DONE.
+ */
+int ior_complete (ior_t r, u_int32_t flags);
+void ior_set_priority (ior_t r, int prio);
+#define IOR_PRIO_DEF 0
#endif /* _KERNEL */
More information about the p4-projects
mailing list