[OPTIM] reduce the number of calls to task_wakeup()

A test has shown that more than 16% of the calls to task_wakeup()
could be avoided because the task is already woken up. So make
task_wakeup() an inline wrapper and move the TASK_RUNNING check into
the inline part, so the call to the real function is skipped early.
diff --git a/include/proto/task.h b/include/proto/task.h
index 10f6b42..fbb9ee8 100644
--- a/include/proto/task.h
+++ b/include/proto/task.h
@@ -41,7 +41,13 @@
 int init_task();
 
 /* puts the task <t> in run queue <q>, and returns <t> */
-struct task *task_wakeup(struct task *t);
+struct task *__task_wakeup(struct task *t);
+static inline struct task *task_wakeup(struct task *t)
+{
+	if (t->state == TASK_RUNNING)
+		return t;
+	return __task_wakeup(t);
+}
 
 /* removes the task <t> from the run queue if it was in it.
  * returns <t>.
diff --git a/src/task.c b/src/task.c
index 15539c2..5182b81 100644
--- a/src/task.c
+++ b/src/task.c
@@ -131,11 +131,8 @@
  * size. A nice value of -1024 sets the task to -run_queue*32, while a nice
  * value of 1024 sets the task to run_queue*32.
  */
-struct task *task_wakeup(struct task *t)
+struct task *__task_wakeup(struct task *t)
 {
-	if (t->state == TASK_RUNNING)
-		return t;
-
 	task_dequeue(t);
 
 	run_queue++;
@@ -231,7 +228,7 @@
 
 			/* detach the task from the queue and add the task to the run queue */
 			eb = eb32_next(eb);
-			task_wakeup(task);
+			__task_wakeup(task);
 		}
 		tree = (tree + 1) & TIMER_TREE_MASK;
 	} while (((tree - now_tree) & TIMER_TREE_MASK) < TIMER_TREES/2);