/*
* rb-tree defines
*/
-#define RB_NONE (2)
#define RB_EMPTY(node) ((node)->rb_node == NULL)
-#define RB_CLEAR_COLOR(node) (node)->rb_color = RB_NONE
#define RB_CLEAR(node) do { \
- (node)->rb_parent = NULL; \
- RB_CLEAR_COLOR((node)); \
- (node)->rb_right = NULL; \
- (node)->rb_left = NULL; \
+ memset(node, 0, sizeof(*node)); \
} while (0)
#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
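The memset()-based RB_CLEAR (and the same consolidation in cfq_alloc_io_context further down) is valid because every value the old macro stored is all-zero bytes once RB_NONE is gone. A minimal userspace sketch of the idea, using a hypothetical node type rather than the kernel's real rb_node:

#include <stdio.h>
#include <string.h>

/* hypothetical stand-in for the node being cleared; zeroing the
 * whole struct matches the old field-by-field clear as long as
 * NULL pointers and the cleared color are all-zero bytes (true
 * on every target Linux supports) */
struct node {
	struct node *parent;
	int color;
	struct node *right, *left;
};

int main(void)
{
	struct node n = { &n, 2, &n, &n };

	memset(&n, 0, sizeof(n));	/* one store instead of four */
	printf("parent=%p color=%d right=%p left=%p\n",
	       (void *)n.parent, n.color, (void *)n.right, (void *)n.left);
	return 0;
}

Dropping RB_NONE means zero becomes the only "cleared" color, which is what makes the single memset() sufficient.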
mempool_t *crq_pool;
int rq_in_driver;
+ int hw_tag;
/*
 * schedule slice state info
 */
/*
- * if queue was preempted, just add to front to be fair. busy_rr
- * isn't sorted.
+ * if queue was preempted, requeue it just ahead of the last entry
+ * so it doesn't lose its turn. busy_rr isn't sorted, so insert at
+ * the back for fairness.
*/
if (preempted || list == &cfqd->busy_rr) {
- list_add(&cfqq->cfq_list, list);
+ if (preempted)
+ list = list->prev;
+
+ list_add_tail(&cfqq->cfq_list, list);
return;
}
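The effect of the list = list->prev dance is easiest to see with the list primitives spelled out. A self-contained userspace sketch, re-implementing just enough of the <linux/list.h> API (this is not the kernel's code):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* insert new just before head: with a list head this appends at the
 * tail, with an ordinary node it inserts in front of that node */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head head, a, b, preempted, *p;

	INIT_LIST_HEAD(&head);
	list_add_tail(&a, &head);		/* list: a */
	list_add_tail(&b, &head);		/* list: a b */

	/* the preempted case: head.prev is the last entry (b), so the
	 * new node lands just ahead of it */
	list_add_tail(&preempted, head.prev);	/* list: a preempted b */

	for (p = head.next; p != &head; p = p->next)
		printf("%s ", p == &a ? "a" : p == &b ? "b" : "preempted");
	printf("\n");
	return 0;
}

So a preempted queue is requeued just in front of the last queue on the list (presumably the one that just preempted it), while the unsorted busy_rr case takes the plain add-at-tail path.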
cfq_update_next_crq(crq);
rb_erase(&crq->rb_node, &cfqq->sort_list);
- RB_CLEAR_COLOR(&crq->rb_node);
if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
cfq_del_cfqq_rr(cfqd, cfqq);
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->rq_in_driver++;
+
+ /*
+ * If the depth is larger than 1, the device really could be
+ * queueing. But let's set the mark a little higher: idling can
+ * still pay off at low queue depths, and a small depth could
+ * also just indicate SCSI-mid-layer-like behaviour, where
+ * limit+1 is often seen.
+ */
+ if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
+ cfqd->hw_tag = 1;
}
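The detection is a sticky high-water mark on the in-driver count: once the depth has ever exceeded 4, hw_tag stays set. A toy userspace simulation of just that bookkeeping (illustrative only, not kernel code):

#include <stdio.h>

static int rq_in_driver, hw_tag;

static void activate(void)		/* request handed to the driver */
{
	rq_in_driver++;
	if (!hw_tag && rq_in_driver > 4)
		hw_tag = 1;		/* sticky: never cleared again */
}

static void deactivate(void)		/* request completed/requeued */
{
	if (rq_in_driver > 0)
		rq_in_driver--;
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		activate();
	deactivate();
	printf("depth=%d hw_tag=%d\n", rq_in_driver, hw_tag);	/* 3, 0 */

	for (i = 0; i < 3; i++)
		activate();
	printf("depth=%d hw_tag=%d\n", rq_in_driver, hw_tag);	/* 6, 1 */
	return 0;
}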
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
if (cic) {
- RB_CLEAR(&cic->rb_node);
- cic->key = NULL;
- cic->cfqq[ASYNC] = NULL;
- cic->cfqq[SYNC] = NULL;
+ memset(cic, 0, sizeof(*cic));
cic->last_end_request = jiffies;
- cic->ttime_total = 0;
- cic->ttime_samples = 0;
- cic->ttime_mean = 0;
+ INIT_LIST_HEAD(&cic->queue_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
- INIT_LIST_HEAD(&cic->queue_list);
atomic_inc(&ioc_count);
}
* set ->slice_left to allow preemption for a new process
*/
cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
- cfq_mark_cfqq_idle_window(cfqq);
+ if (!cfqd->hw_tag)
+ cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_prio_changed(cfqq);
cfq_init_prio_data(cfqq);
}
{
int enable_idle = cfq_cfqq_idle_window(cfqq);
- if (!cic->ioc->task || !cfqd->cfq_slice_idle)
+ if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
* race with a non-idle queue, reset timer
*/
end = cfqd->last_end_request + CFQ_IDLE_GRACE;
- if (!time_after_eq(jiffies, end)) {
- cfqd->idle_class_timer.expires = end;
- add_timer(&cfqd->idle_class_timer);
- } else
+ if (!time_after_eq(jiffies, end))
+ mod_timer(&cfqd->idle_class_timer, end);
+ else
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
kfree(cfqd);
}
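mod_timer() folds the expiry update and re-arm into one call; per the timer API's documented equivalence, the new branch amounts to roughly:

	/* what mod_timer(&cfqd->idle_class_timer, end) expands to in
	 * effect; the del_timer() step is what makes it safe even if
	 * the timer is already pending, which a bare add_timer() is not */
	del_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.expires = end;
	add_timer(&cfqd->idle_class_timer);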
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
{
struct cfq_data *cfqd;
int i;
cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
if (!cfqd)
- return -ENOMEM;
+ return NULL;
memset(cfqd, 0, sizeof(*cfqd));
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
- e->elevator_data = cfqd;
-
cfqd->queue = q;
cfqd->max_queued = q->nr_requests / 4;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
- return 0;
+ return cfqd;
out_crqpool:
kfree(cfqd->cfq_hash);
out_cfqhash:
kfree(cfqd->crq_hash);
out_crqhash:
kfree(cfqd);
- return -ENOMEM;
+ return NULL;
}
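For context on the signature change: with this elevator API the core allocates the elevator_t itself, calls ->elevator_init_fn(), and attaches the returned pointer, so NULL becomes the failure signal where -ENOMEM used to be. The caller side looks roughly like this (a simplified sketch, not a verbatim copy of elevator.c):

	void *data = eq->ops->elevator_init_fn(q, eq);
	if (!data)
		return -ENOMEM;		/* scheduler-private alloc failed */

	q->elevator = eq;
	eq->elevator_data = data;	/* was set by cfq_init_queue before */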
static void cfq_slab_kill(void)