diff --git a/block/as-iosched.c b/block/as-iosched.c
index 8da3cf66894c379b1b4908d94602c49f77cb473e..296708ceceb2d62a8bacd8a954efeb1108d5547e 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -182,6 +182,10 @@ struct as_rq {
 
 static kmem_cache_t *arq_pool;
 
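+/* track live io contexts so as_exit() can wait for the last one on unload */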
+static atomic_t ioc_count = ATOMIC_INIT(0);
+static struct completion *ioc_gone;
+
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
 static void as_antic_stop(struct as_data *ad);
 
@@ -193,6 +196,15 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
+	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
+		complete(ioc_gone);
+}
+
+static void as_trim(struct io_context *ioc)
+{
+	if (ioc->aic)
+		free_as_io_context(ioc->aic);
+	ioc->aic = NULL;
 }
 
 /* Called when the task exits */
@@ -220,6 +232,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
+		atomic_inc(&ioc_count);
 	}
 
 	return ret;
@@ -1696,11 +1709,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 /*
  * sysfs parts below
  */
-struct as_fs_entry {
-	struct attribute attr;
-	ssize_t (*show)(struct as_data *, char *);
-	ssize_t (*store)(struct as_data *, const char *, size_t);
-};
 
 static ssize_t
 as_var_show(unsigned int var, char *page)
@@ -1717,8 +1725,9 @@ as_var_store(unsigned long *var, const char *page, size_t count)
 	return count;
 }
 
-static ssize_t as_est_show(struct as_data *ad, char *page)
+static ssize_t est_time_show(elevator_t *e, char *page)
 {
+	struct as_data *ad = e->elevator_data;
 	int pos = 0;
 
 	pos += sprintf(page+pos, "%lu %% exit probability\n",
@@ -1734,21 +1743,23 @@ static ssize_t as_est_show(struct as_data *ad, char *page)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR)				\
-static ssize_t __FUNC(struct as_data *ad, char *page)		\
+static ssize_t __FUNC(elevator_t *e, char *page)		\
 {								\
+	struct as_data *ad = e->elevator_data;			\
 	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
 }
-SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
-SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
-SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
+SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
-static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count)	\
+static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
 {									\
-	int ret = as_var_store(__PTR, (page), count);		\
+	struct as_data *ad = e->elevator_data;				\
+	int ret = as_var_store(__PTR, (page), count);			\
 	if (*(__PTR) < (MIN))						\
 		*(__PTR) = (MIN);					\
 	else if (*(__PTR) > (MAX))					\
@@ -1756,90 +1767,26 @@ static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count)	\
 	*(__PTR) = msecs_to_jiffies(*(__PTR));				\
 	return ret;							\
 }
-STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
-STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
-STORE_FUNCTION(as_read_batchexpire_store,
+STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
+STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
+STORE_FUNCTION(as_read_batch_expire_store,
 			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_write_batchexpire_store,
+STORE_FUNCTION(as_write_batch_expire_store,
 			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
 #undef STORE_FUNCTION
 
-static struct as_fs_entry as_est_entry = {
-	.attr = {.name = "est_time", .mode = S_IRUGO },
-	.show = as_est_show,
-};
-static struct as_fs_entry as_readexpire_entry = {
-	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_readexpire_show,
-	.store = as_readexpire_store,
-};
-static struct as_fs_entry as_writeexpire_entry = {
-	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_writeexpire_show,
-	.store = as_writeexpire_store,
-};
-static struct as_fs_entry as_anticexpire_entry = {
-	.attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_anticexpire_show,
-	.store = as_anticexpire_store,
-};
-static struct as_fs_entry as_read_batchexpire_entry = {
-	.attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_read_batchexpire_show,
-	.store = as_read_batchexpire_store,
-};
-static struct as_fs_entry as_write_batchexpire_entry = {
-	.attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_write_batchexpire_show,
-	.store = as_write_batchexpire_store,
-};
-
-static struct attribute *default_attrs[] = {
-	&as_est_entry.attr,
-	&as_readexpire_entry.attr,
-	&as_writeexpire_entry.attr,
-	&as_anticexpire_entry.attr,
-	&as_read_batchexpire_entry.attr,
-	&as_write_batchexpire_entry.attr,
-	NULL,
-};
-
-#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
-
-static ssize_t
-as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct as_fs_entry *entry = to_as(attr);
-
-	if (!entry->show)
-		return -EIO;
-
-	return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-as_attr_store(struct kobject *kobj, struct attribute *attr,
-		    const char *page, size_t length)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct as_fs_entry *entry = to_as(attr);
-
-	if (!entry->store)
-		return -EIO;
-
-	return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops as_sysfs_ops = {
-	.show	= as_attr_show,
-	.store	= as_attr_store,
-};
-
-static struct kobj_type as_ktype = {
-	.sysfs_ops	= &as_sysfs_ops,
-	.default_attrs	= default_attrs,
+#define AS_ATTR(name) \
+	__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)
+
+static struct elv_fs_entry as_attrs[] = {
+	__ATTR_RO(est_time),
+	AS_ATTR(read_expire),
+	AS_ATTR(write_expire),
+	AS_ATTR(antic_expire),
+	AS_ATTR(read_batch_expire),
+	AS_ATTR(write_batch_expire),
+	__ATTR_NULL
 };
 
 static struct elevator_type iosched_as = {
@@ -1860,9 +1807,10 @@ static struct elevator_type iosched_as = {
 		.elevator_may_queue_fn =	as_may_queue,
 		.elevator_init_fn =		as_init_queue,
 		.elevator_exit_fn =		as_exit_queue,
+		.trim =				as_trim,
 	},
 
-	.elevator_ktype = &as_ktype,
+	.elevator_attrs = as_attrs,
 	.elevator_name = "anticipatory",
 	.elevator_owner = THIS_MODULE,
 };
@@ -1893,7 +1841,14 @@ static int __init as_init(void)
 
 static void __exit as_exit(void)
 {
+	DECLARE_COMPLETION(all_gone);
 	elv_unregister(&iosched_as);
+	ioc_gone = &all_gone;
+	/* ioc_gone's update must be visible before reading ioc_count */
+	smp_wmb();
+	if (atomic_read(&ioc_count))
+		wait_for_completion(ioc_gone);
+	synchronize_rcu();
 	kmem_cache_destroy(arq_pool);
 }
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c8dbe38c81c80bf1544901bb7b16ac6cd73337e0..c4a0d5d8d7f0dd4080ae63c7897c8fcdde3b10d7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -6,21 +6,13 @@
  *
  *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
  */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
 #include <linux/config.h>
 #include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
 #include <linux/hash.h>
 #include <linux/rbtree.h>
-#include <linux/mempool.h>
 #include <linux/ioprio.h>
-#include <linux/writeback.h>
 
 /*
  * tunables
@@ -47,6 +39,9 @@ static int cfq_slice_idle = HZ / 100;
  */
 static const int cfq_max_depth = 2;
 
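+/* guards cic lists against racing queue exit and io priority changes */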
+static DEFINE_RWLOCK(cfq_exit_lock);
+
 /*
  * for the hash of cfqq inside the cfqd
  */
@@ -89,6 +83,10 @@ static kmem_cache_t *crq_pool;
 static kmem_cache_t *cfq_pool;
 static kmem_cache_t *cfq_ioc_pool;
 
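+/* track live cfq io contexts so cfq_exit() can wait for them on unload */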
+static atomic_t ioc_count = ATOMIC_INIT(0);
+static struct completion *ioc_gone;
+
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_be(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
@@ -109,7 +106,6 @@ static kmem_cache_t *cfq_ioc_pool;
  * Per block device queue structure
  */
 struct cfq_data {
-	atomic_t ref;
 	request_queue_t *queue;
 
 	/*
@@ -175,6 +171,9 @@ struct cfq_data {
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
 	unsigned int cfq_max_depth;
+
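+	/* cfq_io_contexts attached to this queue, reaped in cfq_exit_queue() */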
+	struct list_head cic_list;
 };
 
 /*
@@ -288,7 +286,7 @@ CFQ_CRQ_FNS(is_sync);
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
-static void cfq_put_cfqd(struct cfq_data *cfqd);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
 
 #define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
 
@@ -1160,8 +1158,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	if (unlikely(cfqd->active_queue == cfqq))
 		__cfq_slice_expired(cfqd, cfqq, 0);
 
-	cfq_put_cfqd(cfqq->cfqd);
-
 	/*
 	 * it's on the empty list and still hashed
 	 */
@@ -1179,7 +1175,7 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
 
 	hlist_for_each_safe(entry, next, hash_list) {
 		struct cfq_queue *__cfqq = list_entry_qhash(entry);
-		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);
+		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
 
 		if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
 			return __cfqq;
@@ -1198,13 +1194,25 @@ static void cfq_free_io_context(struct cfq_io_context *cic)
 {
 	struct cfq_io_context *__cic;
 	struct list_head *entry, *next;
+	int freed = 1;
 
 	list_for_each_safe(entry, next, &cic->list) {
 		__cic = list_entry(entry, struct cfq_io_context, list);
 		kmem_cache_free(cfq_ioc_pool, __cic);
+		freed++;
 	}
 
 	kmem_cache_free(cfq_ioc_pool, cic);
+	if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
+		complete(ioc_gone);
+}
+
+static void cfq_trim(struct io_context *ioc)
+{
+	ioc->set_ioprio = NULL;
+	if (ioc->cic)
+		cfq_free_io_context(ioc->cic);
+	ioc->cic = NULL;
 }
 
 /*
@@ -1212,25 +1219,37 @@ static void cfq_free_io_context(struct cfq_io_context *cic)
  */
 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 {
-	struct cfq_data *cfqd = cic->cfqq->cfqd;
-	request_queue_t *q = cfqd->queue;
+	struct cfq_data *cfqd = cic->key;
+	request_queue_t *q;
+
+	if (!cfqd)
+		return;
+
+	q = cfqd->queue;
 
 	WARN_ON(!irqs_disabled());
 
 	spin_lock(q->queue_lock);
 
-	if (unlikely(cic->cfqq == cfqd->active_queue))
-		__cfq_slice_expired(cfqd, cic->cfqq, 0);
+	if (cic->cfqq[ASYNC]) {
+		if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue))
+			__cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0);
+		cfq_put_queue(cic->cfqq[ASYNC]);
+		cic->cfqq[ASYNC] = NULL;
+	}
+
+	if (cic->cfqq[SYNC]) {
+		if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue))
+			__cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0);
+		cfq_put_queue(cic->cfqq[SYNC]);
+		cic->cfqq[SYNC] = NULL;
+	}
 
-	cfq_put_queue(cic->cfqq);
-	cic->cfqq = NULL;
+	cic->key = NULL;
+	list_del_init(&cic->queue_list);
 	spin_unlock(q->queue_lock);
 }
 
-/*
- * Another task may update the task cic list, if it is doing a queue lookup
- * on its behalf. cfq_cic_lock excludes such concurrent updates
- */
 static void cfq_exit_io_context(struct cfq_io_context *cic)
 {
 	struct cfq_io_context *__cic;
@@ -1242,12 +1261,14 @@ static void cfq_exit_io_context(struct cfq_io_context *cic)
 	/*
 	 * put the reference this task is holding to the various queues
 	 */
+	read_lock(&cfq_exit_lock);
 	list_for_each(entry, &cic->list) {
 		__cic = list_entry(entry, struct cfq_io_context, list);
 		cfq_exit_single_io_context(__cic);
 	}
 
 	cfq_exit_single_io_context(cic);
+	read_unlock(&cfq_exit_lock);
 	local_irq_restore(flags);
 }
 
@@ -1258,7 +1279,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 
 	if (cic) {
 		INIT_LIST_HEAD(&cic->list);
-		cic->cfqq = NULL;
+		cic->cfqq[ASYNC] = NULL;
+		cic->cfqq[SYNC] = NULL;
 		cic->key = NULL;
 		cic->last_end_request = jiffies;
 		cic->ttime_total = 0;
@@ -1266,6 +1288,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		cic->ttime_mean = 0;
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
+		INIT_LIST_HEAD(&cic->queue_list);
+		atomic_inc(&ioc_count);
 	}
 
 	return cic;
@@ -1318,14 +1342,28 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
 	cfq_clear_cfqq_prio_changed(cfqq);
 }
 
-static inline void changed_ioprio(struct cfq_queue *cfqq)
+static inline void changed_ioprio(struct cfq_io_context *cic)
 {
-	if (cfqq) {
-		struct cfq_data *cfqd = cfqq->cfqd;
-
+	struct cfq_data *cfqd = cic->key;
+	struct cfq_queue *cfqq;
+	if (cfqd) {
 		spin_lock(cfqd->queue->queue_lock);
-		cfq_mark_cfqq_prio_changed(cfqq);
-		cfq_init_prio_data(cfqq);
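+		/* the async queue is keyed on io priority: get one for the new prio */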
+		cfqq = cic->cfqq[ASYNC];
+		if (cfqq) {
+			struct cfq_queue *new_cfqq;
+			new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
+						cic->ioc->task, GFP_ATOMIC);
+			if (new_cfqq) {
+				cic->cfqq[ASYNC] = new_cfqq;
+				cfq_put_queue(cfqq);
+			}
+		}
+		cfqq = cic->cfqq[SYNC];
+		if (cfqq) {
+			cfq_mark_cfqq_prio_changed(cfqq);
+			cfq_init_prio_data(cfqq);
+		}
 		spin_unlock(cfqd->queue->queue_lock);
 	}
 }
@@ -1335,24 +1372,32 @@ static inline void changed_ioprio(struct cfq_queue *cfqq)
  */
 static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
 {
-	struct cfq_io_context *cic = ioc->cic;
+	struct cfq_io_context *cic;
+
+	write_lock(&cfq_exit_lock);
+
+	cic = ioc->cic;
 
-	changed_ioprio(cic->cfqq);
+	changed_ioprio(cic);
 
 	list_for_each_entry(cic, &cic->list, list)
-		changed_ioprio(cic->cfqq);
+		changed_ioprio(cic);
+
+	write_unlock(&cfq_exit_lock);
 
 	return 0;
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
+cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
 	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
+	unsigned short ioprio;
 
 retry:
+	ioprio = tsk->ioprio;
 	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
 
 	if (!cfqq) {
@@ -1381,7 +1426,6 @@ retry:
 		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
 		atomic_set(&cfqq->ref, 0);
 		cfqq->cfqd = cfqd;
-		atomic_inc(&cfqd->ref);
 		cfqq->service_last = 0;
 		/*
 		 * set ->slice_left to allow preemption for a new process
@@ -1419,6 +1463,7 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 	if (!ioc)
 		return NULL;
 
+restart:
 	if ((cic = ioc->cic) == NULL) {
 		cic = cfq_alloc_io_context(cfqd, gfp_mask);
 
@@ -1429,11 +1474,13 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 		 * manually increment generic io_context usage count, it
 		 * cannot go away since we are already holding one ref to it
 		 */
-		ioc->cic = cic;
-		ioc->set_ioprio = cfq_ioc_set_ioprio;
 		cic->ioc = ioc;
 		cic->key = cfqd;
-		atomic_inc(&cfqd->ref);
+		read_lock(&cfq_exit_lock);
+		ioc->set_ioprio = cfq_ioc_set_ioprio;
+		ioc->cic = cic;
+		list_add(&cic->queue_list, &cfqd->cic_list);
+		read_unlock(&cfq_exit_lock);
 	} else {
 		struct cfq_io_context *__cic;
 
@@ -1443,6 +1490,21 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 		if (cic->key == cfqd)
 			goto out;
 
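+		/* queue teardown cleared this cic's key: unlink, free and rescan */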
+		if (unlikely(!cic->key)) {
+			read_lock(&cfq_exit_lock);
+			if (list_empty(&cic->list))
+				ioc->cic = NULL;
+			else
+				ioc->cic = list_entry(cic->list.next,
+						      struct cfq_io_context,
+						      list);
+			read_unlock(&cfq_exit_lock);
+			kmem_cache_free(cfq_ioc_pool, cic);
+			atomic_dec(&ioc_count);
+			goto restart;
+		}
+
 		/*
 		 * cic exists, check if we already are there. linear search
 		 * should be ok here, the list will usually not be more than
@@ -1457,6 +1518,14 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 				cic = __cic;
 				goto out;
 			}
+			if (unlikely(!__cic->key)) {
+				read_lock(&cfq_exit_lock);
+				list_del(&__cic->list);
+				read_unlock(&cfq_exit_lock);
+				kmem_cache_free(cfq_ioc_pool, __cic);
+				atomic_dec(&ioc_count);
+				goto restart;
+			}
 		}
 
 		/*
@@ -1469,8 +1538,10 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 
 		__cic->ioc = ioc;
 		__cic->key = cfqd;
-		atomic_inc(&cfqd->ref);
+		read_lock(&cfq_exit_lock);
 		list_add(&__cic->list, &cic->list);
+		list_add(&__cic->queue_list, &cfqd->cic_list);
+		read_unlock(&cfq_exit_lock);
 		cic = __cic;
 	}
 
@@ -1890,6 +1961,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	struct cfq_queue *cfqq;
 	struct cfq_rq *crq;
 	unsigned long flags;
+	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -1900,14 +1972,14 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	if (!cic)
 		goto queue_fail;
 
-	if (!cic->cfqq) {
-		cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
+	if (!cic->cfqq[is_sync]) {
+		cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
 		if (!cfqq)
 			goto queue_fail;
 
-		cic->cfqq = cfqq;
+		cic->cfqq[is_sync] = cfqq;
 	} else
-		cfqq = cic->cfqq;
+		cfqq = cic->cfqq[is_sync];
 
 	cfqq->allocated[rw]++;
 	cfq_clear_cfqq_must_alloc(cfqq);
@@ -1924,7 +1996,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 		crq->cfq_queue = cfqq;
 		crq->io_context = cic;
 
-		if (rw == READ || process_sync(tsk))
+		if (is_sync)
 			cfq_mark_crq_is_sync(crq);
 		else
 			cfq_clear_crq_is_sync(crq);
@@ -2055,15 +2127,36 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 	blk_sync_queue(cfqd->queue);
 }
 
-static void cfq_put_cfqd(struct cfq_data *cfqd)
+static void cfq_exit_queue(elevator_t *e)
 {
+	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
 
-	if (!atomic_dec_and_test(&cfqd->ref))
-		return;
+	cfq_shutdown_timer_wq(cfqd);
+	write_lock(&cfq_exit_lock);
+	spin_lock_irq(q->queue_lock);
+	if (cfqd->active_queue)
+		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
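+	/* unhook every io context still attached to this queue */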
+	while (!list_empty(&cfqd->cic_list)) {
+		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
+							struct cfq_io_context,
+							queue_list);
+		if (cic->cfqq[ASYNC]) {
+			cfq_put_queue(cic->cfqq[ASYNC]);
+			cic->cfqq[ASYNC] = NULL;
+		}
+		if (cic->cfqq[SYNC]) {
+			cfq_put_queue(cic->cfqq[SYNC]);
+			cic->cfqq[SYNC] = NULL;
+		}
+		cic->key = NULL;
+		list_del_init(&cic->queue_list);
+	}
+	spin_unlock_irq(q->queue_lock);
+	write_unlock(&cfq_exit_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
-	blk_put_queue(q);
 
 	mempool_destroy(cfqd->crq_pool);
 	kfree(cfqd->crq_hash);
@@ -2071,14 +2163,6 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
 	kfree(cfqd);
 }
 
-static void cfq_exit_queue(elevator_t *e)
-{
-	struct cfq_data *cfqd = e->elevator_data;
-
-	cfq_shutdown_timer_wq(cfqd);
-	cfq_put_cfqd(cfqd);
-}
-
 static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct cfq_data *cfqd;
@@ -2097,6 +2181,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&cfqd->cur_rr);
 	INIT_LIST_HEAD(&cfqd->idle_rr);
 	INIT_LIST_HEAD(&cfqd->empty_list);
+	INIT_LIST_HEAD(&cfqd->cic_list);
 
 	cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
 	if (!cfqd->crq_hash)
@@ -2118,7 +2203,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	e->elevator_data = cfqd;
 
 	cfqd->queue = q;
-	atomic_inc(&q->refcnt);
 
 	cfqd->max_queued = q->nr_requests / 4;
 	q->nr_batching = cfq_queued;
@@ -2133,8 +2217,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
 
-	atomic_set(&cfqd->ref, 1);
-
 	cfqd->cfq_queued = cfq_queued;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2193,11 +2275,6 @@ fail:
 /*
  * sysfs parts below -->
  */
-struct cfq_fs_entry {
-	struct attribute attr;
-	ssize_t (*show)(struct cfq_data *, char *);
-	ssize_t (*store)(struct cfq_data *, const char *, size_t);
-};
 
 static ssize_t
 cfq_var_show(unsigned int var, char *page)
@@ -2215,8 +2292,9 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
-static ssize_t __FUNC(struct cfq_data *cfqd, char *page)		\
+static ssize_t __FUNC(elevator_t *e, char *page)			\
 {									\
+	struct cfq_data *cfqd = e->elevator_data;			\
 	unsigned int __data = __VAR;					\
 	if (__CONV)							\
 		__data = jiffies_to_msecs(__data);			\
@@ -2226,8 +2304,8 @@ SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
 SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
-SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
-SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
+SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
+SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
@@ -2236,8 +2314,9 @@ SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
-static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)	\
+static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
 {									\
+	struct cfq_data *cfqd = e->elevator_data;			\
 	unsigned int __data;						\
 	int ret = cfq_var_store(&__data, (page), count);		\
 	if (__data < (MIN))						\
@@ -2254,8 +2333,8 @@ STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
@@ -2263,112 +2342,22 @@ STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX,
 STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
 #undef STORE_FUNCTION
 
-static struct cfq_fs_entry cfq_quantum_entry = {
-	.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_quantum_show,
-	.store = cfq_quantum_store,
-};
-static struct cfq_fs_entry cfq_queued_entry = {
-	.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_queued_show,
-	.store = cfq_queued_store,
-};
-static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
-	.attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_fifo_expire_sync_show,
-	.store = cfq_fifo_expire_sync_store,
-};
-static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
-	.attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_fifo_expire_async_show,
-	.store = cfq_fifo_expire_async_store,
-};
-static struct cfq_fs_entry cfq_back_max_entry = {
-	.attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_back_max_show,
-	.store = cfq_back_max_store,
-};
-static struct cfq_fs_entry cfq_back_penalty_entry = {
-	.attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_back_penalty_show,
-	.store = cfq_back_penalty_store,
-};
-static struct cfq_fs_entry cfq_slice_sync_entry = {
-	.attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_slice_sync_show,
-	.store = cfq_slice_sync_store,
-};
-static struct cfq_fs_entry cfq_slice_async_entry = {
-	.attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_slice_async_show,
-	.store = cfq_slice_async_store,
-};
-static struct cfq_fs_entry cfq_slice_async_rq_entry = {
-	.attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_slice_async_rq_show,
-	.store = cfq_slice_async_rq_store,
-};
-static struct cfq_fs_entry cfq_slice_idle_entry = {
-	.attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_slice_idle_show,
-	.store = cfq_slice_idle_store,
-};
-static struct cfq_fs_entry cfq_max_depth_entry = {
-	.attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_max_depth_show,
-	.store = cfq_max_depth_store,
-};
-
-static struct attribute *default_attrs[] = {
-	&cfq_quantum_entry.attr,
-	&cfq_queued_entry.attr,
-	&cfq_fifo_expire_sync_entry.attr,
-	&cfq_fifo_expire_async_entry.attr,
-	&cfq_back_max_entry.attr,
-	&cfq_back_penalty_entry.attr,
-	&cfq_slice_sync_entry.attr,
-	&cfq_slice_async_entry.attr,
-	&cfq_slice_async_rq_entry.attr,
-	&cfq_slice_idle_entry.attr,
-	&cfq_max_depth_entry.attr,
-	NULL,
-};
-
-#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
-
-static ssize_t
-cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct cfq_fs_entry *entry = to_cfq(attr);
-
-	if (!entry->show)
-		return -EIO;
-
-	return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-cfq_attr_store(struct kobject *kobj, struct attribute *attr,
-	       const char *page, size_t length)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct cfq_fs_entry *entry = to_cfq(attr);
-
-	if (!entry->store)
-		return -EIO;
-
-	return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops cfq_sysfs_ops = {
-	.show	= cfq_attr_show,
-	.store	= cfq_attr_store,
-};
-
-static struct kobj_type cfq_ktype = {
-	.sysfs_ops	= &cfq_sysfs_ops,
-	.default_attrs	= default_attrs,
+#define CFQ_ATTR(name) \
+	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
+
+static struct elv_fs_entry cfq_attrs[] = {
+	CFQ_ATTR(quantum),
+	CFQ_ATTR(queued),
+	CFQ_ATTR(fifo_expire_sync),
+	CFQ_ATTR(fifo_expire_async),
+	CFQ_ATTR(back_seek_max),
+	CFQ_ATTR(back_seek_penalty),
+	CFQ_ATTR(slice_sync),
+	CFQ_ATTR(slice_async),
+	CFQ_ATTR(slice_async_rq),
+	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(max_depth),
+	__ATTR_NULL
 };
 
 static struct elevator_type iosched_cfq = {
@@ -2389,8 +2378,9 @@ static struct elevator_type iosched_cfq = {
 		.elevator_may_queue_fn =	cfq_may_queue,
 		.elevator_init_fn =		cfq_init_queue,
 		.elevator_exit_fn =		cfq_exit_queue,
+		.trim =				cfq_trim,
 	},
-	.elevator_ktype =	&cfq_ktype,
+	.elevator_attrs =	cfq_attrs,
 	.elevator_name =	"cfq",
 	.elevator_owner =	THIS_MODULE,
 };
@@ -2419,7 +2409,14 @@ static int __init cfq_init(void)
 
 static void __exit cfq_exit(void)
 {
+	DECLARE_COMPLETION(all_gone);
 	elv_unregister(&iosched_cfq);
+	ioc_gone = &all_gone;
+	/* ioc_gone's update must be visible before reading ioc_count */
+	smp_wmb();
+	if (atomic_read(&ioc_count))
+		wait_for_completion(ioc_gone);
+	synchronize_rcu();
 	cfq_slab_kill();
 }
 
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 27e494b1bf97039d4095c47d71b42e1212b0b169..399fa1e60e1fd6b7ca8200b7d5ef14a67b0b7672 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -694,11 +694,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 /*
  * sysfs parts below
  */
-struct deadline_fs_entry {
-	struct attribute attr;
-	ssize_t (*show)(struct deadline_data *, char *);
-	ssize_t (*store)(struct deadline_data *, const char *, size_t);
-};
 
 static ssize_t
 deadline_var_show(int var, char *page)
@@ -716,23 +711,25 @@ deadline_var_store(int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
-static ssize_t __FUNC(struct deadline_data *dd, char *page)		\
+static ssize_t __FUNC(elevator_t *e, char *page)			\
 {									\
-	int __data = __VAR;					\
+	struct deadline_data *dd = e->elevator_data;			\
+	int __data = __VAR;						\
 	if (__CONV)							\
 		__data = jiffies_to_msecs(__data);			\
 	return deadline_var_show(__data, (page));			\
 }
-SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
-SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
-SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
-SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
-SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
+SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
+SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
+SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
+SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
+SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
-static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)	\
+static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
 {									\
+	struct deadline_data *dd = e->elevator_data;			\
 	int __data;							\
 	int ret = deadline_var_store(&__data, (page), count);		\
 	if (__data < (MIN))						\
@@ -745,83 +742,24 @@ static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)
 		*(__PTR) = __data;					\
 	return ret;							\
 }
-STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
-STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
-STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
+STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
+STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
+STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
 #undef STORE_FUNCTION
 
-static struct deadline_fs_entry deadline_readexpire_entry = {
-	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_readexpire_show,
-	.store = deadline_readexpire_store,
-};
-static struct deadline_fs_entry deadline_writeexpire_entry = {
-	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_writeexpire_show,
-	.store = deadline_writeexpire_store,
-};
-static struct deadline_fs_entry deadline_writesstarved_entry = {
-	.attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_writesstarved_show,
-	.store = deadline_writesstarved_store,
-};
-static struct deadline_fs_entry deadline_frontmerges_entry = {
-	.attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_frontmerges_show,
-	.store = deadline_frontmerges_store,
-};
-static struct deadline_fs_entry deadline_fifobatch_entry = {
-	.attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_fifobatch_show,
-	.store = deadline_fifobatch_store,
-};
-
-static struct attribute *default_attrs[] = {
-	&deadline_readexpire_entry.attr,
-	&deadline_writeexpire_entry.attr,
-	&deadline_writesstarved_entry.attr,
-	&deadline_frontmerges_entry.attr,
-	&deadline_fifobatch_entry.attr,
-	NULL,
-};
-
-#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)
-
-static ssize_t
-deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct deadline_fs_entry *entry = to_deadline(attr);
-
-	if (!entry->show)
-		return -EIO;
-
-	return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-deadline_attr_store(struct kobject *kobj, struct attribute *attr,
-		    const char *page, size_t length)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct deadline_fs_entry *entry = to_deadline(attr);
-
-	if (!entry->store)
-		return -EIO;
-
-	return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops deadline_sysfs_ops = {
-	.show	= deadline_attr_show,
-	.store	= deadline_attr_store,
-};
-
-static struct kobj_type deadline_ktype = {
-	.sysfs_ops	= &deadline_sysfs_ops,
-	.default_attrs	= default_attrs,
+#define DD_ATTR(name) \
+	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
+				      deadline_##name##_store)
+
+static struct elv_fs_entry deadline_attrs[] = {
+	DD_ATTR(read_expire),
+	DD_ATTR(write_expire),
+	DD_ATTR(writes_starved),
+	DD_ATTR(front_merges),
+	DD_ATTR(fifo_batch),
+	__ATTR_NULL
 };
 
 static struct elevator_type iosched_deadline = {
@@ -840,7 +778,7 @@ static struct elevator_type iosched_deadline = {
 		.elevator_exit_fn =		deadline_exit_queue,
 	},
 
-	.elevator_ktype = &deadline_ktype,
+	.elevator_attrs = deadline_attrs,
 	.elevator_name = "deadline",
 	.elevator_owner = THIS_MODULE,
 };
diff --git a/block/elevator.c b/block/elevator.c
index 24b702d649a953977cafb7125ca1b81d73947cc3..db3d0d8296a05c908f5dd38100593217c8c7d114 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -120,15 +120,10 @@ static struct elevator_type *elevator_get(const char *name)
 	return e;
 }
 
-static int elevator_attach(request_queue_t *q, struct elevator_type *e,
-			   struct elevator_queue *eq)
+static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
 {
 	int ret = 0;
 
-	memset(eq, 0, sizeof(*eq));
-	eq->ops = &e->ops;
-	eq->elevator_type = e;
-
 	q->elevator = eq;
 
 	if (eq->ops->elevator_init_fn)
@@ -154,6 +149,33 @@ static int __init elevator_setup(char *str)
 
 __setup("elevator=", elevator_setup);
 
+static struct kobj_type elv_ktype;
+
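+/* allocate an elevator; the type reference is dropped again on failure */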
+static elevator_t *elevator_alloc(struct elevator_type *e)
+{
+	elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	if (eq) {
+		memset(eq, 0, sizeof(*eq));
+		eq->ops = &e->ops;
+		eq->elevator_type = e;
+		kobject_init(&eq->kobj);
+		snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
+		eq->kobj.ktype = &elv_ktype;
+		mutex_init(&eq->sysfs_lock);
+	} else {
+		elevator_put(e);
+	}
+	return eq;
+}
+
+static void elevator_release(struct kobject *kobj)
+{
+	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	elevator_put(e->elevator_type);
+	kfree(e);
+}
+
 int elevator_init(request_queue_t *q, char *name)
 {
 	struct elevator_type *e = NULL;
@@ -176,29 +197,26 @@ int elevator_init(request_queue_t *q, char *name)
 		e = elevator_get("noop");
 	}
 
-	eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
-	if (!eq) {
-		elevator_put(e);
+	eq = elevator_alloc(e);
+	if (!eq)
 		return -ENOMEM;
-	}
 
-	ret = elevator_attach(q, e, eq);
-	if (ret) {
-		kfree(eq);
-		elevator_put(e);
-	}
+	ret = elevator_attach(q, eq);
+	if (ret)
+		kobject_put(&eq->kobj);
 
 	return ret;
 }
 
 void elevator_exit(elevator_t *e)
 {
+	mutex_lock(&e->sysfs_lock);
 	if (e->ops->elevator_exit_fn)
 		e->ops->elevator_exit_fn(e);
+	e->ops = NULL;
+	mutex_unlock(&e->sysfs_lock);
 
-	elevator_put(e->elevator_type);
-	e->elevator_type = NULL;
-	kfree(e);
+	kobject_put(&e->kobj);
 }
 
 /*
@@ -627,26 +645,80 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
 	}
 }
 
-int elv_register_queue(struct request_queue *q)
+#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
+
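+/* attr access takes sysfs_lock; elevator_exit() clears ->ops under it */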
+static ssize_t
+elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
-	elevator_t *e = q->elevator;
+	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	struct elv_fs_entry *entry = to_elv(attr);
+	ssize_t error;
 
-	e->kobj.parent = kobject_get(&q->kobj);
-	if (!e->kobj.parent)
-		return -EBUSY;
+	if (!entry->show)
+		return -EIO;
 
-	snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
-	e->kobj.ktype = e->elevator_type->elevator_ktype;
+	mutex_lock(&e->sysfs_lock);
+	error = e->ops ? entry->show(e, page) : -ENOENT;
+	mutex_unlock(&e->sysfs_lock);
+	return error;
+}
+
+static ssize_t
+elv_attr_store(struct kobject *kobj, struct attribute *attr,
+	       const char *page, size_t length)
+{
+	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	struct elv_fs_entry *entry = to_elv(attr);
+	ssize_t error;
+
+	if (!entry->store)
+		return -EIO;
+
+	mutex_lock(&e->sysfs_lock);
+	error = e->ops ? entry->store(e, page, length) : -ENOENT;
+	mutex_unlock(&e->sysfs_lock);
+	return error;
+}
+
+static struct sysfs_ops elv_sysfs_ops = {
+	.show	= elv_attr_show,
+	.store	= elv_attr_store,
+};
+
+static struct kobj_type elv_ktype = {
+	.sysfs_ops	= &elv_sysfs_ops,
+	.release	= elevator_release,
+};
 
-	return kobject_register(&e->kobj);
+int elv_register_queue(struct request_queue *q)
+{
+	elevator_t *e = q->elevator;
+	int error;
+
+	e->kobj.parent = &q->kobj;
+
+	error = kobject_add(&e->kobj);
+	if (!error) {
+		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
+		if (attr) {
+			while (attr->attr.name) {
+				if (sysfs_create_file(&e->kobj, &attr->attr))
+					break;
+				attr++;
+			}
+		}
+		kobject_uevent(&e->kobj, KOBJ_ADD);
+	}
+	return error;
 }
 
 void elv_unregister_queue(struct request_queue *q)
 {
 	if (q) {
 		elevator_t *e = q->elevator;
-		kobject_unregister(&e->kobj);
-		kobject_put(&q->kobj);
+		kobject_uevent(&e->kobj, KOBJ_REMOVE);
+		kobject_del(&e->kobj);
 	}
 }
 
@@ -675,21 +746,16 @@ void elv_unregister(struct elevator_type *e)
 	/*
 	 * Iterate every thread in the process to remove the io contexts.
 	 */
-	read_lock(&tasklist_lock);
-	do_each_thread(g, p) {
-		struct io_context *ioc = p->io_context;
-		if (ioc && ioc->cic) {
-			ioc->cic->exit(ioc->cic);
-			ioc->cic->dtor(ioc->cic);
-			ioc->cic = NULL;
-		}
-		if (ioc && ioc->aic) {
-			ioc->aic->exit(ioc->aic);
-			ioc->aic->dtor(ioc->aic);
-			ioc->aic = NULL;
-		}
-	} while_each_thread(g, p);
-	read_unlock(&tasklist_lock);
+	if (e->ops.trim) {
+		read_lock(&tasklist_lock);
+		do_each_thread(g, p) {
+			task_lock(p);
+			if (p->io_context)
+				e->ops.trim(p->io_context);
+			task_unlock(p);
+		} while_each_thread(g, p);
+		read_unlock(&tasklist_lock);
+	}
 
 	spin_lock_irq(&elv_list_lock);
 	list_del_init(&e->list);
@@ -703,16 +768,17 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  * need for the new one. this way we have a chance of going back to the old
  * one, if the new one fails init for some reason.
  */
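+/* returns 1 for success, 0 for failure; the caller reports the error */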
-static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 {
 	elevator_t *old_elevator, *e;
 
 	/*
 	 * Allocate new elevator
 	 */
-	e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	e = elevator_alloc(new_e);
 	if (!e)
-		goto error;
+		return 0;
 
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data
@@ -743,7 +808,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	/*
 	 * attach and start new elevator
 	 */
-	if (elevator_attach(q, new_e, e))
+	if (elevator_attach(q, e))
 		goto fail;
 
 	if (elv_register_queue(q))
@@ -754,7 +819,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	 */
 	elevator_exit(old_elevator);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	return;
+	return 1;
 
 fail_register:
 	/*
@@ -767,10 +832,9 @@ fail:
 	q->elevator = old_elevator;
 	elv_register_queue(q);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-	kfree(e);
-error:
-	elevator_put(new_e);
-	printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name);
+	if (e)
+		kobject_put(&e->kobj);
+	return 0;
 }
 
 ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
@@ -797,7 +861,8 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
 		return count;
 	}
 
-	elevator_switch(q, e);
+	if (!elevator_switch(q, e))
+		printk(KERN_ERR "elevator: switch to %s failed\n", elevator_name);
 	return count;
 }
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0ef2971a9e8207ee5bbee9fd50e3873466308b33..6c793b196aa918da946d7378667f8a08dd7c4005 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1740,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue);
  *     Hopefully the low level driver will have finished any
  *     outstanding requests first...
  **/
-void blk_cleanup_queue(request_queue_t * q)
+static void blk_release_queue(struct kobject *kobj)
 {
+	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
 	struct request_list *rl = &q->rq;
 
-	if (!atomic_dec_and_test(&q->refcnt))
-		return;
-
-	if (q->elevator)
-		elevator_exit(q->elevator);
-
 	blk_sync_queue(q);
 
 	if (rl->rq_pool)
@@ -1761,6 +1756,25 @@ void blk_cleanup_queue(request_queue_t * q)
 	kmem_cache_free(requestq_cachep, q);
 }
 
+void blk_put_queue(request_queue_t *q)
+{
+	kobject_put(&q->kobj);
+}
+EXPORT_SYMBOL(blk_put_queue);
+
+void blk_cleanup_queue(request_queue_t * q)
+{
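+	/* mark dead: blk_get_queue() and queue sysfs accesses now fail */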
+	mutex_lock(&q->sysfs_lock);
+	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	mutex_unlock(&q->sysfs_lock);
+
+	if (q->elevator)
+		elevator_exit(q->elevator);
+
+	blk_put_queue(q);
+}
+
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(request_queue_t *q)
@@ -1788,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+static struct kobj_type queue_ktype;
+
 request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	request_queue_t *q;
@@ -1798,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	memset(q, 0, sizeof(*q));
 	init_timer(&q->unplug_timer);
-	atomic_set(&q->refcnt, 1);
+
+	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+	q->kobj.ktype = &queue_ktype;
+	kobject_init(&q->kobj);
 
 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
 	q->backing_dev_info.unplug_io_data = q;
 
+	mutex_init(&q->sysfs_lock);
+
 	return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -1854,8 +1874,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 		return NULL;
 
 	q->node = node_id;
-	if (blk_init_free_list(q))
-		goto out_init;
+	if (blk_init_free_list(q)) {
+		kmem_cache_free(requestq_cachep, q);
+		return NULL;
+	}
 
 	/*
 	 * if caller didn't supply a lock, they get per-queue locking with
@@ -1891,9 +1913,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 		return q;
 	}
 
-	blk_cleanup_queue(q);
-out_init:
-	kmem_cache_free(requestq_cachep, q);
+	blk_put_queue(q);
 	return NULL;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
@@ -1901,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
 int blk_get_queue(request_queue_t *q)
 {
 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		atomic_inc(&q->refcnt);
+		kobject_get(&q->kobj);
 		return 0;
 	}
 
@@ -3477,10 +3497,13 @@ void put_io_context(struct io_context *ioc)
 	BUG_ON(atomic_read(&ioc->refcount) == 0);
 
 	if (atomic_dec_and_test(&ioc->refcount)) {
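+		/* RCU pairs with synchronize_rcu() in the elevator module exit paths */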
+		rcu_read_lock();
 		if (ioc->aic && ioc->aic->dtor)
 			ioc->aic->dtor(ioc->aic);
 		if (ioc->cic && ioc->cic->dtor)
 			ioc->cic->dtor(ioc->cic);
+		rcu_read_unlock();
 
 		kmem_cache_free(iocontext_cachep, ioc);
 	}
@@ -3614,10 +3636,13 @@ static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
 	struct request_list *rl = &q->rq;
+	unsigned long nr;
+	int ret = queue_var_store(&nr, page, count);
+	if (nr < BLKDEV_MIN_RQ)
+		nr = BLKDEV_MIN_RQ;
 
-	int ret = queue_var_store(&q->nr_requests, page, count);
-	if (q->nr_requests < BLKDEV_MIN_RQ)
-		q->nr_requests = BLKDEV_MIN_RQ;
+	spin_lock_irq(q->queue_lock);
+	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
 
 	if (rl->count[READ] >= queue_congestion_on_threshold(q))
@@ -3643,6 +3668,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 		blk_clear_queue_full(q, WRITE);
 		wake_up(&rl->wait[WRITE]);
 	}
+	spin_unlock_irq(q->queue_lock);
 	return ret;
 }
 
@@ -3758,13 +3784,19 @@ static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q;
+	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+	ssize_t res;
 
-	q = container_of(kobj, struct request_queue, kobj);
 	if (!entry->show)
 		return -EIO;
-
-	return entry->show(q, page);
+	mutex_lock(&q->sysfs_lock);
+	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+		mutex_unlock(&q->sysfs_lock);
+		return -ENOENT;
+	}
+	res = entry->show(q, page);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
 }
 
 static ssize_t
@@ -3772,13 +3804,19 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		    const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q;
+	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+	ssize_t res;
 
-	q = container_of(kobj, struct request_queue, kobj);
 	if (!entry->store)
 		return -EIO;
-
-	return entry->store(q, page, length);
+	mutex_lock(&q->sysfs_lock);
+	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+		mutex_unlock(&q->sysfs_lock);
+		return -ENOENT;
+	}
+	res = entry->store(q, page, length);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
 }
 
 static struct sysfs_ops queue_sysfs_ops = {
@@ -3789,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = {
 static struct kobj_type queue_ktype = {
 	.sysfs_ops	= &queue_sysfs_ops,
 	.default_attrs	= default_attrs,
+	.release	= blk_release_queue,
 };
 
 int blk_register_queue(struct gendisk *disk)
@@ -3801,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk)
 		return -ENXIO;
 
 	q->kobj.parent = kobject_get(&disk->kobj);
-	if (!q->kobj.parent)
-		return -EBUSY;
 
-	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
-	q->kobj.ktype = &queue_ktype;
-
-	ret = kobject_register(&q->kobj);
+	ret = kobject_add(&q->kobj);
 	if (ret < 0)
 		return ret;
 
+	kobject_uevent(&q->kobj, KOBJ_ADD);
+
 	ret = elv_register_queue(q);
 	if (ret) {
-		kobject_unregister(&q->kobj);
+		kobject_uevent(&q->kobj, KOBJ_REMOVE);
+		kobject_del(&q->kobj);
 		return ret;
 	}
 
@@ -3827,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk)
 	if (q && q->request_fn) {
 		elv_unregister_queue(q);
 
-		kobject_unregister(&q->kobj);
+		kobject_uevent(&q->kobj, KOBJ_REMOVE);
+		kobject_del(&q->kobj);
 		kobject_put(&disk->kobj);
 	}
 }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 5f6d1a5cce11fc3bebdf1f0ab29c875db7850e09..0010704739e381b21db668d9a81d6b0392c888b5 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1307,7 +1307,7 @@ static int __init loop_init(void)
 
 out_mem4:
 	while (i--)
-		blk_put_queue(loop_dev[i].lo_queue);
+		blk_cleanup_queue(loop_dev[i].lo_queue);
 	devfs_remove("loop");
 	i = max_loop;
 out_mem3:
@@ -1328,7 +1328,7 @@ static void loop_exit(void)
 
 	for (i = 0; i < max_loop; i++) {
 		del_gendisk(disks[i]);
-		blk_put_queue(loop_dev[i].lo_queue);
+		blk_cleanup_queue(loop_dev[i].lo_queue);
 		put_disk(disks[i]);
 	}
 	devfs_remove("loop");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index bc9b2bcd7dba71023824ebb8adc6cbf9e9e0c712..476a5b553f34d9ae9d57171b78d7445203cfaf24 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2514,7 +2514,7 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
 	return 0;
 
 out_new_dev:
-	blk_put_queue(disk->queue);
+	blk_cleanup_queue(disk->queue);
 out_mem2:
 	put_disk(disk);
 out_mem:
@@ -2555,7 +2555,7 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
 	DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
 
 	del_gendisk(pd->disk);
-	blk_put_queue(pd->disk->queue);
+	blk_cleanup_queue(pd->disk->queue);
 	put_disk(pd->disk);
 
 	pkt_devs[idx] = NULL;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 4ada1268b40d1abda498b2eddf9ed6caa830a665..c16e66b9c7a74f56d1fef2b55ba8f7bdf2de66e8 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -1131,7 +1131,7 @@ static void mm_pci_remove(struct pci_dev *dev)
 		pci_free_consistent(card->dev, PAGE_SIZE*2,
 				    card->mm_pages[1].desc,
 				    card->mm_pages[1].page_dma);
-	blk_put_queue(card->queue);
+	blk_cleanup_queue(card->queue);
 }
 
 static const struct pci_device_id mm_pci_ids[] = { {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 745ca1f67b14a93db8e7c2802746d7963641aa7e..88d60202b9db409bcfcc48069ef7bc49547172af 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -840,7 +840,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
  bad3:
 	mempool_destroy(md->io_pool);
  bad2:
-	blk_put_queue(md->queue);
+	blk_cleanup_queue(md->queue);
 	free_minor(minor);
  bad1:
 	kfree(md);
@@ -860,7 +860,7 @@ static void free_dev(struct mapped_device *md)
 	del_gendisk(md->disk);
 	free_minor(minor);
 	put_disk(md->disk);
-	blk_put_queue(md->queue);
+	blk_cleanup_queue(md->queue);
 	kfree(md);
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d05e3125d298bd5c7083204d6f24d098e002c66e..5ed2228745cb306743761a44809d20d47a1652aa 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -213,8 +213,11 @@ static void mddev_put(mddev_t *mddev)
 		return;
 	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
 		list_del(&mddev->all_mddevs);
-		blk_put_queue(mddev->queue);
+		/* that blocks */
+		blk_cleanup_queue(mddev->queue);
+		/* that also blocks */
 		kobject_unregister(&mddev->kobj);
+		/* result blows... */
 	}
 	spin_unlock(&all_mddevs_lock);
 }
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 2e727f49ad19c90f7e7440ab012bc3e9f8b4c6aa..44133250da2eeddb5252d244224607d333c4a5a0 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -273,7 +273,7 @@ removeseg:
 	list_del(&dev_info->lh);
 
 	del_gendisk(dev_info->gd);
-	blk_put_queue(dev_info->dcssblk_queue);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 	device_unregister(dev);
@@ -491,7 +491,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 unregister_dev:
 	PRINT_ERR("device_create_file() failed!\n");
 	list_del(&dev_info->lh);
-	blk_put_queue(dev_info->dcssblk_queue);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 	device_unregister(&dev_info->dev);
@@ -505,7 +505,7 @@ list_del:
 unload_seg:
 	segment_unload(local_buf);
 dealloc_gendisk:
-	blk_put_queue(dev_info->dcssblk_queue);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 free_dev_info:
@@ -562,7 +562,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
 	list_del(&dev_info->lh);
 
 	del_gendisk(dev_info->gd);
-	blk_put_queue(dev_info->dcssblk_queue);
+	blk_cleanup_queue(dev_info->dcssblk_queue);
 	dev_info->gd->queue = NULL;
 	put_disk(dev_info->gd);
 	device_unregister(&dev_info->dev);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 860e7a485a5f08d2aaa10129d65c6339c128a008..56bb6a4e15f3b2afe80bb4dee0026f6fc1518852 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -58,7 +58,7 @@ struct cfq_io_context {
 	 * circular list of cfq_io_contexts belonging to a process io context
 	 */
 	struct list_head list;
-	struct cfq_queue *cfqq;
+	struct cfq_queue *cfqq[2];
 	void *key;
 
 	struct io_context *ioc;
@@ -69,6 +69,8 @@ struct cfq_io_context {
 	unsigned long ttime_samples;
 	unsigned long ttime_mean;
 
+	struct list_head queue_list;
+
 	void (*dtor)(struct cfq_io_context *);
 	void (*exit)(struct cfq_io_context *);
 };
@@ -404,8 +406,6 @@ struct request_queue
 
 	struct blk_queue_tag	*queue_tags;
 
-	atomic_t		refcnt;
-
 	unsigned int		nr_sorted;
 	unsigned int		in_flight;
 
@@ -424,6 +424,9 @@ struct request_queue
 	struct request		pre_flush_rq, bar_rq, post_flush_rq;
 	struct request		*orig_bar_rq;
 	unsigned int		bi_size;
+
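+	/* guards queue sysfs show/store against teardown */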
+	struct mutex		sysfs_lock;
 };
 
 #define RQ_INACTIVE		(-1)
@@ -725,7 +727,7 @@ extern long nr_blockdev_pages(void);
 int blk_get_queue(request_queue_t *);
 request_queue_t *blk_alloc_queue(gfp_t);
 request_queue_t *blk_alloc_queue_node(gfp_t, int);
-#define blk_put_queue(q) blk_cleanup_queue((q))
+extern void blk_put_queue(request_queue_t *);
 
 /*
  * tag stuff
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 18cf1f3e11845dd6ba28f81b3defd2e4ed52adff..ad133fcfb239ddbf16e6961e88f47161e8d1447e 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -48,10 +48,18 @@ struct elevator_ops
 
 	elevator_init_fn *elevator_init_fn;
 	elevator_exit_fn *elevator_exit_fn;
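+	/* lets elv_unregister() strip this elevator's data from io contexts */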
+	void (*trim)(struct io_context *);
 };
 
 #define ELV_NAME_MAX	(16)
 
+struct elv_fs_entry {
+	struct attribute attr;
+	ssize_t (*show)(elevator_t *, char *);
+	ssize_t (*store)(elevator_t *, const char *, size_t);
+};
+
 /*
  * identifies an elevator type, such as AS or deadline
  */
@@ -60,7 +67,7 @@ struct elevator_type
 	struct list_head list;
 	struct elevator_ops ops;
 	struct elevator_type *elevator_type;
-	struct kobj_type *elevator_ktype;
+	struct elv_fs_entry *elevator_attrs;
 	char elevator_name[ELV_NAME_MAX];
 	struct module *elevator_owner;
 };
@@ -74,6 +81,7 @@ struct elevator_queue
 	void *elevator_data;
 	struct kobject kobj;
 	struct elevator_type *elevator_type;
+	struct mutex sysfs_lock;
 };
 
 /*
diff --git a/kernel/exit.c b/kernel/exit.c
index 531aadca553030543b643a0553f6144785d02bb5..d1e8d500a7e127cb2915da8e2da4beab942c6e99 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -807,8 +807,6 @@ fastcall NORET_TYPE void do_exit(long code)
 		panic("Attempted to kill the idle task!");
 	if (unlikely(tsk->pid == 1))
 		panic("Attempted to kill init!");
-	if (tsk->io_context)
-		exit_io_context();
 
 	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
 		current->ptrace_message = code;
@@ -822,6 +820,8 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(tsk->flags & PF_EXITING)) {
 		printk(KERN_ALERT
 			"Fixing recursive fault but reboot is needed!\n");
+		if (tsk->io_context)
+			exit_io_context();
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule();
 	}
@@ -881,6 +881,10 @@ fastcall NORET_TYPE void do_exit(long code)
 	 */
 	mutex_debug_check_no_locks_held(tsk);
 
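+	/* the io context is dropped late, once the task can issue no more I/O */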
+	if (tsk->io_context)
+		exit_io_context();
+
 	/* PF_DEAD causes final put_task_struct after we schedule. */
 	preempt_disable();
 	BUG_ON(tsk->flags & PF_DEAD);