===== drivers/block/cciss.c 1.85 vs edited =====
--- 1.85/drivers/block/cciss.c	Thu Aug  7 11:25:23 2003
+++ edited/drivers/block/cciss.c	Fri Aug  8 08:55:57 2003
@@ -114,7 +114,7 @@
 static struct proc_dir_entry *proc_cciss;
 
-static void do_cciss_request(request_queue_t *q);
+static int do_cciss_request(request_queue_t *q, struct request *rq);
 static int cciss_open(struct inode *inode, struct file *filep);
 static int cciss_release(struct inode *inode, struct file *filep);
 static int cciss_ioctl(struct inode *inode, struct file *filep,
@@ -1733,6 +1733,7 @@
 	int i;
 	int retry_cmd = 0;
 	u64bit temp64;
+	struct request *rq = cmd->rq;
 
 	if (timeout)
 		status = 0;
@@ -1856,41 +1857,35 @@
 	printk("Done with %p\n", cmd->rq);
 #endif /* CCISS_DEBUG */
 
-	end_that_request_last(cmd->rq);
 	cmd_free(h,cmd,1);
+	end_that_request_last(rq);
+}
+
+/*
+ * send off the requests to the hardware
+ */
+static void do_cciss_request_commit(request_queue_t *q)
+{
+	start_io(q->queuedata);
 }
 
 /*
  * Get a request and submit it to the controller.
  */
-static void do_cciss_request(request_queue_t *q)
+static int do_cciss_request(request_queue_t *q, struct request *creq)
 {
 	ctlr_info_t *h= q->queuedata;
 	CommandList_struct *c;
 	int start_blk, seg;
-	struct request *creq;
 	u64bit temp64;
 	struct scatterlist tmp_sg[MAXSGENTRIES];
 	drive_info_struct *drv;
 	int i, dir;
 
-	if (blk_queue_plugged(q))
-		goto startio;
-
-queue:
-	creq = elv_next_request(q);
-	if (!creq)
-		goto startio;
+	BUG_ON(creq->nr_phys_segments > MAXSGENTRIES);
 
-	if (creq->nr_phys_segments > MAXSGENTRIES)
-		BUG();
-
-	if (( c = cmd_alloc(h, 1)) == NULL)
-		goto full;
-
-	blkdev_dequeue_request(creq);
-
-	spin_unlock_irq(q->queue_lock);
+	if ((c = cmd_alloc(h, 1)) == NULL)
+		return 1;
 
 	c->cmd_type = CMD_RWREQ;
 	c->rq = creq;
@@ -1951,18 +1946,12 @@
 	c->Request.CDB[8]= creq->nr_sectors & 0xff;
 	c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
 
-	spin_lock_irq(q->queue_lock);
-
 	addQ(&(h->reqQ),c);
 	h->Qdepth++;
 	if(h->Qdepth > h->maxQsinceinit)
 		h->maxQsinceinit = h->Qdepth;
 
-	goto queue;
-full:
-	blk_stop_queue(q);
-startio:
-	start_io(h);
+	return 0;
 }
 
 static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2521,7 +2510,7 @@
 	 * someone needs to clean up this failure handling mess
 	 */
 	spin_lock_init(&hba[i]->lock);
-	q = blk_init_queue(do_cciss_request, &hba[i]->lock);
+	q = blk_init_queue_one(do_cciss_request, do_cciss_request_commit, &hba[i]->lock);
 	if (!q)
 		goto err_all;
===== drivers/block/cpqarray.c 1.79 vs edited =====
--- 1.79/drivers/block/cpqarray.c	Thu Aug  7 11:25:24 2003
+++ edited/drivers/block/cpqarray.c	Fri Aug  8 08:55:57 2003
@@ -135,7 +135,8 @@
 static int ida_ioctl(struct inode *inode, struct file *filep,
 	unsigned int cmd, unsigned long arg);
 static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
-static void do_ida_request(request_queue_t *q);
+static int do_ida_request(request_queue_t *q, struct request *creq);
+static void do_ida_commit(request_queue_t *q);
 static void start_io(ctlr_info_t *h);
 
 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
@@ -380,7 +381,7 @@
 			hba[i]->devname);
 
 	spin_lock_init(&hba[i]->lock);
-	q = blk_init_queue(do_ida_request, &hba[i]->lock);
+	q = blk_init_queue_one(do_ida_request, do_ida_commit, &hba[i]->lock);
 	if (!q)
 		goto Enomem1;
 
@@ -777,6 +778,13 @@
 	return c;
 }
 
+static void do_ida_commit(request_queue_t *q)
+{
+	ctlr_info_t *h = q->queuedata;
+
+	start_io(h);
+}
+
 /*
  * Get a request and submit it to the controller.
  * This routine needs to grab all the requests it possibly can from the
@@ -784,29 +792,18 @@
  * are in here (either via the dummy do_ida_request functions or by being
  * called from the interrupt handler
  */
-static void do_ida_request(request_queue_t *q)
+static int do_ida_request(request_queue_t *q, struct request *creq)
 {
 	ctlr_info_t *h = q->queuedata;
 	cmdlist_t *c;
-	struct request *creq;
 	struct scatterlist tmp_sg[SG_MAX];
 	int i, dir, seg;
 
-	if (blk_queue_plugged(q))
-		goto startio;
-
-queue_next:
-	creq = elv_next_request(q);
-	if (!creq)
-		goto startio;
-
-	if (creq->nr_phys_segments > SG_MAX)
-		BUG();
+	BUG_ON(blk_queue_plugged(q));
+	BUG_ON(creq->nr_phys_segments > SG_MAX);
 
 	if ((c = cmd_alloc(h,1)) == NULL)
-		goto startio;
-
-	blkdev_dequeue_request(creq);
+		return 1;
 
 	c->ctlr = h->ctlr;
 	c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
@@ -845,10 +842,7 @@
 	if (h->Qdepth > h->maxQsinceinit)
 		h->maxQsinceinit = h->Qdepth;
 
-	goto queue_next;
-
-startio:
-	start_io(h);
+	return 0;
 }
 
 /*
@@ -898,6 +892,7 @@
  */
 static inline void complete_command(cmdlist_t *cmd, int timeout)
 {
+	struct request *rq = cmd->rq;
 	int ok=1;
 	int i, ddir;
 
@@ -929,10 +924,11 @@
 		pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
 				cmd->req.sg[i].size, ddir);
 
-	complete_buffers(cmd->rq->bio, ok);
+	complete_buffers(rq->bio, ok);
 
-	DBGPX(printk("Done with %p\n", cmd->rq););
-	end_that_request_last(cmd->rq);
+	DBGPX(printk("Done with %p\n", rq););
+	cmd_free(hba[cmd->ctlr], cmd, 1);
+	end_that_request_last(rq);
 }
 
 /*
@@ -988,7 +984,6 @@
 		}
 		if (c->type == CMD_RWREQ) {
 			complete_command(c, 0);
-			cmd_free(h, c, 1);
 		} else if (c->type == CMD_IOCTL_PEND) {
 			c->type = CMD_IOCTL_DONE;
 		}
@@ -1000,7 +995,6 @@
 	/*
 	 * See if we can queue up some more IO
 	 */
-	do_ida_request(h->queue);
 	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
 	return IRQ_HANDLED;
 }
===== drivers/block/ll_rw_blk.c 1.203 vs edited =====
--- 1.203/drivers/block/ll_rw_blk.c	Thu Aug  7 11:25:24 2003
+++ edited/drivers/block/ll_rw_blk.c	Fri Aug  8 08:59:16 2003
@@ -1088,7 +1088,9 @@
 void blk_start_queue(request_queue_t *q)
 {
 	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
-	schedule_work(&q->unplug_work);
+	clear_bit(QUEUE_FLAG_HWFULL, &q->queue_flags);
+	BUG_ON(blk_queue_plugged(q));
+	q->request_fn(q);
 }
 
 /**
@@ -1251,35 +1253,7 @@
 	return q;
 }
 
-/**
- * blk_init_queue - prepare a request queue for use with a block device
- * @q: The &request_queue_t to be initialised
- * @rfn: The function to be called to process requests that have been
- * placed on the queue.
- *
- * Description:
- * If a block device wishes to use the standard request handling procedures,
- * which sorts requests and coalesces adjacent requests, then it must
- * call blk_init_queue(). The function @rfn will be called when there
- * are requests on the queue that need to be processed. If the device
- * supports plugging, then @rfn may not be called immediately when requests
- * are available on the queue, but may be called at some time later instead.
- * Plugged queues are generally unplugged when a buffer belonging to one
- * of the requests on the queue is needed, or due to memory pressure.
- *
- * @rfn is not required, or even expected, to remove all requests off the
- * queue, but only as many as it can handle at a time. If it does leave
- * requests on the queue, it is responsible for arranging that the requests
- * get dealt with eventually.
- *
- * The queue spin lock must be held while manipulating the requests on the
- * request queue.
- *
- * Note:
- * blk_init_queue() must be paired with a blk_cleanup_queue() call
- * when the block device is deactivated (such as at module unload).
- **/
-request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+static request_queue_t *__blk_init_queue(spinlock_t *lock)
 {
 	request_queue_t *q;
 	static int printed;
@@ -1299,7 +1273,7 @@
 	if (elevator_init(q, chosen_elevator))
 		goto out_elv;
 
-	q->request_fn = rfn;
+	q->request_commit_fn = NULL;
 	q->back_merge_fn = ll_back_merge_fn;
 	q->front_merge_fn = ll_front_merge_fn;
 	q->merge_requests_fn = ll_merge_requests_fn;
@@ -1325,6 +1299,116 @@
 }
 
+/**
+ * blk_init_queue - prepare a request queue for use with a block device
+ * @q: The &request_queue_t to be initialised
+ * @rfn: The function to be called to process requests that have been
+ * placed on the queue.
+ *
+ * Description:
+ * If a block device wishes to use the standard request handling procedures,
+ * which sorts requests and coalesces adjacent requests, then it must
+ * call blk_init_queue(). The function @rfn will be called when there
+ * are requests on the queue that need to be processed. If the device
+ * supports plugging, then @rfn may not be called immediately when requests
+ * are available on the queue, but may be called at some time later instead.
+ * Plugged queues are generally unplugged when a buffer belonging to one
+ * of the requests on the queue is needed, or due to memory pressure.
+ *
+ * @rfn is not required, or even expected, to remove all requests off the
+ * queue, but only as many as it can handle at a time. If it does leave
+ * requests on the queue, it is responsible for arranging that the requests
+ * get dealt with eventually.
+ *
+ * The queue spin lock must be held while manipulating the requests on the
+ * request queue.
+ *
+ * Note:
+ * blk_init_queue() must be paired with a blk_cleanup_queue() call
+ * when the block device is deactivated (such as at module unload).
+ **/
+request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+{
+	request_queue_t *q = __blk_init_queue(lock);
+
+	if (!q)
+		return NULL;
+
+	q->request_fn = rfn;
+	q->queue_request_fn = NULL;
+	return q;
+}
+
+static void blk_queue_request_fn(request_queue_t *q)
+{
+	struct request *rq;
+	int ret, queued;
+
+	ret = queued = 0;
+	while ((rq = elv_next_request(q))) {
+
+		ret = q->queue_request_fn(q, rq);
+		if (ret)
+			break;
+
+		/*
+		 * queued, remove it and send one more
+		 */
+		blkdev_dequeue_request(rq);
+		queued++;
+	}
+
+	/*
+	 * driver said don't queue any more requests, mark
+	 * the queue full. the next released request will
+	 * restart queueing operations
+	 */
+	if (ret) {
+		q->queue_flags |= (1 << QUEUE_FLAG_HWFULL);
+		blk_stop_queue(q);
+	}
+
+	if (queued && q->request_commit_fn)
+		q->request_commit_fn(q);
+}
+
+/**
+ * blk_init_queue_one - prepare a request queue for use with a block device
+ * @qrf: The function to be called to queue a single request with the driver
+ * @rcf: The function to be called to commit a batch of queued requests
+ *
+ * Description:
+ * Generally, see description for blk_init_queue(). This function causes
+ * a change in how the driver does queueing of requests, as compared to
+ * blk_init_queue(). Instead of being handed a queue and peeling requests
+ * off it, blk_init_queue_one() hands the driver a single request at
+ * a time and takes care of stopping and starting the queue when
+ * necessary.
+ *
+ * There is no change in how IO scheduling is applied to incoming requests.
+ *
+ * The queue spin lock must be held while manipulating the requests on the
+ * request queue. This lock is acquired before calling into the @qrf
+ * supplied.
+ *
+ * Note:
+ * blk_init_queue_one() must be paired with a blk_cleanup_queue() call
+ * when the block device is deactivated (such as at module unload).
+ **/
+request_queue_t *blk_init_queue_one(queue_request_fn *qrf,
+				    request_commit_fn *rcf, spinlock_t *lock)
+{
+	request_queue_t *q = __blk_init_queue(lock);
+
+	if (!q)
+		return NULL;
+
+	q->request_fn = blk_queue_request_fn;
+	q->queue_request_fn = qrf;
+	q->request_commit_fn = rcf;
+	return q;
+}
+
 int blk_get_queue(request_queue_t *q)
 {
 	if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
@@ -2435,6 +2519,7 @@
 {
 	struct gendisk *disk = req->rq_disk;
 	struct completion *waiting = req->waiting;
+	request_queue_t *q = req->q;
 
 	if (disk && blk_fs_request(req)) {
 		unsigned long duration = jiffies - req->start_time;
@@ -2451,8 +2536,22 @@
 		disk_round_stats(disk);
 		disk_stat_dec(disk, in_flight);
 	}
-	__blk_put_request(req->q, req);
-	/* Do this LAST! The structure may be freed immediately afterwards */
+
+	if (q) {
+		__blk_put_request(q, req);
+
+		/*
+		 * request has completed, try and restart queueing
+		 */
+		if (q->queue_flags & (1 << QUEUE_FLAG_HWFULL))
+			blk_start_queue(q);
+	}
+
+	/*
+	 * do this last, as the request structure may have come from a
+	 * process stack that might become invalid right after the
+	 * complete
+	 */
 	if (waiting)
 		complete(waiting);
 }
@@ -2786,6 +2885,7 @@
 EXPORT_SYMBOL(end_that_request_last);
 EXPORT_SYMBOL(end_request);
 EXPORT_SYMBOL(blk_init_queue);
+EXPORT_SYMBOL(blk_init_queue_one);
 EXPORT_SYMBOL(blk_cleanup_queue);
 EXPORT_SYMBOL(blk_get_queue);
 EXPORT_SYMBOL(blk_alloc_queue);
===== drivers/scsi/scsi_lib.c 1.106 vs edited =====
--- 1.106/drivers/scsi/scsi_lib.c	Thu Aug  7 11:25:25 2003
+++ edited/drivers/scsi/scsi_lib.c	Fri Aug  8 09:04:56 2003
@@ -476,8 +476,6 @@
  *		uptodate - 1 if I/O indicates success, 0 for I/O error.
  *		sectors - number of sectors we want to mark.
  *		requeue - indicates whether we should requeue leftovers.
- *		frequeue - indicates that if we release the command block
- *			   that the queue request function should be called.
  *
  * Lock status: Assumed that lock is not held upon entry.
  *
@@ -905,6 +903,14 @@
 	return BLKPREP_KILL;
 }
 
+static void scsi_queuecommit_fn(request_queue_t *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+	struct Scsi_Host *shost = sdev->host;
+
+	shost->hostt->queuecommit(sdev);
+}
+
 static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
 	struct scsi_device *sdev = q->queuedata;
@@ -1096,113 +1102,103 @@
 }
 
 /*
- * Function:	scsi_request_fn()
+ * Function:	scsi_queue_request_fn()
  *
  * Purpose:	Main strategy routine for SCSI.
  *
  * Arguments:	q - Pointer to actual queue.
+ *		req - Pointer to request
  *
- * Returns:	Nothing
+ * Returns:	1 when busy, 0 when we can queue more
  *
- * Lock status:	IO request lock assumed to be held when called.
+ * Lock status:	Queue lock assumed to be held when called.
  */
-static void scsi_request_fn(struct request_queue *q)
+static int scsi_queue_request_fn(struct request_queue *q, struct request *req)
 {
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost = sdev->host;
 	struct scsi_cmnd *cmd;
-	struct request *req;
+	int rtn;
 
-	/*
-	 * To start with, we keep looping until the queue is empty, or until
-	 * the host is no longer able to accept any more requests.
-	 */
-	while (!blk_queue_plugged(q)) {
-		int rtn;
-		/*
-		 * get next queueable request. We do this early to make sure
-		 * that the request is fully prepared even if we cannot
-		 * accept it.
-		 */
-		req = elv_next_request(q);
-		if (!req || !scsi_dev_queue_ready(q, sdev))
-			break;
+	BUG_ON(blk_queue_plugged(q));
 
-		/*
-		 * Remove the request from the request list.
-		 */
-		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
-			blkdev_dequeue_request(req);
-		sdev->device_busy++;
+	if (!scsi_dev_queue_ready(q, sdev))
+		goto out;
+
+	if (blk_queue_tagged(q) && blk_queue_start_tag(q, req))
+		goto out;
 
-		spin_unlock(q->queue_lock);
-		spin_lock(shost->host_lock);
+	sdev->device_busy++;
 
-		if (!scsi_host_queue_ready(q, shost, sdev))
+	spin_unlock(q->queue_lock);
+	spin_lock(shost->host_lock);
+
+	if (!scsi_host_queue_ready(q, shost, sdev))
+		goto not_ready;
+	if (sdev->single_lun) {
+		if (sdev->sdev_target->starget_sdev_user &&
+		    sdev->sdev_target->starget_sdev_user != sdev)
 			goto not_ready;
-		if (sdev->single_lun) {
-			if (sdev->sdev_target->starget_sdev_user &&
-			    sdev->sdev_target->starget_sdev_user != sdev)
-				goto not_ready;
-			sdev->sdev_target->starget_sdev_user = sdev;
-		}
-		shost->host_busy++;
+		sdev->sdev_target->starget_sdev_user = sdev;
+	}
+	shost->host_busy++;
 
-		/*
-		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
-		 * take the lock again.
-		 */
-		spin_unlock_irq(shost->host_lock);
+	/*
+	 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
+	 * take the lock again.
	 */
+	spin_unlock_irq(shost->host_lock);
 
-		cmd = req->special;
-		if (unlikely(cmd == NULL)) {
-			printk(KERN_CRIT "impossible request in %s.\n"
-			       "please mail a stack trace to "
-			       "linux-scsi@vger.kernel.org",
-			       __FUNCTION__);
-			BUG();
-		}
+	cmd = req->special;
+	if (unlikely(cmd == NULL)) {
+		printk(KERN_CRIT "impossible request in %s.\n"
+		       "please mail a stack trace to "
+		       "linux-scsi@vger.kernel.org",
+		       __FUNCTION__);
+		BUG();
+	}
 
-		/*
-		 * Finally, initialize any error handling parameters, and set up
-		 * the timers for timeouts.
-		 */
-		scsi_init_cmd_errh(cmd);
+	/*
	 * Finally, initialize any error handling parameters, and set up
	 * the timers for timeouts.
	 */
+	scsi_init_cmd_errh(cmd);
 
-		/*
-		 * Dispatch the command to the low-level driver.
-		 */
-		rtn = scsi_dispatch_cmd(cmd);
-		spin_lock_irq(q->queue_lock);
-		if(rtn) {
-			/* we're refusing the command; because of
-			 * the way locks get dropped, we need to
-			 * check here if plugging is required */
-			if(sdev->device_busy == 0)
-				blk_plug_device(q);
+	/*
	 * Dispatch the command to the low-level driver.
	 */
+	rtn = scsi_dispatch_cmd(cmd);
+	spin_lock_irq(q->queue_lock);
 
-			break;
-		}
-	}
+	/*
	 * command refused
	 */
+	if (rtn)
+		goto out;
 
-	return;
+	/*
	 * successfully queued
	 */
+	return 0;
 
- not_ready:
+not_ready:
 	spin_unlock_irq(shost->host_lock);
 
 	/*
-	 * lock q, handle tag, requeue req, and decrement device_busy. We
-	 * must return with queue_lock held.
-	 *
 	 * Decrementing device_busy without checking it is OK, as all such
 	 * cases (host limits or settings) should run the queue at some
 	 * later time.
 	 */
 	spin_lock_irq(q->queue_lock);
-	blk_requeue_request(q, req);
 	sdev->device_busy--;
-	if(sdev->device_busy == 0)
+out:
+	/*
	 * if device has no pending commands, queue needs to be plugged again
	 */
+	if (sdev->device_busy == 0)
 		blk_plug_device(q);
+
+	return 1;
 }
 
 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
@@ -1227,9 +1223,13 @@
 struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost = sdev->host;
+	request_commit_fn *rcf = NULL;
 	struct request_queue *q;
 
-	q = blk_init_queue(scsi_request_fn, &sdev->sdev_lock);
+	if (shost->hostt->queuecommit)
+		rcf = scsi_queuecommit_fn;
+
+	q = blk_init_queue_one(scsi_queue_request_fn, rcf, &sdev->sdev_lock);
 	if (!q)
 		return NULL;
===== include/linux/blkdev.h 1.122 vs edited =====
--- 1.122/include/linux/blkdev.h	Thu Aug  7 11:25:25 2003
+++ edited/include/linux/blkdev.h	Fri Aug  8 08:55:57 2003
@@ -240,6 +240,8 @@
 typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
 typedef int (prep_rq_fn) (request_queue_t *, struct request *);
 typedef void (unplug_fn) (void *q);
+typedef int (queue_request_fn) (request_queue_t *q, struct request *);
+typedef void (request_commit_fn) (request_queue_t *q);
 
 struct bio_vec;
 typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
@@ -276,6 +278,8 @@
 	struct request_list	rq;
 
 	request_fn_proc		*request_fn;
+	queue_request_fn	*queue_request_fn;
+	request_commit_fn	*request_commit_fn;
 	merge_request_fn	*back_merge_fn;
 	merge_request_fn	*front_merge_fn;
 	merge_requests_fn	*merge_requests_fn;
@@ -360,6 +364,7 @@
 #define QUEUE_FLAG_READFULL	3	/* write queue has been filled */
 #define QUEUE_FLAG_WRITEFULL	4	/* read queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
+#define QUEUE_FLAG_HWFULL	6	/* queue stopped because of hw full */
 
 #define blk_queue_plugged(q)	!list_empty(&(q)->plug_list)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
@@ -539,6 +544,7 @@
  * Access functions for manipulating queue properties
  */
 extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern request_queue_t *blk_init_queue_one(queue_request_fn *, request_commit_fn *, spinlock_t *);
 extern void blk_cleanup_queue(request_queue_t *);
 extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
 extern void blk_queue_bounce_limit(request_queue_t *, u64);
===== include/scsi/scsi_host.h 1.5 vs edited =====
--- 1.5/include/scsi/scsi_host.h	Sun Jul  6 20:14:28 2003
+++ edited/include/scsi/scsi_host.h	Fri Aug  8 08:55:58 2003
@@ -101,6 +101,14 @@
 		      void (*done)(struct scsi_cmnd *));
 
 	/*
+	 * if the host adapter wants to be informed when we are done
+	 * sending a batch of requests to it, it can set this as the notifier.
+	 *
+	 * STATUS: OPTIONAL
+	 */
+	void (*queuecommit)(struct scsi_device *);
+
+	/*
 	 * This is an error handling strategy routine. You don't need to
 	 * define one of these if you don't want to - there is a default
 	 * routine that is present that should work in most cases. For those
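
--

For illustration (not part of the patch): a minimal sketch of what a driver
conversion to the new interface looks like. The mydev_* names are
hypothetical; only blk_init_queue_one(), the queue_request_fn and
request_commit_fn hooks and their return convention come from the patch
above.

/*
 * Queue one request with the hardware. Returning 1 means "hw full": the
 * block layer leaves the request on the queue, stops the queue and marks
 * it QUEUE_FLAG_HWFULL. Returning 0 means the request was accepted, so
 * the block layer dequeues it and offers the next one.
 */
static int mydev_queue_request(request_queue_t *q, struct request *rq)
{
	struct mydev *dev = q->queuedata;
	struct mydev_cmd *cmd;

	if ((cmd = mydev_cmd_alloc(dev)) == NULL)
		return 1;

	mydev_cmd_setup(cmd, rq);
	mydev_cmd_add(dev, cmd);	/* queue internally, don't start io yet */
	return 0;
}

/*
 * Called once after a batch of successfully queued requests, so the
 * doorbell is hit once per batch instead of once per request.
 */
static void mydev_commit(request_queue_t *q)
{
	mydev_start_io(q->queuedata);
}

	/* in the probe path, replacing blk_init_queue(): */
	q = blk_init_queue_one(mydev_queue_request, mydev_commit, &dev->lock);

Completion is unchanged: the driver still ends the request with
end_that_request_last(), which notices QUEUE_FLAG_HWFULL and restarts
queueing via blk_start_queue() if the driver had returned 1.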