/*
 * BSG helper library
 *
 * Copyright (C) 2008   James Smart, Emulex Corporation
 * Copyright (C) 2011   Red Hat, Inc.  All rights reserved.
 * Copyright (C) 2011   Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <linux/export.h>
#include <scsi/scsi_cmnd.h>

/**
 * bsg_destroy_job - routine to teardown/delete a bsg job
 * @kref: kref embedded in the bsg_job that is to be torn down
 */
static void bsg_destroy_job(struct kref *kref)
{
	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
	struct request *rq = job->req;

	blk_end_request_all(rq, scsi_req(rq)->result);

	put_device(job->dev);	/* release reference for the request */

	kfree(job->request_payload.sg_list);
	kfree(job->reply_payload.sg_list);
	kfree(job);
}

void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_destroy_job);
}
EXPORT_SYMBOL_GPL(bsg_job_put);

int bsg_job_get(struct bsg_job *job)
{
	return kref_get_unless_zero(&job->kref);
}
EXPORT_SYMBOL_GPL(bsg_job_get);

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload received
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len)
{
	struct request *req = job->req;
	struct request *rsp = req->next_rq;
	struct scsi_request *rq = scsi_req(req);
	int err;

	err = scsi_req(job->req)->result = result;
	if (err < 0)
		/* we're only returning the result field in the reply */
		rq->sense_len = sizeof(u32);
	else
		rq->sense_len = job->reply_len;
	/* we assume all request payload was transferred, residual == 0 */
	rq->resid_len = 0;

	if (rsp) {
		WARN_ON(reply_payload_rcv_len > scsi_req(rsp)->resid_len);

		/* set reply (bidi) residual */
		scsi_req(rsp)->resid_len -=
			min(reply_payload_rcv_len, scsi_req(rsp)->resid_len);
	}
	blk_complete_request(req);
}
EXPORT_SYMBOL_GPL(bsg_job_done);

/**
 * bsg_softirq_done - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_softirq_done(struct request *rq)
{
	struct bsg_job *job = rq->special;

	bsg_job_put(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
	size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

	BUG_ON(!req->nr_phys_segments);

	buf->sg_list = kzalloc(sz, GFP_KERNEL);
	if (!buf->sg_list)
		return -ENOMEM;
	sg_init_table(buf->sg_list, req->nr_phys_segments);
	scsi_req(req)->resid_len = blk_rq_bytes(req);
	buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
	buf->payload_len = blk_rq_bytes(req);
	return 0;
}

/**
 * bsg_create_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static int bsg_create_job(struct device *dev, struct request *req)
{
	struct request *rsp = req->next_rq;
	struct request_queue *q = req->q;
	struct scsi_request *rq = scsi_req(req);
	struct bsg_job *job;
	int ret;

	BUG_ON(req->special);

	job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	req->special = job;
	job->req = req;
	if (q->bsg_job_size)
		job->dd_data = (void *)&job[1];
	job->request = rq->cmd;
	job->request_len = rq->cmd_len;
	job->reply = rq->sense;
	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
						 * allocated */
	if (req->bio) {
		ret = bsg_map_buffer(&job->request_payload, req);
		if (ret)
			goto failjob_rls_job;
	}
	if (rsp && rsp->bio) {
		ret = bsg_map_buffer(&job->reply_payload, rsp);
		if (ret)
			goto failjob_rls_rqst_payload;
	}
	job->dev = dev;
	/* take a reference for the request */
	get_device(job->dev);
	kref_init(&job->kref);
	return 0;

failjob_rls_rqst_payload:
	kfree(job->request_payload.sg_list);
failjob_rls_job:
	kfree(job);
	return -ENOMEM;
}

/**
 * bsg_request_fn - generic handler for bsg requests
 * @q: request queue to manage
 *
 * On error, bsg_create_job() should return a -Exyz error value
 * that will be set to ->result.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
static void bsg_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct device *dev = q->queuedata;
	struct request *req;
	struct bsg_job *job;
	int ret;

	if (!get_device(dev))
		return;

	while (1) {
		req = blk_fetch_request(q);
		if (!req)
			break;
		spin_unlock_irq(q->queue_lock);

		ret = bsg_create_job(dev, req);
		if (ret) {
			scsi_req(req)->result = ret;
			blk_end_request_all(req, ret);
			spin_lock_irq(q->queue_lock);
			continue;
		}

		job = req->special;
		ret = q->bsg_job_fn(job);
		spin_lock_irq(q->queue_lock);
		if (ret)
			break;
	}

	spin_unlock_irq(q->queue_lock);
	put_device(dev);
	spin_lock_irq(q->queue_lock);
}

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @dd_job_size: size of LLD data needed for each job
 *
 * An illustrative usage sketch appears at the end of this file.
 */
struct request_queue *bsg_setup_queue(struct device *dev, char *name,
		bsg_job_fn *job_fn, int dd_job_size)
{
	struct request_queue *q;
	int ret;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);
	q->cmd_size = sizeof(struct scsi_request);
	q->request_fn = bsg_request_fn;

	ret = blk_init_allocated_queue(q);
	if (ret)
		goto out_cleanup_queue;

	q->queuedata = dev;
	q->bsg_job_size = dd_job_size;
	q->bsg_job_fn = job_fn;
	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
	blk_queue_softirq_done(q, bsg_softirq_done);
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

	ret = bsg_register_queue(q, dev, name, NULL);
	if (ret) {
		printk(KERN_ERR "%s: bsg interface failed to initialize - register queue\n",
		       dev->kobj.name);
		goto out_cleanup_queue;
	}

	return q;
out_cleanup_queue:
	blk_cleanup_queue(q);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
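
/*
 * Illustrative usage sketch (not part of this library): a LLD registers a
 * bsg_job_fn handler through bsg_setup_queue() and completes each job with
 * bsg_job_done(), much like scsi_transport_fc does.  The names
 * example_handle_bsg_job(), example_attach_bsg() and struct example_job_data
 * below are hypothetical and exist only for illustration.
 *
 *	static int example_handle_bsg_job(struct bsg_job *job)
 *	{
 *		// Request payload was mapped by bsg_map_buffer() above.
 *		struct bsg_buffer *payload = &job->request_payload;
 *
 *		// ... hand payload->sg_list / payload->sg_cnt to hardware ...
 *
 *		// When the command finishes, report the result and how much
 *		// reply payload was actually received.
 *		bsg_job_done(job, 0, job->reply_payload.payload_len);
 *		return 0;
 *	}
 *
 *	static int example_attach_bsg(struct device *dev)
 *	{
 *		struct request_queue *q;
 *
 *		q = bsg_setup_queue(dev, "example_bsg",
 *				    example_handle_bsg_job,
 *				    sizeof(struct example_job_data));
 *		if (IS_ERR(q))
 *			return PTR_ERR(q);
 *		return 0;
 *	}
 */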