--- block.c (1e8e55b67030c6a2fef893d428bdcd611f73705c)
+++ block.c (10f21df4a23540b5da8e88d1030ff8c37818e04f)
 /*
  * Block driver for media (i.e., flash cards)
  *
  * Copyright 2002 Hewlett-Packard Company
  * Copyright 2005-2008 Pierre Ossman
  *
  * Use consistent with the GNU GPL is permitted,
  * provided that this copyright notice is
--- 2117 unchanged lines hidden ---
@@ -2126,16 +2126,32 @@
                 brq->data.bytes_xfered = 0;
                 mqrq->retries = MMC_NO_RETRIES;
                 pr_err("%s: Unhandled return value (%d)",
                        req->rq_disk->disk_name, status);
                 break;
         }
 }
 
+static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
+{
+        mmc_blk_eval_resp_error(brq);
+
+        return brq->sbc.error || brq->cmd.error || brq->stop.error ||
+               brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
+}
+
+static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
+                                            struct request *req)
+{
+        int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+
+        mmc_blk_reset_success(mq->blkdata, type);
+}
+
 static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
 {
         struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
         unsigned int nr_bytes = mqrq->brq.data.bytes_xfered;
 
         if (nr_bytes) {
                 if (blk_update_request(req, BLK_STS_OK, nr_bytes))
                         blk_mq_requeue_request(req, true);
--- 66 unchanged lines hidden ---
@@ -2208,24 +2224,53 @@
 static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
 {
         struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
         struct mmc_request *mrq = &mqrq->brq.mrq;
         struct mmc_host *host = mq->card->host;
 
         mmc_post_req(host, mrq, 0);
 
-        blk_mq_complete_request(req);
+        /*
+         * Block layer timeouts race with completions which means the normal
+         * completion path cannot be used during recovery.
+         */
+        if (mq->in_recovery)
+                mmc_blk_mq_complete_rq(mq, req);
+        else
+                blk_mq_complete_request(req);
 
         mmc_blk_mq_dec_in_flight(mq, req);
 }
 
+void mmc_blk_mq_recovery(struct mmc_queue *mq)
+{
+        struct request *req = mq->recovery_req;
+        struct mmc_host *host = mq->card->host;
+        struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+        mq->recovery_req = NULL;
+        mq->rw_wait = false;
+
+        if (mmc_blk_rq_error(&mqrq->brq)) {
+                mmc_retune_hold_now(host);
+                mmc_blk_mq_rw_recovery(mq, req);
+        }
+
+        mmc_blk_urgent_bkops(mq, mqrq);
+
+        mmc_blk_mq_post_req(mq, req);
+}
+
 static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
                                          struct request **prev_req)
 {
+        if (mmc_host_done_complete(mq->card->host))
+                return;
+
         mutex_lock(&mq->complete_lock);
 
         if (!mq->complete_req)
                 goto out_unlock;
 
         mmc_blk_mq_poll_completion(mq, mq->complete_req);
 
         if (prev_req)
--- 17 unchanged lines hidden ---
@@ -2249,53 +2294,85 @@
 
 static void mmc_blk_mq_req_done(struct mmc_request *mrq)
 {
         struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
                                                   brq.mrq);
         struct request *req = mmc_queue_req_to_req(mqrq);
         struct request_queue *q = req->q;
         struct mmc_queue *mq = q->queuedata;
+        struct mmc_host *host = mq->card->host;
         unsigned long flags;
-        bool waiting;
 
-        /*
-         * We cannot complete the request in this context, so record that there
-         * is a request to complete, and that a following request does not need
-         * to wait (although it does need to complete complete_req first).
-         */
-        spin_lock_irqsave(q->queue_lock, flags);
-        mq->complete_req = req;
-        mq->rw_wait = false;
-        waiting = mq->waiting;
-        spin_unlock_irqrestore(q->queue_lock, flags);
+        if (!mmc_host_done_complete(host)) {
+                bool waiting;
 
-        /*
-         * If 'waiting' then the waiting task will complete this request,
-         * otherwise queue a work to do it. Note that complete_work may still
-         * race with the dispatch of a following request.
-         */
-        if (waiting)
+                /*
+                 * We cannot complete the request in this context, so record
+                 * that there is a request to complete, and that a following
+                 * request does not need to wait (although it does need to
+                 * complete complete_req first).
+                 */
+                spin_lock_irqsave(q->queue_lock, flags);
+                mq->complete_req = req;
+                mq->rw_wait = false;
+                waiting = mq->waiting;
+                spin_unlock_irqrestore(q->queue_lock, flags);
+
+                /*
+                 * If 'waiting' then the waiting task will complete this
+                 * request, otherwise queue a work to do it. Note that
+                 * complete_work may still race with the dispatch of a following
+                 * request.
+                 */
+                if (waiting)
+                        wake_up(&mq->wait);
+                else
+                        kblockd_schedule_work(&mq->complete_work);
+
+                return;
+        }
+
+        /* Take the recovery path for errors or urgent background operations */
+        if (mmc_blk_rq_error(&mqrq->brq) ||
+            mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+                spin_lock_irqsave(q->queue_lock, flags);
+                mq->recovery_needed = true;
+                mq->recovery_req = req;
+                spin_unlock_irqrestore(q->queue_lock, flags);
                 wake_up(&mq->wait);
-        else
-                kblockd_schedule_work(&mq->complete_work);
+                schedule_work(&mq->recovery_work);
+                return;
+        }
+
+        mmc_blk_rw_reset_success(mq, req);
+
+        mq->rw_wait = false;
+        wake_up(&mq->wait);
+
+        mmc_blk_mq_post_req(mq, req);
 }
 
 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
 {
         struct request_queue *q = mq->queue;
         unsigned long flags;
         bool done;
 
         /*
-         * Wait while there is another request in progress. Also indicate that
-         * there is a request waiting to start.
+         * Wait while there is another request in progress, but not if recovery
+         * is needed. Also indicate whether there is a request waiting to start.
          */
         spin_lock_irqsave(q->queue_lock, flags);
-        done = !mq->rw_wait;
+        if (mq->recovery_needed) {
+                *err = -EBUSY;
+                done = true;
+        } else {
+                done = !mq->rw_wait;
+        }
         mq->waiting = !done;
         spin_unlock_irqrestore(q->queue_lock, flags);
 
         return done;
 }
 
 static int mmc_blk_rw_wait(struct mmc_queue *mq, struct request **prev_req)
 {
--- 27 unchanged lines hidden ---
@@ -2329,20 +2406,22 @@
 
         mq->rw_wait = true;
 
         err = mmc_start_request(host, &mqrq->brq.mrq);
 
         if (prev_req)
                 mmc_blk_mq_post_req(mq, prev_req);
 
-        if (err) {
+        if (err)
                 mq->rw_wait = false;
+
+        /* Release re-tuning here where there is no synchronization required */
+        if (err || mmc_host_done_complete(host))
                 mmc_retune_release(host);
-        }
 
 out_post_req:
         if (err)
                 mmc_post_req(host, &mqrq->brq.mrq, err);
 
         return err;
 }
 
--- 1230 unchanged lines hidden ---