Lines matching refs:request
96 * For request batching
98 unsigned long last_waited; /* Time last woken after wait for request */
112 struct request;
113 typedef void (rq_end_io_fn)(struct request *, int);
128 struct request {
184 * when request is used as a packet command carrier
210 __REQ_SORTED, /* elevator knows about this request */
214 __REQ_CMD, /* is a regular fs rw request */
227 __REQ_FAILED, /* set if the request failed */
234 __REQ_PM_SUSPEND, /* suspend request */
235 __REQ_PM_RESUME, /* resume request */
236 __REQ_PM_SHUTDOWN, /* shutdown request */
238 __REQ_RW_SYNC, /* request is sync (O_DIRECT) */
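
The __REQ_* bits listed above end up in the request's flags word in this version of the header. As a rough sketch (driver name hypothetical; blk_fs_request() and rq_data_dir() are assumed to be the usual flag-testing helpers from this era), a driver might classify a request like this:

#include <linux/blkdev.h>
#include <linux/errno.h>

/* Reject anything that is not a regular fs read/write, i.e. anything
 * without the __REQ_CMD bit from the list above. */
static int mydrv_classify(struct request *rq)
{
	if (!blk_fs_request(rq))	/* packet command, pm request, ... */
		return -EIO;
	return rq_data_dir(rq);		/* 0 for READ, 1 for WRITE */
}
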
285 typedef int (merge_request_fn) (request_queue_t *, struct request *,
287 typedef int (merge_requests_fn) (request_queue_t *, struct request *,
288 struct request *);
291 typedef int (prep_rq_fn) (request_queue_t *, struct request *);
298 typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
299 typedef void (softirq_done_fn)(struct request *);
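
The prep_rq_fn typedef at line 291 is the hook a driver registers (typically via blk_queue_prep_rq()) to vet or massage a request before it is handed out. A minimal sketch, with hypothetical driver names and an arbitrary size limit:

#include <linux/blkdev.h>

static int mydrv_prep_rq(request_queue_t *q, struct request *rq)
{
	if (rq->nr_sectors > 256)	/* hypothetical per-device limit */
		return BLKPREP_KILL;	/* complete the request as failed */
	return BLKPREP_OK;		/* request may be dispatched */
}

/* registered once at queue setup time: blk_queue_prep_rq(q, mydrv_prep_rq); */
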
307 struct request **tag_index; /* map of busy tags */
322 struct request *last_merge;
326 * the queue request freelist, one for reads and one for writes
347 struct request *boundary_rq;
428 struct request pre_flush_rq, bar_rq, post_flush_rq;
429 struct request *orig_bar_rq;
509 #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
538 * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
598 extern void blk_put_request(struct request *);
599 extern void __blk_put_request(request_queue_t *, struct request *);
600 extern void blk_end_sync_rq(struct request *rq, int error);
601 extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
602 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
603 extern void blk_requeue_request(request_queue_t *, struct request *);
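
Lines 598-603 are the allocation/insertion side of the API. A minimal sketch of pushing a driver-private command through the queue with blk_get_request() and blk_insert_request() (function and variable names hypothetical):

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/errno.h>

static int mydrv_send_special(request_queue_t *q, void *cmd_data)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

	if (!rq)
		return -ENOMEM;

	/* blk_insert_request() stores cmd_data in rq->special and queues
	 * the request; at_head == 0 adds it at the tail. */
	blk_insert_request(q, rq, 0, cmd_data);
	return 0;
}
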
616 extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned int);
618 extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
619 extern int blk_rq_map_user_iov(request_queue_t *, struct request *, struct sg_iovec *, int);
621 struct request *, int);
623 struct request *, int, rq_end_io_fn *);
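
Lines 616-623 cover the passthrough path: map a buffer into a request, then execute it. A sketch of the synchronous kernel-buffer variant (names hypothetical; the gendisk pointer is assumed to come from the caller):

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/errno.h>

static int mydrv_kern_cmd(request_queue_t *q, struct gendisk *disk,
			  void *buf, unsigned int len)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
	int err;

	if (!rq)
		return -ENOMEM;

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);	/* waits for completion */

	blk_put_request(rq);
	return err;
}
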
644 * end_request() and friends. Must be called with the request queue spinlock
652 extern int end_that_request_first(struct request *, int, int);
653 extern int end_that_request_chunk(struct request *, int, int);
654 extern void end_that_request_last(struct request *, int);
655 extern void end_request(struct request *req, int uptodate);
656 extern void blk_complete_request(struct request *);
658 static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
676 static inline void blkdev_dequeue_request(struct request *req)
685 struct request *rq)
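
Lines 644-676 are the completion helpers. The classic pattern they support is a request function that walks the queue with elv_next_request() and retires each chunk with end_request(); end_request() dequeues and finishes the request itself once its last sectors are done. A hedged sketch with hypothetical driver names:

#include <linux/blkdev.h>

static void mydrv_request_fn(request_queue_t *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(req)) {
			end_request(req, 0);	/* not a regular rw request: fail it */
			continue;
		}
		/* ... transfer req->current_nr_sectors sectors starting at
		 * req->sector to/from req->buffer here ... */
		end_request(req, 1);		/* 1 == uptodate/success */
	}
}
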
719 extern int blk_do_ordered(request_queue_t *, struct request **);
721 extern unsigned blk_ordered_req_seq(struct request *);
724 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
725 extern void blk_dump_rq_flags(struct request *, char *);
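
blk_rq_map_sg() at line 724 collapses a request's bios into a scatterlist for DMA. A minimal sketch (names hypothetical; the caller is assumed to supply a table with at least rq->nr_phys_segments entries):

#include <linux/blkdev.h>

static int mydrv_map_for_dma(request_queue_t *q, struct request *rq,
			     struct scatterlist *sg)
{
	int nents = blk_rq_map_sg(q, rq, sg);	/* sg entries actually used */

	/* ... hand sg/nents to the DMA engine here ... */
	return nents;
}
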
741 extern int blk_queue_start_tag(request_queue_t *, struct request *);
742 extern struct request *blk_queue_find_tag(request_queue_t *, int);
743 extern void blk_queue_end_tag(request_queue_t *, struct request *);
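
Lines 741-743 are the tag management API for devices doing tagged command queueing (the busy-tag map itself is the tag_index array at line 307). A sketch assuming blk_queue_init_tags() was called at queue setup (driver names hypothetical):

#include <linux/blkdev.h>
#include <linux/errno.h>

static int mydrv_issue_tagged(request_queue_t *q, struct request *rq)
{
	if (blk_queue_start_tag(q, rq))	/* non-zero: no free tag right now */
		return -EBUSY;

	/* ... submit rq to the hardware using rq->tag ... */
	return 0;
}

/* the completion path gives the tag back with blk_queue_end_tag(q, rq) */
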
750 extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
760 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)