q 23 arch/x86/include/asm/asm.h inst##q##__VA_ARGS__)
q 18 arch/x86/include/asm/msr.h u64 q;
q 223 arch/x86/include/asm/msr.h int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
q 224 arch/x86/include/asm/msr.h int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
q 229 arch/x86/include/asm/msr.h int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
q 230 arch/x86/include/asm/msr.h int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
q 244 arch/x86/include/asm/msr.h static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
q 246 arch/x86/include/asm/msr.h rdmsrl(msr_no, *q);
q 249 arch/x86/include/asm/msr.h static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
q 251 arch/x86/include/asm/msr.h wrmsrl(msr_no, q);
q 273 arch/x86/include/asm/msr.h static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
q 275 arch/x86/include/asm/msr.h return rdmsrl_safe(msr_no, q);
q 277 arch/x86/include/asm/msr.h static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
q 279 arch/x86/include/asm/msr.h return wrmsrl_safe(msr_no, q);
q 64 include/crypto/b128ops.h static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
q 66 include/crypto/b128ops.h r->a = p->a ^ q->a;
q 67 include/crypto/b128ops.h r->b = p->b ^ q->b;
q 70 include/crypto/b128ops.h static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
q 72 include/crypto/b128ops.h u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
q 75 include/crypto/b128ops.h static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
q 77 include/crypto/b128ops.h u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
q 60 include/crypto/public_key.h MPI q; /* DSA group order */
q 70 include/crypto/public_key.h MPI q; /* RSA secret prime (if present) */
q 186 include/linux/bio.h #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
q 187 include/linux/bio.h __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
q 205 include/linux/blk-mq.h for ((i) = 0; (i) < (q)->nr_hw_queues && \
q 206 include/linux/blk-mq.h ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
q 208 include/linux/blk-mq.h #define queue_for_each_ctx(q, ctx, i) \
q 209 include/linux/blk-mq.h for ((i) = 0; (i) < (q)->nr_queues && \
q 210 include/linux/blk-mq.h ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
q 216 include/linux/blk-mq.h #define blk_ctx_sum(q, sum) \
q 221 include/linux/blk-mq.h queue_for_each_ctx((q), __x, __i) \
q 57 include/linux/blkdev.h struct request_queue *q; /* the queue this rl belongs to */
q 107 include/linux/blkdev.h struct request_queue *q;
q 236 include/linux/blkdev.h typedef void (request_fn_proc) (struct request_queue *q);
q 237 include/linux/blkdev.h typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
q 252 include/linux/blkdev.h typedef int (lld_busy_fn) (struct request_queue *q);
q 521 include/linux/blkdev.h static inline void queue_lockdep_assert_held(struct request_queue *q)
q 523 include/linux/blkdev.h if (q->queue_lock)
q 524 include/linux/blkdev.h lockdep_assert_held(q->queue_lock);
q 528 include/linux/blkdev.h struct request_queue *q)
q 530 include/linux/blkdev.h __set_bit(flag, &q->queue_flags);
q 534 include/linux/blkdev.h struct request_queue *q)
q 538 include/linux/blkdev.h if (test_bit(flag, &q->queue_flags)) {
q 539 include/linux/blkdev.h __clear_bit(flag, &q->queue_flags);
q 547 include/linux/blkdev.h struct request_queue *q)
q 551 include/linux/blkdev.h if (!test_bit(flag, &q->queue_flags)) {
q 552 include/linux/blkdev.h __set_bit(flag, &q->queue_flags);
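
The blkdev.h fragments above (source lines 521-552) are the queue-flag helpers: flags live in q->queue_flags, the __set_bit()/__clear_bit() variants are non-atomic, and queue_lockdep_assert_held() documents that callers must hold q->queue_lock. A minimal sketch of the pattern, with a hypothetical driver helper name (this is essentially what blk_stop_queue() does internally):

    /* Hypothetical: stop dispatching while reconfiguring the device. */
    static void example_quiesce(struct request_queue *q)
    {
        spin_lock_irq(q->queue_lock);
        queue_flag_set(QUEUE_FLAG_STOPPED, q);  /* lockdep-checked variant */
        spin_unlock_irq(q->queue_lock);
    }
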
q 559 include/linux/blkdev.h static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
q 562 include/linux/blkdev.h __set_bit(flag, &q->queue_flags);
q 566 include/linux/blkdev.h struct request_queue *q)
q 568 include/linux/blkdev.h __clear_bit(flag, &q->queue_flags);
q 571 include/linux/blkdev.h static inline int queue_in_flight(struct request_queue *q)
q 573 include/linux/blkdev.h return q->in_flight[0] + q->in_flight[1];
q 576 include/linux/blkdev.h static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
q 579 include/linux/blkdev.h __clear_bit(flag, &q->queue_flags);
q 582 include/linux/blkdev.h #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
q 583 include/linux/blkdev.h #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
q 584 include/linux/blkdev.h #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
q 585 include/linux/blkdev.h #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
q 586 include/linux/blkdev.h #define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
q 587 include/linux/blkdev.h #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
q 588 include/linux/blkdev.h #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
q 589 include/linux/blkdev.h #define blk_queue_noxmerges(q) \
q 590 include/linux/blkdev.h test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
q 591 include/linux/blkdev.h #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
q 592 include/linux/blkdev.h #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
q 593 include/linux/blkdev.h #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
q 594 include/linux/blkdev.h #define blk_queue_stackable(q) \
q 595 include/linux/blkdev.h test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
q 596 include/linux/blkdev.h #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
q 597 include/linux/blkdev.h #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
q 598 include/linux/blkdev.h test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
q 625 include/linux/blkdev.h static inline bool queue_is_rq_based(struct request_queue *q)
q 627 include/linux/blkdev.h return q->request_fn || q->mq_ops;
q 630 include/linux/blkdev.h static inline unsigned int blk_queue_cluster(struct request_queue *q)
q 632 include/linux/blkdev.h return q->limits.cluster;
q 790 include/linux/blkdev.h extern void blk_rq_init(struct request_queue *q, struct request *rq);
q 800 include/linux/blkdev.h extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
q 801 include/linux/blkdev.h extern int blk_lld_busy(struct request_queue *q);
q 807 include/linux/blkdev.h extern int blk_insert_cloned_request(struct request_queue *q,
q 819 include/linux/blkdev.h extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
q 826 include/linux/blkdev.h static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
q 828 include/linux/blkdev.h clear_bdi_congested(&q->backing_dev_info, sync);
q 835 include/linux/blkdev.h static inline void blk_set_queue_congested(struct request_queue *q, int sync)
q 837 include/linux/blkdev.h set_bdi_congested(&q->backing_dev_info, sync);
q 840 include/linux/blkdev.h extern void blk_start_queue(struct request_queue *q);
q 841 include/linux/blkdev.h extern void blk_stop_queue(struct request_queue *q);
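
The blk_queue_*() macros above are the read side of those flags; drivers and filesystems test them before relying on optional queue features. A sketch of gating discard on QUEUE_FLAG_DISCARD, assuming a hypothetical helper name:

    /* Hypothetical helper: does this block device accept discards? */
    static bool example_can_discard(struct block_device *bdev)
    {
        struct request_queue *q = bdev_get_queue(bdev);

        return q && blk_queue_discard(q);
    }
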
q 842 include/linux/blkdev.h extern void blk_sync_queue(struct request_queue *q);
q 843 include/linux/blkdev.h extern void __blk_stop_queue(struct request_queue *q);
q 844 include/linux/blkdev.h extern void __blk_run_queue(struct request_queue *q);
q 846 include/linux/blkdev.h extern void blk_run_queue_async(struct request_queue *q);
q 900 include/linux/blkdev.h static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
q 904 include/linux/blkdev.h return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
q 907 include/linux/blkdev.h return q->limits.max_write_same_sectors;
q 909 include/linux/blkdev.h return q->limits.max_sectors;
q 916 include/linux/blkdev.h static inline unsigned int blk_max_size_offset(struct request_queue *q,
q 919 include/linux/blkdev.h if (!q->limits.chunk_sectors)
q 920 include/linux/blkdev.h return q->limits.max_sectors;
q 922 include/linux/blkdev.h return q->limits.chunk_sectors -
q 923 include/linux/blkdev.h (offset & (q->limits.chunk_sectors - 1));
q 928 include/linux/blkdev.h struct request_queue *q = rq->q;
q 931 include/linux/blkdev.h return q->limits.max_hw_sectors;
q 933 include/linux/blkdev.h if (!q->limits.chunk_sectors)
q 934 include/linux/blkdev.h return blk_queue_get_max_sectors(q, rq->cmd_flags);
q 936 include/linux/blkdev.h return min(blk_max_size_offset(q, blk_rq_pos(rq)),
q 937 include/linux/blkdev.h blk_queue_get_max_sectors(q, rq->cmd_flags));
q 954 include/linux/blkdev.h extern struct request *blk_peek_request(struct request_queue *q);
q 956 include/linux/blkdev.h extern struct request *blk_fetch_request(struct request_queue *q);
q 1006 include/linux/blkdev.h extern void blk_queue_max_discard_sectors(struct request_queue *q,
q 1008 include/linux/blkdev.h extern void blk_queue_max_write_same_sectors(struct request_queue *q,
q 1012 include/linux/blkdev.h extern void blk_queue_alignment_offset(struct request_queue *q,
q 1015 include/linux/blkdev.h extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
q 1017 include/linux/blkdev.h extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
q 1029 include/linux/blkdev.h extern int blk_queue_dma_drain(struct request_queue *q,
q 1032 include/linux/blkdev.h extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
q 1042 include/linux/blkdev.h extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
q 1043 include/linux/blkdev.h extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
q 1047 include/linux/blkdev.h extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
q 1061 include/linux/blkdev.h extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
q 1062 include/linux/blkdev.h extern int blk_pre_runtime_suspend(struct request_queue *q);
q 1063 include/linux/blkdev.h extern void blk_post_runtime_suspend(struct request_queue *q, int err);
q 1064 include/linux/blkdev.h extern void blk_pre_runtime_resume(struct request_queue *q);
q 1065 include/linux/blkdev.h extern void blk_post_runtime_resume(struct request_queue *q, int err);
q 1067 include/linux/blkdev.h static inline void blk_pm_runtime_init(struct request_queue *q,
q 1069 include/linux/blkdev.h static inline int blk_pre_runtime_suspend(struct request_queue *q)
q 1073 include/linux/blkdev.h static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
q 1074 include/linux/blkdev.h static inline void blk_pre_runtime_resume(struct request_queue *q) {}
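
blk_max_size_offset() above assumes q->limits.chunk_sectors is a power of two, so offset & (chunk_sectors - 1) is the position inside the current chunk and the subtraction gives the sectors remaining before the chunk boundary. The same arithmetic standalone, with made-up values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk_sectors = 256;  /* power of two */
        unsigned int offset = 1000;        /* request start, in sectors */

        /* 1000 & 255 = 232 into the chunk; 256 - 232 = 24 sectors left */
        printf("%u\n", chunk_sectors - (offset & (chunk_sectors - 1)));
        return 0;
    }
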
q 1075 include/linux/blkdev.h static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
q 1195 include/linux/blkdev.h static inline unsigned long queue_bounce_pfn(struct request_queue *q)
q 1197 include/linux/blkdev.h return q->limits.bounce_pfn;
q 1200 include/linux/blkdev.h static inline unsigned long queue_segment_boundary(struct request_queue *q)
q 1202 include/linux/blkdev.h return q->limits.seg_boundary_mask;
q 1205 include/linux/blkdev.h static inline unsigned int queue_max_sectors(struct request_queue *q)
q 1207 include/linux/blkdev.h return q->limits.max_sectors;
q 1210 include/linux/blkdev.h static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
q 1212 include/linux/blkdev.h return q->limits.max_hw_sectors;
q 1215 include/linux/blkdev.h static inline unsigned short queue_max_segments(struct request_queue *q)
q 1217 include/linux/blkdev.h return q->limits.max_segments;
q 1220 include/linux/blkdev.h static inline unsigned int queue_max_segment_size(struct request_queue *q)
q 1222 include/linux/blkdev.h return q->limits.max_segment_size;
q 1225 include/linux/blkdev.h static inline unsigned short queue_logical_block_size(struct request_queue *q)
q 1229 include/linux/blkdev.h if (q && q->limits.logical_block_size)
q 1230 include/linux/blkdev.h retval = q->limits.logical_block_size;
q 1240 include/linux/blkdev.h static inline unsigned int queue_physical_block_size(struct request_queue *q)
q 1242 include/linux/blkdev.h return q->limits.physical_block_size;
q 1250 include/linux/blkdev.h static inline unsigned int queue_io_min(struct request_queue *q)
q 1252 include/linux/blkdev.h return q->limits.io_min;
q 1260 include/linux/blkdev.h static inline unsigned int queue_io_opt(struct request_queue *q)
q 1262 include/linux/blkdev.h return q->limits.io_opt;
q 1270 include/linux/blkdev.h static inline int queue_alignment_offset(struct request_queue *q)
q 1272 include/linux/blkdev.h if (q->limits.misaligned)
q 1275 include/linux/blkdev.h return q->limits.alignment_offset;
q 1288 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev);
q 1290 include/linux/blkdev.h if (q->limits.misaligned)
q 1296 include/linux/blkdev.h return q->limits.alignment_offset;
q 1299 include/linux/blkdev.h static inline int queue_discard_alignment(struct request_queue *q)
q 1301 include/linux/blkdev.h if (q->limits.discard_misaligned)
q 1304 include/linux/blkdev.h return q->limits.discard_alignment;
q 1332 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev);
q 1337 include/linux/blkdev.h return q->limits.discard_alignment;
q 1340 include/linux/blkdev.h static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
q 1342 include/linux/blkdev.h if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
q 1355 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev);
q 1357 include/linux/blkdev.h if (q)
q 1358 include/linux/blkdev.h return q->limits.max_write_same_sectors;
q 1363 include/linux/blkdev.h static inline int queue_dma_alignment(struct request_queue *q)
q 1365 include/linux/blkdev.h return q ? q->dma_alignment : 511;
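
queue_dma_alignment() above falls back to 511 (512-byte alignment) when there is no queue, and blk_rq_aligned(), indexed just below, folds q->dma_pad_mask into the same mask so one test covers both constraints. A standalone mirror of that check, with made-up addresses:

    #include <stdbool.h>
    #include <stdio.h>

    /* Both the buffer address and its length must satisfy the mask. */
    static bool rq_aligned(unsigned long addr, unsigned long len,
                           unsigned int alignment)
    {
        return !(addr & alignment) && !(len & alignment);
    }

    int main(void)
    {
        printf("%d\n", rq_aligned(0x1000, 1024, 511)); /* 1: 512-byte aligned */
        printf("%d\n", rq_aligned(0x1001, 1024, 511)); /* 0: odd address */
        return 0;
    }
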
q 1368 include/linux/blkdev.h static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
q 1371 include/linux/blkdev.h unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
q 1391 include/linux/blkdev.h static inline bool queue_flush_queueable(struct request_queue *q)
q 1393 include/linux/blkdev.h return !q->flush_not_queueable;
q 1519 include/linux/blkdev.h static inline void blk_queue_max_integrity_segments(struct request_queue *q,
q 1522 include/linux/blkdev.h q->limits.max_integrity_segments = segs;
q 1526 include/linux/blkdev.h queue_max_integrity_segments(struct request_queue *q)
q 1528 include/linux/blkdev.h return q->limits.max_integrity_segments;
q 1542 include/linux/blkdev.h static inline int blk_rq_count_integrity_sg(struct request_queue *q,
q 1547 include/linux/blkdev.h static inline int blk_rq_map_integrity_sg(struct request_queue *q,
q 1573 include/linux/blkdev.h static inline void blk_queue_max_integrity_segments(struct request_queue *q,
q 1577 include/linux/blkdev.h static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
q 33 include/linux/blktrace_api.h extern int do_blk_trace_setup(struct request_queue *q, char *name,
q 52 include/linux/blktrace_api.h #define blk_add_trace_msg(q, fmt, ...) \
q 54 include/linux/blktrace_api.h struct blk_trace *bt = (q)->blk_trace; \
q 60 include/linux/blktrace_api.h extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
q 62 include/linux/blktrace_api.h extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
q 65 include/linux/blktrace_api.h extern int blk_trace_startstop(struct request_queue *q, int start);
q 66 include/linux/blktrace_api.h extern int blk_trace_remove(struct request_queue *q);
q 74 include/linux/blktrace_api.h # define blk_trace_shutdown(q) do { } while (0)
q 75 include/linux/blktrace_api.h # define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY)
q 76 include/linux/blktrace_api.h # define blk_add_driver_data(q, rq, data, len) do {} while (0)
q 77 include/linux/blktrace_api.h # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
q 78 include/linux/blktrace_api.h # define blk_trace_startstop(q, start) (-ENOTTY)
q 79 include/linux/blktrace_api.h # define blk_trace_remove(q) (-ENOTTY)
q 80 include/linux/blktrace_api.h # define blk_add_trace_msg(q, fmt, ...) do { } while (0)
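
The blktrace entries above show the usual conditional-API pattern: real declarations under CONFIG_BLK_DEV_IO_TRACE and no-op stubs otherwise, so call sites need no #ifdefs. Usage might look like this (the surrounding function is hypothetical):

    /* Hypothetical driver note; compiles to do { } while (0) without blktrace. */
    static void example_note_requeue(struct request_queue *q, int reason)
    {
        blk_add_trace_msg(q, "requeue: reason %d", reason);
    }
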
q 67 include/linux/bsg-lib.h int bsg_setup_queue(struct device *dev, struct request_queue *q, char *name,
q 69 include/linux/bsg-lib.h void bsg_request_fn(struct request_queue *q);
q 17 include/linux/bsg.h extern int bsg_register_queue(struct request_queue *q,
q 22 include/linux/bsg.h static inline int bsg_register_queue(struct request_queue *q,
q 28 include/linux/bsg.h static inline void bsg_unregister_queue(struct request_queue *q)
q 29 include/linux/cordic.h s32 q;
q 604 include/linux/device-mapper.h int dm_underlying_device_busy(struct request_queue *q);
q 128 include/linux/elevator.h extern void elv_bio_merged(struct request_queue *q, struct request *,
q 133 include/linux/elevator.h extern int elv_register_queue(struct request_queue *q);
q 134 include/linux/elevator.h extern void elv_unregister_queue(struct request_queue *q);
q 137 include/linux/elevator.h extern int elv_set_request(struct request_queue *q, struct request *rq,
q 72 include/linux/iocontext.h struct request_queue *q;
q 578 include/linux/netdevice.h static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
q 581 include/linux/netdevice.h return q->numa_node;
q 587 include/linux/netdevice.h static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
q 590 include/linux/netdevice.h q->numa_node = node;
q 2371 include/linux/netdevice.h void __netif_schedule(struct Qdisc *q);
q 2602 include/linux/netdevice.h static inline void netdev_tx_reset_queue(struct netdev_queue *q)
q 2605 include/linux/netdevice.h clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
q 2606 include/linux/netdevice.h dql_reset(&q->dql);
q 35 include/linux/quicklist.h struct quicklist *q;
q 38 include/linux/quicklist.h q =&get_cpu_var(quicklist)[nr];
q 39 include/linux/quicklist.h p = q->page;
q 41 include/linux/quicklist.h q->page = p[0];
q 43 include/linux/quicklist.h q->nr_pages--;
q 58 include/linux/quicklist.h struct quicklist *q;
q 60 include/linux/quicklist.h q = &get_cpu_var(quicklist)[nr];
q 61 include/linux/quicklist.h *(void **)p = q->page;
q 62 include/linux/quicklist.h q->page = p;
q 63 include/linux/quicklist.h q->nr_pages++;
q 201 include/linux/sunrpc/sched.h #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
q 255 include/linux/sunrpc/sched.h static inline const char * rpc_qname(const struct rpc_wait_queue *q)
q 257 include/linux/sunrpc/sched.h return ((q && q->name) ? q->name : "unknown");
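
The quicklist.h lines above implement a per-CPU freelist of pages that links free pages through their own first word: q->page is the head and p[0] the next page, so push and pop are a couple of loads and stores. The same intrusive-freelist idea in standalone C, with hypothetical names and malloc standing in for the page allocator:

    #include <stdlib.h>

    struct freelist {
        void *head;            /* first cached page, or NULL */
        long  nr;              /* pages currently cached */
    };

    static void fl_free(struct freelist *fl, void *page)
    {
        *(void **)page = fl->head;  /* link through the page itself */
        fl->head = page;
        fl->nr++;
    }

    static void *fl_alloc(struct freelist *fl)
    {
        void **page = fl->head;

        if (!page)
            return malloc(4096);    /* cache empty: hit the real allocator */
        fl->head = page[0];         /* next pointer lives in word 0 */
        fl->nr--;
        return page;
    }
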
q 260 include/linux/sunrpc/sched.h static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
q 263 include/linux/sunrpc/sched.h q->name = name;
q 266 include/linux/sunrpc/sched.h static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
q 69 include/linux/wait.h extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
q 71 include/linux/wait.h #define init_waitqueue_head(q) \
q 75 include/linux/wait.h __init_waitqueue_head((q), #q, &__key); \
q 87 include/linux/wait.h static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
q 89 include/linux/wait.h q->flags = 0;
q 90 include/linux/wait.h q->private = p;
q 91 include/linux/wait.h q->func = default_wake_function;
q 95 include/linux/wait.h init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
q 97 include/linux/wait.h q->flags = 0;
q 98 include/linux/wait.h q->private = NULL;
q 99 include/linux/wait.h q->func = func;
q 102 include/linux/wait.h static inline int waitqueue_active(wait_queue_head_t *q)
q 104 include/linux/wait.h return !list_empty(&q->task_list);
q 107 include/linux/wait.h extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
q 108 include/linux/wait.h extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
q 109 include/linux/wait.h extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
q 120 include/linux/wait.h __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
q 123 include/linux/wait.h __add_wait_queue(q, wait);
q 133 include/linux/wait.h __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
q 136 include/linux/wait.h __add_wait_queue_tail(q, wait);
q 146 include/linux/wait.h void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
q 147 include/linux/wait.h void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
q 148 include/linux/wait.h void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
q 149 include/linux/wait.h void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
q 150 include/linux/wait.h void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
q 828 include/linux/wait.h void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
q 829 include/linux/wait.h void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
q 830 include/linux/wait.h long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
q 831 include/linux/wait.h void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
q 832 include/linux/wait.h void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
q 253 include/math-emu/op-1.h #define _FP_SQRT_MEAT_1(R, S, T, X, q) \
q 255 include/math-emu/op-1.h while (q != _FP_WORK_ROUND) \
q 257 include/math-emu/op-1.h T##_f = S##_f + q; \
q 260 include/math-emu/op-1.h S##_f = T##_f + q; \
q 262 include/math-emu/op-1.h R##_f += q; \
q 265 include/math-emu/op-1.h q >>= 1; \
q 529 include/math-emu/op-2.h #define _FP_SQRT_MEAT_2(R, S, T, X, q) \
q 531 include/math-emu/op-2.h while (q) \
q 533 include/math-emu/op-2.h T##_f1 = S##_f1 + q; \
q 536 include/math-emu/op-2.h S##_f1 = T##_f1 + q; \
q 538 include/math-emu/op-2.h R##_f1 += q; \
q 541 include/math-emu/op-2.h q >>= 1; \
q 543 include/math-emu/op-2.h q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
q 544 include/math-emu/op-2.h while (q != _FP_WORK_ROUND) \
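
The _FP_SQRT_MEAT_* macros above compute a square root digit by digit across one, two, or four mantissa words: q is a single candidate result bit walked from high to low, and each step tests whether that bit can be set without overshooting the remainder. The kernel variant shifts the remainder to stay in fixed point, but the underlying idea is the classic integer algorithm, shown here standalone:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t isqrt(uint32_t n)
    {
        uint32_t r = 0, q = 1u << 30;  /* highest power of four */

        while (q > n)
            q >>= 2;
        while (q) {
            if (n >= r + q) {          /* this result bit fits */
                n -= r + q;
                r = (r >> 1) + q;
            } else {
                r >>= 1;
            }
            q >>= 2;
        }
        return r;                      /* floor(sqrt(n)) */
    }

    int main(void)
    {
        printf("%u\n", isqrt(26));     /* prints 5 */
        return 0;
    }
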
q 546 include/math-emu/op-2.h T##_f0 = S##_f0 + q; \
q 551 include/math-emu/op-2.h S##_f0 = T##_f0 + q; \
q 554 include/math-emu/op-2.h R##_f0 += q; \
q 557 include/math-emu/op-2.h q >>= 1; \
q 429 include/math-emu/op-4.h #define _FP_SQRT_MEAT_4(R, S, T, X, q) \
q 431 include/math-emu/op-4.h while (q) \
q 433 include/math-emu/op-4.h T##_f[3] = S##_f[3] + q; \
q 436 include/math-emu/op-4.h S##_f[3] = T##_f[3] + q; \
q 438 include/math-emu/op-4.h R##_f[3] += q; \
q 441 include/math-emu/op-4.h q >>= 1; \
q 443 include/math-emu/op-4.h q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
q 444 include/math-emu/op-4.h while (q) \
q 446 include/math-emu/op-4.h T##_f[2] = S##_f[2] + q; \
q 451 include/math-emu/op-4.h S##_f[2] = T##_f[2] + q; \
q 455 include/math-emu/op-4.h R##_f[2] += q; \
q 458 include/math-emu/op-4.h q >>= 1; \
q 460 include/math-emu/op-4.h q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
q 461 include/math-emu/op-4.h while (q) \
q 463 include/math-emu/op-4.h T##_f[1] = S##_f[1] + q; \
q 470 include/math-emu/op-4.h S##_f[1] = T##_f[1] + q; \
q 475 include/math-emu/op-4.h R##_f[1] += q; \
q 478 include/math-emu/op-4.h q >>= 1; \
q 480 include/math-emu/op-4.h q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \
q 481 include/math-emu/op-4.h while (q != _FP_WORK_ROUND) \
q 483 include/math-emu/op-4.h T##_f[0] = S##_f[0] + q; \
q 489 include/math-emu/op-4.h S##_f[0] = T##_f[0] + q; \
q 494 include/math-emu/op-4.h R##_f[0] += q; \
q 497 include/math-emu/op-4.h q >>= 1; \
q 589 include/math-emu/op-common.h _FP_W_TYPE q; \
q 631 include/math-emu/op-common.h q = _FP_OVERFLOW_##fs >> 1; \
q 632 include/math-emu/op-common.h _FP_SQRT_MEAT_##wc(R, S, T, X, q); \
q 871 include/math-emu/op-common.h #define _FP_DIV_HELP_imm(q, r, n, d) \
q 873 include/math-emu/op-common.h q = n / d, r = n % d; \
q 183 include/media/saa7146_vv.h void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state);
q 184 include/media/saa7146_vv.h void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi);
q 185 include/media/saa7146_vv.h int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf);
q 187 include/media/saa7146_vv.h void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q,
q 56 include/media/v4l2-mem2mem.h struct vb2_queue q;
q 205 include/media/v4l2-mem2mem.h return &m2m_ctx->out_q_ctx.q;
q 214 include/media/v4l2-mem2mem.h return &m2m_ctx->cap_q_ctx.q;
q 53 include/media/videobuf-core.h struct videobuf_queue *q;
q 106 include/media/videobuf-core.h int (*buf_setup)(struct videobuf_queue *q,
q 108 include/media/videobuf-core.h int (*buf_prepare)(struct videobuf_queue *q,
q 111 include/media/videobuf-core.h void (*buf_queue)(struct videobuf_queue *q,
q 113 include/media/videobuf-core.h void (*buf_release)(struct videobuf_queue *q,
q 125 include/media/videobuf-core.h int (*iolock) (struct videobuf_queue *q,
q 128 include/media/videobuf-core.h int (*sync) (struct videobuf_queue *q,
q 130 include/media/videobuf-core.h int (*mmap_mapper) (struct videobuf_queue *q,
q 165 include/media/videobuf-core.h static inline void videobuf_queue_lock(struct videobuf_queue *q)
q 167 include/media/videobuf-core.h if (!q->ext_lock)
q 168 include/media/videobuf-core.h mutex_lock(&q->vb_lock);
q 171 include/media/videobuf-core.h static inline void videobuf_queue_unlock(struct videobuf_queue *q)
q 173 include/media/videobuf-core.h if (!q->ext_lock)
q 174 include/media/videobuf-core.h mutex_unlock(&q->vb_lock);
q 177 include/media/videobuf-core.h int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
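
The videobuf-core.h fragments above are the driver-supplied callback table (buf_setup, buf_prepare, buf_queue, buf_release) plus conditional locking: vb_lock is only taken when the driver did not pass an external lock (ext_lock). A skeletal ops table for a hypothetical driver, assuming the mydrv_* names:

    #include <media/videobuf-core.h>

    /* Tell the core how many buffers of what size this device needs. */
    static int mydrv_buf_setup(struct videobuf_queue *q,
                               unsigned int *count, unsigned int *size)
    {
        *size = 640 * 480 * 2;    /* bytes per frame, device-specific */
        if (*count < 4)
            *count = 4;
        return 0;
    }

    static void mydrv_buf_queue(struct videobuf_queue *q,
                                struct videobuf_buffer *vb)
    {
        vb->state = VIDEOBUF_QUEUED;  /* a real driver also queues it to hw */
    }

    static struct videobuf_queue_ops mydrv_qops = {
        .buf_setup = mydrv_buf_setup,
        .buf_queue = mydrv_buf_queue,
        /* .buf_prepare and .buf_release omitted here */
    };
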
q 179 include/media/videobuf-core.h int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
q 182 include/media/videobuf-core.h struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q);
q 185 include/media/videobuf-core.h void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
q 188 include/media/videobuf-core.h void videobuf_queue_core_init(struct videobuf_queue *q,
q 198 include/media/videobuf-core.h int videobuf_queue_is_busy(struct videobuf_queue *q);
q 199 include/media/videobuf-core.h void videobuf_queue_cancel(struct videobuf_queue *q);
q 201 include/media/videobuf-core.h enum v4l2_field videobuf_next_field(struct videobuf_queue *q);
q 202 include/media/videobuf-core.h int videobuf_reqbufs(struct videobuf_queue *q,
q 204 include/media/videobuf-core.h int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b);
q 205 include/media/videobuf-core.h int videobuf_qbuf(struct videobuf_queue *q,
q 207 include/media/videobuf-core.h int videobuf_dqbuf(struct videobuf_queue *q,
q 209 include/media/videobuf-core.h int videobuf_streamon(struct videobuf_queue *q);
q 210 include/media/videobuf-core.h int videobuf_streamoff(struct videobuf_queue *q);
q 212 include/media/videobuf-core.h void videobuf_stop(struct videobuf_queue *q);
q 214 include/media/videobuf-core.h int videobuf_read_start(struct videobuf_queue *q);
q 215 include/media/videobuf-core.h void videobuf_read_stop(struct videobuf_queue *q);
q 216 include/media/videobuf-core.h ssize_t videobuf_read_stream(struct videobuf_queue *q,
q 219 include/media/videobuf-core.h ssize_t videobuf_read_one(struct videobuf_queue *q,
q 223 include/media/videobuf-core.h struct videobuf_queue *q,
q 226 include/media/videobuf-core.h int videobuf_mmap_setup(struct videobuf_queue *q,
q 229 include/media/videobuf-core.h int __videobuf_mmap_setup(struct videobuf_queue *q,
q 232 include/media/videobuf-core.h int videobuf_mmap_free(struct videobuf_queue *q);
q 233 include/media/videobuf-core.h int videobuf_mmap_mapper(struct videobuf_queue *q,
q 19 include/media/videobuf-dma-contig.h void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
q 30 include/media/videobuf-dma-contig.h void videobuf_dma_contig_free(struct videobuf_queue *q,
q 102 include/media/videobuf-dma-sg.h void videobuf_queue_sg_init(struct videobuf_queue *q,
q 32 include/media/videobuf-vmalloc.h void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
q 318 include/media/videobuf2-core.h int (*queue_setup)(struct vb2_queue *q, const struct v4l2_format *fmt,
q 322 include/media/videobuf2-core.h void (*wait_prepare)(struct vb2_queue *q);
q 323 include/media/videobuf2-core.h void (*wait_finish)(struct vb2_queue *q);
q 330 include/media/videobuf2-core.h int (*start_streaming)(struct vb2_queue *q, unsigned int count);
q 331 include/media/videobuf2-core.h void (*stop_streaming)(struct vb2_queue *q);
q 447 include/media/videobuf2-core.h void vb2_discard_done(struct vb2_queue *q);
q 448 include/media/videobuf2-core.h int vb2_wait_for_all_buffers(struct vb2_queue *q);
q 450 include/media/videobuf2-core.h int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
q 451 include/media/videobuf2-core.h int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
q 453 include/media/videobuf2-core.h int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
q 454 include/media/videobuf2-core.h int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b);
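
The videobuf2-core.h entries above are the second-generation equivalent: queue_setup sizes the buffers, start_streaming/stop_streaming bracket DMA, and wait_prepare/wait_finish drop and retake the driver lock around blocking waits. A hypothetical vb2_ops skeleton matching the signatures indexed above:

    #include <media/videobuf2-core.h>

    static int mydrv_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
                                 unsigned int *nbuffers, unsigned int *nplanes,
                                 unsigned int sizes[], void *alloc_ctxs[])
    {
        *nplanes = 1;
        sizes[0] = 640 * 480 * 2;  /* device-specific frame size */
        if (*nbuffers < 3)
            *nbuffers = 3;
        return 0;
    }

    static int mydrv_start_streaming(struct vb2_queue *q, unsigned int count)
    {
        return 0;                  /* a real driver starts DMA here */
    }

    static void mydrv_stop_streaming(struct vb2_queue *q)
    {
        /* a real driver stops DMA and returns queued buffers here */
    }

    static const struct vb2_ops mydrv_vb2_ops = {
        .queue_setup     = mydrv_queue_setup,
        .start_streaming = mydrv_start_streaming,
        .stop_streaming  = mydrv_stop_streaming,
        /* buf_queue and the wait_prepare/wait_finish pair omitted */
    };
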
q 456 include/media/videobuf2-core.h int __must_check vb2_queue_init(struct vb2_queue *q);
q 458 include/media/videobuf2-core.h void vb2_queue_release(struct vb2_queue *q);
q 459 include/media/videobuf2-core.h void vb2_queue_error(struct vb2_queue *q);
q 461 include/media/videobuf2-core.h int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b);
q 462 include/media/videobuf2-core.h int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb);
q 463 include/media/videobuf2-core.h int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking);
q 465 include/media/videobuf2-core.h int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type);
q 466 include/media/videobuf2-core.h int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type);
q 468 include/media/videobuf2-core.h int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma);
q 470 include/media/videobuf2-core.h unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
q 476 include/media/videobuf2-core.h unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
q 477 include/media/videobuf2-core.h size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
q 479 include/media/videobuf2-core.h size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
q 502 include/media/videobuf2-core.h int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
q 509 include/media/videobuf2-core.h int vb2_thread_stop(struct vb2_queue *q);
q 515 include/media/videobuf2-core.h static inline bool vb2_is_streaming(struct vb2_queue *q)
q 517 include/media/videobuf2-core.h return q->streaming;
q 532 include/media/videobuf2-core.h static inline bool vb2_fileio_is_active(struct vb2_queue *q)
q 534 include/media/videobuf2-core.h return q->fileio;
q 543 include/media/videobuf2-core.h static inline bool vb2_is_busy(struct vb2_queue *q)
q 545 include/media/videobuf2-core.h return (q->num_buffers > 0);
q 552 include/media/videobuf2-core.h static inline void *vb2_get_drv_priv(struct vb2_queue *q)
q 554 include/media/videobuf2-core.h return q->drv_priv;
q 601 include/media/videobuf2-core.h static inline bool vb2_start_streaming_called(struct vb2_queue *q)
q 603 include/media/videobuf2-core.h return q->start_streaming_called;
q 32 include/net/dn_nsp.h struct sk_buff_head *q, unsigned short acknum);
q 45 include/net/gen_stats.h struct gnet_stats_queue *q, __u32 qlen);
q 97 include/net/inet_frag.h bool (*match)(const struct inet_frag_queue *q,
q 99 include/net/inet_frag.h void (*constructor)(struct inet_frag_queue *q,
q 114 include/net/inet_frag.h void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
q 115 include/net/inet_frag.h void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
q 119 include/net/inet_frag.h void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
q 122 include/net/inet_frag.h static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
q 124 include/net/inet_frag.h if (atomic_dec_and_test(&q->refcnt))
q 125 include/net/inet_frag.h inet_frag_destroy(q, f);
q 142 include/net/inet_frag.h static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i)
q 144 include/net/inet_frag.h __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch);
q 147 include/net/inet_frag.h static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
q 149 include/net/inet_frag.h __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch);
q 496 include/net/ipv6.h void ip6_frag_init(struct inet_frag_queue *q, const void *a);
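
inet_frag_put() above is the usual refcount-release idiom, and the frag memory accounting feeds a per-CPU counter with a batch argument so the shared total is only updated every frag_percpu_counter_batch units rather than on every packet. A single-threaded sketch of that batching idea, with hypothetical names:

    #include <stdio.h>

    #define BATCH 32

    struct batched_counter {
        long shared;  /* the contended global total */
        long local;   /* per-CPU delta in the real implementation */
    };

    static void counter_add(struct batched_counter *c, long n)
    {
        c->local += n;
        if (c->local >= BATCH || c->local <= -BATCH) {
            c->shared += c->local;  /* flush the accumulated delta */
            c->local = 0;
        }
    }

    int main(void)
    {
        struct batched_counter c = { 0, 0 };

        for (int i = 0; i < 100; i++)
            counter_add(&c, 1);
        printf("shared=%ld local=%ld\n", c.shared, c.local); /* 96 and 4 */
        return 0;
    }
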
q 497 include/net/ipv6.h bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
q 503 include/net/ipv6.h struct inet_frag_queue q;
q 75 include/net/irda/discovery.h irda_queue_t q; /* Must be first! */
q 92 include/net/irda/irda_device.h irda_queue_t q;
q 124 include/net/irda/irda_device.h irda_queue_t q; /* Must be first */
q 60 include/net/irda/iriap.h irda_queue_t q; /* Must be first */
q 45 include/net/irda/irias_object.h irda_queue_t q; /* Must be first! */
q 74 include/net/irda/irias_object.h irda_queue_t q; /* Must be first! */
q 119 include/net/irda/irlap.h irda_queue_t q; /* Must be first */
q 104 include/net/irda/irttp.h irda_queue_t q; /* Must be first */
q 44 include/net/pkt_cls.h cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, r->classid);
q 47 include/net/pkt_cls.h tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
q 56 include/net/pkt_cls.h tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
q 18 include/net/pkt_sched.h static inline void *qdisc_priv(struct Qdisc *q)
q 20 include/net/pkt_sched.h return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
q 82 include/net/pkt_sched.h int fifo_set_limit(struct Qdisc *q, unsigned int limit);
q 91 include/net/pkt_sched.h void qdisc_list_add(struct Qdisc *q);
q 92 include/net/pkt_sched.h void qdisc_list_del(struct Qdisc *q);
q 100 include/net/pkt_sched.h int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
q 104 include/net/pkt_sched.h void __qdisc_run(struct Qdisc *q);
q 106 include/net/pkt_sched.h static inline void qdisc_run(struct Qdisc *q)
q 108 include/net/pkt_sched.h if (qdisc_run_begin(q))
q 109 include/net/pkt_sched.h __qdisc_run(q);
q 71 include/net/sch_generic.h struct Qdisc *q);
q 91 include/net/sch_generic.h struct sk_buff_head q;
q 246 include/net/sch_generic.h struct Qdisc *q;
q 268 include/net/sch_generic.h static inline int qdisc_qlen(const struct Qdisc *q)
q 270 include/net/sch_generic.h return q->q.qlen;
q 280 include/net/sch_generic.h return &qdisc->q.lock;
q 285 include/net/sch_generic.h struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);
q 287 include/net/sch_generic.h return q;
q 327 include/net/sch_generic.h static inline void sch_tree_lock(const struct Qdisc *q)
q 329 include/net/sch_generic.h spin_lock_bh(qdisc_root_sleeping_lock(q));
q 332 include/net/sch_generic.h static inline void sch_tree_unlock(const struct Qdisc *q)
q 334 include/net/sch_generic.h spin_unlock_bh(qdisc_root_sleeping_lock(q));
q 337 include/net/sch_generic.h #define tcf_tree_lock(tp) sch_tree_lock((tp)->q)
q 338 include/net/sch_generic.h #define tcf_tree_unlock(tp) sch_tree_unlock((tp)->q)
q 434 include/net/sch_generic.h const struct Qdisc *q = rcu_dereference(txq->qdisc);
q 436 include/net/sch_generic.h if (q->q.qlen) {
q 511 include/net/sch_generic.h static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
q 513 include/net/sch_generic.h return q->flags & TCQ_F_CPUSTATS;
q 585 include/net/sch_generic.h return __qdisc_enqueue_tail(skb, sch, &sch->q);
q 603 include/net/sch_generic.h return __qdisc_dequeue_head(sch, &sch->q);
q 623 include/net/sch_generic.h return __qdisc_queue_drop_head(sch, &sch->q);
q 639 include/net/sch_generic.h return __qdisc_dequeue_tail(sch, &sch->q);
q 644 include/net/sch_generic.h return skb_peek(&sch->q);
q 655 include/net/sch_generic.h sch->q.qlen++;
q 668 include/net/sch_generic.h sch->q.qlen--;
q 688 include/net/sch_generic.h __qdisc_reset_queue(sch, &sch->q);
q 708 include/net/sch_generic.h return __qdisc_queue_drop(sch, &sch->q);
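
qdisc_priv() above returns the bytes placed directly after struct Qdisc itself: the core allocates the qdisc and its scheduler-private state in one block, with QDISC_ALIGN padding the offset. The same trailing-private-data trick standalone, with hypothetical types and a made-up 32-byte alignment:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj  { int common; };    /* stand-in for struct Qdisc */
    struct priv { long stats[4]; }; /* stand-in for scheduler state */

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    static void *obj_priv(struct obj *o)
    {
        return (char *)o + ALIGN_UP(sizeof(struct obj), 32);
    }

    int main(void)
    {
        struct obj *o = calloc(1, ALIGN_UP(sizeof(struct obj), 32)
                                  + sizeof(struct priv));
        struct priv *p = obj_priv(o);

        p->stats[0] = 42;           /* private area is immediately usable */
        printf("%ld\n", p->stats[0]);
        free(o);
        return 0;
    }
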
q 1064 include/net/sctp/structs.h static inline void sctp_outq_cork(struct sctp_outq *q)
q 1066 include/net/sctp/structs.h q->cork = 1;
q 80 include/scsi/scsi_dh.h static inline void scsi_dh_detach(struct request_queue *q)
q 84 include/scsi/scsi_dh.h static inline const char *scsi_dh_attached_handler_name(struct request_queue *q,
q 88 include/scsi/scsi_transport_sas.h struct request_queue *q;
q 84 include/sound/asequencer.h #define snd_seq_queue_sync_port(q) ((q) + 16)
q 455 include/sound/core.h #define snd_pci_quirk_name(q) ((q)->name)
q 463 include/sound/core.h #define snd_pci_quirk_name(q) ""
q 66 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq),
q 68 include/trace/events/block.h TP_ARGS(q, rq),
q 110 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq),
q 112 include/trace/events/block.h TP_ARGS(q, rq)
q 126 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq),
q 128 include/trace/events/block.h TP_ARGS(q, rq)
q 145 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq,
q 148 include/trace/events/block.h TP_ARGS(q, rq, nr_bytes),
q 178 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq),
q 180 include/trace/events/block.h TP_ARGS(q, rq),
q 225 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq),
q 227 include/trace/events/block.h TP_ARGS(q, rq)
q 240 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq),
q 242 include/trace/events/block.h TP_ARGS(q, rq)
q 258 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio),
q 260 include/trace/events/block.h TP_ARGS(q, bio),
q 296 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, int error),
q 298 include/trace/events/block.h TP_ARGS(q, bio, error),
q 324 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
q 326 include/trace/events/block.h TP_ARGS(q, rq, bio),
q 361 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
q 363 include/trace/events/block.h TP_ARGS(q, rq, bio)
q 377 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
q 379 include/trace/events/block.h TP_ARGS(q, rq, bio)
q 391 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio),
q 393 include/trace/events/block.h TP_ARGS(q, bio),
q 419 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
q 421 include/trace/events/block.h TP_ARGS(q, bio, rw),
q 457 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
q 459 include/trace/events/block.h TP_ARGS(q, bio, rw)
q 475 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
q 477 include/trace/events/block.h TP_ARGS(q, bio, rw)
q 490 include/trace/events/block.h TP_PROTO(struct request_queue *q),
q 492 include/trace/events/block.h TP_ARGS(q),
q 507 include/trace/events/block.h TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
q 509 include/trace/events/block.h TP_ARGS(q, depth, explicit),
q 535 include/trace/events/block.h TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
q 537 include/trace/events/block.h TP_ARGS(q, depth, explicit)
q 553 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio,
q 556 include/trace/events/block.h TP_ARGS(q, bio, new_sector),
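
The TP_PROTO/TP_ARGS pairs above are fragments of TRACE_EVENT() definitions; the full macro also declares the record layout, how to fill it, and how to print it. A minimal event in the same shape (hypothetical, not one defined in block.h):

    TRACE_EVENT(example_queue_len,
        TP_PROTO(struct request_queue *q, unsigned int len),
        TP_ARGS(q, len),
        TP_STRUCT__entry(
            __field(unsigned int, len)
        ),
        TP_fast_assign(
            __entry->len = len;
        ),
        TP_printk("len=%u", __entry->len)
    );
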
q 593 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
q 596 include/trace/events/block.h TP_ARGS(q, bio, dev, from),
q 637 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
q 640 include/trace/events/block.h TP_ARGS(q, rq, dev, from),
q 129 include/trace/events/sunrpc.h TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
q 131 include/trace/events/sunrpc.h TP_ARGS(clnt, task, q),
q 140 include/trace/events/sunrpc.h __string(q_name, rpc_qname(q))
q 150 include/trace/events/sunrpc.h __assign_str(q_name, rpc_qname(q));
q 165 include/trace/events/sunrpc.h TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
q 167 include/trace/events/sunrpc.h TP_ARGS(clnt, task, q)
q 173 include/trace/events/sunrpc.h TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const struct rpc_wait_queue *q),
q 175 include/trace/events/sunrpc.h TP_ARGS(clnt, task, q)
q 349 scripts/asn1_compiler.c char *line, *nl, *p, *q;
q 381 scripts/asn1_compiler.c q = p + 2;
q 382 scripts/asn1_compiler.c while ((q = memchr(q, '-', nl - q))) {
q 383 scripts/asn1_compiler.c if (q[1] == '-') {
q 385 scripts/asn1_compiler.c q += 2;
q 386 scripts/asn1_compiler.c memmove(p, q, nl - q);
q 389 scripts/asn1_compiler.c q++;
q 417 scripts/asn1_compiler.c q = p + 1;
q 418 scripts/asn1_compiler.c while (q < nl && (isalnum(*q) || *q == '-' || *q == '_'))
q 419 scripts/asn1_compiler.c q++;
q 420 scripts/asn1_compiler.c tokens[tix].size = q - p;
q 421 scripts/asn1_compiler.c p = q;
q 450 scripts/asn1_compiler.c q = p + 1;
q 451 scripts/asn1_compiler.c while (q < nl && (isdigit(*q)))
q 452 scripts/asn1_compiler.c q++;
q 453 scripts/asn1_compiler.c tokens[tix].size = q - p;
q 454 scripts/asn1_compiler.c p = q;
q 241 scripts/basic/fixdep.c const char *p, *q;
q 254 scripts/basic/fixdep.c for (q = p + 7; q < map + len; q++) {
q 255 scripts/basic/fixdep.c if (!(isalnum(*q) || *q == '_'))
q 261 scripts/basic/fixdep.c if (!memcmp(q - 7, "_MODULE", 7))
q 262 scripts/basic/fixdep.c q -= 7;
q 263 scripts/basic/fixdep.c if( (q-p-7) < 0 )
q 265 scripts/basic/fixdep.c use_config(p+7, q-p-7);
q 75 scripts/dtc/data.c char *q;
q 79 scripts/dtc/data.c q = d.val;
q 86 scripts/dtc/data.c q[d.len++] = c;
q 89 scripts/dtc/data.c q[d.len++] = '\0';
q 167 scripts/dtc/libfdt/fdt_ro.c const char *q = strchr(path, '/');
q 169 scripts/dtc/libfdt/fdt_ro.c if (!q)
q 170 scripts/dtc/libfdt/fdt_ro.c q = end;
q 172 scripts/dtc/libfdt/fdt_ro.c p = fdt_get_alias_namelen(fdt, p, q - p);
q 177 scripts/dtc/libfdt/fdt_ro.c p = q;
q 181 scripts/dtc/libfdt/fdt_ro.c const char *q;
q 187 scripts/dtc/libfdt/fdt_ro.c q = strchr(p, '/');
q 188 scripts/dtc/libfdt/fdt_ro.c if (! q)
q 189 scripts/dtc/libfdt/fdt_ro.c q = end;
q 191 scripts/dtc/libfdt/fdt_ro.c offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p);
q 195 scripts/dtc/libfdt/fdt_ro.c p = q;
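
The fdt_ro.c lines above walk a '/'-separated device tree path one component at a time: q is set to the next '/' (or the end of the string) and each [p, q) span names one subnode for fdt_subnode_offset_namelen(). The same walk standalone, printing components instead of resolving offsets:

    #include <stdio.h>
    #include <string.h>

    static void walk_path(const char *path)
    {
        const char *p = path, *end = path + strlen(path);

        while (p < end) {
            const char *q;

            while (p < end && *p == '/')  /* skip separators */
                p++;
            if (p >= end)
                break;
            q = strchr(p, '/');           /* component ends at '/' or end */
            if (!q)
                q = end;
            printf("component: %.*s\n", (int)(q - p), p);
            p = q;
        }
    }

    int main(void)
    {
        walk_path("/soc/uart@101f1000/clock");
        return 0;
    }
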