cpu 32 arch/x86/include/asm/amd_nb.h unsigned int cpu;
cpu 36 arch/x86/include/asm/apb_timer.h extern int arch_setup_apbt_irqs(int irq, int trigger, int mask, int cpu);
cpu 296 arch/x86/include/asm/apic.h void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
cpu 357 arch/x86/include/asm/apic.h int (*x86_32_early_logical_apicid)(int cpu);
cpu 530 arch/x86/include/asm/apic.h static inline int noop_x86_32_early_logical_apicid(int cpu)
cpu 580 arch/x86/include/asm/apic.h flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
cpu 596 arch/x86/include/asm/apic.h default_vector_allocation_domain(int cpu, struct cpumask *retmask,
cpu 599 arch/x86/include/asm/apic.h cpumask_copy(retmask, cpumask_of(cpu));
cpu 18 arch/x86/include/asm/cpu.h #define cpu_physical_id(cpu) boot_cpu_physical_apicid
cpu 25 arch/x86/include/asm/cpu.h struct cpu cpu;
cpu 33 arch/x86/include/asm/cpu.h extern int _debug_hotplug_cpu(int cpu, int action);
cpu 48 arch/x86/include/asm/desc.h static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
cpu 50 arch/x86/include/asm/desc.h return per_cpu(gdt_page, cpu).gdt;
cpu 101 arch/x86/include/asm/desc.h #define load_TLS(t, cpu) native_load_tls(t, cpu)
cpu 175 arch/x86/include/asm/desc.h static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
cpu 177 arch/x86/include/asm/desc.h struct desc_struct *d = get_cpu_gdt_table(cpu);
cpu 193 arch/x86/include/asm/desc.h #define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
cpu 200 arch/x86/include/asm/desc.h unsigned cpu = smp_processor_id();
cpu 205 arch/x86/include/asm/desc.h write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT,
cpu 245 arch/x86/include/asm/desc.h static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
cpu 247 arch/x86/include/asm/desc.h struct desc_struct *gdt = get_cpu_gdt_table(cpu);
cpu 408 arch/x86/include/asm/fpu-internal.h static inline void __cpu_disable_lazy_restore(unsigned int cpu)
cpu 410 arch/x86/include/asm/fpu-internal.h per_cpu(fpu_owner_task, cpu) = NULL;
cpu 413 arch/x86/include/asm/fpu-internal.h static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
cpu 416 arch/x86/include/asm/fpu-internal.h cpu == new->thread.fpu.last_cpu;
cpu 419 arch/x86/include/asm/fpu-internal.h static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
cpu 431 arch/x86/include/asm/fpu-internal.h cpu = ~0;
cpu 432 arch/x86/include/asm/fpu-internal.h old->thread.fpu.last_cpu = cpu;
cpu 447 arch/x86/include/asm/fpu-internal.h if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
cpu 57 arch/x86/include/asm/hardirq.h extern u64 arch_irq_stat_cpu(unsigned int cpu);
cpu 198 arch/x86/include/asm/hw_irq.h extern void setup_vector_irq(int cpu);
cpu 203 arch/x86/include/asm/hw_irq.h extern void __setup_vector_irq(int cpu);
cpu 207 arch/x86/include/asm/hw_irq.h static inline void __setup_vector_irq(int cpu) {}
cpu 20 arch/x86/include/asm/idle.h void amd_e400_remove_cpu(int cpu);
cpu 19 arch/x86/include/asm/irq.h extern void irq_ctx_init(int cpu);
cpu 21 arch/x86/include/asm/irq.h # define irq_ctx_init(cpu) do { } while (0)
cpu 679 arch/x86/include/asm/kvm_host.h void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
cpu 773 arch/x86/include/asm/kvm_host.h void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
cpu 214 arch/x86/include/asm/mce.h extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
cpu 31 arch/x86/include/asm/microcode.h enum ucode_state (*request_microcode_user) (int cpu,
cpu 34 arch/x86/include/asm/microcode.h enum ucode_state (*request_microcode_fw) (int cpu, struct device *,
cpu 37 arch/x86/include/asm/microcode.h void (*microcode_fini_cpu) (int cpu);
cpu 45 arch/x86/include/asm/microcode.h int (*apply_microcode) (int cpu);
cpu 46 arch/x86/include/asm/microcode.h int (*collect_cpu_info) (int cpu, struct cpu_signature *csig);
cpu 61 arch/x86/include/asm/microcode_amd.h extern int apply_microcode_amd(int cpu);
cpu 62 arch/x86/include/asm/microcode_amd.h extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
cpu 25 arch/x86/include/asm/mmu.h void leave_mm(int cpu);
cpu 27 arch/x86/include/asm/mmu.h static inline void leave_mm(int cpu)
cpu 40 arch/x86/include/asm/mmu_context.h unsigned cpu = smp_processor_id();
cpu 47 arch/x86/include/asm/mmu_context.h cpumask_set_cpu(cpu, mm_cpumask(next));
cpu 54 arch/x86/include/asm/mmu_context.h cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpu 65 arch/x86/include/asm/mmu_context.h if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
cpu 72 arch/x86/include/asm/mmu_context.h cpumask_set_cpu(cpu, mm_cpumask(next));
cpu 221 arch/x86/include/asm/msr.h int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
cpu 222 arch/x86/include/asm/msr.h int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
cpu 223 arch/x86/include/asm/msr.h int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
cpu 224 arch/x86/include/asm/msr.h int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
cpu 227 arch/x86/include/asm/msr.h int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
cpu 228 arch/x86/include/asm/msr.h int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
cpu 229 arch/x86/include/asm/msr.h int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
cpu 230 arch/x86/include/asm/msr.h int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
cpu 231 arch/x86/include/asm/msr.h int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
cpu 232 arch/x86/include/asm/msr.h int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
cpu 234 arch/x86/include/asm/msr.h static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
cpu 239 arch/x86/include/asm/msr.h static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
cpu 244 arch/x86/include/asm/msr.h static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
cpu 249 arch/x86/include/asm/msr.h static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
cpu 264 arch/x86/include/asm/msr.h static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
cpu 269 arch/x86/include/asm/msr.h static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
cpu 273 arch/x86/include/asm/msr.h static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
cpu 277 arch/x86/include/asm/msr.h static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
cpu 281 arch/x86/include/asm/msr.h static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
cpu 285 arch/x86/include/asm/msr.h static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
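The rdmsr*/wrmsr*_on_cpu() helpers indexed above from arch/x86/include/asm/msr.h perform the MSR access on the named CPU via a cross-call, so they cannot be used from interrupt context. A minimal sketch of the usual pattern, assuming a standalone demo module (the demo_* names and the local MSR constant are ours, not from the index; 0x10 is the architectural IA32_TSC MSR):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <asm/msr.h>

#define DEMO_MSR_IA32_TSC 0x00000010	/* architectural TSC MSR */

static int __init msr_demo_init(void)
{
	unsigned int cpu;
	u64 val;

	for_each_online_cpu(cpu) {
		/* Executes RDMSR on @cpu; returns 0 on success. */
		if (!rdmsrl_on_cpu(cpu, DEMO_MSR_IA32_TSC, &val))
			pr_info("cpu%u: TSC=%llu\n", cpu, val);
	}
	return 0;
}
module_init(msr_demo_init);
MODULE_LICENSE("GPL");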
cpu 41 arch/x86/include/asm/numa.h extern int numa_cpu_node(int cpu);
cpu 48 arch/x86/include/asm/numa.h static inline int numa_cpu_node(int cpu)
cpu 59 arch/x86/include/asm/numa.h extern void numa_set_node(int cpu, int node);
cpu 60 arch/x86/include/asm/numa.h extern void numa_clear_node(int cpu);
cpu 62 arch/x86/include/asm/numa.h extern void numa_add_cpu(int cpu);
cpu 63 arch/x86/include/asm/numa.h extern void numa_remove_cpu(int cpu);
cpu 65 arch/x86/include/asm/numa.h static inline void numa_set_node(int cpu, int node) { }
cpu 66 arch/x86/include/asm/numa.h static inline void numa_clear_node(int cpu) { }
cpu 68 arch/x86/include/asm/numa.h static inline void numa_add_cpu(int cpu) { }
cpu 69 arch/x86/include/asm/numa.h static inline void numa_remove_cpu(int cpu) { }
cpu 73 arch/x86/include/asm/numa.h void debug_cpumask_set_cpu(int cpu, int node, bool enable);
cpu 199 arch/x86/include/asm/paravirt.h static inline u64 paravirt_steal_clock(int cpu)
cpu 201 arch/x86/include/asm/paravirt.h return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
cpu 274 arch/x86/include/asm/paravirt.h static inline void load_TLS(struct thread_struct *t, unsigned cpu)
cpu 276 arch/x86/include/asm/paravirt.h PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
cpu 99 arch/x86/include/asm/paravirt_types.h unsigned long long (*steal_clock)(int cpu);
cpu 130 arch/x86/include/asm/paravirt_types.h void (*load_tls)(struct thread_struct *t, unsigned int cpu);
cpu 188 arch/x86/include/asm/perf_event_p4.h static inline int p4_ht_thread(int cpu)
cpu 192 arch/x86/include/asm/perf_event_p4.h return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
cpu 197 arch/x86/include/asm/perf_event_p4.h static inline int p4_should_swap_ts(u64 config, int cpu)
cpu 199 arch/x86/include/asm/perf_event_p4.h return p4_ht_config_thread(config) ^ p4_ht_thread(cpu);
cpu 202 arch/x86/include/asm/perf_event_p4.h static inline u32 p4_default_cccr_conf(int cpu)
cpu 211 arch/x86/include/asm/perf_event_p4.h if (!p4_ht_thread(cpu))
cpu 219 arch/x86/include/asm/perf_event_p4.h static inline u32 p4_default_escr_conf(int cpu, int exclude_os, int exclude_usr)
cpu 223 arch/x86/include/asm/perf_event_p4.h if (!p4_ht_thread(cpu)) {
cpu 40 arch/x86/include/asm/preempt.h #define init_idle_preempt_count(p, cpu) do { \
cpu 42 arch/x86/include/asm/preempt.h per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
cpu 155 arch/x86/include/asm/processor.h #define cpu_data(cpu) per_cpu(cpu_info, cpu)
cpu 158 arch/x86/include/asm/processor.h #define cpu_data(cpu) boot_cpu_data
cpu 956 arch/x86/include/asm/processor.h extern u16 amd_get_nb_id(int cpu);
cpu 105 arch/x86/include/asm/pvclock.h struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu);
cpu 40 arch/x86/include/asm/smp.h static inline struct cpumask *cpu_sibling_mask(int cpu)
cpu 42 arch/x86/include/asm/smp.h return per_cpu(cpu_sibling_map, cpu);
cpu 45 arch/x86/include/asm/smp.h static inline struct cpumask *cpu_core_mask(int cpu)
cpu 47 arch/x86/include/asm/smp.h return per_cpu(cpu_core_map, cpu);
cpu 50 arch/x86/include/asm/smp.h static inline struct cpumask *cpu_llc_shared_mask(int cpu)
cpu 52 arch/x86/include/asm/smp.h return per_cpu(cpu_llc_shared_map, cpu);
cpu 72 arch/x86/include/asm/smp.h void (*smp_send_reschedule)(int cpu);
cpu 74 arch/x86/include/asm/smp.h int (*cpu_up)(unsigned cpu, struct task_struct *tidle);
cpu 76 arch/x86/include/asm/smp.h void (*cpu_die)(unsigned int cpu);
cpu 80 arch/x86/include/asm/smp.h void (*send_call_func_single_ipi)(int cpu);
cpu 84 arch/x86/include/asm/smp.h extern void set_cpu_sibling_map(int cpu);
cpu 117 arch/x86/include/asm/smp.h static inline int __cpu_up(unsigned int cpu, struct task_struct *tidle)
cpu 119 arch/x86/include/asm/smp.h return smp_ops.cpu_up(cpu, tidle);
cpu 127 arch/x86/include/asm/smp.h static inline void __cpu_die(unsigned int cpu)
cpu 129 arch/x86/include/asm/smp.h smp_ops.cpu_die(cpu);
cpu 137 arch/x86/include/asm/smp.h static inline void smp_send_reschedule(int cpu)
cpu 139 arch/x86/include/asm/smp.h smp_ops.smp_send_reschedule(cpu);
cpu 142 arch/x86/include/asm/smp.h static inline void arch_send_call_function_single_ipi(int cpu)
cpu 144 arch/x86/include/asm/smp.h smp_ops.send_call_func_single_ipi(cpu);
cpu 153 arch/x86/include/asm/smp.h void cpu_die_common(unsigned int cpu);
cpu 159 arch/x86/include/asm/smp.h void native_cpu_die(unsigned int cpu);
cpu 162 arch/x86/include/asm/smp.h void wbinvd_on_cpu(int cpu);
cpu 166 arch/x86/include/asm/smp.h void native_send_call_func_single_ipi(int cpu);
cpu 167 arch/x86/include/asm/smp.h void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
cpu 171 arch/x86/include/asm/smp.h #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
cpu 174 arch/x86/include/asm/smp.h #define wbinvd_on_cpu(cpu) wbinvd()
cpu 200 arch/x86/include/asm/smp.h ti->cpu; \
cpu 84 arch/x86/include/asm/stackprotector.h static inline void setup_stack_canary_segment(int cpu)
cpu 87 arch/x86/include/asm/stackprotector.h unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
cpu 88 arch/x86/include/asm/stackprotector.h struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
cpu 110 arch/x86/include/asm/stackprotector.h static inline void setup_stack_canary_segment(int cpu)
cpu 31 arch/x86/include/asm/thread_info.h __u32 cpu; /* current CPU */
cpu 45 arch/x86/include/asm/thread_info.h .cpu = 0, \
cpu 57 arch/x86/include/asm/topology.h extern int __cpu_to_node(int cpu);
cpu 60 arch/x86/include/asm/topology.h extern int early_cpu_to_node(int cpu);
cpu 65 arch/x86/include/asm/topology.h static inline int early_cpu_to_node(int cpu)
cpu 67 arch/x86/include/asm/topology.h return early_per_cpu(x86_cpu_to_node_map, cpu);
cpu 109 arch/x86/include/asm/topology.h static inline int early_cpu_to_node(int cpu)
cpu 120 arch/x86/include/asm/topology.h extern const struct cpumask *cpu_coregroup_mask(int cpu);
cpu 122 arch/x86/include/asm/topology.h #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
cpu 123 arch/x86/include/asm/topology.h #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
cpu 126 arch/x86/include/asm/topology.h #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
cpu 127 arch/x86/include/asm/topology.h #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
cpu 61 arch/x86/include/asm/tsc.h extern void check_tsc_sync_source(int cpu);
cpu 20 arch/x86/include/asm/uv/uv.h unsigned int cpu);
cpu 30 arch/x86/include/asm/uv/uv.h unsigned long start, unsigned long end, unsigned int cpu)
cpu 60 arch/x86/include/asm/uv/uv_bau.h #define cpubit_isset(cpu, bau_local_cpumask) \
cpu 61 arch/x86/include/asm/uv/uv_bau.h test_bit((cpu), (bau_local_cpumask).bits)
cpu 603 arch/x86/include/asm/uv/uv_bau.h short cpu;
cpu 168 arch/x86/include/asm/uv/uv_hub.h #define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
cpu 516 arch/x86/include/asm/uv/uv_hub.h static inline int uv_cpu_to_blade_id(int cpu)
cpu 518 arch/x86/include/asm/uv/uv_hub.h return uv_cpu_to_blade[cpu];
cpu 552 arch/x86/include/asm/uv/uv_hub.h static inline int uv_cpu_to_pnode(int cpu)
cpu 554 arch/x86/include/asm/uv/uv_hub.h return uv_blade_info[uv_cpu_to_blade_id(cpu)].pnode;
cpu 613 arch/x86/include/asm/uv/uv_hub.h #define uv_cpu_nmi_per(cpu) (per_cpu(uv_cpu_nmi, cpu))
cpu 614 arch/x86/include/asm/uv/uv_hub.h #define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub)
cpu 636 arch/x86/include/asm/uv/uv_hub.h static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
cpu 638 arch/x86/include/asm/uv/uv_hub.h if (uv_cpu_hub_info(cpu)->scir.state != value) {
cpu 639 arch/x86/include/asm/uv/uv_hub.h uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
cpu 640 arch/x86/include/asm/uv/uv_hub.h uv_cpu_hub_info(cpu)->scir.offset, value);
cpu 641 arch/x86/include/asm/uv/uv_hub.h uv_cpu_hub_info(cpu)->scir.state = value;
cpu 22 arch/x86/include/uapi/asm/mce.h __u8 cpu; /* cpu number; obsolete; use extcpu now */
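The topology_physical_package_id()/topology_core_id() macros indexed above from arch/x86/include/asm/topology.h resolve through cpu_data(cpu). A small sketch of how a driver might log the package and core of each online CPU, assuming our own demo function (the generic <linux/topology.h> supplies fallbacks on architectures that do not define these macros):

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

static void demo_dump_topology(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu%d: package %d core %d\n", cpu,
			topology_physical_package_id(cpu),
			topology_core_id(cpu));
}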
cpu 233 include/acpi/processor.h *performance, unsigned int cpu);
cpu 237 include/acpi/processor.h unsigned int cpu);
cpu 252 include/acpi/processor.h unsigned int cpu);
cpu 253 include/acpi/processor.h int acpi_processor_ffh_cstate_probe(unsigned int cpu,
cpu 260 include/acpi/processor.h *flags, unsigned int cpu)
cpu 265 include/acpi/processor.h static inline int acpi_processor_ffh_cstate_probe(unsigned int cpu,
cpu 285 include/acpi/processor.h extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
cpu 308 include/acpi/processor.h static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
cpu 33 include/asm-generic/preempt.h #define init_idle_preempt_count(p, cpu) do { \
cpu 35 include/asm-generic/topology.h #define cpu_to_node(cpu) ((void)(cpu),0)
cpu 41 include/asm-generic/topology.h #define set_cpu_numa_node(cpu, node)
cpu 44 include/asm-generic/topology.h #define cpu_to_mem(cpu) ((void)(cpu),0)
cpu 71 include/asm-generic/topology.h #define set_cpu_numa_mem(cpu, node)
cpu 74 include/asm-generic/vmlinux.lds.h #define CPU_KEEP(sec) *(.cpu##sec)
cpu 78 include/asm-generic/vmlinux.lds.h #define CPU_DISCARD(sec) *(.cpu##sec)
cpu 52 include/crypto/mcryptd.h int cpu;
cpu 85 include/crypto/mcryptd.h int cpu;
cpu 146 include/linux/acpi.h int acpi_unmap_lsapic(int cpu);
cpu 12 include/linux/blk-mq.h int (*notify)(void *data, unsigned long action, unsigned int cpu);
cpu 114 include/linux/blkdev.h int cpu;
cpu 612 include/linux/blkdev.h #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
cpu 1408 include/linux/blkdev.h int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
cpu 11 include/linux/cgroup_subsys.h SUBSYS(cpu)
cpu 43 include/linux/clk/tegra.h void (*wait_for_reset)(u32 cpu);
cpu 44 include/linux/clk/tegra.h void (*put_in_reset)(u32 cpu);
cpu 45 include/linux/clk/tegra.h void (*out_of_reset)(u32 cpu);
cpu 46 include/linux/clk/tegra.h void (*enable_clock)(u32 cpu);
cpu 47 include/linux/clk/tegra.h void (*disable_clock)(u32 cpu);
cpu 57 include/linux/clk/tegra.h static inline void tegra_wait_cpu_in_reset(u32 cpu)
cpu 62 include/linux/clk/tegra.h tegra_cpu_car_ops->wait_for_reset(cpu);
cpu 65 include/linux/clk/tegra.h static inline void tegra_put_cpu_in_reset(u32 cpu)
cpu 70 include/linux/clk/tegra.h tegra_cpu_car_ops->put_in_reset(cpu);
cpu 73 include/linux/clk/tegra.h static inline void tegra_cpu_out_of_reset(u32 cpu)
cpu 78 include/linux/clk/tegra.h tegra_cpu_car_ops->out_of_reset(cpu);
cpu 81 include/linux/clk/tegra.h static inline void tegra_enable_cpu_clock(u32 cpu)
cpu 86 include/linux/clk/tegra.h tegra_cpu_car_ops->enable_clock(cpu);
cpu 89 include/linux/clk/tegra.h static inline void tegra_disable_cpu_clock(u32 cpu)
cpu 94 include/linux/clk/tegra.h tegra_cpu_car_ops->disable_clock(cpu);
cpu 152 include/linux/clockchips.h extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
cpu 11 include/linux/context_tracking.h extern void context_tracking_cpu_set(int cpu);
cpu 29 include/linux/cpu.h extern int register_cpu(struct cpu *cpu, int num);
cpu 30 include/linux/cpu.h extern struct device *get_cpu_device(unsigned cpu);
cpu 31 include/linux/cpu.h extern bool cpu_is_hotpluggable(unsigned cpu);
cpu 32 include/linux/cpu.h extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
cpu 34 include/linux/cpu.h int cpu, unsigned int *thread);
cpu 43 include/linux/cpu.h extern void unregister_cpu(struct cpu *cpu);
cpu 160 include/linux/cpu.h int cpu_up(unsigned int cpu);
cpu 161 include/linux/cpu.h void notify_cpu_starting(unsigned int cpu);
cpu 226 include/linux/cpu.h void clear_tasks_mm_cpumask(int cpu);
cpu 227 include/linux/cpu.h int cpu_down(unsigned int cpu);
cpu 63 include/linux/cpu_cooling.h unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
cpu 82 include/linux/cpu_cooling.h unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
cpu 45 include/linux/cpu_rmap.h static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
cpu 47 include/linux/cpu_rmap.h return rmap->near[cpu].index;
cpu 50 include/linux/cpu_rmap.h static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
cpu 52 include/linux/cpu_rmap.h return rmap->obj[rmap->near[cpu].index];
cpu 49 include/linux/cpufeature.h MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
cpu 40 include/linux/cpufreq.h unsigned int cpu; /* cpu nr */
cpu 68 include/linux/cpufreq.h unsigned int cpu; /* cpu nr of CPU managing this policy */
cpu 127 include/linux/cpufreq.h struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
cpu 130 include/linux/cpufreq.h static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
cpu 150 include/linux/cpufreq.h unsigned int cpufreq_get(unsigned int cpu);
cpu 151 include/linux/cpufreq.h unsigned int cpufreq_quick_get(unsigned int cpu);
cpu 152 include/linux/cpufreq.h unsigned int cpufreq_quick_get_max(unsigned int cpu);
cpu 155 include/linux/cpufreq.h u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
cpu 156 include/linux/cpufreq.h int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
cpu 157 include/linux/cpufreq.h int cpufreq_update_policy(unsigned int cpu);
cpu 161 include/linux/cpufreq.h static inline unsigned int cpufreq_get(unsigned int cpu)
cpu 165 include/linux/cpufreq.h static inline unsigned int cpufreq_quick_get(unsigned int cpu)
cpu 169 include/linux/cpufreq.h static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
cpu 261 include/linux/cpufreq.h unsigned int (*get) (unsigned int cpu);
cpu 264 include/linux/cpufreq.h int (*bios_limit) (int cpu, unsigned int *limit);
cpu 588 include/linux/cpufreq.h struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
cpu 596 include/linux/cpufreq.h unsigned int cpufreq_generic_get(unsigned int cpu);
cpu 69 include/linux/cpuidle.h unsigned int cpu;
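cpufreq_quick_get(), indexed above from include/linux/cpufreq.h, returns the last known frequency of a CPU in kHz, or 0 when no cpufreq driver manages it. A minimal sketch of the usual query pattern, assuming our own demo function:

#include <linux/kernel.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>

static void demo_show_freqs(void)
{
	unsigned int cpu, khz;

	for_each_online_cpu(cpu) {
		khz = cpufreq_quick_get(cpu);	/* 0 if no policy/driver */
		if (khz)
			pr_info("cpu%u: %u kHz\n", cpu, khz);
	}
}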
cpu 89 include/linux/cpumask.h #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask)
cpu 90 include/linux/cpumask.h #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask)
cpu 91 include/linux/cpumask.h #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask)
cpu 92 include/linux/cpumask.h #define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask)
cpu 98 include/linux/cpumask.h #define cpu_online(cpu) ((cpu) == 0)
cpu 99 include/linux/cpumask.h #define cpu_possible(cpu) ((cpu) == 0)
cpu 100 include/linux/cpumask.h #define cpu_present(cpu) ((cpu) == 0)
cpu 101 include/linux/cpumask.h #define cpu_active(cpu) ((cpu) == 0)
cpu 105 include/linux/cpumask.h static inline unsigned int cpumask_check(unsigned int cpu)
cpu 108 include/linux/cpumask.h WARN_ON_ONCE(cpu >= nr_cpumask_bits);
cpu 110 include/linux/cpumask.h return cpu;
cpu 140 include/linux/cpumask.h unsigned int cpu)
cpu 152 include/linux/cpumask.h #define for_each_cpu(cpu, mask) \
cpu 153 include/linux/cpumask.h for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
cpu 154 include/linux/cpumask.h #define for_each_cpu_not(cpu, mask) \
cpu 155 include/linux/cpumask.h for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
cpu 156 include/linux/cpumask.h #define for_each_cpu_and(cpu, mask, and) \
cpu 157 include/linux/cpumask.h for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
cpu 201 include/linux/cpumask.h int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
cpu 211 include/linux/cpumask.h #define for_each_cpu(cpu, mask) \
cpu 212 include/linux/cpumask.h for ((cpu) = -1; \
cpu 213 include/linux/cpumask.h (cpu) = cpumask_next((cpu), (mask)), \
cpu 214 include/linux/cpumask.h (cpu) < nr_cpu_ids;)
cpu 223 include/linux/cpumask.h #define for_each_cpu_not(cpu, mask) \
cpu 224 include/linux/cpumask.h for ((cpu) = -1; \
cpu 225 include/linux/cpumask.h (cpu) = cpumask_next_zero((cpu), (mask)), \
cpu 226 include/linux/cpumask.h (cpu) < nr_cpu_ids;)
cpu 242 include/linux/cpumask.h #define for_each_cpu_and(cpu, mask, and) \
cpu 243 include/linux/cpumask.h for ((cpu) = -1; \
cpu 244 include/linux/cpumask.h (cpu) = cpumask_next_and((cpu), (mask), (and)), \
cpu 245 include/linux/cpumask.h (cpu) < nr_cpu_ids;)
cpu 263 include/linux/cpumask.h static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
cpu 265 include/linux/cpumask.h set_bit(cpumask_check(cpu), cpumask_bits(dstp));
cpu 273 include/linux/cpumask.h static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
cpu 275 include/linux/cpumask.h clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
cpu 287 include/linux/cpumask.h #define cpumask_test_cpu(cpu, cpumask) \
cpu 288 include/linux/cpumask.h test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
cpu 299 include/linux/cpumask.h static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
cpu 301 include/linux/cpumask.h return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
cpu 313 include/linux/cpumask.h static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
cpu 315 include/linux/cpumask.h return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask));
cpu 539 include/linux/cpumask.h #define cpumask_of(cpu) (get_cpu_mask(cpu))
cpu 740 include/linux/cpumask.h #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
cpu 741 include/linux/cpumask.h #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask)
cpu 742 include/linux/cpumask.h #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
cpu 745 include/linux/cpumask.h void set_cpu_possible(unsigned int cpu, bool possible);
cpu 746 include/linux/cpumask.h void set_cpu_present(unsigned int cpu, bool present);
cpu 747 include/linux/cpumask.h void set_cpu_online(unsigned int cpu, bool online);
cpu 748 include/linux/cpumask.h void set_cpu_active(unsigned int cpu, bool active);
cpu 782 include/linux/cpumask.h static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
cpu 784 include/linux/cpumask.h const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
cpu 785 include/linux/cpumask.h p -= cpu / BITS_PER_LONG;
cpu 789 include/linux/cpumask.h #define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
cpu 812 include/linux/cpumask.h #define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
cpu 847 include/linux/cpumask.h #define for_each_cpu_mask(cpu, mask) \
cpu 848 include/linux/cpumask.h for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
cpu 856 include/linux/cpumask.h #define for_each_cpu_mask(cpu, mask) \
cpu 857 include/linux/cpumask.h for ((cpu) = -1; \
cpu 858 include/linux/cpumask.h (cpu) = next_cpu((cpu), (mask)), \
cpu 859 include/linux/cpumask.h (cpu) < NR_CPUS; )
cpu 864 include/linux/cpumask.h #define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
cpu 869 include/linux/cpumask.h #define for_each_cpu_mask_nr(cpu, mask) \
cpu 870 include/linux/cpumask.h for ((cpu) = -1; \
cpu 871 include/linux/cpumask.h (cpu) = __next_cpu_nr((cpu), &(mask)), \
cpu 872 include/linux/cpumask.h (cpu) < nr_cpu_ids; )
cpu 878 include/linux/cpumask.h #define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
cpu 879 include/linux/cpumask.h static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
cpu 881 include/linux/cpumask.h set_bit(cpu, dstp->bits);
cpu 884 include/linux/cpumask.h #define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
cpu 885 include/linux/cpumask.h static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
cpu 887 include/linux/cpumask.h clear_bit(cpu, dstp->bits);
cpu 903 include/linux/cpumask.h #define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
cpu 905 include/linux/cpumask.h #define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
cpu 906 include/linux/cpumask.h static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
cpu 908 include/linux/cpumask.h return test_and_set_bit(cpu, addr->bits);
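The cpumask accessors and iterators above from include/linux/cpumask.h are the canonical way to build and walk CPU sets; the ((cpu) == 0) variants also listed are the trivial NR_CPUS=1 builds. A minimal sketch of the allocate/set/iterate/free pattern, assuming our own demo function:

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/slab.h>

static int demo_walk_even_cpus(void)
{
	cpumask_var_t mask;
	unsigned int cpu;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Collect every even-numbered online CPU. */
	for_each_online_cpu(cpu)
		if ((cpu & 1) == 0)
			cpumask_set_cpu(cpu, mask);

	for_each_cpu(cpu, mask)
		pr_info("selected cpu%u\n", cpu);

	free_cpumask_var(mask);
	return 0;
}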
cpu 52 include/linux/dca.h int cpu);
cpu 70 include/linux/dca.h u8 dca_get_tag(int cpu);
cpu 71 include/linux/dca.h u8 dca3_get_tag(struct device *dev, int cpu);
cpu 46 include/linux/dw_apb_timer.h dw_apb_clockevent_init(int cpu, const char *name, unsigned rating,
cpu 759 include/linux/ftrace.h extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
cpu 783 include/linux/ftrace.h static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
cpu 97 include/linux/ftrace_event.h int cpu;
cpu 310 include/linux/genhd.h #define __part_stat_add(cpu, part, field, addnd) \
cpu 311 include/linux/genhd.h (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
cpu 348 include/linux/genhd.h #define __part_stat_add(cpu, part, field, addnd) \
cpu 369 include/linux/genhd.h #define part_stat_add(cpu, part, field, addnd) do { \
cpu 370 include/linux/genhd.h __part_stat_add((cpu), (part), field, addnd); \
cpu 372 include/linux/genhd.h __part_stat_add((cpu), &part_to_disk((part))->part0, \
cpu 376 include/linux/genhd.h #define part_stat_dec(cpu, gendiskp, field) \
cpu 377 include/linux/genhd.h part_stat_add(cpu, gendiskp, field, -1)
cpu 378 include/linux/genhd.h #define part_stat_inc(cpu, gendiskp, field) \
cpu 379 include/linux/genhd.h part_stat_add(cpu, gendiskp, field, 1)
cpu 380 include/linux/genhd.h #define part_stat_sub(cpu, gendiskp, field, subnd) \
cpu 381 include/linux/genhd.h part_stat_add(cpu, gendiskp, field, -subnd)
cpu 416 include/linux/genhd.h extern void part_round_stats(int cpu, struct hd_struct *part);
cpu 183 include/linux/hrtimer.h unsigned int cpu;
cpu 63 include/linux/hw_breakpoint.h int cpu);
cpu 103 include/linux/hw_breakpoint.h int cpu) { return NULL; }
cpu 566 include/linux/interrupt.h extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
cpu 21 include/linux/irq_cpustat.h #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
cpu 29 include/linux/irq_cpustat.h #define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */
cpu 38 include/linux/irq_work.h bool irq_work_queue_on(struct irq_work *work, int cpu);
cpu 104 include/linux/irqchip/arm-gic.h void __iomem *dist , void __iomem *cpu)
cpu 106 include/linux/irqchip/arm-gic.h gic_init_bases(nr, start, dist, cpu, 0, NULL);
cpu 110 include/linux/irqchip/arm-gic.h int gic_get_cpu_id(unsigned int cpu);
cpu 130 include/linux/kdb.h unsigned int cpu = task_thread_info(p)->cpu;
cpu 131 include/linux/kdb.h if (cpu > num_possible_cpus())
cpu 132 include/linux/kdb.h cpu = 0;
cpu 133 include/linux/kdb.h return cpu;
cpu 49 include/linux/kernel_stat.h #define kstat_cpu(cpu) per_cpu(kstat, cpu)
cpu 50 include/linux/kernel_stat.h #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
cpu 54 include/linux/kernel_stat.h extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
cpu 62 include/linux/kernel_stat.h static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
cpu 64 include/linux/kernel_stat.h return kstat_cpu(cpu).softirqs[irq];
cpu 76 include/linux/kernel_stat.h static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
cpu 78 include/linux/kernel_stat.h return kstat_cpu(cpu).irqs_sum;
cpu 231 include/linux/kexec.h void crash_save_cpu(struct pt_regs *regs, int cpu);
cpu 312 include/linux/kgdb.h extern int kgdb_nmicallback(int cpu, void *regs);
cpu 313 include/linux/kgdb.h extern int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
cpu 19 include/linux/kthread.h unsigned int cpu,
cpu 40 include/linux/kthread.h void kthread_bind(struct task_struct *k, unsigned int cpu);
cpu 235 include/linux/kvm_host.h int cpu;
cpu 638 include/linux/kvm_host.h void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
cpu 641 include/linux/kvm_host.h void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
cpu 57 include/linux/lglock.h void lg_local_lock_cpu(struct lglock *lg, int cpu);
cpu 58 include/linux/lglock.h void lg_local_unlock_cpu(struct lglock *lg, int cpu);
cpu 70 include/linux/lglock.h #define lg_local_lock_cpu(lg, cpu) spin_lock(lg)
cpu 71 include/linux/lglock.h #define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg)
cpu 155 include/linux/lockdep.h int cpu;
cpu 776 include/linux/mm.h static inline int cpu_pid_to_cpupid(int cpu, int pid)
cpu 778 include/linux/mm.h return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
cpu 15 include/linux/mvebu-pmsu.h int mvebu_pmsu_dfs_request(int cpu);
cpu 17 include/linux/mvebu-pmsu.h static inline int mvebu_pmsu_dfs_request(int cpu) { return -ENODEV; }
cpu 612 include/linux/netdevice.h u16 cpu;
cpu 646 include/linux/netdevice.h unsigned int cpu, index = hash & table->mask;
cpu 649 include/linux/netdevice.h cpu = raw_smp_processor_id();
cpu 651 include/linux/netdevice.h if (table->ents[index] != cpu)
cpu 652 include/linux/netdevice.h table->ents[index] = cpu;
cpu 2341 include/linux/netdevice.h unsigned int cpu;
cpu 3057 include/linux/netdevice.h static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
cpu 3060 include/linux/netdevice.h txq->xmit_lock_owner = cpu;
cpu 3104 include/linux/netdevice.h int cpu;
cpu 3107 include/linux/netdevice.h cpu = smp_processor_id();
cpu 3117 include/linux/netdevice.h __netif_tx_lock(txq, cpu);
cpu 3152 include/linux/netdevice.h #define HARD_TX_LOCK(dev, txq, cpu) { \
cpu 3154 include/linux/netdevice.h __netif_tx_lock(txq, cpu); \
cpu 3172 include/linux/netdevice.h int cpu;
cpu 3175 include/linux/netdevice.h cpu = smp_processor_id();
cpu 3179 include/linux/netdevice.h __netif_tx_lock(txq, cpu);
cpu 37 include/linux/node.h extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
cpu 38 include/linux/node.h extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
cpu 57 include/linux/node.h static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
cpu 61 include/linux/node.h static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
cpu 282 include/linux/of.h extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
cpu 501 include/linux/of.h static inline struct device_node *of_get_cpu_node(int cpu,
cpu 47 include/linux/of_device.h static inline struct device_node *of_cpu_device_node_get(int cpu)
cpu 50 include/linux/of_device.h cpu_dev = get_cpu_device(cpu);
cpu 89 include/linux/of_device.h static inline struct device_node *of_cpu_device_node_get(int cpu)
cpu 181 include/linux/padata.h extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
cpu 182 include/linux/padata.h extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
cpu 220 include/linux/percpu-defs.h #define per_cpu_ptr(ptr, cpu) \
cpu 223 include/linux/percpu-defs.h SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \
cpu 250 include/linux/percpu-defs.h #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
cpu 256 include/linux/percpu-defs.h #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
cpu 89 include/linux/percpu.h typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
cpu 81 include/linux/percpu_ida.h unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
cpu 407 include/linux/perf_event.h int cpu;
cpu 572 include/linux/perf_event.h int cpu,
cpu 595 include/linux/perf_event.h u32 cpu;
cpu 863 include/linux/perf_event.h unsigned long cpu = smp_processor_id(); \
cpu 868 include/linux/perf_event.h (void *)(unsigned long)cpu); \
cpu 871 include/linux/perf_event.h (void *)(unsigned long)cpu); \
cpu 874 include/linux/perf_event.h (void *)(unsigned long)cpu); \
cpu 17 include/linux/platform_data/arm-ux500-pm.h bool prcmu_is_cpu_in_wfi(int cpu);
cpu 84 include/linux/posix-timers.h struct cpu_timer_list cpu;
cpu 166 include/linux/preempt.h void (*sched_in)(struct preempt_notifier *notifier, int cpu);
cpu 263 include/linux/rcupdate.h void rcu_check_callbacks(int cpu, int user);
cpu 1121 include/linux/rcupdate.h static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
cpu 1129 include/linux/rcupdate.h static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
cpu 1131 include/linux/rcupdate.h bool rcu_is_nocb_cpu(int cpu);
cpu 1133 include/linux/rcupdate.h static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
cpu 81 include/linux/rcutiny.h static inline void rcu_note_context_switch(int cpu)
cpu 90 include/linux/rcutiny.h static inline void rcu_virt_note_context_switch(int cpu)
cpu 33 include/linux/rcutree.h void rcu_note_context_switch(int cpu);
cpu 35 include/linux/rcutree.h int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
cpu 44 include/linux/rcutree.h static inline void rcu_virt_note_context_switch(int cpu)
cpu 46 include/linux/rcutree.h rcu_note_context_switch(cpu);
cpu 50 include/linux/relay.h unsigned int cpu; /* this buf's cpu */
cpu 178 include/linux/relay.h unsigned int cpu,
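The per_cpu()/per_cpu_ptr() accessors indexed above from include/linux/percpu-defs.h address one CPU's instance of a per-CPU variable. A common counter pattern, sketched with our own demo names:

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned long, demo_events);

static void demo_note_event(void)
{
	this_cpu_inc(demo_events);	/* lock-free update of this CPU's slot */
}

static unsigned long demo_total_events(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Approximate sum: other CPUs may still be updating their slots. */
	for_each_online_cpu(cpu)
		sum += per_cpu(demo_events, cpu);
	return sum;
}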
cpu 100 include/linux/ring_buffer.h int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
cpu 101 include/linux/ring_buffer.h int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
cpu 109 include/linux/ring_buffer.h int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
cpu 121 include/linux/ring_buffer.h ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
cpu 124 include/linux/ring_buffer.h ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
cpu 128 include/linux/ring_buffer.h ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
cpu 140 include/linux/ring_buffer.h unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
cpu 142 include/linux/ring_buffer.h void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
cpu 147 include/linux/ring_buffer.h struct ring_buffer *buffer_b, int cpu);
cpu 151 include/linux/ring_buffer.h struct ring_buffer *buffer_b, int cpu)
cpu 158 include/linux/ring_buffer.h int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
cpu 165 include/linux/ring_buffer.h void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
cpu 166 include/linux/ring_buffer.h void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
cpu 168 include/linux/ring_buffer.h u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
cpu 169 include/linux/ring_buffer.h unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
cpu 172 include/linux/ring_buffer.h unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
cpu 173 include/linux/ring_buffer.h unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
cpu 174 include/linux/ring_buffer.h unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
cpu 175 include/linux/ring_buffer.h unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
cpu 176 include/linux/ring_buffer.h unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
cpu 178 include/linux/ring_buffer.h u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
cpu 180 include/linux/ring_buffer.h int cpu, u64 *ts);
cpu 187 include/linux/ring_buffer.h void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
cpu 190 include/linux/ring_buffer.h size_t len, int cpu, int full);
cpu 173 include/linux/sched.h extern unsigned long nr_iowait_cpu(int cpu);
cpu 181 include/linux/sched.h extern void dump_cpu_task(int cpu);
cpu 190 include/linux/sched.h print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
cpu 290 include/linux/sched.h extern void init_idle(struct task_struct *idle, int cpu);
cpu 293 include/linux/sched.h extern int runqueue_is_locked(int cpu);
cpu 296 include/linux/sched.h extern void nohz_balance_enter_idle(int cpu);
cpu 300 include/linux/sched.h static inline void nohz_balance_enter_idle(int cpu) { }
cpu 1004 include/linux/sched.h typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
cpu 1029 include/linux/sched.h extern void wake_up_if_idle(int cpu);
cpu 2102 include/linux/sched.h extern u64 cpu_clock(int cpu);
cpu 2104 include/linux/sched.h extern u64 sched_clock_cpu(int cpu);
cpu 2170 include/linux/sched.h extern void wake_up_nohz_cpu(int cpu);
cpu 2172 include/linux/sched.h static inline void wake_up_nohz_cpu(int cpu) { }
cpu 2213 include/linux/sched.h extern int idle_cpu(int cpu);
cpu 2220 include/linux/sched.h extern struct task_struct *idle_task(int cpu);
cpu 2231 include/linux/sched.h extern struct task_struct *curr_task(int cpu);
cpu 2232 include/linux/sched.h extern void set_curr_task(int cpu, struct task_struct *p);
cpu 2929 include/linux/sched.h return task_thread_info(p)->cpu;
cpu 2937 include/linux/sched.h extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
cpu 2946 include/linux/sched.h static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
cpu 193 include/linux/seq_file.h extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos);
cpu 195 include/linux/seq_file.h extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos);
cpu 47 include/linux/smp.h void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
cpu 51 include/linux/smp.h int smp_call_function_single_async(int cpu, struct call_single_data *csd);
cpu 74 include/linux/smp.h extern void smp_send_reschedule(int cpu);
cpu 138 include/linux/smp.h static inline void smp_send_reschedule(int cpu) { }
cpu 36 include/linux/smpboot.h int (*thread_should_run)(unsigned int cpu);
cpu 37 include/linux/smpboot.h void (*thread_fn)(unsigned int cpu);
cpu 38 include/linux/smpboot.h void (*create)(unsigned int cpu);
cpu 39 include/linux/smpboot.h void (*setup)(unsigned int cpu);
cpu 40 include/linux/smpboot.h void (*cleanup)(unsigned int cpu, bool online);
cpu 41 include/linux/smpboot.h void (*park)(unsigned int cpu);
cpu 42 include/linux/smpboot.h void (*unpark)(unsigned int cpu);
cpu 43 include/linux/smpboot.h void (*pre_unpark)(unsigned int cpu);
cpu 30 include/linux/stop_machine.h int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
cpu 32 include/linux/stop_machine.h void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
cpu 47 include/linux/stop_machine.h static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
cpu 51 include/linux/stop_machine.h if (cpu == smp_processor_id())
cpu 66 include/linux/stop_machine.h static inline void stop_one_cpu_nowait(unsigned int cpu,
cpu 70 include/linux/stop_machine.h if (cpu == smp_processor_id()) {
cpu 447 include/linux/sunrpc/svc.h struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
cpu 315 include/linux/swap.h extern void lru_add_drain_cpu(int cpu);
cpu 800 include/linux/syscalls.h asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
cpu 846 include/linux/syscalls.h pid_t pid, int cpu, int group_fd, unsigned long flags);
cpu 80 include/linux/tick.h extern struct tick_device *tick_get_device(int cpu);
cpu 89 include/linux/tick.h extern void tick_cancel_sched_timer(int cpu);
cpu 91 include/linux/tick.h static inline void tick_cancel_sched_timer(int cpu) { }
cpu 107 include/linux/tick.h extern struct tick_sched *tick_get_tick_sched(int cpu);
cpu 122 include/linux/tick.h static inline void tick_cancel_sched_timer(int cpu) { }
cpu 141 include/linux/tick.h extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
cpu 142 include/linux/tick.h extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
cpu 159 include/linux/tick.h static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
cpu 160 include/linux/tick.h static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
cpu 176 include/linux/tick.h static inline bool tick_nohz_full_cpu(int cpu)
cpu 181 include/linux/tick.h return cpumask_test_cpu(cpu, tick_nohz_full_mask);
cpu 186 include/linux/tick.h extern void tick_nohz_full_kick_cpu(int cpu);
cpu 191 include/linux/tick.h static inline bool tick_nohz_full_cpu(int cpu) { return false; }
cpu 193 include/linux/tick.h static inline void tick_nohz_full_kick_cpu(int cpu) { }
cpu 199 include/linux/tick.h static inline bool is_housekeeping_cpu(int cpu)
cpu 203 include/linux/tick.h return cpumask_test_cpu(cpu, housekeeping_mask);
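The smp_hotplug_thread hooks indexed above from include/linux/smpboot.h describe one per-CPU kthread that the core manages across hotplug; smpboot_register_percpu_thread() wires the hooks up. A hedged sketch (the demo_* names and flag are ours; error handling trimmed):

#include <linux/kernel.h>
#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);
static DEFINE_PER_CPU(unsigned int, demo_pending);

static int demo_should_run(unsigned int cpu)
{
	return per_cpu(demo_pending, cpu);
}

static void demo_fn(unsigned int cpu)
{
	per_cpu(demo_pending, cpu) = 0;
	pr_info("demo work on cpu%u\n", cpu);
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_fn,
	.thread_comm		= "demo/%u",
};

/* smpboot_register_percpu_thread(&demo_threads) spawns one thread per CPU. */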
cpu 174 include/linux/timer.h extern void add_timer_on(struct timer_list *timer, int cpu);
cpu 257 include/linux/timer.h unsigned long __round_jiffies(unsigned long j, int cpu);
cpu 258 include/linux/timer.h unsigned long __round_jiffies_relative(unsigned long j, int cpu);
cpu 262 include/linux/timer.h unsigned long __round_jiffies_up(unsigned long j, int cpu);
cpu 263 include/linux/timer.h unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
cpu 82 include/linux/topology.h static inline int cpu_to_node(int cpu)
cpu 84 include/linux/topology.h return per_cpu(numa_node, cpu);
cpu 96 include/linux/topology.h static inline void set_cpu_numa_node(int cpu, int node)
cpu 98 include/linux/topology.h per_cpu(numa_node, cpu) = node;
cpu 148 include/linux/topology.h static inline int cpu_to_mem(int cpu)
cpu 150 include/linux/topology.h return per_cpu(_numa_mem_, cpu);
cpu 155 include/linux/topology.h static inline void set_cpu_numa_mem(int cpu, int node)
cpu 157 include/linux/topology.h per_cpu(_numa_mem_, cpu) = node;
cpu 158 include/linux/topology.h _node_numa_mem_[cpu_to_node(cpu)] = node;
cpu 180 include/linux/topology.h static inline int cpu_to_mem(int cpu)
cpu 182 include/linux/topology.h return cpu_to_node(cpu);
cpu 189 include/linux/topology.h #define topology_physical_package_id(cpu) ((void)(cpu), -1)
cpu 192 include/linux/topology.h #define topology_core_id(cpu) ((void)(cpu), 0)
cpu 195 include/linux/topology.h #define topology_thread_cpumask(cpu) cpumask_of(cpu)
cpu 198 include/linux/topology.h #define topology_core_cpumask(cpu) cpumask_of(cpu)
cpu 202 include/linux/topology.h static inline const struct cpumask *cpu_smt_mask(int cpu)
cpu 204 include/linux/topology.h return topology_thread_cpumask(cpu);
cpu 208 include/linux/topology.h static inline const struct cpumask *cpu_cpu_mask(int cpu)
cpu 210 include/linux/topology.h return cpumask_of_node(cpu_to_node(cpu));
cpu 72 include/linux/virtio_config.h int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
cpu 147 include/linux/virtio_config.h int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
cpu 151 include/linux/virtio_config.h return vdev->config->set_vq_affinity(vq, cpu);
cpu 56 include/linux/vmstat.h extern void vm_events_fold_cpu(int cpu);
cpu 76 include/linux/vmstat.h static inline void vm_events_fold_cpu(int cpu)
cpu 153 include/linux/vmstat.h int cpu;
cpu 154 include/linux/vmstat.h for_each_online_cpu(cpu)
cpu 155 include/linux/vmstat.h x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];
cpu 214 include/linux/vmstat.h void cpu_vm_stats_fold(int cpu);
cpu 272 include/linux/vmstat.h static inline void refresh_cpu_vm_stats(int cpu) { }
cpu 274 include/linux/vmstat.h static inline void cpu_vm_stats_fold(int cpu) { }
cpu 93 include/linux/vtime.h extern void vtime_init_idle(struct task_struct *tsk, int cpu);
cpu 104 include/linux/vtime.h static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
cpu 118 include/linux/workqueue.h int cpu;
cpu 431 include/linux/workqueue.h extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
cpu 433 include/linux/workqueue.h extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
cpu 435 include/linux/workqueue.h extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
cpu 456 include/linux/workqueue.h extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
cpu 514 include/linux/workqueue.h static inline bool schedule_work_on(int cpu, struct work_struct *work)
cpu 516 include/linux/workqueue.h return queue_work_on(cpu, system_wq, work);
cpu 544 include/linux/workqueue.h static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
cpu 547 include/linux/workqueue.h return queue_delayed_work_on(cpu, system_wq, dwork, delay);
cpu 573 include/linux/workqueue.h static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
cpu 578 include/linux/workqueue.h long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
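schedule_work_on()/queue_work_on(), indexed above from include/linux/workqueue.h, queue a work item for execution on a specific CPU. A minimal sketch, assuming our own work handler:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static void demo_work_fn(struct work_struct *work)
{
	pr_info("running on cpu%d\n", smp_processor_id());
}

static DECLARE_WORK(demo_work, demo_work_fn);

static void demo_kick_cpu(int cpu)
{
	/* Runs demo_work_fn on @cpu via system_wq, provided it is online. */
	schedule_work_on(cpu, &demo_work);
}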
cpu 65 include/media/saa7146.h __le32 *cpu;
cpu 35 include/net/gen_stats.h struct gnet_stats_basic_cpu __percpu *cpu,
cpu 38 include/net/gen_stats.h struct gnet_stats_basic_cpu __percpu *cpu,
cpu 86 include/net/netfilter/nf_conntrack.h u16 cpu;
cpu 26 include/trace/events/mce.h __field( u32, cpu )
cpu 44 include/trace/events/mce.h __entry->cpu = m->extcpu;
cpu 54 include/trace/events/mce.h __entry->cpu,
cpu 14 include/trace/events/power.h DECLARE_EVENT_CLASS(cpu,
cpu 34 include/trace/events/power.h DEFINE_EVENT(cpu, cpu_idle,
cpu 107 include/trace/events/power.h DEFINE_EVENT(cpu, cpu_frequency,
cpu 198 include/trace/events/rcu.h TP_PROTO(const char *rcuname, int cpu, const char *reason),
cpu 200 include/trace/events/rcu.h TP_ARGS(rcuname, cpu, reason),
cpu 204 include/trace/events/rcu.h __field(int, cpu)
cpu 210 include/trace/events/rcu.h __entry->cpu = cpu;
cpu 214 include/trace/events/rcu.h TP_printk("%s %d %s", __entry->rcuname, __entry->cpu, __entry->reason)
cpu 325 include/trace/events/rcu.h TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
cpu 327 include/trace/events/rcu.h TP_ARGS(rcuname, gpnum, cpu, qsevent),
cpu 332 include/trace/events/rcu.h __field(int, cpu)
cpu 339 include/trace/events/rcu.h __entry->cpu = cpu;
cpu 345 include/trace/events/rcu.h __entry->cpu, __entry->qsevent)
cpu 680 include/trace/events/rcu.h TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
cpu 682 include/trace/events/rcu.h TP_ARGS(rcuname, s, cpu, cnt, done),
cpu 687 include/trace/events/rcu.h __field(int, cpu)
cpu 695 include/trace/events/rcu.h __entry->cpu = cpu;
cpu 701 include/trace/events/rcu.h __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
cpu 713 include/trace/events/rcu.h #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
cpu 719 include/trace/events/rcu.h #define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
cpu 733 include/trace/events/rcu.h #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
cpu 539 include/trace/events/sched.h TP_PROTO(int cpu),
cpu 541 include/trace/events/sched.h TP_ARGS(cpu),
cpu 544 include/trace/events/sched.h __field( int, cpu )
cpu 548 include/trace/events/sched.h __entry->cpu = cpu;
cpu 551 include/trace/events/sched.h TP_printk("cpu=%d", __entry->cpu)
cpu 49 include/trace/events/workqueue.h __field( unsigned int, cpu )
cpu 57 include/trace/events/workqueue.h __entry->cpu = pwq->pool->cpu;
cpu 62 include/trace/events/workqueue.h __entry->req_cpu, __entry->cpu)
cpu 107 include/uapi/linux/blktrace_api.h __u32 cpu; /* on what cpu did it happen */
cpu 64 include/uapi/linux/cn_proc.h __u32 cpu;
cpu 7 include/uapi/linux/netfilter/xt_cpu.h __u32 cpu;
cpu 20 include/xen/events.h int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
cpu 21 include/xen/events.h int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
cpu 26 include/xen/events.h unsigned int cpu,
cpu 59 include/xen/events.h void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
cpu 87 include/xen/events.h int irq_from_virq(unsigned int cpu, unsigned int virq);
cpu 289 include/xen/interface/platform.h uint32_t cpu; /* Physical cpu. */
cpu 350 include/xen/interface/xen-mca.h __u8 cpu; /* cpu number; obsolete; use extcpu now */
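The TP_PROTO/__field/TP_fast_assign/TP_printk fragments indexed above from include/trace/events/sched.h show the standard way a tracepoint records a CPU number. A minimal hedged sketch of an equivalent event definition (the header, system, and event names are ours, not from the index):

/* demo_trace.h - illustrative only; one .c file must define
 * CREATE_TRACE_POINTS before including this header. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_cpu_event,

	TP_PROTO(int cpu),

	TP_ARGS(cpu),

	TP_STRUCT__entry(
		__field(int, cpu)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
	),

	TP_printk("cpu=%d", __entry->cpu)
);

#endif /* _TRACE_DEMO_H */

#include <trace/define_trace.h>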