Use unsigned int for CPU numbers, loop indices, and similar inherently
non-negative quantities throughout the Xen support code, and adjust the
corresponding printk/sscanf format specifiers. Produces better code for
x86-64.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
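
The rationale, in brief: on x86-64 every 32-bit operation zero-extends
its result into the full 64-bit register, so an unsigned int value is
usable in 64-bit addressing as-is, whereas a signed int index that the
compiler cannot prove non-negative needs an explicit sign extension
(movslq) first; unsigned division by a constant is also cheaper than
signed division. A minimal sketch of both effects (illustrative only,
not taken from the patched files; exact code generation depends on the
compiler and optimization level):

	/* Hypothetical examples for illustration. */
	unsigned long pick(unsigned long *tbl, unsigned int i)
	{
		/* The 32-bit add already zero-extends i + 1, so no
		 * separate extension instruction is needed to form
		 * the 64-bit address. */
		return tbl[i + 1];
	}

	unsigned int scale(unsigned int nsec)
	{
		/* Unsigned divide by a constant compiles to a
		 * multiply + shift; a signed divide would need extra
		 * fixup instructions for negative values. */
		return nsec / 1000;
	}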
* Reads a consistent set of time-base values from Xen, into a shadow data
* area.
*/
-static void get_time_values_from_xen(int cpu)
+static void get_time_values_from_xen(unsigned int cpu)
{
struct vcpu_time_info *src;
struct shadow_time_info *dst;
dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
}
-static inline int time_values_up_to_date(int cpu)
+static inline int time_values_up_to_date(unsigned int cpu)
{
struct vcpu_time_info *src;
struct shadow_time_info *dst;
*/
unsigned long long monotonic_clock(void)
{
- int cpu = get_cpu();
+ unsigned int cpu = get_cpu();
struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
u64 time;
u32 local_time_version;
{
s64 delta, delta_cpu, stolen, blocked;
u64 sched_time;
- int i, cpu = smp_processor_id();
+ unsigned int i, cpu = smp_processor_id();
struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
unlikely(delta_cpu < -(s64)permitted_clock_jitter))
&& printk_ratelimit()) {
- printk("Timer ISR/%d: Time went backwards: "
+ printk("Timer ISR/%u: Time went backwards: "
"delta=%lld delta_cpu=%lld shadow=%lld "
"off=%lld processed=%lld cpu_processed=%lld\n",
cpu, delta, delta_cpu, shadow->system_timestamp,
return IRQ_HANDLED;
}
-static void init_missing_ticks_accounting(int cpu)
+static void init_missing_ticks_accounting(unsigned int cpu)
{
struct vcpu_register_runstate_memory_area area;
struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
init_missing_ticks_accounting(cpu);
} while (read_seqretry(&xtime_lock, seq));
- sprintf(timer_name[cpu], "timer%d", cpu);
+ sprintf(timer_name[cpu], "timer%u", cpu);
irq = bind_virq_to_irqhandler(VIRQ_TIMER,
cpu,
timer_interrupt,
unsigned long vstart, unsigned int order, unsigned int address_bits)
{
unsigned long *in_frames = discontig_frames, out_frame;
- unsigned long frame, i, flags;
- long rc;
- int success;
+ unsigned long frame, flags;
+ unsigned int i;
+ int rc, success;
struct xen_memory_exchange exchange = {
.in = {
.nr_extents = 1UL << order,
balloon_lock(flags);
/* 1. Zap current PTEs, remembering MFNs. */
- for (i = 0; i < (1UL<<order); i++) {
+ for (i = 0; i < (1U<<order); i++) {
in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
__pte_ma(0), 0);
&exchange.out) == 1);
if (!success) {
/* Couldn't get special memory: fall back to normal. */
- for (i = 0; i < (1UL<<order); i++)
+ for (i = 0; i < (1U<<order); i++)
in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
&exchange.in) != (1UL<<order))
#endif
/* 3. Map the new extent in place of old pages. */
- for (i = 0; i < (1UL<<order); i++) {
+ for (i = 0; i < (1U<<order); i++) {
frame = success ? (out_frame + i) : in_frames[i];
MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
pfn_pte_ma(frame, PAGE_KERNEL), 0);
void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
unsigned long *out_frames = discontig_frames, in_frame;
- unsigned long frame, i, flags;
- long rc;
- int success;
+ unsigned long frame, flags;
+ unsigned int i;
+ int rc, success;
struct xen_memory_exchange exchange = {
.in = {
.nr_extents = 1,
in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
/* 2. Zap current PTEs. */
- for (i = 0; i < (1UL<<order); i++) {
+ for (i = 0; i < (1U<<order); i++) {
MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
__pte_ma(0), 0);
set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
#endif
/* 4. Map new pages in place of old pages. */
- for (i = 0; i < (1UL<<order); i++) {
+ for (i = 0; i < (1U<<order); i++) {
frame = success ? out_frames[i] : (in_frame + i);
MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
pfn_pte_ma(frame, PAGE_KERNEL), 0);
unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
void *v;
struct page *page;
- int i, nr_mcl, rc, success;
+ unsigned int i, nr_mcl;
+ int rc, success;
struct xen_memory_exchange exchange = {
.in = {
balloon_lock(flags);
/* 1. Zap current PTEs (if any), remembering MFNs. */
- for (i = 0, nr_mcl = 0; i < (1UL<<order); i++) {
+ for (i = 0, nr_mcl = 0; i < (1U<<order); i++) {
page = &pages[i];
out_frames[i] = page_to_pfn(page);
#endif
/* 3. Map the new pages in place of old pages. */
- for (i = 0, nr_mcl = 0; i < (1UL<<order); i++) {
+ for (i = 0, nr_mcl = 0; i < (1U<<order); i++) {
unsigned long pfn;
page = &pages[i];
pfn = page_to_pfn(page);
if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
return;
- sprintf(dir, "cpu/%d", cpu);
+ sprintf(dir, "cpu/%u", cpu);
err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
if (err != 1) {
printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
static void handle_vcpu_hotplug_event(
struct xenbus_watch *watch, const char **vec, unsigned int len)
{
- int cpu;
+ unsigned int cpu;
char *cpustr;
const char *node = vec[XS_WATCH_PATH];
if ((cpustr = strstr(node, "cpu/")) != NULL) {
- sscanf(cpustr, "cpu/%d", &cpu);
+ sscanf(cpustr, "cpu/%u", &cpu);
vcpu_hotplug(cpu);
}
}
static int smpboot_cpu_notify(struct notifier_block *notifier,
unsigned long action, void *hcpu)
{
- int cpu = (long)hcpu;
+ unsigned int cpu = (long)hcpu;
/*
* We do this in a callback notifier rather than __cpu_disable()
static int setup_cpu_watcher(struct notifier_block *notifier,
unsigned long event, void *data)
{
- int i;
+ unsigned int i;
static struct xenbus_watch cpu_watch = {
.node = "cpu",
int smp_suspend(void)
{
- int cpu, err;
+ unsigned int cpu;
+ int err;
for_each_online_cpu(cpu) {
if (cpu == 0)
void smp_resume(void)
{
- int cpu;
+ unsigned int cpu;
for_each_possible_cpu(cpu)
vcpu_hotplug(cpu);
unsigned long l1, l2;
unsigned long masked_l1, masked_l2;
unsigned int l1i, l2i, port, count;
- int irq, cpu = smp_processor_id();
+ int irq;
+ unsigned int cpu = smp_processor_id();
shared_info_t *s = HYPERVISOR_shared_info;
vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
static void unbind_from_irq(unsigned int irq)
{
struct evtchn_close close;
- int cpu, evtchn = evtchn_from_irq(irq);
+ unsigned int cpu;
+ int evtchn = evtchn_from_irq(irq);
spin_lock(&irq_mapping_update_lock);
synch_set_bit(i, &s->evtchn_mask[0]);
}
-static void restore_cpu_virqs(int cpu)
+static void restore_cpu_virqs(unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
int virq, irq, evtchn;
}
}
-static void restore_cpu_ipis(int cpu)
+static void restore_cpu_ipis(unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
int ipi, irq, evtchn;
void irq_resume(void)
{
- int cpu, pirq, irq, evtchn;
+ unsigned int cpu, pirq, irq, evtchn;
init_evtchn_cpu_bindings();
void __init xen_init_IRQ(void)
{
- int i;
+ unsigned int i;
init_evtchn_cpu_bindings();
}
static inline void
-set_cpu_sibling_map(int cpu)
+set_cpu_sibling_map(unsigned int cpu)
{
cpu_data[cpu].phys_proc_id = cpu;
cpu_data[cpu].cpu_core_id = 0;
}
static void
-remove_siblinginfo(int cpu)
+remove_siblinginfo(unsigned int cpu)
{
cpu_data[cpu].phys_proc_id = BAD_APICID;
cpu_data[cpu].cpu_core_id = BAD_APICID;
per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
- sprintf(resched_name[cpu], "resched%d", cpu);
+ sprintf(resched_name[cpu], "resched%u", cpu);
rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
cpu,
smp_reschedule_interrupt,
goto fail;
per_cpu(resched_irq, cpu) = rc;
- sprintf(callfunc_name[cpu], "callfunc%d", cpu);
+ sprintf(callfunc_name[cpu], "callfunc%u", cpu);
rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
cpu,
smp_call_function_interrupt,
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu;
+ unsigned int cpu;
struct task_struct *idle;
#ifdef __x86_64__
struct desc_ptr *gdt_descr;
int __cpu_disable(void)
{
cpumask_t map = cpu_online_map;
- int cpu = smp_processor_id();
+ unsigned int cpu = smp_processor_id();
if (cpu == 0)
return -EBUSY;
xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
{
struct xenoprof_buf * buf;
- int cpu;
static unsigned long flag;
- cpu = smp_processor_id();
- buf = xenoprof_buf[cpu];
+ buf = xenoprof_buf[smp_processor_id()];
xenoprof_add_pc(buf, 0);
static void unbind_virq(void)
{
- int i;
+ unsigned int i;
for_each_online_cpu(i) {
if (ovf_irq[i] >= 0) {
static int bind_virq(void)
{
- int i, result;
+ unsigned int i;
+ int result;
for_each_online_cpu(i) {
result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
unsigned int pdoms)
{
int ret;
- int i, j;
+ unsigned int i, j;
struct xenoprof_buf *buf;
if (!xenoprof_is_primary)
int __init xenoprofile_init(struct oprofile_operations * ops)
{
struct xenoprof_init init;
- int ret, i;
+ unsigned int i;
+ int ret;
ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
if (!ret) {
static inline int
HYPERVISOR_vcpu_op(
- int cmd, int vcpuid, void *extra_args)
+ int cmd, unsigned int vcpuid, void *extra_args)
{
return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}
static inline int
HYPERVISOR_vcpu_op(
- int cmd, int vcpuid, void *extra_args)
+ int cmd, unsigned int vcpuid, void *extra_args)
{
return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}