ia64/xen-unstable

changeset 6285:3d45fb64b064

Tidy up SMP debug and suspend/resume code: add an `uber_debug` flag with
printk tracing for IPI sends and event-channel upcalls; clear the event
channel's pending and mask bits when binding IPIs and VIRQs; and rework
__do_suspend's CPU bookkeeping (track prev_online_cpus/prev_present_cpus,
have restore_vcpu_context report failure via a return code, and remove
stray debug printks).

Signed-off-by: Steven Smith, sos22@cam.ac.uk
author sos22@douglas.cl.cam.ac.uk
date Fri Aug 19 10:18:53 2005 +0000 (2005-08-19)
parents 5a7efe0cf5fb
children 509316987d65
files linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c linux-2.6-xen-sparse/arch/xen/kernel/reboot.c
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Thu Aug 18 16:28:41 2005 +0000
     1.2 +++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smp.c	Fri Aug 19 10:18:53 2005 +0000
     1.3 @@ -129,10 +129,11 @@ static inline int __prepare_ICR2 (unsign
     1.4  
     1.5  DECLARE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
     1.6  
     1.7 +extern unsigned uber_debug;
     1.8 +
     1.9  static inline void __send_IPI_one(unsigned int cpu, int vector)
    1.10  {
    1.11  	unsigned int evtchn;
    1.12 -	int r;
    1.13  
    1.14  	evtchn = per_cpu(ipi_to_evtchn, cpu)[vector];
    1.15  	// printk("send_IPI_mask_bitmask cpu %d vector %d evtchn %d\n", cpu, vector, evtchn);
    1.16 @@ -143,6 +144,9 @@ static inline void __send_IPI_one(unsign
    1.17  		       synch_test_bit(evtchn, &s->evtchn_mask[0]))
    1.18  			;
    1.19  #endif
    1.20 +		if (uber_debug)
    1.21 +			printk("<0>Send ipi %d to %d evtchn %d.\n",
    1.22 +			       vector, cpu, evtchn);
    1.23  		notify_via_evtchn(evtchn);
    1.24  	} else
    1.25  		printk("send_IPI to unbound port %d/%d",
    1.26 @@ -601,6 +605,7 @@ irqreturn_t smp_call_function_interrupt(
    1.27  	void (*func) (void *info) = call_data->func;
    1.28  	void *info = call_data->info;
    1.29  	int wait = call_data->wait;
    1.30 +	extern unsigned uber_debug;
    1.31  
    1.32  	/*
    1.33  	 * Notify initiating CPU that I've grabbed the data and am
    1.34 @@ -612,6 +617,9 @@ irqreturn_t smp_call_function_interrupt(
    1.35  	 * At this point the info structure may be out of scope unless wait==1
    1.36  	 */
    1.37  	irq_enter();
    1.38 +	if (uber_debug && smp_processor_id())
    1.39 +		printk("<0>Processor %d calling %p.\n", smp_processor_id(),
    1.40 +		       func);
    1.41  	(*func)(info);
    1.42  	irq_exit();
    1.43  
     2.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Thu Aug 18 16:28:41 2005 +0000
     2.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/evtchn.c	Fri Aug 19 10:18:53 2005 +0000
     2.3 @@ -124,6 +124,8 @@ extern asmlinkage unsigned int do_IRQ(st
     2.4  
     2.5  #define VALID_EVTCHN(_chn) ((_chn) >= 0)
     2.6  
     2.7 +unsigned uber_debug;
     2.8 +
     2.9  /*
    2.10   * Force a proper event-channel callback from Xen after clearing the
    2.11   * callback mask. We do this in a very simple manner, by making a call
    2.12 @@ -144,7 +146,7 @@ asmlinkage void evtchn_do_upcall(struct 
    2.13      vcpu_info_t   *vcpu_info = &s->vcpu_data[cpu];
    2.14  
    2.15      vcpu_info->evtchn_upcall_pending = 0;
    2.16 -    
    2.17 +
    2.18      /* NB. No need for a barrier here -- XCHG is a barrier on x86. */
    2.19      l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
    2.20      while ( l1 != 0 )
    2.21 @@ -158,9 +160,13 @@ asmlinkage void evtchn_do_upcall(struct 
    2.22              l2 &= ~(1 << l2i);
    2.23              
    2.24              port = (l1i << 5) + l2i;
    2.25 -            if ( (irq = evtchn_to_irq[port]) != -1 )
    2.26 +	    if (uber_debug && cpu)
    2.27 +		printk("<0>Upcall to %d on %d.\n", port, cpu);
    2.28 +            if ( (irq = evtchn_to_irq[port]) != -1 ) {
    2.29 +		if (uber_debug && cpu)
    2.30 +		    printk("<0>IRQ %d.\n", irq);
    2.31                  do_IRQ(irq, regs);
    2.32 -	    else
    2.33 +	    } else
    2.34                  evtchn_device_upcall(port);
    2.35          }
    2.36      }
    2.37 @@ -272,6 +278,8 @@ void _bind_ipi_to_irq(int ipi, int vcpu,
    2.38      evtchn_to_irq[evtchn] = irq;
    2.39      irq_to_evtchn[irq]    = evtchn;
    2.40  
    2.41 +    printk("<0>evtchn_to_irq[%d] = %d.\n", evtchn,
    2.42 +	   evtchn_to_irq[evtchn]);
    2.43      per_cpu(ipi_to_evtchn, vcpu)[ipi] = evtchn;
    2.44  
    2.45      bind_evtchn_to_cpu(evtchn, vcpu);
    2.46 @@ -279,6 +287,7 @@ void _bind_ipi_to_irq(int ipi, int vcpu,
    2.47      spin_unlock(&irq_mapping_update_lock);
    2.48  
    2.49      clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_mask);
    2.50 +    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_pending);
    2.51  }
    2.52  
    2.53  void _bind_virq_to_irq(int virq, int cpu, int irq)
    2.54 @@ -294,7 +303,6 @@ void _bind_virq_to_irq(int virq, int cpu
    2.55              panic("Failed to bind virtual IRQ %d\n", virq);
    2.56      evtchn = op.u.bind_virq.port;
    2.57  
    2.58 -
    2.59      evtchn_to_irq[irq_to_evtchn[irq]] = -1;
    2.60      irq_to_evtchn[irq] = -1;
    2.61  
    2.62 @@ -306,6 +314,9 @@ void _bind_virq_to_irq(int virq, int cpu
    2.63      bind_evtchn_to_cpu(evtchn, cpu);
    2.64  
    2.65      spin_unlock(&irq_mapping_update_lock);
    2.66 +
    2.67 +    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_mask);
    2.68 +    clear_bit(evtchn, (unsigned long *)HYPERVISOR_shared_info->evtchn_pending);
    2.69  }
    2.70  
    2.71  int bind_ipi_to_irq(int ipi)
     3.1 --- a/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Thu Aug 18 16:28:41 2005 +0000
     3.2 +++ b/linux-2.6-xen-sparse/arch/xen/kernel/reboot.c	Fri Aug 19 10:18:53 2005 +0000
     3.3 @@ -70,7 +70,13 @@ static void save_vcpu_context(int vcpu, 
     3.4      int r;
     3.5      int gdt_pages;
     3.6      r = HYPERVISOR_vcpu_pickle(vcpu, ctxt);
     3.7 -    BUG_ON(r != 0);
     3.8 +    if (r != 0)
     3.9 +	panic("pickling vcpu %d -> %d!\n", vcpu, r);
    3.10 +
    3.11 +    /* Translate from machine to physical addresses where necessary,
    3.12 +       so that they can be translated to our new machine address space
    3.13 +       after resume.  libxc is responsible for doing this to vcpu0,
    3.14 +       but we do it to the others. */
    3.15      gdt_pages = (ctxt->gdt_ents + 511) / 512;
    3.16      ctxt->ctrlreg[3] = machine_to_phys(ctxt->ctrlreg[3]);
    3.17      for (r = 0; r < gdt_pages; r++)
    3.18 @@ -81,7 +87,7 @@ void _restore_vcpu(int cpu);
    3.19  
    3.20  atomic_t vcpus_rebooting;
    3.21  
    3.22 -static void restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
    3.23 +static int restore_vcpu_context(int vcpu, vcpu_guest_context_t *ctxt)
    3.24  {
    3.25      int r;
    3.26      int gdt_pages = (ctxt->gdt_ents + 511) / 512;
    3.27 @@ -93,21 +99,25 @@ static void restore_vcpu_context(int vcp
    3.28      ((unsigned long *)ctxt->user_regs.esp)[0] = ctxt->user_regs.eip;
    3.29      ctxt->user_regs.eip = (unsigned long)_restore_vcpu;
    3.30  
    3.31 +    /* De-canonicalise.  libxc handles this for vcpu 0, but we need
    3.32 +       to do it for the other vcpus. */
    3.33      ctxt->ctrlreg[3] = phys_to_machine(ctxt->ctrlreg[3]);
    3.34      for (r = 0; r < gdt_pages; r++)
    3.35  	ctxt->gdt_frames[r] = pfn_to_mfn(ctxt->gdt_frames[r]);
    3.36 +
    3.37      atomic_set(&vcpus_rebooting, 1);
    3.38      r = HYPERVISOR_boot_vcpu(vcpu, ctxt);
    3.39      if (r != 0) {
    3.40  	printk(KERN_EMERG "Failed to reboot vcpu %d (%d)\n", vcpu, r);
    3.41 -	return;
    3.42 +	return -1;
    3.43      }
    3.44 -    /* Hmm... slight hack: make sure the cpus come up in order,
    3.45 -       because that way they get the same evtchn numbers this time as
    3.46 -       they did last time, which works around a few bugs. */
    3.47 -    /* XXX */
    3.48 +
    3.49 +    /* Make sure we wait for the new vcpu to come up before trying to do
    3.50 +       anything with it or starting the next one. */
    3.51      while (atomic_read(&vcpus_rebooting))
    3.52  	barrier();
    3.53 +
    3.54 +    return 0;
    3.55  }
    3.56  
    3.57  extern unsigned uber_debug;
    3.58 @@ -159,7 +169,7 @@ static int __do_suspend(void *ignore)
    3.59      extern unsigned long max_pfn;
    3.60      extern unsigned int *pfn_to_mfn_frame_list;
    3.61  
    3.62 -    cpumask_t feasible_cpus;
    3.63 +    cpumask_t prev_online_cpus, prev_present_cpus;
    3.64      int err = 0;
    3.65  
    3.66      BUG_ON(smp_processor_id() != 0);
    3.67 @@ -186,7 +196,7 @@ static int __do_suspend(void *ignore)
    3.68      /* (We don't need to worry about other cpus bringing stuff up,
    3.69         since by the time num_online_cpus() == 1, there aren't any
    3.70         other cpus) */
    3.71 -    cpus_clear(feasible_cpus);
    3.72 +    cpus_clear(prev_online_cpus);
    3.73      preempt_disable();
    3.74      while (num_online_cpus() > 1) {
    3.75  	preempt_enable();
    3.76 @@ -198,17 +208,24 @@ static int __do_suspend(void *ignore)
    3.77  		printk(KERN_CRIT "Failed to take all CPUs down: %d.\n", err);
    3.78  		goto out_reenable_cpus;
    3.79  	    }
    3.80 -	    cpu_set(i, feasible_cpus);
    3.81 +	    cpu_set(i, prev_online_cpus);
    3.82  	}
    3.83 +	preempt_disable();
    3.84      }
    3.85  
    3.86      suspend_record->nr_pfns = max_pfn; /* final number of pfns */
    3.87  
    3.88      __cli();
    3.89  
    3.90 -    for (i = 0; i < NR_CPUS; i++)
    3.91 -	if (cpu_isset(i, feasible_cpus))
    3.92 -	    save_vcpu_context(i, &suspended_cpu_records[i]);
    3.93 +    preempt_enable();
    3.94 +
    3.95 +    cpus_clear(prev_present_cpus);
    3.96 +    for_each_present_cpu(i) {
    3.97 +	if (i == 0)
    3.98 +	    continue;
    3.99 +	save_vcpu_context(i, &suspended_cpu_records[i]);
   3.100 +	cpu_set(i, prev_present_cpus);
   3.101 +    }
   3.102  
   3.103  #ifdef __i386__
   3.104      mm_pin_all();
   3.105 @@ -282,27 +299,24 @@ static int __do_suspend(void *ignore)
   3.106  
   3.107      usbif_resume();
   3.108  
   3.109 -    for (i = 0; i < NR_CPUS; i++)
   3.110 -	if (cpu_isset(i, feasible_cpus))
   3.111 -	    restore_vcpu_context(i, &suspended_cpu_records[i]);
   3.112 +    for_each_cpu_mask(i, prev_present_cpus) {
   3.113 +	restore_vcpu_context(i, &suspended_cpu_records[i]);
   3.114 +    }
   3.115  
   3.116 -    printk("<0>All cpus rebooted...\n");
   3.117      __sti();
   3.118  
   3.119   out_reenable_cpus:
   3.120 -    while (!cpus_empty(feasible_cpus)) {
   3.121 -	i = first_cpu(feasible_cpus);
   3.122 -	printk("<0>Bring %d up.\n", i);
   3.123 +    for_each_cpu_mask(i, prev_online_cpus) {
   3.124  	j = cpu_up(i);
   3.125 -	printk("<0>cpu_up(%d) -> %d.\n", i, j);
   3.126  	if (j != 0) {
   3.127  	    printk(KERN_CRIT "Failed to bring cpu %d back up (%d).\n",
   3.128  		   i, j);
   3.129  	    err = j;
   3.130  	}
   3.131 -	cpu_clear(i, feasible_cpus);
   3.132      }
   3.133  
   3.134 +    uber_debug = 0;
   3.135 +
   3.136   out:
   3.137      if ( suspend_record != NULL )
   3.138          free_page((unsigned long)suspend_record);