direct-io.hg

changeset 1273:9d7daee83fa1

bitkeeper revision 1.837 (406d9e1fPZYe6CULYL8E7fCs3l-PlQ)

processor.h, schedule.c, memory.c:
Cleanups.
author kaf24@scramble.cl.cam.ac.uk
date Fri Apr 02 17:08:47 2004 +0000 (2004-04-02)
parents 498890e33619
children 20a5f571fb14
files xen/common/memory.c xen/common/schedule.c xen/include/asm-i386/processor.h
line diff
     1.1 --- a/xen/common/memory.c	Fri Apr 02 16:27:52 2004 +0000
     1.2 +++ b/xen/common/memory.c	Fri Apr 02 17:08:47 2004 +0000
     1.3 @@ -766,32 +766,32 @@ void free_page_type(struct pfn_info *pag
     1.4      {
     1.5      case PGT_l1_page_table:
     1.6          free_l1_table(page);
     1.7 -	if ( unlikely(current->mm.shadow_mode) && 
     1.8 -	     (get_shadow_status(&current->mm, 
     1.9 -				page-frame_table) & PSH_shadowed) )
    1.10 -	{
    1.11 -	    /* using 'current-mm' is safe because page type changes only
    1.12 -	       occur within the context of the currently running domain as 
    1.13 -	       pagetable pages can not be shared across domains. The one
    1.14 -	       exception is when destroying a domain. However, we get away 
    1.15 -	       with this as there's no way the current domain can have this
    1.16 -	       mfn shadowed, so we won't get here... Phew! */
    1.17 +        if ( unlikely(current->mm.shadow_mode) && 
    1.18 +             (get_shadow_status(&current->mm, 
    1.19 +                                page-frame_table) & PSH_shadowed) )
    1.20 +        {
    1.21 +            /* using 'current-mm' is safe because page type changes only
    1.22 +               occur within the context of the currently running domain as 
    1.23 +               pagetable pages can not be shared across domains. The one
    1.24 +               exception is when destroying a domain. However, we get away 
    1.25 +               with this as there's no way the current domain can have this
    1.26 +               mfn shadowed, so we won't get here... Phew! */
    1.27  
    1.28 - 	    unshadow_table( page-frame_table, type );
    1.29 -	    put_shadow_status(&current->mm);
    1.30 +            unshadow_table( page-frame_table, type );
    1.31 +            put_shadow_status(&current->mm);
    1.32          }
    1.33 -	return;
    1.34 +        return;
    1.35  
    1.36      case PGT_l2_page_table:
    1.37          free_l2_table(page);
    1.38 -	if ( unlikely(current->mm.shadow_mode) && 
    1.39 -	     (get_shadow_status(&current->mm, 
    1.40 -				page-frame_table) & PSH_shadowed) )
    1.41 -	{
    1.42 -	    unshadow_table( page-frame_table, type );
    1.43 -	    put_shadow_status(&current->mm);
    1.44 +        if ( unlikely(current->mm.shadow_mode) && 
    1.45 +             (get_shadow_status(&current->mm, 
    1.46 +                                page-frame_table) & PSH_shadowed) )
    1.47 +        {
    1.48 +            unshadow_table( page-frame_table, type );
    1.49 +            put_shadow_status(&current->mm);
    1.50          }
    1.51 -	return;
    1.52 +        return;
    1.53  
    1.54      default:
    1.55          BUG();
    1.56 @@ -813,7 +813,7 @@ static int do_extended_command(unsigned 
    1.57      case MMUEXT_PIN_L2_TABLE:
    1.58          okay = get_page_and_type_from_pagenr(
    1.59              pfn, (cmd == MMUEXT_PIN_L2_TABLE) ? PGT_l2_page_table : 
    1.60 -                                                PGT_l1_page_table,
    1.61 +            PGT_l1_page_table,
    1.62              CHECK_STRICT);
    1.63          if ( unlikely(!okay) )
    1.64          {
    1.65 @@ -866,7 +866,7 @@ static int do_extended_command(unsigned 
    1.66  
    1.67              shadow_mk_pagetable(&current->mm);
    1.68  
    1.69 -	    write_ptbase(&current->mm);
    1.70 +            write_ptbase(&current->mm);
    1.71  
    1.72              put_page_and_type(&frame_table[old_base_pfn]);    
    1.73          }
    1.74 @@ -1005,14 +1005,14 @@ int do_mmu_update(mmu_update_t *ureqs, i
    1.75                      okay = mod_l1_entry((l1_pgentry_t *)va, 
    1.76                                          mk_l1_pgentry(req.val)); 
    1.77  
    1.78 -		    if ( okay && unlikely(current->mm.shadow_mode) &&
    1.79 -			 (get_shadow_status(&current->mm, page-frame_table) &
    1.80 -			  PSH_shadowed) )
    1.81 -		    {
    1.82 -		        shadow_l1_normal_pt_update( req.ptr, req.val, 
    1.83 -						    &prev_spfn, &prev_spl1e );
    1.84 -			put_shadow_status(&current->mm);
    1.85 -		    }
    1.86 +                    if ( okay && unlikely(current->mm.shadow_mode) &&
    1.87 +                         (get_shadow_status(&current->mm, page-frame_table) &
    1.88 +                          PSH_shadowed) )
    1.89 +                    {
    1.90 +                        shadow_l1_normal_pt_update( req.ptr, req.val, 
    1.91 +                                                    &prev_spfn, &prev_spl1e );
    1.92 +                        put_shadow_status(&current->mm);
    1.93 +                    }
    1.94  
    1.95                      put_page_type(page);
    1.96                  }
    1.97 @@ -1024,13 +1024,13 @@ int do_mmu_update(mmu_update_t *ureqs, i
    1.98                                          mk_l2_pgentry(req.val),
    1.99                                          pfn); 
   1.100  
   1.101 -		    if ( okay && unlikely(current->mm.shadow_mode) &&
   1.102 -			 (get_shadow_status(&current->mm, page-frame_table) & 
   1.103 -			  PSH_shadowed) )
   1.104 -		    {
   1.105 -		        shadow_l2_normal_pt_update( req.ptr, req.val );
   1.106 -			put_shadow_status(&current->mm);
   1.107 -		    }
   1.108 +                    if ( okay && unlikely(current->mm.shadow_mode) &&
   1.109 +                         (get_shadow_status(&current->mm, page-frame_table) & 
   1.110 +                          PSH_shadowed) )
   1.111 +                    {
   1.112 +                        shadow_l2_normal_pt_update( req.ptr, req.val );
   1.113 +                        put_shadow_status(&current->mm);
   1.114 +                    }
   1.115  
   1.116                      put_page_type(page);
   1.117                  }
   1.118 @@ -1041,8 +1041,6 @@ int do_mmu_update(mmu_update_t *ureqs, i
   1.119                      *(unsigned long *)va = req.val;
   1.120                      okay = 1;
   1.121                      put_page_type(page);
   1.122 -
   1.123 -                    // at present, we don't shadowing such pages
   1.124                  }
   1.125                  break;
   1.126              }
   1.127 @@ -1127,8 +1125,10 @@ int do_update_va_mapping(unsigned long p
   1.128      if ( unlikely(page_nr >= (HYPERVISOR_VIRT_START >> PAGE_SHIFT)) )
   1.129          return -EINVAL;
   1.130  
   1.131 -    // XXX when we make this support 4MB pages we should also
   1.132 -    // deal with the case of updating L2s
   1.133 +    /*
   1.134 +     * XXX When we make this support 4MB superpages we should also deal with 
   1.135 +     * the case of updating L2 entries.
   1.136 +     */
   1.137  
   1.138      if ( unlikely(!mod_l1_entry(&linear_pg_table[page_nr], 
   1.139                                  mk_l1_pgentry(val))) )
   1.140 @@ -1138,21 +1138,21 @@ int do_update_va_mapping(unsigned long p
   1.141      {
   1.142          unsigned long sval;
   1.143  
   1.144 -	l1pte_no_fault( &current->mm, &val, &sval );
   1.145 +        l1pte_no_fault( &current->mm, &val, &sval );
   1.146  
   1.147 -	if ( unlikely(__put_user( sval, ((unsigned long *) (&shadow_linear_pg_table[page_nr])) ) ) )
   1.148 -	{
   1.149 -	    // Since L2's are guranteed RW, failure indicates the page
   1.150 -	    // was not shadowed, so ignore.
   1.151 +        if ( unlikely(__put_user(sval, ((unsigned long *)(
   1.152 +            &shadow_linear_pg_table[page_nr])))) )
   1.153 +        {
   1.154 +            /*
    1.155 +             * Since L2's are guaranteed RW, failure indicates the page was not 
   1.156 +             * shadowed, so ignore.
   1.157 +             */
   1.158              perfc_incrc(shadow_update_va_fail);
   1.159 -	    //MEM_LOG("update_va_map: couldn't write update\n");	
   1.160 -	}
   1.161 +        }
   1.162  
   1.163 -	check_pagetable( p, p->mm.pagetable, "va" ); // debug
   1.164 -    
   1.165 +        check_pagetable( p, p->mm.pagetable, "va" ); /* debug */
   1.166      }
   1.167  
   1.168 -
   1.169      deferred_ops = percpu_info[cpu].deferred_ops;
   1.170      percpu_info[cpu].deferred_ops = 0;
   1.171  
   1.172 @@ -1309,7 +1309,7 @@ void audit_all_pages(u_char key, void *d
   1.173  
   1.174          /* check ref count for leaf pages */
   1.175          if ( ((frame_table[i].type_and_flags & PGT_type_mask) ==
   1.176 -               PGT_writeable_page) )
   1.177 +              PGT_writeable_page) )
   1.178          {
   1.179              ref_count = 0;
   1.180  
     2.1 --- a/xen/common/schedule.c	Fri Apr 02 16:27:52 2004 +0000
     2.2 +++ b/xen/common/schedule.c	Fri Apr 02 17:08:47 2004 +0000
     2.3 @@ -175,7 +175,7 @@ void init_idle_task(void)
     2.4      struct task_struct *p = current;
     2.5  
     2.6      if ( SCHED_OP(alloc_task, p) < 0)
     2.7 -		panic("Failed to allocate scheduler private data for idle task");
     2.8 +        panic("Failed to allocate scheduler private data for idle task");
     2.9      SCHED_OP(add_task, p);
    2.10  
    2.11      spin_lock_irqsave(&schedule_lock[p->processor], flags);
    2.12 @@ -283,13 +283,11 @@ long do_sched_op(unsigned long op)
    2.13  }
    2.14  
    2.15  
    2.16 -/* sched_pause_sync - synchronously pause a domain's execution 
    2.17 -
    2.18 -XXXX This is horibly broken -- here just as a place holder at present,
    2.19 -                               do not use.
    2.20 -
    2.21 -*/
    2.22 -
    2.23 +/*
    2.24 + * sched_pause_sync - synchronously pause a domain's execution 
     2.25 + * XXXX This is horribly broken -- here just as a place holder at present,
    2.26 + *                                do not use.
    2.27 + */
    2.28  void sched_pause_sync(struct task_struct *p)
    2.29  {
    2.30      unsigned long flags;
    2.31 @@ -297,22 +295,21 @@ void sched_pause_sync(struct task_struct
    2.32  
    2.33      spin_lock_irqsave(&schedule_lock[cpu], flags);
    2.34  
    2.35 +    /* If not the current task, we can remove it from scheduling now. */
    2.36      if ( schedule_data[cpu].curr != p )
    2.37 -        /* if not the current task, we can remove it from scheduling now */
    2.38          SCHED_OP(pause, p);
    2.39  
    2.40      p->state = TASK_PAUSED;
    2.41      
    2.42      spin_unlock_irqrestore(&schedule_lock[cpu], flags);
    2.43  
    2.44 -    /* spin until domain is descheduled by its local scheduler */
    2.45 +    /* Spin until domain is descheduled by its local scheduler. */
    2.46      while ( schedule_data[cpu].curr == p )
    2.47      {
    2.48 -		send_hyp_event(p, _HYP_EVENT_NEED_RESCHED );
    2.49 -		do_yield();
    2.50 +        send_hyp_event(p, _HYP_EVENT_NEED_RESCHED );
    2.51 +        do_yield();
    2.52      }
    2.53 -    
    2.54 -    
    2.55 +        
    2.56      /* The domain will not be scheduled again until we do a wake_up(). */
    2.57  }
    2.58  
     3.1 --- a/xen/include/asm-i386/processor.h	Fri Apr 02 16:27:52 2004 +0000
     3.2 +++ b/xen/include/asm-i386/processor.h	Fri Apr 02 17:08:47 2004 +0000
     3.3 @@ -425,9 +425,9 @@ struct mm_struct {
     3.4      spinlock_t shadow_lock;
     3.5      struct shadow_status *shadow_ht;
     3.6      struct shadow_status *shadow_ht_free;
     3.7 -    struct shadow_status *shadow_ht_extras; // extra allocation units
     3.8 +    struct shadow_status *shadow_ht_extras; /* extra allocation units */
     3.9      unsigned int *shadow_dirty_bitmap;
    3.10 -    unsigned int shadow_dirty_bitmap_size;  // in pages, bit per page
    3.11 +    unsigned int shadow_dirty_bitmap_size;  /* in pages, bit per page */
    3.12      unsigned int shadow_page_count;
    3.13      unsigned int shadow_max_page_count;
    3.14      unsigned int shadow_extras_count;
    3.15 @@ -440,20 +440,12 @@ struct mm_struct {
    3.16  
    3.17  static inline void write_ptbase( struct mm_struct *m )
    3.18  {
    3.19 -/*    printk("write_ptbase mode=%08x pt=%08lx st=%08lx\n",
    3.20 -	   m->shadow_mode, pagetable_val(m->pagetable),
    3.21 -	   pagetable_val(m->shadow_table) );
    3.22 - */
    3.23 -    if( m->shadow_mode )
    3.24 -      {
    3.25 -	//check_pagetable( m, m->pagetable, "write_ptbase" );
    3.26 -	write_cr3_counted(pagetable_val(m->shadow_table));
    3.27 -      }
    3.28 +    if ( unlikely(m->shadow_mode) )
    3.29 +        write_cr3_counted(pagetable_val(m->shadow_table));
    3.30      else
    3.31 -      write_cr3_counted(pagetable_val(m->pagetable));
    3.32 +        write_cr3_counted(pagetable_val(m->pagetable));
    3.33  }
    3.34  
    3.35 -
    3.36  #define IDLE0_MM                                                    \
    3.37  {                                                                   \
    3.38      perdomain_pt: 0,                                                \