* this program; If not, see <http://www.gnu.org/licenses/>.
*/
+#include <xen/cpu.h>
#include <xen/sched.h>
#include <xen/iocap.h>
#include <xen/iommu.h>
return pg;
}
+/*
+ * Intermediate page tables which get replaced by large pages may only be
+ * freed after a suitable IOTLB flush. Hence such pages get queued on a
+ * per-CPU list, with a per-CPU tasklet processing the list on the assumption
+ * that the necessary IOTLB flush will have occurred by the time tasklets get
+ * to run. (List and tasklet being per-CPU has the benefit of accesses not
+ * requiring any locking.)
+ */
+static DEFINE_PER_CPU(struct page_list_head, free_pgt_list);
+static DEFINE_PER_CPU(struct tasklet, free_pgt_tasklet);
+
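+/* Tasklet handler: free all page tables queued on the local CPU's list. */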
+static void cf_check free_queued_pgtables(void *arg)
+{
+    struct page_list_head *list = arg;
+    struct page_info *pg;
+    unsigned int done = 0;
+
+    ASSERT(list == &this_cpu(free_pgt_list));
+
+    while ( (pg = page_list_remove_head(list)) )
+    {
+        free_domheap_page(pg);
+
+        /*
+         * Just to be on the safe side, check for processing softirqs every
+         * once in a while. Generally it is expected that parties queuing
+         * pages for freeing will find a need for preemption before too many
+         * pages can be queued. Granularity of checking is somewhat arbitrary.
+         */
+        if ( !(++done & 0x1ff) )
+            process_pending_softirqs();
+    }
+}
+
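+/*
+ * Move a page table from the domain's tracking list onto the local CPU's
+ * deferred-free list and schedule the tasklet which will free it.
+ */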
+void iommu_queue_free_pgtable(struct domain_iommu *hd, struct page_info *pg)
+{
+    unsigned int cpu = smp_processor_id();
+
+    spin_lock(&hd->arch.pgtables.lock);
+    page_list_del(pg, &hd->arch.pgtables.list);
+    spin_unlock(&hd->arch.pgtables.lock);
+
+    page_list_add_tail(pg, &per_cpu(free_pgt_list, cpu));
+
+    tasklet_schedule(&per_cpu(free_pgt_tasklet, cpu));
+}
+
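+/*
+ * CPU hotplug handling: (re)initialize a CPU's list and tasklet as it comes
+ * up, and hand over any pages still queued on a CPU which has gone down to
+ * the CPU running the notifier.
+ */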
+static int cf_check cpu_callback(
+    struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+    unsigned int cpu = (unsigned long)hcpu;
+    struct page_list_head *list = &per_cpu(free_pgt_list, cpu);
+    struct tasklet *tasklet = &per_cpu(free_pgt_tasklet, cpu);
+
+    switch ( action )
+    {
+    case CPU_DOWN_PREPARE:
+        tasklet_kill(tasklet);
+        break;
+
+    case CPU_DEAD:
+        if ( !page_list_empty(list) )
+        {
+            page_list_splice(list, &this_cpu(free_pgt_list));
+            INIT_PAGE_LIST_HEAD(list);
+            tasklet_schedule(&this_cpu(free_pgt_tasklet));
+        }
+        break;
+
+    case CPU_UP_PREPARE:
+        INIT_PAGE_LIST_HEAD(list);
+        fallthrough;
+    case CPU_DOWN_FAILED:
+        tasklet_init(tasklet, free_queued_pgtables, list);
+        if ( !page_list_empty(list) )
+            tasklet_schedule(tasklet);
+        break;
+    }
+
+    return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+    .notifier_call = cpu_callback,
+};
+
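+/* Prime the BSP's list/tasklet and register the CPU hotplug notifier. */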
+static int __init cf_check bsp_init(void)
+{
+    if ( iommu_enabled )
+    {
+        cpu_callback(&cpu_nfb, CPU_UP_PREPARE,
+                     (void *)(unsigned long)smp_processor_id());
+        register_cpu_notifier(&cpu_nfb);
+    }
+
+    return 0;
+}
+presmp_initcall(bsp_init);
+
bool arch_iommu_use_permitted(const struct domain *d)
{
/*