return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}
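+/* The page backing segment @seg of pending request @req. */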
+#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
+
static inline unsigned long vaddr(pending_req_t *req, int seg)
{
- unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
+ unsigned long pfn = page_to_pfn(pending_page(req, seg));
return (unsigned long)pfn_to_kaddr(pfn);
}
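Every conversion in this patch leans on the same identity; the sketch below
(kernel context; pfn_round_trip() is illustrative only, not part of the
patch) shows why page_to_pfn() on the tracked page can replace the old
__pa(vaddr) >> PAGE_SHIFT expressions:

	/*
	 * For a page with a direct-map kernel address, translating to
	 * that address and back returns the frame number we started
	 * from, so the old and the new code compute the same pfn.
	 */
	static inline unsigned long pfn_round_trip(struct page *pg)
	{
		unsigned long kvaddr =
			(unsigned long)pfn_to_kaddr(page_to_pfn(pg));

		return __pa(kvaddr) >> PAGE_SHIFT; /* == page_to_pfn(pg) */
	}

Working from the struct page also drops the implicit assumption that every
pending page has a direct-map virtual address to begin with.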
handle = pending_handle(req, i);
if (handle == BLKBACK_INVALID_HANDLE)
continue;
- blkback_pagemap_clear(virt_to_page(vaddr(req, i)));
+ blkback_pagemap_clear(pending_page(req, i));
gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
GNTMAP_host_map, handle);
pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
ret |= 1;
} else {
blkback_pagemap_set(vaddr_pagenr(pending_req, i),
- virt_to_page(vaddr(pending_req, i)),
+ pending_page(pending_req, i),
blkif->domid, req->handle,
req->seg[i].gref);
}
if (ret)
continue;
- set_phys_to_machine(__pa(vaddr(
- pending_req, i)) >> PAGE_SHIFT,
+ set_phys_to_machine(
+ page_to_pfn(pending_page(pending_req, i)),
FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
seg[i].buf = map[i].dev_bus_addr |
(req->seg[i].first_sect << 9);
while ((bio == NULL) ||
(bio_add_page(bio,
- virt_to_page(vaddr(pending_req, i)),
+ pending_page(pending_req, i),
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
if (bio) {
#define BLKBACK_INVALID_HANDLE (~0)
static struct page **foreign_pages[MAX_DYNAMIC_MEM];
-static inline unsigned long idx_to_kaddr(
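+/* Page backing segment @sg_idx of request @req_idx in mmap area @mmap_idx. */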
+static inline struct page *idx_to_page(
unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
{
unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
- unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
+ return foreign_pages[mmap_idx][arr_idx];
+}
+
+static inline unsigned long idx_to_kaddr(
+ unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
+{
+ unsigned long pfn = page_to_pfn(idx_to_page(mmap_idx, req_idx, sg_idx));
return (unsigned long)pfn_to_kaddr(pfn);
}
mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
kvaddr = idx_to_kaddr(mmap_idx, pending_idx, seg);
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, seg);
ClearPageReserved(pg);
info->foreign_map.map[offset + RING_PAGES] = NULL;
struct grant_handle_pair *khandle;
uint64_t ptep;
int ret, mmap_idx;
- unsigned long kvaddr, uvaddr;
+ unsigned long uvaddr;
tap_blkif_t *info;
struct mm_struct *mm;
mmap_idx = req->mem_idx;
for (i = 0; i < req->nr_pages; i++) {
- kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
khandle = &pending_handle(mmap_idx, k_idx, i);
invcount++;
set_phys_to_machine(
- __pa(idx_to_kaddr(mmap_idx, k_idx, i))
- >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+ page_to_pfn(idx_to_page(mmap_idx, k_idx, i)),
+ INVALID_P2M_ENTRY);
}
if (khandle->user != INVALID_GRANT_HANDLE) {
for (j = 0; j < pending_req->nr_pages; j++) {
- unsigned long kvaddr, uvaddr;
+ unsigned long uvaddr;
struct page *pg;
int offset;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, j);
ClearPageReserved(pg);
offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
info->foreign_map.map[offset] = NULL;
for (i = 0; i < (nseg*2); i+=2) {
unsigned long uvaddr;
- unsigned long kvaddr;
unsigned long offset;
struct page *pg;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
if (unlikely(map[i].status != 0)) {
WPRINTK("invalid kernel buffer -- "
if (ret)
continue;
- set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
+ pg = idx_to_page(mmap_idx, pending_idx, i/2);
+ set_phys_to_machine(page_to_pfn(pg),
FOREIGN_FRAME(map[i].dev_bus_addr
>> PAGE_SHIFT));
offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
info->foreign_map.map[offset] = pg;
}
} else {
for (i = 0; i < nseg; i++) {
unsigned long uvaddr;
- unsigned long kvaddr;
unsigned long offset;
struct page *pg;
uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
if (unlikely(map[i].status != 0)) {
WPRINTK("invalid kernel buffer -- "
continue;
offset = (uvaddr - info->rings_vstart) >> PAGE_SHIFT;
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, i);
info->foreign_map.map[offset] = pg;
}
}
down_write(&mm->mmap_sem);
/* Mark mapped pages as reserved: */
for (i = 0; i < req->nr_segments; i++) {
- unsigned long kvaddr;
struct page *pg;
- kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
- pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ pg = idx_to_page(mmap_idx, pending_idx, i);
SetPageReserved(pg);
if (xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long uvaddr = MMAP_VADDR(info->user_vstart,
int blktap_request_pool_shrink(void);
struct blktap_request *blktap_request_allocate(struct blktap *);
void blktap_request_free(struct blktap *, struct blktap_request *);
-unsigned long request_to_kaddr(struct blktap_request *, int);
+struct page *request_to_page(struct blktap_request *, int);
+
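+/* Kernel vaddr of the segment's backing page, for the remaining callers
+ * that need a virtual address rather than the page itself. */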
+static inline unsigned long
+request_to_kaddr(struct blktap_request *req, int seg)
+{
+ unsigned long pfn = page_to_pfn(request_to_page(req, seg));
+ return (unsigned long)pfn_to_kaddr(pfn);
+}
#endif
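A hedged usage sketch of the split above (example_caller() is hypothetical;
the helpers and the two calls it makes all appear elsewhere in this patch):
page consumers call request_to_page() directly, and only the remaining
vaddr users go through the request_to_kaddr() wrapper:

	static void example_caller(struct blktap_request *req, int seg)
	{
		struct page *pg = request_to_page(req, seg);
		unsigned long kvaddr = request_to_kaddr(req, seg);

		SetPageReserved(pg);		/* page-based API */
		flush_tlb_kernel_page(kvaddr);	/* vaddr-based API */
	}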
down_write(&tap->ring.vma->vm_mm->mmap_sem);
for (i = 0; i < request->nr_pages; i++) {
+ kvaddr = request_to_kaddr(request, i);
BTDBG("request: %p, seg: %d, kvaddr: 0x%08lx, khandle: %u, "
"uvaddr: 0x%08lx, uhandle: %u\n", request, i,
- request_to_kaddr(request, i),
- request->handles[i].kernel,
+ kvaddr, request->handles[i].kernel,
MMAP_VADDR(tap->ring.user_vstart, usr_idx, i),
request->handles[i].user);
if (!xen_feature(XENFEAT_auto_translated_physmap) &&
request->handles[i].kernel == INVALID_GRANT_HANDLE) {
- kvaddr = request_to_kaddr(request, i);
blktap_umap_uaddr(&init_mm, kvaddr);
flush_tlb_kernel_page(kvaddr);
set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
table->cnt++;
/* enable chained tap devices */
- tap_page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ tap_page = request_to_page(request, seg);
set_page_private(tap_page, page_private(page));
SetPageBlkback(tap_page);
struct page *page;
int i, grant, err, usr_idx;
struct blktap_ring *ring;
- unsigned long uvaddr, kvaddr, foreign_mfn;
+ unsigned long uvaddr, foreign_mfn;
if (!table->cnt)
return 0;
continue;
uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
- kvaddr = request_to_kaddr(request, i);
if (unlikely(table->grants[grant].status)) {
BTERR("invalid kernel buffer: could not remap it\n");
if (err)
continue;
- page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ page = request_to_page(request, i);
if (!xen_feature(XENFEAT_auto_translated_physmap))
- set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
+ set_phys_to_machine(page_to_pfn(page),
FOREIGN_FRAME(foreign_mfn));
else if (vm_insert_page(ring->vma, uvaddr, page))
err |= 1;
BTDBG("pending_req: %p, seg: %d, page: %p, "
- "kvaddr: 0x%08lx, khandle: %u, uvaddr: 0x%08lx, "
+ "kvaddr: 0x%p, khandle: %u, uvaddr: 0x%08lx, "
"uhandle: %u\n", request, i, page,
- kvaddr, request->handles[i].kernel,
+ pfn_to_kaddr(page_to_pfn(page)),
+ request->handles[i].kernel,
uvaddr, request->handles[i].user);
}
gnttab_set_map_op(&map, kvaddr, flags, gref, domid);
/* enable chained tap devices */
- tap_page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ tap_page = request_to_page(request, seg);
set_page_private(tap_page, page_private(page));
SetPageBlkback(tap_page);
struct scatterlist *sg;
struct blktap_grant_table table;
unsigned int fsect, lsect, nr_sects;
- unsigned long offset, uvaddr, kvaddr;
+ unsigned long offset, uvaddr;
struct blkif_request blkif_req, *target;
err = -1;
}
uvaddr = MMAP_VADDR(ring->user_vstart, usr_idx, i);
- kvaddr = request_to_kaddr(request, i);
offset = (uvaddr - ring->vma->vm_start) >> PAGE_SHIFT;
- page = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
+ page = request_to_page(request, i);
ring->foreign_map.map[offset] = page;
SetPageReserved(page);
BTDBG("mapped uaddr %08lx to page %p pfn 0x%lx\n",
- uvaddr, page, __pa(kvaddr) >> PAGE_SHIFT);
+ uvaddr, page, page_to_pfn(page));
BTDBG("offset: 0x%08lx, pending_req: %p, seg: %d, "
- "page: %p, kvaddr: 0x%08lx, uvaddr: 0x%08lx\n",
+ "page: %p, kvaddr: %p, uvaddr: 0x%08lx\n",
offset, request, i,
- page, kvaddr, uvaddr);
+ page, pfn_to_kaddr(page_to_pfn(page)), uvaddr);
request->nr_pages++;
}
kfree(bucket);
}
-unsigned long
-request_to_kaddr(struct blktap_request *req, int seg)
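+/* The preallocated foreign page backing segment @seg of @req. */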
+struct page *
+request_to_page(struct blktap_request *req, int seg)
{
struct blktap_request_handle *handle = blktap_request_to_handle(req);
int idx = handle->slot * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
- unsigned long pfn = page_to_pfn(handle->bucket->foreign_pages[idx]);
- return (unsigned long)pfn_to_kaddr(pfn);
+ return handle->bucket->foreign_pages[idx];
}
int
kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
user_vaddr = get_user_vaddr(vma, i);
- page = pfn_to_page(__pa(kernel_vaddr) >> PAGE_SHIFT);
+ page = private_data->foreign_pages[slot_index + i];
gnttab_set_map_op(&op, kernel_vaddr, flags,
private_data->grants[slot_index+i]
GNTDEV_SLOT_NOT_YET_MAPPED;
/* Invalidate the physical to machine mapping for this page. */
- set_phys_to_machine(__pa(get_kernel_vaddr(private_data,
- slot_index))
- >> PAGE_SHIFT, INVALID_P2M_ENTRY);
+ set_phys_to_machine(
+ page_to_pfn(private_data->foreign_pages[slot_index]),
+ INVALID_P2M_ENTRY);
} else {
pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
netif_put(netif);
} else {
- set_phys_to_machine(
- __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
+ set_phys_to_machine(idx_to_pfn(pending_idx),
FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
grant_tx_handle[pending_idx] = mop->handle;
}
/* Check error status: if okay then remember grant handle. */
newerr = (++mop)->status;
if (likely(!newerr)) {
- set_phys_to_machine(
- __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
+ set_phys_to_machine(idx_to_pfn(pending_idx),
FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
grant_tx_handle[pending_idx] = mop->handle;
/* Had a previous error? Invalidate this fragment. */
&pending_inuse_head);
txp = &pending_tx_info[pending_idx].req;
- frag->page = virt_to_page(idx_to_kaddr(pending_idx));
+ frag->page = mmap_pages[pending_idx];
frag->size = txp->size;
frag->page_offset = txp->offset;
BUG_ON(err);
for (i = 0; i < nr_segments; i++) {
+ struct page *pg;
+
if (unlikely(map[i].status != 0)) {
printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
map[i].handle = SCSIBACK_INVALID_HANDLE;
if (err)
continue;
- set_phys_to_machine(__pa(vaddr(
- pending_req, i)) >> PAGE_SHIFT,
+ pg = pending_pages[vaddr_pagenr(pending_req, i)];
+
+ set_phys_to_machine(page_to_pfn(pg),
FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
- pending_req->sgl[i].page = virt_to_page(vaddr(pending_req, i));
+ pending_req->sgl[i].page = pg;
pending_req->sgl[i].offset = ring_req->seg[i].offset;
pending_req->sgl[i].length = ring_req->seg[i].length;
data_len += pending_req->sgl[i].length;