Replace the boolean 'readonly' parameter of the grant-table access
interfaces with a 'flags' parameter, so that a grant can also specify
arbitrary cache attributes (x86 only).
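As an illustrative sketch (not code from this patch; dom, mfn and page
are placeholder variables), a caller that wants a read-only,
cache-disabled grant can now write:

    int ref = gnttab_grant_foreign_access(dom, mfn,
                                          GTF_readonly | GTF_PCD);
    ...
    gnttab_end_foreign_access(ref, (unsigned long)page);

where the old interface could only express the boolean readonly.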
Signed-off-by: Kieran Mansley <kmansley@solarflare.com>
tpmif_set_connected_state(tp, 0);
if (tp->ring_ref != GRANT_INVALID_REF) {
- gnttab_end_foreign_access(tp->ring_ref, 0,
- (unsigned long)tp->tx);
+ gnttab_end_foreign_access(tp->ring_ref, (unsigned long)tp->tx);
tp->ring_ref = GRANT_INVALID_REF;
tp->tx = NULL;
}
ref,
info->xbdev->otherend_id,
buffer_mfn,
- rq_data_dir(req) );
+ rq_data_dir(req) ? GTF_readonly : 0 );
info->shadow[id].frame[ring_req->nr_segments] =
mfn_to_pfn(buffer_mfn);
/* Free resources associated with old device channel. */
if (info->ring_ref != GRANT_INVALID_REF) {
- gnttab_end_foreign_access(info->ring_ref, 0,
+ gnttab_end_foreign_access(info->ring_ref,
(unsigned long)info->ring.sring);
info->ring_ref = GRANT_INVALID_REF;
info->ring.sring = NULL;
{
int i;
for (i = 0; i < s->req.nr_segments; i++)
- gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
+ gnttab_end_foreign_access(s->req.seg[i].gref, 0UL);
}
static void blkif_recover(struct blkfront_info *info)
req->seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->id].frame[j]),
- rq_data_dir(
- (struct request *)
- info->shadow[req->id].request));
+ rq_data_dir((struct request *)
+ info->shadow[req->id].request) ?
+ GTF_readonly : 0);
info->shadow[req->id].req = *req;
info->ring.req_prod_pvt++;
*/
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
- int readonly)
+ int flags)
{
int ref;
shared[ref].frame = frame;
shared[ref].domid = domid;
wmb();
- shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
+ BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
+ shared[ref].flags = GTF_permit_access | flags;
return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
- unsigned long frame, int readonly)
+ unsigned long frame, int flags)
{
shared[ref].frame = frame;
shared[ref].domid = domid;
wmb();
- shared[ref].flags = GTF_permit_access | (readonly ? GTF_readonly : 0);
+ BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
+ shared[ref].flags = GTF_permit_access | flags;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+int gnttab_end_foreign_access_ref(grant_ref_t ref)
{
u16 flags, nflags;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
-void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
- unsigned long page)
+void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
{
- if (gnttab_end_foreign_access_ref(ref, readonly)) {
+ if (gnttab_end_foreign_access_ref(ref)) {
put_free_entry(ref);
if (page != 0)
free_page(page);
"domain.\n");
BUG();
}
- gnttab_end_foreign_access_ref(
- np->grant_tx_ref[id], GNTMAP_readonly);
+ gnttab_end_foreign_access_ref(np->grant_tx_ref[id]);
gnttab_release_grant_reference(
&np->gref_tx_head, np->grant_tx_ref[id]);
np->grant_tx_ref[id] = GRANT_INVALID_REF;
mfn = virt_to_mfn(data);
gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
- mfn, GNTMAP_readonly);
+ mfn, GTF_readonly);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
mfn = pfn_to_mfn(page_to_pfn(frag->page));
gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
- mfn, GNTMAP_readonly);
+ mfn, GTF_readonly);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = frag->page_offset;
BUG_ON((signed short)ref < 0);
mfn = virt_to_mfn(data);
gnttab_grant_foreign_access_ref(
- ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+ ref, np->xbdev->otherend_id, mfn, GTF_readonly);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
}
pages_flipped++;
} else {
- ret = gnttab_end_foreign_access_ref(ref, 0);
+ ret = gnttab_end_foreign_access_ref(ref);
BUG_ON(!ret);
}
continue;
skb = np->tx_skbs[i];
- gnttab_end_foreign_access_ref(
- np->grant_tx_ref[i], GNTMAP_readonly);
+ gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
gnttab_release_grant_reference(
&np->gref_tx_head, np->grant_tx_ref[i]);
np->grant_tx_ref[i] = GRANT_INVALID_REF;
skb = np->rx_skbs[i];
- if (!gnttab_end_foreign_access_ref(ref, 0))
+ if (!gnttab_end_foreign_access_ref(ref))
{
busy++;
continue;
static void end_access(int ref, void *page)
{
if (ref != GRANT_INVALID_REF)
- gnttab_end_foreign_access(ref, 0, (unsigned long)page);
+ gnttab_end_foreign_access(ref, (unsigned long)page);
}
xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
if (pdev->gnt_ref != INVALID_GRANT_REF)
- gnttab_end_foreign_access(pdev->gnt_ref, 0,
+ gnttab_end_foreign_access(pdev->gnt_ref,
(unsigned long)pdev->sh_info);
pdev->xdev->dev.driver_data = NULL;
};
int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
- int readonly);
+ int flags);
/*
* End access through the given grant reference, iff the grant entry is no
* longer in use. Return 1 if the grant entry was freed, 0 if it is still in
* use.
*/
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
+int gnttab_end_foreign_access_ref(grant_ref_t ref);
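+/*
+ * Illustrative sketch, not part of this interface change: a caller
+ * that cannot free the entry yet simply tries again later, as the
+ * frontends converted by this patch do:
+ *
+ *     if (!gnttab_end_foreign_access_ref(ref))
+ *             busy++;
+ */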
/*
 * Eventually end access through the given grant reference, and once that
 * access has been ended, free the given page too.  Access will be ended
 * immediately iff the grant entry is not in use, otherwise it will happen
 * some time later.  page may be 0, in which case no freeing will occur.
*/
-void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
- unsigned long page);
+void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
- unsigned long frame, int readonly);
+ unsigned long frame, int flags);
void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
unsigned long pfn);
* GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
* GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
* GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
+ * GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
*/
#define _GTF_readonly (2)
#define GTF_readonly (1U<<_GTF_readonly)
#define _GTF_reading (3)
#define GTF_reading (1U<<_GTF_reading)
#define _GTF_writing (4)
#define GTF_writing (1U<<_GTF_writing)
+#define _GTF_PWT (5)
+#define GTF_PWT (1U<<_GTF_PWT)
+#define _GTF_PCD (6)
+#define GTF_PCD (1U<<_GTF_PCD)
+#define _GTF_PAT (7)
+#define GTF_PAT (1U<<_GTF_PAT)
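+
+/*
+ * Illustrative note, not part of the interface definition proper: a
+ * read-only, cache-disabled grant combines subflags as
+ * (GTF_readonly | GTF_PCD); the PAT/PWT/PCD bits mirror the x86 page
+ * table attribute bits and are meaningful on x86 only.
+ */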
/*
* Subflags for GTF_accept_transfer: