goto out;
}
- if ( !spin_trylock(&ctx->rx_lock) )
- {
- ret = FFA_RET_BUSY;
+ ret = ffa_rx_acquire(d);
+ if ( ret != FFA_RET_OK )
goto out;
- }
dst_buf = ctx->rx;
goto out_rx_release;
}
- if ( !ctx->page_count || !ctx->rx_is_free )
- {
- ret = FFA_RET_DENIED;
- goto out_rx_release;
- }
-
spin_lock(&ffa_rx_buffer_lock);
ret = ffa_partition_info_get(uuid, 0, &ffa_sp_count, &src_size);
if ( ret )
- goto out_rx_buf_unlock;
+ goto out_rx_hyp_unlock;
/*
* ffa_partition_info_get() succeeded so we now own the RX buffer we
- * share with the SPMC. We must give it back using ffa_rx_release()
+ * share with the SPMC. We must give it back using ffa_hyp_rx_release()
* once we've copied the content.
*/
}
}
- ctx->rx_is_free = false;
-
out_rx_hyp_release:
- ffa_rx_release();
-out_rx_buf_unlock:
+ ffa_hyp_rx_release();
+out_rx_hyp_unlock:
spin_unlock(&ffa_rx_buffer_lock);
out_rx_release:
- spin_unlock(&ctx->rx_lock);
-
+ /*
+ * The calling VM's RX buffer only contains data to be used by the VM if
+ * the call was successful, in which case the VM has to release the buffer
+ * once it has used the data.
+ * If something went wrong during the call, we have to release the RX
+ * buffer back to the SPMC as the VM will not do it.
+ */
+ if ( ret != FFA_RET_OK )
+ ffa_rx_release(d);
out:
if ( ret )
ffa_set_regs_error(regs, ret);
ret = init_subscribers(count, fpi_size);
out:
- ffa_rx_release();
-
+ ffa_hyp_rx_release();
return ret;
}
#define FFA_ABI_BITNUM(id) ((FFA_ABI_ID(id) - FFA_ABI_MIN) << 1 | \
FFA_ABI_CONV(id))
+/* Constituent memory region descriptor */
+struct ffa_address_range {
+ uint64_t address;
+ uint32_t page_count;
+ uint32_t reserved;
+};
+
+/* Composite memory region descriptor */
+struct ffa_mem_region {
+ uint32_t total_page_count;
+ uint32_t address_range_count;
+ uint64_t reserved;
+ struct ffa_address_range address_range_array[];
+};
+
struct ffa_ctx_notif {
bool enabled;
struct ffa_ctx_notif notif;
/*
* tx_lock is used to serialize access to tx
- * rx_lock is used to serialize access to rx
+ * rx_lock is used to serialize access to rx_is_free
* lock is used for the rest in this struct
*/
spinlock_t tx_lock;
uint32_t ffa_handle_rxtx_map(uint32_t fid, register_t tx_addr,
register_t rx_addr, uint32_t page_count);
uint32_t ffa_handle_rxtx_unmap(void);
-int32_t ffa_handle_rx_release(void);
+int32_t ffa_rx_acquire(struct domain *d);
+int32_t ffa_rx_release(struct domain *d);
void ffa_notif_init(void);
void ffa_notif_init_interrupt(void);
return ffa_get_ret_code(&resp);
}
-static inline int32_t ffa_rx_release(void)
+static inline int32_t ffa_hyp_rx_release(void)
{
return ffa_simple_call(FFA_RX_RELEASE, 0, 0, 0, 0);
}
uint32_t tx_region_offs;
};
+static int32_t ffa_rxtx_map(paddr_t tx_addr, paddr_t rx_addr,
+ uint32_t page_count)
+{
+ return ffa_simple_call(FFA_RXTX_MAP_64, tx_addr, rx_addr, page_count, 0);
+}
+
+static int32_t ffa_rxtx_unmap(uint16_t id)
+{
+ return ffa_simple_call(FFA_RXTX_UNMAP, ((uint64_t)id) << 16, 0, 0, 0);
+}
+
uint32_t ffa_handle_rxtx_map(uint32_t fid, register_t tx_addr,
register_t rx_addr, uint32_t page_count)
{
void *rx;
void *tx;
+ /* The code assumes that we only get one page for now */
+ BUILD_BUG_ON(FFA_MAX_RXTX_PAGE_COUNT != 1);
+
if ( !smccc_is_conv_64(fid) )
{
/*
goto err_put_tx_pg;
rx_pg = get_page_from_gfn(d, gfn_x(gaddr_to_gfn(rx_addr)), &t, P2M_ALLOC);
- if ( !tx_pg )
+ if ( !rx_pg )
goto err_put_tx_pg;
/* Only normal RW RAM for now */
if ( !rx )
goto err_unmap_tx;
+ /*
+ * Transmit the RX/TX buffer information to the SPM if acquire is
+ * supported, as the spec says that otherwise there is no need to
+ * acquire/release/map RX/TX buffers from the SPMC.
+ */
+ if ( ffa_fw_supports_fid(FFA_RX_ACQUIRE) )
+ {
+ struct ffa_endpoint_rxtx_descriptor_1_1 *rxtx_desc;
+ struct ffa_mem_region *mem_reg;
+
+ /* All must fit in our TX buffer */
+ BUILD_BUG_ON(sizeof(*rxtx_desc) + sizeof(*mem_reg) * 2 +
+ sizeof(struct ffa_address_range) * 2 >
+ FFA_MAX_RXTX_PAGE_COUNT * FFA_PAGE_SIZE);
+
+ spin_lock(&ffa_tx_buffer_lock);
+ rxtx_desc = ffa_tx;
+
+ /*
+ * We have only one page for each so we pack everything:
+ * - rx region descriptor
+ * - rx region range
+ * - tx region descriptor
+ * - tx region range
+ */
+ rxtx_desc->sender_id = ffa_get_vm_id(d);
+ rxtx_desc->reserved = 0;
+ rxtx_desc->rx_region_offs = sizeof(*rxtx_desc);
+ rxtx_desc->tx_region_offs = sizeof(*rxtx_desc) +
+ offsetof(struct ffa_mem_region,
+ address_range_array[1]);
+
+ /* rx buffer */
+ mem_reg = ffa_tx + sizeof(*rxtx_desc);
+ mem_reg->total_page_count = 1;
+ mem_reg->address_range_count = 1;
+ mem_reg->reserved = 0;
+
+ mem_reg->address_range_array[0].address = page_to_maddr(rx_pg);
+ mem_reg->address_range_array[0].page_count = 1;
+ mem_reg->address_range_array[0].reserved = 0;
+
+ /* tx buffer */
+ mem_reg = ffa_tx + rxtx_desc->tx_region_offs;
+ mem_reg->total_page_count = 1;
+ mem_reg->address_range_count = 1;
+ mem_reg->reserved = 0;
+
+ mem_reg->address_range_array[0].address = page_to_maddr(tx_pg);
+ mem_reg->address_range_array[0].page_count = 1;
+ mem_reg->address_range_array[0].reserved = 0;
+
+ ret = ffa_rxtx_map(0, 0, 0);
+
+ spin_unlock(&ffa_tx_buffer_lock);
+
+ if ( ret != FFA_RET_OK )
+ goto err_unmap_rx;
+ }
+
ctx->rx = rx;
ctx->tx = tx;
ctx->rx_pg = rx_pg;
ctx->rx_is_free = true;
return FFA_RET_OK;
+err_unmap_rx:
+ unmap_domain_page_global(rx);
err_unmap_tx:
unmap_domain_page_global(tx);
err_put_rx_pg:
return ret;
}
-static void rxtx_unmap(struct ffa_ctx *ctx)
+static uint32_t rxtx_unmap(struct domain *d)
{
+ struct ffa_ctx *ctx = d->arch.tee;
+
+ if ( !ctx->page_count )
+ return FFA_RET_INVALID_PARAMETERS;
+
+ if ( ffa_fw_supports_fid(FFA_RX_ACQUIRE) )
+ {
+ uint32_t ret;
+
+ ret = ffa_rxtx_unmap(ffa_get_vm_id(d));
+ if ( ret != FFA_RET_OK )
+ return ret;
+ }
+
unmap_domain_page_global(ctx->rx);
unmap_domain_page_global(ctx->tx);
put_page(ctx->rx_pg);
ctx->tx_pg = NULL;
ctx->page_count = 0;
ctx->rx_is_free = false;
+
+ return FFA_RET_OK;
}
uint32_t ffa_handle_rxtx_unmap(void)
{
- struct domain *d = current->domain;
+ return rxtx_unmap(current->domain);
+}
+
+int32_t ffa_rx_acquire(struct domain *d)
+{
+ int32_t ret = FFA_RET_OK;
struct ffa_ctx *ctx = d->arch.tee;
- if ( !ctx->rx )
- return FFA_RET_INVALID_PARAMETERS;
+ spin_lock(&ctx->rx_lock);
- rxtx_unmap(ctx);
+ if ( !ctx->page_count )
+ {
+ ret = FFA_RET_DENIED;
+ goto out;
+ }
- return FFA_RET_OK;
+ if ( !ctx->rx_is_free )
+ {
+ ret = FFA_RET_BUSY;
+ goto out;
+ }
+
+ if ( ffa_fw_supports_fid(FFA_RX_ACQUIRE) )
+ {
+ ret = ffa_simple_call(FFA_RX_ACQUIRE, ffa_get_vm_id(d), 0, 0, 0);
+ if ( ret != FFA_RET_OK )
+ goto out;
+ }
+ ctx->rx_is_free = false;
+out:
+ spin_unlock(&ctx->rx_lock);
+
+ return ret;
}
-int32_t ffa_handle_rx_release(void)
+int32_t ffa_rx_release(struct domain *d)
{
int32_t ret = FFA_RET_DENIED;
- struct domain *d = current->domain;
struct ffa_ctx *ctx = d->arch.tee;
- if ( !spin_trylock(&ctx->rx_lock) )
- return FFA_RET_BUSY;
+ spin_lock(&ctx->rx_lock);
if ( !ctx->page_count || ctx->rx_is_free )
goto out;
+
+ if ( ffa_fw_supports_fid(FFA_RX_ACQUIRE) )
+ {
+ ret = ffa_simple_call(FFA_RX_RELEASE, ffa_get_vm_id(d), 0, 0, 0);
+ if ( ret != FFA_RET_OK )
+ goto out;
+ }
ret = FFA_RET_OK;
ctx->rx_is_free = true;
out:
return ret;
}
-static int32_t ffa_rxtx_map(paddr_t tx_addr, paddr_t rx_addr,
- uint32_t page_count)
-{
- return ffa_simple_call(FFA_RXTX_MAP_64, tx_addr, rx_addr, page_count, 0);
-}
-
-static int32_t ffa_rxtx_unmap(void)
-{
- return ffa_simple_call(FFA_RXTX_UNMAP, 0, 0, 0, 0);
-}
-
void ffa_rxtx_domain_destroy(struct domain *d)
{
- struct ffa_ctx *ctx = d->arch.tee;
-
- if ( ctx->rx )
- rxtx_unmap(ctx);
+ rxtx_unmap(d);
}
void ffa_rxtx_destroy(void)
}
if ( need_unmap )
- ffa_rxtx_unmap();
+ ffa_rxtx_unmap(0);
}
bool ffa_rxtx_init(void)