obj-$(CONFIG_FFA) += ffa.o
obj-$(CONFIG_FFA) += ffa_shm.o
obj-$(CONFIG_FFA) += ffa_partinfo.o
+obj-$(CONFIG_FFA) += ffa_rxtx.o
obj-y += tee.o
obj-$(CONFIG_OPTEE) += optee.o
#include "ffa_private.h"
-/*
- * Structs below ending with _1_0 are defined in FF-A-1.0-REL and
- * structs ending with _1_1 are defined in FF-A-1.1-REL0.
- */
-
-/* Endpoint RX/TX descriptor */
-struct ffa_endpoint_rxtx_descriptor_1_0 {
- uint16_t sender_id;
- uint16_t reserved;
- uint32_t rx_range_count;
- uint32_t tx_range_count;
-};
-
-struct ffa_endpoint_rxtx_descriptor_1_1 {
- uint16_t sender_id;
- uint16_t reserved;
- uint32_t rx_region_offs;
- uint32_t tx_region_offs;
-};
-
/* Negotiated FF-A version to use with the SPMC */
static uint32_t __ro_after_init ffa_version;
return !ret;
}
-static int32_t ffa_rxtx_map(paddr_t tx_addr, paddr_t rx_addr,
- uint32_t page_count)
-{
- return ffa_simple_call(FFA_RXTX_MAP_64, tx_addr, rx_addr, page_count, 0);
-}
-
static void handle_version(struct cpu_user_regs *regs)
{
struct domain *d = current->domain;
ffa_set_regs(regs, vers, 0, 0, 0, 0, 0, 0, 0);
}
-static uint32_t ffa_handle_rxtx_map(uint32_t fid, register_t tx_addr,
- register_t rx_addr, uint32_t page_count)
-{
- uint32_t ret = FFA_RET_INVALID_PARAMETERS;
- struct domain *d = current->domain;
- struct ffa_ctx *ctx = d->arch.tee;
- struct page_info *tx_pg;
- struct page_info *rx_pg;
- p2m_type_t t;
- void *rx;
- void *tx;
-
- if ( !smccc_is_conv_64(fid) )
- {
- /*
- * Calls using the 32-bit calling convention must ignore the upper
- * 32 bits in the argument registers.
- */
- tx_addr &= UINT32_MAX;
- rx_addr &= UINT32_MAX;
- }
-
- if ( page_count > FFA_MAX_RXTX_PAGE_COUNT )
- {
- printk(XENLOG_ERR "ffa: RXTX_MAP: error: %u pages requested (limit %u)\n",
- page_count, FFA_MAX_RXTX_PAGE_COUNT);
- return FFA_RET_INVALID_PARAMETERS;
- }
-
- /* Already mapped */
- if ( ctx->rx )
- return FFA_RET_DENIED;
-
- tx_pg = get_page_from_gfn(d, gfn_x(gaddr_to_gfn(tx_addr)), &t, P2M_ALLOC);
- if ( !tx_pg )
- return FFA_RET_INVALID_PARAMETERS;
-
- /* Only normal RW RAM for now */
- if ( t != p2m_ram_rw )
- goto err_put_tx_pg;
-
- rx_pg = get_page_from_gfn(d, gfn_x(gaddr_to_gfn(rx_addr)), &t, P2M_ALLOC);
- if ( !rx_pg )
- goto err_put_tx_pg;
-
- /* Only normal RW RAM for now */
- if ( t != p2m_ram_rw )
- goto err_put_rx_pg;
-
- tx = __map_domain_page_global(tx_pg);
- if ( !tx )
- goto err_put_rx_pg;
-
- rx = __map_domain_page_global(rx_pg);
- if ( !rx )
- goto err_unmap_tx;
-
- ctx->rx = rx;
- ctx->tx = tx;
- ctx->rx_pg = rx_pg;
- ctx->tx_pg = tx_pg;
- ctx->page_count = page_count;
- ctx->rx_is_free = true;
- return FFA_RET_OK;
-
-err_unmap_tx:
- unmap_domain_page_global(tx);
-err_put_rx_pg:
- put_page(rx_pg);
-err_put_tx_pg:
- put_page(tx_pg);
-
- return ret;
-}
-
-static void rxtx_unmap(struct ffa_ctx *ctx)
-{
- unmap_domain_page_global(ctx->rx);
- unmap_domain_page_global(ctx->tx);
- put_page(ctx->rx_pg);
- put_page(ctx->tx_pg);
- ctx->rx = NULL;
- ctx->tx = NULL;
- ctx->rx_pg = NULL;
- ctx->tx_pg = NULL;
- ctx->page_count = 0;
- ctx->rx_is_free = false;
-}
-
-static uint32_t ffa_handle_rxtx_unmap(void)
-{
- struct domain *d = current->domain;
- struct ffa_ctx *ctx = d->arch.tee;
-
- if ( !ctx->rx )
- return FFA_RET_INVALID_PARAMETERS;
-
- rxtx_unmap(ctx);
-
- return FFA_RET_OK;
-}
-
-static int32_t ffa_handle_rx_release(void)
-{
- int32_t ret = FFA_RET_DENIED;
- struct domain *d = current->domain;
- struct ffa_ctx *ctx = d->arch.tee;
-
- if ( !spin_trylock(&ctx->rx_lock) )
- return FFA_RET_BUSY;
-
- if ( !ctx->page_count || ctx->rx_is_free )
- goto out;
- ret = FFA_RET_OK;
- ctx->rx_is_free = true;
-out:
- spin_unlock(&ctx->rx_lock);
-
- return ret;
-}
-
static void handle_msg_send_direct_req(struct cpu_user_regs *regs, uint32_t fid)
{
struct arm_smccc_1_2_regs arg = { .a0 = fid, };
if ( !ctx )
return 0;
- if ( ctx->rx )
- rxtx_unmap(ctx);
+ ffa_rxtx_domain_destroy(d);
ffa_domain_teardown_continue(ctx, true /* first_time */);
static bool ffa_probe(void)
{
uint32_t vers;
- int e;
unsigned int major_vers;
unsigned int minor_vers;
!check_mandatory_feature(FFA_MSG_SEND_DIRECT_REQ_32) )
return false;
- ffa_rx = alloc_xenheap_pages(get_order_from_pages(FFA_RXTX_PAGE_COUNT), 0);
- if ( !ffa_rx )
+ if ( !ffa_rxtx_init() )
return false;
- ffa_tx = alloc_xenheap_pages(get_order_from_pages(FFA_RXTX_PAGE_COUNT), 0);
- if ( !ffa_tx )
- goto err_free_ffa_rx;
-
- e = ffa_rxtx_map(__pa(ffa_tx), __pa(ffa_rx), FFA_RXTX_PAGE_COUNT);
- if ( e )
- {
- printk(XENLOG_ERR "ffa: Failed to map rxtx: error %d\n", e);
- goto err_free_ffa_tx;
- }
ffa_version = vers;
if ( !ffa_partinfo_init() )
- goto err_free_ffa_tx;
+ goto err_rxtx_destroy;
INIT_LIST_HEAD(&ffa_teardown_head);
init_timer(&ffa_teardown_timer, ffa_teardown_timer_callback, NULL, 0);
return true;
-err_free_ffa_tx:
- free_xenheap_pages(ffa_tx, 0);
- ffa_tx = NULL;
-err_free_ffa_rx:
- free_xenheap_pages(ffa_rx, 0);
- ffa_rx = NULL;
+err_rxtx_destroy:
+ ffa_rxtx_destroy();
ffa_version = 0;
return false;
uint32_t w4, uint32_t w5, uint32_t *count,
uint32_t *fpi_size);
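+
+/* Helpers and guest-call handlers provided by ffa_rxtx.c */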
+bool ffa_rxtx_init(void);
+void ffa_rxtx_destroy(void);
+void ffa_rxtx_domain_destroy(struct domain *d);
+uint32_t ffa_handle_rxtx_map(uint32_t fid, register_t tx_addr,
+ register_t rx_addr, uint32_t page_count);
+uint32_t ffa_handle_rxtx_unmap(void);
+int32_t ffa_handle_rx_release(void);
static inline uint16_t ffa_get_vm_id(const struct domain *d)
{
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024 Linaro Limited
+ */
+
+#include <xen/const.h>
+#include <xen/domain_page.h>
+#include <xen/mm.h>
+#include <xen/sizes.h>
+#include <xen/types.h>
+
+#include <asm/smccc.h>
+#include <asm/regs.h>
+
+#include "ffa_private.h"
+
+/* Endpoint RX/TX descriptor defined in FF-A-1.0-REL */
+struct ffa_endpoint_rxtx_descriptor_1_0 {
+ uint16_t sender_id;
+ uint16_t reserved;
+ uint32_t rx_range_count;
+ uint32_t tx_range_count;
+};
+
+/* Endpoint RX/TX descriptor defined in FF-A-1.1-REL0 */
+struct ffa_endpoint_rxtx_descriptor_1_1 {
+ uint16_t sender_id;
+ uint16_t reserved;
+ uint32_t rx_region_offs;
+ uint32_t tx_region_offs;
+};
+
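+/*
+ * Handle a guest's FFA_RXTX_MAP call: validate and pin the pages backing
+ * the guest's TX and RX buffers, map them into Xen's address space and
+ * record them in the domain's FF-A context. The buffers are not
+ * registered with the SPMC; Xen uses its own buffer pair when
+ * communicating with it.
+ */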
+uint32_t ffa_handle_rxtx_map(uint32_t fid, register_t tx_addr,
+ register_t rx_addr, uint32_t page_count)
+{
+ uint32_t ret = FFA_RET_INVALID_PARAMETERS;
+ struct domain *d = current->domain;
+ struct ffa_ctx *ctx = d->arch.tee;
+ struct page_info *tx_pg;
+ struct page_info *rx_pg;
+ p2m_type_t t;
+ void *rx;
+ void *tx;
+
+ if ( !smccc_is_conv_64(fid) )
+ {
+ /*
+ * Calls using the 32-bit calling convention must ignore the upper
+ * 32 bits in the argument registers.
+ */
+ tx_addr &= UINT32_MAX;
+ rx_addr &= UINT32_MAX;
+ }
+
+ if ( page_count > FFA_MAX_RXTX_PAGE_COUNT )
+ {
+ printk(XENLOG_ERR "ffa: RXTX_MAP: error: %u pages requested (limit %u)\n",
+ page_count, FFA_MAX_RXTX_PAGE_COUNT);
+ return FFA_RET_INVALID_PARAMETERS;
+ }
+
+ /* Already mapped */
+ if ( ctx->rx )
+ return FFA_RET_DENIED;
+
+ tx_pg = get_page_from_gfn(d, gfn_x(gaddr_to_gfn(tx_addr)), &t, P2M_ALLOC);
+ if ( !tx_pg )
+ return FFA_RET_INVALID_PARAMETERS;
+
+ /* Only normal RW RAM for now */
+ if ( t != p2m_ram_rw )
+ goto err_put_tx_pg;
+
+ rx_pg = get_page_from_gfn(d, gfn_x(gaddr_to_gfn(rx_addr)), &t, P2M_ALLOC);
+ if ( !rx_pg )
+ goto err_put_tx_pg;
+
+ /* Only normal RW RAM for now */
+ if ( t != p2m_ram_rw )
+ goto err_put_rx_pg;
+
+ tx = __map_domain_page_global(tx_pg);
+ if ( !tx )
+ goto err_put_rx_pg;
+
+ rx = __map_domain_page_global(rx_pg);
+ if ( !rx )
+ goto err_unmap_tx;
+
+ ctx->rx = rx;
+ ctx->tx = tx;
+ ctx->rx_pg = rx_pg;
+ ctx->tx_pg = tx_pg;
+ ctx->page_count = page_count;
+ ctx->rx_is_free = true;
+ return FFA_RET_OK;
+
+err_unmap_tx:
+ unmap_domain_page_global(tx);
+err_put_rx_pg:
+ put_page(rx_pg);
+err_put_tx_pg:
+ put_page(tx_pg);
+
+ return ret;
+}
+
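+/*
+ * Unmap a guest's RX/TX buffers from Xen, drop the page references and
+ * reset the buffer state in the FF-A context.
+ */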
+static void rxtx_unmap(struct ffa_ctx *ctx)
+{
+ unmap_domain_page_global(ctx->rx);
+ unmap_domain_page_global(ctx->tx);
+ put_page(ctx->rx_pg);
+ put_page(ctx->tx_pg);
+ ctx->rx = NULL;
+ ctx->tx = NULL;
+ ctx->rx_pg = NULL;
+ ctx->tx_pg = NULL;
+ ctx->page_count = 0;
+ ctx->rx_is_free = false;
+}
+
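+/* Handle a guest's FFA_RXTX_UNMAP call, failing if no buffers are mapped */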
+uint32_t ffa_handle_rxtx_unmap(void)
+{
+ struct domain *d = current->domain;
+ struct ffa_ctx *ctx = d->arch.tee;
+
+ if ( !ctx->rx )
+ return FFA_RET_INVALID_PARAMETERS;
+
+ rxtx_unmap(ctx);
+
+ return FFA_RET_OK;
+}
+
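+/*
+ * Handle a guest's FFA_RX_RELEASE call: hand ownership of the RX buffer
+ * back to the producer so that a new message can be written to it.
+ * Returns FFA_RET_BUSY if the buffer is in use on another CPU.
+ */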
+int32_t ffa_handle_rx_release(void)
+{
+ int32_t ret = FFA_RET_DENIED;
+ struct domain *d = current->domain;
+ struct ffa_ctx *ctx = d->arch.tee;
+
+ if ( !spin_trylock(&ctx->rx_lock) )
+ return FFA_RET_BUSY;
+
+ if ( !ctx->page_count || ctx->rx_is_free )
+ goto out;
+ ret = FFA_RET_OK;
+ ctx->rx_is_free = true;
+out:
+ spin_unlock(&ctx->rx_lock);
+
+ return ret;
+}
+
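+/* Register Xen's TX/RX buffer pair with the SPMC */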
+static int32_t ffa_rxtx_map(paddr_t tx_addr, paddr_t rx_addr,
+ uint32_t page_count)
+{
+ return ffa_simple_call(FFA_RXTX_MAP_64, tx_addr, rx_addr, page_count, 0);
+}
+
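+/* Unregister Xen's TX/RX buffer pair from the SPMC */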
+static int32_t ffa_rxtx_unmap(void)
+{
+ return ffa_simple_call(FFA_RXTX_UNMAP, 0, 0, 0, 0);
+}
+
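+/* Unmap a guest's RX/TX buffers, if mapped, when the domain is destroyed */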
+void ffa_rxtx_domain_destroy(struct domain *d)
+{
+ struct ffa_ctx *ctx = d->arch.tee;
+
+ if ( ctx->rx )
+ rxtx_unmap(ctx);
+}
+
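+/*
+ * Free Xen's TX/RX buffer pair, unmapping it from the SPMC first since
+ * the pages must not be reused while the SPMC can still access them.
+ */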
+void ffa_rxtx_destroy(void)
+{
+ if ( ffa_tx && ffa_rx )
+ ffa_rxtx_unmap();
+
+ if ( ffa_tx )
+ {
+ free_xenheap_pages(ffa_tx, 0);
+ ffa_tx = NULL;
+ }
+ if ( ffa_rx )
+ {
+ free_xenheap_pages(ffa_rx, 0);
+ ffa_rx = NULL;
+ }
+}
+
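+/*
+ * Allocate Xen's TX/RX buffer pair and register it with the SPMC. Called
+ * once while probing for FF-A support; returns false on failure.
+ */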
+bool ffa_rxtx_init(void)
+{
+ int e;
+
+ ffa_rx = alloc_xenheap_pages(get_order_from_pages(FFA_RXTX_PAGE_COUNT), 0);
+ if ( !ffa_rx )
+ return false;
+
+ ffa_tx = alloc_xenheap_pages(get_order_from_pages(FFA_RXTX_PAGE_COUNT), 0);
+ if ( !ffa_tx )
+ goto err;
+
+ e = ffa_rxtx_map(__pa(ffa_tx), __pa(ffa_rx), FFA_RXTX_PAGE_COUNT);
+ if ( e )
+ {
+ printk(XENLOG_ERR "ffa: Failed to map rxtx: error %d\n", e);
+ goto err;
+ }
+ return true;
+
+err:
+ ffa_rxtx_destroy();
+
+ return false;
+}