struct {
MultiFDSendParams *params;
- /* array of pages to sent */
- MultiFDPages_t *pages;
+ MultiFDSendData *data;
/*
* Global number of generated multifd packets.
*
return sizeof(MultiFDPages_t) + n * sizeof(ram_addr_t);
}
+static MultiFDSendData *multifd_send_data_alloc(void)
+{
+ size_t max_payload_size, size_minus_payload;
+
+ /*
+ * MultiFDPages_t has a flexible array at the end, so account for it
+ * when allocating MultiFDSendData. Use MAX() in case other types
+ * added to the union in the future are larger than
+ * (MultiFDPages_t + flex array).
+ */
+ max_payload_size = MAX(multifd_ram_payload_size(), sizeof(MultiFDPayload));
+
+ /*
+ * Account for any holes the compiler might insert. We can't pack
+ * the structure because that misaligns the members and triggers
+ * -Waddress-of-packed-member.
+ */
+ size_minus_payload = sizeof(MultiFDSendData) - sizeof(MultiFDPayload);
+
+ return g_malloc0(size_minus_payload + max_payload_size);
+}
+
static bool multifd_use_packets(void)
{
return !migrate_mapped_ram();
static void multifd_set_file_bitmap(MultiFDSendParams *p)
{
- MultiFDPages_t *pages = p->pages;
+ MultiFDPages_t *pages = &p->data->u.ram;
assert(pages->block);
static void multifd_send_prepare_iovs(MultiFDSendParams *p)
{
- MultiFDPages_t *pages = p->pages;
+ MultiFDPages_t *pages = &p->data->u.ram;
uint32_t page_size = multifd_ram_page_size();
for (int i = 0; i < pages->normal_num; i++) {
return msg.id;
}
-static MultiFDPages_t *multifd_pages_init(uint32_t n)
-{
- return g_malloc0(multifd_ram_payload_size());
-}
-
-static void multifd_pages_clear(MultiFDPages_t *pages)
-{
- multifd_pages_reset(pages);
- g_free(pages);
-}
-
void multifd_send_fill_packet(MultiFDSendParams *p)
{
MultiFDPacket_t *packet = p->packet;
- MultiFDPages_t *pages = p->pages;
+ MultiFDPages_t *pages = &p->data->u.ram;
uint64_t packet_num;
uint32_t zero_num = pages->num - pages->normal_num;
int i;
int i;
static int next_channel;
MultiFDSendParams *p = NULL; /* make happy gcc */
- MultiFDPages_t *pages = multifd_send_state->pages;
+ MultiFDSendData *tmp;
if (multifd_send_should_exit()) {
return false;
* qatomic_store_release() in multifd_send_thread().
*/
smp_mb_acquire();
- assert(!p->pages->num);
- multifd_send_state->pages = p->pages;
- p->pages = pages;
+
+ assert(!p->data->u.ram.num);
+
+ tmp = multifd_send_state->data;
+ multifd_send_state->data = p->data;
+ p->data = tmp;
/*
- * Making sure p->pages is setup before marking pending_job=true. Pairs
+ * Making sure p->data is set up before marking pending_job=true. Pairs
* with the qatomic_load_acquire() in multifd_send_thread().
*/
qatomic_store_release(&p->pending_job, true);
MultiFDPages_t *pages;
retry:
- pages = multifd_send_state->pages;
+ pages = &multifd_send_state->data->u.ram;
/* If the queue is empty, we can already enqueue now */
if (multifd_queue_empty(pages)) {
qemu_sem_destroy(&p->sem_sync);
g_free(p->name);
p->name = NULL;
- multifd_pages_clear(p->pages);
- p->pages = NULL;
+ g_free(p->data);
+ p->data = NULL;
p->packet_len = 0;
g_free(p->packet);
p->packet = NULL;
qemu_sem_destroy(&multifd_send_state->channels_ready);
g_free(multifd_send_state->params);
multifd_send_state->params = NULL;
- multifd_pages_clear(multifd_send_state->pages);
- multifd_send_state->pages = NULL;
+ g_free(multifd_send_state->data);
+ multifd_send_state->data = NULL;
g_free(multifd_send_state);
multifd_send_state = NULL;
}
{
int i;
bool flush_zero_copy;
+ MultiFDPages_t *pages;
if (!migrate_multifd()) {
return 0;
}
- if (multifd_send_state->pages->num) {
+ pages = &multifd_send_state->data->u.ram;
+ if (pages->num) {
if (!multifd_send_pages()) {
error_report("%s: multifd_send_pages fail", __func__);
return -1;
}
/*
- * Read pending_job flag before p->pages. Pairs with the
+ * Read pending_job flag before p->data. Pairs with the
* qatomic_store_release() in multifd_send_pages().
*/
if (qatomic_load_acquire(&p->pending_job)) {
- MultiFDPages_t *pages = p->pages;
+ MultiFDPages_t *pages = &p->data->u.ram;
p->iovs_num = 0;
assert(pages->num);
if (migrate_mapped_ram()) {
ret = file_write_ramblock_iov(p->c, p->iov, p->iovs_num,
- pages, &local_err);
+ &p->data->u.ram, &local_err);
} else {
ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num,
NULL, 0, p->write_flags,
p->next_packet_size = 0;
/*
- * Making sure p->pages is published before saying "we're
+ * Making sure p->data is published before saying "we're
* free". Pairs with the smp_mb_acquire() in
* multifd_send_pages().
*/
thread_count = migrate_multifd_channels();
multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
- multifd_send_state->pages = multifd_pages_init(page_count);
+ multifd_send_state->data = multifd_send_data_alloc();
qemu_sem_init(&multifd_send_state->channels_created, 0);
qemu_sem_init(&multifd_send_state->channels_ready, 0);
qatomic_set(&multifd_send_state->exiting, 0);
qemu_sem_init(&p->sem, 0);
qemu_sem_init(&p->sem_sync, 0);
p->id = i;
- p->pages = multifd_pages_init(page_count);
+ p->data = multifd_send_data_alloc();
if (use_packets) {
p->packet_len = sizeof(MultiFDPacket_t)
bool multifd_send_prepare_common(MultiFDSendParams *p)
{
- MultiFDPages_t *pages = p->pages;
+ MultiFDPages_t *pages = &p->data->u.ram;
multifd_send_zero_page_detect(p);
if (!pages->normal_num) {
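For reference, a minimal sketch of the types this conversion relies on, inferred
from the uses in this diff (pages->num, pages->normal_num, pages->block, the
u.ram accesses, and multifd_ram_payload_size()); it is not part of the patch and
the exact definitions in multifd.h may differ:

typedef struct {
    uint32_t num;           /* pages queued in this batch */
    uint32_t normal_num;    /* non-zero pages among them */
    RAMBlock *block;        /* type assumed; only pages->block is visible here */
    ram_addr_t offset[];    /* flexible array, sized via multifd_ram_payload_size() */
} MultiFDPages_t;

typedef union {
    MultiFDPages_t ram;     /* RAM is the only payload type in this diff */
} MultiFDPayload;

struct MultiFDSendData {
    /* fixed-size members (e.g. a payload type tag) would live here */
    MultiFDPayload u;
};

With a layout along these lines, multifd_send_data_alloc() sizes the allocation
as (sizeof(MultiFDSendData) - sizeof(MultiFDPayload)) + MAX(payload sizes): the
fixed head of the struct plus any compiler padding is kept, while the union gets
enough room for the largest payload, including MultiFDPages_t's trailing
flexible array.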