mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
- pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
+ pending_reqs = kzalloc(sizeof(pending_reqs[0]) *
blkif_reqs, GFP_KERNEL);
pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
mmap_pages, GFP_KERNEL);
blkif_interface_init();
- memset(pending_reqs, 0, sizeof(pending_reqs));
INIT_LIST_HEAD(&pending_free);
for (i = 0; i < blkif_reqs; i++)
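
The memset() dropped in the blkback hunk above was not equivalent to what kzalloc() now provides: pending_reqs is a pointer, so sizeof(pending_reqs) is the size of the pointer (4 or 8 bytes), not of the whole request array, and the bulk of the allocation was left uninitialized. kzalloc() zeroes the full allocation up front; the scsiback and usbback init paths below drop their identical memset for the same reason. A minimal userspace sketch of the difference, with a made-up struct and count standing in for pending_req_t and blkif_reqs:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct req { int in_use; char payload[60]; };   /* stand-in for pending_req_t */

    int main(void)
    {
        size_t nr = 64;                             /* stand-in for blkif_reqs */
        struct req *reqs = malloc(sizeof(reqs[0]) * nr);

        if (!reqs)
            return 1;
        /* Old pattern: clears only sizeof(a pointer) bytes, 8 on LP64. */
        memset(reqs, 0, sizeof(reqs));
        printf("memset cleared %zu of %zu bytes\n",
               sizeof(reqs), sizeof(reqs[0]) * nr);
        free(reqs);

        /* New pattern: calloc(), like kzalloc(), zeroes the whole array. */
        reqs = calloc(nr, sizeof(reqs[0]));
        if (!reqs)
            return 1;
        printf("calloc cleared all %zu bytes\n", sizeof(reqs[0]) * nr);
        free(reqs);
        return 0;
    }
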
alloc_len = sizeof(struct scsi_lun) * alloc_luns
+ VSCSI_REPORT_LUNS_HEADER;
retry:
- if ((buff = kmalloc(alloc_len, GFP_KERNEL)) == NULL) {
+ if ((buff = kzalloc(alloc_len, GFP_KERNEL)) == NULL) {
printk(KERN_ERR "scsiback:%s kmalloc err\n", __FUNCTION__);
goto fail;
}
- memset(buff, 0, alloc_len);
-
one_lun = (struct scsi_lun *) &buff[8];
spin_lock_irqsave(&info->v2p_lock, flags);
list_for_each_entry(entry, head, l) {
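
The scsiback hunk above is the REPORT LUNS emulation path: buff holds a response made up of an 8-byte header (VSCSI_REPORT_LUNS_HEADER) followed by 8-byte struct scsi_lun entries, which is why one_lun is pointed at &buff[8]. Unlike the blkback case, the dropped memset() here did cover the full alloc_len, so this conversion is a pure simplification; the zero fill still matters so that the reserved header bytes read as zero and over-allocated, unused LUN slots do not leak stale memory. A rough userspace sketch of that layout, assuming each LUN is already encoded as an 8-byte value (build_report_luns and REPORT_LUNS_HEADER are illustrative names, not kernel symbols):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define REPORT_LUNS_HEADER 8    /* stand-in for VSCSI_REPORT_LUNS_HEADER */

    /* Bytes 0-3 carry the LUN list length (big-endian), bytes 4-7 are
     * reserved and must stay zero, and each entry after the header is
     * 8 bytes.  calloc() plays the role of kzalloc(): everything not
     * explicitly written below remains zero. */
    unsigned char *build_report_luns(const unsigned char (*luns)[8], unsigned int nr)
    {
        size_t len = REPORT_LUNS_HEADER + 8u * (size_t)nr;
        unsigned char *buff = calloc(1, len);
        uint32_t list_len = 8u * nr;
        unsigned int i;

        if (!buff)
            return NULL;

        buff[0] = list_len >> 24;
        buff[1] = list_len >> 16;
        buff[2] = list_len >> 8;
        buff[3] = list_len;

        for (i = 0; i < nr; i++)
            memcpy(&buff[REPORT_LUNS_HEADER + 8u * i], luns[i], 8);

        return buff;
    }
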
mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;
- pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
+ pending_reqs = kzalloc(sizeof(pending_reqs[0]) *
vscsiif_reqs, GFP_KERNEL);
pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
mmap_pages, GFP_KERNEL);
if (scsiback_interface_init() < 0)
goto out_of_kmem;
- memset(pending_reqs, 0, sizeof(pending_reqs));
INIT_LIST_HEAD(&pending_free);
for (i = 0; i < vscsiif_reqs; i++)
return -ENODEV;
mmap_pages = usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST;
- pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
+ pending_reqs = kzalloc(sizeof(pending_reqs[0]) *
usbif_reqs, GFP_KERNEL);
pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
mmap_pages, GFP_KERNEL);
for (i = 0; i < mmap_pages; i++)
pending_grant_handles[i] = USBBACK_INVALID_HANDLE;
- memset(pending_reqs, 0, sizeof(pending_reqs));
INIT_LIST_HEAD(&pending_free);
for (i = 0; i < usbif_reqs; i++)
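
Note that in all three init paths only pending_reqs moves to kzalloc(); pending_grant_handles keeps plain kmalloc(). In the usbback hunk above the reason is visible: the loop right after the allocation overwrites every slot with USBBACK_INVALID_HANDLE, so zero-filling that array first would only be redundant work. A small userspace sketch of that design choice, with a made-up handle type and sentinel standing in for the driver's handle type and USBBACK_INVALID_HANDLE:

    #include <stdlib.h>

    typedef unsigned int fake_grant_handle_t;              /* stand-in handle type */
    #define FAKE_INVALID_HANDLE ((fake_grant_handle_t)~0u) /* stand-in sentinel    */

    /* Plain malloc()/kmalloc() is enough here: every element is assigned
     * the invalid-handle sentinel right away, so a zeroing allocator would
     * do no useful extra work. */
    fake_grant_handle_t *alloc_handles(size_t nr_pages)
    {
        fake_grant_handle_t *h = malloc(sizeof(h[0]) * nr_pages);
        size_t i;

        if (!h)
            return NULL;
        for (i = 0; i < nr_pages; i++)
            h[i] = FAKE_INVALID_HANDLE;
        return h;
    }
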