From d33cc7eccb68c6a1488804c94ff5c1197ee0fc6e Mon Sep 17 00:00:00 2001
From: Eric Auger <eric.auger@redhat.com>
Date: Tue, 5 Mar 2019 16:35:32 +0100
Subject: [PATCH] vfio/pci: Implement the DMA fault handler

Whenever the eventfd is triggered, we retrieve the DMA fault(s)
from the mmapped fault region and inject them into the IOMMU
memory region.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
---
 hw/vfio/pci.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
 hw/vfio/pci.h |  1 +
 2 files changed, 52 insertions(+)

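Note for readers without the matching kernel headers: the handler below implies a fault region header laid out roughly as follows. This is a sketch inferred from how the code reads and writes the region (the 4-byte tail write at fd_offset pins the tail to offset 0; the remaining fields and their order are assumptions based on Eric Auger's VFIO nested-IOMMU series, not an authoritative UAPI definition):

/* Assumed layout of the DMA fault region header (sketch, not authoritative):
 * the handler preads this struct at fd_offset and pwrites the consumer
 * index back into the first 4 bytes. */
struct vfio_region_dma_fault {
    __u32 tail;        /* consumer index, written back by userspace */
    __u32 entry_size;  /* size of one struct iommu_fault record */
    __u32 nb_entries;  /* capacity of the circular fault queue */
    __u32 offset;      /* start of the queue within the region */
    __u32 head;        /* producer index, advanced by the kernel */
};
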
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 76bc9d3506..c54e62fe8f 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -2953,10 +2953,61 @@ static PCIPASIDOps vfio_pci_pasid_ops = {
 static void vfio_dma_fault_notifier_handler(void *opaque)
 {
     VFIOPCIExtIRQ *ext_irq = opaque;
+    VFIOPCIDevice *vdev = ext_irq->vdev;
+    PCIDevice *pdev = &vdev->pdev;
+    AddressSpace *as = pci_device_iommu_address_space(pdev);
+    IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(as->root);
+    struct vfio_region_dma_fault header;
+    struct iommu_fault *queue;
+    char *queue_buffer = NULL;
+    ssize_t bytes;
 
     if (!event_notifier_test_and_clear(&ext_irq->notifier)) {
         return;
     }
+
+    bytes = pread(vdev->vbasedev.fd, &header, sizeof(header),
+                  vdev->dma_fault_region.fd_offset);
+    if (bytes != sizeof(header)) {
+        error_report("%s unable to read the fault region header (0x%lx)",
+                     __func__, bytes);
+        return;
+    }
+
+    /* Normally the fault queue is mmapped */
+    queue = (struct iommu_fault *)vdev->dma_fault_region.mmaps[0].mmap;
+    if (!queue) {
+        size_t queue_size = header.nb_entries * header.entry_size;
+
+        error_report("%s: fault queue not mmapped: slower fault handling",
+                     vdev->vbasedev.name);
+
+        queue_buffer = g_malloc(queue_size);
+        bytes = pread(vdev->vbasedev.fd, queue_buffer, queue_size,
+                      vdev->dma_fault_region.fd_offset + header.offset);
+        if (bytes != queue_size) {
+            error_report("%s unable to read the fault queue (0x%lx)",
+                         __func__, bytes);
+            g_free(queue_buffer);
+            return;
+        }
+
+        queue = (struct iommu_fault *)queue_buffer;
+    }
+
+    while (vdev->fault_tail_index != header.head) {
+        memory_region_inject_faults(iommu_mr, 1,
+                                    &queue[vdev->fault_tail_index]);
+        vdev->fault_tail_index =
+            (vdev->fault_tail_index + 1) % header.nb_entries;
+    }
+    bytes = pwrite(vdev->vbasedev.fd, &vdev->fault_tail_index, 4,
+                   vdev->dma_fault_region.fd_offset);
+    if (bytes != 4) {
+        error_report("%s unable to write the fault region tail index (0x%lx)",
+                     __func__, bytes);
+    }
+    g_free(queue_buffer);
 }
 
 static int vfio_register_ext_irq_handler(VFIOPCIDevice *vdev,
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index eef91065f1..03ac8919ef 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -146,6 +146,7 @@ struct VFIOPCIDevice {
     EventNotifier req_notifier;
     VFIOPCIExtIRQ *ext_irqs;
     VFIORegion dma_fault_region;
+    uint32_t fault_tail_index;
     int (*resetfn)(struct VFIOPCIDevice *);
     uint32_t vendor_id;
     uint32_t device_id;
--
2.27.0
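
A side note on the ring discipline the handler implements: faults are consumed from a private tail index up to the kernel-advanced head, wrapping modulo nb_entries, and the new tail is then published back so the producer can reclaim slots. Below is a minimal self-contained sketch of that pattern; consume_faults() and struct fault are hypothetical stand-ins (plain stdio instead of real fault injection), not QEMU API:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical fault record; stands in for struct iommu_fault. */
struct fault {
    uint32_t id;
};

/* Consume entries from tail up to head on a circular queue: the same
 * discipline as the while loop in vfio_dma_fault_notifier_handler(). */
static uint32_t consume_faults(const struct fault *queue, uint32_t nb_entries,
                               uint32_t tail, uint32_t head)
{
    while (tail != head) {
        /* Inject one entry, then advance the tail with wrap-around. */
        printf("injecting fault %" PRIu32 "\n", queue[tail].id);
        tail = (tail + 1) % nb_entries;
    }
    return tail; /* new tail, to be written back for the producer */
}

int main(void)
{
    struct fault q[4] = { {0}, {1}, {2}, {3} };
    /* head = 2 with tail = 3 means the producer wrapped: slots 3, 0, 1
     * hold pending faults and slot 2 is the next write position. */
    uint32_t tail = 3, head = 2;

    tail = consume_faults(q, 4, tail, head); /* injects 3, 0, 1 */
    printf("new tail = %" PRIu32 "\n", tail); /* prints 2 */
    return 0;
}

Keeping the tail in VFIOPCIDevice (fault_tail_index) rather than re-reading it from the region lets the handler resume where it left off across eventfd notifications, while the final pwrite publishes the consumed position back to the kernel.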