Since v5.2 the kernel supports directly mapped DMA buffers for
userspace. Up to this point a bounce buffer was involved. Because the
buffers are now accessed directly by the device, the alignment rules
also apply to the payloads.

Introduce a helper which allocates all buffers correctly aligned.

Furthermore, ensure that the buffer size is a multiple of 4k, because
there are devices on the market which always transfer a multiple of
4k, even if we ask for less, e.g. 512 bytes. This avoids stack
smashes.

Signed-off-by: Daniel Wagner <dwagner@suse.de>
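
For illustration only, a sketch of how a call site might use the new
helper (the caller shown here is hypothetical, not part of this patch):

	void *buf = nvme_alloc(512);
	if (!buf)
		return -ENOMEM;
	/* the helper rounded the size up to 4k and page-aligned the
	 * buffer, so a device that always transfers 4k chunks cannot
	 * overrun it */
	...
	free(buf);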
static void *mmap_registers(nvme_root_t r, struct nvme_dev *dev);
-static void *__nvme_alloc(size_t len, bool *huge)
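+/* Round N up to the next multiple of S. */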
+#define ROUND_UP(N, S) ((((N) + (S) - 1) / (S)) * (S))
+
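+/*
+ * Allocate a zeroed, page-aligned buffer whose size is rounded up to
+ * a multiple of 4k. Devices which always transfer a multiple of 4k,
+ * even when asked for less, can then not write past the end of the
+ * allocation.
+ */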
+static void *nvme_alloc(size_t len)
+{
+ size_t _len = ROUND_UP(len, 0x1000);
+ void *p;
+
+ if (posix_memalign(&p, getpagesize(), _len))
+ return NULL;
+
+ memset(p, 0, _len);
+ return p;
+}
+
static void *__nvme_alloc_huge(size_t len, bool *huge)
{
void *p;