driver-know-hows

device driver related stuff

View on GitHub

Chapter 7: Direct Memory Access (DMA)

Table of Contents

  1. Introduction to DMA
  2. DMA Addressing and Coherency
  3. DMA Allocation APIs
  4. DMA Mapping APIs
  5. Scatter-Gather DMA
  6. DMA Pools
  7. Streaming vs Coherent DMA
  8. DMA Constraints and Masks

Introduction to DMA

What is DMA?

Direct Memory Access (DMA) allows hardware devices to transfer data directly to/from memory without CPU involvement:

Without DMA (PIO - Programmed I/O):

Device → CPU reads → CPU writes → Memory
(CPU is busy copying data)

With DMA:

Device ←→ Memory (directly)
(CPU can do other work)

Benefits of DMA

DMA Challenges

/*
 * Challenges when using DMA:
 * 
 * 1. Address Translation
 *    - CPU uses virtual addresses
 *    - DMA hardware uses physical/bus addresses
 *    - Must convert between them
 * 
 * 2. Cache Coherency
 *    - CPU caches may have stale data
 *    - DMA writes bypass CPU cache
 *    - Must synchronize cache and memory
 * 
 * 3. Address Limitations
 *    - Some devices can only address limited memory
 *    - Example: 32-bit device on 64-bit system
 *    - Must ensure DMA buffer in addressable range
 */

DMA Addressing and Coherency

Address Types

/*
 * Three types of addresses in DMA:
 * 
 * 1. Virtual Address (kernel space)
 *    - What CPU uses in kernel
 *    - Example: pointer from kmalloc()
 * 
 * 2. Physical Address
 *    - Actual RAM address
 *    - Use: virt_to_phys() / phys_to_virt()
 * 
 * 3. Bus Address (DMA Address)
 *    - What device sees on bus
 *    - May differ from physical (with IOMMU)
 *    - Use: dma_map_* functions
 */

void *virt_addr;          /* Virtual address (CPU) */
unsigned long phys_addr;  /* Physical address (RAM) */
dma_addr_t dma_addr;      /* Bus address (device) */

/* Conversions */
phys_addr = virt_to_phys(virt_addr);      /* Virtual → Physical */
virt_addr = phys_to_virt(phys_addr);      /* Physical → Virtual */
dma_addr = dma_map_single(dev, virt_addr, size, dir);  /* Virtual → Bus */

Cache Coherency

/*
 * Cache coherency issues:
 * 
 * Scenario 1: DMA write to memory
 * 
 *   1. Device writes data to memory via DMA
 *   2. CPU reads same location
 *   3. CPU gets stale data from cache! (BUG)
 * 
 * Solution: Invalidate the CPU cache before the CPU reads the DMA-written data
 * 
 * Scenario 2: DMA read from memory
 * 
 *   1. CPU writes data
 *   2. Data sits in CPU cache (not written to RAM yet)
 *   3. DMA reads stale data from RAM! (BUG)
 * 
 * Solution: Flush cache before DMA read
 * 
 * The DMA API handles this automatically!
 */

DMA Allocation APIs

Coherent DMA Memory

#include <linux/dma-mapping.h>

/*
 * dma_alloc_coherent - Allocate DMA-coherent memory
 * 
 * @dev:        Device structure
 * @size:       Size in bytes
 * @dma_handle: Output - DMA address for device
 * @gfp:        GFP allocation flags
 * 
 * Return: Virtual address, or NULL on failure
 * 
 * Properties:
 * - Memory is cache-coherent (no manual sync needed)
 * - CPU and device see same data
 * - May disable caching (slower CPU access)
 * - Good for: Control structures, descriptors, shared buffers
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp);

/*
 * dma_free_coherent - Free coherent DMA memory
 * 
 * @dev:        Device structure
 * @size:       Size (must match allocation)
 * @cpu_addr:   Virtual address from dma_alloc_coherent
 * @dma_handle: DMA address from dma_alloc_coherent
 */
void dma_free_coherent(struct device *dev, size_t size,
                      void *cpu_addr, dma_addr_t dma_handle);

/*
 * Example: Allocating DMA buffer
 */
/*
 * struct dma_buffer - one coherent DMA allocation, tracked from both sides.
 *
 * Holds the CPU view (virt_addr) and the device view (dma_addr) of the
 * same memory, plus the size, which dma_free_coherent() needs at teardown.
 */
struct dma_buffer {
    void *virt_addr;      /* CPU virtual address */
    dma_addr_t dma_addr;  /* Device DMA address */
    size_t size;          /* Allocation size in bytes (required for free) */
};

/*
 * alloc_dma_buffer - back a dma_buffer with coherent DMA memory.
 *
 * On success fills in all three fields of @buf and returns 0; on failure
 * logs an error and returns -ENOMEM, leaving @buf->virt_addr untouched
 * only on the success path (callers should treat @buf as invalid on error).
 */
static int alloc_dma_buffer(struct device *dev, struct dma_buffer *buf,
                           size_t size)
{
    void *cpu_addr;

    cpu_addr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
    if (!cpu_addr) {
        dev_err(dev, "Failed to allocate DMA buffer\n");
        return -ENOMEM;
    }

    buf->virt_addr = cpu_addr;
    buf->size = size;

    dev_info(dev, "DMA buffer: virt=%p, dma=%pad, size=%zu\n",
             buf->virt_addr, &buf->dma_addr, buf->size);

    return 0;
}

/*
 * free_dma_buffer - release a buffer obtained from alloc_dma_buffer().
 *
 * Safe to call on a buffer that was never allocated (or already freed):
 * a NULL virt_addr is treated as "nothing to do". Clears virt_addr so a
 * second call cannot double-free.
 */
static void free_dma_buffer(struct device *dev, struct dma_buffer *buf)
{
    if (!buf->virt_addr)
        return;

    dma_free_coherent(dev, buf->size, buf->virt_addr, buf->dma_addr);
    buf->virt_addr = NULL;
}

/*
 * Example: DMA ring buffer for device
 */
/*
 * struct dma_ring - descriptor ring plus per-slot data buffers.
 *
 * The descriptor ring and each data buffer are coherent DMA allocations;
 * the buffers/buf_dma arrays themselves are ordinary kernel memory
 * (kcalloc'd) holding the CPU and device addresses of each slot.
 * head/tail are plain indices; no wrap-around or locking is shown here.
 */
struct dma_ring {
    void *desc;           /* Descriptor ring (CPU) */
    dma_addr_t desc_dma;  /* Descriptor ring (device) */
    void **buffers;       /* Data buffers */
    dma_addr_t *buf_dma;  /* Buffer DMA addresses */
    unsigned int size;    /* Ring size */
    unsigned int head;    /* Producer index */
    unsigned int tail;    /* Consumer index */
};

/*
 * setup_dma_ring - allocate a descriptor ring and its data buffers.
 *
 * @dev:      owning device (used for all DMA allocations)
 * @ring:     ring state to populate
 * @num_desc: number of descriptors / data buffers
 * @buf_size: size of each data buffer in bytes
 *
 * Returns 0 on success, -ENOMEM on any allocation failure. On failure
 * everything allocated so far is released.
 *
 * Fix vs. the previous version: the err_arrays path freed only the
 * descriptor ring and leaked whichever of ring->buffers / ring->buf_dma
 * had been successfully kcalloc'd. kfree(NULL) is a no-op, so both
 * arrays can be freed unconditionally on every error path.
 */
static int setup_dma_ring(struct device *dev, struct dma_ring *ring,
                         unsigned int num_desc, size_t buf_size)
{
    int i;

    ring->size = num_desc;
    ring->head = ring->tail = 0;

    /* Descriptor ring is shared with the device -> coherent memory */
    ring->desc = dma_alloc_coherent(dev,
                                    num_desc * sizeof(struct hw_desc),
                                    &ring->desc_dma,
                                    GFP_KERNEL);
    if (!ring->desc)
        return -ENOMEM;

    /* Bookkeeping arrays (plain kernel memory, not DMA) */
    ring->buffers = kcalloc(num_desc, sizeof(void *), GFP_KERNEL);
    ring->buf_dma = kcalloc(num_desc, sizeof(dma_addr_t), GFP_KERNEL);

    if (!ring->buffers || !ring->buf_dma)
        goto err_arrays;

    /* One coherent data buffer per descriptor slot */
    for (i = 0; i < num_desc; i++) {
        ring->buffers[i] = dma_alloc_coherent(dev, buf_size,
                                             &ring->buf_dma[i],
                                             GFP_KERNEL);
        if (!ring->buffers[i])
            goto err_buffers;
    }

    dev_info(dev, "DMA ring created: %u descriptors, %zu bytes each\n",
             num_desc, buf_size);

    return 0;

err_buffers:
    /* i is the index that failed; free slots [0, i) in reverse */
    while (--i >= 0)
        dma_free_coherent(dev, buf_size, ring->buffers[i], ring->buf_dma[i]);
err_arrays:
    /* kfree(NULL) is a no-op, so safe even if only one array exists */
    kfree(ring->buf_dma);
    kfree(ring->buffers);
    dma_free_coherent(dev, num_desc * sizeof(struct hw_desc),
                     ring->desc, ring->desc_dma);
    return -ENOMEM;
}

DMA Mapping APIs

Streaming DMA Mappings

/*
 * Streaming DMA - For one-time transfers
 * 
 * More efficient than coherent (cache-friendly)
 * Requires explicit synchronization
 * Good for: Large data transfers
 */

/*
 * dma_map_single - Map single buffer for DMA
 * 
 * @dev:  Device structure
 * @ptr:  Virtual address of buffer
 * @size: Size in bytes
 * @dir:  DMA direction
 * 
 * Return: DMA address, or error (check with dma_mapping_error)
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
                         enum dma_data_direction dir);

/*
 * dma_unmap_single - Unmap single buffer
 * 
 * Must be called after DMA completes
 */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
                     size_t size, enum dma_data_direction dir);

/*
 * DMA directions:
 * 
 * DMA_TO_DEVICE     - Data is sent to device (CPU writes, device reads)
 * DMA_FROM_DEVICE   - Data received from device (device writes, CPU reads)
 * DMA_BIDIRECTIONAL - Data can flow both ways
 * DMA_NONE          - No DMA transfer (debugging only)
 */

/*
 * dma_mapping_error - Check if mapping failed
 * 
 * Return: Non-zero if error
 */
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);

/*
 * Synchronization functions (for partial sync)
 */

/* Sync before device reads (flush CPU cache) */
void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
                               size_t size, enum dma_data_direction dir);

/* Sync before CPU reads (invalidate CPU cache) */
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
                            size_t size, enum dma_data_direction dir);

Streaming DMA Examples

/*
 * Example 1: Simple DMA transfer
 */
/*
 * simple_dma_transfer - map a buffer, run one device DMA, unmap.
 *
 * Maps @buffer for a CPU-to-device transfer, kicks the device, waits for
 * completion (polling here for simplicity; real drivers use an interrupt),
 * and unmaps on every exit path.
 *
 * Returns 0 on success or a negative errno from mapping/device start.
 */
static int simple_dma_transfer(struct device *dev, void *buffer, size_t size)
{
    dma_addr_t handle;
    int err;

    handle = dma_map_single(dev, buffer, size, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle)) {
        dev_err(dev, "DMA mapping failed\n");
        return -ENOMEM;
    }

    dev_dbg(dev, "DMA mapped: virt=%p, dma=%pad, size=%zu\n",
            buffer, &handle, size);

    err = start_device_dma(dev, handle, size);
    if (err < 0)
        goto unmap;

    /* Wait for DMA completion (in real code, use interrupt) */
    wait_for_dma_complete(dev);
    err = 0;

unmap:
    /* Single unmap covers both the success and failure paths */
    dma_unmap_single(dev, handle, size, DMA_TO_DEVICE);
    return err;
}

/*
 * Example 2: Bidirectional DMA with synchronization
 */
/*
 * bidirectional_dma - round-trip a buffer to the device and back.
 *
 * Demonstrates dma_sync_single_for_device()/_for_cpu() on a streaming
 * mapping used in both directions.
 *
 * Fix vs. the previous version: the sync calls passed DMA_TO_DEVICE and
 * DMA_FROM_DEVICE even though the buffer was mapped DMA_BIDIRECTIONAL.
 * The DMA API requires the direction given to the dma_sync_* functions
 * to match the direction the mapping was created with, so both syncs
 * must use DMA_BIDIRECTIONAL here.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int bidirectional_dma(struct device *dev)
{
    void *buffer;
    dma_addr_t dma_addr;
    size_t size = 4096;
    
    /* Allocate buffer */
    buffer = kmalloc(size, GFP_KERNEL);
    if (!buffer)
        return -ENOMEM;
    
    /* Prepare data */
    memset(buffer, 0xAA, size);
    
    /* Map for bidirectional DMA */
    dma_addr = dma_map_single(dev, buffer, size, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, dma_addr)) {
        kfree(buffer);
        return -ENOMEM;
    }
    
    /*
     * Phase 1: Send data to device
     */
    
    /* Ensure CPU writes are visible to device.
     * Direction must match the mapping direction (DMA_BIDIRECTIONAL). */
    dma_sync_single_for_device(dev, dma_addr, size, DMA_BIDIRECTIONAL);
    
    /* Device reads data */
    device_read_from_memory(dev, dma_addr, size);
    
    /*
     * Phase 2: Receive data from device
     */
    
    /* Device writes data */
    device_write_to_memory(dev, dma_addr, size);
    
    /* Ensure device writes are visible to CPU.
     * Again: same direction as the mapping. */
    dma_sync_single_for_cpu(dev, dma_addr, size, DMA_BIDIRECTIONAL);
    
    /* CPU can now safely read buffer */
    print_buffer_contents(buffer, size);
    
    /* Unmap and free */
    dma_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
    kfree(buffer);
    
    return 0;
}

/*
 * Example 3: Receive buffer with interrupt
 */
/*
 * struct rx_buffer - state for one interrupt-driven DMA receive.
 *
 * Passed as dev_id to the IRQ handler, which signals @done when the
 * transfer finishes.
 */
struct rx_buffer {
    void *data;               /* CPU address of the receive buffer */
    dma_addr_t dma_addr;      /* DMA address the device writes to */
    size_t size;              /* Buffer size in bytes */
    struct completion done;   /* Completed by the IRQ handler */
};

/*
 * dma_rx_interrupt - IRQ handler for DMA-receive completion.
 *
 * @dev_id was registered as the rx_buffer for the in-flight transfer;
 * all we do is wake the waiter via its completion.
 */
static irqreturn_t dma_rx_interrupt(int irq, void *dev_id)
{
    struct rx_buffer *rx = (struct rx_buffer *)dev_id;

    complete(&rx->done);

    return IRQ_HANDLED;
}

/*
 * receive_via_dma - receive one buffer from the device via DMA.
 *
 * Maps @buffer for device-to-CPU DMA, starts the transfer, and waits up
 * to five seconds for the completion signalled by dma_rx_interrupt().
 * The buffer is unmapped on every exit path.
 *
 * Returns 0 on success, -ENOMEM on mapping failure, -ETIMEDOUT on
 * timeout, or the device's negative errno from starting the transfer.
 */
static int receive_via_dma(struct device *dev, void *buffer, size_t size)
{
    struct rx_buffer rx = {
        .data = buffer,
        .size = size,
    };
    int ret;

    init_completion(&rx.done);

    /* Map for device-to-CPU transfer */
    rx.dma_addr = dma_map_single(dev, buffer, size, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, rx.dma_addr))
        return -ENOMEM;

    ret = device_start_rx_dma(dev, rx.dma_addr, size);
    if (ret < 0)
        goto out_unmap;

    /* Wait for completion (up to 5 seconds) */
    if (!wait_for_completion_timeout(&rx.done, msecs_to_jiffies(5000))) {
        dev_err(dev, "DMA receive timeout\n");
        ret = -ETIMEDOUT;
        goto out_unmap;
    }

    /* Hand the buffer back to the CPU before touching the data */
    dma_sync_single_for_cpu(dev, rx.dma_addr, size, DMA_FROM_DEVICE);

    dev_info(dev, "Received %zu bytes via DMA\n", size);
    ret = 0;

out_unmap:
    dma_unmap_single(dev, rx.dma_addr, size, DMA_FROM_DEVICE);
    return ret;
}

Scatter-Gather DMA

Theory: Scatter-Gather

Scatter-Gather allows DMA of non-contiguous memory regions in a single operation:

#include <linux/scatterlist.h>

/*
 * struct scatterlist - Scatter-gather entry
 */
/*
 * struct scatterlist - Scatter-gather entry
 *
 * NOTE(review): this is an illustrative sketch of the kernel's struct;
 * the real layout varies by architecture/config (e.g. dma_length exists
 * only with CONFIG_NEED_SG_DMA_LENGTH). Always use the sg_dma_address()
 * and sg_dma_len() accessors rather than touching fields directly.
 */
struct scatterlist {
    unsigned long page_link;  /* Page pointer */
    unsigned int offset;      /* Offset in page */
    unsigned int length;      /* Length in bytes */
    dma_addr_t dma_address;   /* DMA address */
    unsigned int dma_length;  /* DMA length */
};

/*
 * Scatter-gather list initialization
 */

/* Initialize sg list */
void sg_init_table(struct scatterlist *sgl, unsigned int nents);

/* Initialize single entry */
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen);

/* Set entry */
void sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen);
void sg_set_page(struct scatterlist *sg, struct page *page,
                unsigned int len, unsigned int offset);

/*
 * Mark end of list
 */
void sg_mark_end(struct scatterlist *sg);

/*
 * Iterate over sg list
 */
#define for_each_sg(sglist, sg, nr, __i)

/*
 * DMA mapping for scatter-gather
 */

/* Map sg list */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
              enum dma_data_direction dir);

/* Unmap sg list */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                 enum dma_data_direction dir);

/*
 * Synchronization
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                           int nents, enum dma_data_direction dir);

Scatter-Gather Examples

/*
 * Example 1: Simple scatter-gather
 */
static int simple_sg_dma(struct device *dev)
{
    struct scatterlist sg[3];
    void *buf1, *buf2, *buf3;
    int nents, i;
    
    /* Allocate buffers */
    buf1 = kmalloc(1024, GFP_KERNEL);
    buf2 = kmalloc(2048, GFP_KERNEL);
    buf3 = kmalloc(4096, GFP_KERNEL);
    
    if (!buf1 || !buf2 || !buf3)
        goto err_alloc;
    
    /* Initialize scatter-gather table */
    sg_init_table(sg, 3);
    sg_set_buf(&sg[0], buf1, 1024);
    sg_set_buf(&sg[1], buf2, 2048);
    sg_set_buf(&sg[2], buf3, 4096);
    
    /* Map for DMA */
    nents = dma_map_sg(dev, sg, 3, DMA_TO_DEVICE);
    if (nents == 0) {
        dev_err(dev, "DMA mapping failed\n");
        goto err_map;
    }
    
    dev_info(dev, "Mapped %d sg entries\n", nents);
    
    /* Program device with sg list */
    for (i = 0; i < nents; i++) {
        dev_dbg(dev, "SG[%d]: dma=%pad, len=%u\n",
                i, &sg[i].dma_address, sg[i].dma_length);
        program_device_sg_entry(dev, i, sg[i].dma_address, sg[i].dma_length);
    }
    
    /* Start DMA */
    start_device_sg_dma(dev, nents);
    
    /* Wait for completion */
    wait_for_dma_complete(dev);
    
    /* Unmap */
    dma_unmap_sg(dev, sg, 3, DMA_TO_DEVICE);
    
    kfree(buf3);
    kfree(buf2);
    kfree(buf1);
    return 0;

err_map:
err_alloc:
    kfree(buf3);
    kfree(buf2);
    kfree(buf1);
    return -ENOMEM;
}

/*
 * Example 2: Page-based scatter-gather
 */
/*
 * page_sg_dma - map an array of pages as one scatter-gather list.
 *
 * Wraps @nr_pages full pages in a dynamically allocated sg table, maps
 * it bidirectionally, hands it to process_sg_list(), then unmaps and
 * frees the table.
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int page_sg_dma(struct device *dev, struct page **pages, int nr_pages)
{
    struct scatterlist *sgl;
    int mapped, idx;
    int ret = -ENOMEM;

    sgl = kcalloc(nr_pages, sizeof(*sgl), GFP_KERNEL);
    if (!sgl)
        return -ENOMEM;

    sg_init_table(sgl, nr_pages);
    for (idx = 0; idx < nr_pages; idx++)
        sg_set_page(&sgl[idx], pages[idx], PAGE_SIZE, 0);

    mapped = dma_map_sg(dev, sgl, nr_pages, DMA_BIDIRECTIONAL);
    if (!mapped)
        goto out_free;

    process_sg_list(dev, sgl, mapped);

    /* Unmap with the original entry count, as the API requires */
    dma_unmap_sg(dev, sgl, nr_pages, DMA_BIDIRECTIONAL);
    ret = 0;

out_free:
    kfree(sgl);
    return ret;
}

/*
 * Example 3: Network packet scatter-gather (typical pattern)
 */
/*
 * struct packet_desc - one hardware TX descriptor as the device sees it.
 *
 * Filled from a mapped scatter-gather entry in send_packet_sg();
 * flags carries DESC_FLAG_LAST on the final segment of a packet.
 */
struct packet_desc {
    dma_addr_t addr;   /* DMA address of the segment */
    u32 length;        /* Segment length in bytes */
    u32 flags;         /* DESC_FLAG_* bits */
};

/*
 * send_packet_sg - transmit an skb (header + page fragments) via SG DMA.
 *
 * Builds a scatter-gather list with one entry for the linear skb data
 * and one per page fragment, maps it, fills hardware descriptors from
 * the mapped entries, and starts transmission. The sg list is saved so
 * the TX-complete interrupt can unmap and free it.
 *
 * Fix vs. the previous version: the return value of get_tx_descriptors()
 * was used unchecked; if it returned NULL the code dereferenced it and
 * leaked the mapped sg list. On failure we now unmap and free.
 *
 * Returns 0 on success, -ENOMEM on allocation/mapping/descriptor failure.
 */
static int send_packet_sg(struct device *dev, struct sk_buff *skb)
{
    struct packet_desc *descs;
    struct scatterlist *sg;
    skb_frag_t *frag;
    int nr_frags;
    int i, nents;
    
    nr_frags = skb_shinfo(skb)->nr_frags;
    
    /* Allocate sg list (linear data + fragments).
     * GFP_ATOMIC: the TX path may run in softirq context. */
    sg = kcalloc(nr_frags + 1, sizeof(*sg), GFP_ATOMIC);
    if (!sg)
        return -ENOMEM;
    
    sg_init_table(sg, nr_frags + 1);
    
    /* First entry: linear data */
    sg_set_buf(&sg[0], skb->data, skb_headlen(skb));
    
    /* Remaining entries: fragments */
    for (i = 0; i < nr_frags; i++) {
        frag = &skb_shinfo(skb)->frags[i];
        sg_set_page(&sg[i + 1], skb_frag_page(frag),
                   skb_frag_size(frag), skb_frag_off(frag));
    }
    
    /* Map for DMA */
    nents = dma_map_sg(dev, sg, nr_frags + 1, DMA_TO_DEVICE);
    if (nents == 0) {
        kfree(sg);
        return -ENOMEM;
    }
    
    /* Build hardware descriptors; bail out cleanly if the TX ring
     * cannot supply enough descriptors. */
    descs = get_tx_descriptors(dev, nents);
    if (!descs) {
        dma_unmap_sg(dev, sg, nr_frags + 1, DMA_TO_DEVICE);
        kfree(sg);
        return -ENOMEM;
    }
    for (i = 0; i < nents; i++) {
        descs[i].addr = sg_dma_address(&sg[i]);
        descs[i].length = sg_dma_len(&sg[i]);
        descs[i].flags = (i == nents - 1) ? DESC_FLAG_LAST : 0;
    }
    
    /* Start transmission */
    start_tx(dev, descs, nents);
    
    /* Save sg for unmapping in TX complete interrupt */
    save_tx_sg(dev, sg, nr_frags + 1);
    
    return 0;
}

DMA Pools

Theory: DMA Pools

DMA pools efficiently allocate many small DMA buffers:

#include <linux/dmapool.h>

/*
 * struct dma_pool - DMA memory pool
 */
struct dma_pool;

/*
 * dma_pool_create - Create DMA pool
 * 
 * @name:       Pool name
 * @dev:        Device
 * @size:       Size of each allocation
 * @align:      Alignment requirement
 * @boundary:   Boundary that allocations must not cross (0 for none)
 * 
 * Return: Pool pointer, or NULL on failure
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
                                size_t size, size_t align,
                                size_t boundary);

/*
 * dma_pool_destroy - Destroy DMA pool
 */
void dma_pool_destroy(struct dma_pool *pool);

/*
 * dma_pool_alloc - Allocate from pool
 * 
 * @pool:       Pool to allocate from
 * @flags:      GFP flags
 * @dma_handle: Output - DMA address
 * 
 * Return: Virtual address, or NULL on failure
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t flags,
                    dma_addr_t *dma_handle);

/*
 * dma_pool_free - Free back to pool
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma);

DMA Pool Examples

/*
 * Example: Command descriptor pool
 */
/*
 * struct command_desc - one hardware command descriptor.
 *
 * Allocated from the device's dma_pool (see create_command_pool) so the
 * device can fetch it by DMA address; the pool enforces the 32-byte
 * alignment the hardware requires.
 */
struct command_desc {
    u32 opcode;           /* Command opcode (e.g. CMD_TRANSFER) */
    dma_addr_t data_addr; /* DMA address of the payload buffer */
    u32 length;           /* Payload length in bytes */
    u32 flags;            /* CMD_FLAG_* bits */
};

/*
 * struct device_priv - per-device driver state (excerpt).
 */
struct device_priv {
    struct dma_pool *cmd_pool;  /* Pool of command_desc allocations */
    /* ... other fields ... */
};

/*
 * create_command_pool - set up the per-device pool of command descriptors.
 *
 * Each allocation is sizeof(struct command_desc), aligned to 32 bytes
 * (hardware requirement); boundary 0 means no crossing constraint.
 *
 * Returns 0 on success, -ENOMEM if the pool cannot be created.
 */
static int create_command_pool(struct device *dev, struct device_priv *priv)
{
    struct dma_pool *pool;

    pool = dma_pool_create("cmd_desc_pool", dev,
                           sizeof(struct command_desc),
                           32 /* align */,
                           0 /* no boundary */);
    if (!pool) {
        dev_err(dev, "Failed to create command pool\n");
        return -ENOMEM;
    }

    priv->cmd_pool = pool;
    dev_info(dev, "Command descriptor pool created\n");
    return 0;
}

/*
 * alloc_command - grab a zeroed command descriptor from the pool.
 *
 * @dma_addr receives the descriptor's DMA address on success.
 * GFP_ATOMIC: callers may be in interrupt context.
 *
 * Returns the descriptor's CPU address, or NULL if the pool is exhausted.
 */
static struct command_desc *alloc_command(struct device_priv *priv,
                                         dma_addr_t *dma_addr)
{
    struct command_desc *desc;

    desc = dma_pool_alloc(priv->cmd_pool, GFP_ATOMIC, dma_addr);
    if (desc)
        memset(desc, 0, sizeof(*desc));

    return desc;
}

/*
 * free_command - return a command descriptor to the pool.
 *
 * @cmd/@dma_addr must be the pair returned by alloc_command().
 */
static void free_command(struct device_priv *priv,
                        struct command_desc *cmd,
                        dma_addr_t dma_addr)
{
    dma_pool_free(priv->cmd_pool, cmd, dma_addr);
}

/*
 * destroy_command_pool - tear down the command descriptor pool.
 *
 * Idempotent: a second call (or a call before create) is a no-op
 * because cmd_pool is cleared after destruction.
 */
static void destroy_command_pool(struct device_priv *priv)
{
    if (!priv->cmd_pool)
        return;

    dma_pool_destroy(priv->cmd_pool);
    priv->cmd_pool = NULL;
}

/*
 * Example: Using command pool
 */
/*
 * submit_dma_command - queue one transfer command to the device.
 *
 * Allocates a command descriptor from the pool, maps @data_buf for a
 * CPU-to-device transfer, fills the descriptor, and writes its DMA
 * address to the device's command register.
 *
 * Ownership: on success, the descriptor and the data mapping remain
 * live; the completion interrupt handler is responsible for calling
 * dma_unmap_single() and free_command() (see the note at the end).
 *
 * NOTE(review): data_len (size_t) is stored into the u32 length field —
 * assumes data_len fits in 32 bits; confirm against callers.
 * NOTE(review): writel() writes 32 bits; assumes cmd_dma fits in the
 * register, or that a high-half write exists elsewhere — verify for
 * 64-bit DMA configurations.
 *
 * Returns 0 on success, -ENOMEM on descriptor or mapping failure.
 */
static int submit_dma_command(struct device *dev, struct device_priv *priv,
                             void *data_buf, size_t data_len)
{
    struct command_desc *cmd;
    dma_addr_t cmd_dma, data_dma;
    
    /* Allocate command descriptor from pool */
    cmd = alloc_command(priv, &cmd_dma);
    if (!cmd)
        return -ENOMEM;
    
    /* Map data buffer */
    data_dma = dma_map_single(dev, data_buf, data_len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, data_dma)) {
        free_command(priv, cmd, cmd_dma);
        return -ENOMEM;
    }
    
    /* Fill command descriptor */
    cmd->opcode = CMD_TRANSFER;
    cmd->data_addr = data_dma;
    cmd->length = data_len;
    cmd->flags = CMD_FLAG_INTERRUPT;
    
    /* Submit command to device */
    writel(cmd_dma, device_cmd_register(dev));
    
    /* In real code, save pointers for cleanup in interrupt handler */
    
    return 0;
}

DMA Constraints and Masks

DMA Address Masks

/*
 * DMA address masks specify addressing capability of device
 */

/*
 * dma_set_mask - Set DMA addressing mask
 * 
 * @dev:  Device
 * @mask: Address mask (e.g., DMA_BIT_MASK(32) for 32-bit addressing)
 * 
 * Return: 0 on success, negative on error
 */
int dma_set_mask(struct device *dev, u64 mask);

/*
 * dma_set_coherent_mask - Set mask for coherent allocations
 */
int dma_set_coherent_mask(struct device *dev, u64 mask);

/*
 * dma_set_mask_and_coherent - Set both masks
 */
int dma_set_mask_and_coherent(struct device *dev, u64 mask);

/*
 * Common masks
 */
#define DMA_BIT_MASK(n)  (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

DMA_BIT_MASK(64)  /* Can address all 64-bit space */
DMA_BIT_MASK(32)  /* Can address 4GB (32-bit) */
DMA_BIT_MASK(24)  /* Can address 16MB (legacy ISA) */

/*
 * Example: Setting DMA mask
 */
/*
 * setup_dma_mask - negotiate the widest DMA addressing the platform allows.
 *
 * Tries 64-bit addressing first and falls back to 32-bit. Returns 0 on
 * success (logging which width was chosen) or the negative errno from
 * the final attempt.
 */
static int setup_dma_mask(struct pci_dev *pdev)
{
    struct device *dev = &pdev->dev;
    int ret;

    /* Preferred: full 64-bit addressing */
    if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) == 0) {
        dev_info(dev, "Using 64-bit DMA\n");
        return 0;
    }

    /* Fallback: 32-bit (first 4GB) */
    ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
    if (ret == 0) {
        dev_info(dev, "Using 32-bit DMA\n");
        return 0;
    }

    dev_err(dev, "No suitable DMA available\n");
    return ret;
}

Summary

In this chapter, you learned:

DMA Basics: Direct memory access without CPU
Coherent DMA: Cache-coherent memory allocation
Streaming DMA: Efficient one-time transfers
Scatter-Gather: DMA for fragmented memory
DMA Pools: Efficient small buffer allocation
Address Constraints: Handling device limitations

Key Takeaways

  1. Use streaming DMA for large data transfers
  2. Use coherent DMA for shared control structures
  3. Always sync before accessing streamed DMA data
  4. Set DMA masks appropriately for device capabilities
  5. Use scatter-gather for page-based I/O

Next Steps

Proceed to 08-platform-drivers.md to learn about platform device drivers and device tree integration.


Quick Reference

/* Coherent DMA */
void *dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
dma_free_coherent(dev, size, virt_addr, dma_addr);

/* Streaming DMA */
dma_addr = dma_map_single(dev, ptr, size, DMA_TO_DEVICE);
dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);

/* Synchronization */
dma_sync_single_for_device(dev, dma_addr, size, dir);
dma_sync_single_for_cpu(dev, dma_addr, size, dir);

/* Scatter-gather */
sg_init_table(sg, nents);
sg_set_buf(&sg[i], buf, len);
nents = dma_map_sg(dev, sg, nents, dir);
dma_unmap_sg(dev, sg, nents, dir);

/* DMA pools */
pool = dma_pool_create(name, dev, size, align, boundary);
ptr = dma_pool_alloc(pool, GFP_ATOMIC, &dma_addr);
dma_pool_free(pool, ptr, dma_addr);
dma_pool_destroy(pool);