Chapter 6: Interrupt Handling
Table of Contents
- Introduction to Interrupts
- Interrupt Request Lines (IRQ)
- Registering Interrupt Handlers
- Top-Half and Bottom-Half
- Softirqs
- Tasklets
- Work Queues
- Threaded Interrupts
- Interrupt Sharing
- MSI and MSI-X Interrupts
Introduction to Interrupts
What Are Interrupts?
Interrupts are hardware signals that notify the CPU of events requiring immediate attention:
- Device has data ready (network packet arrived)
- I/O operation completed (disk read finished)
- Timer expired
- Hardware error occurred
Interrupt Flow
Hardware Event
↓
IRQ Line Asserted
↓
CPU Acknowledges Interrupt
↓
Save Current Context
↓
Execute Interrupt Handler (Top-Half)
↓
Schedule Bottom-Half (if needed)
↓
Restore Context
↓
Return to Interrupted Code
↓
(Later) Execute Bottom-Half
Interrupt Context
Code running in interrupt context has special restrictions:
- Cannot sleep or call blocking functions
- Cannot access user space (no current process)
- Must be fast (blocks other interrupts on same CPU)
- Limited stack space (typically 4KB or 8KB)
- Cannot hold semaphores/mutexes (only spinlocks)
/*
* What you CANNOT do in interrupt context:
*/
/*
 * Demonstration only: every commented-out call below is FORBIDDEN in
 * interrupt (atomic) context.  Executing any of them can deadlock or
 * panic the kernel.
 */
void interrupt_handler(void)
{
/* BAD: These will cause kernel panic or undefined behavior */
// sleep(1); /* Cannot sleep - no process to put to sleep */
// mutex_lock(&my_mutex); /* Mutex acquisition can sleep */
// kmalloc(size, GFP_KERNEL); /* GFP_KERNEL may block on reclaim */
// copy_to_user(...); /* Accesses user space; no valid 'current' */
// schedule(); /* Cannot voluntarily reschedule in IRQ context */
}
/*
* What you CAN do in interrupt context:
*/
/*
 * Demonstration only: operations that ARE legal in interrupt context.
 * (my_lock, size, wait_queue, my_work and counter are assumed to be
 * declared elsewhere - this fragment is illustrative.)
 */
void safe_interrupt_handler(void)
{
/* These are OK - none of them can sleep */
spin_lock(&my_lock); /* Spinlocks busy-wait; never sleep */
kmalloc(size, GFP_ATOMIC); /* GFP_ATOMIC allocates without blocking */
read_hw_register(); /* MMIO/port access is fine */
wake_up(&wait_queue); /* Waking a sleeper does not sleep itself */
schedule_work(&my_work); /* Defer heavy work to process context */
atomic_inc(&counter); /* Atomic ops are lock- and sleep-free */
}
Interrupt Request Lines (IRQ)
IRQ Numbers
/*
* IRQ numbers identify interrupt lines
*
* On x86:
* 0-15: Legacy ISA interrupts (PIC)
* 16+: PCI interrupts, MSI
*
* Modern systems use IRQ domains and mapping
*/
/* Check valid IRQ range */
#include <linux/interrupt.h>
unsigned int irq_number;
/* IRQ numbers are system-specific */
/* Usually obtained from:
* - Platform resources (platform_get_irq)
* - PCI configuration (pci_dev->irq)
* - Device tree (irq_of_parse_and_map)
*/
Interrupt Types
/*
* Edge-triggered vs Level-triggered
*/
/* Edge-triggered: Interrupt fires on signal transition (0→1 or 1→0) */
#define IRQF_TRIGGER_RISING 0x00000001
#define IRQF_TRIGGER_FALLING 0x00000002
/* Level-triggered: Interrupt fires while signal is high/low */
#define IRQF_TRIGGER_HIGH 0x00000004
#define IRQF_TRIGGER_LOW 0x00000008
Registering Interrupt Handlers
request_irq()
#include <linux/interrupt.h>
/*
* request_irq - Register an interrupt handler
*
* @irq: IRQ number
* @handler: Interrupt handler function
* @flags: IRQ flags (sharing, triggering)
* @name: Name for /proc/interrupts
* @dev: Device ID (for shared IRQs)
*
* Return: 0 on success, negative error code on failure
*/
int request_irq(unsigned int irq,
irq_handler_t handler,
unsigned long flags,
const char *name,
void *dev);
/*
* Interrupt handler prototype
*
* @irq: IRQ number that occurred
* @dev: Device ID passed to request_irq
*
* Return: IRQ_HANDLED if interrupt was handled
* IRQ_NONE if interrupt wasn't from this device
*/
typedef irqreturn_t (*irq_handler_t)(int irq, void *dev);
/*
* IRQ flags
*/
#define IRQF_SHARED 0x00000080 /* Share IRQ with other devices */
#define IRQF_TRIGGER_NONE 0x00000000 /* Default trigger */
#define IRQF_ONESHOT 0x00002000 /* For threaded interrupts */
#define IRQF_NO_SUSPEND 0x00004000 /* Don't disable during suspend */
/*
* free_irq - Unregister interrupt handler
*
* @irq: IRQ number
* @dev: Device ID (must match request_irq)
*
* Waits for any running handler to complete
*/
void free_irq(unsigned int irq, void *dev);
Basic Interrupt Handler Example
/*
* simple_irq.c - Simple interrupt handler example
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#define MY_IRQ 11 /* Example IRQ number */
static int irq_counter = 0;
/*
* Interrupt handler
*
* This is the "top-half" - must be FAST!
*/
/*
 * Top-half interrupt handler: keep it short, acknowledge the device,
 * and get out.  Heavy processing belongs in a bottom-half.
 */
static irqreturn_t my_interrupt_handler(int irq, void *dev_id)
{
	irq_counter++;
	pr_info("Interrupt! IRQ=%d, count=%d\n", irq, irq_counter);

	/* A real driver would clear the device's interrupt source here,
	 * e.g. by writing a hardware status register. */
	// ack_hardware_interrupt();

	/* Tell the kernel this interrupt was ours and has been serviced. */
	return IRQ_HANDLED;
}
/*
 * Module init: attach my_interrupt_handler to MY_IRQ.
 *
 * flags == 0 means an exclusive (non-shared) line, so a NULL dev_id
 * is acceptable.  "my_device" is the label shown in /proc/interrupts.
 */
static int __init irq_example_init(void)
{
	int err;

	pr_info("Requesting IRQ %d\n", MY_IRQ);

	err = request_irq(MY_IRQ, my_interrupt_handler, 0, "my_device", NULL);
	if (err) {
		pr_err("Failed to request IRQ %d: %d\n", MY_IRQ, err);
		return err;
	}

	pr_info("IRQ %d registered successfully\n", MY_IRQ);
	return 0;
}
/*
 * Module exit: detach the handler.  free_irq() waits for any in-flight
 * invocation of the handler to finish before returning.
 */
static void __exit irq_example_exit(void)
{
	free_irq(MY_IRQ, NULL);
	pr_info("IRQ %d freed, handled %d interrupts\n", MY_IRQ, irq_counter);
}
module_init(irq_example_init);
module_exit(irq_example_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simple interrupt handler example");
Top-Half and Bottom-Half
Theory: Why Split Processing?
Top-Half (Interrupt Handler):
- Runs in interrupt context
- Must be VERY fast (microseconds)
- Acknowledges interrupt, saves data
- Schedules bottom-half
Bottom-Half (Deferred Work):
- Runs later in process/softirq context
- Can take longer
- Does actual processing
- Can sleep (if using work queues)
Interrupt Occurs
↓
┌─────────────────────────┐
│ TOP-HALF (Fast) │
│ - Acknowledge HW │
│ - Read critical data │
│ - Schedule bottom-half │
└───────────┬─────────────┘
│
↓
Return from Interrupt
│
↓
┌─────────────────────────┐
│ BOTTOM-HALF (Slow) │
│ - Process data │
│ - Complex operations │
│ - Can sleep (workqueue) │
└─────────────────────────┘
Bottom-Half Mechanisms
/*
* Three main bottom-half mechanisms:
*
* 1. Softirqs
* - Lowest level, highest priority
* - Fixed number, compile-time defined
* - Used by kernel subsystems
* - Drivers rarely use directly
*
* 2. Tasklets
* - Built on top of softirqs
* - Dynamically created
* - Easy to use
* - Cannot sleep
* - Most common for drivers
*
* 3. Work Queues
* - Run in process context
* - Can sleep
* - Lower priority than softirqs/tasklets
* - Use for longer processing
*/
Softirqs
Theory: Softirqs
Softirqs are the lowest-level deferred work mechanism:
- Fixed set defined at compile time
- High priority (run before process scheduling)
- Can run on any CPU
- Complex to use correctly
#include <linux/interrupt.h>
/*
* Predefined softirqs (enum in kernel)
*/
enum {
HI_SOFTIRQ=0, /* High priority */
TIMER_SOFTIRQ, /* Timers */
NET_TX_SOFTIRQ, /* Network transmit */
NET_RX_SOFTIRQ, /* Network receive */
BLOCK_SOFTIRQ, /* Block devices */
IRQ_POLL_SOFTIRQ, /* IRQ polling */
TASKLET_SOFTIRQ, /* Tasklets */
SCHED_SOFTIRQ, /* Scheduler */
HRTIMER_SOFTIRQ, /* High-res timers */
RCU_SOFTIRQ, /* RCU callbacks */
NR_SOFTIRQS
};
/*
* Raising a softirq (kernel internal use)
*/
void raise_softirq(unsigned int nr);
Softirqs are typically not used directly by drivers. Instead, use tasklets (which are built on softirqs).
Tasklets
Theory: Tasklets
Tasklets have traditionally been the easy way to defer interrupt work (note: tasklets are deprecated in recent kernels — prefer threaded interrupts or work queues for new drivers):
- Dynamic creation (unlike softirqs)
- Guaranteed not to run concurrently (same tasklet won’t run on multiple CPUs)
- Cannot sleep
- Easy to use
#include <linux/interrupt.h>
/*
* struct tasklet_struct - Tasklet structure
*/
struct tasklet_struct {
struct tasklet_struct *next;
unsigned long state;
atomic_t count;
void (*func)(unsigned long);
unsigned long data;
};
/*
* Static initialization
*/
void my_tasklet_func(unsigned long data);
DECLARE_TASKLET(my_tasklet, my_tasklet_func, data);
DECLARE_TASKLET_DISABLED(my_tasklet, my_tasklet_func, data);
/*
* Dynamic initialization
*/
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long),
unsigned long data);
/*
* Scheduling tasklet
*/
void tasklet_schedule(struct tasklet_struct *t);
void tasklet_hi_schedule(struct tasklet_struct *t); /* High priority */
/*
* Disabling/enabling tasklet
*/
void tasklet_disable(struct tasklet_struct *t); /* Wait for completion */
void tasklet_disable_nosync(struct tasklet_struct *t); /* Don't wait */
void tasklet_enable(struct tasklet_struct *t);
/*
* Killing tasklet
*/
void tasklet_kill(struct tasklet_struct *t); /* Wait and prevent rescheduling */
Tasklet Examples
/*
* Example 1: Basic tasklet usage
*/
struct my_device {
struct tasklet_struct tasklet;
spinlock_t lock;
unsigned char buffer[256];
size_t data_len;
};
static struct my_device *mydev;
/*
* Tasklet function (bottom-half)
*
* Runs in softirq context (cannot sleep!)
*/
/*
 * Tasklet function (bottom-half).
 *
 * Runs in softirq context: may take longer than the top-half, but must
 * still never sleep.
 *
 * Fixes vs. the original:
 *  - loop index is size_t, matching 'len' (avoids signed/unsigned
 *    comparison) and printed with %zu;
 *  - the copy length is clamped to sizeof(local_buffer) so a corrupt
 *    or oversized data_len cannot overflow the stack buffer.
 */
static void my_tasklet_func(unsigned long data)
{
	struct my_device *dev = (struct my_device *)data;
	unsigned long flags;
	unsigned char local_buffer[256];
	size_t len;

	pr_info("Tasklet: Processing deferred work\n");

	/* Snapshot the device buffer under the lock shared with the IRQ
	 * handler, then release it before the (slower) processing loop. */
	spin_lock_irqsave(&dev->lock, flags);
	len = min(dev->data_len, sizeof(local_buffer));
	memcpy(local_buffer, dev->buffer, len);
	dev->data_len = 0;
	spin_unlock_irqrestore(&dev->lock, flags);

	/* Process the snapshot; can take time, but still cannot sleep. */
	for (size_t i = 0; i < len; i++)
		pr_debug("Byte %zu: 0x%02x\n", i, local_buffer[i]);

	pr_info("Tasklet: Processing complete\n");
}
/*
* Interrupt handler (top-half)
*/
/*
 * Interrupt handler (top-half): capture the hardware data under the
 * lock shared with the tasklet, then defer processing.
 */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_device *dev = dev_id;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->lock, irqflags);
	dev->data_len = read_hardware_data(dev->buffer, sizeof(dev->buffer));
	spin_unlock_irqrestore(&dev->lock, irqflags);

	/* Hand the heavy lifting to the bottom-half. */
	tasklet_schedule(&dev->tasklet);

	return IRQ_HANDLED;
}
/*
 * Allocate the device state, set up the lock and tasklet, then hook
 * up the interrupt line.  Order matters: the tasklet must be ready
 * before the IRQ handler can schedule it.
 */
static int device_init(void)
{
	int err;

	mydev = kzalloc(sizeof(*mydev), GFP_KERNEL);
	if (!mydev)
		return -ENOMEM;

	spin_lock_init(&mydev->lock);
	tasklet_init(&mydev->tasklet, my_tasklet_func, (unsigned long)mydev);

	err = request_irq(MY_IRQ, my_irq_handler, 0, "mydevice", mydev);
	if (err) {
		kfree(mydev);
		return err;
	}

	return 0;
}
/*
 * Teardown.  The ORDER here is deliberate:
 *  1. free_irq() first - after it returns, the handler can no longer
 *     run, so nothing can re-schedule the tasklet;
 *  2. tasklet_kill() waits for any pending/running tasklet to finish;
 *  3. only then is it safe to free the structure both of them use.
 */
static void device_cleanup(void)
{
/* Detach IRQ handler (waits for a running handler to complete) */
free_irq(MY_IRQ, mydev);
/* Kill tasklet (waits for completion, prevents rescheduling) */
tasklet_kill(&mydev->tasklet);
kfree(mydev);
}
/*
* Example 2: Network driver pattern (simplified)
*/
struct net_device_priv {
struct tasklet_struct rx_tasklet;
struct sk_buff_head rx_queue;
};
/*
 * RX bottom-half: drain every packet the IRQ handler queued.
 * skb_dequeue() does its own locking, so it is safe against the
 * concurrent producer in net_irq_handler().
 */
static void rx_tasklet_func(unsigned long data)
{
	struct net_device_priv *priv = (struct net_device_priv *)data;
	struct sk_buff *skb;

	for (skb = skb_dequeue(&priv->rx_queue); skb != NULL;
	     skb = skb_dequeue(&priv->rx_queue))
		process_received_packet(skb);
}
/*
 * Network IRQ top-half: pull the packet out of the hardware, queue it,
 * and let the tasklet do the real work.
 *
 * Fix vs. the original: data was copied into skb->data without calling
 * skb_put(), so skb->len stayed 0 and the bottom-half would see an
 * empty packet.  skb_put() reserves the payload area and updates the
 * skb's length accounting in one step.
 */
static irqreturn_t net_irq_handler(int irq, void *dev_id)
{
	struct net_device_priv *priv = dev_id;
	struct sk_buff *skb;

	/* GFP_ATOMIC: we are in interrupt context and cannot sleep. */
	skb = alloc_skb(PKT_SIZE, GFP_ATOMIC);
	if (skb) {
		/* skb_put() extends the data area and sets skb->len. */
		read_packet_from_hw(skb_put(skb, PKT_SIZE));
		/* Queue for processing */
		skb_queue_tail(&priv->rx_queue, skb);
		/* Schedule tasklet */
		tasklet_schedule(&priv->rx_tasklet);
	}
	return IRQ_HANDLED;
}
Work Queues
Theory: Work Queues
Work queues run in process context:
- Can sleep (can use mutexes, kmalloc with GFP_KERNEL, etc.)
- Lower priority than tasklets
- More flexible (can perform blocking operations)
- Use for: Long-running deferred work
#include <linux/workqueue.h>
/*
* struct work_struct - Work structure
*/
struct work_struct {
atomic_long_t data;
struct list_head entry;
work_func_t func;
};
/*
* Static initialization
*/
void my_work_func(struct work_struct *work);
DECLARE_WORK(my_work, my_work_func);
DECLARE_DELAYED_WORK(my_delayed_work, my_work_func);
/*
* Dynamic initialization
*/
INIT_WORK(struct work_struct *work, work_func_t func);
INIT_DELAYED_WORK(struct delayed_work *work, work_func_t func);
/*
* Scheduling work
*/
/* Schedule work on system workqueue */
bool schedule_work(struct work_struct *work);
/* Schedule delayed work (after delay in jiffies) */
bool schedule_delayed_work(struct delayed_work *work, unsigned long delay);
/* Schedule work on specific CPU */
bool schedule_work_on(int cpu, struct work_struct *work);
/*
* Canceling work
*/
bool cancel_work_sync(struct work_struct *work); /* Wait for completion */
bool cancel_delayed_work_sync(struct delayed_work *work);
/*
* Creating dedicated workqueue
*/
struct workqueue_struct *create_workqueue(const char *name);
struct workqueue_struct *create_singlethread_workqueue(const char *name);
struct workqueue_struct *alloc_workqueue(const char *fmt, unsigned int flags,
int max_active, ...);
void destroy_workqueue(struct workqueue_struct *wq);
/* Schedule work on dedicated workqueue */
bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
bool queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
Work Queue Examples
/*
* Example 1: Simple work queue
*/
struct my_data {
struct work_struct work;
int value;
};
static struct my_data data;
/*
* Work function (runs in process context)
*
* CAN sleep, use mutexes, access user space, etc.
*/
/*
 * Work function - executes in process context, so everything that is
 * forbidden in an interrupt handler (sleeping, mutexes, GFP_KERNEL
 * allocation) is allowed here.
 */
static void my_work_func(struct work_struct *work)
{
	struct my_data *d = container_of(work, struct my_data, work);
	void *buf;

	pr_info("Work: Processing value=%d\n", d->value);

	/* Sleeping is legal in process context. */
	msleep(100);

	/* So is taking a mutex. */
	mutex_lock(&some_mutex);
	/* ... do work ... */
	mutex_unlock(&some_mutex);

	/* And allocating with GFP_KERNEL. */
	buf = kmalloc(4096, GFP_KERNEL);
	if (buf) {
		/* Use buffer */
		kfree(buf);
	}

	pr_info("Work: Complete\n");
}
/*
 * Top-half that defers everything to the system workqueue.
 */
static irqreturn_t irq_handler_with_work(int irq, void *dev_id)
{
	pr_info("IRQ: Scheduling work\n");

	/* Queue my_work_func() to run later in process context. */
	schedule_work(&data.work);

	return IRQ_HANDLED;
}
/*
 * Bind the work item to its function and seed the payload.
 * (IRQ registration omitted for brevity.)
 */
static int init_work_example(void)
{
	INIT_WORK(&data.work, my_work_func);
	data.value = 42;

	/* Request IRQ... */
	return 0;
}
/*
 * Tear down: cancel_work_sync() both cancels a pending item and waits
 * for a currently-running one to finish before returning.
 */
static void cleanup_work_example(void)
{
	cancel_work_sync(&data.work);
}
/*
* Example 2: Delayed work
*/
static struct delayed_work periodic_work;
/*
 * Self-rescheduling delayed work: each execution queues the next one,
 * producing a ~5-second periodic tick.
 *
 * NOTE: this pattern is safe to stop only via
 * cancel_delayed_work_sync() (see stop_periodic_work), which handles
 * the race where the work re-queues itself while being cancelled.
 */
static void periodic_work_func(struct work_struct *work)
{
pr_info("Periodic work executed\n");
/* Perform the periodic task */
check_device_status();
/* Re-arm ourselves for the next execution in 5 seconds */
schedule_delayed_work(&periodic_work, msecs_to_jiffies(5000));
}
/*
 * Arm the periodic work; the first run happens 5 seconds from now and
 * the work function re-arms itself thereafter.
 */
static int start_periodic_work(void)
{
	INIT_DELAYED_WORK(&periodic_work, periodic_work_func);
	schedule_delayed_work(&periodic_work, msecs_to_jiffies(5000));
	return 0;
}
/*
 * Stop the periodic tick.  The _sync variant also defeats the
 * self-rescheduling in periodic_work_func().
 */
static void stop_periodic_work(void)
{
	cancel_delayed_work_sync(&periodic_work);
}
/*
* Example 3: Dedicated workqueue
*/
struct device_with_wq {
struct workqueue_struct *wq;
struct work_struct work;
};
static struct device_with_wq *dev_wq;
/*
 * Work item executed on the dedicated workqueue - a good place for
 * long-running operations that should not tie up the system workqueue.
 */
static void dedicated_work_func(struct work_struct *work)
{
	pr_info("Dedicated workqueue: Processing\n");
	perform_heavy_computation();
}
/*
 * Allocate the device wrapper and its single-threaded workqueue.
 * (create_singlethread_workqueue() is the legacy API shown by this
 * chapter; newer code typically uses alloc_ordered_workqueue().)
 */
static int create_dedicated_wq(void)
{
	dev_wq = kzalloc(sizeof(*dev_wq), GFP_KERNEL);
	if (!dev_wq)
		return -ENOMEM;

	dev_wq->wq = create_singlethread_workqueue("my_device_wq");
	if (!dev_wq->wq) {
		kfree(dev_wq);
		return -ENOMEM;
	}

	INIT_WORK(&dev_wq->work, dedicated_work_func);
	return 0;
}
/*
 * destroy_workqueue() drains any queued work before tearing the
 * queue down, so freeing dev_wq afterwards is safe.
 */
static void destroy_dedicated_wq(void)
{
	destroy_workqueue(dev_wq->wq);
	kfree(dev_wq);
}
/* Queue the work item on our private workqueue (not the system one). */
static void schedule_on_dedicated_wq(void)
{
queue_work(dev_wq->wq, &dev_wq->work);
}
Threaded Interrupts
Theory: Threaded Interrupts
Modern approach that simplifies interrupt handling:
- Interrupt handler runs in dedicated kernel thread
- Can sleep and use blocking operations
- Automatic top-half/bottom-half split
#include <linux/interrupt.h>
/*
* request_threaded_irq - Register threaded interrupt handler
*
* @irq: IRQ number
* @handler: Top-half (hardirq context) - quick handler
* @thread_fn: Bottom-half (thread context) - can sleep
* @flags: IRQ flags (must include IRQF_ONESHOT for level-triggered)
* @name: Name for /proc/interrupts
* @dev: Device ID
*
* Return: 0 on success, negative error code on failure
*/
int request_threaded_irq(unsigned int irq,
irq_handler_t handler,
irq_handler_t thread_fn,
unsigned long flags,
const char *name,
void *dev);
/*
* If handler is NULL, default handler is used which:
* - Returns IRQ_WAKE_THREAD to wake thread_fn
*/
Threaded Interrupt Examples
/*
* Example 1: Simple threaded interrupt
*/
/*
* Quick handler (top-half) - optional
*
* Runs in hardirq context
* Should be very fast
*/
/*
 * Hardirq-context part of a threaded interrupt: acknowledge the device
 * quickly, then request that the kernel wake the thread handler.
 */
static irqreturn_t quick_handler(int irq, void *dev_id)
{
/* Quick hardware acknowledgment (device-specific helper) */
ack_hardware();
/* IRQ_WAKE_THREAD tells genirq to run threaded_handler() next */
return IRQ_WAKE_THREAD;
}
/*
* Threaded handler (bottom-half)
*
* Runs in thread context
* Can sleep, use mutexes, etc.
*/
/*
 * Thread-context part of the interrupt: blocking operations are fine
 * here (mutexes, sleeping, GFP_KERNEL allocation).
 *
 * NOTE(review): the struct my_device shown earlier in this chapter
 * declares 'lock' as a spinlock_t, while a mutex is taken here -
 * confirm which lock type this example intends.
 */
static irqreturn_t threaded_handler(int irq, void *dev_id)
{
	struct my_device *dev = dev_id;
	void *buf;

	pr_info("Threaded handler: Processing interrupt\n");

	mutex_lock(&dev->lock);

	/* Sleeping is allowed in thread context. */
	msleep(10);

	/* As is GFP_KERNEL allocation. */
	buf = kmalloc(4096, GFP_KERNEL);
	if (buf) {
		process_interrupt_data(dev, buf);
		kfree(buf);
	}

	mutex_unlock(&dev->lock);
	return IRQ_HANDLED;
}
/*
 * Register the quick + threaded handler pair.
 *
 * IRQF_ONESHOT keeps the line masked until the thread handler has
 * finished - mandatory for level-triggered interrupts, otherwise the
 * still-asserted line would re-fire immediately.
 */
static int register_threaded_irq_example(void)
{
	int err;

	err = request_threaded_irq(MY_IRQ, quick_handler, threaded_handler,
				   IRQF_ONESHOT, "my_device", mydev);
	if (err) {
		pr_err("Failed to request threaded IRQ\n");
		return err;
	}

	return 0;
}
/*
* Example 2: Threaded interrupt without top-half
*/
/*
 * Thread handler used without a custom top-half: the kernel's default
 * hardirq handler wakes this thread, and all processing happens here.
 */
static irqreturn_t simple_threaded_handler(int irq, void *dev_id)
{
/* All processing in thread context */
pr_info("Simple threaded handler\n");
/* Blocking operations are allowed here */
handle_interrupt();
return IRQ_HANDLED;
}
/*
 * Threaded IRQ with no custom top-half: passing NULL as the hardirq
 * handler makes the kernel install a default one that simply wakes
 * the thread.  IRQF_ONESHOT is required in this configuration.
 */
static int register_simple_threaded(void)
{
	return request_threaded_irq(MY_IRQ,
				    NULL, /* use the kernel's default top-half */
				    simple_threaded_handler,
				    IRQF_ONESHOT,
				    "simple_device",
				    mydev);
}
Interrupt Sharing
Shared IRQs
Multiple devices can share the same IRQ line:
/*
* Shared interrupt handler
*
* Must check if interrupt is from this device
* Return IRQ_NONE if not ours
*/
/*
 * Handler for a shared interrupt line: it MUST determine whether its
 * own device raised the interrupt and return IRQ_NONE otherwise, so
 * the kernel can try the other handlers registered on this line.
 */
static irqreturn_t shared_irq_handler(int irq, void *dev_id)
{
	struct my_device *dev = dev_id;
	u32 status = read_device_register(dev, STATUS_REG);

	/* Not our device? Let the next handler on the line check. */
	if (!(status & IRQ_PENDING))
		return IRQ_NONE;

	/* Acknowledge by writing the status back, then service it. */
	write_device_register(dev, STATUS_REG, status);
	handle_device_interrupt(dev);

	return IRQ_HANDLED;
}
/*
 * Register on a shared line.  With IRQF_SHARED the dev_id must be a
 * unique, non-NULL cookie (conventionally the device structure): it
 * distinguishes this registration in free_irq() and is handed back to
 * the handler on every interrupt.
 */
static int register_shared_irq(void)
{
	return request_irq(SHARED_IRQ,
			   shared_irq_handler,
			   IRQF_SHARED, /* Shared IRQ */
			   "my_device",
			   mydev); /* Must be non-NULL for shared IRQs */
}
/*
 * Remove our handler from the shared line.  The dev_id must be the
 * exact cookie passed to request_irq() so the kernel knows which of
 * the line's registered handlers to detach.
 */
static void unregister_shared_irq(void)
{
free_irq(SHARED_IRQ, mydev);
}
MSI and MSI-X Interrupts
Theory: MSI/MSI-X
MSI (Message Signaled Interrupts):
- PCIe devices write to memory instead of asserting IRQ line
- No IRQ sharing
- Better performance
- Multiple interrupt vectors per device
MSI-X (Extended):
- More interrupt vectors (up to 2048)
- Each vector can have independent handler
- Better for multi-queue devices
#include <linux/pci.h>
/*
* Enable MSI
*/
int pci_enable_msi(struct pci_dev *dev);
void pci_disable_msi(struct pci_dev *dev);
/*
* Enable MSI-X
*/
int pci_enable_msix_range(struct pci_dev *dev,
struct msix_entry *entries,
int minvec, int maxvec);
void pci_disable_msix(struct pci_dev *dev);
/*
* Example: MSI setup
*/
/*
 * Enable MSI on the PCI device, then attach a handler to the vector
 * the kernel assigned (visible afterwards in pdev->irq).  On handler
 * registration failure, MSI is disabled again before returning.
 */
static int setup_msi(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable MSI\n");
		return err;
	}

	err = request_irq(pdev->irq, my_msi_handler, 0, "my_device", mydev);
	if (err) {
		pci_disable_msi(pdev);
		return err;
	}

	return 0;
}
/*
 * Teardown mirrors setup in reverse: release the vector's handler
 * first, then disable MSI on the device.
 */
static void cleanup_msi(struct pci_dev *pdev)
{
	free_irq(pdev->irq, mydev);
	pci_disable_msi(pdev);
}
Summary
In this chapter, you learned:
✅ Interrupt Basics: Hardware signals and IRQ lines
✅ Interrupt Handlers: Top-half fast processing
✅ Tasklets: Deferred work in softirq context
✅ Work Queues: Deferred work in process context (can sleep)
✅ Threaded Interrupts: Modern simplified interrupt handling
✅ Shared IRQs: Multiple devices sharing interrupt lines
✅ MSI/MSI-X: Modern message-based interrupts
Key Takeaways
- Keep top-half fast - defer processing to bottom-half
- Use tasklets for deferred work that can’t sleep
- Use work queues for deferred work that can sleep
- Threaded interrupts simplify code for modern drivers
- Always check IRQ source for shared interrupts
Next Steps
Proceed to 07-dma.md to learn about Direct Memory Access (DMA) for high-performance data transfer.
Quick Reference
/* Request IRQ */
request_irq(irq, handler, flags, name, dev_id);
free_irq(irq, dev_id);
/* Tasklet */
DECLARE_TASKLET(name, func, data);
tasklet_schedule(&tasklet);
tasklet_kill(&tasklet);
/* Work queue */
DECLARE_WORK(name, func);
schedule_work(&work);
cancel_work_sync(&work);
/* Delayed work */
DECLARE_DELAYED_WORK(name, func);
schedule_delayed_work(&work, delay);
/* Threaded interrupt */
request_threaded_irq(irq, quick_fn, thread_fn,
IRQF_ONESHOT, name, dev_id);