#ifndef _VIRTIO_RING_H_
#define _VIRTIO_RING_H_

#define PAGE_SHIFT (12)
#define PAGE_SIZE  (1<<PAGE_SHIFT)
#define PAGE_MASK  (PAGE_SIZE-1)

/* Status byte for guest to report progress, and synchronize features. */
/* We have seen device and processed generic fields (VIRTIO_CONFIG_F_VIRTIO) */
#define VIRTIO_CONFIG_S_ACKNOWLEDGE     1
/* We have found a driver for the device. */
#define VIRTIO_CONFIG_S_DRIVER          2
/* Driver has used its parts of the config, and is happy. */
#define VIRTIO_CONFIG_S_DRIVER_OK       4
/* We've given up on this device. */
#define VIRTIO_CONFIG_S_FAILED          0x80
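/*
 * Typical negotiation sequence (sketch only; how the status byte is written
 * is device-specific and not part of this header -- a set_status() style
 * helper from the virtio PCI layer is assumed here):
 *
 *     u8 status = VIRTIO_CONFIG_S_ACKNOWLEDGE;          // device seen
 *     set_status(ioaddr, status);
 *     status |= VIRTIO_CONFIG_S_DRIVER;                 // driver found
 *     set_status(ioaddr, status);
 *     ... feature negotiation, queue setup ...
 *     status |= VIRTIO_CONFIG_S_DRIVER_OK;              // driver ready
 *     set_status(ioaddr, status);
 *     // on any fatal error: set_status(ioaddr, VIRTIO_CONFIG_S_FAILED);
 */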

#define MAX_QUEUE_NUM      (512)

/* This descriptor continues via the 'next' field. */
#define VRING_DESC_F_NEXT  1
/* The buffer is write-only for the device (otherwise read-only). */
#define VRING_DESC_F_WRITE 2

/* Driver to device: don't interrupt me when you consume a buffer. */
#define VRING_AVAIL_F_NO_INTERRUPT 1

/* Device to driver: no need to notify me when you add a buffer. */
#define VRING_USED_F_NO_NOTIFY     1

struct vring_desc
{
   u64 addr;    /* guest-physical address of the buffer */
   u32 len;     /* buffer length in bytes */
   u16 flags;   /* VRING_DESC_F_NEXT and/or VRING_DESC_F_WRITE */
   u16 next;    /* index of the chained descriptor, if F_NEXT is set */
};

struct vring_avail
{
   u16 flags;   /* e.g. VRING_AVAIL_F_NO_INTERRUPT */
   u16 idx;     /* next free slot in ring[], incremented by the driver */
   u16 ring[0]; /* descriptor chain heads offered to the device */
};

struct vring_used_elem
{
   u32 id;      /* head index of the completed descriptor chain */
   u32 len;     /* total bytes written into the chain by the device */
};

struct vring_used
{
   u16 flags;   /* e.g. VRING_USED_F_NO_NOTIFY */
   u16 idx;     /* next free slot in ring[], incremented by the device */
   struct vring_used_elem ring[];
};

struct vring {
   unsigned int num;            /* number of descriptors */
   struct vring_desc *desc;     /* descriptor table (page aligned) */
   struct vring_avail *avail;   /* available ring, follows the descriptors */
   struct vring_used *used;     /* used ring (page aligned) */
};

#define vring_size(num) \
   (((((sizeof(struct vring_desc) * (num)) + \
      (sizeof(struct vring_avail) + sizeof(u16) * (num))) \
         + PAGE_MASK) & ~PAGE_MASK) + \
         (sizeof(struct vring_used) + sizeof(struct vring_used_elem) * (num)))

typedef unsigned char virtio_queue_t[PAGE_MASK + vring_size(MAX_QUEUE_NUM)];
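
/*
 * Size sketch (assumes the natural layout with no extra struct padding):
 * for num = MAX_QUEUE_NUM = 512,
 *     descriptor table: 16 * 512            =  8192 bytes
 *     available ring:    4 + 2 * 512        =  1028 bytes
 *     rounded up to the next page boundary  = 12288 bytes
 *     used ring:         4 + 8 * 512        =  4100 bytes
 * giving vring_size(512) = 16388 bytes. virtio_queue_t reserves PAGE_MASK
 * extra bytes so vring_init() can page-align the descriptor table wherever
 * the queue buffer happens to land in memory.
 */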

struct vring_virtqueue {
   virtio_queue_t queue;        /* backing storage for the whole ring */
   struct vring vring;          /* desc/avail/used views into 'queue' */
   u16 free_head;               /* head of the free descriptor chain */
   u16 last_used_idx;           /* last used->idx value processed by us */
   u16 vdata[MAX_QUEUE_NUM];    /* per-descriptor driver data */
   /* PCI */
   int queue_index;             /* virtqueue index on the device */
};

struct vring_list {
   char *addr;                  /* start of one buffer in the scatter list */
   unsigned int length;         /* its length in bytes */
};

static inline void vring_init(struct vring *vr,
                         unsigned int num, unsigned char *queue)
{
   unsigned int i;
   unsigned long pa;

   vr->num = num;

   /* physical address of desc must be page aligned */

   pa = virt_to_phys(queue);
   pa = (pa + PAGE_MASK) & ~PAGE_MASK;
   vr->desc = phys_to_virt(pa);

   vr->avail = (struct vring_avail *)&vr->desc[num];

   /* physical address of used must be page aligned */

   pa = virt_to_phys(&vr->avail->ring[num]);
   pa = (pa + PAGE_MASK) & ~PAGE_MASK;
   vr->used = phys_to_virt(pa);

   /* chain all descriptors into a circular free list */
   for (i = 0; i < num - 1; i++)
      vr->desc[i].next = i + 1;
   vr->desc[i].next = 0;
}
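
/*
 * Example (sketch): wire up the embedded ring of a virtqueue before first
 * use; 'vq' is assumed to be a zero-initialised struct vring_virtqueue.
 *
 *     struct vring_virtqueue *vq = ...;
 *     vring_init(&vq->vring, MAX_QUEUE_NUM, vq->queue);
 *     vq->free_head = 0;
 *     vq->last_used_idx = 0;
 */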

/* Allow the device to raise interrupts when buffers are consumed. */
static inline void vring_enable_cb(struct vring_virtqueue *vq)
{
   vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
}

/* Ask the device not to interrupt us; we will poll instead. */
static inline void vring_disable_cb(struct vring_virtqueue *vq)
{
   vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}


/*
 * vring_more_used
 *
 * Are there any used buffers to process?
 */

static inline int vring_more_used(struct vring_virtqueue *vq)
{
   wmb();
   return vq->last_used_idx != vq->vring.used->idx;
}

void vring_detach(struct vring_virtqueue *vq, unsigned int head);
int vring_get_buf(struct vring_virtqueue *vq, unsigned int *len);
void vring_add_buf(struct vring_virtqueue *vq, struct vring_list list[],
                   unsigned int out, unsigned int in,
                   int index, int num_added);
void vring_kick(unsigned int ioaddr, struct vring_virtqueue *vq, int num_added);
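
/*
 * End-to-end usage sketch. The exact meaning of the 'index' and 'num_added'
 * arguments is defined by the corresponding .c file and is only assumed
 * here; buffer 0 is device-readable, buffer 1 device-writable:
 *
 *     struct vring_list list[2] = {
 *        { .addr = (char *)req, .length = sizeof(*req) },  // out (readable)
 *        { .addr = (char *)buf, .length = buf_len },       // in  (writable)
 *     };
 *     unsigned int len;
 *
 *     vring_add_buf(vq, list, 1, 1, 0, 0);   // 1 out buffer, 1 in buffer
 *     vring_kick(ioaddr, vq, 1);             // notify the device
 *
 *     while (!vring_more_used(vq))
 *        ;                                   // poll for completion
 *     vring_get_buf(vq, &len);               // len = bytes written by device
 */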

#endif /* _VIRTIO_RING_H_ */