// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use super::xhci_abi::{
    AddressedTrb, Error as TrbError, LinkTrb, TransferDescriptor, Trb, TrbCast, TrbType,
};
use std::fmt::{self, Display};
use std::mem::size_of;
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};

#[derive(Debug)]
pub enum Error {
    ReadGuestMemory(GuestMemoryError),
    BadDequeuePointer(GuestAddress),
    CastTrb(TrbError),
    TrbChain(TrbError),
}

type Result<T> = std::result::Result<T, Error>;

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Error::*;

        match self {
            ReadGuestMemory(e) => write!(f, "cannot read guest memory: {}", e),
            BadDequeuePointer(addr) => write!(f, "bad dequeue pointer: {}", addr),
            CastTrb(e) => write!(f, "cannot cast trb: {}", e),
            TrbChain(e) => write!(f, "cannot get trb chain bit: {}", e),
        }
    }
}

/// A ring buffer is a segmented circular buffer in guest memory containing work items
/// called transfer descriptors, each of which consists of one or more TRBs.
/// Ring buffer logic is shared between the transfer ring and the command ring.
/// Transfer Ring management is defined in xHCI spec 4.9.2.
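///
/// A minimal usage sketch (illustrative only, marked `ignore` so it is not compiled as a
/// doctest; the populated `mem`, the dequeue pointer, and the cycle state are assumed to
/// come from the guest driver):
///
/// ```ignore
/// let mut ring = RingBuffer::new("transfer ring".to_owned(), mem.clone());
/// ring.set_dequeue_pointer(GuestAddress(0x100));
/// ring.set_consumer_cycle_state(true); // Must match the producer's current cycle bit.
/// while let Some(td) = ring.dequeue_transfer_descriptor()? {
///     // Handle one transfer descriptor (one or more TRBs).
/// }
/// ```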
pub struct RingBuffer {
    name: String,
    mem: GuestMemory,
    dequeue_pointer: GuestAddress,
    // Used to check if the ring is empty. Toggled when looping back to the beginning
    // of the buffer.
    consumer_cycle_state: bool,
}

impl Display for RingBuffer {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "RingBuffer `{}`", self.name)
    }
}

// Public interfaces for Ring buffer.
impl RingBuffer {
    /// Create a new RingBuffer.
    pub fn new(name: String, mem: GuestMemory) -> Self {
        RingBuffer {
            name,
            mem,
            dequeue_pointer: GuestAddress(0),
            consumer_cycle_state: false,
        }
    }

    /// Dequeue next transfer descriptor from the transfer ring.
    pub fn dequeue_transfer_descriptor(&mut self) -> Result<Option<TransferDescriptor>> {
        let mut td: TransferDescriptor = TransferDescriptor::new();
        while let Some(addressed_trb) = self.get_current_trb()? {
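            // A link trb is not part of the transfer descriptor; it redirects the dequeue
            // pointer to the next ring segment and, if its toggle cycle bit is set, flips
            // the consumer cycle state.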
            if let Ok(TrbType::Link) = addressed_trb.trb.get_trb_type() {
                let link_trb = addressed_trb
                    .trb
                    .cast::<LinkTrb>()
                    .map_err(Error::CastTrb)?;
                self.dequeue_pointer = GuestAddress(link_trb.get_ring_segment_pointer());
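                // `!=` acts as an xor here: the consumer cycle state is flipped only when
                // the link trb's toggle cycle bit is set.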
                self.consumer_cycle_state =
                    self.consumer_cycle_state != link_trb.get_toggle_cycle();
                continue;
            }

            self.dequeue_pointer = match self.dequeue_pointer.checked_add(size_of::<Trb>() as u64) {
                Some(addr) => addr,
                None => {
                    return Err(Error::BadDequeuePointer(self.dequeue_pointer));
                }
            };

            usb_debug!(
                "{}: adding trb to td {}",
                self.name.as_str(),
                addressed_trb.trb
            );
            td.push(addressed_trb);
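            // A cleared chain bit marks the final trb of this transfer descriptor.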
            if !addressed_trb.trb.get_chain_bit().map_err(Error::TrbChain)? {
                usb_debug!("trb chain is false returning");
                break;
            }
        }
        // A valid transfer descriptor contains at least one addressed trb, and its last
        // trb must have the chain bit cleared.
        match td.last() {
            Some(t) => {
                if t.trb.get_chain_bit().map_err(Error::TrbChain)? {
                    return Ok(None);
                }
            }
            None => return Ok(None),
        }
        Ok(Some(td))
    }

    /// Set dequeue pointer of the ring buffer.
    pub fn set_dequeue_pointer(&mut self, addr: GuestAddress) {
        usb_debug!("{}: set dequeue pointer {:x}", self.name.as_str(), addr.0);

        self.dequeue_pointer = addr;
    }

    /// Set consumer cycle state of the ring buffer.
    pub fn set_consumer_cycle_state(&mut self, state: bool) {
        usb_debug!("{}: set consumer cycle state {}", self.name.as_str(), state);
        self.consumer_cycle_state = state;
    }

    // Reads the trb pointed to by the dequeue pointer. Does not advance the dequeue pointer.
    fn get_current_trb(&self) -> Result<Option<AddressedTrb>> {
        let trb: Trb = self
            .mem
            .read_obj_from_addr(self.dequeue_pointer)
            .map_err(Error::ReadGuestMemory)?;
        usb_debug!("{}: trb read from memory {:?}", self.name.as_str(), trb);
        // If the cycle bit of the trb does not equal the consumer cycle state, the ring is
        // empty and this trb is invalid.
        if trb.get_cycle() != self.consumer_cycle_state {
            usb_debug!(
                "cycle bit does not match, self cycle {}",
                self.consumer_cycle_state
            );
            Ok(None)
        } else {
            Ok(Some(AddressedTrb {
                trb,
                gpa: self.dequeue_pointer.0,
            }))
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::usb::xhci::xhci_abi::*;

    #[test]
    fn ring_test_dequeue() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        // Structure of ring buffer:
        //  0x100  --> 0x200  --> 0x300
        //  trb 1  |   trb 3  |   trb 5
        //  trb 2  |   trb 4  |   trb 6
        //  l trb  -   l trb  -   l trb to 0x100
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(1);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb.clone(), GuestAddress(0x100))
            .unwrap();

        trb.set_data_buffer(2);
        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
            .unwrap();

        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x200);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer(3);
        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer(4);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x200 + 1 * trb_size))
            .unwrap();

        ltrb.set_ring_segment_pointer(0x300);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x200 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer(5);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb, GuestAddress(0x300)).unwrap();

        // Chain bit is false.
        trb.set_data_buffer(6);
        trb.set_chain(false);
        gm.write_obj_at_addr(trb, GuestAddress(0x300 + 1 * trb_size))
            .unwrap();

        ltrb.set_ring_segment_pointer(0x100);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x300 + 2 * trb_size))
            .unwrap();

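        // None of the trbs above had their cycle bit set, so they all match the consumer
        // cycle state of false below and are treated as valid.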
        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 4);
        assert_eq!(descriptor[0].trb.get_parameter(), 1);
        assert_eq!(descriptor[1].trb.get_parameter(), 2);
        assert_eq!(descriptor[2].trb.get_parameter(), 3);
        assert_eq!(descriptor[3].trb.get_parameter(), 4);

        // Read second transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 2);
        assert_eq!(descriptor[0].trb.get_parameter(), 5);
        assert_eq!(descriptor[1].trb.get_parameter(), 6);
    }

    #[test]
    fn transfer_ring_test_dequeue_failure() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(1);
        trb.set_chain(true);
        gm.write_obj_at_addr(trb.clone(), GuestAddress(0x100))
            .unwrap();

        trb.set_data_buffer(2);
        gm.write_obj_at_addr(trb, GuestAddress(0x100 + trb_size))
            .unwrap();

        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x200);
        ltrb.set_toggle_cycle(true);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + 2 * trb_size))
            .unwrap();

        trb.set_data_buffer(3);
        gm.write_obj_at_addr(trb, GuestAddress(0x200)).unwrap();

        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

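        // Following the link trb flips the consumer cycle state to true, but the trb at
        // 0x200 still has its cycle bit at 0, so the chain ends before any trb with the
        // chain bit cleared and no complete transfer descriptor is available.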
        // Read first transfer descriptor.
        let descriptor = transfer_ring.dequeue_transfer_descriptor().unwrap();
        assert_eq!(descriptor.is_none(), true);
    }

    #[test]
    fn ring_test_toggle_cycle() {
        let trb_size = size_of::<Trb>() as u64;
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut transfer_ring = RingBuffer::new(String::new(), gm.clone());

        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(1);
        trb.set_chain(false);
        trb.set_cycle(false);
        gm.write_obj_at_addr(trb.clone(), GuestAddress(0x100))
            .unwrap();

        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x100);
        ltrb.set_toggle_cycle(true);
        ltrb.set_cycle(false);
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Initial state: consumer cycle = false
        transfer_ring.set_dequeue_pointer(GuestAddress(0x100));
        transfer_ring.set_consumer_cycle_state(false);

        // Read first transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 1);

        // Cycle bit should be unchanged since we haven't advanced past the Link TRB yet.
        assert_eq!(transfer_ring.consumer_cycle_state, false);

        // Overwrite the first TRB with a new one (data = 2)
        // with the new producer cycle bit state (true).
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(2);
        trb.set_cycle(true); // Link TRB toggled the cycle.
        gm.write_obj_at_addr(trb.clone(), GuestAddress(0x100))
            .unwrap();

        // Read new transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 2);

        assert_eq!(transfer_ring.consumer_cycle_state, true);

        // Update the Link TRB with the new cycle bit.
        let mut ltrb = LinkTrb::new();
        ltrb.set_trb_type(TrbType::Link);
        ltrb.set_ring_segment_pointer(0x100);
        ltrb.set_toggle_cycle(true);
        ltrb.set_cycle(true); // Producer cycle state is now 1.
        gm.write_obj_at_addr(ltrb, GuestAddress(0x100 + trb_size))
            .unwrap();

        // Overwrite the first TRB again with a new one (data = 3)
        // with the new producer cycle bit state (false).
        let mut trb = NormalTrb::new();
        trb.set_trb_type(TrbType::Normal);
        trb.set_data_buffer(3);
        trb.set_cycle(false); // Link TRB toggled the cycle.
        gm.write_obj_at_addr(trb.clone(), GuestAddress(0x100))
            .unwrap();

        // Read new transfer descriptor.
        let descriptor = transfer_ring
            .dequeue_transfer_descriptor()
            .unwrap()
            .unwrap();
        assert_eq!(descriptor.len(), 1);
        assert_eq!(descriptor[0].trb.get_parameter(), 3);

        assert_eq!(transfer_ring.consumer_cycle_state, false);
    }
}