/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include <android-base/logging.h>
#include <android-base/properties.h>
#include <asyncio/AsyncIO.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <thread>
#include <unistd.h>

#include "PosixAsyncIO.h"
#include "MtpDescriptors.h"
#include "MtpFfsHandle.h"
#include "mtp.h"

namespace {

constexpr unsigned AIO_BUFS_MAX = 128;
constexpr unsigned AIO_BUF_LEN = 16384;

constexpr unsigned FFS_NUM_EVENTS = 5;

constexpr unsigned MAX_FILE_CHUNK_SIZE = AIO_BUFS_MAX * AIO_BUF_LEN;

constexpr uint32_t MAX_MTP_FILE_SIZE = 0xFFFFFFFF;

struct timespec ZERO_TIMEOUT = { 0, 0 };

struct mtp_device_status {
    uint16_t  wLength;
    uint16_t  wCode;
};

} // anonymous namespace

namespace android {

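// Query the FunctionFS endpoint descriptor for its wMaxPacketSize. If the
// ioctl fails, fall back to the high-speed bulk maximum so packet-boundary
// calculations (e.g. for zero-length packets) still have a usable value.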
int MtpFfsHandle::getPacketSize(int ffs_fd) {
    struct usb_endpoint_descriptor desc;
    if (ioctl(ffs_fd, FUNCTIONFS_ENDPOINT_DESC, reinterpret_cast<unsigned long>(&desc))) {
        PLOG(ERROR) << "Could not get FFS bulk-in descriptor";
        return MAX_PACKET_SIZE_HS;
    } else {
        return desc.wMaxPacketSize;
    }
}

MtpFfsHandle::MtpFfsHandle(int controlFd) {
    mControl.reset(controlFd);
}

MtpFfsHandle::~MtpFfsHandle() {}

void MtpFfsHandle::closeEndpoints() {
    mIntr.reset();
    mBulkIn.reset();
    mBulkOut.reset();
}

bool MtpFfsHandle::openEndpoints(bool ptp) {
    if (mBulkIn < 0) {
        mBulkIn.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN, O_RDWR)));
        if (mBulkIn < 0) {
            PLOG(ERROR) << (ptp ? FFS_PTP_EP_IN : FFS_MTP_EP_IN) << ": cannot open bulk in ep";
            return false;
        }
    }

    if (mBulkOut < 0) {
        mBulkOut.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT, O_RDWR)));
        if (mBulkOut < 0) {
            PLOG(ERROR) << (ptp ? FFS_PTP_EP_OUT : FFS_MTP_EP_OUT) << ": cannot open bulk out ep";
            return false;
        }
    }

    if (mIntr < 0) {
        mIntr.reset(TEMP_FAILURE_RETRY(open(ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR, O_RDWR)));
        if (mIntr < 0) {
            PLOG(ERROR) << (ptp ? FFS_PTP_EP_INTR : FFS_MTP_EP_INTR) << ": cannot open intr ep";
            return false;
        }
    }
    return true;
}

void MtpFfsHandle::advise(int fd) {
    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        if (posix_madvise(mIobuf[i].bufs.data(), MAX_FILE_CHUNK_SIZE,
                POSIX_MADV_SEQUENTIAL | POSIX_MADV_WILLNEED) < 0)
            PLOG(ERROR) << "Failed to madvise";
    }
    if (posix_fadvise(fd, 0, 0,
                POSIX_FADV_SEQUENTIAL | POSIX_FADV_NOREUSE | POSIX_FADV_WILLNEED) < 0)
        PLOG(ERROR) << "Failed to fadvise";
}

bool MtpFfsHandle::writeDescriptors(bool ptp) {
    return ::android::writeDescriptors(mControl, ptp);
}

void MtpFfsHandle::closeConfig() {
    mControl.reset();
}

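// Perform one transfer on a bulk endpoint using the kernel AIO queue: the
// caller's buffer is split into AIO_BUF_LEN slices, submitted as a batch via
// iobufSubmit(), and waited on with waitEvents(). When zero_packet is set and
// the length is a multiple of wMaxPacketSize, a zero-length transfer is queued
// afterwards so the host sees the end of the data phase. Returns the number of
// bytes transferred, or -1 on error.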
int MtpFfsHandle::doAsync(void* data, size_t len, bool read, bool zero_packet) {
    struct io_event ioevs[AIO_BUFS_MAX];
    size_t total = 0;

    while (total < len) {
        size_t this_len = std::min(len - total, static_cast<size_t>(AIO_BUF_LEN * AIO_BUFS_MAX));
        int num_bufs = this_len / AIO_BUF_LEN + (this_len % AIO_BUF_LEN == 0 ? 0 : 1);
        for (int i = 0; i < num_bufs; i++) {
            mIobuf[0].buf[i] = reinterpret_cast<unsigned char*>(data) + total + i * AIO_BUF_LEN;
        }
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, this_len, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
        total += ret;
        if (static_cast<size_t>(ret) < this_len) break;
    }

    int packet_size = getPacketSize(read ? mBulkOut : mBulkIn);
    if (len % packet_size == 0 && zero_packet) {
        int ret = iobufSubmit(&mIobuf[0], read ? mBulkOut : mBulkIn, 0, read);
        if (ret < 0) return -1;
        ret = waitEvents(&mIobuf[0], ret, ioevs, nullptr);
        if (ret < 0) return -1;
    }

    for (unsigned i = 0; i < AIO_BUFS_MAX; i++) {
        mIobuf[0].buf[i] = mIobuf[0].bufs.data() + i * AIO_BUF_LEN;
    }
    return total;
}

int MtpFfsHandle::read(void* data, size_t len) {
    // Zero packets are handled by receiveFile()
    return doAsync(data, len, true, false);
}

int MtpFfsHandle::write(const void* data, size_t len) {
    return doAsync(const_cast<void*>(data), len, false, true);
}

int MtpFfsHandle::handleEvent() {

    std::vector<usb_functionfs_event> events(FFS_NUM_EVENTS);
    usb_functionfs_event *event = events.data();
    int nbytes = TEMP_FAILURE_RETRY(::read(mControl, event,
                events.size() * sizeof(usb_functionfs_event)));
    if (nbytes == -1) {
        return -1;
    }
    int ret = 0;
    for (size_t n = nbytes / sizeof *event; n; --n, ++event) {
        switch (event->type) {
        case FUNCTIONFS_BIND:
        case FUNCTIONFS_ENABLE:
            ret = 0;
            errno = 0;
            break;
        case FUNCTIONFS_UNBIND:
        case FUNCTIONFS_DISABLE:
            errno = ESHUTDOWN;
            ret = -1;
            break;
        case FUNCTIONFS_SETUP:
            if (handleControlRequest(&event->u.setup) == -1)
                ret = -1;
            break;
        case FUNCTIONFS_SUSPEND:
        case FUNCTIONFS_RESUME:
            break;
        default:
            LOG(ERROR) << "Mtp Event " << event->type << " (unknown)";
        }
    }
    return ret;
}

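// Handle a class-specific control request delivered through a FUNCTIONFS_SETUP
// event: MTP_REQ_CANCEL/MTP_REQ_RESET abort the current transfer (errno is set
// to ECANCELED), and MTP_REQ_GET_DEVICE_STATUS reports either OK or, after a
// cancel, TRANSACTION_CANCELLED along with the two bulk endpoint addresses.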
int MtpFfsHandle::handleControlRequest(const struct usb_ctrlrequest *setup) {
    uint8_t type = setup->bRequestType;
    uint8_t code = setup->bRequest;
    uint16_t length = setup->wLength;
    uint16_t index = setup->wIndex;
    uint16_t value = setup->wValue;
    std::vector<char> buf;
    buf.resize(length);
    int ret = 0;

    if (!(type & USB_DIR_IN)) {
        if (::read(mControl, buf.data(), length) != length) {
            PLOG(ERROR) << "Mtp error ctrlreq read data";
        }
    }

    if ((type & USB_TYPE_MASK) == USB_TYPE_CLASS && index == 0 && value == 0) {
        switch (code) {
        case MTP_REQ_RESET:
        case MTP_REQ_CANCEL:
            errno = ECANCELED;
            ret = -1;
            break;
        case MTP_REQ_GET_DEVICE_STATUS:
        {
            if (length < sizeof(struct mtp_device_status) + 4) {
                errno = EINVAL;
                return -1;
            }
            struct mtp_device_status *st = reinterpret_cast<struct mtp_device_status*>(buf.data());
            // Report the size of the status block itself, not of the pointer.
            st->wLength = htole16(sizeof(*st));
            if (mCanceled) {
                st->wLength += 4;
                st->wCode = MTP_RESPONSE_TRANSACTION_CANCELLED;
                uint16_t *endpoints = reinterpret_cast<uint16_t*>(st + 1);
                endpoints[0] = ioctl(mBulkIn, FUNCTIONFS_ENDPOINT_REVMAP);
                endpoints[1] = ioctl(mBulkOut, FUNCTIONFS_ENDPOINT_REVMAP);
                mCanceled = false;
            } else {
                st->wCode = MTP_RESPONSE_OK;
            }
            length = st->wLength;
            break;
        }
        default:
            LOG(ERROR) << "Unrecognized Mtp class request! " << code;
        }
    } else {
        LOG(ERROR) << "Unrecognized request type " << type;
    }

    if (type & USB_DIR_IN) {
        if (::write(mControl, buf.data(), length) != length) {
            PLOG(ERROR) << "Mtp error ctrlreq write data";
        }
    }
    return 0;
}

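// Set up the per-session I/O state: NUM_IO_BUFS buffers of MAX_FILE_CHUNK_SIZE
// bytes each (with an iocb per AIO_BUF_LEN slice), a kernel AIO context sized
// for AIO_BUFS_MAX in-flight requests, and the eventfd/poll descriptors used by
// waitEvents() to multiplex AIO completions with control events.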
int MtpFfsHandle::start(bool ptp) {
    if (!openEndpoints(ptp))
        return -1;

    for (unsigned i = 0; i < NUM_IO_BUFS; i++) {
        mIobuf[i].bufs.resize(MAX_FILE_CHUNK_SIZE);
        mIobuf[i].iocb.resize(AIO_BUFS_MAX);
        mIobuf[i].iocbs.resize(AIO_BUFS_MAX);
        mIobuf[i].buf.resize(AIO_BUFS_MAX);
        for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
            mIobuf[i].buf[j] = mIobuf[i].bufs.data() + j * AIO_BUF_LEN;
            mIobuf[i].iocb[j] = &mIobuf[i].iocbs[j];
        }
    }

    memset(&mCtx, 0, sizeof(mCtx));
    if (io_setup(AIO_BUFS_MAX, &mCtx) < 0) {
        PLOG(ERROR) << "unable to setup aio";
        return -1;
    }
    mEventFd.reset(eventfd(0, EFD_NONBLOCK));
    mPollFds[0].fd = mControl;
    mPollFds[0].events = POLLIN;
    mPollFds[1].fd = mEventFd;
    mPollFds[1].events = POLLIN;

    mCanceled = false;
    return 0;
}

void MtpFfsHandle::close() {
    io_destroy(mCtx);
    closeEndpoints();
    closeConfig();
}

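// Reap AIO completions until at least min_events of them have arrived, while
// also servicing control events (cancel, reset, disable) from the same poll
// loop. io_getevents() is called with a zero timeout since the eventfd already
// indicates how many completions are pending. Returns the summed byte counts of
// the reaped events, or -1 with errno set if any of them failed.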
int MtpFfsHandle::waitEvents(struct io_buffer *buf, int min_events, struct io_event *events,
        int *counter) {
    int num_events = 0;
    int ret = 0;
    int error = 0;

    while (num_events < min_events) {
        if (poll(mPollFds, 2, 0) == -1) {
            PLOG(ERROR) << "Mtp error during poll()";
            return -1;
        }
        if (mPollFds[0].revents & POLLIN) {
            mPollFds[0].revents = 0;
            if (handleEvent() == -1) {
                error = errno;
            }
        }
        if (mPollFds[1].revents & POLLIN) {
            mPollFds[1].revents = 0;
            uint64_t ev_cnt = 0;

            if (::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1) {
                PLOG(ERROR) << "Mtp unable to read eventfd";
                error = errno;
                continue;
            }

            // It's possible that io_getevents will return more events than the eventFd reported,
            // since events may appear in the time between the calls. In this case, the eventFd will
            // show up as readable next iteration, but there will be fewer or no events to actually
            // wait for. Thus we never want io_getevents to block.
            int this_events = TEMP_FAILURE_RETRY(io_getevents(mCtx, 0, AIO_BUFS_MAX, events, &ZERO_TIMEOUT));
            if (this_events == -1) {
                PLOG(ERROR) << "Mtp error getting events";
                error = errno;
            }
            // Add up the total amount of data and find errors on the way.
            for (unsigned j = 0; j < static_cast<unsigned>(this_events); j++) {
                if (events[j].res < 0) {
                    errno = -events[j].res;
                    PLOG(ERROR) << "Mtp got error event at " << j << " and " << buf->actual << " total";
                    error = errno;
                }
                ret += events[j].res;
            }
            num_events += this_events;
            if (counter)
                *counter += this_events;
        }
        if (error) {
            errno = error;
            ret = -1;
            break;
        }
    }
    return ret;
}

void MtpFfsHandle::cancelTransaction() {
    // Device cancels by stalling both bulk endpoints.
    if (::read(mBulkIn, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk in";
    if (::write(mBulkOut, nullptr, 0) != -1 || errno != EBADMSG)
        PLOG(ERROR) << "Mtp stall failed on bulk out";
    mCanceled = true;
    errno = ECANCELED;
}

int MtpFfsHandle::cancelEvents(struct iocb **iocb, struct io_event *events, unsigned start,
        unsigned end) {
    // Some manpages for io_cancel are out of date and incorrect.
    // io_cancel will return -EINPROGRESS on success and does
    // not place the event in the given memory. We have to use
    // io_getevents to wait for all the events we cancelled.
    int ret = 0;
    unsigned num_events = 0;
    int save_errno = errno;
    errno = 0;

    for (unsigned j = start; j < end; j++) {
        if (io_cancel(mCtx, iocb[j], nullptr) != -1 || errno != EINPROGRESS) {
            PLOG(ERROR) << "Mtp couldn't cancel request " << j;
        } else {
            num_events++;
        }
    }
    if (num_events != end - start) {
        ret = -1;
        errno = EIO;
    }
    int evs = TEMP_FAILURE_RETRY(io_getevents(mCtx, num_events, AIO_BUFS_MAX, events, nullptr));
    if (static_cast<unsigned>(evs) != num_events) {
        PLOG(ERROR) << "Mtp couldn't cancel all requests, got " << evs;
        ret = -1;
    }

    uint64_t ev_cnt = 0;
    if (num_events && ::read(mEventFd, &ev_cnt, sizeof(ev_cnt)) == -1)
        PLOG(ERROR) << "Mtp Unable to read event fd";

    if (ret == 0) {
        // Restore errno since it probably got overridden with EINPROGRESS.
        errno = save_errno;
    }
    return ret;
}

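// Fill the iocb table for one chunk: up to AIO_BUFS_MAX requests of AIO_BUF_LEN
// bytes each, every one flagged with IOCB_FLAG_RESFD so its completion signals
// mEventFd. The table is truncated at the final (possibly partial) buffer and
// submitted with a single io_submit(). Returns the number of requests queued,
// or -1 with errno set.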
int MtpFfsHandle::iobufSubmit(struct io_buffer *buf, int fd, unsigned length, bool read) {
    int ret = 0;
    buf->actual = AIO_BUFS_MAX;
    for (unsigned j = 0; j < AIO_BUFS_MAX; j++) {
        unsigned rq_length = std::min(AIO_BUF_LEN, length - AIO_BUF_LEN * j);
        io_prep(buf->iocb[j], fd, buf->buf[j], rq_length, 0, read);
        buf->iocb[j]->aio_flags |= IOCB_FLAG_RESFD;
        buf->iocb[j]->aio_resfd = mEventFd;

        // Not enough data, so table is truncated.
        if (rq_length < AIO_BUF_LEN || length == AIO_BUF_LEN * (j + 1)) {
            buf->actual = j + 1;
            break;
        }
    }

    ret = io_submit(mCtx, buf->actual, buf->iocb.data());
    if (ret != static_cast<int>(buf->actual)) {
        PLOG(ERROR) << "Mtp io_submit got " << ret << " expected " << buf->actual;
        if (ret != -1) {
            errno = EIO;
        }
        ret = -1;
    }
    return ret;
}

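// Receive a file from the host. USB reads (kernel AIO) and disk writes (POSIX
// aio) are double-buffered across the NUM_IO_BUFS buffers: while one chunk is
// being flushed to disk, the next chunk is already being read from the bulk-out
// endpoint. A length of MAX_MTP_FILE_SIZE means the real size is >= 4 GiB, in
// which case data is consumed until a short packet terminates the transfer.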
int MtpFfsHandle::receiveFile(mtp_file_range mfr, bool zero_packet) {
    // When receiving files, the incoming length is given in 32 bits.
    // A >=4G file is given as 0xFFFFFFFF
    uint32_t file_length = mfr.length;
    uint64_t offset = mfr.offset;

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    aio.aio_buf = nullptr;
    struct aiocb *aiol[] = {&aio};

    int ret = -1;
    unsigned i = 0;
    size_t length;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool has_write = false;
    bool error = false;
    bool write_error = false;
    int packet_size = getPacketSize(mBulkOut);
    bool short_packet = false;
    advise(mfr.fd);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        // Queue an asynchronous read from USB.
        if (file_length > 0) {
            length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
            if (iobufSubmit(&mIobuf[i], mBulkOut, length, true) == -1)
                error = true;
        }

        // Get the return status of the last write request.
        if (has_write) {
            aio_suspend(aiol, 1, nullptr);
            int written = aio_return(&aio);
            if (static_cast<size_t>(written) < aio.aio_nbytes) {
                errno = written == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error writing to disk";
                write_error = true;
            }
            has_write = false;
        }

        if (error) {
            return -1;
        }

        // Get the result of the read request, and queue a write to disk.
        if (file_length > 0) {
            unsigned num_events = 0;
            ret = 0;
            unsigned short_i = mIobuf[i].actual;
            while (num_events < short_i) {
                // Get all events up to the short read, if there is one.
                // We must wait for each event since data transfer could end at any time.
                int this_events = 0;
                int event_ret = waitEvents(&mIobuf[i], 1, ioevs, &this_events);
                num_events += this_events;

                if (event_ret == -1) {
                    cancelEvents(mIobuf[i].iocb.data(), ioevs, num_events, mIobuf[i].actual);
                    return -1;
                }
                ret += event_ret;
                for (int j = 0; j < this_events; j++) {
                    // struct io_event contains a pointer to the associated struct iocb as a __u64.
                    if (static_cast<__u64>(ioevs[j].res) <
                            reinterpret_cast<struct iocb*>(ioevs[j].obj)->aio_nbytes) {
                        // We've found a short event. Store the index since
                        // events won't necessarily arrive in the order they are queued.
                        short_i = (ioevs[j].obj - reinterpret_cast<uint64_t>(mIobuf[i].iocbs.data()))
                            / sizeof(struct iocb) + 1;
                        short_packet = true;
                    }
                }
            }
            if (short_packet) {
                if (cancelEvents(mIobuf[i].iocb.data(), ioevs, short_i, mIobuf[i].actual)) {
                    write_error = true;
                }
            }
            if (file_length == MAX_MTP_FILE_SIZE) {
                // For larger files, receive until a short packet is received.
                if (static_cast<size_t>(ret) < length) {
                    file_length = 0;
                }
            } else if (ret < static_cast<int>(length)) {
                // If file is less than 4G and we get a short packet, it's an error.
                errno = EIO;
                LOG(ERROR) << "Mtp got unexpected short packet";
                return -1;
            } else {
                file_length -= ret;
            }

            if (write_error) {
                cancelTransaction();
                return -1;
            }

            // Enqueue a new write request
            aio_prepare(&aio, mIobuf[i].bufs.data(), ret, offset);
            aio_write(&aio);

            offset += ret;
            i = (i + 1) % NUM_IO_BUFS;
            has_write = true;
        }
    }
    if ((ret % packet_size == 0 && !short_packet) || zero_packet) {
        // Receive an empty packet if size is a multiple of the endpoint size
        // and we didn't already get an empty packet from the header or large file.
        if (read(mIobuf[0].bufs.data(), packet_size) != 0) {
            return -1;
        }
    }
    return 0;
}

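// Send a file to the host. The first bulk-in packet carries the MTP data header
// plus as much file data as fits, since some hosts do not accept a header-only
// packet. The rest of the file is then pipelined: POSIX aio reads the next
// chunk from disk while the previous chunk is in flight on the bulk-in
// endpoint, and a zero-length packet is appended if the payload ends exactly on
// a packet boundary.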
int MtpFfsHandle::sendFile(mtp_file_range mfr) {
    uint64_t file_length = mfr.length;
    uint32_t given_length = std::min(static_cast<uint64_t>(MAX_MTP_FILE_SIZE),
            file_length + sizeof(mtp_data_header));
    uint64_t offset = mfr.offset;
    int packet_size = getPacketSize(mBulkIn);

    // If file_length is larger than a size_t, truncating would produce the wrong comparison.
    // Instead, promote the left side to 64 bits, then truncate the small result.
    int init_read_len = std::min(
            static_cast<uint64_t>(packet_size - sizeof(mtp_data_header)), file_length);

    advise(mfr.fd);

    struct aiocb aio;
    aio.aio_fildes = mfr.fd;
    struct aiocb *aiol[] = {&aio};
    int ret = 0;
    int length, num_read;
    unsigned i = 0;
    struct io_event ioevs[AIO_BUFS_MAX];
    bool error = false;
    bool has_write = false;

    // Send the header data
    mtp_data_header *header = reinterpret_cast<mtp_data_header*>(mIobuf[0].bufs.data());
    header->length = htole32(given_length);
    header->type = htole16(2); // data packet
    header->command = htole16(mfr.command);
    header->transaction_id = htole32(mfr.transaction_id);

    // Some hosts don't support header/data separation even though MTP allows it
    // Handle by filling first packet with initial file data
    if (TEMP_FAILURE_RETRY(pread(mfr.fd, mIobuf[0].bufs.data() +
                    sizeof(mtp_data_header), init_read_len, offset))
            != init_read_len) return -1;
    if (doAsync(mIobuf[0].bufs.data(), sizeof(mtp_data_header) + init_read_len,
                false, false /* zlps are handled below */) == -1)
        return -1;
    file_length -= init_read_len;
    offset += init_read_len;
    ret = init_read_len + sizeof(mtp_data_header);

    // Break down the file into pieces that fit in buffers
    while (file_length > 0 || has_write) {
        if (file_length > 0) {
            // Queue up a read from disk.
            length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
            aio_prepare(&aio, mIobuf[i].bufs.data(), length, offset);
            aio_read(&aio);
        }

        if (has_write) {
            // Wait for usb write. Cancel unwritten portion if there's an error.
            int num_events = 0;
            if (waitEvents(&mIobuf[(i-1)%NUM_IO_BUFS], mIobuf[(i-1)%NUM_IO_BUFS].actual, ioevs,
                        &num_events) != ret) {
                error = true;
                cancelEvents(mIobuf[(i-1)%NUM_IO_BUFS].iocb.data(), ioevs, num_events,
                        mIobuf[(i-1)%NUM_IO_BUFS].actual);
            }
            has_write = false;
        }

        if (file_length > 0) {
            // Wait for the previous read to finish
            aio_suspend(aiol, 1, nullptr);
            num_read = aio_return(&aio);
            if (static_cast<size_t>(num_read) < aio.aio_nbytes) {
                errno = num_read == -1 ? aio_error(&aio) : EIO;
                PLOG(ERROR) << "Mtp error reading from disk";
                cancelTransaction();
                return -1;
            }

            file_length -= num_read;
            offset += num_read;

            if (error) {
                return -1;
            }

            // Queue up a write to usb.
            if (iobufSubmit(&mIobuf[i], mBulkIn, num_read, false) == -1) {
                return -1;
            }
            has_write = true;
            ret = num_read;
        }

        i = (i + 1) % NUM_IO_BUFS;
    }

    if (ret % packet_size == 0) {
        // If the last packet wasn't short, send a final empty packet
        if (write(mIobuf[0].bufs.data(), 0) != 0) {
            return -1;
        }
    }
    return 0;
}

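// Queue an interrupt-endpoint event without blocking the caller. The event
// payload is copied to the heap, handed to a detached thread, and freed by
// doSendEvent() once the write to the interrupt endpoint completes.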
int MtpFfsHandle::sendEvent(mtp_event me) {
    // Mimic the behavior of f_mtp by sending the event async.
    // Events aren't critical to the connection, so we don't need to check the return value.
    char *temp = new char[me.length];
    memcpy(temp, me.data, me.length);
    me.data = temp;
    std::thread t([this, me]() { return this->doSendEvent(me); });
    t.detach();
    return 0;
}

void MtpFfsHandle::doSendEvent(mtp_event me) {
    unsigned length = me.length;
    int ret = ::write(mIntr, me.data, length);
    if (static_cast<unsigned>(ret) != length)
        PLOG(ERROR) << "Mtp error sending event thread!";
    delete[] reinterpret_cast<char*>(me.data);
}

} // namespace android