/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <errno.h>
#include <heap.h>
#include <string.h>

#include <cpu.h>
#include <spi.h>
#include <spi_priv.h>
#include <timer.h>

#define INFO_PRINT(fmt, ...) do { \
        osLog(LOG_INFO, "%s " fmt, "[spi]", ##__VA_ARGS__); \
    } while (0)

#define ERROR_PRINT(fmt, ...) do { \
        osLog(LOG_ERROR, "%s " fmt, "[spi] ERROR:", ##__VA_ARGS__); \
    } while (0)

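/*
 * Per-bus bookkeeping wrapped around the platform SpiDevice: the packet list
 * for the transfer in flight, the caller-supplied mode, and the callbacks to
 * fire when the transfer (or a slave wait-for-CS) completes.
 */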
struct SpiDeviceState {
    struct SpiDevice dev;

    const struct SpiPacket *packets;
    size_t n;
    size_t currentBuf;
    struct SpiMode mode;

    SpiCbkF rxTxCallback;
    void *rxTxCookie;

    SpiCbkF finishCallback;
    void *finishCookie;

    int err;
};
#define SPI_DEVICE_TO_STATE(p) ((struct SpiDeviceState *)p)

static void spiMasterNext(struct SpiDeviceState *state);
static void spiMasterStop(struct SpiDeviceState *state);
static void spiMasterDone(struct SpiDeviceState *state, int err);

static void spiSlaveNext(struct SpiDeviceState *state);
static void spiSlaveIdle(struct SpiDeviceState *state, int err);
static void spiSlaveDone(struct SpiDeviceState *state);

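/*
 * Kicks off a master transfer. Prefers the asynchronous start path when the
 * platform provides one; otherwise starts synchronously (if available) and
 * immediately issues the first packet.
 */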
static int spiMasterStart(struct SpiDeviceState *state,
        spi_cs_t cs, const struct SpiMode *mode)
{
    struct SpiDevice *dev = &state->dev;

    if (dev->ops->masterStartAsync)
        return dev->ops->masterStartAsync(dev, cs, mode);

    if (dev->ops->masterStartSync) {
        int err = dev->ops->masterStartSync(dev, cs, mode);
        if (err < 0)
            return err;
    }

    return dev->ops->masterRxTx(dev, state->packets[0].rxBuf,
            state->packets[0].txBuf, state->packets[0].size, mode);
}

void spi_masterStartAsync_done(struct SpiDevice *dev, int err)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);
    if (err)
        spiMasterDone(state, err);
    else
        spiMasterNext(state);
}

static void spiDelayCallback(uint32_t timerId, void *data)
{
    spiMasterNext((struct SpiDeviceState *)data);
}

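/*
 * Issues the next packet in the queue, or stops the transfer once every
 * packet has completed.
 */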
static void spiMasterNext(struct SpiDeviceState *state)
{
    struct SpiDevice *dev = &state->dev;

    if (state->currentBuf == state->n) {
        spiMasterStop(state);
        return;
    }

    size_t i = state->currentBuf;
    void *rxBuf = state->packets[i].rxBuf;
    const void *txBuf = state->packets[i].txBuf;
    size_t size = state->packets[i].size;
    const struct SpiMode *mode = &state->mode;

    int err = dev->ops->masterRxTx(dev, rxBuf, txBuf, size, mode);
    if (err)
        spiMasterDone(state, err);
}

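/*
 * Completion hook for a single master packet. Honors the per-packet delay
 * (via a one-shot timer) before issuing the next packet.
 */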
void spiMasterRxTxDone(struct SpiDevice *dev, int err)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);
    if (err) {
        spiMasterDone(state, err);
    } else {
        size_t i = state->currentBuf++;

        if (state->packets[i].delay > 0) {
            if (!timTimerSet(state->packets[i].delay, 0, 50, spiDelayCallback, state, true)) {
                ERROR_PRINT("Cannot do delayed spi, timer depleted\n");
                spiMasterDone(state, -ENOMEM); // should be "out of timers"; out of mem is close enough
            }
        } else {
            spiMasterNext(state);
        }
    }
}

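/*
 * Ends the transfer, preferring a synchronous stop when available and
 * falling back to the asynchronous path (completion then arrives via
 * spiMasterStopAsyncDone).
 */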
static void spiMasterStop(struct SpiDeviceState *state)
{
    struct SpiDevice *dev = &state->dev;

    if (dev->ops->masterStopSync) {
        int err = dev->ops->masterStopSync(dev);
        spiMasterDone(state, err);
    } else if (dev->ops->masterStopAsync) {
        int err = dev->ops->masterStopAsync(dev);
        if (err < 0)
            spiMasterDone(state, err);
    } else {
        spiMasterDone(state, 0);
    }
}

void spiMasterStopAsyncDone(struct SpiDevice *dev, int err)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);
    spiMasterDone(state, err);
}

static void spiMasterDone(struct SpiDeviceState *state, int err)
{
    SpiCbkF callback = state->rxTxCallback;
    void *cookie = state->rxTxCookie;

    callback(cookie, err);
}

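/*
 * Brings the bus up in slave mode and parks it in the idle state, using the
 * asynchronous start path when the platform provides one.
 */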
static int spiSlaveStart(struct SpiDeviceState *state,
        const struct SpiMode *mode)
{
    struct SpiDevice *dev = &state->dev;

    if (dev->ops->slaveStartAsync)
        return dev->ops->slaveStartAsync(dev, mode);

    if (dev->ops->slaveStartSync) {
        int err = dev->ops->slaveStartSync(dev, mode);
        if (err < 0)
            return err;
    }

    return dev->ops->slaveIdle(dev, mode);
}

void spiSlaveStartAsyncDone(struct SpiDevice *dev, int err)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);

    if (err)
        state->err = err;
    else
        state->err = dev->ops->slaveIdle(dev, &state->mode);
}

void spiSlaveRxTxDone(struct SpiDevice *dev, int err)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);

    if (err) {
        spiSlaveIdle(state, err);
    } else {
        state->currentBuf++;
        spiSlaveNext(state);
    }
}

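/*
 * Invoked when chip select goes inactive: disables the CS interrupt and
 * fires the finish callback registered by spiSlaveWaitForInactive(), if any.
 */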
void spiSlaveCsInactive(struct SpiDevice *dev)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);

    dev->ops->slaveSetCsInterrupt(dev, false);

    if (!state->finishCallback) {
        osLog(LOG_WARN, "%s called without callback\n", __func__);
        return;
    }

    SpiCbkF callback = state->finishCallback;
    void *cookie = state->finishCookie;
    state->finishCallback = NULL;
    state->finishCookie = NULL;

    callback(cookie, 0);
}

static void spiSlaveNext(struct SpiDeviceState *state)
{
    struct SpiDevice *dev = &state->dev;

    if (state->currentBuf == state->n) {
        spiSlaveIdle(state, 0);
        return;
    }

    size_t i = state->currentBuf;
    void *rxBuf = state->packets[i].rxBuf;
    const void *txBuf = state->packets[i].txBuf;
    size_t size = state->packets[i].size;
    const struct SpiMode *mode = &state->mode;

    int err = dev->ops->slaveRxTx(dev, rxBuf, txBuf, size, mode);
    if (err)
        spiSlaveIdle(state, err);
}

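/*
 * Returns the slave to the idle state (unless an earlier error is being
 * propagated) and reports the outcome of the transfer to the caller.
 */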
static void spiSlaveIdle(struct SpiDeviceState *state, int err)
{
    struct SpiDevice *dev = &state->dev;
    SpiCbkF callback = state->rxTxCallback;
    void *cookie = state->rxTxCookie;

    if (!err)
        err = dev->ops->slaveIdle(dev, &state->mode);

    callback(cookie, err);
}

void spiSlaveStopAsyncDone(struct SpiDevice *dev, int err)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);
    spiSlaveDone(state);
}

static void spiSlaveDone(struct SpiDeviceState *state)
{
    struct SpiDevice *dev = &state->dev;

    if (dev->ops->release)
        dev->ops->release(dev);
    heapFree(state);
}

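/*
 * Records the caller's packet list and completion callback in the bus state
 * before a master or slave transfer is started.
 */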
static int spiSetupRxTx(struct SpiDeviceState *state,
        const struct SpiPacket packets[], size_t n,
        SpiCbkF callback, void *cookie)
{
    state->packets = packets;
    state->n = n;
    state->currentBuf = 0;
    state->rxTxCallback = callback;
    state->rxTxCookie = cookie;

    return 0;
}

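/*
 * Allocates per-bus state and claims the bus in master mode. On success the
 * caller receives a SpiDevice handle to pass to spiMasterRxTx() and
 * spiMasterRelease().
 */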
int spiMasterRequest(uint8_t busId, struct SpiDevice **dev_out)
{
    int ret = 0;

    struct SpiDeviceState *state = heapAlloc(sizeof(*state));
    if (!state)
        return -ENOMEM;
    struct SpiDevice *dev = &state->dev;

    ret = spiRequest(dev, busId);
    if (ret < 0)
        goto err_request;

    if (!dev->ops->masterRxTx) {
        ret = -EOPNOTSUPP;
        goto err_opsupp;
    }

    *dev_out = dev;
    return 0;

err_opsupp:
    if (dev->ops->release)
        dev->ops->release(dev);
err_request:
    heapFree(state);
    return ret;
}

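/*
 * Queues a multi-packet master transfer and starts it; the callback fires
 * once every packet has completed or an error is hit. A minimal usage sketch
 * (onSpiDone, busId, cs, mode, and the buffers are illustrative caller-side
 * names, not part of this file):
 *
 *     static void onSpiDone(void *cookie, int err) { ... }
 *
 *     struct SpiDevice *dev;
 *     struct SpiPacket pkt = { .rxBuf = rxBuf, .txBuf = txBuf,
 *                              .size = sizeof(txBuf), .delay = 0 };
 *     if (!spiMasterRequest(busId, &dev))
 *         spiMasterRxTx(dev, cs, &pkt, 1, &mode, onSpiDone, NULL);
 */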
int spiMasterRxTx(struct SpiDevice *dev, spi_cs_t cs,
        const struct SpiPacket packets[], size_t n,
        const struct SpiMode *mode, SpiCbkF callback,
        void *cookie)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);
    int ret = 0;

    if (!n)
        return -EINVAL;

    ret = spiSetupRxTx(state, packets, n, callback, cookie);
    if (ret < 0)
        return ret;

    state->mode = *mode;

    return spiMasterStart(state, cs, mode);
}

int spiMasterRelease(struct SpiDevice *dev)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);

    if (dev->ops->release) {
        int ret = dev->ops->release(dev);
        if (ret < 0)
            return ret;
    }

    heapFree(state);
    return 0;
}

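/*
 * Allocates per-bus state, claims the bus in slave mode, and leaves it idle
 * waiting for the master. Any deferred start error is reported on the next
 * spiSlaveRxTx() call via state->err.
 */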
int spiSlaveRequest(uint8_t busId, const struct SpiMode *mode,
        struct SpiDevice **dev_out)
{
    int ret = 0;

    struct SpiDeviceState *state = heapAlloc(sizeof(*state));
    if (!state)
        return -ENOMEM;
    struct SpiDevice *dev = &state->dev;

    ret = spiRequest(dev, busId);
    if (ret < 0)
        goto err_request;

    if (!dev->ops->slaveIdle || !dev->ops->slaveRxTx) {
        ret = -EOPNOTSUPP;
        goto err_opsupp;
    }

    state->mode = *mode;
    state->err = 0;

    ret = spiSlaveStart(state, mode);
    if (ret < 0)
        goto err_opsupp;

    *dev_out = dev;
    return 0;

err_opsupp:
    if (dev->ops->release)
        dev->ops->release(dev);
err_request:
    heapFree(state);
    return ret;
}

int spiSlaveRxTx(struct SpiDevice *dev,
        const struct SpiPacket packets[], size_t n,
        SpiCbkF callback, void *cookie)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);

    if (!n)
        return -EINVAL;

    if (state->err)
        return state->err;

    int ret = spiSetupRxTx(state, packets, n, callback, cookie);
    if (ret < 0)
        return ret;

    return dev->ops->slaveRxTx(dev, state->packets[0].rxBuf,
            state->packets[0].txBuf, state->packets[0].size, &state->mode);
}

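/*
 * Arms a callback for the next chip-select deassertion. The interrupt-off
 * window below closes the race between enabling the CS interrupt and
 * sampling the current CS level (see the comment in the body).
 */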
int spiSlaveWaitForInactive(struct SpiDevice *dev, SpiCbkF callback,
        void *cookie)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);

    if (!dev->ops->slaveSetCsInterrupt || !dev->ops->slaveCsIsActive)
        return -EOPNOTSUPP;

    state->finishCallback = callback;
    state->finishCookie = cookie;

    uint64_t flags = cpuIntsOff();
    dev->ops->slaveSetCsInterrupt(dev, true);

    /* CS may already be inactive before enabling the interrupt. In this case
     * roll back and fire the callback immediately.
     *
     * Interrupts must be off while checking for this. Otherwise there is a
     * (very unlikely) race where the CS interrupt fires between calling
     * slaveSetCsInterrupt(true) and the rollback
     * slaveSetCsInterrupt(false), causing the event to be handled twice.
     *
     * Likewise the check must come after enabling the interrupt. Otherwise
     * there is an (also unlikely) race where CS goes inactive between reading
     * CS and enabling the interrupt, causing the event to be lost.
     */

    bool cs = dev->ops->slaveCsIsActive(dev);
    if (!cs) {
        dev->ops->slaveSetCsInterrupt(dev, false);
        cpuIntsRestore(flags);

        state->finishCallback = NULL;
        state->finishCookie = NULL;
        callback(cookie, 0);
        return 0;
    }

    cpuIntsRestore(flags);
    return 0;
}

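/*
 * Tears the slave down, preferring the synchronous stop; on the asynchronous
 * path the state is freed later in spiSlaveStopAsyncDone().
 */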
int spiSlaveRelease(struct SpiDevice *dev)
{
    struct SpiDeviceState *state = SPI_DEVICE_TO_STATE(dev);
    int ret;

    if (dev->ops->slaveStopSync) {
        ret = dev->ops->slaveStopSync(dev);
        if (ret < 0)
            return ret;
    } else if (dev->ops->slaveStopAsync) {
        return dev->ops->slaveStopAsync(dev);
    }

    spiSlaveDone(state);
    return 0;
}