/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* Object implementation */

#include "sles_allinclusive.h"

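// Summary of the internal realize/resume state machine used below (derived from this file):
//  *_1  : an asynchronous operation is pending on the thread pool and may still be aborted
//  *_1A : an abort was requested while the operation was still waiting on the work queue
//  *_2  : the hook is running (or the operation is synchronous) and can no longer be aborted
// On completion the object settles to REALIZED on success, or back to UNREALIZED (Realize)
// or SUSPENDED (Resume) on failure or abort.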

// Called by a worker thread to handle an asynchronous Object.Realize.
// Parameter self is the Object.

static void HandleRealize(void *self, void *ignored, int unused)
{

    // validate input parameters
    IObject *thiz = (IObject *) self;
    assert(NULL != thiz);
    const ClassTable *clazz = thiz->mClass;
    assert(NULL != clazz);
    AsyncHook realize = clazz->mRealize;
    SLresult result;
    SLuint8 state;

    // check object state
    object_lock_exclusive(thiz);
    state = thiz->mState;
    switch (state) {

    case SL_OBJECT_STATE_REALIZING_1:   // normal case
        if (NULL != realize) {
            thiz->mState = SL_OBJECT_STATE_REALIZING_2;
            // Note that the mutex is locked on entry to and exit from the realize hook,
            // but the hook is permitted to temporarily unlock the mutex (e.g. for async).
            result = (*realize)(thiz, SL_BOOLEAN_TRUE);
            assert(SL_OBJECT_STATE_REALIZING_2 == thiz->mState);
            state = SL_RESULT_SUCCESS == result ? SL_OBJECT_STATE_REALIZED :
                SL_OBJECT_STATE_UNREALIZED;
        } else {
            result = SL_RESULT_SUCCESS;
            state = SL_OBJECT_STATE_REALIZED;
        }
        break;

    case SL_OBJECT_STATE_REALIZING_1A:  // operation was aborted while on work queue
        result = SL_RESULT_OPERATION_ABORTED;
        state = SL_OBJECT_STATE_UNREALIZED;
        break;

    default:                            // impossible
        assert(SL_BOOLEAN_FALSE);
        result = SL_RESULT_INTERNAL_ERROR;
        break;

    }

    // mutex is locked, update state
    thiz->mState = state;

    // Make a copy of these, so we can call the callback with mutex unlocked
    slObjectCallback callback = thiz->mCallback;
    void *context = thiz->mContext;
    object_unlock_exclusive(thiz);

    // Note that the mutex is unlocked during the callback
    if (NULL != callback) {
        (*callback)(&thiz->mItf, context, SL_OBJECT_EVENT_ASYNC_TERMINATION, result, state, NULL);
    }
}


static SLresult IObject_Realize(SLObjectItf self, SLboolean async)
{
    SL_ENTER_INTERFACE

    IObject *thiz = (IObject *) self;
    SLuint8 state;
    const ClassTable *clazz = thiz->mClass;
    bool isSharedEngine = false;
    object_lock_exclusive(thiz);
    // note that SL_OBJECTID_ENGINE and XA_OBJECTID_ENGINE map to same class
    if (clazz == objectIDtoClass(SL_OBJECTID_ENGINE)) {
        // important: the lock order is engine followed by theOneTrueMutex
        int ok = pthread_mutex_lock(&theOneTrueMutex);
        assert(0 == ok);
        isSharedEngine = 1 < theOneTrueRefCount;
        ok = pthread_mutex_unlock(&theOneTrueMutex);
        assert(0 == ok);
    }
    state = thiz->mState;
    // Reject redundant calls to Realize, except on a shared engine
    if (SL_OBJECT_STATE_UNREALIZED != state) {
        object_unlock_exclusive(thiz);
        // redundant realize on the shared engine is permitted
        if (isSharedEngine && (SL_OBJECT_STATE_REALIZED == state)) {
            result = SL_RESULT_SUCCESS;
        } else {
            result = SL_RESULT_PRECONDITIONS_VIOLATED;
        }
    } else {
        // Asynchronous: mark operation pending and cancellable
        if (async && (SL_OBJECTID_ENGINE != clazz->mSLObjectID)) {
            state = SL_OBJECT_STATE_REALIZING_1;
        // Synchronous: mark operation pending and non-cancellable
        } else {
            state = SL_OBJECT_STATE_REALIZING_2;
        }
        thiz->mState = state;
        switch (state) {
        case SL_OBJECT_STATE_REALIZING_1:   // asynchronous on non-Engine
            object_unlock_exclusive(thiz);
            assert(async);
            result = ThreadPool_add_ppi(&thiz->mEngine->mThreadPool, HandleRealize, thiz, NULL, 0);
            if (SL_RESULT_SUCCESS != result) {
                // Engine was destroyed during realize, or insufficient memory
                object_lock_exclusive(thiz);
                thiz->mState = SL_OBJECT_STATE_UNREALIZED;
                object_unlock_exclusive(thiz);
            }
            break;
        case SL_OBJECT_STATE_REALIZING_2:   // synchronous, or asynchronous on Engine
            {
            AsyncHook realize = clazz->mRealize;
            // Note that the mutex is locked on entry to and exit from the realize hook,
            // but the hook is permitted to temporarily unlock the mutex (e.g. for async).
            result = (NULL != realize) ? (*realize)(thiz, async) : SL_RESULT_SUCCESS;
            assert(SL_OBJECT_STATE_REALIZING_2 == thiz->mState);
            state = (SL_RESULT_SUCCESS == result) ? SL_OBJECT_STATE_REALIZED :
                SL_OBJECT_STATE_UNREALIZED;
            thiz->mState = state;
            slObjectCallback callback = thiz->mCallback;
            void *context = thiz->mContext;
            object_unlock_exclusive(thiz);
            // asynchronous Realize on an Engine is actually done synchronously, but still has
            // callback because there is no thread pool yet to do it asynchronously.
            if (async && (NULL != callback)) {
                (*callback)(&thiz->mItf, context, SL_OBJECT_EVENT_ASYNC_TERMINATION, result, state,
                        NULL);
            }
            }
            break;
        default:                            // impossible
            object_unlock_exclusive(thiz);
            assert(SL_BOOLEAN_FALSE);
            break;
        }
    }

    SL_LEAVE_INTERFACE
}
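
// Illustrative application-side usage of Realize (not part of this implementation; assumes an
// engine object obtained from slCreateEngine):
//
//   SLObjectItf engineObject;
//   SLresult res = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
//   if (SL_RESULT_SUCCESS == res) {
//       // synchronous realize: blocks until the object is REALIZED or the call fails
//       res = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
//   }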


// Called by a worker thread to handle an asynchronous Object.Resume.
// Parameter self is the Object.

static void HandleResume(void *self, void *ignored, int unused)
{

    // validate input parameters
    IObject *thiz = (IObject *) self;
    assert(NULL != thiz);
    const ClassTable *clazz = thiz->mClass;
    assert(NULL != clazz);
    AsyncHook resume = clazz->mResume;
    SLresult result;
    SLuint8 state;

    // check object state
    object_lock_exclusive(thiz);
    state = thiz->mState;
    switch (state) {

    case SL_OBJECT_STATE_RESUMING_1:    // normal case
        if (NULL != resume) {
            thiz->mState = SL_OBJECT_STATE_RESUMING_2;
            // Note that the mutex is locked on entry to and exit from the resume hook,
            // but the hook is permitted to temporarily unlock the mutex (e.g. for async).
            result = (*resume)(thiz, SL_BOOLEAN_TRUE);
            assert(SL_OBJECT_STATE_RESUMING_2 == thiz->mState);
            state = SL_RESULT_SUCCESS == result ? SL_OBJECT_STATE_REALIZED :
                SL_OBJECT_STATE_SUSPENDED;
        } else {
            result = SL_RESULT_SUCCESS;
            state = SL_OBJECT_STATE_REALIZED;
        }
        break;

    case SL_OBJECT_STATE_RESUMING_1A:   // operation was aborted while on work queue
        result = SL_RESULT_OPERATION_ABORTED;
        state = SL_OBJECT_STATE_SUSPENDED;
        break;

    default:                            // impossible
        assert(SL_BOOLEAN_FALSE);
        result = SL_RESULT_INTERNAL_ERROR;
        break;

    }

    // mutex is locked, update state
    thiz->mState = state;

    // Make a copy of these, so we can call the callback with mutex unlocked
    slObjectCallback callback = thiz->mCallback;
    void *context = thiz->mContext;
    object_unlock_exclusive(thiz);

    // Note that the mutex is unlocked during the callback
    if (NULL != callback) {
        (*callback)(&thiz->mItf, context, SL_OBJECT_EVENT_ASYNC_TERMINATION, result, state, NULL);
    }
}


static SLresult IObject_Resume(SLObjectItf self, SLboolean async)
{
    SL_ENTER_INTERFACE

    IObject *thiz = (IObject *) self;
    const ClassTable *clazz = thiz->mClass;
    SLuint8 state;
    object_lock_exclusive(thiz);
    state = thiz->mState;
    // Reject redundant calls to Resume
    if (SL_OBJECT_STATE_SUSPENDED != state) {
        object_unlock_exclusive(thiz);
        result = SL_RESULT_PRECONDITIONS_VIOLATED;
    } else {
        // Asynchronous: mark operation pending and cancellable
        if (async) {
            state = SL_OBJECT_STATE_RESUMING_1;
        // Synchronous: mark operation pending and non-cancellable
        } else {
            state = SL_OBJECT_STATE_RESUMING_2;
        }
        thiz->mState = state;
        switch (state) {
        case SL_OBJECT_STATE_RESUMING_1:    // asynchronous
            object_unlock_exclusive(thiz);
            assert(async);
            result = ThreadPool_add_ppi(&thiz->mEngine->mThreadPool, HandleResume, thiz, NULL, 0);
            if (SL_RESULT_SUCCESS != result) {
                // Engine was destroyed during resume, or insufficient memory
                object_lock_exclusive(thiz);
                thiz->mState = SL_OBJECT_STATE_SUSPENDED;
                object_unlock_exclusive(thiz);
            }
            break;
        case SL_OBJECT_STATE_RESUMING_2:    // synchronous
            {
            AsyncHook resume = clazz->mResume;
            // Note that the mutex is locked on entry to and exit from the resume hook,
            // but the hook is permitted to temporarily unlock the mutex (e.g. for async).
            result = (NULL != resume) ? (*resume)(thiz, SL_BOOLEAN_FALSE) : SL_RESULT_SUCCESS;
            assert(SL_OBJECT_STATE_RESUMING_2 == thiz->mState);
            thiz->mState = (SL_RESULT_SUCCESS == result) ? SL_OBJECT_STATE_REALIZED :
                SL_OBJECT_STATE_SUSPENDED;
            object_unlock_exclusive(thiz);
            }
            break;
        default:                            // impossible
            object_unlock_exclusive(thiz);
            assert(SL_BOOLEAN_FALSE);
            break;
        }
    }

    SL_LEAVE_INTERFACE
}


static SLresult IObject_GetState(SLObjectItf self, SLuint32 *pState)
{
    SL_ENTER_INTERFACE

    if (NULL == pState) {
        result = SL_RESULT_PARAMETER_INVALID;
    } else {
        IObject *thiz = (IObject *) self;
        object_lock_shared(thiz);
        SLuint8 state = thiz->mState;
        object_unlock_shared(thiz);
        // Re-map the realizing, resuming, and suspending states
        switch (state) {
        case SL_OBJECT_STATE_REALIZING_1:
        case SL_OBJECT_STATE_REALIZING_1A:
        case SL_OBJECT_STATE_REALIZING_2:
        case SL_OBJECT_STATE_DESTROYING:    // application shouldn't call GetState after Destroy
            state = SL_OBJECT_STATE_UNREALIZED;
            break;
        case SL_OBJECT_STATE_RESUMING_1:
        case SL_OBJECT_STATE_RESUMING_1A:
        case SL_OBJECT_STATE_RESUMING_2:
        case SL_OBJECT_STATE_SUSPENDING:
            state = SL_OBJECT_STATE_SUSPENDED;
            break;
        case SL_OBJECT_STATE_UNREALIZED:
        case SL_OBJECT_STATE_REALIZED:
        case SL_OBJECT_STATE_SUSPENDED:
            // These are the "official" object states, return them as is
            break;
        default:
            assert(SL_BOOLEAN_FALSE);
            break;
        }
        *pState = state;
        result = SL_RESULT_SUCCESS;
    }

    SL_LEAVE_INTERFACE
}
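
// Illustrative application-side usage of GetState (not part of this implementation):
//
//   SLuint32 objState;
//   if (SL_RESULT_SUCCESS == (*engineObject)->GetState(engineObject, &objState)) {
//       // objState is one of SL_OBJECT_STATE_UNREALIZED, _REALIZED, or _SUSPENDED;
//       // the transient internal states above are never reported to the application.
//   }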

static SLresult IObject_GetInterface(SLObjectItf self, const SLInterfaceID iid, void *pInterface)
{
    SL_ENTER_INTERFACE

    if (NULL == pInterface) {
        result = SL_RESULT_PARAMETER_INVALID;
    } else {
        void *interface = NULL;
        if (NULL == iid) {
            result = SL_RESULT_PARAMETER_INVALID;
        } else {
            IObject *thiz = (IObject *) self;
            const ClassTable *clazz = thiz->mClass;
            int MPH, index;
            if ((0 > (MPH = IID_to_MPH(iid))) ||
                    // no need to check for an initialization hook
                    // (NULL == MPH_init_table[MPH].mInit) ||
                    (0 > (index = clazz->mMPH_to_index[MPH]))) {
                result = SL_RESULT_FEATURE_UNSUPPORTED;
            } else {
                unsigned mask = 1 << index;
                object_lock_exclusive(thiz);
                if ((SL_OBJECT_STATE_REALIZED != thiz->mState) &&
                        !(INTERFACE_PREREALIZE & clazz->mInterfaces[index].mInterface)) {
                    // Can't get interface on an unrealized object unless pre-realize is ok
                    result = SL_RESULT_PRECONDITIONS_VIOLATED;
                } else if ((MPH_MUTESOLO == MPH) && (SL_OBJECTID_AUDIOPLAYER ==
                        clazz->mSLObjectID) && (1 == ((CAudioPlayer *) thiz)->mNumChannels)) {
                    // Can't get the MuteSolo interface of an audio player if the channel count is
                    // mono, but _can_ get the MuteSolo interface if the channel count is unknown
                    result = SL_RESULT_FEATURE_UNSUPPORTED;
                } else {
                    switch (thiz->mInterfaceStates[index]) {
                    case INTERFACE_EXPOSED:
                    case INTERFACE_ADDED:
                        interface = (char *) thiz + clazz->mInterfaces[index].mOffset;
                        // Note that interface has been gotten,
                        // for debugger and to detect incorrect use of interfaces
                        if (!(thiz->mGottenMask & mask)) {
                            thiz->mGottenMask |= mask;
                            // This trickery validates the v-table
                            ((size_t *) interface)[0] ^= ~0;
                        }
                        result = SL_RESULT_SUCCESS;
                        break;
                    // Can't get interface if uninitialized, initialized, suspended,
                    // suspending, resuming, adding, or removing
                    default:
                        result = SL_RESULT_FEATURE_UNSUPPORTED;
                        break;
                    }
                }
                object_unlock_exclusive(thiz);
            }
        }
        *(void **)pInterface = interface;
    }

    SL_LEAVE_INTERFACE
}
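
// Illustrative application-side usage of GetInterface (not part of this implementation; assumes
// a realized audio player object named playerObject):
//
//   SLPlayItf playItf;
//   if (SL_RESULT_SUCCESS == (*playerObject)->GetInterface(playerObject, SL_IID_PLAY, &playItf)) {
//       (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_PLAYING);
//   }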


static SLresult IObject_RegisterCallback(SLObjectItf self,
    slObjectCallback callback, void *pContext)
{
    SL_ENTER_INTERFACE

    IObject *thiz = (IObject *) self;
    object_lock_exclusive(thiz);
    thiz->mCallback = callback;
    thiz->mContext = pContext;
    object_unlock_exclusive(thiz);
    result = SL_RESULT_SUCCESS;

    SL_LEAVE_INTERFACE
}
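
// Illustrative application-side usage of RegisterCallback together with an asynchronous Realize
// (not part of this implementation; the callback name is hypothetical):
//
//   static void MyObjectCallback(SLObjectItf caller, const void *pContext, SLuint32 event,
//           SLresult result, SLuint32 state, void *pInterface)
//   {
//       if (SL_OBJECT_EVENT_ASYNC_TERMINATION == event) {
//           // result and state report the outcome of the asynchronous Realize or Resume
//       }
//   }
//   ...
//   (*playerObject)->RegisterCallback(playerObject, MyObjectCallback, NULL);
//   (*playerObject)->Realize(playerObject, SL_BOOLEAN_TRUE);   // returns without waiting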


/** \brief This is internal common code for Abort and Destroy.
 *  Note: called with mutex unlocked, and returns with mutex locked.
 */

static void Abort_internal(IObject *thiz)
{
    const ClassTable *clazz = thiz->mClass;
    bool anyAsync = false;
    object_lock_exclusive(thiz);

    // Abort asynchronous operations on the object
    switch (thiz->mState) {
    case SL_OBJECT_STATE_REALIZING_1:   // Realize
        thiz->mState = SL_OBJECT_STATE_REALIZING_1A;
        anyAsync = true;
        break;
    case SL_OBJECT_STATE_RESUMING_1:    // Resume
        thiz->mState = SL_OBJECT_STATE_RESUMING_1A;
        anyAsync = true;
        break;
    case SL_OBJECT_STATE_REALIZING_1A:  // Realize
    case SL_OBJECT_STATE_REALIZING_2:
    case SL_OBJECT_STATE_RESUMING_1A:   // Resume
    case SL_OBJECT_STATE_RESUMING_2:
        anyAsync = true;
        break;
    case SL_OBJECT_STATE_DESTROYING:
        assert(false);
        break;
    default:
        break;
    }

    // Abort asynchronous operations on interfaces
    SLuint8 *interfaceStateP = thiz->mInterfaceStates;
    unsigned index;
    for (index = 0; index < clazz->mInterfaceCount; ++index, ++interfaceStateP) {
        switch (*interfaceStateP) {
        case INTERFACE_ADDING_1:    // AddInterface
            *interfaceStateP = INTERFACE_ADDING_1A;
            anyAsync = true;
            break;
        case INTERFACE_RESUMING_1:  // ResumeInterface
            *interfaceStateP = INTERFACE_RESUMING_1A;
            anyAsync = true;
            break;
        case INTERFACE_ADDING_1A:   // AddInterface
        case INTERFACE_ADDING_2:
        case INTERFACE_RESUMING_1A: // ResumeInterface
        case INTERFACE_RESUMING_2:
        case INTERFACE_REMOVING:    // not observable: RemoveInterface is synchronous & mutex locked
            anyAsync = true;
            break;
        default:
            break;
        }
    }

    // Wait until all asynchronous operations either complete normally or recognize the abort
    while (anyAsync) {
        object_unlock_exclusive(thiz);
        // FIXME should use condition variable instead of polling
        usleep(20000);
        anyAsync = false;
        object_lock_exclusive(thiz);
        switch (thiz->mState) {
        case SL_OBJECT_STATE_REALIZING_1:   // state 1 means it cycled during the usleep window
        case SL_OBJECT_STATE_RESUMING_1:
        case SL_OBJECT_STATE_REALIZING_1A:
        case SL_OBJECT_STATE_REALIZING_2:
        case SL_OBJECT_STATE_RESUMING_1A:
        case SL_OBJECT_STATE_RESUMING_2:
            anyAsync = true;
            break;
        case SL_OBJECT_STATE_DESTROYING:
            assert(false);
            break;
        default:
            break;
        }
        interfaceStateP = thiz->mInterfaceStates;
        for (index = 0; index < clazz->mInterfaceCount; ++index, ++interfaceStateP) {
            switch (*interfaceStateP) {
            case INTERFACE_ADDING_1:    // state 1 means it cycled during the usleep window
            case INTERFACE_RESUMING_1:
            case INTERFACE_ADDING_1A:
            case INTERFACE_ADDING_2:
            case INTERFACE_RESUMING_1A:
            case INTERFACE_RESUMING_2:
            case INTERFACE_REMOVING:
                anyAsync = true;
                break;
            default:
                break;
            }
        }
    }

    // At this point there are no pending asynchronous operations
}


static void IObject_AbortAsyncOperation(SLObjectItf self)
{
    SL_ENTER_INTERFACE_VOID

    IObject *thiz = (IObject *) self;
    Abort_internal(thiz);
    object_unlock_exclusive(thiz);

    SL_LEAVE_INTERFACE_VOID
}


void IObject_Destroy(SLObjectItf self)
{
    SL_ENTER_INTERFACE_VOID

    IObject *thiz = (IObject *) self;
    // mutex is unlocked
    Abort_internal(thiz);
    // mutex is locked
    const ClassTable *clazz = thiz->mClass;
    PreDestroyHook preDestroy = clazz->mPreDestroy;
    // The pre-destroy hook is called with mutex locked, and should block until it is safe to
    // destroy. It is OK to unlock the mutex temporarily, as long as it re-locks the mutex
    // before returning.
    if (NULL != preDestroy) {
        predestroy_t okToDestroy = (*preDestroy)(thiz);
        switch (okToDestroy) {
        case predestroy_ok:
            break;
        case predestroy_error:
            SL_LOGE("Object::Destroy(%p) not allowed", thiz);
            // fall through
        case predestroy_again:
            object_unlock_exclusive(thiz);
            // unfortunately Destroy doesn't return a result
            SL_LEAVE_INTERFACE_VOID
            // unreachable
        default:
            assert(false);
            break;
        }
    }
    thiz->mState = SL_OBJECT_STATE_DESTROYING;
    VoidHook destroy = clazz->mDestroy;
    // const, no lock needed
    IEngine *thisEngine = &thiz->mEngine->mEngine;
    unsigned i = thiz->mInstanceID;
    assert(MAX_INSTANCE >= i);
    // avoid a recursive lock on the engine when destroying the engine itself
    if (thisEngine->mThis != thiz) {
        interface_lock_exclusive(thisEngine);
    }
    // An unpublished object has a slot reserved, but the ID hasn't been chosen yet
    assert(0 < thisEngine->mInstanceCount);
    --thisEngine->mInstanceCount;
    // If object is published, then remove it from exposure to sync thread and debugger
    if (0 != i) {
        --i;
        unsigned mask = 1 << i;
        assert(thisEngine->mInstanceMask & mask);
        thisEngine->mInstanceMask &= ~mask;
        assert(thisEngine->mInstances[i] == thiz);
        thisEngine->mInstances[i] = NULL;
    }
    // avoid a recursive unlock on the engine when destroying the engine itself
    if (thisEngine->mThis != thiz) {
        interface_unlock_exclusive(thisEngine);
    }
    // The destroy hook is called with mutex locked
    if (NULL != destroy) {
        (*destroy)(thiz);
    }
    // Call the deinitializer for each currently initialized interface,
    // whether it is implicit, explicit, optional, or dynamically added.
    // The deinitializers are called in the reverse order that the
    // initializers were called, so that IObject_deinit is called last.
    unsigned index = clazz->mInterfaceCount;
    const struct iid_vtable *x = &clazz->mInterfaces[index];
    SLuint8 *interfaceStateP = &thiz->mInterfaceStates[index];
    for ( ; index > 0; --index) {
        --x;
        size_t offset = x->mOffset;
        void *thisItf = (char *) thiz + offset;
        SLuint32 state = *--interfaceStateP;
        switch (state) {
        case INTERFACE_UNINITIALIZED:
            break;
        case INTERFACE_EXPOSED:     // quiescent states
        case INTERFACE_ADDED:
        case INTERFACE_SUSPENDED:
            // The remove hook is called with mutex locked
            {
            VoidHook remove = MPH_init_table[x->mMPH].mRemove;
            if (NULL != remove) {
                (*remove)(thisItf);
            }
            *interfaceStateP = INTERFACE_INITIALIZED;
            }
            // fall through
        case INTERFACE_INITIALIZED:
            {
            VoidHook deinit = MPH_init_table[x->mMPH].mDeinit;
            if (NULL != deinit) {
                (*deinit)(thisItf);
            }
            *interfaceStateP = INTERFACE_UNINITIALIZED;
            }
            break;
        case INTERFACE_ADDING_1:    // active states indicate incorrect use of API
        case INTERFACE_ADDING_1A:
        case INTERFACE_ADDING_2:
        case INTERFACE_RESUMING_1:
        case INTERFACE_RESUMING_1A:
        case INTERFACE_RESUMING_2:
        case INTERFACE_REMOVING:
        case INTERFACE_SUSPENDING:
            SL_LOGE("Object::Destroy(%p) while interface %u active", thiz, index);
            break;
        default:
            assert(SL_BOOLEAN_FALSE);
            break;
        }
    }
    // The mutex is unlocked and destroyed by IObject_deinit, which is the last deinitializer
    memset(thiz, 0x55, clazz->mSize);   // catch broken applications that continue using interfaces
                                        // was ifdef USE_DEBUG but safer to do this unconditionally
    free(thiz);

    if (SL_OBJECTID_ENGINE == clazz->mSLObjectID) {
        CEngine_Destroyed((CEngine *) thiz);
    }

    SL_LEAVE_INTERFACE_VOID
}
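
// Illustrative application-side usage of Destroy (not part of this implementation): destroy
// objects before the engine that created them, and treat all interfaces gotten from an object
// as invalid once it has been destroyed, e.g.
//
//   (*playerObject)->Destroy(playerObject);
//   (*engineObject)->Destroy(engineObject);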


static SLresult IObject_SetPriority(SLObjectItf self, SLint32 priority, SLboolean preemptable)
{
    SL_ENTER_INTERFACE

#if USE_PROFILES & USE_PROFILES_BASE
    IObject *thiz = (IObject *) self;
    object_lock_exclusive(thiz);
    thiz->mPriority = priority;
    thiz->mPreemptable = SL_BOOLEAN_FALSE != preemptable; // normalize
    object_unlock_exclusive(thiz);
    result = SL_RESULT_SUCCESS;
#else
    result = SL_RESULT_FEATURE_UNSUPPORTED;
#endif

    SL_LEAVE_INTERFACE
}


static SLresult IObject_GetPriority(SLObjectItf self, SLint32 *pPriority, SLboolean *pPreemptable)
{
    SL_ENTER_INTERFACE

#if USE_PROFILES & USE_PROFILES_BASE
    if (NULL == pPriority || NULL == pPreemptable) {
        result = SL_RESULT_PARAMETER_INVALID;
    } else {
        IObject *thiz = (IObject *) self;
        object_lock_shared(thiz);
        SLint32 priority = thiz->mPriority;
        SLboolean preemptable = thiz->mPreemptable;
        object_unlock_shared(thiz);
        *pPriority = priority;
        *pPreemptable = preemptable;
        result = SL_RESULT_SUCCESS;
    }
#else
    result = SL_RESULT_FEATURE_UNSUPPORTED;
#endif

    SL_LEAVE_INTERFACE
}


static SLresult IObject_SetLossOfControlInterfaces(SLObjectItf self,
    SLint16 numInterfaces, SLInterfaceID *pInterfaceIDs, SLboolean enabled)
{
    SL_ENTER_INTERFACE

#if USE_PROFILES & USE_PROFILES_BASE
    result = SL_RESULT_SUCCESS;
    if (0 < numInterfaces) {
        SLuint32 i;
        if (NULL == pInterfaceIDs) {
            result = SL_RESULT_PARAMETER_INVALID;
        } else {
            IObject *thiz = (IObject *) self;
            const ClassTable *clazz = thiz->mClass;
            unsigned lossOfControlMask = 0;
            // The cast is due to a typo in the spec, bug 6482
            for (i = 0; i < (SLuint32) numInterfaces; ++i) {
                SLInterfaceID iid = pInterfaceIDs[i];
                if (NULL == iid) {
                    result = SL_RESULT_PARAMETER_INVALID;
                    goto out;
                }
                int MPH, index;
                // We ignore without error any invalid MPH or index, but spec is unclear
                if ((0 <= (MPH = IID_to_MPH(iid))) &&
                        // no need to check for an initialization hook
                        // (NULL == MPH_init_table[MPH].mInit) ||
                        (0 <= (index = clazz->mMPH_to_index[MPH]))) {
                    lossOfControlMask |= (1 << index);
                }
            }
            object_lock_exclusive(thiz);
            if (enabled) {
                thiz->mLossOfControlMask |= lossOfControlMask;
            } else {
                thiz->mLossOfControlMask &= ~lossOfControlMask;
            }
            object_unlock_exclusive(thiz);
        }
    }
out:
#else
    result = SL_RESULT_FEATURE_UNSUPPORTED;
#endif

    SL_LEAVE_INTERFACE
}


static const struct SLObjectItf_ IObject_Itf = {
    IObject_Realize,
    IObject_Resume,
    IObject_GetState,
    IObject_GetInterface,
    IObject_RegisterCallback,
    IObject_AbortAsyncOperation,
    IObject_Destroy,
    IObject_SetPriority,
    IObject_GetPriority,
    IObject_SetLossOfControlInterfaces
};


/** \brief This must be the first initializer called for an object */

void IObject_init(void *self)
{
    IObject *thiz = (IObject *) self;
    thiz->mItf = &IObject_Itf;
    // initialized in construct:
    // mClass
    // mInstanceID
    // mLossOfControlMask
    // mEngine
    // mInterfaceStates
    thiz->mState = SL_OBJECT_STATE_UNREALIZED;
    thiz->mGottenMask = 1;  // IObject
    thiz->mAttributesMask = 0;
    thiz->mCallback = NULL;
    thiz->mContext = NULL;
#if USE_PROFILES & USE_PROFILES_BASE
    thiz->mPriority = SL_PRIORITY_NORMAL;
    thiz->mPreemptable = SL_BOOLEAN_FALSE;
#endif
    thiz->mStrongRefCount = 0;
    int ok;
    ok = pthread_mutex_init(&thiz->mMutex, (const pthread_mutexattr_t *) NULL);
    assert(0 == ok);
#ifdef USE_DEBUG
    memset(&thiz->mOwner, 0, sizeof(pthread_t));
    thiz->mFile = NULL;
    thiz->mLine = 0;
    thiz->mGeneration = 0;
#endif
    ok = pthread_cond_init(&thiz->mCond, (const pthread_condattr_t *) NULL);
    assert(0 == ok);
}


/** \brief This must be the last deinitializer called for an object */

void IObject_deinit(void *self)
{
    IObject *thiz = (IObject *) self;
#ifdef USE_DEBUG
    assert(pthread_equal(pthread_self(), thiz->mOwner));
#endif
    int ok;
    ok = pthread_cond_destroy(&thiz->mCond);
    assert(0 == ok);
    // equivalent to object_unlock_exclusive, but without the rigmarole
    ok = pthread_mutex_unlock(&thiz->mMutex);
    assert(0 == ok);
    ok = pthread_mutex_destroy(&thiz->mMutex);
    assert(0 == ok);
    // redundant: thiz->mState = SL_OBJECT_STATE_UNREALIZED;
}


/** \brief Publish a new object after it is fully initialized.
 *  Publishing will expose the object to sync thread and debugger,
 *  and make it safe to return the SLObjectItf to the application.
 */

void IObject_Publish(IObject *thiz)
{
    IEngine *thisEngine = &thiz->mEngine->mEngine;
    interface_lock_exclusive(thisEngine);
    // construct earlier reserved a pending slot, but did not choose the actual slot number
    unsigned availMask = ~thisEngine->mInstanceMask;
    assert(availMask);
    unsigned i = ctz(availMask);
    assert(MAX_INSTANCE > i);
    assert(NULL == thisEngine->mInstances[i]);
    thisEngine->mInstances[i] = thiz;
    thisEngine->mInstanceMask |= 1 << i;
    // avoid zero as a valid instance ID
    thiz->mInstanceID = i + 1;
    interface_unlock_exclusive(thisEngine);
}
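
// Typical lifecycle of an object instance, as implemented above and in the construct code
// (summary for reference): construct -> IObject_init (first initializer) -> per-interface init
// hooks -> IObject_Publish -> application Realize / GetInterface / ... -> application Destroy ->
// per-interface remove/deinit hooks -> IObject_deinit (last deinitializer, destroys the mutex).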