/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "hwc-platform-nv"

#include "drmresources.h"
#include "platform.h"
#include "platformnv.h"

#include <cinttypes>
#include <stdatomic.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

#include <cutils/log.h>
#include <hardware/gralloc.h>

namespace android {

#ifdef USE_NVIDIA_IMPORTER
// static
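// Builds the NVIDIA-specific importer for the given DRM resources and runs
// its one-time initialization; returns NULL if initialization fails.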
Importer *Importer::CreateInstance(DrmResources *drm) {
  NvImporter *importer = new NvImporter(drm);
  if (!importer)
    return NULL;

  int ret = importer->Init();
  if (ret) {
    ALOGE("Failed to initialize the nv importer %d", ret);
    delete importer;
    return NULL;
  }
  return importer;
}
#endif

NvImporter::NvImporter(DrmResources *drm) : drm_(drm) {
}

NvImporter::~NvImporter() {
}

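// Loads the gralloc module used for buffer import; a non-NVIDIA module is
// tolerated but logged with a warning.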
int NvImporter::Init() {
  int ret = hw_get_module(GRALLOC_HARDWARE_MODULE_ID,
                          (const hw_module_t **)&gralloc_);
  if (ret) {
    ALOGE("Failed to open gralloc module %d", ret);
    return ret;
  }

  if (strcasecmp(gralloc_->common.author, "NVIDIA"))
    ALOGW("Using non-NVIDIA gralloc module: %s/%s\n", gralloc_->common.name,
          gralloc_->common.author);

  return 0;
}

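// Imports a gralloc buffer into a DRM framebuffer. If gralloc already holds
// an NvBuffer_t for this handle it is reused with an extra reference;
// otherwise the buffer is imported through gralloc, wrapped in an FB with
// drmModeAddFB2() and cached via the gralloc importer-private slot.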
int NvImporter::ImportBuffer(buffer_handle_t handle, hwc_drm_bo_t *bo) {
  memset(bo, 0, sizeof(hwc_drm_bo_t));
  NvBuffer_t *buf = GrallocGetNvBuffer(handle);
  if (buf) {
    atomic_fetch_add(&buf->ref, 1);
    *bo = buf->bo;
    return 0;
  }

  buf = new NvBuffer_t();
  if (!buf) {
    ALOGE("Failed to allocate new NvBuffer_t");
    return -ENOMEM;
  }
  buf->bo.priv = buf;
  buf->importer = this;

  // We initialize the reference count to 2: one reference for NV gralloc,
  // which is still using this buffer (dropped in NvGrallocRelease), and one
  // for HWC (this ImportBuffer call).
  atomic_init(&buf->ref, 2);

  int ret = gralloc_->perform(gralloc_, GRALLOC_MODULE_PERFORM_DRM_IMPORT,
                              drm_->fd(), handle, &buf->bo);
  if (ret) {
    ALOGE("GRALLOC_MODULE_PERFORM_DRM_IMPORT failed %d", ret);
    delete buf;
    return ret;
  }

  ret = drmModeAddFB2(drm_->fd(), buf->bo.width, buf->bo.height, buf->bo.format,
                      buf->bo.gem_handles, buf->bo.pitches, buf->bo.offsets,
                      &buf->bo.fb_id, 0);
  if (ret) {
    ALOGE("Failed to add fb %d", ret);
    ReleaseBufferImpl(&buf->bo);
    delete buf;
    return ret;
  }

  ret = GrallocSetNvBuffer(handle, buf);
  if (ret) {
    /* This will happen if persist.tegra.gpu_mapping_cache is 0/off,
     * or if NV gralloc runs out of "priv slots" (currently 3 per buffer,
     * only one of which should be used by drm_hwcomposer). */
    ALOGE("Failed to register free callback for imported buffer %d", ret);
    ReleaseBufferImpl(&buf->bo);
    delete buf;
    return ret;
  }
  *bo = buf->bo;
  return 0;
}

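// Drops one reference to the imported buffer and, once the last reference
// (HWC or gralloc) is gone, removes the FB and closes its GEM handles.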
int NvImporter::ReleaseBuffer(hwc_drm_bo_t *bo) {
  NvBuffer_t *buf = (NvBuffer_t *)bo->priv;
  if (!buf) {
    ALOGE("Freeing bo %" PRIu32 ", buf is NULL!", bo->fb_id);
    return 0;
  }
  if (atomic_fetch_sub(&buf->ref, 1) > 1)
    return 0;

  ReleaseBufferImpl(bo);
  delete buf;
  return 0;
}

// static
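// Release callback registered with gralloc in GrallocSetNvBuffer(); called
// when gralloc drops its reference to the buffer.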
void NvImporter::NvGrallocRelease(void *nv_buffer) {
  NvBuffer_t *buf = (NvBuffer_t *)nv_buffer;
  buf->importer->ReleaseBuffer(&buf->bo);
}

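// Removes the DRM framebuffer (if one was created) and closes every unique
// GEM handle backing the buffer object.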
void NvImporter::ReleaseBufferImpl(hwc_drm_bo_t *bo) {
  if (bo->fb_id) {
    int ret = drmModeRmFB(drm_->fd(), bo->fb_id);
    if (ret)
      ALOGE("Failed to rm fb %d", ret);
  }

  struct drm_gem_close gem_close;
  memset(&gem_close, 0, sizeof(gem_close));
  int num_gem_handles = sizeof(bo->gem_handles) / sizeof(bo->gem_handles[0]);
  for (int i = 0; i < num_gem_handles; i++) {
    if (!bo->gem_handles[i])
      continue;

    gem_close.handle = bo->gem_handles[i];
    int ret = drmIoctl(drm_->fd(), DRM_IOCTL_GEM_CLOSE, &gem_close);
    if (ret) {
      ALOGE("Failed to close gem handle %d %d", i, ret);
    } else {
      /* Clear any duplicate gem handle as well but don't close again */
      for (int j = i + 1; j < num_gem_handles; j++)
        if (bo->gem_handles[j] == bo->gem_handles[i])
          bo->gem_handles[j] = 0;
      bo->gem_handles[i] = 0;
    }
  }
}

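// Returns the NvBuffer_t previously stashed in the gralloc importer-private
// slot for this handle, or NULL if nothing was registered.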
NvImporter::NvBuffer_t *NvImporter::GrallocGetNvBuffer(buffer_handle_t handle) {
  void *priv = NULL;
  int ret =
      gralloc_->perform(gralloc_, GRALLOC_MODULE_PERFORM_GET_IMPORTER_PRIVATE,
                        handle, NvGrallocRelease, &priv);
  return ret ? NULL : (NvBuffer_t *)priv;
}

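// Stores the NvBuffer_t in the gralloc importer-private slot for this handle
// and registers NvGrallocRelease() as the free callback.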
int NvImporter::GrallocSetNvBuffer(buffer_handle_t handle, NvBuffer_t *buf) {
  return gralloc_->perform(gralloc_,
                           GRALLOC_MODULE_PERFORM_SET_IMPORTER_PRIVATE, handle,
                           NvGrallocRelease, buf);
}

#ifdef USE_NVIDIA_IMPORTER
// static
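// Builds the NVIDIA composition planner: protected/rotated layers are placed
// first, then remaining protected layers, then a greedy pass over whatever
// planes are left.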
std::unique_ptr<Planner> Planner::CreateInstance(DrmResources *) {
  std::unique_ptr<Planner> planner(new Planner);
  planner->AddStage<PlanStageProtectedRotated>();
  planner->AddStage<PlanStageProtected>();
  planner->AddStage<PlanStageGreedy>();
  return planner;
}
#endif

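// Removes and returns the first plane in |planes| that can drive |crtc|, or
// NULL if no remaining plane supports it.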
static DrmPlane *GetCrtcPrimaryPlane(DrmCrtc *crtc,
                                     std::vector<DrmPlane *> *planes) {
  for (auto i = planes->begin(); i != planes->end(); ++i) {
    if ((*i)->GetCrtcSupported(*crtc)) {
      DrmPlane *plane = *i;
      planes->erase(i);
      return plane;
    }
  }
  return NULL;
}

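// Pins protected layers that carry a transform to the CRTC's primary plane,
// then routes every layer beneath the protected content to a precomp plane
// so a hole can be punched through it.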
int PlanStageProtectedRotated::ProvisionPlanes(
    std::vector<DrmCompositionPlane> *composition,
    std::map<size_t, DrmHwcLayer *> &layers, DrmCrtc *crtc,
    std::vector<DrmPlane *> *planes) {
  int ret;
  int protected_zorder = -1;
  for (auto i = layers.begin(); i != layers.end();) {
    if (!i->second->protected_usage() || !i->second->transform) {
      ++i;
      continue;
    }

    auto primary_iter = planes->begin();
    for (; primary_iter != planes->end(); ++primary_iter) {
      if ((*primary_iter)->type() == DRM_PLANE_TYPE_PRIMARY)
        break;
    }

    // We cheat a little here. Since there can only be one primary plane per
    // crtc, we know we'll only hit this case once. So we blindly insert the
    // protected content at the beginning of the composition, knowing this path
    // won't be taken a second time during the loop.
    if (primary_iter != planes->end()) {
      composition->emplace(composition->begin(),
                           DrmCompositionPlane::Type::kLayer, *primary_iter,
                           crtc, i->first);
      planes->erase(primary_iter);
      protected_zorder = i->first;
    } else {
      ALOGE("Could not provision primary plane for protected/rotated layer");
    }
    i = layers.erase(i);
  }

  if (protected_zorder == -1)
    return 0;

  // Add any layers below the protected content to the precomposition since we
  // need to punch a hole through them.
  for (auto i = layers.begin(); i != layers.end();) {
    // Skip layers above the z-order of the protected content
    if (i->first > static_cast<size_t>(protected_zorder)) {
      ++i;
      continue;
    }

    // If there's no precomp layer already queued, queue one now.
    DrmCompositionPlane *precomp = GetPrecomp(composition);
    if (precomp) {
      precomp->source_layers().emplace_back(i->first);
    } else {
      if (planes->size()) {
        DrmPlane *precomp_plane = planes->back();
        planes->pop_back();
        composition->emplace_back(DrmCompositionPlane::Type::kPrecomp,
                                  precomp_plane, crtc, i->first);
      } else {
        ALOGE("Not enough planes to reserve for precomp fb");
      }
    }
    i = layers.erase(i);
  }
  return 0;
}
}