/*
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *    * Neither the name of The Linux Foundation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
* Changes from Qualcomm Innovation Center are provided under the following license:
*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted (subject to the limitations in the
* disclaimer below) provided that the following conditions are met:
*
*    * Redistributions of source code must retain the above copyright
*      notice, this list of conditions and the following disclaimer.
*
*    * Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials provided
*      with the distribution.
*
*    * Neither the name of Qualcomm Innovation Center, Inc. nor the names of its
*      contributors may be used to endorse or promote products derived
*      from this software without specific prior written permission.
*
* NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
* GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
* HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include "hwc_display_virtual_gpu.h"
#include "hwc_session.h"

#include <qdMetaData.h>

#define __CLASS__ "HWCDisplayVirtualGPU"

namespace sdm {

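// HWCDisplayVirtualGPU implements a GPU-composed virtual display: all layers are composed by
// the client (SurfaceFlinger) into the client target, and the result is color-converted and
// blitted into the output buffer instead of being routed through SDM.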
int HWCDisplayVirtualGPU::Init() {
  // Create client target.
  client_target_ = new HWCLayer(id_, buffer_allocator_);

  // Calls into SDM need to be dropped. Create Null Display interface.
  display_intf_ = new DisplayNull();

  disable_animation_ = Debug::IsExtAnimDisabled();

  return HWCDisplayVirtual::Init();
}

int HWCDisplayVirtualGPU::Deinit() {
  // Destroy the color convert instance. This destroys the thread and underlying GL resources.
  if (gl_color_convert_) {
    color_convert_task_.PerformTask(ColorConvertTaskCode::kCodeDestroyInstance, nullptr);
  }

  delete static_cast<DisplayNull *>(display_intf_);
  delete client_target_;

  for (auto hwc_layer : layer_set_) {
    delete hwc_layer;
  }

  return 0;
}

HWCDisplayVirtualGPU::HWCDisplayVirtualGPU(CoreInterface *core_intf, HWCBufferAllocator
                                           *buffer_allocator, HWCCallbacks *callbacks,
                                           hwc2_display_t id, int32_t sdm_id, uint32_t width,
                                           uint32_t height, float min_lum, float max_lum) :
  HWCDisplayVirtual(core_intf, buffer_allocator, callbacks, id, sdm_id, width, height),
  color_convert_task_(*this) {
}

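// Validate marks every layer for client (GPU) composition unless this frame is bypassed
// entirely (NeedsGPUBypass()) or frozen during an external animation (FreezeScreen()), and
// reports the resulting composition-type changes back to the client.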
HWC2::Error HWCDisplayVirtualGPU::Validate(uint32_t *out_num_types, uint32_t *out_num_requests) {
  DTRACE_SCOPED();

  // Reset previous changes.
  layer_changes_.clear();
  layer_requests_.clear();

  // Mark all layers for GPU composition if there is no need to bypass.
  bool needs_gpu_bypass = NeedsGPUBypass() || FreezeScreen();
  for (auto hwc_layer : layer_set_) {
    auto layer = hwc_layer->GetSDMLayer();
    layer->composition = needs_gpu_bypass ? kCompositionSDE : kCompositionGPU;

    if (needs_gpu_bypass) {
      if (hwc_layer->GetClientRequestedCompositionType() == HWC2::Composition::Client) {
        layer_changes_[hwc_layer->GetId()] = HWC2::Composition::Device;
        layer_requests_[hwc_layer->GetId()] = HWC2::LayerRequest::ClearClientTarget;
      }
    } else {
      if (hwc_layer->GetClientRequestedCompositionType() != HWC2::Composition::Client) {
        layer_changes_[hwc_layer->GetId()] = HWC2::Composition::Client;
      }
    }
  }

  // Derive client target dataspace based on the color mode - bug/115482728
  int32_t client_target_dataspace = GetDataspaceFromColorMode(GetCurrentColorMode());
  SetClientTargetDataSpace(client_target_dataspace);

  *out_num_types = UINT32(layer_changes_.size());
  *out_num_requests = UINT32(layer_requests_.size());
  has_client_composition_ = !needs_gpu_bypass;
  client_target_->ResetValidation();

  validated_ = true;

  return ((*out_num_types > 0) ? HWC2::Error::HasChanges : HWC2::Error::None);
}

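// SetOutputBuffer caches the output buffer dimensions; when buffer-geometry metadata is
// present, the unaligned (active) dimensions are taken from it and the color-convert
// instance is reset so it can pick up the new dimensions.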
HWC2::Error HWCDisplayVirtualGPU::SetOutputBuffer(buffer_handle_t buf,
                                                  shared_ptr<Fence> release_fence) {
  HWC2::Error error = HWCDisplayVirtual::SetOutputBuffer(buf, release_fence);
  if (error != HWC2::Error::None) {
    return error;
  }

  const private_handle_t *hnd = static_cast<const private_handle_t *>(buf);
  output_buffer_.width = hnd->width;
  output_buffer_.height = hnd->height;
  output_buffer_.unaligned_width = width_;
  output_buffer_.unaligned_height = height_;

  // Update active dimensions.
  BufferDim_t buffer_dim;
  if (getMetaData(const_cast<private_handle_t *>(hnd), GET_BUFFER_GEOMETRY, &buffer_dim) == 0) {
    output_buffer_.unaligned_width = buffer_dim.sliceWidth;
    output_buffer_.unaligned_height = buffer_dim.sliceHeight;
    color_convert_task_.PerformTask(ColorConvertTaskCode::kCodeReset, nullptr);
  }

  return HWC2::Error::None;
}

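// Present bypasses SDM: the GPU-composed client target is color-converted and blitted into
// the output buffer on the color-convert task.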
HWC2::Error HWCDisplayVirtualGPU::Present(shared_ptr<Fence> *out_retire_fence) {
  DTRACE_SCOPED();

  auto status = HWC2::Error::None;

  if (!validated_) {
    return HWC2::Error::NotValidated;
  }

  if (!output_buffer_.buffer_id) {
    return HWC2::Error::NoResources;
  }

  if (active_secure_sessions_.any() || layer_set_.empty()) {
    return status;
  }

  Layer *sdm_layer = client_target_->GetSDMLayer();
  LayerBuffer &input_buffer = sdm_layer->input_buffer;
  if (!input_buffer.buffer_id) {
    return HWC2::Error::NoResources;
  }

  layer_stack_.output_buffer = &output_buffer_;
  if (display_paused_) {
    validated_ = false;
  }

  // Ensure that the blit is initialized.
  // The GPU context is created in secure or non-secure mode depending on the output buffer
  // provided.
  if (!gl_color_convert_) {
    // Get instance.
    color_convert_task_.PerformTask(ColorConvertTaskCode::kCodeGetInstance, nullptr);
    if (gl_color_convert_ == nullptr) {
      DLOGE("Failed to get Color Convert Instance");
      return HWC2::Error::NoResources;
    } else {
      DLOGI("Created ColorConvert instance: %p", gl_color_convert_);
    }
  }

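  // Package the client target (source) and output buffer (destination) handles along with
  // their acquire fences; the blit's release fence becomes this frame's retire fence.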
  ColorConvertBlitContext ctx = {};

  ctx.src_hnd = reinterpret_cast<const private_handle_t *>(input_buffer.buffer_id);
  ctx.dst_hnd = reinterpret_cast<const private_handle_t *>(output_handle_);
  ctx.dst_rect = {0, 0, FLOAT(output_buffer_.unaligned_width),
                  FLOAT(output_buffer_.unaligned_height)};
  ctx.src_acquire_fence = input_buffer.acquire_fence;
  ctx.dst_acquire_fence = output_buffer_.acquire_fence;

  color_convert_task_.PerformTask(ColorConvertTaskCode::kCodeBlit, &ctx);

  // TODO: blit
  DumpVDSBuffer();

  *out_retire_fence = ctx.release_fence;

  return status;
}

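// OnTask handles the color-convert requests posted via PerformTask(); the GLColorConvert
// work stays on the SyncTask's own thread (see the Deinit() comment).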
void HWCDisplayVirtualGPU::OnTask(const ColorConvertTaskCode &task_code,
                                  SyncTask<ColorConvertTaskCode>::TaskContext *task_context) {
  switch (task_code) {
    case ColorConvertTaskCode::kCodeGetInstance: {
        gl_color_convert_ = GLColorConvert::GetInstance(kTargetYUV, output_buffer_.flags.secure);
      }
      break;
    case ColorConvertTaskCode::kCodeBlit: {
        DTRACE_SCOPED();
        ColorConvertBlitContext *ctx = reinterpret_cast<ColorConvertBlitContext *>(task_context);
        gl_color_convert_->Blit(ctx->src_hnd, ctx->dst_hnd, ctx->src_rect, ctx->dst_rect,
                                ctx->src_acquire_fence, ctx->dst_acquire_fence,
                                &(ctx->release_fence));
      }
      break;
    case ColorConvertTaskCode::kCodeReset: {
        DTRACE_SCOPED();
        if (gl_color_convert_) {
          gl_color_convert_->Reset();
        }
      }
      break;
    case ColorConvertTaskCode::kCodeDestroyInstance: {
        if (gl_color_convert_) {
          GLColorConvert::Destroy(gl_color_convert_);
        }
      }
      break;
  }
}

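// When external animation handling is disabled (disable_animation_), composition is skipped
// for the intermediate frames of an animation: GPU composition runs for the first animated
// frame, the screen stays frozen until animating_ goes false, and composition resumes then.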
bool HWCDisplayVirtualGPU::FreezeScreen() {
  if (!disable_animation_) {
    return false;
  }

  bool freeze_screen = false;
  if (animating_ && !animation_in_progress_) {
    // Start of animation. GPU comp is needed.
    animation_in_progress_ = true;
  } else if (!animating_ && animation_in_progress_) {
    // End of animation. Start composing.
    animation_in_progress_ = false;
  } else if (animating_ && animation_in_progress_) {
    // Animation in progress...
    freeze_screen = true;
  }

  return freeze_screen;
}

}  // namespace sdm