1 /*
2 * Copyright (C) 2008 The Android Open Source Project
3 * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
4 *
5 * Not a Contribution, Apache license notifications and license are retained
6 * for attribution purposes only.
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 */
20
21 #include <dlfcn.h>
22 #include <errno.h>
23 #include <fcntl.h>
24 #include <linux/msm_kgsl.h>
25 #include <stdint.h>
26 #include <string.h>
27 #include <sys/ioctl.h>
28 #include <sys/mman.h>
29 #include <sys/prctl.h>
30 #include <sys/resource.h>
31 #include <sys/types.h>
32 #include <unistd.h>
33
34 #include <cutils/native_handle.h>
35 #include <log/log.h>
36
37 #include <EGL/eglplatform.h>
38
39 #include <copybit.h>
40 #include <alloc_controller.h>
41 #include <memalloc.h>
42
43 #include "c2d2.h"
44 #include "software_converter.h"
45
46 using gralloc::IMemAlloc;
47 using gralloc::IonController;
48 using gralloc::alloc_data;
49
50 C2D_STATUS (*LINK_c2dCreateSurface)( uint32 *surface_id,
51 uint32 surface_bits,
52 C2D_SURFACE_TYPE surface_type,
53 void *surface_definition );
54
55 C2D_STATUS (*LINK_c2dUpdateSurface)( uint32 surface_id,
56 uint32 surface_bits,
57 C2D_SURFACE_TYPE surface_type,
58 void *surface_definition );
59
60 C2D_STATUS (*LINK_c2dReadSurface)( uint32 surface_id,
61 C2D_SURFACE_TYPE surface_type,
62 void *surface_definition,
63 int32 x, int32 y );
64
65 C2D_STATUS (*LINK_c2dDraw)( uint32 target_id,
66 uint32 target_config, C2D_RECT *target_scissor,
67 uint32 target_mask_id, uint32 target_color_key,
68 C2D_OBJECT *objects_list, uint32 num_objects );
69
70 C2D_STATUS (*LINK_c2dFinish)( uint32 target_id);
71
72 C2D_STATUS (*LINK_c2dFlush)( uint32 target_id, c2d_ts_handle *timestamp);
73
74 C2D_STATUS (*LINK_c2dWaitTimestamp)( c2d_ts_handle timestamp );
75
76 C2D_STATUS (*LINK_c2dDestroySurface)( uint32 surface_id );
77
78 C2D_STATUS (*LINK_c2dMapAddr) ( int mem_fd, void * hostptr, size_t len,
79 size_t offset, uint32 flags, void ** gpuaddr);
80
81 C2D_STATUS (*LINK_c2dUnMapAddr) ( void * gpuaddr);
82
83 C2D_STATUS (*LINK_c2dGetDriverCapabilities) ( C2D_DRIVER_INFO * driver_info);
84
85 /* create a fence fd for the timestamp */
86 C2D_STATUS (*LINK_c2dCreateFenceFD) ( uint32 target_id, c2d_ts_handle timestamp,
87 int32 *fd);
88
89 C2D_STATUS (*LINK_c2dFillSurface) ( uint32 surface_id, uint32 fill_color,
90 C2D_RECT * fill_rect);
91
92 /******************************************************************************/
93
94 #if defined(COPYBIT_Z180)
95 #define MAX_SCALE_FACTOR (4096)
96 #define MAX_DIMENSION (4096)
97 #else
98 #error "Unsupported HW version"
99 #endif
100
101 // The following defines can be changed as required i.e. as we encounter
102 // complex use cases.
103 #define MAX_RGB_SURFACES 32 // Max. RGB layers currently supported per draw
104 #define MAX_YUV_2_PLANE_SURFACES 4 // Max. 2-plane YUV layers currently supported per draw
105 #define MAX_YUV_3_PLANE_SURFACES 1 // Max. 3-plane YUV layers currently supported per draw
106 // +1 for the destination surface. We cannot have multiple destination surfaces.
107 #define MAX_SURFACES (MAX_RGB_SURFACES + MAX_YUV_2_PLANE_SURFACES + MAX_YUV_3_PLANE_SURFACES + 1)
108 #define NUM_SURFACE_TYPES 3 // RGB_SURFACE + YUV_SURFACE_2_PLANES + YUV_SURFACE_3_PLANES
109 #define MAX_BLIT_OBJECT_COUNT 50 // Max. blit objects that can be passed per draw
110
111 enum {
112 RGB_SURFACE,
113 YUV_SURFACE_2_PLANES,
114 YUV_SURFACE_3_PLANES
115 };
116
117 enum eConversionType {
118 CONVERT_TO_ANDROID_FORMAT,
119 CONVERT_TO_C2D_FORMAT
120 };
121
122 enum eC2DFlags {
123 FLAGS_PREMULTIPLIED_ALPHA = 1<<0,
124 FLAGS_YUV_DESTINATION = 1<<1,
125 FLAGS_TEMP_SRC_DST = 1<<2,
126 FLAGS_UBWC_FORMAT_MODE = 1<<3
127 };
128
129 static gralloc::IAllocController* sAlloc = 0;
130 /******************************************************************************/
131
132 /** State information for each device instance */
133 struct copybit_context_t {
134 struct copybit_device_t device;
135 // Templates for the various source surfaces. These templates are created
136 // to avoid the expensive create/destroy of C2D surfaces on every draw.
137 C2D_OBJECT_STR blit_rgb_object[MAX_RGB_SURFACES];
138 C2D_OBJECT_STR blit_yuv_2_plane_object[MAX_YUV_2_PLANE_SURFACES];
139 C2D_OBJECT_STR blit_yuv_3_plane_object[MAX_YUV_3_PLANE_SURFACES];
140 C2D_OBJECT_STR blit_list[MAX_BLIT_OBJECT_COUNT]; // Z-ordered list of blit objects
141 C2D_DRIVER_INFO c2d_driver_info;
142 void *libc2d2;
143 alloc_data temp_src_buffer;
144 alloc_data temp_dst_buffer;
145 unsigned int dst[NUM_SURFACE_TYPES]; // dst surfaces
146 uintptr_t mapped_gpu_addr[MAX_SURFACES]; // GPU addresses mapped inside copybit
147 int blit_rgb_count; // Total RGB surfaces being blit
148 int blit_yuv_2_plane_count; // Total 2 plane YUV surfaces being blit
149 int blit_yuv_3_plane_count; // Total 3 plane YUV surfaces being blit
150 int blit_count; // Total blit objects.
151 unsigned int trg_transform; /* target transform */
152 int fb_width;
153 int fb_height;
154 int src_global_alpha;
155 int config_mask;
156 int dst_surface_type;
157 bool is_premultiplied_alpha;
158 void* time_stamp;
159 bool dst_surface_mapped; // Set when dst surface is mapped to GPU addr
160 void* dst_surface_base; // Stores the dst surface addr
161 bool is_src_ubwc_format;
162 bool is_dst_ubwc_format;
163
164 // used for signaling the wait thread
165 bool wait_timestamp;
166 pthread_t wait_thread_id;
167 bool stop_thread;
168 pthread_mutex_t wait_cleanup_lock;
169 pthread_cond_t wait_cleanup_cond;
170
171 };
172
173 struct bufferInfo {
174 int width;
175 int height;
176 int format;
177 };
178
179 struct yuvPlaneInfo {
180 int yStride; //luma stride
181 int plane1_stride;
182 int plane2_stride;
183 size_t plane1_offset;
184 size_t plane2_offset;
185 };
186
187 /**
188 * Common hardware methods
189 */
190
191 static int open_copybit(const struct hw_module_t* module, const char* name,
192 struct hw_device_t** device);
193
194 static struct hw_module_methods_t copybit_module_methods = {
195 .open = open_copybit,
196 };
197
198 /*
199 * The COPYBIT Module
200 */
201 struct copybit_module_t HAL_MODULE_INFO_SYM = {
202 .common = {
203 .tag = HARDWARE_MODULE_TAG,
204 .version_major = 1,
205 .version_minor = 0,
206 .id = COPYBIT_HARDWARE_MODULE_ID,
207 .name = "QCT COPYBIT C2D 2.0 Module",
208 .author = "Qualcomm",
209 .methods = &copybit_module_methods
210 }
211 };
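/* Typical usage sketch (illustrative only; error handling omitted). A client
 * loads this module through the standard HAL path and drives it via the
 * copybit_device_t function table filled in by open_copybit() below:
 *
 *   hw_module_t const* module;
 *   copybit_device_t* copybit;
 *   if (hw_get_module(COPYBIT_HARDWARE_MODULE_ID, &module) == 0 &&
 *       copybit_open(module, &copybit) == 0) {
 *       copybit->set_parameter(copybit, COPYBIT_TRANSFORM, 0);
 *       copybit->stretch(copybit, &dst, &src, &dst_rect, &src_rect, &region);
 *       copybit_close(copybit);
 *   }
 */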
212
213
214 /* thread function which waits on the timeStamp and cleans up the surfaces */
215 static void* c2d_wait_loop(void* ptr) {
216 copybit_context_t* ctx = (copybit_context_t*)(ptr);
217 char thread_name[64] = "copybitWaitThr";
218 prctl(PR_SET_NAME, (unsigned long) &thread_name, 0, 0, 0);
219 setpriority(PRIO_PROCESS, 0, HAL_PRIORITY_URGENT_DISPLAY);
220
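// Block on the condition variable until flush_get_fence_copybit() signals a
// pending timestamp, or until the context asks this thread to stop.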
221 while(ctx->stop_thread == false) {
222 pthread_mutex_lock(&ctx->wait_cleanup_lock);
223 while(ctx->wait_timestamp == false && !ctx->stop_thread) {
224 pthread_cond_wait(&(ctx->wait_cleanup_cond),
225 &(ctx->wait_cleanup_lock));
226 }
227 if(ctx->wait_timestamp) {
228 if(LINK_c2dWaitTimestamp(ctx->time_stamp)) {
229 ALOGE("%s: LINK_c2dWaitTimeStamp ERROR!!", __FUNCTION__);
230 }
231 ctx->wait_timestamp = false;
232 // Unmap any mapped addresses.
233 for (int i = 0; i < MAX_SURFACES; i++) {
234 if (ctx->mapped_gpu_addr[i]) {
235 LINK_c2dUnMapAddr( (void*)ctx->mapped_gpu_addr[i]);
236 ctx->mapped_gpu_addr[i] = 0;
237 }
238 }
239 // Reset the counts after the draw.
240 ctx->blit_rgb_count = 0;
241 ctx->blit_yuv_2_plane_count = 0;
242 ctx->blit_yuv_3_plane_count = 0;
243 ctx->blit_count = 0;
244 ctx->dst_surface_mapped = false;
245 ctx->dst_surface_base = 0;
246 }
247 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
248 if(ctx->stop_thread)
249 break;
250 }
251 pthread_exit(NULL);
252 return NULL;
253 }
254
255
256 /* convert COPYBIT_FORMAT to C2D format */
257 static int get_format(int format) {
258 switch (format) {
259 case HAL_PIXEL_FORMAT_RGB_565: return C2D_COLOR_FORMAT_565_RGB;
260 case HAL_PIXEL_FORMAT_RGB_888: return C2D_COLOR_FORMAT_888_RGB |
261 C2D_FORMAT_SWAP_RB;
262 case HAL_PIXEL_FORMAT_RGBX_8888: return C2D_COLOR_FORMAT_8888_ARGB |
263 C2D_FORMAT_SWAP_RB |
264 C2D_FORMAT_DISABLE_ALPHA;
265 case HAL_PIXEL_FORMAT_RGBA_8888: return C2D_COLOR_FORMAT_8888_ARGB |
266 C2D_FORMAT_SWAP_RB;
267 case HAL_PIXEL_FORMAT_BGRA_8888: return C2D_COLOR_FORMAT_8888_ARGB;
268 case HAL_PIXEL_FORMAT_RGBA_5551: return C2D_COLOR_FORMAT_5551_RGBA;
269 case HAL_PIXEL_FORMAT_RGBA_4444: return C2D_COLOR_FORMAT_4444_RGBA;
270 case HAL_PIXEL_FORMAT_YCbCr_420_SP: return C2D_COLOR_FORMAT_420_NV12;
271 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:return C2D_COLOR_FORMAT_420_NV12;
272 case HAL_PIXEL_FORMAT_YCrCb_420_SP: return C2D_COLOR_FORMAT_420_NV21;
273 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: return C2D_COLOR_FORMAT_420_NV12 |
274 C2D_FORMAT_MACROTILED;
275 default: ALOGE("%s: invalid format (0x%x)",
276 __FUNCTION__, format);
277 return -EINVAL;
278 }
279 return -EINVAL;
280 }
281
282 /* Get the C2D formats needed for conversion to YUV */
283 static int get_c2d_format_for_yuv_destination(int halFormat) {
284 switch (halFormat) {
285 // We do not swap the RB when the target is YUV
286 case HAL_PIXEL_FORMAT_RGBX_8888: return C2D_COLOR_FORMAT_8888_ARGB |
287 C2D_FORMAT_DISABLE_ALPHA;
288 case HAL_PIXEL_FORMAT_RGBA_8888: return C2D_COLOR_FORMAT_8888_ARGB;
289 // The U and V need to be interchanged when the target is YUV
290 case HAL_PIXEL_FORMAT_YCbCr_420_SP: return C2D_COLOR_FORMAT_420_NV21;
291 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:return C2D_COLOR_FORMAT_420_NV21;
292 case HAL_PIXEL_FORMAT_YCrCb_420_SP: return C2D_COLOR_FORMAT_420_NV12;
293 default: return get_format(halFormat);
294 }
295 return -EINVAL;
296 }
297
298 /* ------------------------------------------------------------------- *//*!
299 * \internal
300 * \brief Get the bpp for a particular color format
301 * \param color format
302 * \return bits per pixel
303 *//* ------------------------------------------------------------------- */
304 int c2diGetBpp(int32 colorformat)
305 {
306
307 int c2dBpp = 0;
308
309 switch(colorformat&0xFF)
310 {
311 case C2D_COLOR_FORMAT_4444_RGBA:
312 case C2D_COLOR_FORMAT_4444_ARGB:
313 case C2D_COLOR_FORMAT_1555_ARGB:
314 case C2D_COLOR_FORMAT_565_RGB:
315 case C2D_COLOR_FORMAT_5551_RGBA:
316 c2dBpp = 16;
317 break;
318 case C2D_COLOR_FORMAT_8888_RGBA:
319 case C2D_COLOR_FORMAT_8888_ARGB:
320 c2dBpp = 32;
321 break;
322 case C2D_COLOR_FORMAT_888_RGB:
323 c2dBpp = 24;
324 break;
325 case C2D_COLOR_FORMAT_8_L:
326 case C2D_COLOR_FORMAT_8_A:
327 c2dBpp = 8;
328 break;
329 case C2D_COLOR_FORMAT_4_A:
330 c2dBpp = 4;
331 break;
332 case C2D_COLOR_FORMAT_1:
333 c2dBpp = 1;
334 break;
335 default:
336 ALOGE("%s ERROR", __func__);
337 break;
338 }
339 return c2dBpp;
340 }
341
342 static size_t c2d_get_gpuaddr(copybit_context_t* ctx,
343 struct private_handle_t *handle, int &mapped_idx)
344 {
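// Map the ION-backed buffer into the GPU address space via c2dMapAddr and
// remember the slot in mapped_gpu_addr[] so the mapping can be released once
// the blit completes (see unmap_gpuaddr(), finish_copybit() and the wait loop).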
345 uint32 memtype;
346 size_t *gpuaddr = 0;
347 C2D_STATUS rc;
348 int freeindex = 0;
349 bool mapaddr = false;
350
351 if(!handle)
352 return 0;
353
354 if (handle->flags & private_handle_t::PRIV_FLAGS_USES_ION)
355 memtype = KGSL_USER_MEM_TYPE_ION;
356 else {
357 ALOGE("Invalid handle flags: 0x%x", handle->flags);
358 return 0;
359 }
360
361 // Check for a freeindex in the mapped_gpu_addr list
362 for (freeindex = 0; freeindex < MAX_SURFACES; freeindex++) {
363 if (ctx->mapped_gpu_addr[freeindex] == 0) {
364 // free index is available
365 // map GPU addr and use this as mapped_idx
366 mapaddr = true;
367 break;
368 }
369 }
370
371 if(mapaddr) {
372 rc = LINK_c2dMapAddr(handle->fd, (void*)handle->base, handle->size,
373 handle->offset, memtype, (void**)&gpuaddr);
374
375 if (rc == C2D_STATUS_OK) {
376 // We have mapped the GPU address inside copybit. We need to unmap
377 // this address after the blit. Store this address
378 ctx->mapped_gpu_addr[freeindex] = (size_t)gpuaddr;
379 mapped_idx = freeindex;
380 }
381 }
382 return (size_t)gpuaddr;
383 }
384
385 static void unmap_gpuaddr(copybit_context_t* ctx, int mapped_idx)
386 {
387 if (!ctx || (mapped_idx == -1))
388 return;
389
390 if (ctx->mapped_gpu_addr[mapped_idx]) {
391 LINK_c2dUnMapAddr( (void*)ctx->mapped_gpu_addr[mapped_idx]);
392 ctx->mapped_gpu_addr[mapped_idx] = 0;
393 }
394 }
395
396 static int is_supported_rgb_format(int format)
397 {
398 switch(format) {
399 case HAL_PIXEL_FORMAT_RGBA_8888:
400 case HAL_PIXEL_FORMAT_RGBX_8888:
401 case HAL_PIXEL_FORMAT_RGB_888:
402 case HAL_PIXEL_FORMAT_RGB_565:
403 case HAL_PIXEL_FORMAT_BGRA_8888:
404 case HAL_PIXEL_FORMAT_RGBA_5551:
405 case HAL_PIXEL_FORMAT_RGBA_4444: {
406 return COPYBIT_SUCCESS;
407 }
408 default:
409 return COPYBIT_FAILURE;
410 }
411 }
412
413 static int get_num_planes(int format)
414 {
415 switch(format) {
416 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
417 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
418 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
419 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: {
420 return 2;
421 }
422 case HAL_PIXEL_FORMAT_YV12: {
423 return 3;
424 }
425 default:
426 return COPYBIT_FAILURE;
427 }
428 }
429
430 static int is_supported_yuv_format(int format)
431 {
432 switch(format) {
433 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
434 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
435 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
436 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: {
437 return COPYBIT_SUCCESS;
438 }
439 default:
440 return COPYBIT_FAILURE;
441 }
442 }
443
444 static int is_valid_destination_format(int format)
445 {
446 if (format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED) {
447 // C2D does not support NV12Tile as a destination format.
448 return COPYBIT_FAILURE;
449 }
450 return COPYBIT_SUCCESS;
451 }
452
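/* Compute the luma/chroma strides and the chroma plane offset for the
 * supported YUV layouts, following the alignment rules noted in each case. */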
453 static int calculate_yuv_offset_and_stride(const bufferInfo& info,
454 yuvPlaneInfo& yuvInfo)
455 {
456 int width = info.width;
457 int height = info.height;
458 int format = info.format;
459
460 int aligned_height = 0;
461 int aligned_width = 0, size = 0;
462
463 switch (format) {
464 case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: {
465 /* NV12 Tile buffers have their luma height aligned to 32bytes and width
466 * aligned to 128 bytes. The chroma offset starts at an 8K boundary
467 */
468 aligned_height = ALIGN(height, 32);
469 aligned_width = ALIGN(width, 128);
470 size = aligned_width * aligned_height;
471 yuvInfo.plane1_offset = ALIGN(size,8192);
472 yuvInfo.yStride = aligned_width;
473 yuvInfo.plane1_stride = aligned_width;
474 break;
475 }
476 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
477 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
478 case HAL_PIXEL_FORMAT_YCrCb_420_SP: {
479 aligned_width = ALIGN(width, 32);
480 yuvInfo.yStride = aligned_width;
481 yuvInfo.plane1_stride = aligned_width;
482 if (HAL_PIXEL_FORMAT_NV12_ENCODEABLE == format) {
483 // The encoder requires a 2K aligned chroma offset
484 yuvInfo.plane1_offset = ALIGN(aligned_width * height, 2048);
485 } else
486 yuvInfo.plane1_offset = aligned_width * height;
487
488 break;
489 }
490 default: {
491 return COPYBIT_FAILURE;
492 }
493 }
494 return COPYBIT_SUCCESS;
495 }
496
497 /** create C2D surface from copybit image */
498 static int set_image(copybit_context_t* ctx, uint32 surfaceId,
499 const struct copybit_image_t *rhs,
500 const eC2DFlags flags, int &mapped_idx)
501 {
502 struct private_handle_t* handle = (struct private_handle_t*)rhs->handle;
503 C2D_SURFACE_TYPE surfaceType;
504 int status = COPYBIT_SUCCESS;
505 uint64_t gpuaddr = 0;
506 int c2d_format;
507 mapped_idx = -1;
508
509 if (flags & FLAGS_YUV_DESTINATION) {
510 c2d_format = get_c2d_format_for_yuv_destination(rhs->format);
511 } else {
512 c2d_format = get_format(rhs->format);
513 }
514
515 if(c2d_format == -EINVAL) {
516 ALOGE("%s: invalid format", __FUNCTION__);
517 return -EINVAL;
518 }
519
520 if(handle == NULL) {
521 ALOGE("%s: invalid handle", __func__);
522 return -EINVAL;
523 }
524
525 if (handle->gpuaddr == 0) {
526 gpuaddr = c2d_get_gpuaddr(ctx, handle, mapped_idx);
527 if(!gpuaddr) {
528 ALOGE("%s: c2d_get_gpuaddr failed", __FUNCTION__);
529 return COPYBIT_FAILURE;
530 }
531 } else {
532 gpuaddr = handle->gpuaddr;
533 }
534
535 /* create C2D surface */
536 if(is_supported_rgb_format(rhs->format) == COPYBIT_SUCCESS) {
537 /* RGB */
538 C2D_RGB_SURFACE_DEF surfaceDef;
539
540 surfaceType = (C2D_SURFACE_TYPE) (C2D_SURFACE_RGB_HOST | C2D_SURFACE_WITH_PHYS);
541
542 surfaceDef.phys = (void*) gpuaddr;
543 surfaceDef.buffer = (void*) (handle->base);
544
545 surfaceDef.format = c2d_format |
546 ((flags & FLAGS_PREMULTIPLIED_ALPHA) ? C2D_FORMAT_PREMULTIPLIED : 0);
547
548 surfaceDef.format = surfaceDef.format |
549 ((flags & FLAGS_UBWC_FORMAT_MODE) ? C2D_FORMAT_UBWC_COMPRESSED : 0);
550
551 surfaceDef.width = rhs->w;
552 surfaceDef.height = rhs->h;
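// The stride handed to C2D is in bytes, derived from a 32-pixel aligned width.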
553 int aligned_width = ALIGN((int)surfaceDef.width,32);
554 surfaceDef.stride = (aligned_width * c2diGetBpp(surfaceDef.format))>>3;
555
556 if(LINK_c2dUpdateSurface( surfaceId,C2D_TARGET | C2D_SOURCE, surfaceType,
557 &surfaceDef)) {
558 ALOGE("%s: RGB Surface c2dUpdateSurface ERROR", __FUNCTION__);
559 unmap_gpuaddr(ctx, mapped_idx);
560 status = COPYBIT_FAILURE;
561 }
562 } else if (is_supported_yuv_format(rhs->format) == COPYBIT_SUCCESS) {
563 C2D_YUV_SURFACE_DEF surfaceDef;
564 memset(&surfaceDef, 0, sizeof(surfaceDef));
565 surfaceType = (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST | C2D_SURFACE_WITH_PHYS);
566 surfaceDef.format = c2d_format;
567
568 bufferInfo info;
569 info.width = rhs->w;
570 info.height = rhs->h;
571 info.format = rhs->format;
572
573 yuvPlaneInfo yuvInfo = {0};
574 status = calculate_yuv_offset_and_stride(info, yuvInfo);
575 if(status != COPYBIT_SUCCESS) {
576 ALOGE("%s: calculate_yuv_offset_and_stride error", __FUNCTION__);
577 unmap_gpuaddr(ctx, mapped_idx);
578 }
579
580 surfaceDef.width = rhs->w;
581 surfaceDef.height = rhs->h;
582 surfaceDef.plane0 = (void*) (handle->base);
583 surfaceDef.phys0 = (void*) (gpuaddr);
584 surfaceDef.stride0 = yuvInfo.yStride;
585
586 surfaceDef.plane1 = (void*) (handle->base + yuvInfo.plane1_offset);
587 surfaceDef.phys1 = (void*) (gpuaddr + yuvInfo.plane1_offset);
588 surfaceDef.stride1 = yuvInfo.plane1_stride;
589 if (3 == get_num_planes(rhs->format)) {
590 surfaceDef.plane2 = (void*) (handle->base + yuvInfo.plane2_offset);
591 surfaceDef.phys2 = (void*) (gpuaddr + yuvInfo.plane2_offset);
592 surfaceDef.stride2 = yuvInfo.plane2_stride;
593 }
594
595 if(LINK_c2dUpdateSurface( surfaceId,C2D_TARGET | C2D_SOURCE, surfaceType,
596 &surfaceDef)) {
597 ALOGE("%s: YUV Surface c2dUpdateSurface ERROR", __FUNCTION__);
598 unmap_gpuaddr(ctx, mapped_idx);
599 status = COPYBIT_FAILURE;
600 }
601 } else {
602 ALOGE("%s: invalid format 0x%x", __FUNCTION__, rhs->format);
603 unmap_gpuaddr(ctx, mapped_idx);
604 status = COPYBIT_FAILURE;
605 }
606
607 return status;
608 }
609
610 /** copy the bits */
611 static int msm_copybit(struct copybit_context_t *ctx, unsigned int target)
612 {
613 if (ctx->blit_count == 0) {
614 return COPYBIT_SUCCESS;
615 }
616
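// Chain the queued blit objects into the singly linked list that c2dDraw expects.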
617 for (int i = 0; i < ctx->blit_count; i++)
618 {
619 ctx->blit_list[i].next = &(ctx->blit_list[i+1]);
620 }
621 ctx->blit_list[ctx->blit_count-1].next = NULL;
622 uint32_t target_transform = ctx->trg_transform;
623 if (ctx->c2d_driver_info.capabilities_mask &
624 C2D_DRIVER_SUPPORTS_OVERRIDE_TARGET_ROTATE_OP) {
625 // For A3xx - set 0x0 as the transform is set in the config_mask
626 target_transform = 0x0;
627 }
628 if(LINK_c2dDraw(target, target_transform, 0x0, 0, 0, ctx->blit_list,
629 ctx->blit_count)) {
630 ALOGE("%s: LINK_c2dDraw ERROR", __FUNCTION__);
631 return COPYBIT_FAILURE;
632 }
633 return COPYBIT_SUCCESS;
634 }
635
636
637
638 static int flush_get_fence_copybit (struct copybit_device_t *dev, int* fd)
639 {
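// Issue the pending blits via c2dDraw, flush them to obtain a timestamp,
// convert that timestamp into a release fence fd for the caller, and hand the
// cleanup of mapped addresses over to the wait thread.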
640 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
641 int status = COPYBIT_FAILURE;
642 if (!ctx)
643 return COPYBIT_FAILURE;
644 pthread_mutex_lock(&ctx->wait_cleanup_lock);
645 status = msm_copybit(ctx, ctx->dst[ctx->dst_surface_type]);
646
647 if(LINK_c2dFlush(ctx->dst[ctx->dst_surface_type], &ctx->time_stamp)) {
648 ALOGE("%s: LINK_c2dFlush ERROR", __FUNCTION__);
649 // unlock the mutex and return failure
650 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
651 return COPYBIT_FAILURE;
652 }
653 if(LINK_c2dCreateFenceFD(ctx->dst[ctx->dst_surface_type], ctx->time_stamp,
654 fd)) {
655 ALOGE("%s: LINK_c2dCreateFenceFD ERROR", __FUNCTION__);
656 status = COPYBIT_FAILURE;
657 }
658 if(status == COPYBIT_SUCCESS) {
659 //signal the wait_thread
660 ctx->wait_timestamp = true;
661 pthread_cond_signal(&ctx->wait_cleanup_cond);
662 }
663 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
664 return status;
665 }
666
667 static int finish_copybit(struct copybit_device_t *dev)
668 {
669 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
670 if (!ctx)
671 return COPYBIT_FAILURE;
672
673 int status = msm_copybit(ctx, ctx->dst[ctx->dst_surface_type]);
674
675 if(LINK_c2dFinish(ctx->dst[ctx->dst_surface_type])) {
676 ALOGE("%s: LINK_c2dFinish ERROR", __FUNCTION__);
677 return COPYBIT_FAILURE;
678 }
679
680 // Unmap any mapped addresses.
681 for (int i = 0; i < MAX_SURFACES; i++) {
682 if (ctx->mapped_gpu_addr[i]) {
683 LINK_c2dUnMapAddr( (void*)ctx->mapped_gpu_addr[i]);
684 ctx->mapped_gpu_addr[i] = 0;
685 }
686 }
687
688 // Reset the counts after the draw.
689 ctx->blit_rgb_count = 0;
690 ctx->blit_yuv_2_plane_count = 0;
691 ctx->blit_yuv_3_plane_count = 0;
692 ctx->blit_count = 0;
693 ctx->dst_surface_mapped = false;
694 ctx->dst_surface_base = 0;
695
696 return status;
697 }
698
699 static int clear_copybit(struct copybit_device_t *dev,
700 struct copybit_image_t const *buf,
701 struct copybit_rect_t *rect)
702 {
703 int ret = COPYBIT_SUCCESS;
704 int flags = FLAGS_PREMULTIPLIED_ALPHA;
705 int mapped_dst_idx = -1;
706 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
707 if (ctx->is_dst_ubwc_format)
708 flags |= FLAGS_UBWC_FORMAT_MODE;
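// C2D_RECT takes x/y plus width/height, so convert from the l/t/r/b rect.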
709 C2D_RECT c2drect = {rect->l, rect->t, rect->r - rect->l, rect->b - rect->t};
710 pthread_mutex_lock(&ctx->wait_cleanup_lock);
711 if(!ctx->dst_surface_mapped) {
712 ret = set_image(ctx, ctx->dst[RGB_SURFACE], buf,
713 (eC2DFlags)flags, mapped_dst_idx);
714 if(ret) {
715 ALOGE("%s: set_image error", __FUNCTION__);
716 unmap_gpuaddr(ctx, mapped_dst_idx);
717 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
718 return COPYBIT_FAILURE;
719 }
720 //clear_copybit is the first call made by HWC for each composition
721 //with the dest surface, hence set dst_surface_mapped.
722 ctx->dst_surface_mapped = true;
723 ctx->dst_surface_base = buf->base;
724 ret = LINK_c2dFillSurface(ctx->dst[RGB_SURFACE], 0x0, &c2drect);
725 }
726 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
727 return ret;
728 }
729
730
731 /** setup rectangles */
732 static void set_rects(struct copybit_context_t *ctx,
733 C2D_OBJECT *c2dObject,
734 const struct copybit_rect_t *dst,
735 const struct copybit_rect_t *src,
736 const struct copybit_rect_t *scissor)
737 {
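// Rect coordinates are passed to C2D in 16.16 fixed point (hence the <<16),
// and the target rect is remapped according to the current target rotation.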
738 // Set the target rect.
739 if((ctx->trg_transform & C2D_TARGET_ROTATE_90) &&
740 (ctx->trg_transform & C2D_TARGET_ROTATE_180)) {
741 /* target rotation is 270 */
742 c2dObject->target_rect.x = (dst->t)<<16;
743 c2dObject->target_rect.y = ctx->fb_width?
744 (ALIGN(ctx->fb_width,32)- dst->r):dst->r;
745 c2dObject->target_rect.y = c2dObject->target_rect.y<<16;
746 c2dObject->target_rect.height = ((dst->r) - (dst->l))<<16;
747 c2dObject->target_rect.width = ((dst->b) - (dst->t))<<16;
748 } else if(ctx->trg_transform & C2D_TARGET_ROTATE_90) {
749 c2dObject->target_rect.x = ctx->fb_height?(ctx->fb_height - dst->b):dst->b;
750 c2dObject->target_rect.x = c2dObject->target_rect.x<<16;
751 c2dObject->target_rect.y = (dst->l)<<16;
752 c2dObject->target_rect.height = ((dst->r) - (dst->l))<<16;
753 c2dObject->target_rect.width = ((dst->b) - (dst->t))<<16;
754 } else if(ctx->trg_transform & C2D_TARGET_ROTATE_180) {
755 c2dObject->target_rect.y = ctx->fb_height?(ctx->fb_height - dst->b):dst->b;
756 c2dObject->target_rect.y = c2dObject->target_rect.y<<16;
757 c2dObject->target_rect.x = ctx->fb_width?
758 (ALIGN(ctx->fb_width,32) - dst->r):dst->r;
759 c2dObject->target_rect.x = c2dObject->target_rect.x<<16;
760 c2dObject->target_rect.height = ((dst->b) - (dst->t))<<16;
761 c2dObject->target_rect.width = ((dst->r) - (dst->l))<<16;
762 } else {
763 c2dObject->target_rect.x = (dst->l)<<16;
764 c2dObject->target_rect.y = (dst->t)<<16;
765 c2dObject->target_rect.height = ((dst->b) - (dst->t))<<16;
766 c2dObject->target_rect.width = ((dst->r) - (dst->l))<<16;
767 }
768 c2dObject->config_mask |= C2D_TARGET_RECT_BIT;
769
770 // Set the source rect
771 c2dObject->source_rect.x = (src->l)<<16;
772 c2dObject->source_rect.y = (src->t)<<16;
773 c2dObject->source_rect.height = ((src->b) - (src->t))<<16;
774 c2dObject->source_rect.width = ((src->r) - (src->l))<<16;
775 c2dObject->config_mask |= C2D_SOURCE_RECT_BIT;
776
777 // Set the scissor rect
778 c2dObject->scissor_rect.x = scissor->l;
779 c2dObject->scissor_rect.y = scissor->t;
780 c2dObject->scissor_rect.height = (scissor->b) - (scissor->t);
781 c2dObject->scissor_rect.width = (scissor->r) - (scissor->l);
782 c2dObject->config_mask |= C2D_SCISSOR_RECT_BIT;
783 }
784
785 /*****************************************************************************/
786
787 /** Set a parameter to value */
788 static int set_parameter_copybit(
789 struct copybit_device_t *dev,
790 int name,
791 int value)
792 {
793 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
794 int status = COPYBIT_SUCCESS;
795 if (!ctx) {
796 ALOGE("%s: null context", __FUNCTION__);
797 return -EINVAL;
798 }
799
800 pthread_mutex_lock(&ctx->wait_cleanup_lock);
801 switch(name) {
802 case COPYBIT_PLANE_ALPHA:
803 {
804 if (value < 0) value = 0;
805 if (value >= 256) value = 255;
806
807 ctx->src_global_alpha = value;
808 if (value < 255)
809 ctx->config_mask |= C2D_GLOBAL_ALPHA_BIT;
810 else
811 ctx->config_mask &= ~C2D_GLOBAL_ALPHA_BIT;
812 }
813 break;
814 case COPYBIT_BLEND_MODE:
815 {
816 if (value == COPYBIT_BLENDING_NONE) {
817 ctx->config_mask |= C2D_ALPHA_BLEND_NONE;
818 ctx->is_premultiplied_alpha = true;
819 } else if (value == COPYBIT_BLENDING_PREMULT) {
820 ctx->is_premultiplied_alpha = true;
821 } else {
822 ctx->config_mask &= ~C2D_ALPHA_BLEND_NONE;
823 }
824 }
825 break;
826 case COPYBIT_TRANSFORM:
827 {
828 unsigned int transform = 0;
829 uint32 config_mask = 0;
830 config_mask |= C2D_OVERRIDE_GLOBAL_TARGET_ROTATE_CONFIG;
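// A copybit rotation is programmed as the complementary C2D target rotation
// (ROT_90 -> C2D_TARGET_ROTATE_270, ROT_270 -> C2D_TARGET_ROTATE_90), since
// the transform here is applied to the destination rather than the source.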
831 if((value & 0x7) == COPYBIT_TRANSFORM_ROT_180) {
832 transform = C2D_TARGET_ROTATE_180;
833 config_mask |= C2D_OVERRIDE_TARGET_ROTATE_180;
834 } else if((value & 0x7) == COPYBIT_TRANSFORM_ROT_270) {
835 transform = C2D_TARGET_ROTATE_90;
836 config_mask |= C2D_OVERRIDE_TARGET_ROTATE_90;
837 } else if(value == COPYBIT_TRANSFORM_ROT_90) {
838 transform = C2D_TARGET_ROTATE_270;
839 config_mask |= C2D_OVERRIDE_TARGET_ROTATE_270;
840 } else {
841 config_mask |= C2D_OVERRIDE_TARGET_ROTATE_0;
842 if(value & COPYBIT_TRANSFORM_FLIP_H) {
843 config_mask |= C2D_MIRROR_H_BIT;
844 } else if(value & COPYBIT_TRANSFORM_FLIP_V) {
845 config_mask |= C2D_MIRROR_V_BIT;
846 }
847 }
848
849 if (ctx->c2d_driver_info.capabilities_mask &
850 C2D_DRIVER_SUPPORTS_OVERRIDE_TARGET_ROTATE_OP) {
851 ctx->config_mask |= config_mask;
852 } else {
853 // The transform for this surface does not match the current
854 // target transform. Draw all previous surfaces. This will be
855 // changed once we have a new mechanism to send different
856 // target rotations to c2d.
857 finish_copybit(dev);
858 }
859 ctx->trg_transform = transform;
860 }
861 break;
862 case COPYBIT_FRAMEBUFFER_WIDTH:
863 ctx->fb_width = value;
864 break;
865 case COPYBIT_FRAMEBUFFER_HEIGHT:
866 ctx->fb_height = value;
867 break;
868 case COPYBIT_ROTATION_DEG:
869 case COPYBIT_DITHER:
870 case COPYBIT_BLUR:
871 case COPYBIT_BLIT_TO_FRAMEBUFFER:
872 // Do nothing
873 break;
874 case COPYBIT_SRC_FORMAT_MODE:
875 ctx->is_src_ubwc_format = (value == COPYBIT_UBWC_COMPRESSED);
876 break;
877 case COPYBIT_DST_FORMAT_MODE:
878 ctx->is_dst_ubwc_format = (value == COPYBIT_UBWC_COMPRESSED);
879 break;
880 default:
881 ALOGE("%s: default case param=0x%x", __FUNCTION__, name);
882 status = -EINVAL;
883 break;
884 }
885 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
886 return status;
887 }
888
889 /** Get a static info value */
890 static int get(struct copybit_device_t *dev, int name)
891 {
892 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
893 int value;
894
895 if (!ctx) {
896 ALOGE("%s: null context error", __FUNCTION__);
897 return -EINVAL;
898 }
899
900 switch(name) {
901 case COPYBIT_MINIFICATION_LIMIT:
902 value = MAX_SCALE_FACTOR;
903 break;
904 case COPYBIT_MAGNIFICATION_LIMIT:
905 value = MAX_SCALE_FACTOR;
906 break;
907 case COPYBIT_SCALING_FRAC_BITS:
908 value = 32;
909 break;
910 case COPYBIT_ROTATION_STEP_DEG:
911 value = 1;
912 break;
913 case COPYBIT_UBWC_SUPPORT:
914 value = 0;
915 if (ctx->c2d_driver_info.capabilities_mask & C2D_DRIVER_SUPPORTS_UBWC_COMPRESSED_OP) {
916 value = 1;
917 }
918 break;
919 default:
920 ALOGE("%s: default case param=0x%x", __FUNCTION__, name);
921 value = -EINVAL;
922 }
923 return value;
924 }
925
926 /* Function to check if we need a temporary buffer for the blit.
927 * This would happen if the requested destination stride and the
928 * C2D stride do not match. We ignore RGB buffers, since their
929 * stride is always aligned to 32.
930 */
931 static bool need_temp_buffer(struct copybit_image_t const *img)
932 {
933 if (COPYBIT_SUCCESS == is_supported_rgb_format(img->format))
934 return false;
935
936 struct private_handle_t* handle = (struct private_handle_t*)img->handle;
937
938 // The width parameter in the handle contains the aligned_w. We check if we
939 // need to convert based on this param. YUV formats have bpp=1, so checking
940 // if the requested stride is aligned should suffice.
941 if (0 == (handle->width)%32) {
942 return false;
943 }
944
945 return true;
946 }
947
948 /* Function to extract the information from the copybit image and set the corresponding
949 * values in the bufferInfo struct.
950 */
951 static void populate_buffer_info(struct copybit_image_t const *img, bufferInfo& info)
952 {
953 info.width = img->w;
954 info.height = img->h;
955 info.format = img->format;
956 }
957
958 /* Function to get the required size for a particular format, inorder for C2D to perform
959 * the blit operation.
960 */
961 static int get_size(const bufferInfo& info)
962 {
963 int size = 0;
964 int w = info.width;
965 int h = info.height;
966 int aligned_w = ALIGN(w, 32);
967 switch(info.format) {
968 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
969 {
970 // Chroma for this format is aligned to 2K.
971 size = ALIGN((aligned_w*h), 2048) +
972 ALIGN(aligned_w/2, 32) * (h/2) *2;
973 size = ALIGN(size, 4096);
974 } break;
975 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
976 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
977 {
978 size = aligned_w * h +
979 ALIGN(aligned_w/2, 32) * (h/2) * 2;
980 size = ALIGN(size, 4096);
981 } break;
982 default: break;
983 }
984 return size;
985 }
986
987 /* Function to allocate memory for the temporary buffer. This memory is
988 * allocated through the gralloc IAllocController (ION). It is the caller's
989 * responsibility to free this memory.
990 */
991 static int get_temp_buffer(const bufferInfo& info, alloc_data& data)
992 {
993 ALOGD("%s E", __FUNCTION__);
994 // Alloc memory from system heap
995 data.base = 0;
996 data.fd = -1;
997 data.offset = 0;
998 data.size = get_size(info);
999 data.align = getpagesize();
1000 data.uncached = true;
1001 int allocFlags = 0;
1002
1003 if (sAlloc == 0) {
1004 sAlloc = gralloc::IAllocController::getInstance();
1005 }
1006
1007 if (sAlloc == 0) {
1008 ALOGE("%s: sAlloc is still NULL", __FUNCTION__);
1009 return COPYBIT_FAILURE;
1010 }
1011
1012 int err = sAlloc->allocate(data, allocFlags);
1013 if (0 != err) {
1014 ALOGE("%s: allocate failed", __FUNCTION__);
1015 return COPYBIT_FAILURE;
1016 }
1017
1018 ALOGD("%s X", __FUNCTION__);
1019 return err;
1020 }
1021
1022 /* Function to free the temporary allocated memory.*/
1023 static void free_temp_buffer(alloc_data &data)
1024 {
1025 if (-1 != data.fd) {
1026 IMemAlloc* memalloc = sAlloc->getAllocator(data.allocType);
1027 memalloc->free_buffer(data.base, data.size, 0, data.fd);
1028 }
1029 }
1030
1031 /* Function to perform the software color conversion. Convert the
1032 * C2D compatible format to the Android compatible format
1033 */
1034 static int copy_image(private_handle_t *src_handle,
1035 struct copybit_image_t const *rhs,
1036 eConversionType conversionType)
1037 {
1038 if (src_handle->fd == -1) {
1039 ALOGE("%s: src_handle fd is invalid", __FUNCTION__);
1040 return COPYBIT_FAILURE;
1041 }
1042
1043 // Copy the info.
1044 int ret = COPYBIT_SUCCESS;
1045 switch(rhs->format) {
1046 case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
1047 case HAL_PIXEL_FORMAT_YCbCr_420_SP:
1048 case HAL_PIXEL_FORMAT_YCrCb_420_SP:
1049 {
1050 if (CONVERT_TO_ANDROID_FORMAT == conversionType) {
1051 return convert_yuv_c2d_to_yuv_android(src_handle, rhs);
1052 } else {
1053 return convert_yuv_android_to_yuv_c2d(src_handle, rhs);
1054 }
1055
1056 } break;
1057 default: {
1058 ALOGE("%s: invalid format 0x%x", __FUNCTION__, rhs->format);
1059 ret = COPYBIT_FAILURE;
1060 } break;
1061 }
1062 return ret;
1063 }
1064
1065 static void delete_handle(private_handle_t *handle)
1066 {
1067 if (handle) {
1068 delete handle;
1069 handle = 0;
1070 }
1071 }
1072
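/* Decide whether the queued blits must be drawn immediately: operations that
 * involve temporary source/destination buffers or a YUV destination are
 * flushed right away instead of being batched with later requests. */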
1073 static bool need_to_execute_draw(eC2DFlags flags)
1074 {
1075 if (flags & FLAGS_TEMP_SRC_DST) {
1076 return true;
1077 }
1078 if (flags & FLAGS_YUV_DESTINATION) {
1079 return true;
1080 }
1081 return false;
1082 }
1083
1084 /** do a stretch blit type operation */
1085 static int stretch_copybit_internal(
1086 struct copybit_device_t *dev,
1087 struct copybit_image_t const *dst,
1088 struct copybit_image_t const *src,
1089 struct copybit_rect_t const *dst_rect,
1090 struct copybit_rect_t const *src_rect,
1091 struct copybit_region_t const *region,
1092 bool enableBlend)
1093 {
1094 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
1095 int status = COPYBIT_SUCCESS;
1096 int flags = 0;
1097 int src_surface_type;
1098 int mapped_src_idx = -1, mapped_dst_idx = -1;
1099 C2D_OBJECT_STR src_surface;
1100
1101 if (!ctx) {
1102 ALOGE("%s: null context error", __FUNCTION__);
1103 return -EINVAL;
1104 }
1105
1106 if (src->w > MAX_DIMENSION || src->h > MAX_DIMENSION) {
1107 ALOGE("%s: src dimension error", __FUNCTION__);
1108 return -EINVAL;
1109 }
1110
1111 if (dst->w > MAX_DIMENSION || dst->h > MAX_DIMENSION) {
1112 ALOGE("%s : dst dimension error dst w %d h %d", __FUNCTION__, dst->w,
1113 dst->h);
1114 return -EINVAL;
1115 }
1116
1117 if (is_valid_destination_format(dst->format) == COPYBIT_FAILURE) {
1118 ALOGE("%s: Invalid destination format format = 0x%x", __FUNCTION__,
1119 dst->format);
1120 return COPYBIT_FAILURE;
1121 }
1122
1123 int dst_surface_type;
1124 if (ctx->is_dst_ubwc_format)
1125 flags |= FLAGS_UBWC_FORMAT_MODE;
1126
1127 if (is_supported_rgb_format(dst->format) == COPYBIT_SUCCESS) {
1128 dst_surface_type = RGB_SURFACE;
1129 flags |= FLAGS_PREMULTIPLIED_ALPHA;
1130 } else if (is_supported_yuv_format(dst->format) == COPYBIT_SUCCESS) {
1131 int num_planes = get_num_planes(dst->format);
1132 flags |= FLAGS_YUV_DESTINATION;
1133 if (num_planes == 2) {
1134 dst_surface_type = YUV_SURFACE_2_PLANES;
1135 } else if (num_planes == 3) {
1136 dst_surface_type = YUV_SURFACE_3_PLANES;
1137 } else {
1138 ALOGE("%s: dst number of YUV planes is invalid dst format = 0x%x",
1139 __FUNCTION__, dst->format);
1140 return COPYBIT_FAILURE;
1141 }
1142 } else {
1143 ALOGE("%s: Invalid dst surface format 0x%x", __FUNCTION__,
1144 dst->format);
1145 return COPYBIT_FAILURE;
1146 }
1147
1148 if (ctx->blit_rgb_count == MAX_RGB_SURFACES ||
1149 ctx->blit_yuv_2_plane_count == MAX_YUV_2_PLANE_SURFACES ||
1150 ctx->blit_yuv_3_plane_count == MAX_YUV_3_PLANE_SURFACES ||
1151 ctx->blit_count == MAX_BLIT_OBJECT_COUNT ||
1152 ctx->dst_surface_type != dst_surface_type) {
1153 // we have reached the max. limits of our internal structures or
1154 // changed the target.
1155 // Draw the remaining surfaces. We need to do the finish here since
1156 // we need to free up the surface templates.
1157 finish_copybit(dev);
1158 }
1159
1160 ctx->dst_surface_type = dst_surface_type;
1161
1162 // Update the destination
1163 copybit_image_t dst_image;
1164 dst_image.w = dst->w;
1165 dst_image.h = dst->h;
1166 dst_image.format = dst->format;
1167 dst_image.handle = dst->handle;
1168 // Check if we need a temp. copy for the destination. We'd need this if the
1169 // destination width is not aligned to 32. This case occurs for YUV formats.
1170 // RGB formats are aligned to 32.
1171 bool need_temp_dst = need_temp_buffer(dst);
1172 bufferInfo dst_info;
1173 populate_buffer_info(dst, dst_info);
1174 private_handle_t* dst_hnd = new private_handle_t(-1, 0, 0, 0, dst_info.format,
1175 dst_info.width, dst_info.height);
1176 if (dst_hnd == NULL) {
1177 ALOGE("%s: dst_hnd is null", __FUNCTION__);
1178 return COPYBIT_FAILURE;
1179 }
1180 if (need_temp_dst) {
1181 if (get_size(dst_info) != (int) ctx->temp_dst_buffer.size) {
1182 free_temp_buffer(ctx->temp_dst_buffer);
1183 // Create a temp buffer and set that as the destination.
1184 if (COPYBIT_FAILURE == get_temp_buffer(dst_info, ctx->temp_dst_buffer)) {
1185 ALOGE("%s: get_temp_buffer(dst) failed", __FUNCTION__);
1186 delete_handle(dst_hnd);
1187 return COPYBIT_FAILURE;
1188 }
1189 }
1190 dst_hnd->fd = ctx->temp_dst_buffer.fd;
1191 dst_hnd->size = ctx->temp_dst_buffer.size;
1192 dst_hnd->flags = ctx->temp_dst_buffer.allocType;
1193 dst_hnd->base = (uintptr_t)(ctx->temp_dst_buffer.base);
1194 dst_hnd->offset = ctx->temp_dst_buffer.offset;
1195 dst_hnd->gpuaddr = 0;
1196 dst_image.handle = dst_hnd;
1197 }
1198 if(!ctx->dst_surface_mapped) {
1199 //map the destination surface to GPU address
1200 status = set_image(ctx, ctx->dst[ctx->dst_surface_type], &dst_image,
1201 (eC2DFlags)flags, mapped_dst_idx);
1202 if(status) {
1203 ALOGE("%s: dst: set_image error", __FUNCTION__);
1204 delete_handle(dst_hnd);
1205 unmap_gpuaddr(ctx, mapped_dst_idx);
1206 return COPYBIT_FAILURE;
1207 }
1208 ctx->dst_surface_mapped = true;
1209 ctx->dst_surface_base = dst->base;
1210 } else if(ctx->dst_surface_mapped && ctx->dst_surface_base != dst->base) {
1211 // The destination surface for the operation should be the same across
1212 // multiple requests; this check catches any case where the destination
1213 // changes.
1214 ALOGE("%s: a different destination surface!!", __FUNCTION__);
1215 }
1216
1217 // Update the source
1218 flags = 0;
1219 if(is_supported_rgb_format(src->format) == COPYBIT_SUCCESS) {
1220 src_surface_type = RGB_SURFACE;
1221 src_surface = ctx->blit_rgb_object[ctx->blit_rgb_count];
1222 } else if (is_supported_yuv_format(src->format) == COPYBIT_SUCCESS) {
1223 int num_planes = get_num_planes(src->format);
1224 if (num_planes == 2) {
1225 src_surface_type = YUV_SURFACE_2_PLANES;
1226 src_surface = ctx->blit_yuv_2_plane_object[ctx->blit_yuv_2_plane_count];
1227 } else if (num_planes == 3) {
1228 src_surface_type = YUV_SURFACE_3_PLANES;
1229 src_surface = ctx->blit_yuv_3_plane_object[ctx->blit_yuv_3_plane_count];
1230 } else {
1231 ALOGE("%s: src number of YUV planes is invalid src format = 0x%x",
1232 __FUNCTION__, src->format);
1233 delete_handle(dst_hnd);
1234 unmap_gpuaddr(ctx, mapped_dst_idx);
1235 return -EINVAL;
1236 }
1237 } else {
1238 ALOGE("%s: Invalid source surface format 0x%x", __FUNCTION__,
1239 src->format);
1240 delete_handle(dst_hnd);
1241 unmap_gpuaddr(ctx, mapped_dst_idx);
1242 return -EINVAL;
1243 }
1244
1245 copybit_image_t src_image;
1246 src_image.w = src->w;
1247 src_image.h = src->h;
1248 src_image.format = src->format;
1249 src_image.handle = src->handle;
1250
1251 bool need_temp_src = need_temp_buffer(src);
1252 bufferInfo src_info;
1253 populate_buffer_info(src, src_info);
1254 private_handle_t* src_hnd = new private_handle_t(-1, 0, 0, 0, src_info.format,
1255 src_info.width, src_info.height);
1256 if (NULL == src_hnd) {
1257 ALOGE("%s: src_hnd is null", __FUNCTION__);
1258 delete_handle(dst_hnd);
1259 unmap_gpuaddr(ctx, mapped_dst_idx);
1260 return COPYBIT_FAILURE;
1261 }
1262 if (need_temp_src) {
1263 if (get_size(src_info) != (int) ctx->temp_src_buffer.size) {
1264 free_temp_buffer(ctx->temp_src_buffer);
1265 // Create a temp buffer and set that as the destination.
1266 if (COPYBIT_SUCCESS != get_temp_buffer(src_info,
1267 ctx->temp_src_buffer)) {
1268 ALOGE("%s: get_temp_buffer(src) failed", __FUNCTION__);
1269 delete_handle(dst_hnd);
1270 delete_handle(src_hnd);
1271 unmap_gpuaddr(ctx, mapped_dst_idx);
1272 return COPYBIT_FAILURE;
1273 }
1274 }
1275 src_hnd->fd = ctx->temp_src_buffer.fd;
1276 src_hnd->size = ctx->temp_src_buffer.size;
1277 src_hnd->flags = ctx->temp_src_buffer.allocType;
1278 src_hnd->base = (uintptr_t)(ctx->temp_src_buffer.base);
1279 src_hnd->offset = ctx->temp_src_buffer.offset;
1280 src_hnd->gpuaddr = 0;
1281 src_image.handle = src_hnd;
1282
1283 // Copy the source.
1284 status = copy_image((private_handle_t *)src->handle, &src_image,
1285 CONVERT_TO_C2D_FORMAT);
1286 if (status == COPYBIT_FAILURE) {
1287 ALOGE("%s:copy_image failed in temp source",__FUNCTION__);
1288 delete_handle(dst_hnd);
1289 delete_handle(src_hnd);
1290 unmap_gpuaddr(ctx, mapped_dst_idx);
1291 return status;
1292 }
1293
1294 // Clean the cache
1295 IMemAlloc* memalloc = sAlloc->getAllocator(src_hnd->flags);
1296 if (memalloc->clean_buffer((void *)(src_hnd->base), src_hnd->size,
1297 src_hnd->offset, src_hnd->fd,
1298 gralloc::CACHE_CLEAN)) {
1299 ALOGE("%s: clean_buffer failed", __FUNCTION__);
1300 delete_handle(dst_hnd);
1301 delete_handle(src_hnd);
1302 unmap_gpuaddr(ctx, mapped_dst_idx);
1303 return COPYBIT_FAILURE;
1304 }
1305 }
1306
1307 flags |= (ctx->is_premultiplied_alpha) ? FLAGS_PREMULTIPLIED_ALPHA : 0;
1308 flags |= (ctx->dst_surface_type != RGB_SURFACE) ? FLAGS_YUV_DESTINATION : 0;
1309 flags |= (ctx->is_src_ubwc_format) ? FLAGS_UBWC_FORMAT_MODE : 0;
1310 status = set_image(ctx, src_surface.surface_id, &src_image,
1311 (eC2DFlags)flags, mapped_src_idx);
1312 if(status) {
1313 ALOGE("%s: set_image (src) error", __FUNCTION__);
1314 delete_handle(dst_hnd);
1315 delete_handle(src_hnd);
1316 unmap_gpuaddr(ctx, mapped_dst_idx);
1317 unmap_gpuaddr(ctx, mapped_src_idx);
1318 return COPYBIT_FAILURE;
1319 }
1320
1321 src_surface.config_mask = C2D_NO_ANTIALIASING_BIT | ctx->config_mask;
1322 src_surface.global_alpha = ctx->src_global_alpha;
1323 if (enableBlend) {
1324 if(src_surface.config_mask & C2D_GLOBAL_ALPHA_BIT) {
1325 src_surface.config_mask &= ~C2D_ALPHA_BLEND_NONE;
1326 if(!(src_surface.global_alpha)) {
1327 // src alpha is zero
1328 delete_handle(dst_hnd);
1329 delete_handle(src_hnd);
1330 unmap_gpuaddr(ctx, mapped_dst_idx);
1331 unmap_gpuaddr(ctx, mapped_src_idx);
1332 return COPYBIT_FAILURE;
1333 }
1334 }
1335 } else {
1336 src_surface.config_mask |= C2D_ALPHA_BLEND_NONE;
1337 }
1338
1339 if (src_surface_type == RGB_SURFACE) {
1340 ctx->blit_rgb_object[ctx->blit_rgb_count] = src_surface;
1341 ctx->blit_rgb_count++;
1342 } else if (src_surface_type == YUV_SURFACE_2_PLANES) {
1343 ctx->blit_yuv_2_plane_object[ctx->blit_yuv_2_plane_count] = src_surface;
1344 ctx->blit_yuv_2_plane_count++;
1345 } else {
1346 ctx->blit_yuv_3_plane_object[ctx->blit_yuv_3_plane_count] = src_surface;
1347 ctx->blit_yuv_3_plane_count++;
1348 }
1349
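// Each clip rect supplied by the region becomes its own blit object sharing
// the source surface configured above.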
1350 struct copybit_rect_t clip;
1351 while ((status == 0) && region->next(region, &clip)) {
1352 set_rects(ctx, &(src_surface), dst_rect, src_rect, &clip);
1353 if (ctx->blit_count == MAX_BLIT_OBJECT_COUNT) {
1354 ALOGW("Reached end of blit count");
1355 finish_copybit(dev);
1356 }
1357 ctx->blit_list[ctx->blit_count] = src_surface;
1358 ctx->blit_count++;
1359 }
1360
1361 // Check if we need to perform an early draw-finish.
1362 flags |= (need_temp_dst || need_temp_src) ? FLAGS_TEMP_SRC_DST : 0;
1363 if (need_to_execute_draw((eC2DFlags)flags))
1364 {
1365 finish_copybit(dev);
1366 }
1367
1368 if (need_temp_dst) {
1369 // Copy the temporary destination (minus the alignment padding) back to
1370 // the actual destination.
1371 status = copy_image(dst_hnd, dst, CONVERT_TO_ANDROID_FORMAT);
1372 if (status == COPYBIT_FAILURE) {
1373 ALOGE("%s:copy_image failed in temp Dest",__FUNCTION__);
1374 delete_handle(dst_hnd);
1375 delete_handle(src_hnd);
1376 unmap_gpuaddr(ctx, mapped_dst_idx);
1377 unmap_gpuaddr(ctx, mapped_src_idx);
1378 return status;
1379 }
1380 // Clean the cache.
1381 IMemAlloc* memalloc = sAlloc->getAllocator(dst_hnd->flags);
1382 memalloc->clean_buffer((void *)(dst_hnd->base), dst_hnd->size,
1383 dst_hnd->offset, dst_hnd->fd,
1384 gralloc::CACHE_CLEAN);
1385 }
1386 delete_handle(dst_hnd);
1387 delete_handle(src_hnd);
1388
1389 ctx->is_premultiplied_alpha = false;
1390 ctx->fb_width = 0;
1391 ctx->fb_height = 0;
1392 ctx->config_mask = 0;
1393 return status;
1394 }
1395
1396 static int set_sync_copybit(struct copybit_device_t *dev,
1397 int /*acquireFenceFd*/)
1398 {
1399 if(!dev)
1400 return -EINVAL;
1401
1402 return 0;
1403 }
1404
1405 static int stretch_copybit(
1406 struct copybit_device_t *dev,
1407 struct copybit_image_t const *dst,
1408 struct copybit_image_t const *src,
1409 struct copybit_rect_t const *dst_rect,
1410 struct copybit_rect_t const *src_rect,
1411 struct copybit_region_t const *region)
1412 {
1413 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
1414 int status = COPYBIT_SUCCESS;
1415 bool needsBlending = (ctx->src_global_alpha != 0);
1416 pthread_mutex_lock(&ctx->wait_cleanup_lock);
1417 status = stretch_copybit_internal(dev, dst, src, dst_rect, src_rect,
1418 region, needsBlending);
1419 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
1420 return status;
1421 }
1422
1423 /** Perform a blit type operation */
1424 static int blit_copybit(
1425 struct copybit_device_t *dev,
1426 struct copybit_image_t const *dst,
1427 struct copybit_image_t const *src,
1428 struct copybit_region_t const *region)
1429 {
1430 int status = COPYBIT_SUCCESS;
1431 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
1432 struct copybit_rect_t dr = { 0, 0, (int)dst->w, (int)dst->h };
1433 struct copybit_rect_t sr = { 0, 0, (int)src->w, (int)src->h };
1434 pthread_mutex_lock(&ctx->wait_cleanup_lock);
1435 status = stretch_copybit_internal(dev, dst, src, &dr, &sr, region, false);
1436 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
1437 return status;
1438 }
1439
1440 /** Fill the rect on dst with RGBA color **/
1441 static int fill_color(struct copybit_device_t *dev,
1442 struct copybit_image_t const *dst,
1443 struct copybit_rect_t const *rect,
1444 uint32_t /*color*/)
1445 {
1446 // TODO: Implement once c2d driver supports color fill
1447 if(!dev || !dst || !rect)
1448 return -EINVAL;
1449
1450 return -EINVAL;
1451 }
1452
1453 /*****************************************************************************/
1454
1455 static void clean_up(copybit_context_t* ctx)
1456 {
1457 void* ret;
1458 if (!ctx)
1459 return;
1460
1461 // stop the wait_cleanup_thread
1462 pthread_mutex_lock(&ctx->wait_cleanup_lock);
1463 ctx->stop_thread = true;
1464 // Signal waiting thread
1465 pthread_cond_signal(&ctx->wait_cleanup_cond);
1466 pthread_mutex_unlock(&ctx->wait_cleanup_lock);
1467 // waits for the cleanup thread to exit
1468 pthread_join(ctx->wait_thread_id, &ret);
1469 pthread_mutex_destroy(&ctx->wait_cleanup_lock);
1470 pthread_cond_destroy (&ctx->wait_cleanup_cond);
1471
1472 for (int i = 0; i < NUM_SURFACE_TYPES; i++) {
1473 if (ctx->dst[i])
1474 LINK_c2dDestroySurface(ctx->dst[i]);
1475 }
1476
1477 for (int i = 0; i < MAX_RGB_SURFACES; i++) {
1478 if (ctx->blit_rgb_object[i].surface_id)
1479 LINK_c2dDestroySurface(ctx->blit_rgb_object[i].surface_id);
1480 }
1481
1482 for (int i = 0; i < MAX_YUV_2_PLANE_SURFACES; i++) {
1483 if (ctx->blit_yuv_2_plane_object[i].surface_id)
1484 LINK_c2dDestroySurface(ctx->blit_yuv_2_plane_object[i].surface_id);
1485 }
1486
1487 for (int i = 0; i < MAX_YUV_3_PLANE_SURFACES; i++) {
1488 if (ctx->blit_yuv_3_plane_object[i].surface_id)
1489 LINK_c2dDestroySurface(ctx->blit_yuv_3_plane_object[i].surface_id);
1490 }
1491
1492 if (ctx->libc2d2) {
1493 ::dlclose(ctx->libc2d2);
1494 ALOGV("dlclose(libc2d2)");
1495 }
1496
1497 free(ctx);
1498 }
1499
1500 /** Close the copybit device */
1501 static int close_copybit(struct hw_device_t *dev)
1502 {
1503 struct copybit_context_t* ctx = (struct copybit_context_t*)dev;
1504 if (ctx) {
1505 free_temp_buffer(ctx->temp_src_buffer);
1506 free_temp_buffer(ctx->temp_dst_buffer);
1507 }
1508 clean_up(ctx);
1509 return 0;
1510 }
1511
1512 /** Open a new instance of a copybit device using name */
1513 static int open_copybit(const struct hw_module_t* module, const char* name,
1514 struct hw_device_t** device)
1515 {
1516 int status = COPYBIT_SUCCESS;
1517 if (strcmp(name, COPYBIT_HARDWARE_COPYBIT0)) {
1518 return COPYBIT_FAILURE;
1519 }
1520
1521 C2D_RGB_SURFACE_DEF surfDefinition = {0};
1522 C2D_YUV_SURFACE_DEF yuvSurfaceDef = {0} ;
1523 struct copybit_context_t *ctx;
1524
1525 ctx = (struct copybit_context_t *)malloc(sizeof(struct copybit_context_t));
1526 if(!ctx) {
1527 ALOGE("%s: malloc failed", __FUNCTION__);
1528 return COPYBIT_FAILURE;
1529 }
1530
1531 /* initialize drawstate */
1532 memset(ctx, 0, sizeof(*ctx));
1533 ctx->libc2d2 = ::dlopen("libC2D2.so", RTLD_NOW);
1534 if (!ctx->libc2d2) {
1535 ALOGE("FATAL ERROR: could not dlopen libc2d2.so: %s", dlerror());
1536 clean_up(ctx);
1537 status = COPYBIT_FAILURE;
1538 *device = NULL;
1539 return status;
1540 }
1541 *(void **)&LINK_c2dCreateSurface = ::dlsym(ctx->libc2d2,
1542 "c2dCreateSurface");
1543 *(void **)&LINK_c2dUpdateSurface = ::dlsym(ctx->libc2d2,
1544 "c2dUpdateSurface");
1545 *(void **)&LINK_c2dReadSurface = ::dlsym(ctx->libc2d2,
1546 "c2dReadSurface");
1547 *(void **)&LINK_c2dDraw = ::dlsym(ctx->libc2d2, "c2dDraw");
1548 *(void **)&LINK_c2dFlush = ::dlsym(ctx->libc2d2, "c2dFlush");
1549 *(void **)&LINK_c2dFinish = ::dlsym(ctx->libc2d2, "c2dFinish");
1550 *(void **)&LINK_c2dWaitTimestamp = ::dlsym(ctx->libc2d2,
1551 "c2dWaitTimestamp");
1552 *(void **)&LINK_c2dDestroySurface = ::dlsym(ctx->libc2d2,
1553 "c2dDestroySurface");
1554 *(void **)&LINK_c2dMapAddr = ::dlsym(ctx->libc2d2,
1555 "c2dMapAddr");
1556 *(void **)&LINK_c2dUnMapAddr = ::dlsym(ctx->libc2d2,
1557 "c2dUnMapAddr");
1558 *(void **)&LINK_c2dGetDriverCapabilities = ::dlsym(ctx->libc2d2,
1559 "c2dGetDriverCapabilities");
1560 *(void **)&LINK_c2dCreateFenceFD = ::dlsym(ctx->libc2d2,
1561 "c2dCreateFenceFD");
1562 *(void **)&LINK_c2dFillSurface = ::dlsym(ctx->libc2d2,
1563 "c2dFillSurface");
1564
1565 if (!LINK_c2dCreateSurface || !LINK_c2dUpdateSurface || !LINK_c2dReadSurface
1566 || !LINK_c2dDraw || !LINK_c2dFlush || !LINK_c2dWaitTimestamp ||
1567 !LINK_c2dFinish || !LINK_c2dDestroySurface ||
1568 !LINK_c2dGetDriverCapabilities || !LINK_c2dCreateFenceFD ||
1569 !LINK_c2dFillSurface) {
1570 ALOGE("%s: dlsym ERROR", __FUNCTION__);
1571 clean_up(ctx);
1572 status = COPYBIT_FAILURE;
1573 *device = NULL;
1574 return status;
1575 }
1576
1577 ctx->device.common.tag = HARDWARE_DEVICE_TAG;
1578 ctx->device.common.version = 1;
1579 ctx->device.common.module = (hw_module_t*)(module);
1580 ctx->device.common.close = close_copybit;
1581 ctx->device.set_parameter = set_parameter_copybit;
1582 ctx->device.get = get;
1583 ctx->device.blit = blit_copybit;
1584 ctx->device.set_sync = set_sync_copybit;
1585 ctx->device.stretch = stretch_copybit;
1586 ctx->device.finish = finish_copybit;
1587 ctx->device.flush_get_fence = flush_get_fence_copybit;
1588 ctx->device.clear = clear_copybit;
1589 ctx->device.fill_color = fill_color;
1590
1591 /* Create RGB Surface */
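// The 0xdddddddd pointers and 1x1 dimensions are placeholders: these are
// template surfaces (C2D_SURFACE_WITH_PHYS_DUMMY) whose real buffer parameters
// are filled in later through c2dUpdateSurface() in set_image().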
1592 surfDefinition.buffer = (void*)0xdddddddd;
1593 surfDefinition.phys = (void*)0xdddddddd;
1594 surfDefinition.stride = 1 * 4;
1595 surfDefinition.width = 1;
1596 surfDefinition.height = 1;
1597 surfDefinition.format = C2D_COLOR_FORMAT_8888_ARGB;
1598 if (LINK_c2dCreateSurface(&(ctx->dst[RGB_SURFACE]), C2D_TARGET | C2D_SOURCE,
1599 (C2D_SURFACE_TYPE)(C2D_SURFACE_RGB_HOST |
1600 C2D_SURFACE_WITH_PHYS |
1601 C2D_SURFACE_WITH_PHYS_DUMMY ),
1602 &surfDefinition)) {
1603 ALOGE("%s: create ctx->dst_surface[RGB_SURFACE] failed", __FUNCTION__);
1604 ctx->dst[RGB_SURFACE] = 0;
1605 clean_up(ctx);
1606 status = COPYBIT_FAILURE;
1607 *device = NULL;
1608 return status;
1609 }
1610
1611 unsigned int surface_id = 0;
1612 for (int i = 0; i < MAX_RGB_SURFACES; i++)
1613 {
1614 if (LINK_c2dCreateSurface(&surface_id, C2D_TARGET | C2D_SOURCE,
1615 (C2D_SURFACE_TYPE)(C2D_SURFACE_RGB_HOST |
1616 C2D_SURFACE_WITH_PHYS |
1617 C2D_SURFACE_WITH_PHYS_DUMMY ),
1618 &surfDefinition)) {
1619 ALOGE("%s: create RGB source surface %d failed", __FUNCTION__, i);
1620 ctx->blit_rgb_object[i].surface_id = 0;
1621 status = COPYBIT_FAILURE;
1622 break;
1623 } else {
1624 ctx->blit_rgb_object[i].surface_id = surface_id;
1625 ALOGW("%s i = %d surface_id=%d", __FUNCTION__, i,
1626 ctx->blit_rgb_object[i].surface_id);
1627 }
1628 }
1629
1630 if (status == COPYBIT_FAILURE) {
1631 clean_up(ctx);
1632 status = COPYBIT_FAILURE;
1633 *device = NULL;
1634 return status;
1635 }
1636
1637 // Create 2 plane YUV surfaces
1638 yuvSurfaceDef.format = C2D_COLOR_FORMAT_420_NV12;
1639 yuvSurfaceDef.width = 4;
1640 yuvSurfaceDef.height = 4;
1641 yuvSurfaceDef.plane0 = (void*)0xaaaaaaaa;
1642 yuvSurfaceDef.phys0 = (void*) 0xaaaaaaaa;
1643 yuvSurfaceDef.stride0 = 4;
1644
1645 yuvSurfaceDef.plane1 = (void*)0xaaaaaaaa;
1646 yuvSurfaceDef.phys1 = (void*) 0xaaaaaaaa;
1647 yuvSurfaceDef.stride1 = 4;
1648 if (LINK_c2dCreateSurface(&(ctx->dst[YUV_SURFACE_2_PLANES]),
1649 C2D_TARGET | C2D_SOURCE,
1650 (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST |
1651 C2D_SURFACE_WITH_PHYS |
1652 C2D_SURFACE_WITH_PHYS_DUMMY),
1653 &yuvSurfaceDef)) {
1654 ALOGE("%s: create ctx->dst[YUV_SURFACE_2_PLANES] failed", __FUNCTION__);
1655 ctx->dst[YUV_SURFACE_2_PLANES] = 0;
1656 clean_up(ctx);
1657 status = COPYBIT_FAILURE;
1658 *device = NULL;
1659 return status;
1660 }
1661
1662 for (int i=0; i < MAX_YUV_2_PLANE_SURFACES; i++)
1663 {
1664 if (LINK_c2dCreateSurface(&surface_id, C2D_TARGET | C2D_SOURCE,
1665 (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST |
1666 C2D_SURFACE_WITH_PHYS |
1667 C2D_SURFACE_WITH_PHYS_DUMMY ),
1668 &yuvSurfaceDef)) {
1669 ALOGE("%s: create YUV source %d failed", __FUNCTION__, i);
1670 ctx->blit_yuv_2_plane_object[i].surface_id = 0;
1671 status = COPYBIT_FAILURE;
1672 break;
1673 } else {
1674 ctx->blit_yuv_2_plane_object[i].surface_id = surface_id;
1675 ALOGW("%s: 2 Plane YUV i=%d surface_id=%d", __FUNCTION__, i,
1676 ctx->blit_yuv_2_plane_object[i].surface_id);
1677 }
1678 }
1679
1680 if (status == COPYBIT_FAILURE) {
1681 clean_up(ctx);
1682 status = COPYBIT_FAILURE;
1683 *device = NULL;
1684 return status;
1685 }
1686
1687 // Create YUV 3 plane surfaces
1688 yuvSurfaceDef.format = C2D_COLOR_FORMAT_420_YV12;
1689 yuvSurfaceDef.plane2 = (void*)0xaaaaaaaa;
1690 yuvSurfaceDef.phys2 = (void*) 0xaaaaaaaa;
1691 yuvSurfaceDef.stride2 = 4;
1692
1693 if (LINK_c2dCreateSurface(&(ctx->dst[YUV_SURFACE_3_PLANES]),
1694 C2D_TARGET | C2D_SOURCE,
1695 (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST |
1696 C2D_SURFACE_WITH_PHYS |
1697 C2D_SURFACE_WITH_PHYS_DUMMY),
1698 &yuvSurfaceDef)) {
1699 ALOGE("%s: create ctx->dst[YUV_SURFACE_3_PLANES] failed", __FUNCTION__);
1700 ctx->dst[YUV_SURFACE_3_PLANES] = 0;
1701 clean_up(ctx);
1702 status = COPYBIT_FAILURE;
1703 *device = NULL;
1704 return status;
1705 }
1706
1707 for (int i=0; i < MAX_YUV_3_PLANE_SURFACES; i++)
1708 {
1709 if (LINK_c2dCreateSurface(&(surface_id),
1710 C2D_TARGET | C2D_SOURCE,
1711 (C2D_SURFACE_TYPE)(C2D_SURFACE_YUV_HOST |
1712 C2D_SURFACE_WITH_PHYS |
1713 C2D_SURFACE_WITH_PHYS_DUMMY),
1714 &yuvSurfaceDef)) {
1715 ALOGE("%s: create 3 plane YUV surface %d failed", __FUNCTION__, i);
1716 ctx->blit_yuv_3_plane_object[i].surface_id = 0;
1717 status = COPYBIT_FAILURE;
1718 break;
1719 } else {
1720 ctx->blit_yuv_3_plane_object[i].surface_id = surface_id;
1721 ALOGW("%s: 3 Plane YUV i=%d surface_id=%d", __FUNCTION__, i,
1722 ctx->blit_yuv_3_plane_object[i].surface_id);
1723 }
1724 }
1725
1726 if (status == COPYBIT_FAILURE) {
1727 clean_up(ctx);
1728 status = COPYBIT_FAILURE;
1729 *device = NULL;
1730 return status;
1731 }
1732
1733 if (LINK_c2dGetDriverCapabilities(&(ctx->c2d_driver_info))) {
1734 ALOGE("%s: LINK_c2dGetDriverCapabilities failed", __FUNCTION__);
1735 clean_up(ctx);
1736 status = COPYBIT_FAILURE;
1737 *device = NULL;
1738 return status;
1739 }
1740 // Initialize context variables.
1741 ctx->trg_transform = C2D_TARGET_ROTATE_0;
1742
1743 ctx->temp_src_buffer.fd = -1;
1744 ctx->temp_src_buffer.base = 0;
1745 ctx->temp_src_buffer.size = 0;
1746
1747 ctx->temp_dst_buffer.fd = -1;
1748 ctx->temp_dst_buffer.base = 0;
1749 ctx->temp_dst_buffer.size = 0;
1750
1751 ctx->fb_width = 0;
1752 ctx->fb_height = 0;
1753
1754 ctx->blit_rgb_count = 0;
1755 ctx->blit_yuv_2_plane_count = 0;
1756 ctx->blit_yuv_3_plane_count = 0;
1757 ctx->blit_count = 0;
1758
1759 ctx->wait_timestamp = false;
1760 ctx->stop_thread = false;
1761 pthread_mutex_init(&(ctx->wait_cleanup_lock), NULL);
1762 pthread_cond_init(&(ctx->wait_cleanup_cond), NULL);
1763 /* Start the wait thread */
1764 pthread_attr_t attr;
1765 pthread_attr_init(&attr);
1766 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
1767
1768 pthread_create(&ctx->wait_thread_id, &attr, &c2d_wait_loop,
1769 (void *)ctx);
1770 pthread_attr_destroy(&attr);
1771
1772 *device = &ctx->device.common;
1773 return status;
1774 }
1775