feat(draw): add preferred_draw_unit_id and preference_score to draw_tasks

See https://github.com/lvgl/lvgl/issues/4342#issuecomment-1691636624

parent 19c9afb57a
commit da876e4a21
@@ -109,7 +109,7 @@ typedef struct _lv_global_t {
     _lv_img_cache_entry_t img_cache_single;
 #endif
 
-    lv_draw_cache_t draw_cache;
+    lv_draw_global_info_t draw_info;
 #if defined(LV_DRAW_SW_SHADOW_CACHE_SIZE) && LV_DRAW_SW_SHADOW_CACHE_SIZE > 0
     lv_draw_sw_shadow_cache_t sw_shadow_cache;
 #endif
@@ -16,7 +16,7 @@
 /*********************
  *      DEFINES
  *********************/
-#define _draw_cache LV_GLOBAL_DEFAULT()->draw_cache
+#define _draw_info LV_GLOBAL_DEFAULT()->draw_info
 
 /**********************
  *      TYPEDEFS
@@ -46,7 +46,7 @@ static bool is_independent(lv_layer_t * layer, lv_draw_task_t * t_check);
 void lv_draw_init(void)
 {
 #if LV_USE_OS
-    lv_thread_sync_init(&_draw_cache.sync);
+    lv_thread_sync_init(&_draw_info.sync);
 #endif
 }
 
@@ -55,8 +55,8 @@ void * lv_draw_create_unit(size_t size)
     lv_draw_unit_t * new_unit = lv_malloc(size);
     lv_memzero(new_unit, size);
 
-    new_unit->next = _draw_cache.unit_head;
-    _draw_cache.unit_head = new_unit;
+    new_unit->next = _draw_info.unit_head;
+    _draw_info.unit_head = new_unit;
 
     return new_unit;
 }
@@ -92,19 +92,29 @@ void lv_draw_finalize_task_creation(lv_layer_t * layer, lv_draw_task_t * t)
     lv_draw_dsc_base_t * base_dsc = t->draw_dsc;
     base_dsc->layer = layer;
 
+    lv_draw_global_info_t * info = &_draw_info;
+
+    /*Let the draw units set their preference score*/
+    t->preference_score = 100;
+    t->preferred_draw_unit_id = 0;
+    lv_draw_unit_t * u = info->unit_head;
+    while(u) {
+        if(u->evaluate_cb) u->evaluate_cb(u, t);
+        u = u->next;
+    }
+
     /*Send LV_EVENT_DRAW_TASK_ADDED and dispatch only on the "main" draw_task
      *and not on the draw tasks added in the event.
      *Sending LV_EVENT_DRAW_TASK_ADDED events might cause recursive event sends
      *Dispatching might remove the "main" draw task while it's still being used in the event*/
-    lv_draw_cache_t * cache = &_draw_cache;
-
-    if(cache->task_running == false) {
-        cache->task_running = true;
+    if(info->task_running == false) {
+        info->task_running = true;
         if(base_dsc->obj && lv_obj_has_flag(base_dsc->obj, LV_OBJ_FLAG_SEND_DRAW_TASK_EVENTS)) {
             lv_obj_send_event(base_dsc->obj, LV_EVENT_DRAW_TASK_ADDED, t);
         }
         lv_draw_dispatch();
-        cache->task_running = false;
+        info->task_running = false;
     }
 }
 
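The loop above gives every registered draw unit a chance to claim a freshly created task through `evaluate_cb`, before dispatching starts. The commit ships no non-SW unit, so the following is only a minimal sketch of what such a callback could look like for a hypothetical GPU unit; `my_gpu_evaluate_cb`, `MY_GPU_UNIT_ID` and the fill-only capability are invented for illustration, not part of this change:

/*Assumes LVGL's internal draw headers are available, as for any in-tree draw unit*/
#define MY_GPU_UNIT_ID  2   /*assumed ID; 0 is LV_DRAW_UNIT_ID_ANY, 1 is the SW unit below*/

static int32_t my_gpu_evaluate_cb(lv_draw_unit_t * draw_unit, lv_draw_task_t * task)
{
    LV_UNUSED(draw_unit);

    /*Claim only task types this unit can actually render*/
    if(task->type == LV_DRAW_TASK_TYPE_FILL) {
        /*Overwrite the defaults (score 100, unit 0) only if no earlier
         *evaluate_cb claimed the task with a better (lower) score*/
        if(task->preference_score > 70) {
            task->preference_score = 70;   /*assumed: ~30% faster than SW rendering*/
            task->preferred_draw_unit_id = MY_GPU_UNIT_ID;
        }
    }
    return 0;
}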
@@ -149,8 +159,8 @@ bool lv_draw_dispatch_layer(struct _lv_disp_t * disp, lv_layer_t * layer)
             uint32_t layer_size_byte = layer_drawn->draw_buf.height * lv_draw_buf_width_to_stride(layer_drawn->draw_buf.width,
                                                                                                   layer_drawn->draw_buf.color_format);
 
-            _draw_cache.used_memory_for_layers_kb -= layer_size_byte < 1024 ? 1 : layer_size_byte >> 10;
-            LV_LOG_INFO("Layer memory used: %d kB\n", _draw_cache.used_memory_for_layers_kb);
+            _draw_info.used_memory_for_layers_kb -= layer_size_byte < 1024 ? 1 : layer_size_byte >> 10;
+            LV_LOG_INFO("Layer memory used: %d kB\n", _draw_info.used_memory_for_layers_kb);
             lv_draw_buf_free(&layer_drawn->draw_buf);
         }
 
@@ -211,7 +221,7 @@ bool lv_draw_dispatch_layer(struct _lv_disp_t * disp, lv_layer_t * layer)
         uint32_t layer_size_byte = layer->draw_buf.height * lv_draw_buf_width_to_stride(layer->draw_buf.width,
                                                                                         layer->draw_buf.color_format);
         uint32_t kb = layer_size_byte < 1024 ? 1 : layer_size_byte >> 10;
-        if(_draw_cache.used_memory_for_layers_kb + kb > LV_LAYER_MAX_MEMORY_USAGE) {
+        if(_draw_info.used_memory_for_layers_kb + kb > LV_LAYER_MAX_MEMORY_USAGE) {
             layer_ok = false;
         }
     }
@@ -219,9 +229,9 @@ bool lv_draw_dispatch_layer(struct _lv_disp_t * disp, lv_layer_t * layer)
     if(layer_ok) {
-        /*Find a draw unit which is not busy and can take at least one task*/
-        lv_draw_unit_t * u = _draw_cache.unit_head;
+        /*Let all draw units pick draw tasks*/
+        lv_draw_unit_t * u = _draw_info.unit_head;
         while(u) {
-            int32_t taken_cnt = u->dispatch(u, layer);
+            int32_t taken_cnt = u->dispatch_cb(u, layer);
             if(taken_cnt < 0) {
                 break;
             }
@@ -238,23 +248,23 @@ bool lv_draw_dispatch_layer(struct _lv_disp_t * disp, lv_layer_t * layer)
 void lv_draw_dispatch_wait_for_request(void)
 {
 #if LV_USE_OS
-    lv_thread_sync_wait(&_draw_cache.sync);
+    lv_thread_sync_wait(&_draw_info.sync);
 #else
-    while(!_draw_cache.dispatch_req);
-    _draw_cache.dispatch_req = 0;
+    while(!_draw_info.dispatch_req);
+    _draw_info.dispatch_req = 0;
 #endif
 }
 
 void lv_draw_dispatch_request(void)
 {
 #if LV_USE_OS
-    lv_thread_sync_signal(&_draw_cache.sync);
+    lv_thread_sync_signal(&_draw_info.sync);
 #else
-    _draw_cache.dispatch_req = 1;
+    _draw_info.dispatch_req = 1;
 #endif
 }
 
-lv_draw_task_t * lv_draw_get_next_available_task(lv_layer_t * layer, lv_draw_task_t * t_prev)
+lv_draw_task_t * lv_draw_get_next_available_task(lv_layer_t * layer, lv_draw_task_t * t_prev, uint8_t draw_unit_id)
 {
     LV_PROFILER_BEGIN;
     /*If the first task is screen sized, there cannot be independent areas*/
@@ -273,7 +283,9 @@ lv_draw_task_t * lv_draw_get_next_available_task(lv_layer_t * layer, lv_draw_task_t * t_prev)
     lv_draw_task_t * t = t_prev ? t_prev->next : layer->draw_task_head;
     while(t) {
         /*Find a queued and independent task*/
-        if(t->state == LV_DRAW_TASK_STATE_QUEUED && is_independent(layer, t)) {
+        if(t->state == LV_DRAW_TASK_STATE_QUEUED &&
+           (t->preferred_draw_unit_id == LV_DRAW_UNIT_ID_ANY || t->preferred_draw_unit_id == draw_unit_id) &&
+           is_independent(layer, t)) {
             LV_PROFILER_END;
             return t;
         }
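The extra `draw_unit_id` argument turns `lv_draw_get_next_available_task` into a per-unit filter: a unit only sees queued, independent tasks that either prefer it or prefer no one in particular (`LV_DRAW_UNIT_ID_ANY`). A hedged sketch of iterating all matching tasks from inside a unit, reusing the assumed `MY_GPU_UNIT_ID` from the earlier sketch:

static void my_gpu_count_candidates(lv_layer_t * layer)
{
    uint32_t cnt = 0;
    lv_draw_task_t * t = lv_draw_get_next_available_task(layer, NULL, MY_GPU_UNIT_ID);
    while(t) {
        cnt++;
        /*Pass the previous task back in to continue the search after it*/
        t = lv_draw_get_next_available_task(layer, t, MY_GPU_UNIT_ID);
    }
    LV_LOG_INFO("tasks this unit could take: %d\n", (int)cnt);
}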
@@ -333,8 +345,8 @@ void * lv_draw_layer_alloc_buf(lv_layer_t * layer)
     }
 
     uint32_t kb = layer_size_byte < 1024 ? 1 : layer_size_byte >> 10;
-    _draw_cache.used_memory_for_layers_kb += kb;
-    LV_LOG_INFO("Layer memory used: %d kB\n", _draw_cache.used_memory_for_layers_kb);
+    _draw_info.used_memory_for_layers_kb += kb;
+    LV_LOG_INFO("Layer memory used: %d kB\n", _draw_info.used_memory_for_layers_kb);
 
 
     if(lv_color_format_has_alpha(layer->draw_buf.color_format)) {
@@ -26,6 +26,7 @@ extern "C" {
 /*********************
  *      DEFINES
  *********************/
+#define LV_DRAW_UNIT_ID_ANY 0
 
 /**********************
  *      TYPEDEFS
@@ -79,6 +80,20 @@ typedef struct _lv_draw_task_t {
     volatile int state;     /*int instead of lv_draw_task_state_t to be sure it's atomic*/
 
     void * draw_dsc;
 
+    /**
+     * The ID of the draw_unit which should take this task
+     */
+    uint8_t preferred_draw_unit_id;
+
+    /**
+     * Set to which extent `preferred_draw_unit_id` is good at this task.
+     * 80: means 20% better (faster) than software rendering
+     * 100: the default value
+     * 110: means 10% worse (slower) than software rendering
+     */
+    uint8_t preference_score;
+
 } lv_draw_task_t;
 
 typedef struct {
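The score is easiest to read as a render-time ratio against software rendering, normalized to 100 (lower is better). A worked example under that reading; the linear formula is an interpretation of the comment above, not something the commit defines:

/*Hypothetical helper: score = 100 * unit_time / sw_time.
 *E.g. unit 4 ms vs. SW 5 ms -> 100 * 4 / 5 = 80, i.e. 20% faster than SW;
 *unit 5.5 ms vs. SW 5 ms   -> 110, i.e. slower, so the unit should leave
 *the task to a better-scoring unit (ultimately the SW renderer).*/
static uint8_t score_from_times(uint32_t unit_time_us, uint32_t sw_time_us)
{
    return (uint8_t)((100u * unit_time_us) / sw_time_us);
}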
@@ -96,7 +111,7 @@ typedef struct _lv_draw_unit_t {
     const lv_area_t * clip_area;
 
     /**
-     * Try to assign a draw task to itself.
+     * Called to try to assign a draw task to itself.
      * `lv_draw_get_next_available_task` can be used to get an independent draw task.
      * A draw task should be assigned only if the draw unit can draw it too
      * @param draw_unit pointer to the draw unit
@@ -105,7 +120,15 @@ typedef struct _lv_draw_unit_t {
      *   -1: There were no available draw tasks at all.
      *       Also means not to call the dispatcher of the other draw units as there is no draw task to take
      */
-    int32_t (*dispatch)(struct _lv_draw_unit_t * draw_unit, struct _lv_layer_t * layer);
+    int32_t (*dispatch_cb)(struct _lv_draw_unit_t * draw_unit, struct _lv_layer_t * layer);
+
+    /**
+     *
+     * @param draw_unit
+     * @param task
+     * @return
+     */
+    int32_t (*evaluate_cb)(struct _lv_draw_unit_t * draw_unit, lv_draw_task_t * task);
 } lv_draw_unit_t;
 
 
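Taken together, a custom draw unit now wires up two callbacks instead of one. A minimal registration sketch, mirroring what `lv_draw_sw_init()` does further below; the `my_gpu_*` names are assumptions, while `lv_draw_create_unit()` comes from this commit's `lv_draw.c`:

static int32_t my_gpu_dispatch_cb(struct _lv_draw_unit_t * draw_unit, struct _lv_layer_t * layer);
static int32_t my_gpu_evaluate_cb(struct _lv_draw_unit_t * draw_unit, lv_draw_task_t * task);

typedef struct {
    lv_draw_unit_t base_unit;   /*must be the first member so the base pointer can be cast back*/
    lv_draw_task_t * task_act;  /*unit-private state, mirroring lv_draw_sw_unit_t*/
} my_gpu_unit_t;

void my_gpu_init(void)
{
    my_gpu_unit_t * u = lv_draw_create_unit(sizeof(my_gpu_unit_t));
    u->base_unit.dispatch_cb = my_gpu_dispatch_cb;  /*renamed from `dispatch`*/
    u->base_unit.evaluate_cb = my_gpu_evaluate_cb;  /*new in this commit*/
}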
@@ -152,7 +175,7 @@ typedef struct {
     int dispatch_req;
 #endif
     bool task_running;
-} lv_draw_cache_t;
+} lv_draw_global_info_t;
 
 /**********************
  *  GLOBAL PROTOTYPES
@@ -185,11 +208,12 @@ void lv_draw_dispatch_request(void);
 
 /**
  * Find an available draw task
- * @param layer      the draw ctx to search in
- * @param t_prev     continue searching from this task
- * @return           an available draw task or NULL if there is none
+ * @param layer         the draw ctx to search in
+ * @param t_prev        continue searching from this task
+ * @param draw_unit_id  consider only tasks whose `preferred_draw_unit_id` equals this value or `LV_DRAW_UNIT_ID_ANY`
+ * @return              an available draw task or NULL if there is none
 */
-lv_draw_task_t * lv_draw_get_next_available_task(lv_layer_t * layer, lv_draw_task_t * t_prev);
+lv_draw_task_t * lv_draw_get_next_available_task(lv_layer_t * layer, lv_draw_task_t * t_prev, uint8_t draw_unit_id);
 
 /**
  * Create a new layer on a parent layer
@@ -205,8 +229,8 @@ void lv_draw_layer_get_area(lv_layer_t * layer, lv_area_t * area);
 
 /**
  * Try to allocate a buffer for the layer.
- * @param layer     pointer to a layer
- * @return          pointer to the allocated aligned buffer or NULL on failure
+ * @param layer         pointer to a layer
+ * @return              pointer to the allocated aligned buffer or NULL on failure
 */
 void * lv_draw_layer_alloc_buf(lv_layer_t * layer);
 
@@ -17,6 +17,7 @@
 /*********************
  *      DEFINES
  *********************/
+#define DRAW_UNIT_ID_SW 1
 
 /**********************
  *      TYPEDEFS
@@ -59,7 +60,7 @@ void lv_draw_sw_init(void)
     uint32_t i;
     for(i = 0; i < LV_DRAW_SW_DRAW_UNIT_CNT; i++) {
         lv_draw_sw_unit_t * draw_sw_unit = lv_draw_create_unit(sizeof(lv_draw_sw_unit_t));
-        draw_sw_unit->base_unit.dispatch = lv_draw_sw_dispatch;
+        draw_sw_unit->base_unit.dispatch_cb = lv_draw_sw_dispatch;
         draw_sw_unit->idx = i;
 
 #if LV_USE_OS
@@ -80,7 +81,7 @@ static int32_t lv_draw_sw_dispatch(lv_draw_unit_t * draw_unit, lv_layer_t * layer)
     if(draw_sw_unit->task_act) return 0;
 
     lv_draw_task_t * t = NULL;
-    t = lv_draw_get_next_available_task(layer, NULL);
+    t = lv_draw_get_next_available_task(layer, NULL, DRAW_UNIT_ID_SW);
     if(t == NULL) return -1;
 
     void * buf = lv_draw_layer_alloc_buf(layer);
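The SW unit simply passes its own `DRAW_UNIT_ID_SW` here; tasks left at `LV_DRAW_UNIT_ID_ANY` still match it. A non-SW unit's `dispatch_cb` could follow the same shape. A hedged skeleton honoring the return-value contract documented in `lv_draw.h` (positive: tasks taken, 0: busy, -1: nothing to take), again using the assumed `my_gpu` names and the `my_gpu_unit_t` struct from the earlier sketch:

static int32_t my_gpu_dispatch_cb(lv_draw_unit_t * draw_unit, lv_layer_t * layer)
{
    my_gpu_unit_t * u = (my_gpu_unit_t *)draw_unit;
    if(u->task_act) return 0;   /*busy: can't take a new task right now*/

    lv_draw_task_t * t = lv_draw_get_next_available_task(layer, NULL, MY_GPU_UNIT_ID);
    if(t == NULL) return -1;    /*nothing to take: don't poll the other units either*/

    t->state = LV_DRAW_TASK_STATE_IN_PROGRESS;
    u->task_act = t;
    /*...start rendering; when finished, set t->state = LV_DRAW_TASK_STATE_READY,
     *clear task_act and call lv_draw_dispatch_request() to wake the dispatcher...*/
    return 1;                   /*one task taken*/
}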
@@ -23,7 +23,7 @@
 #define CIRCLE_CACHE_LIFE_MAX 1000
 #define CIRCLE_CACHE_AGING(life, r) life = LV_MIN(life + (r < 16 ? 1 : (r >> 4)), 1000)
 #if LV_USE_OS
-    #define circle_cache_mutex LV_GLOBAL_DEFAULT()->draw_cache.circle_cache_mutex
+    #define circle_cache_mutex LV_GLOBAL_DEFAULT()->draw_info.circle_cache_mutex
 #endif
 #define _circle_cache LV_GLOBAL_DEFAULT()->sw_circle_cache