Mirror of https://github.com/lvgl/lvgl.git, synced 2025-01-14 06:42:58 +08:00
fix(thorvg): remove async parts typically not implemented on MCUs (#5249)
This commit is contained in:
parent 11c3be1ce4
commit 9a088565cc
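The diff strips ThorVG's worker-thread machinery out of LVGL's vendored copy: the TaskQueue work-stealing queues, the thread pool inside TaskSchedulerImpl, and the blocking done()/prepare() handshake in Task all go away, leaving TaskScheduler::request() to execute every task inline on the calling thread. A minimal sketch of the caller-visible behavior after the change (MyTask and example() are illustrative names, not part of the commit):

    #include "tvgTaskScheduler.h"

    struct MyTask : tvg::Task
    {
        void run(unsigned tid) override
        {
            // tid is always 0 in the synchronous scheduler
            /* ...rasterize something... */
        }
    };

    void example()
    {
        tvg::TaskScheduler::init(0);          // thread count no longer spawns workers
        MyTask task;
        tvg::TaskScheduler::request(&task);   // runs task.run(0) before returning
        task.done();                          // nothing pending: returns immediately
        tvg::TaskScheduler::term();
    }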
@@ -167,8 +167,6 @@ Result Initializer::term(CanvasEngine engine) noexcept

     if (--_initCnt > 0) return Result::Success;

     TaskScheduler::term();

     if (!LoaderMgr::term()) return Result::Unknown;

     return Result::Success;
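Note the reference counting visible above: only the term() call that drops _initCnt to zero actually tears the engine down. A hedged usage sketch, assuming two earlier init() calls on the Sw engine:

    tvg::Initializer::term(tvg::CanvasEngine::Sw);  // _initCnt 2 -> 1: returns Success, engine stays up
    tvg::Initializer::term(tvg::CanvasEngine::Sw);  // _initCnt 1 -> 0: TaskScheduler::term() + LoaderMgr::term()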
@@ -80,7 +80,7 @@ void mpoolRetDashOutline(SwMpool* mpool, unsigned idx)
 }


-SwMpool* mpoolInit(unsigned threads)
+SwMpool* mpoolInit(uint32_t threads)
 {
     auto allocSize = threads + 1;

@@ -89,7 +89,6 @@ SwMpool* mpoolInit(unsigned threads)
     mpool->strokeOutline = static_cast<SwOutline*>(calloc(1, sizeof(SwOutline) * allocSize));
     mpool->dashOutline = static_cast<SwOutline*>(calloc(1, sizeof(SwOutline) * allocSize));
     mpool->allocSize = allocSize;

     return mpool;
 }
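The threads + 1 sizing matters for the hunks below: the pool keeps one scratch SwOutline per potential caller, slot 0 for the synchronous caller (task->run(0)) and slots 1..threads for the worker threads that used to invoke (*task)(i + 1). With the workers removed, only slot 0 is ever used. A hypothetical accessor illustrating the indexing convention (mpoolScratchStroke is not a thorvg function):

    // Assumes the SwMpool/SwOutline definitions from tvgSwCommon.h.
    SwOutline* mpoolScratchStroke(SwMpool* mpool, unsigned tid)
    {
        // tid 0 = synchronous caller; 1..threads = the (removed) workers.
        return &mpool->strokeOutline[tid];   // valid while tid < mpool->allocSize
    }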
@@ -824,8 +824,8 @@ static void _rasterPolygonImage(SwSurface* surface, const SwImage* image, const

 static AASpans* _AASpans(float ymin, float ymax, const SwImage* image, const SwBBox* region)
 {
-    auto yStart = static_cast<int32_t>(ymin);
-    auto yEnd = static_cast<int32_t>(ymax);
+    auto yStart = static_cast<int>(ymin);
+    auto yEnd = static_cast<int>(ymax);

     if (!_arrange(image, region, yStart, yEnd)) return nullptr;
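A plausible motivation for the int32_t -> int change (an inference; the commit message does not say): helpers such as _arrange() take the y bounds as int&, and on common embedded toolchains (newlib among them) int32_t is a typedef for long, which cannot bind to an int&. A minimal stand-in illustration:

    #include <cstdint>

    // clampY stands in for a helper like _arrange() taking int& out-parameters.
    static void clampY(int& y, int ymax) { if (y > ymax) y = ymax; }

    void demo()
    {
        int32_t a = 900;    // 'long int' under newlib and friends
        int b = 900;
        // clampY(a, 600);  // fails to compile wherever int32_t != int
        clampY(b, 600);     // portable
    }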
@@ -23,11 +23,8 @@
 #include "../../lv_conf_internal.h"
 #if LV_USE_THORVG_INTERNAL

-#include <deque>
-#include <thread>
 #include <vector>
 #include <atomic>
-#include <condition_variable>
 #include "tvgTaskScheduler.h"

 /************************************************************************/
@@ -36,131 +33,22 @@

 namespace tvg {

-struct TaskQueue {
-    deque<Task*> taskDeque;
-    mutex mtx;
-    condition_variable ready;
-    bool done = false;
-
-    bool tryPop(Task** task)
-    {
-        unique_lock<mutex> lock{mtx, try_to_lock};
-        if (!lock || taskDeque.empty()) return false;
-        *task = taskDeque.front();
-        taskDeque.pop_front();
-
-        return true;
-    }
-
-    bool tryPush(Task* task)
-    {
-        {
-            unique_lock<mutex> lock{mtx, try_to_lock};
-            if (!lock) return false;
-            taskDeque.push_back(task);
-        }
-
-        ready.notify_one();
-
-        return true;
-    }
-
-    void complete()
-    {
-        {
-            unique_lock<mutex> lock{mtx};
-            done = true;
-        }
-        ready.notify_all();
-    }
-
-    bool pop(Task** task)
-    {
-        unique_lock<mutex> lock{mtx};
-
-        while (taskDeque.empty() && !done) {
-            ready.wait(lock);
-        }
-
-        if (taskDeque.empty()) return false;
-
-        *task = taskDeque.front();
-        taskDeque.pop_front();
-
-        return true;
-    }
-
-    void push(Task* task)
-    {
-        {
-            unique_lock<mutex> lock{mtx};
-            taskDeque.push_back(task);
-        }
-
-        ready.notify_one();
-    }
-};
-
-
-static thread_local bool _async = true;  //toggle async tasking for each thread on/off
-

 struct TaskSchedulerImpl
 {
     uint32_t threadCnt;
-    vector<thread> threads;
-    vector<TaskQueue> taskQueues;
-    atomic<uint32_t> idx{0};

-    TaskSchedulerImpl(unsigned threadCnt) : threadCnt(threadCnt), taskQueues(threadCnt)
+    TaskSchedulerImpl(unsigned threadCnt)
     {
-        threads.reserve(threadCnt);
-
-        for (unsigned i = 0; i < threadCnt; ++i) {
-            threads.emplace_back([&, i] { run(i); });
-        }
     }

-    ~TaskSchedulerImpl()
-    {
-        for (auto& queue : taskQueues) queue.complete();
-        for (auto& thread : threads) thread.join();
-    }
-
-    void run(unsigned i)
-    {
-        Task* task;
-
-        //Thread Loop
-        while (true) {
-            auto success = false;
-            for (unsigned x = 0; x < threadCnt * 2; ++x) {
-                if (taskQueues[(i + x) % threadCnt].tryPop(&task)) {
-                    success = true;
-                    break;
-                }
-            }
-
-            if (!success && !taskQueues[i].pop(&task)) break;
-            (*task)(i + 1);
-        }
-    }
-
     void request(Task* task)
     {
-        //Async
-        if (threadCnt > 0 && _async) {
-            task->prepare();
-            auto i = idx++;
-            for (unsigned n = 0; n < threadCnt; ++n) {
-                if (taskQueues[(i + n) % threadCnt].tryPush(task)) return;
-            }
-            taskQueues[i % threadCnt].push(task);
-        //Sync
-        } else {
-            task->run(0);
-        }
+        task->run(0);
     }
 };
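Everything deleted above is a try-then-block task system in the style of Sean Parent's "Better Code: Concurrency" design: request() probes each queue with the non-blocking tryPush() before committing to a blocking push(), and each worker scans threadCnt * 2 queues with tryPop() before parking in pop(). The load-bearing idiom is a unique_lock constructed with try_to_lock, sketched standalone here:

    #include <mutex>

    // try_to_lock returns immediately instead of waiting, letting the caller
    // move on to the next queue when this one is contended.
    bool tryDoWork(std::mutex& mtx)
    {
        std::unique_lock<std::mutex> lock{mtx, std::try_to_lock};
        if (!lock) return false;   // lock held elsewhere; try another queue
        /* ...critical section... */
        return true;
    }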
@@ -178,28 +66,11 @@ void TaskScheduler::init(unsigned threads)
     inst = new TaskSchedulerImpl(threads);
 }


 void TaskScheduler::term()
 {
     if (!inst) return;
     delete(inst);
     inst = nullptr;
 }


 void TaskScheduler::request(Task* task)
 {
     if (inst) inst->request(task);
 }


 unsigned TaskScheduler::threads()
 {
     if (inst) return inst->threadCnt;
     return 0;
 }


-void TaskScheduler::async(bool on)
-{
-    _async = on;
@@ -26,8 +26,6 @@
 #ifndef _TVG_TASK_SCHEDULER_H_
 #define _TVG_TASK_SCHEDULER_H_

-#include <mutex>
-#include <condition_variable>
 #include "tvgCommon.h"

 namespace tvg
@@ -37,9 +35,7 @@ struct Task;

 struct TaskScheduler
 {
     static unsigned threads();
     static void init(unsigned threads);
     static void term();
     static void request(Task* task);
-    static void async(bool on);
 };
@@ -47,21 +43,12 @@ struct TaskScheduler

 struct Task
 {
-private:
-    mutex mtx;
-    condition_variable cv;
-    bool ready = true;
-    bool pending = false;
-
 public:
     virtual ~Task() = default;

     void done()
     {
-        if (!pending) return;
-
-        unique_lock<mutex> lock(mtx);
-        while (!ready) cv.wait(lock);
-        pending = false;
     }

 protected:
@@ -72,15 +59,10 @@ private:
     {
         run(tid);

-        lock_guard<mutex> lock(mtx);
-        ready = true;
-        cv.notify_one();
     }

     void prepare()
     {
-        ready = false;
-        pending = true;
     }

     friend struct TaskSchedulerImpl;