From 57201c576a7a5d2a8b2ca4eb80b36cc0ddc589d6 Mon Sep 17 00:00:00 2001
From: cxl
Date: Thu, 5 Sep 2019 12:53:05 +0000
Subject: [PATCH] .tutorial

git-svn-id: svn://ultimatepp.org/upp/trunk@13585 f0d560ea-af0d-0410-9eb7-867de7ffcac7
---
 tutorial/CoreTutorial/CoLoop.cpp | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/tutorial/CoreTutorial/CoLoop.cpp b/tutorial/CoreTutorial/CoLoop.cpp
index 1d1278c5f..74c3fc993 100644
--- a/tutorial/CoreTutorial/CoLoop.cpp
+++ b/tutorial/CoreTutorial/CoLoop.cpp
@@ -2,30 +2,30 @@
 
 void CoLoopTutorial()
 {
-	/// .CoWork loop method
+	/// .CoDo
 
-	/// An alternative to `CoPartition` is 'loop' method of `CoWork` (`Loop` method with synonym
-	/// `operator*`). In this pattern, the job is simply started in all threads and the code is
-	/// responsible for scheduling the work. `Loop` / `operator*` waits for all started threads
-	/// to finish. For scheduling, it is possible to use internal `CoWork` index counter,
-	/// which is set to 0 at the start of `Loop` / `operator*`. This way, the overhead associated
-	/// with creating lambdas and scheduling them is kept to the minimum.
+	/// An alternative to `CoPartition` is `CoDo`. In this pattern, the job is simply started
+	/// in all threads and the code is responsible for scheduling the work. `CoDo` waits for
+	/// all started threads to finish. Scheduling is the responsibility of client code, but can
+	/// be easily managed using the std::atomic counter. This way, the overhead associated with
+	/// creating lambdas and scheduling them is kept to the minimum (basically the cost of
+	/// atomic increment).
 
 	Vector<String> data;
 	for(int i = 0; i < 100; i++)
 		data.Add(AsString(1.0 / i));
 
 	double sum = 0;
-	
-	CoWork co;
-	co * [&] {
-		int i;
+
+	std::atomic<int> ii = 0;
+
+	CoDo([&] {
 		double m = 0;
-		while((i = co.Next()) < data.GetCount())
+		for(int i = ii++; i < data.GetCount(); i = ii++)
 			m += atof(data[i]);
 		CoWork::FinLock();
 		sum += m;
-	};
+	});
 
 	DUMP(sum);