Commit e8a44888 authored by Michaël Roynard

Merge remote-tracking branch 'origin/development/ranges' into development/transform_view

parents f4fb11da 880799ba
Pipeline #12729 failed with stages in 4 minutes and 4 seconds
@@ -12,7 +12,7 @@ build-doc-images:
stage: build
script:
- mkdir build && cd build
- conan install .. -pr buildfarm
- conan install .. -pr buildfarm --build=missing
- cmake -DPYLENE_BUILD_BENCHMARKS=NO .. -DCMAKE_BUILD_TYPE=$PYLENE_CONFIGURATION
- cmake --build .
--config Release
@@ -29,16 +29,18 @@ build-doc-images:
stage: test
script:
- mkdir build && cd build
- conan install .. -pr buildfarm
- conan install .. -pr buildfarm --build=missing
- cmake .. -DCMAKE_BUILD_TYPE=$PYLENE_CONFIGURATION
- cmake --build .
--config $PYLENE_CONFIGURATION
--target check
dependencies: []
artifacts:
reports:
junit: build/tests/UT*.xml
distcheck-linux-gcc-release:
<<: *distcheck-linux-base
variables:
@@ -76,15 +78,15 @@ distcheck-linux-clang-debug:
stage: bench
script:
- mkdir build && cd build
- conan install .. -pr buildfarm
- conan install .. -pr buildfarm --build=missing
- cmake .. -DCMAKE_BUILD_TYPE=$PYLENE_CONFIGURATION
- cmake --build . --target fetch-external-data
- cmake --build .
--config $PYLENE_CONFIGURATION
--target benchmarks
- cmake --build .
--config $PYLENE_CONFIGURATION
--target benchmarks-run
-- -j1 # Force serial execution
--target run-all-benchmarks
- ctest -L SpeedTests -V
when: manual
dependencies: []
artifacts:
name: "benchmark-results"
paths:
@@ -115,6 +117,7 @@ distbench-linux-clang-release:
- cd doc
- doxygen
- sphinx-build -b html source ../public
dependencies: [build-doc-images]
artifacts:
name: "documentation"
paths:
......
#ifdef _MSC_VER
#define __restrict__ __restrict
#endif
#include <mln/core/image/image2d.hpp>
#include <mln/core/win2d.hpp>
#include <mln/core/wrt_offset.hpp>
#include <mln/morpho/structural/erode.hpp>

#include <algorithm> // std::min
#include <cstddef>   // ptrdiff_t
using namespace mln;
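// Erosion over the winc8 window written with the mln pixel/neighborhood iterators:
// each output pixel receives the minimum value found in its window.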
void erode_c8_mlnstyle(const mln::image2d<mln::uint8>& f, mln::image2d<mln::uint8>& out)
{
mln_pixter(px, f);
mln_pixter(pxout, out);
mln_iter(nx, winc8(px));
mln_forall (px, pxout)
{
uint8 min = 255;
//#pragma clang loop vectorize(enable)
mln_simple_forall(nx) min = std::min(min, nx->val());
pxout->val() = min;
}
}
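// Same erosion expressed with the library algorithm mln::morpho::structural::erode.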
void erode_c8_kernel(const mln::image2d<mln::uint8>& f, mln::image2d<mln::uint8>& out)
{
mln::morpho::structural::erode(f, winc8, out, std::less<mln::uint8>());
}
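// Hand-written C-style erosion: raw pointers and 9 precomputed neighbor offsets,
// processed row by row.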
void erode_c8_cstyle(const mln::image2d<mln::uint8>& f, mln::image2d<mln::uint8>& out)
{
constexpr int sz = 9;
auto dpoints = winc8_t::dpoints;
const uint8* __restrict__ inptr = &f({0, 0});
uint8* __restrict__ outptr = &out({0, 0});
ptrdiff_t offsets[sz];
wrt_offset(f, dpoints, offsets);
int stride = static_cast<int>(f.strides()[0]);
int nr = f.nrows();
int nc = f.ncols();
for (int y = 0; y < nr; ++y)
{
const uint8* inlineptr = inptr + stride * y;
uint8* outlineptr = outptr + stride * y;
for (int x = 0; x < nc; ++x)
{
uint8 min = 255;
for (int k = 0; k < sz; ++k)
min = std::min(min, inlineptr[x + offsets[k]]);
outlineptr[x] = min;
}
}
}
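// Same C-style erosion, but the inner loop is factored into a lambda whose pointer
// parameters are __restrict__-qualified, telling the compiler the rows do not alias
// so it can vectorize more aggressively.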
void erode_c8_cstyle_restrict(const mln::image2d<mln::uint8>& f, mln::image2d<mln::uint8>& out)
{
constexpr int sz = 9;
auto dpoints = winc8_t::dpoints;
const uint8* __restrict__ inptr = &f({0, 0});
uint8* __restrict__ outptr = &out({0, 0});
ptrdiff_t offsets[sz];
wrt_offset(f, dpoints, offsets);
int stride = static_cast<int>(f.strides()[0]);
int nr = f.nrows();
int nc = f.ncols();
auto applyLine = [nc, offsets](const uint8* __restrict__ inlineptr, uint8* __restrict__ outlineptr) {
for (int x = 0; x < nc; ++x)
{
uint8 min = 255;
for (int k = 0; k < sz; ++k)
min = std::min(min, inlineptr[x + offsets[k]]);
outlineptr[x] = min;
}
};
for (int y = 0; y < nr; ++y)
{
const uint8* inlineptr = inptr + stride * y;
uint8* outlineptr = outptr + stride * y;
applyLine(inlineptr, outlineptr);
}
}
#pragma once

#include <mln/core/image/image2d.hpp>
void erode_c8_mlnstyle(const mln::image2d<mln::uint8>& f, mln::image2d<mln::uint8>& out);
void erode_c8_kernel(const mln::image2d<mln::uint8>& f, mln::image2d<mln::uint8>& out);
void erode_c8_cstyle(const mln::image2d<mln::uint8>& f, mln::image2d<mln::uint8>& out);
void erode_c8_cstyle_restrict(const mln::image2d<mln::uint8>& f, mln::image2d<mln::uint8>& out);
#include <mln/core/extension/fill.hpp>
#include <mln/core/image/image2d.hpp>
#include <mln/io/imread.hpp>
#include <mln/io/imsave.hpp>
#include <fixtures/ImagePath/image_path.hpp>
#include <benchmark/benchmark.h>
#include "BMErosion.hpp"
using namespace mln;
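// Benchmark fixture: loads lena.pgm once, allocates the output image, and fills the
// input's border (extension) with 255, the neutral element for the min-based erosion.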
struct BMErosion : public benchmark::Fixture
{
std::string filename = fixtures::ImagePath::concat_with_filename("lena.pgm");
BMErosion()
{
io::imread(filename, m_input);
int nr = m_input.nrows();
int nc = m_input.ncols();
resize(m_output, m_input);
m_bytes = nr * nc * sizeof(uint8);
mln::extension::fill(m_input, 255);
}
image2d<uint8> m_input;
image2d<uint8> m_output;
std::size_t m_bytes;
};
BENCHMARK_F(BMErosion, Erosion_CStyle_Uint8)(benchmark::State& st)
{
while (st.KeepRunning())
{
erode_c8_cstyle(m_input, m_output);
}
st.SetBytesProcessed(st.iterations() * m_bytes);
}
BENCHMARK_F(BMErosion, Erosion_CStyle_Restrict_Uint8)(benchmark::State& st)
{
while (st.KeepRunning())
{
erode_c8_cstyle_restrict(m_input, m_output);
}
st.SetBytesProcessed(st.iterations() * m_bytes);
}
BENCHMARK_F(BMErosion, Erosion_Iterators_Uint8)(benchmark::State& st)
{
while (st.KeepRunning())
{
erode_c8_mlnstyle(m_input, m_output);
}
st.SetBytesProcessed(st.iterations() * m_bytes);
}
BENCHMARK_F(BMErosion, Erosion_Kernel_Uint8)(benchmark::State& st)
{
while (st.KeepRunning())
{
erode_c8_kernel(m_input, m_output);
}
st.SetBytesProcessed(st.iterations() * m_bytes);
}
BENCHMARK_MAIN();
@@ -34,10 +34,9 @@ void Anisotropic_Diffusion_C(const mln::uint8* __restrict ibuffer, mln::uint8* _
class TestNeighborhood : public ::testing::Test
{
virtual void SetUp() override
{
std::string filename = fixtures::ImagePath::concat_with_filename("lena.ppm");
mln::image2d<mln::rgb8> tmp(0, 0);
mln::io::imread(filename, tmp);
......
#.rst:
#
# This file defines the following function:
#
# * add_benchmark(Executable src_1 src_2 ... src_n)
#
# It creates a target named ${Executable} (the benchmark binary) and a target
# run-${Executable} that runs the benchmark; every run-* target is attached to
# the global run-all-benchmarks target.
# If a reference result is available, a test with the same name is also added
# under the "SpeedTests" label; these tests can be run with `ctest -L SpeedTests`.
#
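# Example, mirroring the calls in the benchmark CMakeLists.txt (target names are
# illustrative):
#
#   add_benchmark(Erosion BMErosion.cpp BMErosion_main.cpp)
#   # -> builds the `Erosion` executable, adds a `run-Erosion` target hooked into
#   #    `run-all-benchmarks`, and registers an `Erosion` test under the SpeedTests
#   #    label when a reference file exists in references/.
#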
add_custom_target(run-all-benchmarks)
function(add_benchmark Executable)
set(Benchmark_SOURCES ${ARGN})
set(Benchmark_NAME ${Executable})
# Create benchmark exe
add_executable(${Benchmark_NAME} ${Benchmark_SOURCES})
target_link_libraries(${Benchmark_NAME} PRIVATE Fixtures::ImagePath Pylene::Pylene benchmark::benchmark ${FreeImage_LIBRARIES})
target_include_directories(${Benchmark_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
# Retrieve reference
set(Benchmark_OUTPUT_FILE ${Benchmark_NAME}-${CMAKE_CXX_COMPILER_ID}-${CMAKE_CXX_COMPILER_VERSION}.json)
# Create a target to run the benchmark
add_custom_target(run-${Benchmark_NAME}
COMMAND ${Benchmark_NAME} --benchmark_out_format=json --benchmark_out=${Benchmark_OUTPUT_FILE} --benchmark_repetitions=9 #--benchmark_display_aggregates_only=true
DEPENDS ${Benchmark_NAME}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
USES_TERMINAL)
add_dependencies(run-all-benchmarks run-${Benchmark_NAME})
# Create a speed test to run the benchmark and compare to the reference
if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/references/${Benchmark_OUTPUT_FILE}.md5")
message(STATUS "Test ${Benchmark_NAME} found.")
externaldata_add_test(
fetch-external-data
NAME ${Benchmark_NAME}
COMMAND python3 ${CMAKE_CURRENT_SOURCE_DIR}/tools/compare.py --display_aggregates_only benchmarks DATA{references/${Benchmark_OUTPUT_FILE}} ${Benchmark_OUTPUT_FILE})
set_tests_properties(${Benchmark_NAME} PROPERTIES
LABELS SpeedTests
RUN_SERIAL Yes)
endif()
endfunction(add_benchmark)
find_package(benchmark REQUIRED)
find_package(GTest REQUIRED)
include(CTest)
include(ExternalData)
include(BenchmarkMacros.cmake)
# Download file dependency
# Big images are stored at https://www.lrde.epita.fr/~mroynard/big_images/
file(DOWNLOAD "https://www.lrde.epita.fr/~mroynard/big_images/Space1_20MB.jpg" "${CMAKE_BINARY_DIR}/bench/Space1_20MB.jpg")
set(ExternalData_URL_TEMPLATES
"file:///lrde/dload/olena/pylene/data/%(algo)/%(hash)"
"https://www.lrde.epita.fr/dload/olena/pylene/data/%(algo)/%(hash)")
ExternalData_Expand_Arguments(
fetch-external-data
images
DATA{Space1_20MB.jpg}
)
# Extra compiler options
if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set(STANDALONE_COMPILE_FLAGS "-Rpass=loop-vectorize -Rpass-missed=loop-vectorize -Rpass-analysis=loop-vectorize -gline-tables-only -gcolumn-info ")
@@ -29,37 +41,18 @@ set(src_standalone
set_source_files_properties(${src_standalone} PROPERTIES COMPILE_FLAGS ${STANDALONE_COMPILE_FLAGS})
add_custom_target(benchmarks ALL COMMENT "Build all the benchmarks.")
add_custom_target(benchmarks-run
DEPENDS benchmarks
COMMENT "Build and run all the benchmarks.")
macro(add_benchmark Executable)
set(Sources ${ARGN})
add_executable(BM${Executable} ${Sources})
target_link_libraries(BM${Executable} PRIVATE Fixtures::ImagePath Pylene::Pylene benchmark::benchmark ${FreeImage_LIBRARIES})
target_include_directories(BM${Executable} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
enable_testing()
add_dependencies(benchmarks BM${Executable})
add_benchmark(BMNeighborhood BMNeighborhood.cpp BMNeighborhood_main.cpp)
add_benchmark(BMRotation BMRotation.cpp)
add_benchmark(BMDilation BMDilation.cpp)
add_benchmark(BMMorphers BMMorphers.cpp BMMorphers_main.cpp)
add_benchmark(BMReference_Linear BMReference_Linear.cpp BMReference_Linear_Reversed.cpp BMReference_Linear_main.cpp)
add_benchmark(BMReference_Neighborhood BMReference_Neighborhood.cpp BMReference_Neighborhood_main.cpp)
add_custom_target(BM${Executable}_run
COMMAND BM${Executable} --benchmark_out_format=json --benchmark_out=BM${Executable}.json
DEPENDS BM${Executable}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
USES_TERMINAL)
add_dependencies(benchmarks-run BM${Executable}_run)
endmacro(add_benchmark)
add_executable(UTReference_Neighborhood BMReference_Neighborhood.cpp BMReference_Neighborhood_test.cpp)
target_link_libraries(UTReference_Neighborhood PRIVATE Fixtures::ImagePath Pylene::Pylene GTest::Main GTest::GTest ${FreeImage_LIBRARIES})
add_benchmark(Erosion BMErosion.cpp BMErosion_main.cpp)
add_benchmark(Neighborhood BMNeighborhood.cpp BMNeighborhood_main.cpp)
add_benchmark(Rotation BMRotation.cpp)
add_benchmark(Dilation BMDilation.cpp)
add_benchmark(Morphers BMMorphers.cpp BMMorphers_main.cpp)
add_benchmark(Reference_Linear BMReference_Linear.cpp BMReference_Linear_Reversed.cpp BMReference_Linear_main.cpp)
add_benchmark(Reference_Neighborhood BMReference_Neighborhood.cpp BMReference_Neighborhood_main.cpp)
add_executable(UTReference_Neighborhood BMReference_Neighborhood.cpp BMReference_Neighborhood_test.cpp)
target_link_libraries(UTReference_Neighborhood Pylene::Pylene GTest::Main GTest::GTest ${FreeImage_LIBRARIES})
ExternalData_Add_Target(fetch-external-data)
# Compiling, running, and comparing benchmarks

## Fetching external data

Before compiling and running the benchmarks, some external data is required:

* external image samples (which can be quite large)
* reference benchmark results

Fetch this data first with:
```sh
cmake --build . --target fetch-external-data
```
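Under the hood this uses CMake's `ExternalData` module: the repository only stores small
`*.md5` content links, and `fetch-external-data` downloads the actual files from the
configured URL templates. A hedged illustration (the file name is hypothetical; the hash
is one of the reference checksums shipped here):

```sh
# Hypothetical content link kept under bench/references/
cat bench/references/Erosion-GNU-8.2.0.json.md5
# d72f20e1cd3c273dcdbd5660dc8e0ffc
# fetch-external-data resolves the hash against the URL templates, e.g.
# https://www.lrde.epita.fr/dload/olena/pylene/data/MD5/d72f20e1cd3c273dcdbd5660dc8e0ffc
```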
## Compiling and executing benchmarks

Benchmarks are built and then executed automatically when the `run-all-benchmarks`
target is triggered:
```sh
cmake --build . --target run-all-benchmarks --config Release
```
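Each `add_benchmark(<Name> ...)` call also defines its own `run-<Name>` target, so a single
benchmark can be built and run in isolation; for instance, for the `Erosion` benchmark
declared in the bench CMakeLists:

```sh
cmake --build . --target run-Erosion --config Release
```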
Each benchmark run produces a speed report named `{BenchmarkName}-{Compiler}-{Version}.json`.

## Non-regression tests

Once the benchmarks have been run, you can compare the results against the references by
running the tests. Note that a test runs only if a matching reference file exists.
```sh
ctest -L SpeedTests -V
```
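To re-run only one non-regression test, the usual `ctest` name selection applies as well,
e.g. (assuming a reference exists for `Erosion`):

```sh
ctest -L SpeedTests -R Erosion -V
```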
# Adding & updating references

* Download the benchmark artifacts from the CI
* Copy the artifacts to `/lrde/dload/olena/pylene/data/benchmarks`
* Create links in `/lrde/dload/olena/pylene/data/MD5` pointing to the new benchmark files (use the `install.sh` script)
* Update the MD5 checksums in `CVS_ROOT/bench/references` by moving there all the `*.md5` files generated by `install.sh` (a sketch of these steps follows the list)
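What follows is only a hedged sketch of these manual steps, assuming `install.sh` performs
the equivalent hashing and linking; the artifact name is hypothetical:

```sh
# Hypothetical CI artifact:
artifact=Erosion-GNU-8.2.0.json
hash=$(md5sum "$artifact" | cut -d' ' -f1)

# 1. Store the artifact server-side.
cp "$artifact" /lrde/dload/olena/pylene/data/benchmarks/

# 2. Expose it under its MD5 hash so the ExternalData URL templates can resolve it.
ln -s "../benchmarks/$artifact" "/lrde/dload/olena/pylene/data/MD5/$hash"

# 3. Commit the content link into the repository.
echo "$hash" > "bench/references/$artifact.md5"
```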
d72f20e1cd3c273dcdbd5660dc8e0ffc
d0f76f7c435caa6feb87b64f246b0563
0e52ca75826c746e4a2c02f677eadbef