From f08244e5ae85c70e72e23caed54f0cc35096a5a0 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 4 Sep 2024 10:30:02 +0200 Subject: [PATCH 01/12] ci: test various java distribution on windows --- .../workflows/amd64_windows_cmake_java.yml | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/.github/workflows/amd64_windows_cmake_java.yml b/.github/workflows/amd64_windows_cmake_java.yml index 24265720552..2bf265b6a24 100644 --- a/.github/workflows/amd64_windows_cmake_java.yml +++ b/.github/workflows/amd64_windows_cmake_java.yml @@ -11,11 +11,35 @@ jobs: cmake: [ {generator: "Visual Studio 17 2022", config: Release, build_target: ALL_BUILD, test_target: RUN_TESTS, install_target: INSTALL}, ] + java: [ + # see https://endoflife.date/azul-zulu + {distrib: 'zulu', version: '8'}, # 2030/12 + {distrib: 'zulu', version: '11'}, # 2026/09 + {distrib: 'zulu', version: '17'}, # 2029/09 + {distrib: 'zulu', version: '21'}, # 2031/09 + # see https://endoflife.date/eclipse-temurin + {distrib: 'temurin', version: '8'}, # 2026/11 + {distrib: 'temurin', version: '11'}, # 2027/10 + {distrib: 'temurin', version: '17'}, # 2027/10 + {distrib: 'temurin', version: '21'}, # 2029/12 + # see https://endoflife.date/microsoft-build-of-openjdk + {distrib: 'microsoft', version: '11'}, # 2027/09 + {distrib: 'microsoft', version: '17'}, # 2027/09 + {distrib: 'microsoft', version: '21'}, # 2028/09 + ] fail-fast: false - name: Windows • ${{ matrix.cmake.generator }} • Java + name: Windows • ${{ matrix.cmake.generator }} • ${{ matrix.java.distrib }}-${{ matrix.java.version }} runs-on: windows-latest steps: - uses: actions/checkout@v4 + - uses: actions/setup-java@v4 + with: + distribution: ${{ matrix.java.distrib }} + java-version: ${{ matrix.java.version }} + - name: Check java + run: | + java -version + mvn --version - name: Install SWIG 4.2.1 run: | (New-Object 
System.Net.WebClient).DownloadFile("http://prdownloads.sourceforge.net/swig/swigwin-4.2.1.zip","swigwin-4.2.1.zip"); @@ -23,8 +47,6 @@ jobs: echo "$((Get-Item .).FullName)/swigwin-4.2.1" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - name: Check swig run: swig -version - - name: Check java - run: java -version - name: Check cmake run: cmake --version - name: Configure From bb19935571913de6f3704f399f8003bb0d4df90e Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 4 Sep 2024 08:36:04 +0200 Subject: [PATCH 02/12] cmake: dotnet test fixup --- cmake/dotnet.cmake | 26 +++++++++++++++++++++----- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/cmake/dotnet.cmake b/cmake/dotnet.cmake index 94ee599258d..4f0b006e3b9 100644 --- a/cmake/dotnet.cmake +++ b/cmake/dotnet.cmake @@ -264,11 +264,27 @@ function(add_dotnet_test) WORKING_DIRECTORY ${DOTNET_TEST_DIR}) if(BUILD_TESTING) - add_test( - NAME dotnet_${COMPONENT_NAME}_${TEST_NAME} - COMMAND ${CMAKE_COMMAND} -E env --unset=TARGETNAME - ${DOTNET_EXECUTABLE} test --nologo -c Release ${TEST_NAME}.csproj - WORKING_DIRECTORY ${DOTNET_TEST_DIR}) + if(USE_DOTNET_6) + add_test( + NAME dotnet_${COMPONENT_NAME}_${TEST_NAME}_net60 + COMMAND ${CMAKE_COMMAND} -E env --unset=TARGETNAME + ${DOTNET_EXECUTABLE} test --nologo --framework net6.0 -c Release + WORKING_DIRECTORY ${DOTNET_TEST_DIR}) + endif() + if(USE_DOTNET_7) + add_test( + NAME dotnet_${COMPONENT_NAME}_${TEST_NAME}_net70 + COMMAND ${CMAKE_COMMAND} -E env --unset=TARGETNAME + ${DOTNET_EXECUTABLE} test --nologo --framework net7.0 -c Release + WORKING_DIRECTORY ${DOTNET_TEST_DIR}) + endif() + if(USE_DOTNET_8) + add_test( + NAME dotnet_${COMPONENT_NAME}_${TEST_NAME}_net80 + COMMAND ${CMAKE_COMMAND} -E env --unset=TARGETNAME + ${DOTNET_EXECUTABLE} test --nologo --framework net8.0 -c Release + WORKING_DIRECTORY ${DOTNET_TEST_DIR}) + endif() endif() message(STATUS "Configuring test ${TEST_FILE_NAME} ...DONE") endfunction() From 
9fdba5fc750f5afbbfe35a12deea6fc45f54cb77 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 4 Sep 2024 13:37:16 +0200 Subject: [PATCH 03/12] ci: remove swigwin setup from workflows --- .github/workflows/amd64_windows_cmake_dotnet.yml | 7 ------- .github/workflows/amd64_windows_cmake_java.yml | 7 ------- .github/workflows/amd64_windows_cmake_python.yml | 7 ------- cmake/dependencies/SWIG.CMakeLists.txt.in | 2 +- 4 files changed, 1 insertion(+), 22 deletions(-) diff --git a/.github/workflows/amd64_windows_cmake_dotnet.yml b/.github/workflows/amd64_windows_cmake_dotnet.yml index e5b676f5a79..fc8100fa3c5 100644 --- a/.github/workflows/amd64_windows_cmake_dotnet.yml +++ b/.github/workflows/amd64_windows_cmake_dotnet.yml @@ -16,13 +16,6 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@v4 - - name: Install SWIG 4.2.1 - run: | - (New-Object System.Net.WebClient).DownloadFile("http://prdownloads.sourceforge.net/swig/swigwin-4.2.1.zip","swigwin-4.2.1.zip"); - Expand-Archive .\swigwin-4.2.1.zip .; - echo "$((Get-Item .).FullName)/swigwin-4.2.1" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Check swig - run: swig -version - name: Setup .NET 6.0 uses: actions/setup-dotnet@v4 with: diff --git a/.github/workflows/amd64_windows_cmake_java.yml b/.github/workflows/amd64_windows_cmake_java.yml index 2bf265b6a24..1dc8056b8e8 100644 --- a/.github/workflows/amd64_windows_cmake_java.yml +++ b/.github/workflows/amd64_windows_cmake_java.yml @@ -40,13 +40,6 @@ jobs: run: | java -version mvn --version - - name: Install SWIG 4.2.1 - run: | - (New-Object System.Net.WebClient).DownloadFile("http://prdownloads.sourceforge.net/swig/swigwin-4.2.1.zip","swigwin-4.2.1.zip"); - Expand-Archive .\swigwin-4.2.1.zip .; - echo "$((Get-Item .).FullName)/swigwin-4.2.1" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Check swig - run: swig -version - name: Check cmake run: cmake --version - name: Configure diff --git 
a/.github/workflows/amd64_windows_cmake_python.yml b/.github/workflows/amd64_windows_cmake_python.yml index 1126ab356d2..f280c787dbd 100644 --- a/.github/workflows/amd64_windows_cmake_python.yml +++ b/.github/workflows/amd64_windows_cmake_python.yml @@ -27,13 +27,6 @@ jobs: python-version: ${{ matrix.python.version }} - name: Install python3 run: python3 -m pip install --user mypy-protobuf absl-py setuptools wheel numpy pandas - - name: Install SWIG 4.2.1 - run: | - (New-Object System.Net.WebClient).DownloadFile("http://prdownloads.sourceforge.net/swig/swigwin-4.2.1.zip","swigwin-4.2.1.zip"); - Expand-Archive .\swigwin-4.2.1.zip .; - echo "$((Get-Item .).FullName)/swigwin-4.2.1" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - - name: Check swig - run: swig -version - name: Add Python binaries to path run: > echo "$((Get-Item ~).FullName)/AppData/Roaming/Python/${{ matrix.python.dir }}/Scripts" | diff --git a/cmake/dependencies/SWIG.CMakeLists.txt.in b/cmake/dependencies/SWIG.CMakeLists.txt.in index 08502562937..89be36cb37e 100644 --- a/cmake/dependencies/SWIG.CMakeLists.txt.in +++ b/cmake/dependencies/SWIG.CMakeLists.txt.in @@ -11,7 +11,7 @@ ExternalProject_Add(SWIG_project SOURCE_DIR "@CMAKE_CURRENT_BINARY_DIR@/${PROJECT_NAME}/source" BUILD_IN_SOURCE 1 - URL "http://prdownloads.sourceforge.net/swig/swigwin-4.1.1.zip" + URL "http://prdownloads.sourceforge.net/swig/swigwin-4.2.1.zip" LOG_DOWNLOAD TRUE UPDATE_COMMAND "" From 2fbead5222a5ce1e4f91a85c4f8b69cf930dca65 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Wed, 4 Sep 2024 14:17:12 +0200 Subject: [PATCH 04/12] ci: Add CTEST_OUTPUT_ON_FAILURE=1 variable env --- .github/workflows/amd64_windows_cmake_cpp.yml | 2 ++ .github/workflows/amd64_windows_cmake_dotnet.yml | 2 ++ .github/workflows/amd64_windows_cmake_java.yml | 2 ++ .github/workflows/amd64_windows_cmake_python.yml | 2 ++ 4 files changed, 8 insertions(+) diff --git a/.github/workflows/amd64_windows_cmake_cpp.yml 
b/.github/workflows/amd64_windows_cmake_cpp.yml index 18175b69de5..d30baf8625e 100644 --- a/.github/workflows/amd64_windows_cmake_cpp.yml +++ b/.github/workflows/amd64_windows_cmake_cpp.yml @@ -15,6 +15,8 @@ jobs: fail-fast: false name: Windows • ${{ matrix.cmake.generator }} (${{ matrix.cmake.config }}) • C++ runs-on: windows-latest + env: + CTEST_OUTPUT_ON_FAILURE: 1 steps: - uses: actions/checkout@v4 - name: Check cmake diff --git a/.github/workflows/amd64_windows_cmake_dotnet.yml b/.github/workflows/amd64_windows_cmake_dotnet.yml index fc8100fa3c5..4e8e612092c 100644 --- a/.github/workflows/amd64_windows_cmake_dotnet.yml +++ b/.github/workflows/amd64_windows_cmake_dotnet.yml @@ -14,6 +14,8 @@ jobs: fail-fast: false name: Windows • ${{ matrix.cmake.generator }} • .Net runs-on: windows-latest + env: + CTEST_OUTPUT_ON_FAILURE: 1 steps: - uses: actions/checkout@v4 - name: Setup .NET 6.0 diff --git a/.github/workflows/amd64_windows_cmake_java.yml b/.github/workflows/amd64_windows_cmake_java.yml index 1dc8056b8e8..4f535563212 100644 --- a/.github/workflows/amd64_windows_cmake_java.yml +++ b/.github/workflows/amd64_windows_cmake_java.yml @@ -30,6 +30,8 @@ jobs: fail-fast: false name: Windows • ${{ matrix.cmake.generator }} • ${{ matrix.java.distrib }}-${{ matrix.java.version }} runs-on: windows-latest + env: + CTEST_OUTPUT_ON_FAILURE: 1 steps: - uses: actions/checkout@v4 - uses: actions/setup-java@v4 diff --git a/.github/workflows/amd64_windows_cmake_python.yml b/.github/workflows/amd64_windows_cmake_python.yml index f280c787dbd..a8d0babfd60 100644 --- a/.github/workflows/amd64_windows_cmake_python.yml +++ b/.github/workflows/amd64_windows_cmake_python.yml @@ -20,6 +20,8 @@ jobs: fail-fast: false name: Windows • ${{ matrix.cmake.generator }} • Python-${{ matrix.python.version }} runs-on: windows-latest + env: + CTEST_OUTPUT_ON_FAILURE: 1 steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v4 From 7de3a9f8d5e1c8b7cd0dfcd1982efbdcc22a17fc Mon Sep 17 00:00:00 
2001 From: Corentin Le Molgat Date: Wed, 4 Sep 2024 14:45:59 +0200 Subject: [PATCH 05/12] ci: try to bump maven --- .github/workflows/amd64_windows_cmake_java.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/amd64_windows_cmake_java.yml b/.github/workflows/amd64_windows_cmake_java.yml index 4f535563212..190ebf7d3ff 100644 --- a/.github/workflows/amd64_windows_cmake_java.yml +++ b/.github/workflows/amd64_windows_cmake_java.yml @@ -38,6 +38,10 @@ jobs: with: distribution: ${{ matrix.java.distrib }} java-version: ${{ matrix.java.version }} + - name: Update maven + run: | + choco upgrade maven + echo "C:\ProgramData\chocolatey\lib\maven\apache-maven-3.9.9\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - name: Check java run: | java -version From 37dd30cbe9c8bae6f3e35f7ee4cdfa385c538c5a Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 6 Sep 2024 07:54:09 +0200 Subject: [PATCH 06/12] python: Fix protobuf supported range note: using python 5.28 will lead to multiple segfault --- ortools/python/setup.py.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ortools/python/setup.py.in b/ortools/python/setup.py.in index caaa1d7a353..23d0ed4f85b 100644 --- a/ortools/python/setup.py.in +++ b/ortools/python/setup.py.in @@ -46,7 +46,7 @@ setup( 'absl-py >= 2.0.0', 'numpy >= 1.13.3', 'pandas >= 2.0.0', - 'protobuf >= 5.27.3', + 'protobuf >= 5.27.3,<5.28', 'immutabledict >= 3.0.0', ], package_data={ From 21d767b3b5573b40c35e2ed547b7d110ab554861 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 30 Aug 2024 17:24:41 +0200 Subject: [PATCH 07/12] algorithms: Add python.set_cover --- cmake/python.cmake | 4 + ortools/algorithms/BUILD.bazel | 20 + ortools/algorithms/python/BUILD.bazel | 28 ++ ortools/algorithms/python/CMakeLists.txt | 27 + ortools/algorithms/python/set_cover.cc | 239 +++++++++ ortools/algorithms/python/set_cover_test.py | 207 ++++++++ ortools/algorithms/set_cover_heuristics.h | 2 +- 
ortools/algorithms/set_cover_invariant.cc | 22 +- ortools/algorithms/set_cover_invariant.h | 5 + ortools/algorithms/set_cover_lagrangian.cc | 520 ++++++++++++++++++++ ortools/algorithms/set_cover_lagrangian.h | 154 ++++++ ortools/algorithms/set_cover_mip.cc | 21 +- ortools/algorithms/set_cover_model.cc | 8 +- ortools/algorithms/set_cover_model.h | 2 +- ortools/algorithms/set_cover_orlib_test.cc | 61 ++- ortools/algorithms/set_cover_test.cc | 8 +- ortools/python/setup.py.in | 1 + 17 files changed, 1287 insertions(+), 42 deletions(-) create mode 100644 ortools/algorithms/python/set_cover.cc create mode 100644 ortools/algorithms/python/set_cover_test.py create mode 100644 ortools/algorithms/set_cover_lagrangian.cc create mode 100644 ortools/algorithms/set_cover_lagrangian.h diff --git a/cmake/python.cmake b/cmake/python.cmake index e8cd1ca509f..123468cf62d 100644 --- a/cmake/python.cmake +++ b/cmake/python.cmake @@ -455,6 +455,8 @@ add_custom_command( $ ${PYTHON_PROJECT}/init/python COMMAND ${CMAKE_COMMAND} -E copy $ ${PYTHON_PROJECT}/algorithms/python + COMMAND ${CMAKE_COMMAND} -E copy + $ ${PYTHON_PROJECT}/algorithms/python COMMAND ${CMAKE_COMMAND} -E copy $ ${PYTHON_PROJECT}/graph/python COMMAND ${CMAKE_COMMAND} -E copy @@ -492,6 +494,7 @@ add_custom_command( DEPENDS init_pybind11 knapsack_solver_pybind11 + set_cover_pybind11 linear_sum_assignment_pybind11 max_flow_pybind11 min_cost_flow_pybind11 @@ -530,6 +533,7 @@ add_custom_command( COMMAND ${CMAKE_COMMAND} -E remove -f stub_timestamp COMMAND ${stubgen_EXECUTABLE} -p ortools.init.python.init --output . COMMAND ${stubgen_EXECUTABLE} -p ortools.algorithms.python.knapsack_solver --output . + COMMAND ${stubgen_EXECUTABLE} -p ortools.algorithms.python.set_cover --output . COMMAND ${stubgen_EXECUTABLE} -p ortools.graph.python.linear_sum_assignment --output . COMMAND ${stubgen_EXECUTABLE} -p ortools.graph.python.max_flow --output . COMMAND ${stubgen_EXECUTABLE} -p ortools.graph.python.min_cost_flow --output . 
diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel index 1e5a4f12fa0..3eec4c76014 100644 --- a/ortools/algorithms/BUILD.bazel +++ b/ortools/algorithms/BUILD.bazel @@ -14,6 +14,7 @@ load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") +load("@rules_python//python:proto.bzl", "py_proto_library") package(default_visibility = ["//visibility:public"]) @@ -259,6 +260,24 @@ cc_proto_library( deps = [":set_cover_proto"], ) +py_proto_library( + name = "set_cover_py_pb2", + deps = [":set_cover_proto"], +) + +cc_library( + name = "set_cover_lagrangian", + srcs = ["set_cover_lagrangian.cc"], + hdrs = ["set_cover_lagrangian.h"], + deps = [ + ":adjustable_k_ary_heap", + ":set_cover_invariant", + ":set_cover_model", + "//ortools/base:threadpool", + "@com_google_absl//absl/log:check", + ], +) + cc_library( name = "set_cover_model", srcs = ["set_cover_model.cc"], @@ -284,6 +303,7 @@ cc_library( "//ortools/base", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", + "@com_google_absl//absl/types:span", ], ) diff --git a/ortools/algorithms/python/BUILD.bazel b/ortools/algorithms/python/BUILD.bazel index f4ddbd6baaf..fe3de2c5f94 100644 --- a/ortools/algorithms/python/BUILD.bazel +++ b/ortools/algorithms/python/BUILD.bazel @@ -44,6 +44,7 @@ config_setting( }, ) +# knapsack_solver cc_library( name = "knapsack_solver_doc", hdrs = ["knapsack_solver_doc.h"], @@ -77,3 +78,30 @@ py_test( requirement("absl-py"), ], ) + +# set_cover +pybind_extension( + name = "set_cover", + srcs = ["set_cover.cc"], + visibility = ["//visibility:public"], + deps = [ + "//ortools/algorithms:set_cover_cc_proto", + "//ortools/algorithms:set_cover_heuristics", + "//ortools/algorithms:set_cover_invariant", + "//ortools/algorithms:set_cover_model", + "//ortools/algorithms:set_cover_reader", + "@com_google_absl//absl/strings", + 
"@pybind11_protobuf//pybind11_protobuf:native_proto_caster", + ], +) + +py_test( + name = "set_cover_test", + srcs = ["set_cover_test.py"], + python_version = "PY3", + deps = [ + ":set_cover", + "//ortools/algorithms:set_cover_py_pb2", + requirement("absl-py"), + ], +) diff --git a/ortools/algorithms/python/CMakeLists.txt b/ortools/algorithms/python/CMakeLists.txt index f60ee7c1d1e..332fec6d56e 100644 --- a/ortools/algorithms/python/CMakeLists.txt +++ b/ortools/algorithms/python/CMakeLists.txt @@ -11,6 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +# knapsack_solver pybind11_add_module(knapsack_solver_pybind11 MODULE knapsack_solver.cc) set_target_properties(knapsack_solver_pybind11 PROPERTIES LIBRARY_OUTPUT_NAME "knapsack_solver") @@ -33,6 +34,32 @@ endif() target_link_libraries(knapsack_solver_pybind11 PRIVATE ${PROJECT_NAMESPACE}::ortools) add_library(${PROJECT_NAMESPACE}::knapsack_solver_pybind11 ALIAS knapsack_solver_pybind11) +# set_cover +pybind11_add_module(set_cover_pybind11 MODULE set_cover.cc) +set_target_properties(set_cover_pybind11 PROPERTIES + LIBRARY_OUTPUT_NAME "set_cover") + +# note: macOS is APPLE and also UNIX ! 
+if(APPLE) + set_target_properties(set_cover_pybind11 PROPERTIES + SUFFIX ".so" + INSTALL_RPATH "@loader_path;@loader_path/../../../${PYTHON_PROJECT}/.libs" + ) + set_property(TARGET set_cover_pybind11 APPEND PROPERTY + LINK_FLAGS "-flat_namespace -undefined suppress" + ) +elseif(UNIX) + set_target_properties(set_cover_pybind11 PROPERTIES + INSTALL_RPATH "$ORIGIN:$ORIGIN/../../../${PYTHON_PROJECT}/.libs" + ) +endif() + +target_link_libraries(set_cover_pybind11 PRIVATE + ${PROJECT_NAMESPACE}::ortools + pybind11_native_proto_caster +) +add_library(${PROJECT_NAMESPACE}::set_cover_pybind11 ALIAS set_cover_pybind11) + if(BUILD_TESTING) file(GLOB PYTHON_SRCS "*_test.py") foreach(FILE_NAME IN LISTS PYTHON_SRCS) diff --git a/ortools/algorithms/python/set_cover.cc b/ortools/algorithms/python/set_cover.cc new file mode 100644 index 00000000000..2d107fdfc05 --- /dev/null +++ b/ortools/algorithms/python/set_cover.cc @@ -0,0 +1,239 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A pybind11 wrapper for set_cover_*. 
+ +#include + +#include "absl/base/nullability.h" +#include "ortools/algorithms/set_cover_heuristics.h" +#include "ortools/algorithms/set_cover_invariant.h" +#include "ortools/algorithms/set_cover_model.h" +#include "ortools/algorithms/set_cover_reader.h" +#include "pybind11/pybind11.h" +#include "pybind11/pytypes.h" +#include "pybind11/stl.h" +#include "pybind11_protobuf/native_proto_caster.h" + +using ::operations_research::ElementDegreeSolutionGenerator; +using ::operations_research::GreedySolutionGenerator; +using ::operations_research::GuidedLocalSearch; +using ::operations_research::Preprocessor; +using ::operations_research::RandomSolutionGenerator; +using ::operations_research::ReadBeasleySetCoverProblem; +using ::operations_research::ReadRailSetCoverProblem; +using ::operations_research::SetCoverInvariant; +using ::operations_research::SetCoverModel; +using ::operations_research::SteepestSearch; +using ::operations_research::SubsetIndex; +using ::operations_research::TrivialSolutionGenerator; + +namespace py = pybind11; +using ::py::arg; + +// General note about TODOs: the corresponding functions/classes/methods are +// more complex to wrap, as they use nonstandard types, and are less important, +// as they are not as useful to most users (mostly useful to write some custom +// Python heuristics). 
+ +PYBIND11_MODULE(set_cover, m) { + pybind11_protobuf::ImportNativeProtoCasters(); + + // set_cover_model.h + py::class_(m, "SetCoverModel") + .def(py::init<>()) + .def_property_readonly("num_elements", &SetCoverModel::num_elements) + .def_property_readonly("num_subsets", &SetCoverModel::num_subsets) + .def_property_readonly("num_nonzeros", &SetCoverModel::num_nonzeros) + .def_property_readonly("fill_rate", &SetCoverModel::FillRate) + .def("add_empty_subset", &SetCoverModel::AddEmptySubset, arg("cost")) + .def( + "add_element_to_last_subset", + [](SetCoverModel& model, int element) { + model.AddElementToLastSubset(element); + }, + arg("element")) + .def( + "set_subset_cost", + [](SetCoverModel& model, int subset, double cost) { + model.SetSubsetCost(subset, cost); + }, + arg("subset"), arg("cost")) + .def( + "add_element_to_subset", + [](SetCoverModel& model, int element, int subset) { + model.AddElementToSubset(element, subset); + }, + arg("subset"), arg("cost")) + .def("compute_feasibility", &SetCoverModel::ComputeFeasibility) + .def( + "reserve_num_subsets", + [](SetCoverModel& model, int num_subsets) { + model.ReserveNumSubsets(num_subsets); + }, + arg("num_subsets")) + .def( + "reserve_num_elements_in_subset", + [](SetCoverModel& model, int num_elements, int subset) { + model.ReserveNumElementsInSubset(num_elements, subset); + }, + arg("num_elements"), arg("subset")) + .def("export_model_as_proto", &SetCoverModel::ExportModelAsProto) + .def("import_model_from_proto", &SetCoverModel::ImportModelFromProto); + // TODO(user): add support for subset_costs, columns, rows, + // row_view_is_valid, SubsetRange, ElementRange, all_subsets, + // CreateSparseRowView, ComputeCostStats, ComputeRowStats, + // ComputeColumnStats, ComputeRowDeciles, ComputeColumnDeciles. + + // TODO(user): wrap IntersectingSubsetsIterator. 
+ + // set_cover_invariant.h + py::class_(m, "SetCoverInvariant") + .def(py::init()) + .def("initialize", &SetCoverInvariant::Initialize) + .def("clear", &SetCoverInvariant::Clear) + .def("recompute_invariant", &SetCoverInvariant::RecomputeInvariant) + .def("model", &SetCoverInvariant::model) + .def_property( + "model", + // Expected semantics: give a pointer to Python **while + // keeping ownership** in C++. + [](SetCoverInvariant& invariant) -> std::shared_ptr { + // https://pybind11.readthedocs.io/en/stable/advanced/smart_ptrs.html#std-shared-ptr + std::shared_ptr ptr(invariant.model()); + return ptr; + }, + [](SetCoverInvariant& invariant, const SetCoverModel& model) { + *invariant.model() = model; + }) + .def("cost", &SetCoverInvariant::cost) + .def("num_uncovered_elements", &SetCoverInvariant::num_uncovered_elements) + .def("clear_trace", &SetCoverInvariant::ClearTrace) + .def("clear_removability_information", + &SetCoverInvariant::ClearRemovabilityInformation) + .def("compress_trace", &SetCoverInvariant::CompressTrace) + .def("check_consistency", &SetCoverInvariant::CheckConsistency) + .def( + "flip", + [](SetCoverInvariant& invariant, int subset) { + invariant.Flip(SubsetIndex(subset)); + }, + arg("subset")) + .def( + "flip_and_fully_update", + [](SetCoverInvariant& invariant, int subset) { + invariant.FlipAndFullyUpdate(SubsetIndex(subset)); + }, + arg("subset")) + .def( + "select", + [](SetCoverInvariant& invariant, int subset) { + invariant.Select(SubsetIndex(subset)); + }, + arg("subset")) + .def( + "select_and_fully_update", + [](SetCoverInvariant& invariant, int subset) { + invariant.SelectAndFullyUpdate(SubsetIndex(subset)); + }, + arg("subset")) + .def( + "deselect", + [](SetCoverInvariant& invariant, int subset) { + invariant.Deselect(SubsetIndex(subset)); + }, + arg("subset")) + .def( + "deselect_and_fully_update", + [](SetCoverInvariant& invariant, int subset) { + invariant.DeselectAndFullyUpdate(SubsetIndex(subset)); + }, + arg("subset")) + 
.def("export_solution_as_proto", + &SetCoverInvariant::ExportSolutionAsProto) + .def("import_solution_from_proto", + &SetCoverInvariant::ImportSolutionFromProto); + // TODO(user): add support for is_selected, num_free_elements, + // num_coverage_le_1_elements, coverage, ComputeCoverageInFocus, + // is_redundant, trace, new_removable_subsets, new_non_removable_subsets, + // LoadSolution, ComputeIsRedundant. + + // set_cover_heuristics.h + py::class_(m, "Preprocessor") + .def(py::init>()) + .def("next_solution", + [](Preprocessor& heuristic) -> bool { + return heuristic.NextSolution(); + }) + .def("num_columns_fixed_by_singleton_row", + &Preprocessor::num_columns_fixed_by_singleton_row); + // TODO(user): add support for focus argument. + + py::class_(m, "TrivialSolutionGenerator") + .def(py::init()) + .def("next_solution", [](TrivialSolutionGenerator& heuristic) -> bool { + return heuristic.NextSolution(); + }); + // TODO(user): add support for focus argument. + + py::class_(m, "RandomSolutionGenerator") + .def(py::init()) + .def("next_solution", [](RandomSolutionGenerator& heuristic) -> bool { + return heuristic.NextSolution(); + }); + // TODO(user): add support for focus argument. + + py::class_(m, "GreedySolutionGenerator") + .def(py::init()) + .def("next_solution", [](GreedySolutionGenerator& heuristic) -> bool { + return heuristic.NextSolution(); + }); + // TODO(user): add support for focus and cost arguments. + + py::class_(m, + "ElementDegreeSolutionGenerator") + .def(py::init()) + .def("next_solution", + [](ElementDegreeSolutionGenerator& heuristic) -> bool { + return heuristic.NextSolution(); + }); + // TODO(user): add support for focus and cost arguments. + + py::class_(m, "SteepestSearch") + .def(py::init()) + .def("next_solution", + [](SteepestSearch& heuristic, int num_iterations) -> bool { + return heuristic.NextSolution(num_iterations); + }); + // TODO(user): add support for focus and cost arguments. 
+ + py::class_(m, "GuidedLocalSearch") + .def(py::init()) + .def("initialize", &GuidedLocalSearch::Initialize) + .def("next_solution", + [](GuidedLocalSearch& heuristic, int num_iterations) -> bool { + return heuristic.NextSolution(num_iterations); + }); + // TODO(user): add support for focus and cost arguments. + + // TODO(user): add support for ClearRandomSubsets, ClearRandomSubsets, + // ClearMostCoveredElements, ClearMostCoveredElements, TabuList, + // GuidedTabuSearch. + + // set_cover_reader.h + m.def("read_beasly_set_cover_problem", &ReadBeasleySetCoverProblem); + m.def("read_rail_set_cover_problem", &ReadRailSetCoverProblem); + + // set_cover_lagrangian.h + // TODO(user): add support for SetCoverLagrangian. +} diff --git a/ortools/algorithms/python/set_cover_test.py b/ortools/algorithms/python/set_cover_test.py new file mode 100644 index 00000000000..73624eaa8c3 --- /dev/null +++ b/ortools/algorithms/python/set_cover_test.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python3 +# Copyright 2010-2024 Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from absl import app +from absl.testing import absltest + +from ortools.algorithms.python import set_cover + + +def create_initial_cover_model(): + model = set_cover.SetCoverModel() + model.add_empty_subset(1.0) + model.add_element_to_last_subset(0) + model.add_empty_subset(1.0) + model.add_element_to_last_subset(1) + model.add_element_to_last_subset(2) + model.add_empty_subset(1.0) + model.add_element_to_last_subset(1) + model.add_empty_subset(1.0) + model.add_element_to_last_subset(2) + return model + + +def create_knights_cover_model(num_rows: int, num_cols: int) -> set_cover.SetCoverModel: + model = set_cover.SetCoverModel() + knight_row_move = [2, 1, -1, -2, -2, -1, 1, 2] + knight_col_move = [1, 2, 2, 1, -1, -2, -2, -1] + + for row in range(num_rows): + for col in range(num_cols): + model.add_empty_subset(1.0) + model.add_element_to_last_subset(row * num_cols + col) + + for i in range(8): + new_row = row + knight_row_move[i] + new_col = col + knight_col_move[i] + if 0 <= new_row < num_rows and 0 <= new_col < num_cols: + model.add_element_to_last_subset(new_row * num_cols + new_col) + + return model + + +# This test case is mostly a Python port of set_cover_test.cc. +class SetCoverTest(absltest.TestCase): + + def test_save_reload(self): + model = create_knights_cover_model(10, 10) + proto = model.export_model_as_proto() + reloaded = set_cover.SetCoverModel() + reloaded.import_model_from_proto(proto) + + self.assertEqual(model.num_subsets, reloaded.num_subsets) + self.assertEqual(model.num_elements, reloaded.num_elements) + # TODO(user): these methods are not yet wrapped. 
+ # self.assertEqual(model.subset_costs, reloaded.subset_costs) + # self.assertEqual(model.columns, reloaded.columns) + + def test_save_reload_twice(self): + model = create_knights_cover_model(3, 3) + inv = set_cover.SetCoverInvariant(model) + + greedy = set_cover.GreedySolutionGenerator(inv) + self.assertTrue(greedy.next_solution()) + self.assertTrue(inv.check_consistency()) + greedy_proto = inv.export_solution_as_proto() + + steepest = set_cover.SteepestSearch(inv) + self.assertTrue(steepest.next_solution(500)) + self.assertTrue(inv.check_consistency()) + steepest_proto = inv.export_solution_as_proto() + + inv.import_solution_from_proto(greedy_proto) + self.assertTrue(steepest.next_solution(500)) + self.assertTrue(inv.check_consistency()) + reloaded_proto = inv.export_solution_as_proto() + self.assertEqual(str(steepest_proto), str(reloaded_proto)) + + def test_initial_values(self): + model = create_initial_cover_model() + self.assertTrue(model.compute_feasibility()) + + inv = set_cover.SetCoverInvariant(model) + trivial = set_cover.TrivialSolutionGenerator(inv) + self.assertTrue(trivial.next_solution()) + self.assertTrue(inv.check_consistency()) + + greedy = set_cover.GreedySolutionGenerator(inv) + self.assertTrue(greedy.next_solution()) + self.assertTrue(inv.check_consistency()) + + self.assertEqual(inv.num_uncovered_elements(), 0) + steepest = set_cover.SteepestSearch(inv) + self.assertTrue(steepest.next_solution(500)) + self.assertTrue(inv.check_consistency()) + + def test_preprocessor(self): + model = create_initial_cover_model() + self.assertTrue(model.compute_feasibility()) + + inv = set_cover.SetCoverInvariant(model) + preprocessor = set_cover.Preprocessor(inv) + self.assertTrue(preprocessor.next_solution()) + self.assertTrue(inv.check_consistency()) + + greedy = set_cover.GreedySolutionGenerator(inv) + self.assertTrue(greedy.next_solution()) + self.assertTrue(inv.check_consistency()) + + def test_infeasible(self): + model = set_cover.SetCoverModel() + 
model.add_empty_subset(1.0) + model.add_element_to_last_subset(0) + model.add_empty_subset(1.0) + model.add_element_to_last_subset(3) + self.assertFalse(model.compute_feasibility()) + + def test_knights_cover_creation(self): + model = create_knights_cover_model(16, 16) + self.assertTrue(model.compute_feasibility()) + + def test_knights_cover_greedy(self): + model = create_knights_cover_model(16, 16) + self.assertTrue(model.compute_feasibility()) + inv = set_cover.SetCoverInvariant(model) + + greedy = set_cover.GreedySolutionGenerator(inv) + self.assertTrue(greedy.next_solution()) + self.assertTrue(inv.check_consistency()) + + steepest = set_cover.SteepestSearch(inv) + self.assertTrue(steepest.next_solution(500)) + self.assertTrue(inv.check_consistency()) + + def test_knights_cover_degree(self): + model = create_knights_cover_model(16, 16) + self.assertTrue(model.compute_feasibility()) + inv = set_cover.SetCoverInvariant(model) + + degree = set_cover.ElementDegreeSolutionGenerator(inv) + self.assertTrue(degree.next_solution()) + self.assertTrue(inv.check_consistency()) + + steepest = set_cover.SteepestSearch(inv) + self.assertTrue(steepest.next_solution(500)) + self.assertTrue(inv.check_consistency()) + + def test_knights_cover_gls(self): + model = create_knights_cover_model(16, 16) + self.assertTrue(model.compute_feasibility()) + inv = set_cover.SetCoverInvariant(model) + + greedy = set_cover.GreedySolutionGenerator(inv) + self.assertTrue(greedy.next_solution()) + self.assertTrue(inv.check_consistency()) + + gls = set_cover.GuidedLocalSearch(inv) + self.assertTrue(gls.next_solution(500)) + self.assertTrue(inv.check_consistency()) + + def test_knights_cover_random(self): + model = create_knights_cover_model(16, 16) + self.assertTrue(model.compute_feasibility()) + inv = set_cover.SetCoverInvariant(model) + + random = set_cover.RandomSolutionGenerator(inv) + self.assertTrue(random.next_solution()) + self.assertTrue(inv.check_consistency()) + + steepest = 
set_cover.SteepestSearch(inv) + self.assertTrue(steepest.next_solution(500)) + self.assertTrue(inv.check_consistency()) + + def test_knights_cover_trivial(self): + model = create_knights_cover_model(16, 16) + self.assertTrue(model.compute_feasibility()) + inv = set_cover.SetCoverInvariant(model) + + trivial = set_cover.TrivialSolutionGenerator(inv) + self.assertTrue(trivial.next_solution()) + self.assertTrue(inv.check_consistency()) + + steepest = set_cover.SteepestSearch(inv) + self.assertTrue(steepest.next_solution(500)) + self.assertTrue(inv.check_consistency()) + + # TODO(user): KnightsCoverGreedyAndTabu, KnightsCoverGreedyRandomClear, + # KnightsCoverElementDegreeRandomClear, KnightsCoverRandomClearMip, + # KnightsCoverMip + + +def main(_): + absltest.main() + + +if __name__ == "__main__": + app.run(main) diff --git a/ortools/algorithms/set_cover_heuristics.h b/ortools/algorithms/set_cover_heuristics.h index 70c2bc85ccc..04d3b31b9f8 100644 --- a/ortools/algorithms/set_cover_heuristics.h +++ b/ortools/algorithms/set_cover_heuristics.h @@ -174,7 +174,7 @@ class GreedySolutionGenerator { bool NextSolution(const std::vector& focus); // Same with a different set of costs. 
- bool NextSolution(const std ::vector& focus, + bool NextSolution(const std::vector& focus, const SubsetCostVector& costs); private: diff --git a/ortools/algorithms/set_cover_invariant.cc b/ortools/algorithms/set_cover_invariant.cc index 1999b663af2..b377c8ab4e6 100644 --- a/ortools/algorithms/set_cover_invariant.cc +++ b/ortools/algorithms/set_cover_invariant.cc @@ -19,6 +19,7 @@ #include #include "absl/log/check.h" +#include "absl/types/span.h" #include "ortools/algorithms/set_cover_model.h" #include "ortools/base/logging.h" @@ -64,7 +65,7 @@ void SetCoverInvariant::Initialize() { bool SetCoverInvariant::CheckConsistency() const { auto [cst, cvrg] = ComputeCostAndCoverage(is_selected_); CHECK_EQ(cost_, cst); - for (ElementIndex element : model_->ElementRange()) { + for (const ElementIndex element : model_->ElementRange()) { CHECK_EQ(cvrg[element], coverage_[element]); } auto [num_uncvrd_elts, num_free_elts] = @@ -112,7 +113,7 @@ std::tuple SetCoverInvariant::ComputeCostAndCoverage( for (SubsetIndex subset(0); bool b : choices) { if (b) { cst += subset_costs[subset]; - for (ElementIndex element : columns[subset]) { + for (const ElementIndex element : columns[subset]) { ++cvrg[element]; } } @@ -121,6 +122,19 @@ std::tuple SetCoverInvariant::ComputeCostAndCoverage( return {cst, cvrg}; } +ElementToIntVector SetCoverInvariant::ComputeCoverageInFocus( + const absl::Span focus) const { + ElementToIntVector coverage; + for (const SubsetIndex subset : focus) { + if (is_selected_[subset]) { + for (const ElementIndex element : model_->columns()[subset]) { + ++coverage[element]; + } + } + } + return coverage; +} + std::tuple SetCoverInvariant::ComputeNumUncoveredAndFreeElements( const ElementToIntVector& cvrg) const { @@ -139,7 +153,7 @@ SetCoverInvariant::ComputeNumUncoveredAndFreeElements( for (const ElementIndex element : model_->ElementRange()) { if (cvrg[element] >= 1) { --num_uncvrd_elts; - for (SubsetIndex subset : rows[element]) { + for (const SubsetIndex subset : 
rows[element]) { --num_free_elts[subset]; } } @@ -163,7 +177,7 @@ SetCoverInvariant::ComputeNumNonOvercoveredElementsAndIsRedundant( const SparseRowView& rows = model_->rows(); for (const ElementIndex element : model_->ElementRange()) { if (cvrg[element] >= 2) { - for (SubsetIndex subset : rows[element]) { + for (const SubsetIndex subset : rows[element]) { --num_cvrg_le_1_elts[subset]; if (num_cvrg_le_1_elts[subset] == 0) { is_rdndnt[subset] = true; diff --git a/ortools/algorithms/set_cover_invariant.h b/ortools/algorithms/set_cover_invariant.h index db6e2e8dda9..15d90f9a6a5 100644 --- a/ortools/algorithms/set_cover_invariant.h +++ b/ortools/algorithms/set_cover_invariant.h @@ -110,6 +110,11 @@ class SetCoverInvariant { // Returns vector containing number of subsets covering each element. const ElementToIntVector& coverage() const { return coverage_; } + // Returns a vector containing the number of subsets within `focus` covering + // each element. Subsets that are without `focus` are not considered. + ElementToIntVector ComputeCoverageInFocus( + absl::Span focus) const; + // Returns vector of Booleans telling whether each subset can be removed from // the solution. const SubsetBoolVector& is_redundant() const { return is_redundant_; } diff --git a/ortools/algorithms/set_cover_lagrangian.cc b/ortools/algorithms/set_cover_lagrangian.cc new file mode 100644 index 00000000000..18dea932dfd --- /dev/null +++ b/ortools/algorithms/set_cover_lagrangian.cc @@ -0,0 +1,520 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/algorithms/set_cover_lagrangian.h" + +#include +#include +#include +#include +#include + +#include "absl/log/check.h" +#include "ortools/algorithms/adjustable_k_ary_heap.h" +#include "ortools/algorithms/set_cover_invariant.h" +#include "ortools/algorithms/set_cover_model.h" +#include "ortools/base/threadpool.h" + +namespace operations_research { + +// Notes from a discussion with Luca Accorsi (accorsi@) and Francesco Cavaliere +// regarding [1]: +// - the 3-phase algorithm in the paper actually uses pricing (which would +// better be called "partial" pricing), +// - the columns that were used in the preceding solutions should be fixed, +// because otherwise it diversifies too much and degrades the best solution +// (under "queue" in the paper). +// - the median algorithm is already in the STL (nth_element). + +// Denoted as u in [1], it is a dual vector: a column vector of nonnegative +// (zero is included) multipliers for the different constraints. +// A deterministic way to compute a feasible (non-optimal) u: +// For all element indices i, u_i = min {j \in J_i} c_j / |I_j|, where +// |I_j| denotes the number of elements covered by subset j. +// +// Concerning the fundamental ideas behind this approach, one may refer to [2]. +ElementCostVector SetCoverLagrangian::InitializeLagrangeMultipliers() const { + ElementCostVector multipliers(model_.num_elements(), + std::numeric_limits::infinity()); + SubsetCostVector marginal_costs(model_.num_subsets()); + // TODO(user): Parallelize this. + for (const SubsetIndex subset : model_.SubsetRange()) { + marginal_costs[subset] = + model_.subset_costs()[subset] / model_.columns()[subset].size(); + } + // TODO(user): Parallelize this. + for (const ElementIndex element : model_.ElementRange()) { + // Minimum marginal cost to cover this element. 
+ Cost min_marginal_cost = std::numeric_limits::infinity(); + const SparseRowView& rows = model_.rows(); + // TODO(user): use std::min_element on rows[element] with a custom + // comparator that gets marginal_costs[subset]. Check performance. + for (const SubsetIndex subset : rows[element]) { + min_marginal_cost = std::min(min_marginal_cost, marginal_costs[subset]); + } + multipliers[element] = min_marginal_cost; + } + return multipliers; +} + +namespace { +// Computes the scalar product between a column and a vector of duals. +// Profiling has shown that this is where most of the time is spent. +// TODO(user): make this visible to other algorithms. +// TODO(user): Investigate. +Cost ScalarProduct(const SparseColumn& column, const ElementCostVector& dual) { + Cost result = 0.0; + for (ColumnEntryIndex pos(0); pos.value() < column.size(); ++pos) { + result += dual[column[pos]]; + } + return result; +} + +// Computes the reduced costs for a subset of subsets. +// This is a helper function for ParallelComputeReducedCosts(). +// It is called on a slice of subsets, defined by start and end. +// The reduced costs are computed using the multipliers vector. +// The columns of the subsets are given by the columns view. +// The result is stored in reduced_costs. +void FillReducedCostsSlice(SubsetIndex start, SubsetIndex end, + const SubsetCostVector& costs, + const ElementCostVector& multipliers, + const SparseColumnView& columns, + SubsetCostVector* reduced_costs) { + for (SubsetIndex subset = start; subset < end; ++subset) { + (*reduced_costs)[subset] = + costs[subset] - ScalarProduct(columns[subset], multipliers); + } +} +} // namespace + +// Computes the reduced costs for all subsets in parallel using ThreadPool. 
+SubsetCostVector SetCoverLagrangian::ParallelComputeReducedCosts( + const SubsetCostVector& costs, const ElementCostVector& multipliers) const { + const SubsetIndex num_subsets(model_.num_subsets()); + // TODO(user): compute a close-to-optimal k-subset partitioning. + const SubsetIndex block_size = + SubsetIndex(1) + num_subsets / num_threads_; // [***] Arbitrary choice. + const SparseColumnView& columns = model_.columns(); + SubsetCostVector reduced_costs(num_subsets); + ThreadPool thread_pool("ParallelComputeReducedCosts", num_threads_); + thread_pool.StartWorkers(); + { + // TODO(user): check how costly it is to create a new ThreadPool. + // TODO(user): use a queue of subsets to process? instead of a fixed range. + + // This parallelization is not very efficient, because all the threads + // use the same costs vector. Maybe it should be local to the thread. + // It's unclear whether sharing columns and costs is better than having + // each thread use its own partial copy. + // Finally, it might be better to use a queue of subsets to process, instead + // of a fixed range. + for (SubsetIndex start(0); start < num_subsets; start += block_size) { + thread_pool.Schedule([start, block_size, num_subsets, &costs, + &multipliers, &columns, &reduced_costs]() { + const SubsetIndex end = std::min(start + block_size, num_subsets); + FillReducedCostsSlice(start, end, costs, multipliers, columns, + &reduced_costs); + }); + } + } // Synchronize all the threads. This is equivalent to a wait. + return reduced_costs; +} + +// Reduced cost (row vector). Denoted as c_j(u) in [1], right after equation +// (5). For a subset j, c_j(u) = c_j - sum_{i \in I_j}.u_i. I_j is the set of +// indices for elements in subset j. 
For a general Integer Program A.x <= b, +// this would be: +// c_j(u) = c_j - sum_{i \in I_j} a_{ij}.u_i +SubsetCostVector SetCoverLagrangian::ComputeReducedCosts( + const SubsetCostVector& costs, const ElementCostVector& multipliers) const { + const SparseColumnView& columns = model_.columns(); + SubsetCostVector reduced_costs(costs.size()); + FillReducedCostsSlice(SubsetIndex(0), SubsetIndex(reduced_costs.size()), + costs, multipliers, columns, &reduced_costs); + return reduced_costs; +} + +namespace { +// Helper function to compute the subgradient. +// It fills a slice of the subgradient vector from indices start to end. +// This is a helper function for ParallelComputeSubgradient(). +// The subgradient is computed using the reduced costs vector. +void FillSubgradientSlice(SubsetIndex start, SubsetIndex end, + const SparseColumnView& columns, + const SubsetCostVector& reduced_costs, + ElementCostVector* subgradient) { + for (SubsetIndex subset(start); subset < end; ++subset) { + if (reduced_costs[subset] < 0.0) { + for (const ElementIndex element : columns[subset]) { + (*subgradient)[element] -= 1.0; + } + } + } +} +} // namespace + +// Vector of primal slack variable. Denoted as s_i(u) in [1], equation (6). +// For all element indices i, s_i(u) = 1 - sum_{j \in J_i} x_j(u), +// where J_i denotes the set of indices of subsets j covering element i. +// For a general Integer Program A x <= b, the subgradient cost vector is +// defined as A x - b. See [2]. +ElementCostVector SetCoverLagrangian::ComputeSubgradient( + const SubsetCostVector& reduced_costs) const { + // NOTE(user): Should the initialization be done with coverage[element]? 
+ ElementCostVector subgradient(model_.num_elements(), 1.0); + FillSubgradientSlice(SubsetIndex(0), SubsetIndex(reduced_costs.size()), + model_.columns(), reduced_costs, &subgradient); + return subgradient; +} + +ElementCostVector SetCoverLagrangian::ParallelComputeSubgradient( + const SubsetCostVector& reduced_costs) const { + const SubsetIndex num_subsets(model_.num_subsets()); + const SubsetIndex block_size = + SubsetIndex(1) + num_subsets / num_threads_; // [***] + const SparseColumnView& columns = model_.columns(); + ElementCostVector subgradient(model_.num_elements(), 1.0); + // The subgradient has one component per element, each thread processes + // several subsets. Hence, have one vector per thread to avoid data races. + // TODO(user): it may be better to split the elements among the threads, + // although this might be less well-balanced. + std::vector subgradients( + num_threads_, ElementCostVector(model_.num_elements())); + ThreadPool thread_pool("ParallelComputeSubgradient", num_threads_); + thread_pool.StartWorkers(); + { + int thread_index = 0; + for (SubsetIndex start(0); start < num_subsets; + start += block_size, ++thread_index) { + thread_pool.Schedule([start, block_size, num_subsets, &reduced_costs, + &columns, &subgradients, thread_index]() { + const SubsetIndex end = std::min(start + block_size, num_subsets); + FillSubgradientSlice(start, end, columns, reduced_costs, + &subgradients[thread_index]); + }); + } + } // Synchronize all the threads. + for (int thread_index = 0; thread_index < num_threads_; ++thread_index) { + for (const ElementIndex element : model_.ElementRange()) { + subgradient[element] += subgradients[thread_index][element]; + } + } + return subgradient; +} + +namespace { +// Helper function to compute the value of the Lagrangian. +// This is a helper function for ParallelComputeLagrangianValue(). +// It is called on a slice of elements, defined by start and end. 
+// The value of the Lagrangian is computed using the reduced costs vector and +// the multipliers vector. +// The result is stored in lagrangian_value. +void FillLagrangianValueSlice(SubsetIndex start, SubsetIndex end, + const SubsetCostVector& reduced_costs, + Cost* lagrangian_value) { + // This is min \sum_{j \in N} c_j(u) x_j. This captures the remark above (**), + // taking into account the possible values for x_j, and using them to minimize + // the terms. + for (SubsetIndex subset(start); subset < end; ++subset) { + if (reduced_costs[subset] < 0.0) { + *lagrangian_value += reduced_costs[subset]; + } + } +} +} // namespace + +// Compute the (scalar) value of the Lagrangian vector by fixing the value of +// x_j based on the sign of c_j(u). In [1] equation (4), it is: +// L(u) = min \sum_{j \in N} c_j(u) x_j + \sum{i \in M} u_i . This is obtained +// - if c_j(u) < 0: x_j(u) = 1, +// - if c_j(u) > 0: x_j(u) = 0, (**) +// - if c_j(u) = 0: x_j(u) is unbound, in {0, 1}, we use 0. +// For a general Integer Program A x <= b, the Lagrangian vector L(u) [2] is +// L(u) = min \sum_{j \in N} c_j(u) x_j + \sum{i \in M} b_i.u_i. +Cost SetCoverLagrangian::ComputeLagrangianValue( + const SubsetCostVector& reduced_costs, + const ElementCostVector& multipliers) const { + Cost lagrangian_value = 0.0; + // This is \sum{i \in M} u_i. + for (const Cost u : multipliers) { + lagrangian_value += u; + } + FillLagrangianValueSlice(SubsetIndex(0), SubsetIndex(reduced_costs.size()), + reduced_costs, &lagrangian_value); + return lagrangian_value; +} + +Cost SetCoverLagrangian::ParallelComputeLagrangianValue( + const SubsetCostVector& reduced_costs, + const ElementCostVector& multipliers) const { + const SubsetIndex num_subsets(model_.num_subsets()); + const SubsetIndex block_size = + SubsetIndex(1) + num_subsets / num_threads_; // [***] Arbitrary. + Cost lagrangian_value = 0.0; + // This is \sum{i \in M} u_i. 
+ + for (const Cost u : multipliers) { + lagrangian_value += u; + } + std::vector lagrangian_values(num_threads_, 0.0); + ThreadPool thread_pool("ParallelComputeLagrangianValue", num_threads_); + thread_pool.StartWorkers(); + { + int thread_index = 0; + for (SubsetIndex start(0); start < num_subsets; start += block_size) { + thread_pool.Schedule([start, block_size, num_subsets, thread_index, + &reduced_costs, &lagrangian_values]() { + const SubsetIndex end = std::min(start + block_size, num_subsets); + FillLagrangianValueSlice(start, end, reduced_costs, + &lagrangian_values[thread_index]); + }); + ++thread_index; + } + } // Synchronize all the threads. + for (const Cost l : lagrangian_values) { + lagrangian_value += l; + } + return lagrangian_value; +} + +// Perform a subgradient step. +// In the general case, for an Integer Program A.x <=b, the Lagrangian +// multipliers vector at step k+1 is defined as: u^{k+1} = u^k + t_k (A x^k - b) +// with term t_k = lambda_k * (UB - L(u^k)) / |A x^k - b|^2. +// |.| is the 2-norm (i.e. Euclidean) +// In our case, the problem A x <= b is in the form A x >= 1. We need to +// replace A x - b by s_i(u) = 1 - sum_{j \in J_i} x_j(u). +// |A x^k - b|^2 = |s(u)|^2, and t_k is of the form: +// t_k = lambda_k * (UB - L(u^k)) / |s^k(u)|^2. +// Now, the coordinates of the multipliers vectors u^k, u^k_i are nonnegative, +// i.e. u^k_i >= 0. Negative values are simply cut off. Following [3], each of +// the coordinates is defined as: u^{k+1}_i = +// max(u^k_i + lambda_k * (UB - L(u^k)) / |s^k(u)|^2 * s^k_i(u), 0). +// This is eq. (7) in [1]. +void SetCoverLagrangian::UpdateMultipliers( + double step_size, Cost lagrangian_value, Cost upper_bound, + const SubsetCostVector& reduced_costs, + ElementCostVector* multipliers) const { + // step_size is \lambda_k in [1]. + DCHECK_GT(step_size, 0); + // Compute the square of the Euclidean norm of the subgradient vector. 
+ const ElementCostVector subgradient = ComputeSubgradient(reduced_costs); + Cost subgradient_square_norm = 0.0; + for (const Cost x : subgradient) { + subgradient_square_norm += x * x; + } + // First compute lambda_k * (UB - L(u^k)). + const Cost factor = + step_size * (upper_bound - lagrangian_value) / subgradient_square_norm; + for (const ElementIndex element : model_.ElementRange()) { + // Avoid multipliers to go negative and to go over the roof. 1e6 chosen + // arbitrarily. [***] + (*multipliers)[element] = std::clamp( + (*multipliers)[element] + factor * subgradient[element], 0.0, 1e6); + } +} + +void SetCoverLagrangian::ParallelUpdateMultipliers( + double step_size, Cost lagrangian_value, Cost upper_bound, + const SubsetCostVector& reduced_costs, + ElementCostVector* multipliers) const { + // step_size is \lambda_k in [1]. + DCHECK_GT(step_size, 0); + // Compute the square of the Euclidean norm of the subgradient vector. + const ElementCostVector subgradient = + ParallelComputeSubgradient(reduced_costs); + Cost subgradient_square_norm = 0.0; + for (const Cost x : subgradient) { + subgradient_square_norm += x * x; + } + // First compute lambda_k * (UB - L(u^k)). + const Cost factor = + step_size * (upper_bound - lagrangian_value) / subgradient_square_norm; + for (const ElementIndex element : model_.ElementRange()) { + // Avoid multipliers to go negative and to go through the roof. 1e6 chosen + // arbitrarily. [***] + (*multipliers)[element] = std::clamp( + (*multipliers)[element] + factor * subgradient[element], 0.0, 1e6); + } +} + +Cost SetCoverLagrangian::ComputeGap( + const SubsetCostVector& reduced_costs, const SubsetBoolVector& solution, + const ElementCostVector& multipliers) const { + Cost gap = 0.0; + // TODO(user): Parallelize this, if need be. 
+ for (const SubsetIndex subset : model_.SubsetRange()) { + if (solution[subset] && reduced_costs[subset] > 0.0) { + gap += reduced_costs[subset]; + } else if (!solution[subset] && reduced_costs[subset] < 0.0) { + // gap += std::abs(reduced_costs[subset]); We know the sign of rhs. + gap -= reduced_costs[subset]; + } + } + const ElementToIntVector& coverage = inv_->coverage(); + for (const ElementIndex element : model_.ElementRange()) { + gap += (coverage[element] - 1) * multipliers[element]; + } + return gap; +} + +SubsetCostVector SetCoverLagrangian::ComputeDelta( + const SubsetCostVector& reduced_costs, + const ElementCostVector& multipliers) const { + SubsetCostVector delta(model_.num_subsets()); + const ElementToIntVector& coverage = inv_->coverage(); + // This is definition (9) in [1]. + const SparseColumnView& columns = model_.columns(); + // TODO(user): Parallelize this. + for (const SubsetIndex subset : model_.SubsetRange()) { + delta[subset] = std::max(reduced_costs[subset], 0.0); + for (const ElementIndex element : columns[subset]) { + const double size = coverage[element]; + delta[subset] += multipliers[element] * (size - 1.0) / size; + } + } + return delta; +} + +namespace { +// Helper class to compute the step size for the multipliers. +// The step size is updated every period iterations. +// The step size is updated by multiplying it by a factor 0.5 or 1.5 +// depending on whether the relative change in the lower bound is greater +// than 0.01 or less than 0.001, respectively. The lower bound is updated +// every period iterations. +class StepSizer { + public: + StepSizer(int period, double step_size) + : period_(period), iter_to_check_(period), step_size_(step_size) { + ResetBounds(); + } + double UpdateStepSize(int iter, Cost lower_bound) { + min_lb_ = std::min(min_lb_, lower_bound); + max_lb_ = std::max(max_lb_, lower_bound); + if (iter == iter_to_check_) { + iter_to_check_ += period_; + // Bounds can be negative, so we need to take the absolute value. 
+ // We also need to avoid division by zero. We decide to return 0.0 in + // that case, which is the same as not updating the step size. + const Cost lb_gap = + max_lb_ == 0.0 ? 0.0 : (max_lb_ - min_lb_) / std::abs(max_lb_); + DCHECK_GE(lb_gap, 0.0); + if (lb_gap > 0.01) { + step_size_ *= 0.5; + } else if (lb_gap <= 0.001) { + step_size_ *= 1.5; + } + step_size_ = std::clamp(step_size_, 1e-6, 10.0); + ResetBounds(); + } + return step_size_; + } + + private: + void ResetBounds() { + min_lb_ = std::numeric_limits::infinity(); + max_lb_ = -std::numeric_limits::infinity(); + } + int period_; + int iter_to_check_; + double step_size_; + Cost min_lb_; + Cost max_lb_; +}; + +// Helper class to decide whether to stop the algorithm. +// The algorithm stops when the lower bound is not updated for a certain +// number of iterations. +class Stopper { + public: + explicit Stopper(int period) + : period_(period), + iter_to_check_(period), + lower_bound_(std::numeric_limits::min()) {} + bool DecideWhetherToStop(int iter, Cost lb) { + DCHECK_GE(lb, lower_bound_); + if (iter == iter_to_check_) { + iter_to_check_ += period_; + const Cost delta = lb - lower_bound_; + const Cost relative_delta = delta / lb; + DCHECK_GE(delta, 0.0); + DCHECK_GE(relative_delta, 0.0); + lower_bound_ = lb; + return relative_delta < 0.001 && delta < 1; + } + return false; + } + + private: + int period_; + int iter_to_check_; + Cost lower_bound_; +}; + +} // namespace + +namespace { +// TODO(user): Add this to the file defining AdjustableKAryHeap. 
+template +class TopKHeap { + public: + explicit TopKHeap(int max_size) : heap_(), max_size_(max_size) {} + void Clear() { heap_.Clear(); } + void Add(Priority priority, Index index) { + if (heap_.Size() < max_size_) { + heap_.Add(priority, index); + } else if (heap_.HasPriority(priority, heap_.TopPriority())) { + heap_.ReplaceTop(priority, index); + } + } + + private: + AdjustableKAryHeap heap_; + int max_size_; +}; +} // namespace + +// Computes a lower bound on the optimal cost. +std::tuple +SetCoverLagrangian::ComputeLowerBound(const SubsetCostVector& costs, + Cost upper_bound) { + Cost lower_bound = 0.0; + ElementCostVector multipliers = InitializeLagrangeMultipliers(); + double step_size = 0.1; // [***] arbitrary, from [1]. + StepSizer step_sizer(20, step_size); // [***] arbitrary, from [1]. + Stopper stopper(100); // [***] arbitrary, from [1]. + SubsetCostVector reduced_costs(costs); + // For the time being, 4 threads seems to be the fastest. + // Running linux perf of the process shows that up to 60% of the cycles are + // lost as idle cycles in the CPU backend, probably because the algorithm is + // memory bound. + for (int iter = 0; iter < 1000; ++iter) { + reduced_costs = ParallelComputeReducedCosts(costs, multipliers); + const Cost lagrangian_value = + ComputeLagrangianValue(reduced_costs, multipliers); + UpdateMultipliers(step_size, lagrangian_value, upper_bound, reduced_costs, + &multipliers); + lower_bound = std::max(lower_bound, lagrangian_value); + // step_size should be updated like this. 
For the time being, we keep the + // step size, because the implementation of the rest is not adequate yet + // step_size = step_sizer.UpdateStepSize(iter, lagrangian_value); + // if (stopper.DecideWhetherToStop(iter, lower_bound)) { + // break; + // } + } + return std::make_tuple(lower_bound, reduced_costs, multipliers); +} + +} // namespace operations_research diff --git a/ortools/algorithms/set_cover_lagrangian.h b/ortools/algorithms/set_cover_lagrangian.h new file mode 100644 index 00000000000..aa63627ad15 --- /dev/null +++ b/ortools/algorithms/set_cover_lagrangian.h @@ -0,0 +1,154 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OR_TOOLS_ALGORITHMS_SET_COVER_LAGRANGIAN_H_ +#define OR_TOOLS_ALGORITHMS_SET_COVER_LAGRANGIAN_H_ + +#include +#include +#include + +#include "ortools/algorithms/set_cover_invariant.h" +#include "ortools/algorithms/set_cover_model.h" +#include "ortools/base/threadpool.h" + +namespace operations_research { + +// The SetCoverLagrangian class implements the Lagrangian relaxation of the set +// cover problem. + +// In the following, we refer to the following articles: +// [1] Caprara, Alberto, Matteo Fischetti, and Paolo Toth. 1999. “A Heuristic +// Method for the Set Covering Problem.” Operations Research 47 (5): 730–43. +// https://www.jstor.org/stable/223097 +// [2] Fisher, Marshall L. 1981. 
“The Lagrangian Relaxation Method for Solving +// Integer Programming Problems.” Management Science 27 (1): 1–18. +// https://www.jstor.org/stable/2631139 +// [3] Held, M., Karp, R.M. The traveling-salesman problem and minimum spanning +// trees: Part II. Mathematical Programming 1, 6–25 (1971). +// https://link.springer.com/article/10.1007/BF01584070 +// [4] Williamson, David P. 2002. “The Primal-Dual Method for Approximation +// Algorithms.” Mathematical Programming, 91 (3): 447–78. +// https://link.springer.com/article/10.1007/s101070100262 + +class SetCoverLagrangian { + public: + explicit SetCoverLagrangian(SetCoverInvariant* inv, int num_threads = 1) + : inv_(inv), model_(*inv->model()), num_threads_(num_threads) {} + + // Returns true if a solution was found. + // TODO(user): Add time-outs and exit with a partial solution. This seems + // unlikely, though. + bool NextSolution(); + + // Computes the next partial solution considering only the subsets whose + // indices are in focus. + bool NextSolution(const std::vector& focus); + + // Initializes the multipliers vector (u) based on the cost per subset. + ElementCostVector InitializeLagrangeMultipliers() const; + + // Computes the Lagrangian (row-)cost vector. + // For a subset j, c_j(u) = c_j - sum_{i \in I_j} u_i. + // I_j denotes the indices of elements present in subset j. + SubsetCostVector ComputeReducedCosts( + const SubsetCostVector& costs, + const ElementCostVector& multipliers) const; + + // Same as above, but parallelized, using the number of threads specified in + // the constructor. + SubsetCostVector ParallelComputeReducedCosts( + const SubsetCostVector& costs, + const ElementCostVector& multipliers) const; + + // Computes the subgradient (column-)cost vector. + // For all element indices i, s_i(u) = 1 - sum_{j \in J_i} x_j(u), + // where J_i denotes the set of indices of subsets j covering element i. 
+ ElementCostVector ComputeSubgradient( + const SubsetCostVector& reduced_costs) const; + + // Same as above, but parallelized, using the number of threads specified in + // the constructor. + ElementCostVector ParallelComputeSubgradient( + const SubsetCostVector& reduced_costs) const; + + // Computes the value of the Lagrangian L(u). + // L(u) = min \sum_{j \in N} c_j(u) x_j + \sum{i \in M} u_i. + // If c_j(u) < 0: x_j(u) = 1, if c_j(u) > 0: x_j(u) = 0, + // otherwise x_j(u) is unbound, it can take any value in {0, 1}. + Cost ComputeLagrangianValue(const SubsetCostVector& reduced_costs, + const ElementCostVector& multipliers) const; + + // Same as above, but parallelized, using the number of threads specified in + // the constructor. + Cost ParallelComputeLagrangianValue( + const SubsetCostVector& reduced_costs, + const ElementCostVector& multipliers) const; + + // Updates the multipliers vector (u) with the given step size. + // Following [3], each of the coordinates is defined as: u^{k+1}_i = + // max(u^k_i + lambda_k * (UB - L(u^k)) / |s^k(u)|^2 * s^k_i(u), 0). + // lambda_k is step_size in the function signature below. UB is upper_bound. + void UpdateMultipliers(double step_size, Cost lagrangian_value, + Cost upper_bound, + const SubsetCostVector& reduced_costs, + ElementCostVector* multipliers) const; + + // Same as above, but parallelized, using the number of threads specified in + // the constructor. + void ParallelUpdateMultipliers(double step_size, Cost lagrangian_value, + Cost upper_bound, + const SubsetCostVector& reduced_costs, + ElementCostVector* multipliers) const; + + // Computes the gap between the current solution and the optimal solution. + // This is the sum of the multipliers for the elements that are not covered + // by the current solution. + Cost ComputeGap(const SubsetCostVector& reduced_costs, + const SubsetBoolVector& solution, + const ElementCostVector& multipliers) const; + + // Performs the three-phase algorithm. 
+ void ThreePhase(Cost upper_bound); + + // Computes a lower bound on the optimal cost. + // The returned value is the lower bound, the reduced costs, and the + // multipliers. + std::tuple ComputeLowerBound( + const SubsetCostVector& costs, Cost upper_bound); + + private: + // The invariant on which the algorithm will run. + SetCoverInvariant* inv_; + + // The model on which the invariant is defined. + const SetCoverModel& model_; + + // The number of threads to use for parallelization. + int num_threads_; + + // Total (scalar) Lagrangian cost. + Cost lagrangian_; + + // Lagrangian cost vector, per subset. + SubsetCostVector lagrangians_; + + // Computes the delta vector. + // This is definition (9) in [1]. + SubsetCostVector ComputeDelta(const SubsetCostVector& reduced_costs, + const ElementCostVector& multipliers) const; +}; + +} // namespace operations_research + +#endif // OR_TOOLS_ALGORITHMS_SET_COVER_LAGRANGIAN_H_ diff --git a/ortools/algorithms/set_cover_mip.cc b/ortools/algorithms/set_cover_mip.cc index ebf12445dab..03e38d5fde8 100644 --- a/ortools/algorithms/set_cover_mip.cc +++ b/ortools/algorithms/set_cover_mip.cc @@ -25,6 +25,18 @@ namespace operations_research { +namespace { +// Returns the vector a - b. 
+ElementToIntVector Subtract(const ElementToIntVector& a, + const ElementToIntVector& b) { + ElementToIntVector delta; + for (const ElementIndex i : a.index_range()) { + delta[i] = a[i] - b[i]; + } + return delta; +} +} // namespace + template using StrictVector = glop::StrictITIVector; @@ -88,11 +100,16 @@ bool SetCoverMip::NextSolution(absl::Span focus, StrictVector constraints(num_elements, nullptr); StrictVector vars(num_subsets, nullptr); + ElementToIntVector coverage_outside_focus = + Subtract(inv_->coverage(), inv_->ComputeCoverageInFocus(focus)); for (const SubsetIndex subset : focus) { vars[subset] = solver.MakeVar(0, 1, use_integers, ""); objective->SetCoefficient(vars[subset], model->subset_costs()[subset]); - for (ElementIndex element : model->columns()[subset]) { - if (inv_->coverage()[element] > 0) continue; + for (const ElementIndex element : model->columns()[subset]) { + // The model should only contain elements that are not forcibly covered by + // subsets outside the focus. + if (coverage_outside_focus[element] == 0) continue; + if (constraints[element] == nullptr) { constexpr double kInfinity = std::numeric_limits::infinity(); constraints[element] = solver.MakeRowConstraint(1.0, kInfinity); diff --git a/ortools/algorithms/set_cover_model.cc b/ortools/algorithms/set_cover_model.cc index 6164d4b07c0..f7b5faf24c4 100644 --- a/ortools/algorithms/set_cover_model.cc +++ b/ortools/algorithms/set_cover_model.cc @@ -120,7 +120,7 @@ void SetCoverModel::CreateSparseRowView() { } rows_.resize(num_elements_, SparseRow()); ElementToIntVector row_sizes(num_elements_, 0); - for (SubsetIndex subset : SubsetRange()) { + for (const SubsetIndex subset : SubsetRange()) { // Sort the columns. It's not super-critical to improve performance here as // this needs to be done only once. 
std::sort(columns_[subset].begin(), columns_[subset].end()); @@ -128,10 +128,10 @@ void SetCoverModel::CreateSparseRowView() { ++row_sizes[element]; } } - for (ElementIndex element : ElementRange()) { + for (const ElementIndex element : ElementRange()) { rows_[element].reserve(RowEntryIndex(row_sizes[element])); } - for (SubsetIndex subset : SubsetRange()) { + for (const SubsetIndex subset : SubsetRange()) { for (const ElementIndex element : columns_[subset]) { rows_[element].push_back(subset); } @@ -192,7 +192,7 @@ void SetCoverModel::ImportModelFromProto(const SetCoverProto& message) { if (subset_proto.element_size() > 0) { columns_[subset_index].reserve( ColumnEntryIndex(subset_proto.element_size())); - for (BaseInt element : subset_proto.element()) { + for (const BaseInt element : subset_proto.element()) { columns_[subset_index].push_back(ElementIndex(element)); num_elements_ = std::max(num_elements_, element + 1); } diff --git a/ortools/algorithms/set_cover_model.h b/ortools/algorithms/set_cover_model.h index b46b2069b9b..fa3f55430b8 100644 --- a/ortools/algorithms/set_cover_model.h +++ b/ortools/algorithms/set_cover_model.h @@ -202,7 +202,7 @@ class SetCoverModel { bool ComputeFeasibility() const; // Reserves num_subsets columns in the model. - void ReserveNumSubsets(BaseInt number_of_subsets); + void ReserveNumSubsets(BaseInt num_subsets); void ReserveNumSubsets(SubsetIndex num_subsets); // Reserves num_elements rows in the column indexed by subset. 
diff --git a/ortools/algorithms/set_cover_orlib_test.cc b/ortools/algorithms/set_cover_orlib_test.cc index 604408513f9..8dd153e44bb 100644 --- a/ortools/algorithms/set_cover_orlib_test.cc +++ b/ortools/algorithms/set_cover_orlib_test.cc @@ -22,6 +22,7 @@ #include "gtest/gtest.h" #include "ortools/algorithms/set_cover_heuristics.h" #include "ortools/algorithms/set_cover_invariant.h" +#include "ortools/algorithms/set_cover_lagrangian.h" #include "ortools/algorithms/set_cover_mip.h" #include "ortools/algorithms/set_cover_model.h" #include "ortools/algorithms/set_cover_reader.h" @@ -112,26 +113,25 @@ SetCoverInvariant RunElementDegreeGreedyAndSteepest(std::string name, return inv; } -SetCoverInvariant IterateClearAndMip(std::string name, SetCoverInvariant inv) { +void IterateClearAndMip(std::string name, SetCoverInvariant* inv) { WallTimer timer; timer.Start(); - std::vector focus = inv.model()->all_subsets(); - double best_cost = inv.cost(); - SubsetBoolVector best_choices = inv.is_selected(); + std::vector focus = inv->model()->all_subsets(); + double best_cost = inv->cost(); + SubsetBoolVector best_choices = inv->is_selected(); for (int i = 0; i < 10; ++i) { std::vector range = - ClearMostCoveredElements(std::min(100UL, focus.size()), &inv); - SetCoverMip mip(&inv); + ClearMostCoveredElements(std::min(100UL, focus.size()), inv); + SetCoverMip mip(inv); mip.NextSolution(range, true, 0.02); - DCHECK(inv.CheckConsistency()); - if (inv.cost() < best_cost) { - best_cost = inv.cost(); - best_choices = inv.is_selected(); + DCHECK(inv->CheckConsistency()); + if (inv->cost() < best_cost) { + best_cost = inv->cost(); + best_choices = inv->is_selected(); } } timer.Stop(); LogCostAndTiming(name, "IterateClearAndMip", best_cost, timer.GetDuration()); - return inv; } SetCoverInvariant ComputeLPLowerBound(std::string name, SetCoverModel* model) { @@ -145,6 +145,17 @@ SetCoverInvariant ComputeLPLowerBound(std::string name, SetCoverModel* model) { return inv; } +void 
ComputeLagrangianLowerBound(std::string name, SetCoverInvariant* inv) { + const SetCoverModel* model = inv->model(); + WallTimer timer; + timer.Start(); + SetCoverLagrangian lagrangian(inv, /*num_threads=*/4); + const auto [lower_bound, reduced_costs, multipliers] = + lagrangian.ComputeLowerBound(model->subset_costs(), inv->cost()); + LogCostAndTiming(name, "LagrangianLowerBound", lower_bound, + timer.GetDuration()); +} + SetCoverInvariant RunMip(std::string name, SetCoverModel* model) { SetCoverInvariant inv(model); WallTimer timer; @@ -156,29 +167,28 @@ SetCoverInvariant RunMip(std::string name, SetCoverModel* model) { return inv; } -SetCoverInvariant IterateClearElementDegreeAndSteepest(std::string name, - SetCoverInvariant inv) { +void IterateClearElementDegreeAndSteepest(std::string name, + SetCoverInvariant* inv) { WallTimer timer; timer.Start(); - double best_cost = inv.cost(); - SubsetBoolVector best_choices = inv.is_selected(); - ElementDegreeSolutionGenerator element_degree(&inv); - SteepestSearch steepest(&inv); + double best_cost = inv->cost(); + SubsetBoolVector best_choices = inv->is_selected(); + ElementDegreeSolutionGenerator element_degree(inv); + SteepestSearch steepest(inv); for (int i = 0; i < 1000; ++i) { std::vector range = - ClearRandomSubsets(0.1 * inv.trace().size(), &inv); + ClearRandomSubsets(0.1 * inv->trace().size(), inv); CHECK(element_degree.NextSolution()); steepest.NextSolution(range, 100000); - DCHECK(inv.CheckConsistency()); - if (inv.cost() < best_cost) { - best_cost = inv.cost(); - best_choices = inv.is_selected(); + DCHECK(inv->CheckConsistency()); + if (inv->cost() < best_cost) { + best_cost = inv->cost(); + best_choices = inv->is_selected(); } } timer.Stop(); LogCostAndTiming(name, "IterateClearElementDegreeAndSteepest", best_cost, timer.GetDuration()); - return inv; } double RunSolver(std::string name, SetCoverModel* model) { @@ -186,12 +196,13 @@ double RunSolver(std::string name, SetCoverModel* model) { WallTimer 
global_timer; global_timer.Start(); RunChvatalAndSteepest(name, model); - // SetCoverInvariant inv = ComputeLPLowerBound(name, model); + // SetCoverInvariant inv = ComputeLPLowerBound(name, model); RunMip(name, model); RunChvatalAndGLS(name, model); SetCoverInvariant inv = RunElementDegreeGreedyAndSteepest(name, model); + ComputeLagrangianLowerBound(name, &inv); // IterateClearAndMip(name, inv); - IterateClearElementDegreeAndSteepest(name, inv); + IterateClearElementDegreeAndSteepest(name, &inv); return inv.cost(); } diff --git a/ortools/algorithms/set_cover_test.cc b/ortools/algorithms/set_cover_test.cc index 021a19f5af8..c1da8e0bdeb 100644 --- a/ortools/algorithms/set_cover_test.cc +++ b/ortools/algorithms/set_cover_test.cc @@ -94,7 +94,6 @@ TEST(SolutionProtoTest, SaveReloadTwice) { inv.ImportSolutionFromProto(greedy_proto); CHECK(steepest.NextSolution(500)); EXPECT_TRUE(inv.CheckConsistency()); - SetCoverSolutionResponse reloaded_proto = inv.ExportSolutionAsProto(); } TEST(SetCoverTest, InitialValues) { @@ -121,6 +120,7 @@ TEST(SetCoverTest, InitialValues) { LOG(INFO) << "GreedySolutionGenerator cost: " << inv.cost(); EXPECT_TRUE(inv.CheckConsistency()); + EXPECT_EQ(inv.num_uncovered_elements(), 0); SteepestSearch steepest(&inv); CHECK(steepest.NextSolution(500)); LOG(INFO) << "SteepestSearch cost: " << inv.cost(); @@ -367,8 +367,6 @@ TEST(SetCoverTest, KnightsCoverRandomClearMip) { #endif SetCoverModel model = CreateKnightsCoverModel(BoardSize, BoardSize); SetCoverInvariant inv(&model); - Cost best_cost = std::numeric_limits::max(); - SubsetBoolVector best_choices = inv.is_selected(); GreedySolutionGenerator greedy(&inv); CHECK(greedy.NextSolution()); LOG(INFO) << "GreedySolutionGenerator cost: " << inv.cost(); @@ -377,8 +375,8 @@ TEST(SetCoverTest, KnightsCoverRandomClearMip) { CHECK(steepest.NextSolution(10'000)); LOG(INFO) << "SteepestSearch cost: " << inv.cost(); - best_cost = inv.cost(); - best_choices = inv.is_selected(); + Cost best_cost = inv.cost(); + 
SubsetBoolVector best_choices = inv.is_selected(); for (int i = 0; i < 100; ++i) { inv.LoadSolution(best_choices); auto focus = ClearRandomSubsets(0.1 * model.num_subsets(), &inv); diff --git a/ortools/python/setup.py.in b/ortools/python/setup.py.in index 23d0ed4f85b..d750714e21d 100644 --- a/ortools/python/setup.py.in +++ b/ortools/python/setup.py.in @@ -57,6 +57,7 @@ setup( ], '@PYTHON_PROJECT@.algorithms.python':[ '$', + '$', '*.pyi' ], '@PYTHON_PROJECT@.bop':['*.pyi'], From 03fa7c68d93109f7abc40d798c55e9e186f57068 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 6 Sep 2024 08:48:07 +0200 Subject: [PATCH 08/12] tools: Update bootlin cross toolchain and QEMU --- tools/cross_compile.sh | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tools/cross_compile.sh b/tools/cross_compile.sh index 2325fe923e7..d8bb735f5df 100755 --- a/tools/cross_compile.sh +++ b/tools/cross_compile.sh @@ -46,7 +46,7 @@ function install_qemu() { >&2 echo 'QEMU is disabled !' 
return 0 fi - local -r QEMU_VERSION=${QEMU_VERSION:=9.0.1} + local -r QEMU_VERSION=${QEMU_VERSION:=9.0.2} local -r QEMU_TARGET=${QEMU_ARCH}-linux-user if echo "${QEMU_VERSION} ${QEMU_TARGET}" | cmp --silent "${QEMU_INSTALL}/.build" -; then @@ -108,38 +108,38 @@ function expand_bootlin_config() { # ref: https://toolchains.bootlin.com/ case "${TARGET}" in "arm64" | "aarch64") - local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/aarch64/tarballs/aarch64--glibc--stable-2024.02-1.tar.bz2" + local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/aarch64/tarballs/aarch64--glibc--stable-2024.05-1.tar.xz" local -r GCC_PREFIX="aarch64" local -r GCC_SUFFIX="" ;; "arm64be" | "aarch64be") - local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/aarch64be/tarballs/aarch64be--glibc--stable-2024.02-1.tar.bz2" + local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/aarch64be/tarballs/aarch64be--glibc--stable-2024.05-1.tar.xz" local -r GCC_PREFIX="aarch64_be" local -r GCC_SUFFIX="" ;; "ppc" | "ppc-440fp") - local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/powerpc-440fp/tarballs/powerpc-440fp--glibc--stable-2024.02-1.tar.bz2" + local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/powerpc-440fp/tarballs/powerpc-440fp--glibc--stable-2024.05-1.tar.xz" local -r GCC_PREFIX="powerpc" local -r GCC_SUFFIX="" ;; "ppc-e500mc") - local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/powerpc-e500mc/tarballs/powerpc-e500mc--glibc--stable-2024.02-1.tar.bz2" + local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/powerpc-e500mc/tarballs/powerpc-e500mc--glibc--stable-2024.05-1.tar.xz" local -r GCC_PREFIX="powerpc" local -r GCC_SUFFIX="" QEMU_ARGS+=( -cpu "e500mc" ) ;; "ppc64" | "ppc64-power8") - local -r 
TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/powerpc64-power8/tarballs/powerpc64-power8--glibc--stable-2024.02-1.tar.bz2" + local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/powerpc64-power8/tarballs/powerpc64-power8--glibc--stable-2024.05-1.tar.xz" local -r GCC_PREFIX="powerpc64" local -r GCC_SUFFIX="" ;; "ppc64le" | "ppc64le-power8") - local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/powerpc64le-power8/tarballs/powerpc64le-power8--glibc--stable-2024.02-1.tar.bz2" + local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/powerpc64le-power8/tarballs/powerpc64le-power8--glibc--stable-2024.05-1.tar.xz" local -r GCC_PREFIX="powerpc64le" local -r GCC_SUFFIX="" ;; "riscv64") - local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/riscv64-lp64d/tarballs/riscv64-lp64d--glibc--stable-2024.02-1.tar.bz2" + local -r TOOLCHAIN_URL="https://toolchains.bootlin.com/downloads/releases/toolchains/riscv64-lp64d/tarballs/riscv64-lp64d--glibc--stable-2024.05-1.tar.xz" local -r GCC_PREFIX="riscv64" local -r GCC_SUFFIX="" ;; @@ -150,7 +150,7 @@ function expand_bootlin_config() { local -r TOOLCHAIN_RELATIVE_DIR="${TARGET}" unpack "${TOOLCHAIN_URL}" "${TOOLCHAIN_RELATIVE_DIR}" - local -r EXTRACT_DIR="${ARCHIVE_DIR}/$(basename ${TOOLCHAIN_URL%.tar.bz2})" + local -r EXTRACT_DIR="${ARCHIVE_DIR}/$(basename ${TOOLCHAIN_URL%.tar.xz})" local -r TOOLCHAIN_DIR=${ARCHIVE_DIR}/${TOOLCHAIN_RELATIVE_DIR} if [[ -d "${EXTRACT_DIR}" ]]; then @@ -419,6 +419,7 @@ function main() { declare -a CMAKE_ADDITIONAL_ARGS=() declare -a QEMU_ARGS=() + # ref: https://go.dev/doc/install/source#environment case ${TARGET} in x86_64) declare -r QEMU_ARCH=x86_64 ;; From 514bd38e2702f28323017cabd071c47a20407399 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 6 Sep 2024 08:49:19 +0200 Subject: [PATCH 09/12] tools/docker: remove centos-7 --- 
tools/docker/Makefile | 6 +- tools/docker/images/centos-7.Dockerfile | 136 ------------------- tools/docker/test/centos-7/cpp.Dockerfile | 34 ----- tools/docker/test/centos-7/dotnet.Dockerfile | 26 ---- tools/docker/test/centos-7/java.Dockerfile | 23 ---- tools/docker/test/centos-7/python.Dockerfile | 21 --- 6 files changed, 2 insertions(+), 244 deletions(-) delete mode 100644 tools/docker/images/centos-7.Dockerfile delete mode 100644 tools/docker/test/centos-7/cpp.Dockerfile delete mode 100644 tools/docker/test/centos-7/dotnet.Dockerfile delete mode 100644 tools/docker/test/centos-7/java.Dockerfile delete mode 100644 tools/docker/test/centos-7/python.Dockerfile diff --git a/tools/docker/Makefile b/tools/docker/Makefile index 346f38e1552..374cd2167e0 100644 --- a/tools/docker/Makefile +++ b/tools/docker/Makefile @@ -92,7 +92,6 @@ help: @echo -e "\t\t${BOLD}almalinux-9${RESET} (latest)" @echo -e "\t\t${BOLD}alpine-edge${RESET} (latest)" @echo -e "\t\t${BOLD}archlinux${RESET} (latest)" - @echo -e "\t\t${BOLD}centos-7${RESET} (Centos 7 LTS)" @echo -e "\t\t${BOLD}debian-sid${RESET} (unstable)" @echo -e "\t\t${BOLD}debian-13${RESET} (Trixie)" @echo -e "\t\t${BOLD}debian-12${RESET} (Bookworm)" @@ -129,8 +128,8 @@ help: @echo -e "\te.g. 'make amd64_ubuntu-22.04_cpp_test'" @echo @echo -e "\tnote: Few custom merge targets (e.g. ${BOLD}export${RESET}) are also provided." - @echo -e "\te.g. 'make amd64_centos-7_export'" - @echo -e "\te.g. 'make arm64_centos-7_export'" + @echo -e "\te.g. 'make amd64_almalinux-9_export'" + @echo -e "\te.g. 'make arm64_almalinux-9_export'" @echo @echo -e "\t${BOLD}NOCACHE=1${RESET}: use 'docker build --no-cache' when building container (default use cache)." 
@echo @@ -407,7 +406,6 @@ DISTROS := \ almalinux-9 \ alpine-edge \ archlinux \ - centos-7 \ debian-11 debian-12 debian-13 debian-sid \ fedora-37 fedora-38 fedora-39 \ opensuse-leap \ diff --git a/tools/docker/images/centos-7.Dockerfile b/tools/docker/images/centos-7.Dockerfile deleted file mode 100644 index 0a98337c51c..00000000000 --- a/tools/docker/images/centos-7.Dockerfile +++ /dev/null @@ -1,136 +0,0 @@ -# ref: https://hub.docker.com/_/centos -FROM centos:7 AS env - -############# -## SETUP ## -############# -RUN yum -y update \ -&& yum -y groupinstall 'Development Tools' \ -&& yum -y install \ - wget curl \ - pcre2-devel openssl redhat-lsb-core \ - pkgconfig autoconf libtool zlib-devel which \ -&& yum clean all \ -&& rm -rf /var/cache/yum - -# Bump to gcc-11 -RUN yum -y update \ -&& yum -y install centos-release-scl \ -&& yum -y install devtoolset-11 \ -&& yum clean all \ -&& echo "source /opt/rh/devtoolset-11/enable" >> /etc/bashrc -SHELL ["/usr/bin/bash", "--login", "-c"] -ENTRYPOINT ["/usr/bin/bash", "--login", "-c"] -CMD ["/usr/bin/bash", "--login"] -# RUN g++ --version - -# Install CMake 3.28.3 -RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ -&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ --skip-license \ -&& rm cmake-3.28.3-linux-${ARCH}.sh - -# Install SWIG 4.2.1 -RUN curl --location-trusted \ - --remote-name "https://downloads.sourceforge.net/project/swig/swig/swig-4.2.1/swig-4.2.1.tar.gz" \ - -o swig-4.2.1.tar.gz \ -&& tar xvf swig-4.2.1.tar.gz \ -&& rm swig-4.2.1.tar.gz \ -&& cd swig-4.2.1 \ -&& ./configure --prefix=/usr \ -&& make -j 4 \ -&& make install \ -&& cd .. 
\ -&& rm -rf swig-4.2.1 - -# Install .Net -# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-centos#centos-7- -RUN rpm -Uvh https://packages.microsoft.com/config/centos/7/packages-microsoft-prod.rpm \ -&& yum -y update \ -&& yum -y install dotnet-sdk-3.1 dotnet-sdk-6.0 \ -&& yum clean all \ -&& rm -rf /var/cache/yum -# Trigger first run experience by running arbitrary cmd -RUN dotnet --info - -# Install Java 8 SDK -RUN yum -y update \ -&& yum -y install java-1.8.0-openjdk java-1.8.0-openjdk-devel maven \ -&& yum clean all \ -&& rm -rf /var/cache/yum -ENV JAVA_HOME=/usr/lib/jvm/java - -# Install Python -RUN yum -y update \ -&& yum -y install \ - rh-python38-python rh-python38-python-devel \ - rh-python38-python-pip rh-python38-python-numpy \ -&& yum clean all \ -&& rm -rf /var/cache/yum \ -&& echo "source /opt/rh/rh-python38/enable" >> /etc/bashrc -RUN python -m pip install absl-py mypy mypy-protobuf - -ENV TZ=America/Los_Angeles -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone - -################ -## OR-TOOLS ## -################ -FROM env AS devel -WORKDIR /root -# Copy the snk key -COPY or-tools.snk /root/or-tools.snk -ENV DOTNET_SNK=/root/or-tools.snk - -ARG SRC_GIT_BRANCH -ENV SRC_GIT_BRANCH ${SRC_GIT_BRANCH:-main} -ARG SRC_GIT_SHA1 -ENV SRC_GIT_SHA1 ${SRC_GIT_SHA1:-unknown} - -ARG OR_TOOLS_PATCH -ENV OR_TOOLS_PATCH ${OR_TOOLS_PATCH:-9999} - -# Download sources -# use SRC_GIT_SHA1 to modify the command -# i.e. 
avoid docker reusing the cache when new commit is pushed -RUN git clone -b "${SRC_GIT_BRANCH}" --single-branch --depth=1 https://github.com/google/or-tools \ -&& [[ $(cd or-tools && git rev-parse --verify HEAD) == ${SRC_GIT_SHA1} ]] -WORKDIR /root/or-tools - -# C++ -## build -FROM devel AS cpp_build -RUN make detect_cpp \ -&& make cpp JOBS=8 -## archive -FROM cpp_build AS cpp_archive -RUN make archive_cpp - -# .Net -## build -FROM cpp_build AS dotnet_build -ENV USE_DOTNET_CORE_31=ON -RUN make detect_dotnet \ -&& make dotnet JOBS=8 -## archive -FROM dotnet_build AS dotnet_archive -RUN make archive_dotnet - -# Java -## build -FROM cpp_build AS java_build -RUN make detect_java \ -&& make java JOBS=8 -## archive -FROM java_build AS java_archive -RUN make archive_java - -# Python -## build -FROM cpp_build AS python_build -RUN make detect_python \ -&& make python JOBS=8 -## archive -FROM python_build AS python_archive -RUN make archive_python diff --git a/tools/docker/test/centos-7/cpp.Dockerfile b/tools/docker/test/centos-7/cpp.Dockerfile deleted file mode 100644 index 9e8e0903503..00000000000 --- a/tools/docker/test/centos-7/cpp.Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# ref: https://hub.docker.com/_/centos -FROM centos:7 - -############# -## SETUP ## -############# -RUN yum -y update \ -&& yum -y groupinstall 'Development Tools' \ -&& yum -y install zlib-devel \ -&& yum clean all \ -&& rm -rf /var/cache/yum - -# Bump to gcc-11 -RUN yum -y update \ -&& yum -y install centos-release-scl \ -&& yum -y install devtoolset-11 \ -&& yum clean all \ -&& echo "source /opt/rh/devtoolset-11/enable" >> /etc/bashrc -SHELL ["/bin/bash", "--login", "-c"] -ENTRYPOINT ["/usr/bin/bash", "--login", "-c"] -CMD ["/usr/bin/bash", "--login"] -# RUN g++ --version - -# Install CMake 3.28.3 -RUN ARCH=$(uname -m) \ -&& wget -q "https://cmake.org/files/v3.28/cmake-3.28.3-linux-${ARCH}.sh" \ -&& chmod a+x cmake-3.28.3-linux-${ARCH}.sh \ -&& ./cmake-3.28.3-linux-${ARCH}.sh --prefix=/usr/local/ 
--skip-license \ -&& rm cmake-3.28.3-linux-${ARCH}.sh - -WORKDIR /root -ADD or-tools_amd64_centos-7_cpp_v*.tar.gz . - -RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/centos-7/dotnet.Dockerfile b/tools/docker/test/centos-7/dotnet.Dockerfile deleted file mode 100644 index 649644e6cdd..00000000000 --- a/tools/docker/test/centos-7/dotnet.Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -# ref: https://hub.docker.com/_/centos -FROM centos:7 - -############# -## SETUP ## -############# -RUN yum -y update \ -&& yum -y groupinstall 'Development Tools' \ -&& yum -y install zlib-devel \ -&& yum clean all \ -&& rm -rf /var/cache/yum - -# Install .Net -# see https://docs.microsoft.com/en-us/dotnet/core/install/linux-centos#centos-7- -RUN rpm -Uvh https://packages.microsoft.com/config/centos/7/packages-microsoft-prod.rpm \ -&& yum -y update \ -&& yum -y install dotnet-sdk-3.1 dotnet-sdk-6.0 \ -&& yum clean all \ -&& rm -rf /var/cache/yum -# Trigger first run experience by running arbitrary cmd -RUN dotnet --info - -WORKDIR /root -ADD or-tools_amd64_centos-7_dotnet_v*.tar.gz . - -RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/centos-7/java.Dockerfile b/tools/docker/test/centos-7/java.Dockerfile deleted file mode 100644 index 70652890a86..00000000000 --- a/tools/docker/test/centos-7/java.Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -# ref: https://hub.docker.com/_/centos -FROM centos:7 - -############# -## SETUP ## -############# -RUN yum -y update \ -&& yum -y groupinstall 'Development Tools' \ -&& yum -y install zlib-devel \ -&& yum clean all \ -&& rm -rf /var/cache/yum - -# Install Java 8 SDK -RUN yum -y update \ -&& yum -y install java-1.8.0-openjdk java-1.8.0-openjdk-devel maven \ -&& yum clean all \ -&& rm -rf /var/cache/yum -ENV JAVA_HOME=/usr/lib/jvm/java - -WORKDIR /root -ADD or-tools_amd64_centos-7_java_v*.tar.gz . 
- -RUN cd or-tools_*_v* && make test diff --git a/tools/docker/test/centos-7/python.Dockerfile b/tools/docker/test/centos-7/python.Dockerfile deleted file mode 100644 index 9f2b6cd656a..00000000000 --- a/tools/docker/test/centos-7/python.Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -# ref: https://hub.docker.com/_/centos -FROM centos:7 - -############# -## SETUP ## -############# -RUN yum -y update \ -&& yum -y groupinstall 'Development Tools' \ -&& yum clean all \ -&& rm -rf /var/cache/yum - -# Install Python -RUN yum -y update \ -&& yum -y install python3 python3-devel python3-pip numpy \ -&& yum clean all \ -&& rm -rf /var/cache/yum - -WORKDIR /root -ADD or-tools_amd64_centos-7_python_v*.tar.gz . - -RUN cd or-tools_*_v* && make test From a61566af7921a9076bb43ba8e8f3a2f585c4d8e3 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 6 Sep 2024 08:50:40 +0200 Subject: [PATCH 10/12] tools/docker: Clean Makefile --- tools/docker/Makefile | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/tools/docker/Makefile b/tools/docker/Makefile index 374cd2167e0..2825c82952c 100644 --- a/tools/docker/Makefile +++ b/tools/docker/Makefile @@ -93,7 +93,7 @@ help: @echo -e "\t\t${BOLD}alpine-edge${RESET} (latest)" @echo -e "\t\t${BOLD}archlinux${RESET} (latest)" @echo -e "\t\t${BOLD}debian-sid${RESET} (unstable)" - @echo -e "\t\t${BOLD}debian-13${RESET} (Trixie)" +# @echo -e "\t\t${BOLD}debian-13${RESET} (Trixie)" @echo -e "\t\t${BOLD}debian-12${RESET} (Bookworm)" @echo -e "\t\t${BOLD}debian-11${RESET} (Bullseye)" # @echo -e "\t\t${BOLD}debian-10${RESET} (buster)" @@ -132,6 +132,7 @@ help: @echo -e "\te.g. 'make arm64_almalinux-9_export'" @echo @echo -e "\t${BOLD}NOCACHE=1${RESET}: use 'docker build --no-cache' when building container (default use cache)." + @echo -e "\t${BOLD}VERBOSE=1${RESET}: use 'docker build --progress=plain' when building container." 
@echo @echo -e "${BOLD}NOTES${RESET}" @echo -e "\tAll generated code will be located in the export/ folder, use target ${BOLD}distclean${RESET} to remove it." @@ -161,14 +162,17 @@ $(info version: ${OR_TOOLS_VERSION}) # Docker image name prefix. IMAGE := or-tools/docker -ifdef NOCACHE -DOCKER_BUILD_CMD := docker build --no-cache -DOCKER_BUILDX_CMD := docker buildx build --no-cache -else DOCKER_BUILD_CMD := docker build DOCKER_BUILDX_CMD := docker buildx build +ifdef NOCACHE +DOCKER_BUILD_CMD := ${DOCKER_BUILD_CMD} --no-cache +DOCKER_BUILDX_CMD := ${DOCKER_BUILDX_CMD} --no-cache +endif +ifdef VERBOSE +DOCKER_BUILD_CMD := ${DOCKER_BUILD_CMD} --progress=plain +DOCKER_BUILDX_CMD := ${DOCKER_BUILDX_CMD} --progress=plain endif -DOCKER_RUN_CMD := docker run --rm --init +DOCKER_RUN_CMD := docker run --rm --init --net=host ################# ### DELIVERY ## @@ -406,7 +410,7 @@ DISTROS := \ almalinux-9 \ alpine-edge \ archlinux \ - debian-11 debian-12 debian-13 debian-sid \ + debian-11 debian-12 debian-sid \ fedora-37 fedora-38 fedora-39 \ opensuse-leap \ rockylinux-9 \ @@ -440,7 +444,7 @@ cache/$1/$2/docker_$3.tar: $1_$2_$3 .PHONY: sh_$1_$2_$3 sh_$1_$2_$3: $1_$2_$3 | export ${DOCKER_RUN_CMD} \ - -v `pwd`/export:/export \ + -v $$$$(pwd)/export:/export \ -it \ --name ortools_$$< \ ${IMAGE}:$$< @@ -495,7 +499,7 @@ cache/$1/$2/docker_$3_$4.tar: $1_$2_$3_$4 .PHONY: sh_$1_$2_$3_$4 sh_$1_$2_$3_$4: $1_$2_$3_$4 | export ${DOCKER_RUN_CMD} \ - -v `pwd`/export:/export \ + -v $$$$(pwd)/export:/export \ -it \ --name ortools_$$< \ ${IMAGE}:$$< @@ -535,9 +539,9 @@ export/archives/or-tools_$1_$2_$3_v${OR_TOOLS_VERSION}.tar.gz: $1_$2_$3_archive -mkdir -p export/$1/$2 ${DOCKER_RUN_CMD} \ -w /root/or-tools \ - -v `pwd`/export:/export \ + -v $$$$(pwd)/export:/export \ ${IMAGE}:$$< \ - "cp or-tools_*_v*.tar.gz /export/$1/$2" + "cp or-tools_*_v*.tar.gz /export/$1/$2/" cp export/$1/$2/or-tools_*_$3*_v*.tar.gz $$@ .PHONY: clean_$1_$2_$3_export From aa500e930a712bd471435b954f9a48193bdf3372 Mon Sep 17 
00:00:00 2001 From: Corentin Le Molgat Date: Fri, 6 Sep 2024 10:35:40 +0200 Subject: [PATCH 11/12] tools/docker: clean almalinux and rockylinux --- tools/docker/images/almalinux-9.Dockerfile | 4 +++- tools/docker/images/rockylinux-9.Dockerfile | 4 +++- .../docker/test/almalinux-9/python.Dockerfile | 19 +++++++++++-------- .../test/rockylinux-9/python.Dockerfile | 19 +++++++++++-------- 4 files changed, 28 insertions(+), 18 deletions(-) diff --git a/tools/docker/images/almalinux-9.Dockerfile b/tools/docker/images/almalinux-9.Dockerfile index 392070e256e..0c624795e44 100644 --- a/tools/docker/images/almalinux-9.Dockerfile +++ b/tools/docker/images/almalinux-9.Dockerfile @@ -4,12 +4,12 @@ FROM almalinux:9 AS env ############# ## SETUP ## ############# -#ENV PATH=/root/.local/bin:$PATH RUN dnf -y update \ && dnf -y install git wget openssl-devel cmake \ && dnf -y groupinstall "Development Tools" \ && dnf clean all \ && rm -rf /var/cache/dnf +ENTRYPOINT ["/usr/bin/bash", "-c"] CMD [ "/usr/bin/bash" ] # Install SWIG 4.2.1 @@ -58,6 +58,8 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ## OR-TOOLS ## ################ FROM env AS devel +ENV DISTRIBUTION=almalinux-9 + WORKDIR /root # Copy the snk key COPY or-tools.snk /root/or-tools.snk diff --git a/tools/docker/images/rockylinux-9.Dockerfile b/tools/docker/images/rockylinux-9.Dockerfile index ecb9f40b7d3..b1d7ddd94fa 100644 --- a/tools/docker/images/rockylinux-9.Dockerfile +++ b/tools/docker/images/rockylinux-9.Dockerfile @@ -4,12 +4,12 @@ FROM rockylinux:9 AS env ############# ## SETUP ## ############# -#ENV PATH=/root/.local/bin:$PATH RUN dnf -y update \ && dnf -y install git wget openssl-devel cmake \ && dnf -y groupinstall "Development Tools" \ && dnf clean all \ && rm -rf /var/cache/dnf +ENTRYPOINT ["/usr/bin/bash", "-c"] CMD [ "/usr/bin/bash" ] # Install SWIG 4.2.1 @@ -58,6 +58,8 @@ RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone ## OR-TOOLS ## 
################ FROM env AS devel +ENV DISTRIBUTION=rockylinux-9 + WORKDIR /root # Copy the snk key COPY or-tools.snk /root/or-tools.snk diff --git a/tools/docker/test/almalinux-9/python.Dockerfile b/tools/docker/test/almalinux-9/python.Dockerfile index 6ff155b8261..06dc71ee476 100644 --- a/tools/docker/test/almalinux-9/python.Dockerfile +++ b/tools/docker/test/almalinux-9/python.Dockerfile @@ -4,16 +4,19 @@ FROM almalinux:9 ############# ## SETUP ## ############# -RUN yum -y update \ -&& yum -y groupinstall 'Development Tools' \ -&& yum clean all \ -&& rm -rf /var/cache/yum +RUN dnf -y update \ +&& dnf -y groupinstall 'Development Tools' \ +&& dnf clean all \ +&& rm -rf /var/cache/dnf +CMD ["/usr/bin/bash"] # Install Python -RUN yum -y update \ -&& yum -y install python3 python3-devel python3-pip python3-numpy \ -&& yum clean all \ -&& rm -rf /var/cache/yum +RUN dnf -y update \ +&& dnf -y install python3 python3-devel python3-pip python3-numpy \ +&& dnf clean all \ +&& rm -rf /var/cache/dnf +RUN python3 -m pip install \ + absl-py mypy mypy-protobuf pandas WORKDIR /root ADD or-tools_amd64_almalinux-9_python_v*.tar.gz . 
diff --git a/tools/docker/test/rockylinux-9/python.Dockerfile b/tools/docker/test/rockylinux-9/python.Dockerfile index 154f30f20a8..18b6be8d63a 100644 --- a/tools/docker/test/rockylinux-9/python.Dockerfile +++ b/tools/docker/test/rockylinux-9/python.Dockerfile @@ -4,16 +4,19 @@ FROM rockylinux:9 ############# ## SETUP ## ############# -RUN yum -y update \ -&& yum -y groupinstall 'Development Tools' \ -&& yum clean all \ -&& rm -rf /var/cache/yum +RUN dnf -y update \ +&& dnf -y groupinstall 'Development Tools' \ +&& dnf clean all \ +&& rm -rf /var/cache/dnf +CMD ["/usr/bin/bash"] # Install Python -RUN yum -y update \ -&& yum -y install python3 python3-devel python3-pip python3-numpy \ -&& yum clean all \ -&& rm -rf /var/cache/yum +RUN dnf -y update \ +&& dnf -y install python3 python3-devel python3-pip python3-numpy \ +&& dnf clean all \ +&& rm -rf /var/cache/dnf +RUN python3 -m pip install \ + absl-py mypy mypy-protobuf pandas WORKDIR /root ADD or-tools_amd64_rockylinux-9_python_v*.tar.gz . From f204c4f676e0b94560e904fa3d92242724e824e4 Mon Sep 17 00:00:00 2001 From: Corentin Le Molgat Date: Fri, 6 Sep 2024 10:36:10 +0200 Subject: [PATCH 12/12] tools/docker: fix version patch computation --- tools/docker/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/docker/Makefile b/tools/docker/Makefile index 2825c82952c..220912fa68a 100644 --- a/tools/docker/Makefile +++ b/tools/docker/Makefile @@ -150,7 +150,7 @@ OR_TOOLS_BRANCH := $(shell git rev-parse --abbrev-ref HEAD) OR_TOOLS_SHA1 := $(shell git rev-parse --verify HEAD) # OR_TOOLS_MAJOR & OR_TOOLS_MINOR include ../../Version.txt -OR_TOOLS_PATCH := $(shell git rev-list --count v${OR_TOOLS_MAJOR}.0..HEAD) +OR_TOOLS_PATCH := $(shell git rev-list --count v${OR_TOOLS_MAJOR}.0..HEAD || echo 0) OR_TOOLS_VERSION := ${OR_TOOLS_MAJOR}.${OR_TOOLS_MINOR}.${OR_TOOLS_PATCH} ifdef PRE_RELEASE OR_TOOLS_VERSION := ${OR_TOOLS_VERSION}-rc