From 701cc87e1ecb08941763a67f625f2978a41cacef Mon Sep 17 00:00:00 2001 From: Laurent Perron Date: Fri, 13 Sep 2024 13:29:25 -0700 Subject: [PATCH] backport from main: sat, graph, lp_data, glop --- WORKSPACE | 62 +- go.mod | 8 + go.sum | 6 + ortools/base/temp_path.cc | 62 +- ortools/glop/markowitz.cc | 136 +-- ortools/glop/markowitz.h | 30 +- ortools/glop/revised_simplex.h | 4 + ortools/graph/christofides_test.cc | 2 +- ortools/graph/hamiltonian_path_test.cc | 3 +- ortools/lp_data/sparse.cc | 16 +- ortools/lp_data/sparse.h | 2 +- ortools/sat/2d_orthogonal_packing_testing.cc | 76 +- ortools/sat/2d_orthogonal_packing_testing.h | 8 + ortools/sat/2d_packing_brute_force.cc | 9 +- ortools/sat/2d_rectangle_presolve.cc | 492 ++++++++- ortools/sat/2d_rectangle_presolve.h | 136 +++ ortools/sat/BUILD.bazel | 33 +- ortools/sat/CMakeLists.txt | 14 + ortools/sat/cp_model_lns.cc | 360 +++++-- ortools/sat/cp_model_lns.h | 113 +-- ortools/sat/cp_model_presolve.cc | 52 +- ortools/sat/cp_model_presolve.h | 3 + ortools/sat/cp_model_solver.cc | 25 +- ortools/sat/cp_model_solver_helpers.cc | 33 +- ortools/sat/diffn_util.cc | 5 +- ortools/sat/diffn_util.h | 3 +- ortools/sat/go/cpmodel/BUILD.bazel | 55 + ortools/sat/go/{ => cpmodel}/cp_model.go | 450 +++++---- ortools/sat/go/{ => cpmodel}/cp_model_test.go | 949 ++++++++++-------- ortools/sat/go/{ => cpmodel}/cp_solver.go | 10 +- ortools/sat/go/{ => cpmodel}/cp_solver_c.cc | 14 +- ortools/sat/go/{ => cpmodel}/cp_solver_c.h | 0 .../sat/go/{ => cpmodel}/cp_solver_test.go | 34 +- ortools/sat/go/{ => cpmodel}/domain.go | 16 +- ortools/sat/go/{ => cpmodel}/domain_test.go | 12 +- ortools/sat/integer_search.cc | 14 +- ortools/sat/linear_programming_constraint.cc | 23 +- ortools/sat/presolve_context.cc | 68 +- ortools/sat/presolve_context.h | 10 +- ortools/sat/python/cp_model.py | 77 +- ortools/sat/python/cp_model_helper.py | 22 - ortools/sat/python/cp_model_helper_test.py | 10 - ortools/sat/stat_tables.cc | 19 +- ortools/sat/stat_tables.h | 9 +- ortools/sat/symmetry_util_test.cc | 134 +++ ortools/sat/util.cc | 2 +- ortools/sat/util.h | 67 +- ortools/util/BUILD.bazel | 1 + ortools/util/bitset.h | 4 + ortools/util/saturated_arithmetic.h | 5 + ortools/util/sorted_interval_list.cc | 2 +- 51 files changed, 2483 insertions(+), 1217 deletions(-) create mode 100644 go.mod create mode 100644 go.sum create mode 100644 ortools/sat/go/cpmodel/BUILD.bazel rename ortools/sat/go/{ => cpmodel}/cp_model.go (76%) rename ortools/sat/go/{ => cpmodel}/cp_model_test.go (70%) rename ortools/sat/go/{ => cpmodel}/cp_solver.go (95%) rename ortools/sat/go/{ => cpmodel}/cp_solver_c.cc (89%) rename ortools/sat/go/{ => cpmodel}/cp_solver_c.h (100%) rename ortools/sat/go/{ => cpmodel}/cp_solver_test.go (94%) rename ortools/sat/go/{ => cpmodel}/domain.go (94%) rename ortools/sat/go/{ => cpmodel}/domain_test.go (98%) create mode 100644 ortools/sat/symmetry_util_test.cc diff --git a/WORKSPACE b/WORKSPACE index a399d160b52..8d5faf7c504 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -26,6 +26,16 @@ git_repository( load("@bazel_skylib//:workspace.bzl", "bazel_skylib_workspace") bazel_skylib_workspace() +http_archive( + name = "bazel_features", + sha256 = "cec7fbc7bce6597cf2e83e01ddd9328a1bb057dc1a3092745238f49d3301ab5a", + strip_prefix = "bazel_features-1.12.0", + url = "https://github.com/bazel-contrib/bazel_features/releases/download/v1.12.0/bazel_features-v1.12.0.tar.gz", +) + +load("@bazel_features//:deps.bzl", "bazel_features_deps") +bazel_features_deps() + ## Bazel rules. 
git_repository( name = "platforms", @@ -41,7 +51,7 @@ git_repository( git_repository( name = "rules_proto", - tag = "5.3.0-21.7", + tag = "6.0.0", remote = "https://github.com/bazelbuild/rules_proto.git", ) @@ -110,6 +120,12 @@ git_repository( load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") protobuf_deps() +load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies") +rules_proto_dependencies() + +load("@rules_proto//proto:toolchains.bzl", "rules_proto_toolchains") +rules_proto_toolchains() + ## Solvers http_archive( name = "glpk", @@ -147,7 +163,7 @@ cc_library( name = 'eigen3', srcs = [], includes = ['.'], - hdrs = glob(['Eigen/**']), + hdrs = glob(['Eigen/**', 'unsupported/**']), defines = ["EIGEN_MPL2_ONLY",], visibility = ['//visibility:public'], ) @@ -318,3 +334,45 @@ git_repository( tag = "v1.8.5", remote = "https://github.com/google/benchmark.git", ) + +# Go + +http_archive( + name = "io_bazel_rules_go", + sha256 = "33acc4ae0f70502db4b893c9fc1dd7a9bf998c23e7ff2c4517741d4049a976f8", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.48.0/rules_go-v0.48.0.zip", + "https://github.com/bazelbuild/rules_go/releases/download/v0.48.0/rules_go-v0.48.0.zip", + ], +) + +http_archive( + name = "bazel_gazelle", + sha256 = "d76bf7a60fd8b050444090dfa2837a4eaf9829e1165618ee35dceca5cbdf58d5", + urls = [ + "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.37.0/bazel-gazelle-v0.37.0.tar.gz", + "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.37.0/bazel-gazelle-v0.37.0.tar.gz", + ], +) + +load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies") +load("//:deps.bzl", "go_dependencies") +# gazelle:repository_macro deps.bzl%go_dependencies +go_dependencies() + +load("@io_bazel_rules_go//go:deps.bzl", "go_download_sdk", "go_register_toolchains", "go_rules_dependencies") + +go_rules_dependencies() + +go_download_sdk( + name = "go_sdk_linux", + version = "1.22.4", +) + +go_register_toolchains() + +gazelle_dependencies( + go_env = { + "GOPROXY": "https://proxy.golang.org|direct", + }, +) diff --git a/go.mod b/go.mod new file mode 100644 index 00000000000..0e1efc1b277 --- /dev/null +++ b/go.mod @@ -0,0 +1,8 @@ +module github.com/google/or-tools + +go 1.22.2 + +require ( + github.com/golang/glog v1.2.2 + google.golang.org/protobuf v1.34.2 +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000000..aabe67fb1d3 --- /dev/null +++ b/go.sum @@ -0,0 +1,6 @@ +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= +github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= diff --git a/ortools/base/temp_path.cc b/ortools/base/temp_path.cc index 1e34b6ec7f1..bbc2c6552d7 100644 --- a/ortools/base/temp_path.cc +++ b/ortools/base/temp_path.cc @@ -39,44 +39,44 @@ TempPath::TempPath(absl::string_view prefix) : path_(file::TempFile(prefix)) { } TempPath::TempPath(absl::string_view prefix, absl::Status* status) - : path_(file::TempFile(prefix)) { + : path_(file::TempFile(prefix)) { *status = Init(kDefaultMode); } - TempPath::TempPath(TempPath && rhs) : path_(std::move(rhs.path_)) {} 
+TempPath::TempPath(TempPath&& rhs) : path_(std::move(rhs.path_)) {} - TempPath& TempPath::operator=(TempPath&& rhs) { - TempPath tmp(std::move(*this)); - path_ = std::move(rhs.path_); - return *this; - } +TempPath& TempPath::operator=(TempPath&& rhs) { + TempPath tmp(std::move(*this)); + path_ = std::move(rhs.path_); + return *this; +} - TempPath::~TempPath() {} +TempPath::~TempPath() {} - TempPath* TempPath::Create(Location location) { - std::string dirname; - switch (location) { - case Local: +TempPath* TempPath::Create(Location location) { + std::string dirname; + switch (location) { + case Local: dirname = file::TempFile(""); - } - if (dirname.empty()) { - return nullptr; - } - absl::Status status; - TempPath* temp_path = new TempPath(dirname, &status); - if (!status.ok()) { - delete temp_path; - return nullptr; - } - return temp_path; } - - TempPath::TempPath(const std::string& dirname, file::Options options, - absl::Status* status) - : path_(dirname) { - *status = Init(options); + if (dirname.empty()) { + return nullptr; } - - absl::Status TempPath::Init(file::Options options) { - return file::RecursivelyCreateDir(path(), options); + absl::Status status; + TempPath* temp_path = new TempPath(dirname, &status); + if (!status.ok()) { + delete temp_path; + return nullptr; } + return temp_path; +} + +TempPath::TempPath(const std::string& dirname, file::Options options, + absl::Status* status) + : path_(dirname) { + *status = Init(options); +} + +absl::Status TempPath::Init(file::Options options) { + return file::RecursivelyCreateDir(path(), options); +} diff --git a/ortools/glop/markowitz.cc b/ortools/glop/markowitz.cc index 1de35de2a0b..36f5716e6c4 100644 --- a/ortools/glop/markowitz.cc +++ b/ortools/glop/markowitz.cc @@ -24,6 +24,7 @@ #include "ortools/lp_data/lp_types.h" #include "ortools/lp_data/lp_utils.h" #include "ortools/lp_data/sparse.h" +#include "ortools/lp_data/sparse_column.h" namespace operations_research { namespace glop { @@ -175,51 +176,41 @@ void Markowitz::Clear() { permuted_lower_.Clear(); permuted_upper_.Clear(); residual_matrix_non_zero_.Clear(); - col_by_degree_.Clear(); examined_col_.clear(); num_fp_operations_ = 0; is_col_by_degree_initialized_ = false; } -namespace { -struct MatrixEntry { - RowIndex row; - ColIndex col; - Fractional coefficient; - MatrixEntry(RowIndex r, ColIndex c, Fractional coeff) - : row(r), col(c), coefficient(coeff) {} - bool operator<(const MatrixEntry& o) const { - return (row == o.row) ? col < o.col : row < o.row; - } -}; - -} // namespace - void Markowitz::ExtractSingletonColumns( const CompactSparseMatrixView& basis_matrix, RowPermutation* row_perm, ColumnPermutation* col_perm, int* index) { SCOPED_TIME_STAT(&stats_); - std::vector singleton_entries; + tmp_singleton_entries_.clear(); const ColIndex num_cols = basis_matrix.num_cols(); for (ColIndex col(0); col < num_cols; ++col) { - const ColumnView& column = basis_matrix.column(col); + const ColumnView column = basis_matrix.column(col); if (column.num_entries().value() == 1) { - singleton_entries.push_back( - MatrixEntry(column.GetFirstRow(), col, column.GetFirstCoefficient())); + const RowIndex row = column.GetFirstRow(); + + // We temporary mark row perm (it will be filled below). + // If there is a tie, we will choose the lower column. 
+ if ((*row_perm)[row] != kInvalidRow) continue; + (*row_perm)[row] = 0; + + tmp_singleton_entries_.push_back( + MatrixEntry(row, col, column.GetFirstCoefficient())); } } // Sorting the entries by row indices allows the row_permutation to be closer // to identity which seems like a good idea. - std::sort(singleton_entries.begin(), singleton_entries.end()); - for (const MatrixEntry e : singleton_entries) { - if ((*row_perm)[e.row] == kInvalidRow) { - (*col_perm)[e.col] = ColIndex(*index); - (*row_perm)[e.row] = RowIndex(*index); - lower_.AddDiagonalOnlyColumn(1.0); - upper_.AddDiagonalOnlyColumn(e.coefficient); - ++(*index); - } + std::sort(tmp_singleton_entries_.begin(), tmp_singleton_entries_.end()); + for (const MatrixEntry e : tmp_singleton_entries_) { + (*col_perm)[e.col] = ColIndex(*index); + (*row_perm)[e.row] = RowIndex(*index); + lower_.AddDiagonalOnlyColumn(1.0); + upper_.AddDiagonalOnlyColumn(e.coefficient); + ++(*index); } stats_.basis_singleton_column_ratio.Add(static_cast(*index) / basis_matrix.num_rows().value()); @@ -246,7 +237,7 @@ void Markowitz::ExtractResidualSingletonColumns( RowIndex row = kInvalidRow; for (ColIndex col(0); col < num_cols; ++col) { if ((*col_perm)[col] != kInvalidCol) continue; - const ColumnView& column = basis_matrix.column(col); + const ColumnView column = basis_matrix.column(col); if (!IsResidualSingletonColumn(column, *row_perm, &row)) continue; (*col_perm)[col] = ColIndex(*index); (*row_perm)[row] = RowIndex(*index); @@ -810,42 +801,65 @@ void MatrixNonZeroPattern::MergeIntoSorted(RowIndex pivot_row, RowIndex row) { MergeSortedVectors(col_scratchpad_, &row_non_zero_[row]); } -void ColumnPriorityQueue::Clear() { - col_degree_.clear(); - col_index_.clear(); - col_by_degree_.clear(); -} - void ColumnPriorityQueue::Reset(int max_degree, ColIndex num_cols) { - Clear(); - col_degree_.assign(num_cols, 0); - col_index_.assign(num_cols, -1); - col_by_degree_.resize(max_degree + 1); + degree_.assign(num_cols, 0); + col_by_degree_.assign(max_degree + 1, kInvalidCol); min_degree_ = max_degree + 1; + + // These are not used as long as the degree is zero. + prev_.resize(num_cols, kInvalidCol); + next_.resize(num_cols, kInvalidCol); +} + +void ColumnPriorityQueue::Remove(ColIndex col, int32_t old_degree) { + DCHECK_NE(old_degree, 0); + + const ColIndex old_next = next_[col]; + const ColIndex old_prev = prev_[col]; + + // Remove. + if (old_next != -1) prev_[old_next] = old_prev; + if (old_prev == -1) { + DCHECK_EQ(col_by_degree_[old_degree], col); + col_by_degree_[old_degree] = old_next; + } else { + next_[old_prev] = old_next; + } + + // Mark as removed. 
+ degree_[col] = 0; +} + +void ColumnPriorityQueue::Insert(ColIndex col, int32_t degree) { + DCHECK_EQ(degree_[col], 0); + DCHECK_NE(degree, 0); + + const ColIndex new_next = col_by_degree_[degree]; + next_[col] = new_next; + if (new_next != -1) { + prev_[new_next] = col; + } + + col_by_degree_[degree] = col; + prev_[col] = kInvalidCol; + degree_[col] = degree; + + min_degree_ = std::min(min_degree_, degree); } void ColumnPriorityQueue::PushOrAdjust(ColIndex col, int32_t degree) { DCHECK_GE(degree, 0); DCHECK_LT(degree, col_by_degree_.size()); DCHECK_GE(col, 0); - DCHECK_LT(col, col_degree_.size()); + DCHECK_LT(col, degree_.size()); - const int32_t old_degree = col_degree_[col]; + const int32_t old_degree = degree_[col]; if (degree != old_degree) { - const int32_t old_index = col_index_[col]; - if (old_index != -1) { - col_by_degree_[old_degree][old_index] = col_by_degree_[old_degree].back(); - col_index_[col_by_degree_[old_degree].back()] = old_index; - col_by_degree_[old_degree].pop_back(); + if (old_degree != 0) { + Remove(col, old_degree); } - if (degree > 0) { - col_index_[col] = col_by_degree_[degree].size(); - col_degree_[col] = degree; - col_by_degree_[degree].push_back(col); - min_degree_ = std::min(min_degree_, degree); - } else { - col_index_[col] = -1; - col_degree_[col] = 0; + if (degree != 0) { + Insert(col, degree); } } } @@ -853,16 +867,16 @@ void ColumnPriorityQueue::PushOrAdjust(ColIndex col, int32_t degree) { ColIndex ColumnPriorityQueue::Pop() { DCHECK_GE(min_degree_, 0); DCHECK_LE(min_degree_, col_by_degree_.size()); + ColIndex result = kInvalidCol; + const int limit = col_by_degree_.size(); while (true) { - if (min_degree_ == col_by_degree_.size()) return kInvalidCol; - if (!col_by_degree_[min_degree_].empty()) break; + if (min_degree_ == limit) return kInvalidCol; + result = col_by_degree_[min_degree_]; + if (result != kInvalidCol) break; min_degree_++; } - const ColIndex col = col_by_degree_[min_degree_].back(); - col_by_degree_[min_degree_].pop_back(); - col_index_[col] = -1; - col_degree_[col] = 0; - return col; + Remove(result, min_degree_); + return result; } void SparseMatrixWithReusableColumnMemory::Reset(ColIndex num_cols) { diff --git a/ortools/glop/markowitz.h b/ortools/glop/markowitz.h index c4acfca66d0..198b6ac63ba 100644 --- a/ortools/glop/markowitz.h +++ b/ortools/glop/markowitz.h @@ -215,9 +215,6 @@ class ColumnPriorityQueue { ColumnPriorityQueue(const ColumnPriorityQueue&) = delete; ColumnPriorityQueue& operator=(const ColumnPriorityQueue&) = delete; - // Releases the memory used by this class. - void Clear(); - // Clears the queue and prepares it to store up to num_cols column indices // with a degree from 1 to max_degree included. void Reset(int32_t max_degree, ColIndex num_cols); @@ -232,10 +229,19 @@ class ColumnPriorityQueue { ColIndex Pop(); private: - StrictITIVector col_index_; - StrictITIVector col_degree_; - std::vector> col_by_degree_; + void Remove(ColIndex col, int32_t old_degree); + void Insert(ColIndex col, int32_t degree); + + // A degree of zero means not present. int32_t min_degree_; + StrictITIVector degree_; + + // Pointer in the form of the prev/next column, kInvalidCol means "nil". + // We use double linked list for each degree, with col_by_degree_ pointing to + // the first element. + StrictITIVector prev_; + StrictITIVector next_; + std::vector col_by_degree_; }; // Contains a set of columns indexed by ColIndex. This is like a SparseMatrix @@ -413,6 +419,18 @@ class Markowitz { // Remove...() functions above. 
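For reference (not part of the patch): the reworked ColumnPriorityQueue above keeps, for each degree, an intrusive doubly linked list threaded through prev_/next_, with col_by_degree_ pointing at the head of each bucket and a degree of zero meaning "not queued". PushOrAdjust becomes a constant-time unlink/relink, and Pop scans forward from min_degree_ to the first non-empty bucket. Below is a minimal self-contained sketch of the same idea using plain int indices instead of ColIndex/StrictITIVector; the class and member names are illustrative, not the patched code.

#include <algorithm>
#include <vector>

class BucketQueue {
 public:
  // Prepares buckets for degrees in [1, max_degree]; degree 0 means "absent".
  void Reset(int max_degree, int num_cols) {
    degree_.assign(num_cols, 0);
    head_.assign(max_degree + 1, -1);
    prev_.assign(num_cols, -1);
    next_.assign(num_cols, -1);
    min_degree_ = max_degree + 1;
  }

  // O(1): unlink from the old bucket (if any), then push onto the new one.
  void PushOrAdjust(int col, int degree) {
    const int old_degree = degree_[col];
    if (old_degree == degree) return;
    if (old_degree != 0) Remove(col, old_degree);
    if (degree != 0) Insert(col, degree);
  }

  // Scans forward from min_degree_ to the first non-empty bucket.
  int Pop() {
    const int limit = static_cast<int>(head_.size());
    while (min_degree_ < limit && head_[min_degree_] == -1) ++min_degree_;
    if (min_degree_ == limit) return -1;
    const int col = head_[min_degree_];
    Remove(col, min_degree_);
    return col;
  }

 private:
  void Remove(int col, int old_degree) {
    const int p = prev_[col];
    const int n = next_[col];
    if (n != -1) prev_[n] = p;
    if (p == -1) {
      head_[old_degree] = n;
    } else {
      next_[p] = n;
    }
    degree_[col] = 0;  // Marks the column as not queued.
  }

  void Insert(int col, int degree) {
    const int old_head = head_[degree];
    next_[col] = old_head;
    if (old_head != -1) prev_[old_head] = col;
    head_[degree] = col;
    prev_[col] = -1;
    degree_[col] = degree;
    min_degree_ = std::min(min_degree_, degree);
  }

  int min_degree_ = 0;
  std::vector<int> degree_, prev_, next_, head_;
};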
void UpdateResidualMatrix(RowIndex pivot_row, ColIndex pivot_col); + // Temporary memory. + struct MatrixEntry { + RowIndex row; + ColIndex col; + Fractional coefficient; + MatrixEntry() = default; + MatrixEntry(RowIndex r, ColIndex c, Fractional coeff) + : row(r), col(c), coefficient(coeff) {} + bool operator<(const MatrixEntry& o) const { return row < o.row; } + }; + std::vector tmp_singleton_entries_; + // Pointer to the matrix to factorize. CompactSparseMatrixView const* basis_matrix_; diff --git a/ortools/glop/revised_simplex.h b/ortools/glop/revised_simplex.h index a1e05f82baa..3f8d9c8b4ea 100644 --- a/ortools/glop/revised_simplex.h +++ b/ortools/glop/revised_simplex.h @@ -195,6 +195,10 @@ class RevisedSimplex { double DeterministicTime() const; bool objective_limit_reached() const { return objective_limit_reached_; } + const DenseBitRow& GetNotBasicBitRow() const { + return variables_info_.GetNotBasicBitRow(); + } + // If the problem status is PRIMAL_UNBOUNDED (respectively DUAL_UNBOUNDED), // then the solver has a corresponding primal (respectively dual) ray to show // the unboundness. From a primal (respectively dual) feasible solution any diff --git a/ortools/graph/christofides_test.cc b/ortools/graph/christofides_test.cc index b08699ba3f2..c092b06722f 100644 --- a/ortools/graph/christofides_test.cc +++ b/ortools/graph/christofides_test.cc @@ -50,7 +50,7 @@ void ComputeAndShow(const std::string& name, } void TestChristofides(const std::string& name, const int size, - const std::vector& cost_data, + absl::Span cost_data, bool use_minimal_matching, bool use_mip, const int expected_cost, absl::string_view expected_solution) { diff --git a/ortools/graph/hamiltonian_path_test.cc b/ortools/graph/hamiltonian_path_test.cc index ffaa436c9fc..c42a438ddfd 100644 --- a/ortools/graph/hamiltonian_path_test.cc +++ b/ortools/graph/hamiltonian_path_test.cc @@ -339,8 +339,7 @@ void InitEuclideanCosts(int size, std::vector x, std::vector y, } } -bool ComparePaths(const std::vector& path1, - const std::vector& path2) { +bool ComparePaths(absl::Span path1, absl::Span path2) { // Returns true if TSP paths are equal or one is the reverse of the other. // TSP paths always start and end with 0 (the start node). 
For example, paths // (0, 1, 2, 3, 0) and (0, 3, 2, 1, 0) are equivalent, but (0, 1, 2, 3, 0) and diff --git a/ortools/lp_data/sparse.cc b/ortools/lp_data/sparse.cc index 735a9088dc5..41ff043ee60 100644 --- a/ortools/lp_data/sparse.cc +++ b/ortools/lp_data/sparse.cc @@ -22,6 +22,7 @@ #include "absl/log/check.h" #include "absl/strings/str_format.h" +#include "absl/types/span.h" #include "ortools/lp_data/lp_types.h" #include "ortools/lp_data/permutation.h" #include "ortools/lp_data/sparse_column.h" @@ -592,7 +593,7 @@ ColIndex CompactSparseMatrix::AddDenseColumnPrefix( } ColIndex CompactSparseMatrix::AddDenseColumnWithNonZeros( - const DenseColumn& dense_column, const std::vector& non_zeros) { + const DenseColumn& dense_column, absl::Span non_zeros) { if (non_zeros.empty()) return AddDenseColumn(dense_column); for (const RowIndex row : non_zeros) { const Fractional value = dense_column[row]; @@ -1459,7 +1460,10 @@ void TriangularMatrix::ComputeRowsToConsiderInSortedOrder( } stored_.Resize(num_rows_); - for (const RowIndex row : *non_zero_rows) stored_.Set(row); + Bitset64::View stored = stored_.view(); + for (const RowIndex row : *non_zero_rows) { + stored.Set(row); + } const auto entry_rows = rows_.view(); for (int i = 0; i < non_zero_rows->size(); ++i) { @@ -1467,9 +1471,9 @@ void TriangularMatrix::ComputeRowsToConsiderInSortedOrder( for (const EntryIndex index : Column(RowToColIndex(row))) { ++num_ops; const RowIndex entry_row = entry_rows[index]; - if (!stored_[entry_row]) { + if (!stored[entry_row]) { non_zero_rows->push_back(entry_row); - stored_.Set(entry_row); + stored.Set(entry_row); } } if (num_ops > num_ops_threshold) break; @@ -1480,7 +1484,9 @@ void TriangularMatrix::ComputeRowsToConsiderInSortedOrder( non_zero_rows->clear(); } else { std::sort(non_zero_rows->begin(), non_zero_rows->end()); - for (const RowIndex row : *non_zero_rows) stored_.ClearBucket(row); + for (const RowIndex row : *non_zero_rows) { + stored_.ClearBucket(row); + } } } diff --git a/ortools/lp_data/sparse.h b/ortools/lp_data/sparse.h index fbc45513ff8..96c45714449 100644 --- a/ortools/lp_data/sparse.h +++ b/ortools/lp_data/sparse.h @@ -381,7 +381,7 @@ class CompactSparseMatrix { // Same as AddDenseColumn(), but uses the given non_zeros pattern of input. // If non_zeros is empty, this actually calls AddDenseColumn(). ColIndex AddDenseColumnWithNonZeros(const DenseColumn& dense_column, - const std::vector& non_zeros); + absl::Span non_zeros); // Adds a dense column for which we know the non-zero positions and clears it. // Note that this function supports duplicate indices in non_zeros. 
The diff --git a/ortools/sat/2d_orthogonal_packing_testing.cc b/ortools/sat/2d_orthogonal_packing_testing.cc index 95eddf91cba..597e718db36 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.cc +++ b/ortools/sat/2d_orthogonal_packing_testing.cc @@ -14,6 +14,9 @@ #include "ortools/sat/2d_orthogonal_packing_testing.h" #include +#include +#include +#include #include #include @@ -34,7 +37,7 @@ std::vector GenerateNonConflictingRectangles( rectangles.reserve(num_rectangles); rectangles.push_back( {.x_min = 0, .x_max = kSizeMax, .y_min = 0, .y_max = kSizeMax}); - for (int i = 0; i < num_rectangles; ++i) { + while (rectangles.size() < num_rectangles) { std::swap(rectangles.back(), rectangles[absl::Uniform(random, 0ull, rectangles.size() - 1)]); const Rectangle& rec = rectangles.back(); @@ -49,6 +52,9 @@ std::vector GenerateNonConflictingRectangles( .x_max = rec.x_max, .y_min = rec.y_min, .y_max = rec.y_max}; + if (new_range.Area() == 0 || new_range2.Area() == 0) { + continue; + } rectangles.pop_back(); rectangles.push_back(new_range); rectangles.push_back(new_range2); @@ -63,6 +69,9 @@ std::vector GenerateNonConflictingRectangles( .x_max = rec.x_max, .y_min = cut, .y_max = rec.y_max}; + if (new_range.Area() == 0 || new_range2.Area() == 0) { + continue; + } rectangles.pop_back(); rectangles.push_back(new_range); rectangles.push_back(new_range2); @@ -71,6 +80,71 @@ std::vector GenerateNonConflictingRectangles( return rectangles; } +std::vector GenerateNonConflictingRectanglesWithPacking( + std::pair bb, int average_num_boxes, + absl::BitGenRef random) { + const double p = 0.01; + std::vector rectangles; + int num_retries = 0; + double average_size = + std::sqrt(bb.first.value() * bb.second.value() / average_num_boxes); + const int64_t n_x = static_cast(average_size / p); + const int64_t n_y = static_cast(average_size / p); + while (num_retries < 4) { + num_retries++; + + std::pair sizes; + do { + sizes.first = std::binomial_distribution<>(n_x, p)(random); + } while (sizes.first == 0 || sizes.first > bb.first); + do { + sizes.second = std::binomial_distribution<>(n_y, p)(random); + } while (sizes.second == 0 || sizes.second > bb.second); + + std::vector possible_x_positions = {0}; + std::vector possible_y_positions = {0}; + for (const Rectangle& rec : rectangles) { + possible_x_positions.push_back(rec.x_max); + possible_y_positions.push_back(rec.y_max); + } + std::sort(possible_x_positions.begin(), possible_x_positions.end()); + std::sort(possible_y_positions.begin(), possible_y_positions.end()); + bool found_position = false; + for (const IntegerValue x : possible_x_positions) { + for (const IntegerValue y : possible_y_positions) { + if (x + sizes.first > bb.first || y + sizes.second > bb.second) { + continue; + } + const Rectangle rec = {.x_min = x, + .x_max = x + sizes.first, + .y_min = y, + .y_max = y + sizes.second}; + bool conflict = false; + for (const Rectangle r : rectangles) { + if (!r.IsDisjoint(rec)) { + conflict = true; + break; + } + } + if (conflict) { + continue; + } else { + rectangles.push_back(rec); + found_position = true; + break; + } + } + if (found_position) { + break; + } + } + if (found_position) { + num_retries = 0; + } + } + return rectangles; +} + std::vector MakeItemsFromRectangles( absl::Span rectangles, double slack_factor, absl::BitGenRef random) { diff --git a/ortools/sat/2d_orthogonal_packing_testing.h b/ortools/sat/2d_orthogonal_packing_testing.h index 68e514f260f..72de30a9107 100644 --- a/ortools/sat/2d_orthogonal_packing_testing.h +++ 
b/ortools/sat/2d_orthogonal_packing_testing.h @@ -14,11 +14,13 @@ #ifndef OR_TOOLS_SAT_2D_ORTHOGONAL_PACKING_TESTING_H_ #define OR_TOOLS_SAT_2D_ORTHOGONAL_PACKING_TESTING_H_ +#include #include #include "absl/random/bit_gen_ref.h" #include "absl/types/span.h" #include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" namespace operations_research { namespace sat { @@ -26,6 +28,12 @@ namespace sat { std::vector GenerateNonConflictingRectangles(int num_rectangles, absl::BitGenRef random); +// Alternative way of generating random rectangles. This one generate random +// rectangles and try to pack them using the left-bottom-first order. +std::vector GenerateNonConflictingRectanglesWithPacking( + std::pair bb, int average_num_boxes, + absl::BitGenRef random); + std::vector MakeItemsFromRectangles( absl::Span rectangles, double slack_factor, absl::BitGenRef random); diff --git a/ortools/sat/2d_packing_brute_force.cc b/ortools/sat/2d_packing_brute_force.cc index d6381a99547..957c8a79caf 100644 --- a/ortools/sat/2d_packing_brute_force.cc +++ b/ortools/sat/2d_packing_brute_force.cc @@ -681,8 +681,13 @@ BruteForceResult BruteForceOrthogonalPacking( for (const PermutableItem& item : items) { result[item.index] = item.position; } - // VLOG_EVERY_N_SEC(3, 3) << "Found a feasible packing by brute force. Dot:\n " - // << RenderDot(bounding_box_size, result); + VLOG_EVERY_N_SEC(3, 3) << "Found a feasible packing by brute force. Dot:\n " + << RenderDot( + Rectangle{.x_min = 0, + .x_max = bounding_box_size.first, + .y_min = 0, + .y_max = bounding_box_size.second}, + result); return {.status = BruteForceResult::Status::kFoundSolution, .positions_for_solution = result}; } diff --git a/ortools/sat/2d_rectangle_presolve.cc b/ortools/sat/2d_rectangle_presolve.cc index 9fefb17ff64..e454c86cd9e 100644 --- a/ortools/sat/2d_rectangle_presolve.cc +++ b/ortools/sat/2d_rectangle_presolve.cc @@ -17,14 +17,19 @@ #include #include #include +#include #include #include +#include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" +#include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "ortools/base/logging.h" +#include "ortools/base/stl_util.h" +#include "ortools/graph/strongly_connected_components.h" #include "ortools/sat/diffn_util.h" #include "ortools/sat/integer.h" @@ -234,8 +239,6 @@ struct Edge { IntegerValue y_start; IntegerValue size; - enum class EdgePosition { TOP, BOTTOM, LEFT, RIGHT }; - static Edge GetEdge(const Rectangle& rectangle, EdgePosition pos) { switch (pos) { case EdgePosition::TOP: @@ -266,6 +269,15 @@ struct Edge { return x_start == other.x_start && y_start == other.y_start && size == other.size; } + + static bool CompareXThenY(const Edge& a, const Edge& b) { + return std::tie(a.x_start, a.y_start, a.size) < + std::tie(b.x_start, b.y_start, b.size); + } + static bool CompareYThenX(const Edge& a, const Edge& b) { + return std::tie(a.y_start, a.x_start, a.size) < + std::tie(b.y_start, b.x_start, b.size); + } }; } // namespace @@ -290,8 +302,6 @@ bool ReduceNumberofBoxes(std::vector* mandatory_rectangles, absl::flat_hash_map left_edges_to_rectangle; absl::flat_hash_map right_edges_to_rectangle; - using EdgePosition = Edge::EdgePosition; - bool changed_optional = false; bool changed_mandatory = false; @@ -403,5 +413,479 @@ bool ReduceNumberofBoxes(std::vector* mandatory_rectangles, return changed_mandatory; } +Neighbours BuildNeighboursGraph(absl::Span rectangles) { + // To build a graph of 
neighbours, we build a sorted vector for each one of + // the edges (top, bottom, etc) of the rectangles. Then we merge the bottom + // and top vectors and iterate on it. Due to the sorting order, segments where + // the bottom of a rectangle touches the top of another one must consecutive. + std::vector> edges_to_rectangle[4]; + std::vector> neighbours; + neighbours.reserve(2 * rectangles.size()); + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge_position = static_cast(edge_int); + edges_to_rectangle[edge_position].reserve(rectangles.size()); + } + + for (int i = 0; i < rectangles.size(); ++i) { + const Rectangle& rectangle = rectangles[i]; + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge_position = static_cast(edge_int); + const Edge edge = Edge::GetEdge(rectangle, edge_position); + edges_to_rectangle[edge_position].push_back({edge, i}); + } + } + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge_position = static_cast(edge_int); + const bool sort_x_then_y = edge_position == EdgePosition::LEFT || + edge_position == EdgePosition::RIGHT; + const auto cmp = + sort_x_then_y + ? [](const std::pair& a, + const std::pair& + b) { return Edge::CompareXThenY(a.first, b.first); } + : [](const std::pair& a, const std::pair& b) { + return Edge::CompareYThenX(a.first, b.first); + }; + absl::c_sort(edges_to_rectangle[edge_position], cmp); + } + + constexpr struct EdgeData { + EdgePosition edge; + EdgePosition opposite_edge; + bool (*cmp)(const Edge&, const Edge&); + } edge_data[4] = {{.edge = EdgePosition::BOTTOM, + .opposite_edge = EdgePosition::TOP, + .cmp = &Edge::CompareYThenX}, + {.edge = EdgePosition::TOP, + .opposite_edge = EdgePosition::BOTTOM, + .cmp = &Edge::CompareYThenX}, + {.edge = EdgePosition::LEFT, + .opposite_edge = EdgePosition::RIGHT, + .cmp = &Edge::CompareXThenY}, + {.edge = EdgePosition::RIGHT, + .opposite_edge = EdgePosition::LEFT, + .cmp = &Edge::CompareXThenY}}; + + for (int edge_int = 0; edge_int < 4; ++edge_int) { + const EdgePosition edge_position = edge_data[edge_int].edge; + const EdgePosition opposite_edge_position = + edge_data[edge_int].opposite_edge; + auto it = edges_to_rectangle[edge_position].begin(); + for (const auto& [edge, index] : + edges_to_rectangle[opposite_edge_position]) { + while (it != edges_to_rectangle[edge_position].end() && + edge_data[edge_int].cmp(it->first, edge)) { + ++it; + } + if (it == edges_to_rectangle[edge_position].end()) { + break; + } + if (edge_position == EdgePosition::BOTTOM || + edge_position == EdgePosition::TOP) { + while (it != edges_to_rectangle[edge_position].end() && + it->first.y_start == edge.y_start && + it->first.x_start < edge.x_start + edge.size) { + neighbours.push_back({index, opposite_edge_position, it->second}); + neighbours.push_back({it->second, edge_position, index}); + ++it; + } + } else { + while (it != edges_to_rectangle[edge_position].end() && + it->first.x_start == edge.x_start && + it->first.y_start < edge.y_start + edge.size) { + neighbours.push_back({index, opposite_edge_position, it->second}); + neighbours.push_back({it->second, edge_position, index}); + ++it; + } + } + } + } + + gtl::STLSortAndRemoveDuplicates(&neighbours); + return Neighbours(rectangles, neighbours); +} + +std::vector> SplitInConnectedComponents( + const Neighbours& neighbours) { + class GraphView { + public: + explicit GraphView(const Neighbours& neighbours) + : neighbours_(neighbours) {} + absl::Span operator[](int node) const { + temp_.clear(); + for 
(int edge = 0; edge < 4; ++edge) { + const auto edge_neighbors = neighbours_.GetSortedNeighbors( + node, static_cast(edge)); + for (int neighbor : edge_neighbors) { + temp_.push_back(neighbor); + } + } + return temp_; + } + + private: + const Neighbours& neighbours_; + mutable std::vector temp_; + }; + + std::vector> components; + FindStronglyConnectedComponents(neighbours.NumRectangles(), + GraphView(neighbours), &components); + return components; +} + +struct ContourPoint { + IntegerValue x; + IntegerValue y; + int next_box_index; + EdgePosition next_direction; + + bool operator!=(const ContourPoint& other) const { + return x != other.x || y != other.y || + next_box_index != other.next_box_index || + next_direction != other.next_direction; + } +}; + +// This function runs in O(log N). +ContourPoint NextByClockwiseOrder(const ContourPoint& point, + absl::Span rectangles, + const Neighbours& neighbours) { + // This algorithm is very verbose, but it is about handling four cases. In the + // schema below, "-->" is the current direction, "X" the next point and + // the dashed arrow the next direction. + // + // Case 1: + // ++++++++ + // ^ ++++++++ + // : ++++++++ + // : ++++++++ + // ++++++++ + // ---> X ++++++++ + // ****************** + // ****************** + // ****************** + // ****************** + // + // Case 2: + // ^ ++++++++ + // : ++++++++ + // : ++++++++ + // ++++++++ + // ---> X ++++++++ + // *************++++++++ + // *************++++++++ + // ************* + // ************* + // + // Case 3: + // ---> X ...> + // *************++++++++ + // *************++++++++ + // *************++++++++ + // *************++++++++ + // + // Case 4: + // ---> X + // ************* : + // ************* : + // ************* : + // ************* \/ + ContourPoint result; + const Rectangle& cur_rectangle = rectangles[point.next_box_index]; + + EdgePosition cur_edge; + bool clockwise; + // Much of the code below need to know two things: in which direction we are + // going and what edge of which rectangle we are touching. For example, in the + // "Case 4" drawing above we are going RIGHT and touching the TOP edge of the + // current rectangle. This switch statement finds this `cur_edge`. + switch (point.next_direction) { + case EdgePosition::TOP: + if (cur_rectangle.x_max == point.x) { + cur_edge = EdgePosition::RIGHT; + clockwise = false; + } else { + cur_edge = EdgePosition::LEFT; + clockwise = true; + } + break; + case EdgePosition::BOTTOM: + if (cur_rectangle.x_min == point.x) { + cur_edge = EdgePosition::LEFT; + clockwise = false; + } else { + cur_edge = EdgePosition::RIGHT; + clockwise = true; + } + break; + case EdgePosition::LEFT: + if (cur_rectangle.y_max == point.y) { + cur_edge = EdgePosition::TOP; + clockwise = false; + } else { + cur_edge = EdgePosition::BOTTOM; + clockwise = true; + } + break; + case EdgePosition::RIGHT: + if (cur_rectangle.y_min == point.y) { + cur_edge = EdgePosition::BOTTOM; + clockwise = false; + } else { + cur_edge = EdgePosition::TOP; + clockwise = true; + } + break; + } + + // Test case 1. We need to find the next box after the current point in the + // edge we are following in the current direction. 
+ const auto cur_edge_neighbors = + neighbours.GetSortedNeighbors(point.next_box_index, cur_edge); + + const Rectangle fake_box_for_lower_bound = { + .x_min = point.x, .x_max = point.x, .y_min = point.y, .y_max = point.y}; + const auto clockwise_cmp = Neighbours::CompareClockwise(cur_edge); + auto it = absl::c_lower_bound( + cur_edge_neighbors, -1, + [&fake_box_for_lower_bound, rectangles, clockwise_cmp, clockwise](int a, + int b) { + const Rectangle& rectangle_a = + (a == -1 ? fake_box_for_lower_bound : rectangles[a]); + const Rectangle& rectangle_b = + (b == -1 ? fake_box_for_lower_bound : rectangles[b]); + if (clockwise) { + return clockwise_cmp(rectangle_a, rectangle_b); + } else { + return clockwise_cmp(rectangle_b, rectangle_a); + } + }); + + if (it != cur_edge_neighbors.end()) { + // We found box in the current edge. We are in case 1. + result.next_box_index = *it; + const Rectangle& next_rectangle = rectangles[*it]; + switch (point.next_direction) { + case EdgePosition::TOP: + result.x = point.x; + result.y = next_rectangle.y_min; + if (cur_edge == EdgePosition::LEFT) { + result.next_direction = EdgePosition::LEFT; + } else { + result.next_direction = EdgePosition::RIGHT; + } + break; + case EdgePosition::BOTTOM: + result.x = point.x; + result.y = next_rectangle.y_max; + if (cur_edge == EdgePosition::LEFT) { + result.next_direction = EdgePosition::LEFT; + } else { + result.next_direction = EdgePosition::RIGHT; + } + break; + case EdgePosition::LEFT: + result.y = point.y; + result.x = next_rectangle.x_max; + if (cur_edge == EdgePosition::TOP) { + result.next_direction = EdgePosition::TOP; + } else { + result.next_direction = EdgePosition::BOTTOM; + } + break; + case EdgePosition::RIGHT: + result.y = point.y; + result.x = next_rectangle.x_min; + if (cur_edge == EdgePosition::TOP) { + result.next_direction = EdgePosition::TOP; + } else { + result.next_direction = EdgePosition::BOTTOM; + } + break; + } + return result; + } + + // We now know we are not in Case 1, so know the next (x, y) position: it is + // the corner of the current rectangle in the direction we are going. + switch (point.next_direction) { + case EdgePosition::TOP: + result.x = point.x; + result.y = cur_rectangle.y_max; + break; + case EdgePosition::BOTTOM: + result.x = point.x; + result.y = cur_rectangle.y_min; + break; + case EdgePosition::LEFT: + result.x = cur_rectangle.x_min; + result.y = point.y; + break; + case EdgePosition::RIGHT: + result.x = cur_rectangle.x_max; + result.y = point.y; + break; + } + + // Case 2 and 3. + const auto next_edge_neighbors = + neighbours.GetSortedNeighbors(point.next_box_index, point.next_direction); + if (!next_edge_neighbors.empty()) { + // We are looking for the neighbor on the edge of the current box. + const int candidate_index = + clockwise ? 
next_edge_neighbors.front() : next_edge_neighbors.back(); + const Rectangle& next_rectangle = rectangles[candidate_index]; + switch (point.next_direction) { + case EdgePosition::TOP: + case EdgePosition::BOTTOM: + if (next_rectangle.x_min < point.x && point.x < next_rectangle.x_max) { + // Case 2 + result.next_box_index = candidate_index; + if (cur_edge == EdgePosition::LEFT) { + result.next_direction = EdgePosition::LEFT; + } else { + result.next_direction = EdgePosition::RIGHT; + } + return result; + } else if (next_rectangle.x_min == point.x && + cur_edge == EdgePosition::LEFT) { + // Case 3 + result.next_box_index = candidate_index; + result.next_direction = point.next_direction; + return result; + } else if (next_rectangle.x_max == point.x && + cur_edge == EdgePosition::RIGHT) { + // Case 3 + result.next_box_index = candidate_index; + result.next_direction = point.next_direction; + return result; + } + break; + case EdgePosition::LEFT: + case EdgePosition::RIGHT: + if (next_rectangle.y_min < point.y && point.y < next_rectangle.y_max) { + result.next_box_index = candidate_index; + if (cur_edge == EdgePosition::TOP) { + result.next_direction = EdgePosition::TOP; + } else { + result.next_direction = EdgePosition::BOTTOM; + } + return result; + } else if (next_rectangle.y_max == point.y && + cur_edge == EdgePosition::TOP) { + result.next_box_index = candidate_index; + result.next_direction = point.next_direction; + return result; + } else if (next_rectangle.y_min == point.y && + cur_edge == EdgePosition::BOTTOM) { + result.next_box_index = candidate_index; + result.next_direction = point.next_direction; + return result; + } + break; + } + } + + // Now we must be in the case 4. + result.next_box_index = point.next_box_index; + switch (point.next_direction) { + case EdgePosition::TOP: + case EdgePosition::BOTTOM: + if (cur_edge == EdgePosition::LEFT) { + result.next_direction = EdgePosition::RIGHT; + } else { + result.next_direction = EdgePosition::LEFT; + } + break; + case EdgePosition::LEFT: + case EdgePosition::RIGHT: + if (cur_edge == EdgePosition::TOP) { + result.next_direction = EdgePosition::BOTTOM; + } else { + result.next_direction = EdgePosition::TOP; + } + break; + } + return result; +} + +ShapePath TraceBoundary( + const std::pair& starting_step_point, + int starting_box_index, absl::Span rectangles, + const Neighbours& neighbours) { + // First find which direction we need to go to follow the border in the + // clockwise order. 
+ const Rectangle& initial_rec = rectangles[starting_box_index]; + bool touching_edge[4]; + touching_edge[EdgePosition::LEFT] = + initial_rec.x_min == starting_step_point.first; + touching_edge[EdgePosition::RIGHT] = + initial_rec.x_max == starting_step_point.first; + touching_edge[EdgePosition::TOP] = + initial_rec.y_max == starting_step_point.second; + touching_edge[EdgePosition::BOTTOM] = + initial_rec.y_min == starting_step_point.second; + + EdgePosition next_direction; + if (touching_edge[EdgePosition::LEFT]) { + if (touching_edge[EdgePosition::TOP]) { + next_direction = EdgePosition::RIGHT; + } else { + next_direction = EdgePosition::TOP; + } + } else if (touching_edge[EdgePosition::RIGHT]) { + if (touching_edge[EdgePosition::BOTTOM]) { + next_direction = EdgePosition::LEFT; + } else { + next_direction = EdgePosition::BOTTOM; + } + } else if (touching_edge[EdgePosition::TOP]) { + next_direction = EdgePosition::LEFT; + } else if (touching_edge[EdgePosition::BOTTOM]) { + next_direction = EdgePosition::RIGHT; + } else { + LOG(FATAL) + << "TraceBoundary() got a `starting_step_point` that is not in an edge " + "of the rectangle of `starting_box_index`. This is not allowed."; + } + const ContourPoint starting_point = {.x = starting_step_point.first, + .y = starting_step_point.second, + .next_box_index = starting_box_index, + .next_direction = next_direction}; + + ShapePath result; + for (ContourPoint point = starting_point; + result.step_points.empty() || point != starting_point; + point = NextByClockwiseOrder(point, rectangles, neighbours)) { + if (!result.step_points.empty() && + point.x == result.step_points.back().first && + point.y == result.step_points.back().second) { + // There is a special corner-case of the algorithm using the neighbours. + // Consider the following set-up: + // + // ******** | + // ******** | + // ******** +----> + // ########++++++++ + // ########++++++++ + // ########++++++++ + // + // In this case, the only way the algorithm could reach the "+" box is via + // the "#" box, but which is doesn't contribute to the path. The algorithm + // returns a technically correct zero-size interval, which might be useful + // for callers that want to count the "#" box as visited, but this is not + // our case. 
+ result.touching_box_index.back() = point.next_box_index; + } else { + result.touching_box_index.push_back(point.next_box_index); + result.step_points.push_back({point.x, point.y}); + } + } + result.touching_box_index.push_back(result.touching_box_index.front()); + result.step_points.push_back(result.step_points.front()); + return result; +} + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/2d_rectangle_presolve.h b/ortools/sat/2d_rectangle_presolve.h index d5cefb9c26b..28821877e80 100644 --- a/ortools/sat/2d_rectangle_presolve.h +++ b/ortools/sat/2d_rectangle_presolve.h @@ -14,10 +14,16 @@ #ifndef OR_TOOLS_SAT_2D_RECTANGLE_PRESOLVE_H_ #define OR_TOOLS_SAT_2D_RECTANGLE_PRESOLVE_H_ +#include +#include #include +#include "absl/algorithm/container.h" +#include "absl/container/flat_hash_map.h" +#include "absl/container/inlined_vector.h" #include "absl/types/span.h" #include "ortools/sat/diffn_util.h" +#include "ortools/sat/integer.h" namespace operations_research { namespace sat { @@ -44,6 +50,136 @@ bool PresolveFixed2dRectangles( bool ReduceNumberofBoxes(std::vector* mandatory_rectangles, std::vector* optional_rectangles); +enum EdgePosition { TOP = 0, RIGHT = 1, BOTTOM = 2, LEFT = 3 }; + +template +void AbslStringify(Sink& sink, EdgePosition e) { + switch (e) { + case EdgePosition::TOP: + sink.Append("TOP"); + break; + case EdgePosition::RIGHT: + sink.Append("RIGHT"); + break; + case EdgePosition::BOTTOM: + sink.Append("BOTTOM"); + break; + case EdgePosition::LEFT: + sink.Append("LEFT"); + break; + } +} + +// Given a set of non-overlapping rectangles, precompute a data-structure that +// allow for each rectangle to find the adjacent rectangle along an edge. +// +// Note that it only consider adjacent rectangles whose segments has a +// intersection of non-zero size. In particular, rectangles as following are not +// considered adjacent: +// +// ******** +// ******** +// ******** +// ******** +// +++++++++ +// +++++++++ +// +++++++++ +// +++++++++ +// +// Precondition: All rectangles must be disjoint. +class Neighbours { + public: + class CompareClockwise { + public: + explicit CompareClockwise(EdgePosition edge) : edge_(edge) {} + + bool operator()(const Rectangle& a, const Rectangle& b) const { + switch (edge_) { + case EdgePosition::BOTTOM: + return std::tie(a.x_min, a.x_max) > std::tie(b.x_min, b.x_max); + case EdgePosition::TOP: + return std::tie(a.x_min, a.x_max) < std::tie(b.x_min, b.x_max); + case EdgePosition::LEFT: + return std::tie(a.y_min, a.y_max) < std::tie(b.y_min, b.y_max); + case EdgePosition::RIGHT: + return std::tie(a.y_min, a.y_max) > std::tie(b.y_min, b.y_max); + } + } + EdgePosition edge_; + }; + + explicit Neighbours( + absl::Span rectangles, + absl::Span> neighbors) + : size_(rectangles.size()) { + for (const auto& [box_index, edge, neighbor] : neighbors) { + neighbors_[edge][box_index].push_back(neighbor); + } + for (int edge = 0; edge < 4; ++edge) { + for (auto& [box_index, neighbors] : neighbors_[edge]) { + absl::c_sort(neighbors, [&rectangles, edge](int a, int b) { + return CompareClockwise(static_cast(edge))( + rectangles[a], rectangles[b]); + }); + } + } + } + + int NumRectangles() const { return size_; } + + // Neighbors are sorted in the clockwise order. 
+ absl::Span GetSortedNeighbors(int rectangle_index, + EdgePosition edge) const { + if (auto it = neighbors_[edge].find(rectangle_index); + it != neighbors_[edge].end()) { + return it->second; + } else { + return {}; + } + } + + private: + absl::flat_hash_map> neighbors_[4]; + int size_; +}; + +Neighbours BuildNeighboursGraph(absl::Span rectangles); + +std::vector> SplitInConnectedComponents( + const Neighbours& neighbours); + +// Generally, given a set of non-overlapping rectangles and a path that doesn't +// cross itself, the path can be cut into segments that touch only one single +// rectangle in the interior of the region delimited by the path. This struct +// holds a path cut into such segments. In particular, for the contour of an +// union of rectangles, the path is a subset of the union of all the rectangle's +// edges. +struct ShapePath { + // The two vectors should have exactly the same size. + std::vector> step_points; + // touching_box_index[i] contains the index of the unique interior rectangle + // touching the segment step_points[i]->step_points[(i+1)%size]. + std::vector touching_box_index; +}; + +// Returns a path delimiting a boundary of the union of a set of rectangles. It +// should work for both the exterior boundary and the boundaries of the holes +// inside the union. The path will start on `starting_point` and follow the +// boundary on clockwise order. +// +// `starting_point` should be a point in the boundary and `starting_box_index` +// the index of a rectangle with one edge containing `starting_point`. +// +// The resulting `path` satisfy: +// - path.step_points.front() == path.step_points.back() == starting_point +// - path.touching_box_index.front() == path.touching_box_index.back() == +// == starting_box_index +// +ShapePath TraceBoundary( + const std::pair& starting_step_point, + int starting_box_index, absl::Span rectangles, + const Neighbours& neighbours); + } // namespace sat } // namespace operations_research diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index 222559fa884..a6781b4ccdf 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -14,6 +14,7 @@ # Home of CP/SAT solver (which includes SAT, max-SAT and PB problems). 
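For reference (not part of the patch): a usage sketch of the 2d_rectangle_presolve helpers declared above, combined with the new GenerateNonConflictingRectanglesWithPacking() testing helper. The template parameters and the choice of starting point are assumptions inferred from the .cc code in this patch; ExteriorContour() and Example() are illustrative names, not part of the library.

#include <tuple>
#include <utility>
#include <vector>

#include "absl/random/random.h"
#include "absl/types/span.h"
#include "ortools/sat/2d_orthogonal_packing_testing.h"
#include "ortools/sat/2d_rectangle_presolve.h"
#include "ortools/sat/diffn_util.h"
#include "ortools/sat/integer.h"

namespace operations_research::sat {

// Traces the clockwise boundary of the union component that contains the
// bottom-most rectangle. The input rectangles must be pairwise disjoint.
// The returned path is closed: front() == back().
std::vector<std::pair<IntegerValue, IntegerValue>> ExteriorContour(
    absl::Span<const Rectangle> rectangles) {
  const Neighbours neighbours = BuildNeighboursGraph(rectangles);

  // The bottom-most (then left-most) rectangle has its bottom-left corner on
  // the boundary of the union, so it is a valid starting point.
  int start = 0;
  for (int i = 1; i < static_cast<int>(rectangles.size()); ++i) {
    if (std::make_tuple(rectangles[i].y_min, rectangles[i].x_min) <
        std::make_tuple(rectangles[start].y_min, rectangles[start].x_min)) {
      start = i;
    }
  }
  const ShapePath path =
      TraceBoundary({rectangles[start].x_min, rectangles[start].y_min}, start,
                    rectangles, neighbours);
  return path.step_points;
}

void Example() {
  absl::BitGen random;
  // Pack around 20 random rectangles into a 100x100 bounding box.
  const std::vector<Rectangle> rectangles =
      GenerateNonConflictingRectanglesWithPacking(
          {IntegerValue(100), IntegerValue(100)}, 20, random);
  if (rectangles.empty()) return;
  const auto contour = ExteriorContour(rectangles);
  (void)contour;
}

}  // namespace operations_research::sat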
load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") load("@rules_java//java:defs.bzl", "java_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") load("@rules_python//python:proto.bzl", "py_proto_library") @@ -61,6 +62,12 @@ cc_proto_library( deps = [":sat_parameters_proto"], ) +go_proto_library( + name = "sat_parameters_go_proto", + proto = ":sat_parameters_proto", + importpath = "github.com/google/or-tools/ortools/sat/proto/satparameters" +) + py_proto_library( name = "sat_parameters_py_pb2", deps = [":sat_parameters_proto"], @@ -81,6 +88,12 @@ cc_proto_library( deps = [":cp_model_proto"], ) +go_proto_library( + name = "cp_model_go_proto", + importpath = "github.com/google/or-tools/ortools/sat/proto/cpmodel", + proto = ":cp_model_proto", +) + py_proto_library( name = "cp_model_py_pb2", deps = [":cp_model_proto"], @@ -302,7 +315,6 @@ cc_library( ":clause", ":cp_model_cc_proto", ":cp_model_checker", - ":cp_model_lns", ":cp_model_loader", ":cp_model_mapping", ":cp_model_postsolve", @@ -614,6 +626,7 @@ cc_library( hdrs = ["presolve_context.h"], deps = [ ":cp_model_cc_proto", + ":cp_model_checker", ":cp_model_loader", ":cp_model_mapping", ":cp_model_utils", @@ -1036,6 +1049,17 @@ cc_library( ], ) +cc_test( + name = "symmetry_util_test", + size = "small", + srcs = ["symmetry_util_test.cc"], + deps = [ + ":symmetry_util", + "//ortools/algorithms:sparse_permutation", + "//ortools/base:gmock_main", + ], +) + cc_library( name = "var_domination", srcs = ["var_domination.cc"], @@ -1906,7 +1930,6 @@ cc_library( hdrs = ["stat_tables.h"], deps = [ ":cp_model_cc_proto", - ":cp_model_lns", ":linear_programming_constraint", ":model", ":sat_solver", @@ -2019,7 +2042,12 @@ cc_library( deps = [ ":diffn_util", ":integer", + "//ortools/base:stl_util", + "//ortools/graph:strongly_connected_components", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/container:inlined_vector", "@com_google_absl//absl/log", "@com_google_absl//absl/log:check", "@com_google_absl//absl/strings", @@ -2121,6 +2149,7 @@ cc_library( ":cp_model_cc_proto", ":cp_model_mapping", ":cp_model_presolve", + ":cp_model_solver_helpers", ":cp_model_utils", ":integer", ":linear_constraint_manager", diff --git a/ortools/sat/CMakeLists.txt b/ortools/sat/CMakeLists.txt index 027aa46c6f8..99f2113edb4 100644 --- a/ortools/sat/CMakeLists.txt +++ b/ortools/sat/CMakeLists.txt @@ -12,6 +12,7 @@ # limitations under the License. 
file(GLOB _SRCS "*.h" "*.cc") +list(FILTER _SRCS EXCLUDE REGEX ".*/.*_test.cc") list(REMOVE_ITEM _SRCS ${CMAKE_CURRENT_SOURCE_DIR}/opb_reader.h ${CMAKE_CURRENT_SOURCE_DIR}/sat_cnf_reader.h @@ -39,6 +40,19 @@ target_link_libraries(${NAME} PRIVATE ${PROJECT_NAMESPACE}::ortools_proto) #add_library(${PROJECT_NAMESPACE}::sat ALIAS ${NAME}) +if(BUILD_TESTING) + file(GLOB _TEST_SRCS "*_test.cc") + foreach(FILE_NAME IN LISTS _TEST_SRCS) + ortools_cxx_test( + FILE_NAME + ${FILE_NAME} + DEPS + GTest::gmock + GTest::gtest_main + ) + endforeach() +endif() + # Sat Runner add_executable(sat_runner) target_sources(sat_runner PRIVATE "sat_runner.cc") diff --git a/ortools/sat/cp_model_lns.cc b/ortools/sat/cp_model_lns.cc index 7ec0ff90c75..5e8104a47fe 100644 --- a/ortools/sat/cp_model_lns.cc +++ b/ortools/sat/cp_model_lns.cc @@ -44,6 +44,7 @@ #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cp_model_presolve.h" +#include "ortools/sat/cp_model_solver_helpers.h" #include "ortools/sat/cp_model_utils.h" #include "ortools/sat/integer.h" #include "ortools/sat/linear_constraint_manager.h" @@ -1278,18 +1279,18 @@ void GetRandomSubset(double relative_size, std::vector* base, } // namespace Neighborhood RelaxRandomVariablesGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector fixed_variables = helper_.ActiveVariables(); - GetRandomSubset(1.0 - difficulty, &fixed_variables, random); + GetRandomSubset(1.0 - data.difficulty, &fixed_variables, random); return helper_.FixGivenVariables( initial_solution, {fixed_variables.begin(), fixed_variables.end()}); } Neighborhood RelaxRandomConstraintsGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { - if (helper_.DifficultyMeansFullNeighborhood(difficulty)) { + if (helper_.DifficultyMeansFullNeighborhood(data.difficulty)) { return helper_.FullNeighborhood(); } @@ -1308,7 +1309,7 @@ Neighborhood RelaxRandomConstraintsGenerator::Generate( const int num_active_vars = helper_.ActiveVariablesWhileHoldingLock().size(); - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == num_active_vars) return helper_.FullNeighborhood(); // TODO(user): Clean-up when target_size == 0. @@ -1333,7 +1334,7 @@ Neighborhood RelaxRandomConstraintsGenerator::Generate( // Note that even if difficulty means full neighborhood, we go through the // generation process to never get out of a connected components. 
Neighborhood VariableGraphNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const int num_model_vars = helper_.ModelProto().variables_size(); std::vector visited_variables_set(num_model_vars, false); @@ -1354,7 +1355,7 @@ Neighborhood VariableGraphNeighborhoodGenerator::Generate( helper_.ActiveVariablesWhileHoldingLock().size(); const int num_objective_variables = helper_.ActiveObjectiveVariablesWhileHoldingLock().size(); - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == num_active_vars) return helper_.FullNeighborhood(); const int first_var = @@ -1403,7 +1404,7 @@ Neighborhood VariableGraphNeighborhoodGenerator::Generate( // Note that even if difficulty means full neighborhood, we go through the // generation process to never get out of a connected components. Neighborhood ArcGraphNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const int num_model_vars = helper_.ModelProto().variables_size(); if (num_model_vars == 0) return helper_.NoNeighborhood(); @@ -1424,7 +1425,7 @@ Neighborhood ArcGraphNeighborhoodGenerator::Generate( vars_to_constraints = helper_.VarToConstraint(); } - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == 0) return helper_.NoNeighborhood(); // We pick a variable from the objective. @@ -1488,7 +1489,7 @@ Neighborhood ArcGraphNeighborhoodGenerator::Generate( // Note that even if difficulty means full neighborhood, we go through the // generation process to never get out of a connected components. Neighborhood ConstraintGraphNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const int num_model_constraints = helper_.ModelProto().constraints_size(); if (num_model_constraints == 0) { @@ -1507,7 +1508,7 @@ Neighborhood ConstraintGraphNeighborhoodGenerator::Generate( absl::ReaderMutexLock graph_lock(&helper_.graph_mutex_); const int num_active_vars = helper_.ActiveVariablesWhileHoldingLock().size(); - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == num_active_vars) return helper_.FullNeighborhood(); // Start by a random constraint. 
@@ -1556,7 +1557,7 @@ Neighborhood ConstraintGraphNeighborhoodGenerator::Generate( } Neighborhood DecompositionGraphNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { int max_width = 0; int size_at_min_width_after_100; @@ -1572,7 +1573,7 @@ Neighborhood DecompositionGraphNeighborhoodGenerator::Generate( const int num_active_vars = helper_.ActiveVariablesWhileHoldingLock().size(); - const int target_size = std::ceil(difficulty * num_active_vars); + const int target_size = std::ceil(data.difficulty * num_active_vars); if (target_size == num_active_vars) return helper_.FullNeighborhood(); const int num_vars = helper_.VarToConstraint().size(); @@ -1721,94 +1722,209 @@ Neighborhood DecompositionGraphNeighborhoodGenerator::Generate( namespace { -// Given a (sub)set of binary variables and their initial solution values, -// returns a local branching constraint over these variables, that is: -// sum_{i : s[i] == 0} x_i + sum_{i : s[i] == 1} (1 - x_i) <= k -// where s is the initial solution and k is the neighborhood size. Requires all -// variables and initial solution values to be binary. -ConstraintProto LocalBranchingConstraint( - const std::vector& variable_indices, - const std::vector& initial_solution, const int neighborhood_size) { - DCHECK_EQ(variable_indices.size(), initial_solution.size()); - DCHECK_GE(neighborhood_size, 0); - ConstraintProto local_branching_constraint; - local_branching_constraint.set_name("local_branching"); - LinearConstraintProto* linear = local_branching_constraint.mutable_linear(); - int lhs_constant_value = 0; - for (int i = 0; i < variable_indices.size(); ++i) { - if (initial_solution[i] == 0) { - linear->add_coeffs(1); - linear->add_vars(variable_indices[i]); - } else { - DCHECK_EQ(initial_solution[i], 1); - linear->add_coeffs(-1); - linear->add_vars(variable_indices[i]); - lhs_constant_value++; - } +// Create a constraint sum (X - LB) + sum (UB - X) <= rhs. +ConstraintProto DistanceToBoundsSmallerThanConstraint( + const std::vector>& dist_to_lower_bound, + const std::vector>& dist_to_upper_bound, + const int64_t rhs) { + DCHECK_GE(rhs, 0); + ConstraintProto new_constraint; + LinearConstraintProto* linear = new_constraint.mutable_linear(); + int64_t lhs_constant_value = 0; + for (const auto [var, lb] : dist_to_lower_bound) { + // We add X - LB + linear->add_coeffs(1); + linear->add_vars(var); + lhs_constant_value -= lb; + } + for (const auto [var, ub] : dist_to_upper_bound) { + // We add UB - X + lhs_constant_value += ub; + linear->add_coeffs(-1); + linear->add_vars(var); } - linear->add_domain(-lhs_constant_value); - linear->add_domain(-lhs_constant_value + neighborhood_size); - return local_branching_constraint; + linear->add_domain(std::numeric_limits::min()); + linear->add_domain(rhs - lhs_constant_value); + return new_constraint; } } // namespace Neighborhood LocalBranchingLpBasedNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { - std::vector active_variables = helper_.ActiveVariables(); - - // Collect active binary variables and corresponding initial solution values. - // TODO(user): Extend to integer variables. 
- std::vector binary_var_indices; - std::vector non_binary_var_indices; - std::vector binary_var_initial_solution; - for (const int active_var_index : active_variables) { - const IntegerVariableProto& var = - helper_.ModelProto().variables(active_var_index); - if (var.domain_size() == 2 && var.domain(0) == 0 && var.domain(1) == 1) { - binary_var_indices.push_back(active_var_index); - binary_var_initial_solution.push_back( - initial_solution.solution(active_var_index)); + const std::vector active_variables = helper_.ActiveVariables(); + if (active_variables.empty()) return helper_.NoNeighborhood(); + + { + // Quick corner case in case the difficulty is too high. This is mainly + // useful when testing with only that kind of LNS to abort early on + // super-easy problems. + const int size = active_variables.size(); + if (static_cast(std::ceil(data.difficulty * size)) == size) { + return helper_.FullNeighborhood(); + } + } + + // These are candidates for relaxation. The scores will be filled later. Active + // variables not kept as candidates will be added to other_variables. + std::vector> candidates_with_score; + std::vector other_variables; + + // Our extra relaxation constraint will be: the sum of distances to the respective + // bounds must be smaller than a constant that depends on the difficulty. + std::vector> dist_to_lower_bound; + std::vector> dist_to_upper_bound; + + // For the "easy" part of the extra constraint, we either look only at the + // binary variables, or we extend that to all variables at their bound. + const bool only_look_at_binary = absl::Bernoulli(random, 0.5); + + // We copy the model early to have access to reduced domains. + // TODO(user): that might not be the most efficient if we abort just below. + CpModelProto local_cp_model = helper_.UpdatedModelProtoCopy(); + + // Loop over active variables. + bool some_non_binary_at_bound = false; + for (const int var : active_variables) { + DCHECK_LT(var, initial_solution.solution().size()); + DCHECK_LT(var, local_cp_model.variables().size()); + const IntegerVariableProto& var_proto = local_cp_model.variables(var); + const int64_t base_value = initial_solution.solution(var); + const bool is_binary = var_proto.domain_size() == 2 && + var_proto.domain(0) == 0 && var_proto.domain(1) == 1; + if (only_look_at_binary && !is_binary) { + other_variables.push_back(var); + continue; + } + + DCHECK(!var_proto.domain().empty()); + const int64_t domain_min = var_proto.domain(0); + const int64_t domain_max = var_proto.domain(var_proto.domain().size() - 1); + if (base_value <= domain_min) { + if (!is_binary) some_non_binary_at_bound = true; + candidates_with_score.push_back({var, 0.0}); + dist_to_lower_bound.push_back({var, domain_min}); + } else if (base_value >= domain_max) { + if (!is_binary) some_non_binary_at_bound = true; + candidates_with_score.push_back({var, 0.0}); + dist_to_upper_bound.push_back({var, domain_max}); } else { - non_binary_var_indices.push_back(active_var_index); + other_variables.push_back(var); } } - if (binary_var_indices.empty()) { + + bool use_hamming_for_others = false; + if (!other_variables.empty() && absl::Bernoulli(random, 0.5)) { + use_hamming_for_others = true; + } + if (!use_hamming_for_others && candidates_with_score.empty()) { return helper_.NoNeighborhood(); } - const int target_size = - static_cast(std::ceil(difficulty * binary_var_indices.size())); + // With this option, we will create a bunch of Boolean variables + // and add the constraints: "bool==0 => var == value_in_base_solution".
+ if (use_hamming_for_others) { + for (const int var : other_variables) { + const int indicator = local_cp_model.variables().size(); + auto* var_proto = local_cp_model.add_variables(); + var_proto->add_domain(0); + var_proto->add_domain(1); + auto* new_ct = local_cp_model.add_constraints(); + new_ct->add_enforcement_literal(NegatedRef(indicator)); + + const int64_t base_value = initial_solution.solution(var); + new_ct->mutable_linear()->add_domain(base_value); + new_ct->mutable_linear()->add_domain(base_value); + new_ct->mutable_linear()->add_vars(var); + new_ct->mutable_linear()->add_coeffs(1); + + // Add it to the distance constraint. + dist_to_lower_bound.push_back({indicator, 0}); + candidates_with_score.push_back({var, 0.0}); + } + + // Clear other_variables so that they are not added at the end. + other_variables.clear(); + } + + // Constrain the distance to the bounds. + const int size = dist_to_upper_bound.size() + dist_to_lower_bound.size(); + const int target_size = static_cast(std::ceil(data.difficulty * size)); + DCHECK_LE(target_size, candidates_with_score.size()); + *local_cp_model.add_constraints() = DistanceToBoundsSmallerThanConstraint( + dist_to_lower_bound, dist_to_upper_bound, target_size); - // Create and solve local branching LP. - CpModelProto local_branching_model = helper_.UpdatedModelProtoCopy(); - *local_branching_model.add_constraints() = LocalBranchingConstraint( - binary_var_indices, binary_var_initial_solution, target_size); Model model("lb_relax_lns_lp"); auto* const params = model.GetOrCreate(); + // Parameters to enable solving the LP only. params->set_num_workers(1); params->set_linearization_level(2); params->set_stop_after_root_propagation(true); params->set_add_lp_constraints_lazily(false); + // Parameters to attempt to speed up the solve. params->set_cp_model_presolve(false); params->set_cp_model_probing_level(0); + // Parameters to limit time spent in the solve. The max number of iterations // is relaxed from the default since we rely more on deterministic time. params->set_root_lp_iterations(100000); + + // TODO(user): This is a lot longer than a normal LNS, so it might cause + // issues with the current round-robin selection based on the number of calls. params->set_max_deterministic_time(10); + model.GetOrCreate()->ResetLimitFromParameters(*params); if (global_time_limit_ != nullptr) { global_time_limit_->UpdateLocalLimit(model.GetOrCreate()); } - solve_callback_(local_branching_model, &model); - // Skip LNS if no (full) feasible solution was found for the LP. - const auto lp_constraints = - model.GetOrCreate(); - for (const LinearProgrammingConstraint* lp_constraint : *lp_constraints) { - if (!lp_constraint->HasSolution()) { + // Tricky: we want the inner_objective_lower_bound in the response to be in + // terms of the current problem, not the user-facing one. + if (local_cp_model.has_objective()) { + local_cp_model.mutable_objective()->set_integer_before_offset(0); + local_cp_model.mutable_objective()->set_integer_after_offset(0); + local_cp_model.mutable_objective()->set_integer_scaling_factor(0); + } + + // Solve. + // + // TODO(user): Shall we pass the objective upper bound so we have a better + // chance to fix variables via reduced cost fixing? + // + // TODO(user): Can the current solution provide a warm-start for the + // LP? + auto* response_manager = model.GetOrCreate(); + response_manager->InitializeObjective(local_cp_model); + LoadCpModel(local_cp_model, &model); + SolveLoadedCpModel(local_cp_model, &model); + + // Update dtime.
+ data.deterministic_time += + model.GetOrCreate()->GetElapsedDeterministicTime(); + + // Analyze the status of this first "solve". + // + // TODO(user): If we run into this case, it also means that every other LNS + // that tries to relax more variables than this one will never be able to improve. + if (local_cp_model.has_objective()) { + const CpSolverResponse response = response_manager->GetResponse(); + if (response.status() == CpSolverStatus::INFEASIBLE) { + data.status = CpSolverStatus::INFEASIBLE; + AddSolveData(data); + return helper_.NoNeighborhood(); + } + + const int64_t inner_lb = response.inner_objective_lower_bound(); + const int64_t current_inner_obj = ComputeInnerObjective( + local_cp_model.objective(), initial_solution.solution()); + if (inner_lb >= current_inner_obj) { + // In this case, we cannot improve on the base solution. + // We could try to find a different solution for diversity, but we do have + // other neighborhoods for that. Let's abort early. + data.status = CpSolverStatus::OPTIMAL; // We cannot improve. + AddSolveData(data); return helper_.NoNeighborhood(); } } @@ -1817,28 +1933,52 @@ Neighborhood LocalBranchingLpBasedNeighborhoodGenerator::Generate( // random noise for tie breaking. const auto var_mapping = model.GetOrCreate(); const auto lp_solution = model.GetOrCreate(); - std::vector differences; - for (int i = 0; i < binary_var_indices.size(); ++i) { - double difference = - std::abs(lp_solution->at(var_mapping->Integer(binary_var_indices[i])) - - binary_var_initial_solution[i]); - differences.push_back(difference + - absl::Uniform(random, 0.0, 1e-6)); + if (lp_solution->empty()) { + // We likely didn't solve the LP at all, so let's not use this neighborhood. + return helper_.NoNeighborhood(); + } + for (auto& [var, score] : candidates_with_score) { + const IntegerVariable integer = var_mapping->Integer(var); + DCHECK_LT(integer, lp_solution->size()); + DCHECK_LT(var, initial_solution.solution().size()); + const double difference = + std::abs(lp_solution->at(var_mapping->Integer(var)) - + initial_solution.solution(var)); + score = difference + absl::Uniform(random, 0.0, 1e-6); } // Take the target_size variables with largest differences. - std::vector vars_to_relax(binary_var_indices.size()); - absl::c_iota(vars_to_relax, 0); - absl::c_sort(vars_to_relax, [&differences](const int i, const int j) { - return differences[i] > differences[j]; + absl::c_sort(candidates_with_score, [](const std::pair& a, + const std::pair& b) { + return a.second > b.second; }); - vars_to_relax.resize(target_size); - // For now, we include all non-binary variables in the relaxation, since their - // values are likely tied to the binary values. - vars_to_relax.insert(vars_to_relax.end(), non_binary_var_indices.begin(), - non_binary_var_indices.end()); - return helper_.RelaxGivenVariables(initial_solution, vars_to_relax); + std::vector vars_to_relax; + vars_to_relax.reserve(target_size); + DCHECK_LE(target_size, candidates_with_score.size()); + for (int i = 0; i < target_size; ++i) { + vars_to_relax.push_back(candidates_with_score[i].first); + } + + // We will also relax all "other variables". We assume their values are likely + // tied to the other ones. + vars_to_relax.insert(vars_to_relax.end(), other_variables.begin(), + other_variables.end()); + Neighborhood result = + helper_.RelaxGivenVariables(initial_solution, vars_to_relax); + + // Let the name reflect the type.
+ // + // TODO(user): Unfortunately like this we have a common difficulty for all + // variant, we should probably fix that. + result.source_info = "lb_relax_lns"; + absl::StrAppend(&result.source_info, + some_non_binary_at_bound ? "_int" : "_bool"); + if (use_hamming_for_others) { + absl::StrAppend(&result.source_info, "_h"); + } + + return result; } namespace { @@ -2007,22 +2147,22 @@ Neighborhood GenerateSchedulingNeighborhoodFromRelaxedIntervals( } Neighborhood RandomIntervalSchedulingNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector intervals_to_relax = helper_.GetActiveIntervals(initial_solution); - GetRandomSubset(difficulty, &intervals_to_relax, random); + GetRandomSubset(data.difficulty, &intervals_to_relax, random); return GenerateSchedulingNeighborhoodFromRelaxedIntervals( intervals_to_relax, {}, initial_solution, random, helper_); } Neighborhood RandomPrecedenceSchedulingNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> precedences = helper_.GetSchedulingPrecedences({}, initial_solution, random); - GetRandomSubset(1.0 - difficulty, &precedences, random); + GetRandomSubset(1.0 - data.difficulty, &precedences, random); return GenerateSchedulingNeighborhoodFromIntervalPrecedences( precedences, initial_solution, helper_); } @@ -2041,7 +2181,7 @@ void AppendVarsFromAllIntervalIndices(absl::Span indices, } // namespace Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const std::vector active_intervals = helper_.GetActiveIntervals(initial_solution); @@ -2049,7 +2189,7 @@ Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate( if (active_intervals.empty()) return helper_.FullNeighborhood(); const TimePartition partition = PartitionIndicesAroundRandomTimeWindow( - active_intervals, helper_.ModelProto(), initial_solution, difficulty, + active_intervals, helper_.ModelProto(), initial_solution, data.difficulty, random); std::vector intervals_to_relax; intervals_to_relax.reserve(partition.selected_indices.size()); @@ -2074,7 +2214,7 @@ Neighborhood SchedulingTimeWindowNeighborhoodGenerator::Generate( } Neighborhood SchedulingResourceWindowsNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector intervals_to_relax; std::vector variables_to_fix; @@ -2082,8 +2222,8 @@ Neighborhood SchedulingResourceWindowsNeighborhoodGenerator::Generate( for (const std::vector& intervals : intervals_in_constraints_) { active_intervals = helper_.KeepActiveIntervals(intervals, initial_solution); const TimePartition partition = PartitionIndicesAroundRandomTimeWindow( - active_intervals, helper_.ModelProto(), initial_solution, difficulty, - random); + active_intervals, helper_.ModelProto(), initial_solution, + data.difficulty, random); intervals_to_relax.insert(intervals_to_relax.end(), partition.selected_indices.begin(), partition.selected_indices.end()); @@ -2109,11 +2249,11 @@ Neighborhood SchedulingResourceWindowsNeighborhoodGenerator::Generate( } Neighborhood RandomRectanglesPackingNeighborhoodGenerator::Generate( 
- const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> rectangles_to_freeze = helper_.GetActiveRectangles(initial_solution); - GetRandomSubset(1.0 - difficulty, &rectangles_to_freeze, random); + GetRandomSubset(1.0 - data.difficulty, &rectangles_to_freeze, random); absl::flat_hash_set variables_to_freeze; for (const auto& [x, y] : rectangles_to_freeze) { @@ -2125,11 +2265,11 @@ Neighborhood RandomRectanglesPackingNeighborhoodGenerator::Generate( } Neighborhood RandomPrecedencesPackingNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> rectangles_to_relax = helper_.GetActiveRectangles(initial_solution); - GetRandomSubset(difficulty, &rectangles_to_relax, random); + GetRandomSubset(data.difficulty, &rectangles_to_relax, random); std::vector intervals_to_relax; for (const auto& [x, y] : rectangles_to_relax) { intervals_to_relax.push_back(x); @@ -2142,7 +2282,7 @@ Neighborhood RandomPrecedencesPackingNeighborhoodGenerator::Generate( } Neighborhood SlicePackingNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const std::vector> active_rectangles = helper_.GetActiveRectangles(initial_solution); @@ -2154,8 +2294,8 @@ Neighborhood SlicePackingNeighborhoodGenerator::Generate( } const TimePartition partition = PartitionIndicesAroundRandomTimeWindow( - projected_intervals, helper_.ModelProto(), initial_solution, difficulty, - random); + projected_intervals, helper_.ModelProto(), initial_solution, + data.difficulty, random); std::vector indices_to_fix(active_rectangles.size(), true); for (const int index : partition.selected_indices) { indices_to_fix[index] = false; @@ -2177,7 +2317,7 @@ Neighborhood SlicePackingNeighborhoodGenerator::Generate( } Neighborhood RoutingRandomNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { const std::vector> all_paths = helper_.GetRoutingPaths(initial_solution); @@ -2190,13 +2330,13 @@ Neighborhood RoutingRandomNeighborhoodGenerator::Generate( std::vector fixed_variables(all_path_variables.begin(), all_path_variables.end()); std::sort(fixed_variables.begin(), fixed_variables.end()); - GetRandomSubset(1.0 - difficulty, &fixed_variables, random); + GetRandomSubset(1.0 - data.difficulty, &fixed_variables, random); return helper_.FixGivenVariables( initial_solution, {fixed_variables.begin(), fixed_variables.end()}); } Neighborhood RoutingPathNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> all_paths = helper_.GetRoutingPaths(initial_solution); @@ -2209,7 +2349,7 @@ Neighborhood RoutingPathNeighborhoodGenerator::Generate( // Select variables to relax. 
const int num_variables_to_relax = - static_cast(all_path_variables.size() * difficulty); + static_cast(all_path_variables.size() * data.difficulty); absl::flat_hash_set relaxed_variables; while (relaxed_variables.size() < num_variables_to_relax) { DCHECK(!all_paths.empty()); @@ -2242,7 +2382,7 @@ Neighborhood RoutingPathNeighborhoodGenerator::Generate( } Neighborhood RoutingFullPathNeighborhoodGenerator::Generate( - const CpSolverResponse& initial_solution, double difficulty, + const CpSolverResponse& initial_solution, SolveData& data, absl::BitGenRef random) { std::vector> all_paths = helper_.GetRoutingPaths(initial_solution); @@ -2259,7 +2399,7 @@ Neighborhood RoutingFullPathNeighborhoodGenerator::Generate( // Select variables to relax. const int num_variables_to_relax = - static_cast(all_path_variables.size() * difficulty); + static_cast(all_path_variables.size() * data.difficulty); absl::flat_hash_set relaxed_variables; // Relax the start and end of each path to ease relocation. @@ -2313,14 +2453,14 @@ bool RelaxationInducedNeighborhoodGenerator::ReadyToGenerate() const { } Neighborhood RelaxationInducedNeighborhoodGenerator::Generate( - const CpSolverResponse& /*initial_solution*/, double difficulty, + const CpSolverResponse& /*initial_solution*/, SolveData& data, absl::BitGenRef random) { Neighborhood neighborhood = helper_.FullNeighborhood(); neighborhood.is_generated = false; const ReducedDomainNeighborhood reduced_domains = GetRinsRensNeighborhood(response_manager_, lp_solutions_, - incomplete_solutions_, difficulty, random); + incomplete_solutions_, data.difficulty, random); if (reduced_domains.fixed_vars.empty() && reduced_domains.reduced_domain_vars.empty()) { diff --git a/ortools/sat/cp_model_lns.h b/ortools/sat/cp_model_lns.h index 31bd7ea0e39..dd92ff04d33 100644 --- a/ortools/sat/cp_model_lns.h +++ b/ortools/sat/cp_model_lns.h @@ -356,36 +356,6 @@ class NeighborhoodGenerator { : name_(name), helper_(*helper), difficulty_(0.5) {} virtual ~NeighborhoodGenerator() = default; - // Generates a "local" subproblem for the given seed. - // - // The difficulty will be in [0, 1] and is related to the asked neighborhood - // size (and thus local problem difficulty). A difficulty of 0.0 means empty - // neighborhood and a difficulty of 1.0 means the full problem. The algorithm - // should try to generate a neighborhood according to this difficulty which - // will be dynamically adjusted depending on whether or not we can solve the - // subproblem in a given time limit. - // - // The given initial_solution should contain a feasible solution to the - // initial CpModelProto given to this class. Any solution to the returned - // CPModelProto should also be valid solution to the same initial model. - // - // This function should be thread-safe. - virtual Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) = 0; - - // Returns true if the neighborhood generator can generate a neighborhood. - virtual bool ReadyToGenerate() const; - - // Uses UCB1 algorithm to compute the score (Multi armed bandit problem). - // Details are at - // https://lilianweng.github.io/lil-log/2018/01/23/the-multi-armed-bandit-problem-and-its-solutions.html. - // 'total_num_calls' should be the sum of calls across all generators part of - // the multi armed bandit problem. - // If the generator is called less than 10 times then the method returns - // infinity as score in order to get more data about the generator - // performance. 
- double GetUCBScore(int64_t total_num_calls) const; - // Adds solve data about one "solved" neighborhood. struct SolveData { // The status of the sub-solve. @@ -423,6 +393,37 @@ class NeighborhoodGenerator { o.base_objective, o.new_objective); } }; + + // Generates a "local" subproblem for the given seed. + // + // The data.difficulty will be in [0, 1] and is related to the requested + // neighborhood size (and thus local problem difficulty). A difficulty of 0.0 + // means empty neighborhood and a difficulty of 1.0 means the full problem. + // The algorithm should try to generate a neighborhood according to this + // difficulty which will be dynamically adjusted depending on whether or not + // we can solve the subproblem in a given time limit. + // + // The given initial_solution should contain a feasible solution to the + // initial CpModelProto given to this class. Any solution to the returned + // CPModelProto should also be a valid solution to the same initial model. + // + // This function should be thread-safe. + virtual Neighborhood Generate(const CpSolverResponse& initial_solution, + SolveData& data, absl::BitGenRef random) = 0; + + // Returns true if the neighborhood generator can generate a neighborhood. + virtual bool ReadyToGenerate() const; + + // Uses the UCB1 algorithm to compute the score (multi-armed bandit problem). + // Details are at + // https://lilianweng.github.io/lil-log/2018/01/23/the-multi-armed-bandit-problem-and-its-solutions.html. + // 'total_num_calls' should be the sum of calls across all generators that are part of + // the multi-armed bandit problem. + // If the generator has been called fewer than 10 times, the method returns + // infinity as the score in order to get more data about the generator's + // performance. + double GetUCBScore(int64_t total_num_calls) const; + void AddSolveData(SolveData data) { absl::MutexLock mutex_lock(&generator_mutex_); solve_data_.push_back(data); @@ -478,6 +479,7 @@ class NeighborhoodGenerator { const std::string name_; const NeighborhoodGeneratorHelper& helper_; mutable absl::Mutex generator_mutex_; + double deterministic_limit_ = 0.1; private: std::vector solve_data_; @@ -485,7 +487,6 @@ class NeighborhoodGenerator { // Current parameters to be used when generating/solving a neighborhood with // this generator. Only updated on Synchronize(). AdaptiveParameterValue difficulty_; - double deterministic_limit_ = 0.1; // Current statistics of the last solved neighborhood. // Only updated on Synchronize().
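[Editor's note] For readers unfamiliar with the UCB1 score mentioned in the GetUCBScore() comment above, here is a minimal sketch of the standard formula. It is illustrative only: the actual member is implemented in cp_model_lns.cc and may define the reward term differently; only the "fewer than 10 calls returns infinity" behavior is taken from the comment above.

#include <cmath>
#include <cstdint>
#include <limits>

// Standard UCB1: an exploitation term (mean reward) plus an exploration bonus
// that grows with the total number of calls across all generators and shrinks
// with the number of calls to this particular generator.
double Ucb1Score(double total_reward, int64_t num_calls,
                 int64_t total_num_calls) {
  // With fewer than 10 calls, return infinity so the generator keeps being
  // sampled until we have enough data about it.
  if (num_calls < 10) return std::numeric_limits<double>::infinity();
  const double mean_reward = total_reward / static_cast<double>(num_calls);
  const double exploration_bonus =
      std::sqrt(2.0 * std::log(static_cast<double>(total_num_calls)) /
                static_cast<double>(num_calls));
  return mean_reward + exploration_bonus;
}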
@@ -507,7 +508,7 @@ class RelaxRandomVariablesGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Pick a random subset of constraints and relax all the variables of these @@ -522,7 +523,7 @@ class RelaxRandomConstraintsGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Pick a random subset of variables that are constructed by a BFS in the @@ -538,7 +539,7 @@ class VariableGraphNeighborhoodGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // This randomly extend a working set of variable by one variable directly @@ -549,7 +550,7 @@ class ArcGraphNeighborhoodGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Pick a random subset of constraint and relax all of their variables. We are a @@ -562,7 +563,7 @@ class ConstraintGraphNeighborhoodGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // The idea here is to try to generate a random neighborhood incrementally in @@ -582,7 +583,7 @@ class DecompositionGraphNeighborhoodGenerator : public NeighborhoodGenerator { NeighborhoodGeneratorHelper const* helper, absl::string_view name) : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Solves a local branching LP and greedily picks a set of variables with the @@ -594,20 +595,20 @@ class DecompositionGraphNeighborhoodGenerator : public NeighborhoodGenerator { class LocalBranchingLpBasedNeighborhoodGenerator : public NeighborhoodGenerator { public: - // TODO(user): Restructure code so that we avoid circular dependency with - // solving functions. For now, we use solve_callback. - explicit LocalBranchingLpBasedNeighborhoodGenerator( + LocalBranchingLpBasedNeighborhoodGenerator( NeighborhoodGeneratorHelper const* helper, absl::string_view name, - std::function solve_callback, ModelSharedTimeLimit* const global_time_limit) : NeighborhoodGenerator(name, helper), - solve_callback_(std::move(solve_callback)), - global_time_limit_(global_time_limit) {} + global_time_limit_(global_time_limit) { + // Given that we spend time generating a good neighborhood it sounds + // reasonable to spend a bit more time solving it too. 
+ deterministic_limit_ = 0.5; + } + Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; private: - const std::function solve_callback_; ModelSharedTimeLimit* const global_time_limit_; }; @@ -640,7 +641,7 @@ class RandomIntervalSchedulingNeighborhoodGenerator : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Only make sense for scheduling problem. This select a random set of @@ -656,7 +657,7 @@ class RandomPrecedenceSchedulingNeighborhoodGenerator : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Similar to SchedulingNeighborhoodGenerator except the set of intervals that @@ -668,7 +669,7 @@ class SchedulingTimeWindowNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Similar to SchedulingTimeWindowNeighborhoodGenerator except that it relaxes @@ -685,7 +686,7 @@ class SchedulingResourceWindowsNeighborhoodGenerator intervals_in_constraints_(intervals_in_constraints) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; private: const std::vector> intervals_in_constraints_; @@ -702,7 +703,7 @@ class RandomRectanglesPackingNeighborhoodGenerator : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Only make sense for problems with no_overlap_2d constraints. This select a @@ -717,7 +718,7 @@ class RandomPrecedencesPackingNeighborhoodGenerator : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Only make sense for problems with no_overlap_2d constraints. 
This select a @@ -730,7 +731,7 @@ class SlicePackingNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // This routing based LNS generator will relax random arcs in all the paths of @@ -742,7 +743,7 @@ class RoutingRandomNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // This routing based LNS generator will relax small sequences of arcs randomly @@ -754,7 +755,7 @@ class RoutingPathNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // This routing based LNS generator aims are relaxing one full path, and make @@ -771,7 +772,7 @@ class RoutingFullPathNeighborhoodGenerator : public NeighborhoodGenerator { : NeighborhoodGenerator(name, helper) {} Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; }; // Generates a neighborhood by fixing the variables to solutions reported in @@ -806,7 +807,7 @@ class RelaxationInducedNeighborhoodGenerator : public NeighborhoodGenerator { // Both initial solution and difficulty values are ignored. Neighborhood Generate(const CpSolverResponse& initial_solution, - double difficulty, absl::BitGenRef random) final; + SolveData& data, absl::BitGenRef random) final; // Returns true if the required solutions are available. bool ReadyToGenerate() const override; diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index fe6309fd485..754fcc16713 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -12625,6 +12625,34 @@ void CpModelPresolver::InitializeMappingModelVariables() { context_->working_model->variables()); } +void CpModelPresolver::ExpandCpModelAndCanonicalizeConstraints() { + const int num_constraints_before_expansion = + context_->working_model->constraints_size(); + ExpandCpModel(context_); + if (context_->ModelIsUnsat()) return; + + // TODO(user): Make sure we can't have duplicate in these constraint. + // These are due to ExpandCpModel() were we create such constraint with + // duplicate. The problem is that some code assumes these are presolved + // before being called. + const int num_constraints = context_->working_model->constraints().size(); + for (int c = num_constraints_before_expansion; c < num_constraints; ++c) { + ConstraintProto* ct = context_->working_model->mutable_constraints(c); + const auto type = ct->constraint_case(); + if (type == ConstraintProto::kAtMostOne || + type == ConstraintProto::kExactlyOne) { + if (PresolveOneConstraint(c)) { + context_->UpdateConstraintVariableUsage(c); + } + if (context_->ModelIsUnsat()) return; + } else if (type == ConstraintProto::kLinear) { + if (CanonicalizeLinear(ct)) { + context_->UpdateConstraintVariableUsage(c); + } + } + } +} + // The presolve works as follow: // // First stage: @@ -12692,7 +12720,7 @@ CpSolverStatus CpModelPresolver::Presolve() { // If presolve is false, just run expansion. 
if (!context_->params().cp_model_presolve()) { - ExpandCpModel(context_); + ExpandCpModelAndCanonicalizeConstraints(); if (context_->ModelIsUnsat()) return InfeasibleStatus(); // We still write back the canonical objective has we don't deal well @@ -12746,26 +12774,8 @@ CpSolverStatus CpModelPresolver::Presolve() { // Call expansion. if (!context_->ModelIsExpanded()) { ExtractEncodingFromLinear(); - ExpandCpModel(context_); + ExpandCpModelAndCanonicalizeConstraints(); if (context_->ModelIsUnsat()) return InfeasibleStatus(); - - // TODO(user): Make sure we can't have duplicate in these constraint. - // These are due to ExpandCpModel() were we create such constraint with - // duplicate. The problem is that some code assumes these are presolved - // before being called. - const int num_constraints = context_->working_model->constraints().size(); - for (int c = 0; c < num_constraints; ++c) { - ConstraintProto* ct = context_->working_model->mutable_constraints(c); - const auto type = ct->constraint_case(); - if (type == ConstraintProto::kAtMostOne || - type == ConstraintProto::kExactlyOne) { - if (PresolveOneConstraint(c)) { - context_->UpdateConstraintVariableUsage(c); - } - if (context_->ModelIsUnsat()) return InfeasibleStatus(); - } - } - // We need to re-evaluate the degree because some presolve rule only // run after expansion. const int num_vars = context_->working_model->variables().size(); @@ -12805,7 +12815,7 @@ CpSolverStatus CpModelPresolver::Presolve() { } } - // Extract redundant at most one constraint form the linear ones. + // Extract redundant at most one constraint from the linear ones. // // TODO(user): more generally if we do some probing, the same relation will // be detected (and more). Also add an option to turn this off? diff --git a/ortools/sat/cp_model_presolve.h b/ortools/sat/cp_model_presolve.h index 000b8641533..7c9dff19cc4 100644 --- a/ortools/sat/cp_model_presolve.h +++ b/ortools/sat/cp_model_presolve.h @@ -106,6 +106,9 @@ class CpModelPresolver { // Runs the probing. void Probe(); + // Runs the expansion and fix constraints that became non-canonical. + void ExpandCpModelAndCanonicalizeConstraints(); + // Presolve functions. 
// // They should return false only if the constraint <-> variable graph didn't diff --git a/ortools/sat/cp_model_solver.cc b/ortools/sat/cp_model_solver.cc index 6a8c52ad46d..03df0ebc0d1 100644 --- a/ortools/sat/cp_model_solver.cc +++ b/ortools/sat/cp_model_solver.cc @@ -1074,7 +1074,13 @@ class LnsSolver : public SubSolver { ~LnsSolver() override { shared_->stat_tables.AddTimingStat(*this); - shared_->stat_tables.AddLnsStat(name(), *generator_); + shared_->stat_tables.AddLnsStat( + name(), + /*num_fully_solved_calls=*/generator_->num_fully_solved_calls(), + /*num_calls=*/generator_->num_calls(), + /*num_improving_calls=*/generator_->num_improving_calls(), + /*difficulty=*/generator_->difficulty(), + /*deterministic_limit=*/generator_->deterministic_limit()); } bool TaskIsAvailable() override { @@ -1130,7 +1136,7 @@ class LnsSolver : public SubSolver { } Neighborhood neighborhood = - generator_->Generate(base_response, data.difficulty, random); + generator_->Generate(base_response, data, random); if (!neighborhood.is_generated) return; @@ -1309,7 +1315,8 @@ class LnsSolver : public SubSolver { solution_values.end()); } - data.deterministic_time = local_time_limit->GetElapsedDeterministicTime(); + data.deterministic_time += + local_time_limit->GetElapsedDeterministicTime(); bool new_solution = false; bool display_lns_info = VLOG_IS_ON(2); @@ -1607,14 +1614,7 @@ void SolveCpModelParallel(SharedClasses* shared, Model* global_model) { if (params.use_lb_relax_lns() && name_filter.Keep("lb_relax_lns")) { reentrant_interleaved_subsolvers.push_back(std::make_unique( std::make_unique( - helper, name_filter.LastName(), - [](const CpModelProto cp_model, Model* model) { - model->GetOrCreate() - ->InitializeObjective(cp_model); - LoadCpModel(cp_model, model); - SolveLoadedCpModel(cp_model, model); - }, - shared->time_limit), + helper, name_filter.LastName(), shared->time_limit), lns_params, helper, shared)); } @@ -2553,7 +2553,8 @@ CpSolverResponse SolveCpModel(const CpModelProto& model_proto, Model* model) { // We ignore the multithreading parameter in this case. 
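[Editor's note] Side note on the condition updated in the hunk that follows: the parallel code path is now also taken when filter_subsolvers is set, not only for num_workers > 1, interleave_search, an explicit subsolvers list, or use_ls_only. A hypothetical snippet illustrating that, under the assumption that SolveWithParameters from cp_model_solver.h is used as the entry point; the subsolver name is only an example.

#include "ortools/sat/cp_model.pb.h"
#include "ortools/sat/cp_model_solver.h"
#include "ortools/sat/sat_parameters.pb.h"

namespace operations_research::sat {

// With this change, a single-worker solve that only sets filter_subsolvers
// still goes through SolveCpModelParallel().
CpSolverResponse SolveWithFilteredSubsolvers(const CpModelProto& model_proto) {
  SatParameters params;
  params.set_num_workers(1);
  params.add_filter_subsolvers("default_lp");  // Example name.
  return SolveWithParameters(model_proto, params);
}

}  // namespace operations_research::sat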
#else // __PORTABLE_PLATFORM__ if (params.num_workers() > 1 || params.interleave_search() || - !params.subsolvers().empty() || params.use_ls_only()) { + !params.subsolvers().empty() || !params.filter_subsolvers().empty() || + params.use_ls_only()) { SolveCpModelParallel(&shared, model); #endif // __PORTABLE_PLATFORM__ } else { diff --git a/ortools/sat/cp_model_solver_helpers.cc b/ortools/sat/cp_model_solver_helpers.cc index c8f60ffddd8..9e76433e666 100644 --- a/ortools/sat/cp_model_solver_helpers.cc +++ b/ortools/sat/cp_model_solver_helpers.cc @@ -660,7 +660,6 @@ void RegisterVariableBoundsLevelZeroImport( std::vector new_upper_bounds; shared_bounds_manager->GetChangedBounds( id, &model_variables, &new_lower_bounds, &new_upper_bounds); - bool new_bounds_have_been_imported = false; for (int i = 0; i < model_variables.size(); ++i) { const int model_var = model_variables[i]; @@ -675,7 +674,6 @@ void RegisterVariableBoundsLevelZeroImport( sat_solver->NotifyThatModelIsUnsat(); return false; } - new_bounds_have_been_imported = true; trail->EnqueueWithUnitReason(lit); continue; } @@ -691,7 +689,6 @@ void RegisterVariableBoundsLevelZeroImport( const bool changed_ub = new_ub < old_ub; if (!changed_lb && !changed_ub) continue; - new_bounds_have_been_imported = true; if (VLOG_IS_ON(3)) { const IntegerVariableProto& var_proto = model_proto.variables(model_var); @@ -715,9 +712,9 @@ void RegisterVariableBoundsLevelZeroImport( return false; } } - if (new_bounds_have_been_imported && !sat_solver->FinishPropagation()) { - return false; - } + + // Note that we will propagate if they are new bounds separately. + // See BeforeTakingDecision(). return true; }; model->GetOrCreate()->callbacks.push_back( @@ -764,7 +761,7 @@ void RegisterObjectiveBoundsImport( const auto import_objective_bounds = [name, solver, integer_trail, objective, shared_response_manager]() { if (solver->AssumptionLevel() != 0) return true; - bool propagate = false; + bool tighter_bounds = false; const IntegerValue external_lb = shared_response_manager->GetInnerObjectiveLowerBound(); @@ -776,7 +773,7 @@ void RegisterObjectiveBoundsImport( {}, {})) { return false; } - propagate = true; + tighter_bounds = true; } const IntegerValue external_ub = @@ -789,18 +786,20 @@ void RegisterObjectiveBoundsImport( {}, {})) { return false; } - propagate = true; + tighter_bounds = true; } - if (!propagate) return true; - - VLOG(3) << "'" << name << "' imports objective bounds: external [" - << objective->ScaleIntegerObjective(external_lb) << ", " - << objective->ScaleIntegerObjective(external_ub) << "], current [" - << objective->ScaleIntegerObjective(current_lb) << ", " - << objective->ScaleIntegerObjective(current_ub) << "]"; + // Note that we will propagate if they are new bounds separately. + // See BeforeTakingDecision(). 
+ if (tighter_bounds) { + VLOG(3) << "'" << name << "' imports objective bounds: external [" + << objective->ScaleIntegerObjective(external_lb) << ", " + << objective->ScaleIntegerObjective(external_ub) << "], current [" + << objective->ScaleIntegerObjective(current_lb) << ", " + << objective->ScaleIntegerObjective(current_ub) << "]"; + } - return solver->FinishPropagation(); + return true; }; model->GetOrCreate()->callbacks.push_back( diff --git a/ortools/sat/diffn_util.cc b/ortools/sat/diffn_util.cc index 0cb6fbb67c0..ade9c12322d 100644 --- a/ortools/sat/diffn_util.cc +++ b/ortools/sat/diffn_util.cc @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -1539,7 +1540,8 @@ FindRectanglesResult FindRectanglesWithEnergyConflictMC( } std::string RenderDot(std::optional bb, - absl::Span solution) { + absl::Span solution, + std::string_view extra_dot_payload) { const std::vector colors = {"red", "green", "blue", "cyan", "yellow", "purple"}; std::stringstream ss; @@ -1559,6 +1561,7 @@ std::string RenderDot(std::optional bb, << "!\" shape=box width=" << 2 * solution[i].SizeX() << " height=" << 2 * solution[i].SizeY() << "]\n"; } + ss << extra_dot_payload; ss << "}\n"; return ss.str(); } diff --git a/ortools/sat/diffn_util.h b/ortools/sat/diffn_util.h index 0c3eac4c626..5b93c82621d 100644 --- a/ortools/sat/diffn_util.h +++ b/ortools/sat/diffn_util.h @@ -599,7 +599,8 @@ FindRectanglesResult FindRectanglesWithEnergyConflictMC( // Render a packing solution as a Graphviz dot file. Only works in the "neato" // or "fdp" Graphviz backends. std::string RenderDot(std::optional bb, - absl::Span solution); + absl::Span solution, + std::string_view extra_dot_payload = ""); // Given a bounding box and a list of rectangles inside that bounding box, // returns a list of rectangles partitioning the empty area inside the bounding diff --git a/ortools/sat/go/cpmodel/BUILD.bazel b/ortools/sat/go/cpmodel/BUILD.bazel new file mode 100644 index 00000000000..1aac72eae48 --- /dev/null +++ b/ortools/sat/go/cpmodel/BUILD.bazel @@ -0,0 +1,55 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "cpmodel", + srcs = [ + "cp_model.go", + "cp_solver.go", + "cp_solver_c.cc", + "cp_solver_c.h", + "domain.go", + ], + cdeps = [":cp_solver_c"], + cgo = True, + importpath = "github.com/google/or-tools/ortools/sat/go/cpmodel", + visibility = ["//visibility:public"], + deps = [ + "//ortools/sat:cp_model_go_proto", + "//ortools/sat:sat_parameters_go_proto", + "@com_github_golang_glog//:glog", + "@org_golang_google_protobuf//proto", + ], +) + +go_test( + name = "cpmodel_test", + srcs = [ + "cp_model_test.go", + "cp_solver_test.go", + "domain_test.go", + ], + embed = [":cpmodel"], + deps = [ + "//ortools/sat:cp_model_go_proto", + "//ortools/sat:sat_parameters_go_proto", + "@com_github_golang_glog//:glog", + "@com_github_google_go_cmp//cmp", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + ], +) + +cc_library( + name = "cp_solver_c", + srcs = ["cp_solver_c.cc"], + hdrs = ["cp_solver_c.h"], + deps = [ + "//ortools/sat:cp_model_cc_proto", + "//ortools/sat:cp_model_solver", + "//ortools/sat:sat_parameters_cc_proto", + "//ortools/util:time_limit", + "@com_google_absl//absl/log", + "@com_google_absl//absl/status", + "@com_google_absl//absl/strings", + ], +) diff --git a/ortools/sat/go/cp_model.go b/ortools/sat/go/cpmodel/cp_model.go similarity index 76% rename from ortools/sat/go/cp_model.go rename to 
ortools/sat/go/cpmodel/cp_model.go index fbd42d747b9..1822f6c55fa 100644 --- a/ortools/sat/go/cp_model.go +++ b/ortools/sat/go/cpmodel/cp_model.go @@ -28,9 +28,8 @@ import ( "math" "sort" - "github.com/golang/glog" - - cmpb "ortools/sat/cp_model_go_proto" + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" ) // ErrMixedModels holds the error when elements added to a model are different. @@ -39,9 +38,9 @@ var ErrMixedModels = errors.New("elements are not part of the same model") type ( // VarIndex is the index of a variable in the CP model proto, if positive. If this value is // negative, it represents the negation of a Boolean variable in the position (-1*VarIndex-1). - VarIndex int32_t + VarIndex int32 // ConstrIndex is the index of a constraint in the CP model proto. - ConstrIndex int32_t + ConstrIndex int32 ) func (v VarIndex) positiveIndex() VarIndex { @@ -53,21 +52,21 @@ func (v VarIndex) positiveIndex() VarIndex { // LinearArgument provides an interface for BoolVar, IntVar, and LinearExpr. type LinearArgument interface { - addToLinearExpr(e *LinearExpr, c int64_t) + addToLinearExpr(e *LinearExpr, c int64) // asLinearExpressionProto returns the LinearArgument as a LinearExpressionProto. asLinearExpressionProto() *cmpb.LinearExpressionProto - evaluateSolutionValue(r *cmpb.CpSolverResponse) int64_t + evaluateSolutionValue(r *cmpb.CpSolverResponse) int64 } // LinearExpr is a container for a linear expression. type LinearExpr struct { varCoeffs []varCoeff - offset int64_t + offset int64 } type varCoeff struct { ind VarIndex - coeff int64_t + coeff int64 } // NewLinearExpr creates a new empty LinearExpr. @@ -76,7 +75,7 @@ func NewLinearExpr() *LinearExpr { } // NewConstant creates and returns a LinearExpr containing the constant `c`. -func NewConstant(c int64_t) *LinearExpr { +func NewConstant(c int64) *LinearExpr { return &LinearExpr{offset: c} } @@ -87,13 +86,13 @@ func (l *LinearExpr) Add(la LinearArgument) *LinearExpr { } // AddConstant adds the constant to the LinearExpr and returns itself. -func (l *LinearExpr) AddConstant(c int64_t) *LinearExpr { +func (l *LinearExpr) AddConstant(c int64) *LinearExpr { l.offset += c return l } // AddTerm adds the linear argument term with the given coefficient to the LinearExpr and returns itself. 
-func (l *LinearExpr) AddTerm(la LinearArgument, coeff int64_t) *LinearExpr { +func (l *LinearExpr) AddTerm(la LinearArgument, coeff int64) *LinearExpr { la.addToLinearExpr(l, coeff) return l } @@ -118,7 +117,7 @@ func (l *LinearExpr) AddWeightedSum(las []LinearArgument, coeffs []int64) *Linea return l } -func (l *LinearExpr) addToLinearExpr(e *LinearExpr, c int64_t) { +func (l *LinearExpr) addToLinearExpr(e *LinearExpr, c int64) { for _, vc := range l.varCoeffs { e.varCoeffs = append(e.varCoeffs, varCoeff{ind: vc.ind, coeff: vc.coeff * c}) } @@ -129,15 +128,15 @@ func (l *LinearExpr) asLinearExpressionProto() *cmpb.LinearExpressionProto { linExprProto := &cmpb.LinearExpressionProto{} for _, vc := range l.varCoeffs { - linExprProto.SetVars(append(linExprProto.GetVars(), int32_t(vc.ind))) - linExprProto.SetCoeffs(append(linExprProto.GetCoeffs(), vc.coeff)) + linExprProto.Vars = append(linExprProto.GetVars(), int32(vc.ind)) + linExprProto.Coeffs = append(linExprProto.GetCoeffs(), vc.coeff) } - linExprProto.SetOffset(l.offset) + linExprProto.Offset = l.offset return linExprProto } -func (l *LinearExpr) evaluateSolutionValue(r *cmpb.CpSolverResponse) int64_t { +func (l *LinearExpr) evaluateSolutionValue(r *cmpb.CpSolverResponse) int64 { result := l.offset for _, vc := range l.varCoeffs { @@ -147,9 +146,9 @@ func (l *LinearExpr) evaluateSolutionValue(r *cmpb.CpSolverResponse) int64_t { return result } -func int64AsLinearExpressionProto(l int64_t) *cmpb.LinearExpressionProto { +func int64AsLinearExpressionProto(l int64) *cmpb.LinearExpressionProto { linExprProto := &cmpb.LinearExpressionProto{} - linExprProto.SetOffset(l) + linExprProto.Offset = l return linExprProto } @@ -178,24 +177,24 @@ func (i IntVar) Index() VarIndex { // WithName sets the name of the variable. func (i IntVar) WithName(s string) IntVar { - i.cpb.cmpb.GetVariables()[i.ind].SetName(s) + i.cpb.cmpb.GetVariables()[i.ind].Name = s return i } -func (i IntVar) addToLinearExpr(e *LinearExpr, c int64_t) { +func (i IntVar) addToLinearExpr(e *LinearExpr, c int64) { e.varCoeffs = append(e.varCoeffs, varCoeff{ind: i.ind, coeff: c}) } func (i IntVar) asLinearExpressionProto() *cmpb.LinearExpressionProto { linExprProto := &cmpb.LinearExpressionProto{} - linExprProto.SetVars([]int32{int32_t(i.ind)}) - linExprProto.SetCoeffs([]int64{1}) + linExprProto.Vars = []int32{int32(i.ind)} + linExprProto.Coeffs = []int64{1} return linExprProto } -func (i IntVar) evaluateSolutionValue(r *cmpb.CpSolverResponse) int64_t { +func (i IntVar) evaluateSolutionValue(r *cmpb.CpSolverResponse) int64 { return r.GetSolution()[i.ind] } @@ -230,11 +229,11 @@ func (b BoolVar) Index() VarIndex { // WithName sets the name of the variable. 
func (b BoolVar) WithName(s string) BoolVar { - b.cpb.cmpb.GetVariables()[b.ind.positiveIndex()].SetName(s) + b.cpb.cmpb.GetVariables()[b.ind.positiveIndex()].Name = s return b } -func (b BoolVar) addToLinearExpr(e *LinearExpr, c int64_t) { +func (b BoolVar) addToLinearExpr(e *LinearExpr, c int64) { if b.ind < 0 { e.varCoeffs = append(e.varCoeffs, varCoeff{ind: b.ind.positiveIndex(), coeff: -c}) e.offset += c @@ -246,20 +245,20 @@ func (b BoolVar) addToLinearExpr(e *LinearExpr, c int64_t) { func (b BoolVar) asLinearExpressionProto() *cmpb.LinearExpressionProto { linExprProto := &cmpb.LinearExpressionProto{} - linExprProto.SetVars([]int32{int32_t(b.ind.positiveIndex())}) - coeff := int64_t(1) - var offset int64_t + linExprProto.Vars = []int32{int32(b.ind.positiveIndex())} + coeff := int64(1) + var offset int64 if b.ind < 0 { coeff = -1 offset = 1 } - linExprProto.SetCoeffs([]int64{coeff}) - linExprProto.SetOffset(offset) + linExprProto.Coeffs = []int64{coeff} + linExprProto.Offset = offset return linExprProto } -func (b BoolVar) evaluateSolutionValue(r *cmpb.CpSolverResponse) int64_t { +func (b BoolVar) evaluateSolutionValue(r *cmpb.CpSolverResponse) int64 { if b.ind < 0 { return 1 - r.GetSolution()[b.ind.positiveIndex()] } @@ -272,7 +271,7 @@ func asNegatedLinearExpressionProto(la LinearArgument) *cmpb.LinearExpressionPro for i, c := range result.GetCoeffs() { result.GetCoeffs()[i] = -c } - result.SetOffset(-result.GetOffset()) + result.Offset = -result.GetOffset() return result } @@ -297,7 +296,7 @@ func (iv IntervalVar) Index() ConstrIndex { // WithName sets the name of the interval variable. func (iv IntervalVar) WithName(s string) IntervalVar { - iv.cpb.cmpb.GetConstraints()[iv.ind].SetName(s) + iv.cpb.cmpb.GetConstraints()[iv.ind].Name = s return iv } @@ -309,7 +308,7 @@ type Constraint struct { // WithName sets the name of the constraint. func (c Constraint) WithName(s string) Constraint { - c.cpb.cmpb.GetConstraints()[c.ind].SetName(s) + c.cpb.cmpb.GetConstraints()[c.ind].Name = s return c } @@ -328,7 +327,7 @@ func (c Constraint) Index() ConstrIndex { func (c Constraint) OnlyEnforceIf(bvs ...BoolVar) Constraint { cstrpb := c.cpb.cmpb.GetConstraints()[c.ind] for _, bv := range bvs { - cstrpb.SetEnforcementLiteral(append(cstrpb.GetEnforcementLiteral(), int32_t(bv.ind))) + cstrpb.EnforcementLiteral = append(cstrpb.GetEnforcementLiteral(), int32(bv.ind)) } return c } @@ -345,8 +344,8 @@ func (noc NoOverlap2DConstraint) AddRectangle(xInterval, yInterval IntervalVar) return } noOverlapCt := noc.cpb.cmpb.GetConstraints()[noc.ind].GetNoOverlap_2D() - noOverlapCt.SetXIntervals(append(noOverlapCt.GetXIntervals(), int32_t(xInterval.ind))) - noOverlapCt.SetYIntervals(append(noOverlapCt.GetYIntervals(), int32_t(yInterval.ind))) + noOverlapCt.XIntervals = append(noOverlapCt.GetXIntervals(), int32(xInterval.ind)) + noOverlapCt.YIntervals = append(noOverlapCt.GetYIntervals(), int32(yInterval.ind)) } // CircuitConstraint is a reference to a specialized circuit constraint that allows for @@ -357,14 +356,14 @@ type CircuitConstraint struct { // AddArc adds an arc to the circuit constraint. `tail` and `head` are the indices of the tail // and head nodes, respectively, and `literal` is true if the arc is selected. 
-func (cc *CircuitConstraint) AddArc(tail, head int32_t, literal BoolVar) { +func (cc *CircuitConstraint) AddArc(tail, head int32, literal BoolVar) { if !cc.cpb.checkSameModelAndSetErrorf(literal.cpb, "invalid parameter Boolvar %v added to CircuitConstraint %v", literal.Index(), cc.Index()) { return } cirCt := cc.cpb.cmpb.GetConstraints()[cc.ind].GetCircuit() - cirCt.SetTails(append(cirCt.GetTails(), tail)) - cirCt.SetHeads(append(cirCt.GetHeads(), head)) - cirCt.SetLiterals(append(cirCt.GetLiterals(), int32_t(literal.ind))) + cirCt.Tails = append(cirCt.GetTails(), tail) + cirCt.Heads = append(cirCt.GetHeads(), head) + cirCt.Literals = append(cirCt.GetLiterals(), int32(literal.ind)) } // MultipleCircuitConstraint is a reference to a specialized circuit constraint that allows for @@ -375,14 +374,14 @@ type MultipleCircuitConstraint struct { // AddRoute adds an arc to the circuit constraint. `tail` and `head` and the indices of the tail // and head nodes, respectively, and `literal` is true if the arc is selected. -func (mc *MultipleCircuitConstraint) AddRoute(tail, head int32_t, literal BoolVar) { +func (mc *MultipleCircuitConstraint) AddRoute(tail, head int32, literal BoolVar) { if !mc.cpb.checkSameModelAndSetErrorf(literal.cpb, "invalid parameter boolvar %v added to MultipleCircuitConstraint %v", literal.Index(), mc.Index()) { return } multCirCt := mc.cpb.cmpb.GetConstraints()[mc.ind].GetRoutes() - multCirCt.SetTails(append(multCirCt.GetTails(), tail)) - multCirCt.SetHeads(append(multCirCt.GetHeads(), head)) - multCirCt.SetLiterals(append(multCirCt.GetLiterals(), int32_t(literal.ind))) + multCirCt.Tails = append(multCirCt.GetTails(), tail) + multCirCt.Heads = append(multCirCt.GetHeads(), head) + multCirCt.Literals = append(multCirCt.GetLiterals(), int32(literal.ind)) } // TableConstraint is a reference to a specialized assignment constraint that allows for adding @@ -392,13 +391,13 @@ type TableConstraint struct { } // AddTuple adds a tuple of possible values to the table constraint. -func (tc *TableConstraint) AddTuple(tuple ...int64_t) { +func (tc *TableConstraint) AddTuple(tuple ...int64) { ct := tc.cpb.cmpb.GetConstraints()[tc.ind].GetTable() if len(ct.GetVars()) != len(tuple) { log.Fatalf("length of vars in the proto must be the same length as the input tuple: %v != %v", len(ct.GetVars()), len(tuple)) } - ct.SetValues(append(ct.GetValues(), tuple...)) + ct.Values = append(ct.GetValues(), tuple...) } // ReservoirConstraint is a reference to a specialized reservoir constraint that allows for @@ -412,9 +411,9 @@ type ReservoirConstraint struct { // by `levelChange` at time `time`. func (rc *ReservoirConstraint) AddEvent(time LinearArgument, levelChange LinearArgument) { ct := rc.cpb.cmpb.GetConstraints()[rc.ind].GetReservoir() - ct.SetTimeExprs(append(ct.GetTimeExprs(), time.asLinearExpressionProto())) - ct.SetLevelChanges(append(ct.GetLevelChanges(), levelChange.asLinearExpressionProto())) - ct.SetActiveLiterals(append(ct.GetActiveLiterals(), int32_t(rc.oneIndex))) + ct.TimeExprs = append(ct.GetTimeExprs(), time.asLinearExpressionProto()) + ct.LevelChanges = append(ct.GetLevelChanges(), levelChange.asLinearExpressionProto()) + ct.ActiveLiterals = append(ct.GetActiveLiterals(), int32(rc.oneIndex)) } // AutomatonConstraint is a reference to a specialized automaton constraint that allows for @@ -426,11 +425,11 @@ type AutomatonConstraint struct { // AddTransition adds a transition to the constraint. Both tail and head are states, label // is any variable value. 
No two outgoing transitions from the same state can have the same // label. -func (ac *AutomatonConstraint) AddTransition(tail, head int64_t, label int64_t) { +func (ac *AutomatonConstraint) AddTransition(tail, head int64, label int64) { ct := ac.cpb.cmpb.GetConstraints()[ac.ind].GetAutomaton() - ct.SetTransitionTail(append(ct.GetTransitionTail(), tail)) - ct.SetTransitionHead(append(ct.GetTransitionHead(), head)) - ct.SetTransitionLabel(append(ct.GetTransitionLabel(), label)) + ct.TransitionTail = append(ct.GetTransitionTail(), tail) + ct.TransitionHead = append(ct.GetTransitionHead(), head) + ct.TransitionLabel = append(ct.GetTransitionLabel(), label) } // CumulativeConstraint is a reference to a specialized cumulative constraint that allows for @@ -445,8 +444,8 @@ func (cc *CumulativeConstraint) AddDemand(interval IntervalVar, demand LinearArg return } ct := cc.cpb.cmpb.GetConstraints()[cc.ind].GetCumulative() - ct.SetIntervals(append(ct.GetIntervals(), int32_t(interval.ind))) - ct.SetDemands(append(ct.GetDemands(), demand.asLinearExpressionProto())) + ct.Intervals = append(ct.GetIntervals(), int32(interval.ind)) + ct.Demands = append(ct.GetDemands(), demand.asLinearExpressionProto()) } // checkSameModelAndSetErrorf returns true if `cp` and `cp2` point to the same Builder. @@ -481,11 +480,11 @@ func NewCpModelBuilder() *Builder { } // NewIntVar creates a new intVar in the CpModel proto. -func (cp *Builder) NewIntVar(lb, ub int64_t) IntVar { +func (cp *Builder) NewIntVar(lb, ub int64) IntVar { intVar := IntVar{cpb: cp, ind: VarIndex(len(cp.cmpb.GetVariables()))} - pVar := cmpb.IntegerVariableProto_builder{Domain: []int64{lb, ub}}.Build() - cp.cmpb.SetVariables(append(cp.cmpb.GetVariables(), pVar)) + pVar := &cmpb.IntegerVariableProto{Domain: []int64{lb, ub}} + cp.cmpb.Variables = append(cp.cmpb.GetVariables(), pVar) return intVar } @@ -494,8 +493,8 @@ func (cp *Builder) NewIntVar(lb, ub int64_t) IntVar { func (cp *Builder) NewIntVarFromDomain(d Domain) IntVar { intVar := IntVar{cpb: cp, ind: VarIndex(len(cp.cmpb.GetVariables()))} - pVar := cmpb.IntegerVariableProto_builder{Domain: d.FlattenedIntervals()}.Build() - cp.cmpb.SetVariables(append(cp.cmpb.GetVariables(), pVar)) + pVar := &cmpb.IntegerVariableProto{Domain: d.FlattenedIntervals()} + cp.cmpb.Variables = append(cp.cmpb.GetVariables(), pVar) return intVar } @@ -504,15 +503,15 @@ func (cp *Builder) NewIntVarFromDomain(d Domain) IntVar { func (cp *Builder) NewBoolVar() BoolVar { boolVar := BoolVar{cpb: cp, ind: VarIndex(len(cp.cmpb.GetVariables()))} - pVar := cmpb.IntegerVariableProto_builder{Domain: []int64{0, 1}}.Build() - cp.cmpb.SetVariables(append(cp.cmpb.GetVariables(), pVar)) + pVar := &cmpb.IntegerVariableProto{Domain: []int64{0, 1}} + cp.cmpb.Variables = append(cp.cmpb.GetVariables(), pVar) return boolVar } // NewConstant creates a constant variable. If this is called multiple times, the same variable will // always be returned. 
-func (cp *Builder) NewConstant(v int64_t) IntVar { +func (cp *Builder) NewConstant(v int64) IntVar { if i, ok := cp.constants[v]; ok { return IntVar{cpb: cp, ind: i} } @@ -531,8 +530,8 @@ func (cp *Builder) TrueVar() BoolVar { } boolVar := BoolVar{cpb: cp, ind: VarIndex(len(cp.cmpb.GetVariables()))} - pVar := cmpb.IntegerVariableProto_builder{Domain: []int64{1, 1}}.Build() - cp.cmpb.SetVariables(append(cp.cmpb.GetVariables(), pVar)) + pVar := &cmpb.IntegerVariableProto{Domain: []int64{1, 1}} + cp.cmpb.Variables = append(cp.cmpb.GetVariables(), pVar) cp.constants[1] = boolVar.ind @@ -547,8 +546,8 @@ func (cp *Builder) FalseVar() BoolVar { } boolVar := BoolVar{cpb: cp, ind: VarIndex(len(cp.cmpb.GetVariables()))} - pVar := cmpb.IntegerVariableProto_builder{Domain: []int64{0, 0}}.Build() - cp.cmpb.SetVariables(append(cp.cmpb.GetVariables(), pVar)) + pVar := &cmpb.IntegerVariableProto{Domain: []int64{0, 0}} + cp.cmpb.Variables = append(cp.cmpb.GetVariables(), pVar) cp.constants[0] = boolVar.ind @@ -562,7 +561,7 @@ func (cp *Builder) NewIntervalVar(start, size, end LinearArgument) IntervalVar { } // NewFixedSizeIntervalVar creates a new interval variable with the fixed size. -func (cp *Builder) NewFixedSizeIntervalVar(start LinearArgument, size int64_t) IntervalVar { +func (cp *Builder) NewFixedSizeIntervalVar(start LinearArgument, size int64) IntervalVar { return cp.NewOptionalFixedSizeIntervalVar(start, size, cp.TrueVar()) } @@ -573,21 +572,21 @@ func (cp *Builder) NewOptionalIntervalVar(start, size, end LinearArgument, prese cp.AddEquality(NewLinearExpr().Add(start).Add(size), end).OnlyEnforceIf(presence) ind := ConstrIndex(len(cp.cmpb.GetConstraints())) - cp.cmpb.SetConstraints(append(cp.cmpb.GetConstraints(), cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(presence.ind)}, - Interval: cmpb.IntervalConstraintProto_builder{ + cp.cmpb.Constraints = append(cp.cmpb.GetConstraints(), &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(presence.ind)}, + Constraint: &cmpb.ConstraintProto_Interval{&cmpb.IntervalConstraintProto{ Start: start.asLinearExpressionProto(), Size: size.asLinearExpressionProto(), End: end.asLinearExpressionProto(), - }.Build(), - }.Build())) + }, + }}) return IntervalVar{cpb: cp, ind: ind} } // NewOptionalFixedSizeIntervalVar creates an optional interval variable with the fixed size. It // only enforces that the interval is of the fixed size when the `presence` variable is true. 
-func (cp *Builder) NewOptionalFixedSizeIntervalVar(start LinearArgument, size int64_t, presence BoolVar) IntervalVar { +func (cp *Builder) NewOptionalFixedSizeIntervalVar(start LinearArgument, size int64, presence BoolVar) IntervalVar { sizeLinExpr := NewConstant(size) end := NewLinearExpr().Add(start).Add(sizeLinExpr) @@ -596,7 +595,7 @@ func (cp *Builder) NewOptionalFixedSizeIntervalVar(start LinearArgument, size in func (cp *Builder) appendConstraint(ct *cmpb.ConstraintProto) Constraint { i := ConstrIndex(len(cp.cmpb.GetConstraints())) - cp.cmpb.SetConstraints(append(cp.cmpb.GetConstraints(), ct)) + cp.cmpb.Constraints = append(cp.cmpb.GetConstraints(), ct) return Constraint{cpb: cp, ind: i} } @@ -605,30 +604,30 @@ func buildBoolArgumentProto(cp *Builder, bvs ...BoolVar) *cmpb.BoolArgumentProto var literals []int32 for _, b := range bvs { cp.checkSameModelAndSetErrorf(b.cpb, "BoolVar %v added to Constraint %v", b.Index(), len(cp.cmpb.GetConstraints())) - literals = append(literals, int32_t(b.ind)) + literals = append(literals, int32(b.ind)) } - return cmpb.BoolArgumentProto_builder{Literals: literals}.Build() + return &cmpb.BoolArgumentProto{Literals: literals} } // AddBoolOr adds the constraint that at least one of the literals must be true. func (cp *Builder) AddBoolOr(bvs ...BoolVar) Constraint { - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - BoolOr: buildBoolArgumentProto(cp, bvs...), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_BoolOr{buildBoolArgumentProto(cp, bvs...)}, + }) } // AddBoolAnd adds the constraint that all of the literals must be true. func (cp *Builder) AddBoolAnd(bvs ...BoolVar) Constraint { - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - BoolAnd: buildBoolArgumentProto(cp, bvs...), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_BoolAnd{buildBoolArgumentProto(cp, bvs...)}, + }) } // AddBoolXor adds the constraint that an odd number of the literals must be true. func (cp *Builder) AddBoolXor(bvs ...BoolVar) Constraint { - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - BoolXor: buildBoolArgumentProto(cp, bvs...), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_BoolXor{buildBoolArgumentProto(cp, bvs...)}, + }) } // AddAtLeastOne adds the constraint that at least one of the literals must be true. @@ -638,16 +637,16 @@ func (cp *Builder) AddAtLeastOne(bvs ...BoolVar) Constraint { // AddAtMostOne adds the constraint that at most one of the literals must be true. func (cp *Builder) AddAtMostOne(bvs ...BoolVar) Constraint { - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - AtMostOne: buildBoolArgumentProto(cp, bvs...), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_AtMostOne{buildBoolArgumentProto(cp, bvs...)}, + }) } // AddExactlyOne adds the constraint that exactly one of the literals must be true. func (cp *Builder) AddExactlyOne(bvs ...BoolVar) Constraint { - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - ExactlyOne: buildBoolArgumentProto(cp, bvs...), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_ExactlyOne{buildBoolArgumentProto(cp, bvs...)}, + }) } // AddImplication adds the constraint a => b. 
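For readers tracking the pattern these cp_model.go hunks converge on: the builder-style protobuf API (cmpb.XxxProto_builder{...}.Build() together with Set* accessors) is being replaced by the open struct API, in which each oneof constraint member is wrapped in a generated single-field struct such as cmpb.ConstraintProto_BoolOr. Below is a minimal sketch of that target pattern, not part of the patch; it assumes the generated cmpb types used throughout this file (with CpModelProto as the container message), the cmpb import path taken from the test file further down, and standard protoc-gen-go naming for the keyed oneof wrapper field.

    // Sketch, not part of the patch: appends a bool_or constraint to a
    // CpModelProto using the open struct API that these hunks adopt.
    package main

    import (
        "fmt"

        cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel"
    )

    func addBoolOr(model *cmpb.CpModelProto, literals []int32) {
        ct := &cmpb.ConstraintProto{
            Constraint: &cmpb.ConstraintProto_BoolOr{
                BoolOr: &cmpb.BoolArgumentProto{Literals: literals},
            },
        }
        model.Constraints = append(model.GetConstraints(), ct)
    }

    func main() {
        m := &cmpb.CpModelProto{}
        addBoolOr(m, []int32{0, 1}) // literals refer to boolean variables 0 and 1
        fmt.Println(len(m.GetConstraints()))
    }

The unkeyed form &cmpb.ConstraintProto_BoolOr{...} used in the hunks is equivalent, since the generated wrapper struct has a single field.
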
@@ -664,7 +663,7 @@ func (cp *Builder) addLinearConstraint(le *LinearExpr, intervals ...ClosedInterv var varCoeffs []int64 var domain []int64 for _, varCoeff := range le.varCoeffs { - varIndices = append(varIndices, int32_t(varCoeff.ind)) + varIndices = append(varIndices, int32(varCoeff.ind)) varCoeffs = append(varCoeffs, varCoeff.coeff) } for _, i := range intervals { @@ -672,11 +671,11 @@ func (cp *Builder) addLinearConstraint(le *LinearExpr, intervals ...ClosedInterv domain = append(domain, iOffset.Start, iOffset.End) } - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - Linear: cmpb.LinearConstraintProto_builder{ + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Linear{&cmpb.LinearConstraintProto{ Vars: varIndices, Coeffs: varCoeffs, Domain: domain, - }.Build(), - }.Build()) + }}, + }) } // AddLinearConstraintForDomain adds the linear constraint `expr` in `domain`. @@ -686,7 +685,7 @@ func (cp *Builder) AddLinearConstraintForDomain(expr LinearArgument, domain Doma } // AddLinearConstraint adds the linear constraint `lb <= expr <= ub` -func (cp *Builder) AddLinearConstraint(expr LinearArgument, lb, ub int64_t) Constraint { +func (cp *Builder) AddLinearConstraint(expr LinearArgument, lb, ub int64) Constraint { linExpr := NewLinearExpr().Add(expr) return cp.addLinearConstraint(linExpr, ClosedInterval{lb, ub}) } @@ -740,25 +739,25 @@ func (cp *Builder) AddAllDifferent(la ...LinearArgument) Constraint { exprs = append(exprs, l.asLinearExpressionProto()) } - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - AllDiff: cmpb.AllDifferentConstraintProto_builder{Exprs: exprs}.Build(), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_AllDiff{&cmpb.AllDifferentConstraintProto{Exprs: exprs}}, + }) } // AddVariableElement adds the variable element constraint: vars[ind] == target. func (cp *Builder) AddVariableElement(ind IntVar, vars []IntVar, target IntVar) Constraint { var varIndices []int32 for _, v := range vars { - varIndices = append(varIndices, int32_t(v.ind)) + varIndices = append(varIndices, int32(v.ind)) } - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - Element: cmpb.ElementConstraintProto_builder{ - Index: int32_t(ind.ind), - Target: int32_t(target.ind), + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Element{&cmpb.ElementConstraintProto{ + Index: int32(ind.ind), + Target: int32(target.ind), Vars: varIndices, - }.Build(), - }.Build()) + }}, + }) } // AddElement adds the element constraint: values[ind] == target @@ -781,19 +780,19 @@ func (cp *Builder) AddInverseConstraint(vars []IntVar, inverseVars []IntVar) Con var fDirect []int32 for _, v := range vars { - fDirect = append(fDirect, int32_t(v.ind)) + fDirect = append(fDirect, int32(v.ind)) } var fInverse []int32 for _, v := range inverseVars { - fInverse = append(fInverse, int32_t(v.ind)) + fInverse = append(fInverse, int32(v.ind)) } - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - Inverse: cmpb.InverseConstraintProto_builder{ + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Inverse{&cmpb.InverseConstraintProto{ FDirect: fDirect, FInverse: fInverse, - }.Build(), - }.Build()) + }}, + }) } // AddMinEquality adds the constraint: target == min(exprs). 
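At the usage level, the Builder methods rewritten above compose as in the sketch below. This is an illustration only: the package name cpmodel and the import path github.com/google/or-tools/ortools/sat/go/cpmodel are inferred from the file moves in this patch rather than stated by it, and the model is only built, not solved.

    // Sketch, not part of the patch: builds a tiny model with the linear and
    // all-different helpers whose proto plumbing is rewritten above.
    package main

    import (
        "fmt"

        "github.com/google/or-tools/ortools/sat/go/cpmodel"
    )

    func main() {
        model := cpmodel.NewCpModelBuilder()
        x := model.NewIntVar(0, 10)
        y := model.NewIntVar(0, 10)

        // Enforce 2 <= x + y <= 6 and x != y.
        model.AddLinearConstraint(cpmodel.NewLinearExpr().Add(x).Add(y), 2, 6)
        model.AddAllDifferent(x, y)

        fmt.Println(x.Index(), y.Index()) // the two variable indices in the proto
    }
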
@@ -803,12 +802,13 @@ func (cp *Builder) AddMinEquality(target LinearArgument, exprs ...LinearArgument protos = append(protos, asNegatedLinearExpressionProto(e)) } - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - LinMax: cmpb.LinearArgumentProto_builder{ - Target: asNegatedLinearExpressionProto(target), - Exprs: protos, - }.Build(), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_LinMax{ + &cmpb.LinearArgumentProto{ + Target: asNegatedLinearExpressionProto(target), + Exprs: protos, + }}, + }) } // AddMaxEquality adds the constraint: target == max(expr). @@ -818,12 +818,13 @@ func (cp *Builder) AddMaxEquality(target LinearArgument, exprs ...LinearArgument protos = append(protos, e.asLinearExpressionProto()) } - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - LinMax: cmpb.LinearArgumentProto_builder{ - Target: target.asLinearExpressionProto(), - Exprs: protos, - }.Build(), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_LinMax{ + &cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: protos, + }}, + }) } // AddMultiplicationEquality adds the constraint: target == Product(exprs). @@ -833,51 +834,55 @@ func (cp *Builder) AddMultiplicationEquality(target LinearArgument, exprs ...Lin protos = append(protos, e.asLinearExpressionProto()) } - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - IntProd: cmpb.LinearArgumentProto_builder{ - Target: target.asLinearExpressionProto(), - Exprs: protos, - }.Build(), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_IntProd{ + &cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: protos, + }}, + }) } // AddDivisionEquality adds the constraint: target == num / denom. func (cp *Builder) AddDivisionEquality(target, num, denom LinearArgument) Constraint { - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - IntDiv: cmpb.LinearArgumentProto_builder{ - Target: target.asLinearExpressionProto(), - Exprs: []*cmpb.LinearExpressionProto{ - num.asLinearExpressionProto(), - denom.asLinearExpressionProto(), - }, - }.Build(), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_IntDiv{ + &cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: []*cmpb.LinearExpressionProto{ + num.asLinearExpressionProto(), + denom.asLinearExpressionProto(), + }, + }}, + }) } // AddAbsEquality adds the constraint: target == Abs(expr). func (cp *Builder) AddAbsEquality(target, expr LinearArgument) Constraint { - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - LinMax: cmpb.LinearArgumentProto_builder{ - Target: target.asLinearExpressionProto(), - Exprs: []*cmpb.LinearExpressionProto{ - expr.asLinearExpressionProto(), - asNegatedLinearExpressionProto(expr), - }, - }.Build(), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_LinMax{ + &cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: []*cmpb.LinearExpressionProto{ + expr.asLinearExpressionProto(), + asNegatedLinearExpressionProto(expr), + }, + }}, + }) } // AddModuloEquality adds the constraint: target == v % mod. 
func (cp *Builder) AddModuloEquality(target, v, mod LinearArgument) Constraint { - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - IntMod: cmpb.LinearArgumentProto_builder{ - Target: target.asLinearExpressionProto(), - Exprs: []*cmpb.LinearExpressionProto{ - v.asLinearExpressionProto(), - mod.asLinearExpressionProto(), - }, - }.Build(), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_IntMod{ + &cmpb.LinearArgumentProto{ + Target: target.asLinearExpressionProto(), + Exprs: []*cmpb.LinearExpressionProto{ + v.asLinearExpressionProto(), + mod.asLinearExpressionProto(), + }, + }}, + }) } // AddNoOverlap adds a constraint that ensures that all present intervals do not overlap in time. @@ -885,37 +890,41 @@ func (cp *Builder) AddNoOverlap(vars ...IntervalVar) Constraint { intervals := make([]int32, len(vars)) for i, v := range vars { cp.checkSameModelAndSetErrorf(v.cpb, "invalid parameter intervalVar %v added to the AddNoOverlap constraint %v", v.Index(), len(cp.cmpb.GetConstraints())) - intervals[i] = int32_t(v.ind) + intervals[i] = int32(v.ind) } - return cp.appendConstraint(cmpb.ConstraintProto_builder{ - NoOverlap: cmpb.NoOverlapConstraintProto_builder{ - Intervals: intervals, - }.Build(), - }.Build()) + return cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_NoOverlap{ + &cmpb.NoOverlapConstraintProto{ + Intervals: intervals, + }}, + }) } // AddNoOverlap2D adds a no_overlap2D constraint that prevents a set of boxes from overlapping. func (cp *Builder) AddNoOverlap2D() NoOverlap2DConstraint { - return NoOverlap2DConstraint{cp.appendConstraint(cmpb.ConstraintProto_builder{ - NoOverlap_2D: &cmpb.NoOverlap2DConstraintProto{}, - }.Build())} + return NoOverlap2DConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_NoOverlap_2D{ + &cmpb.NoOverlap2DConstraintProto{}, + }})} } // AddCircuitConstraint adds a circuit constraint to the model. The circuit constraint is // defined on a graph where the arcs are present if the corresponding literals are set to true. func (cp *Builder) AddCircuitConstraint() CircuitConstraint { - return CircuitConstraint{cp.appendConstraint(cmpb.ConstraintProto_builder{ - Circuit: &cmpb.CircuitConstraintProto{}, - }.Build())} + return CircuitConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Circuit{ + &cmpb.CircuitConstraintProto{}, + }})} } // AddMultipleCircuitConstraint adds a multiple circuit constraint to the model, aka the "VRP" // (Vehicle Routing Problem) constraint. func (cp *Builder) AddMultipleCircuitConstraint() MultipleCircuitConstraint { - return MultipleCircuitConstraint{cp.appendConstraint(cmpb.ConstraintProto_builder{ - Routes: &cmpb.RoutesConstraintProto{}, - }.Build())} + return MultipleCircuitConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Routes{ + &cmpb.RoutesConstraintProto{}, + }})} } // AddAllowedAssignments adds an allowed assignments constraint to the model. 
When all variables @@ -924,12 +933,13 @@ func (cp *Builder) AddMultipleCircuitConstraint() MultipleCircuitConstraint { func (cp *Builder) AddAllowedAssignments(vars ...IntVar) TableConstraint { var varsInd []int32 for _, v := range vars { - varsInd = append(varsInd, int32_t(v.ind)) + varsInd = append(varsInd, int32(v.ind)) } - return TableConstraint{cp.appendConstraint(cmpb.ConstraintProto_builder{ - Table: cmpb.TableConstraintProto_builder{Vars: varsInd}.Build(), - }.Build())} + return TableConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Table{ + &cmpb.TableConstraintProto{Vars: varsInd}, + }})} } // AddReservoirConstraint adds a reservoir constraint with optional refill/emptying events. @@ -949,12 +959,14 @@ func (cp *Builder) AddAllowedAssignments(vars ...IntVar) TableConstraint { // // It returns a ReservoirConstraint that allows adding optional and non // optional events incrementally after construction. -func (cp *Builder) AddReservoirConstraint(min, max int64_t) ReservoirConstraint { +func (cp *Builder) AddReservoirConstraint(min, max int64) ReservoirConstraint { return ReservoirConstraint{ cp.appendConstraint( - cmpb.ConstraintProto_builder{Reservoir: cmpb.ReservoirConstraintProto_builder{ - MinLevel: min, MaxLevel: max, - }.Build()}.Build(), + &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Reservoir{ + &cmpb.ReservoirConstraintProto{ + MinLevel: min, MaxLevel: max, + }}}, ), cp.NewConstant(1).Index()} } @@ -982,30 +994,32 @@ func (cp *Builder) AddReservoirConstraint(min, max int64_t) ReservoirConstraint // // It returns an AutomatonConstraint that allows adding transition // incrementally after construction. -func (cp *Builder) AddAutomaton(transitionVars []IntVar, startState int64_t, finalStates []int64) AutomatonConstraint { +func (cp *Builder) AddAutomaton(transitionVars []IntVar, startState int64, finalStates []int64) AutomatonConstraint { var transitions []int32 for _, v := range transitionVars { cp.checkSameModelAndSetErrorf(v.cpb, "invalid parameter intVar %v added to the AutomatonConstraint %v", v.Index(), len(cp.cmpb.GetConstraints())) - transitions = append(transitions, int32_t(v.Index())) + transitions = append(transitions, int32(v.Index())) } - return AutomatonConstraint{cp.appendConstraint(cmpb.ConstraintProto_builder{ - Automaton: cmpb.AutomatonConstraintProto_builder{ - Vars: transitions, - StartingState: startState, - FinalStates: finalStates, - }.Build(), - }.Build())} + return AutomatonConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Automaton{ + &cmpb.AutomatonConstraintProto{ + Vars: transitions, + StartingState: startState, + FinalStates: finalStates, + }}, + })} } // AddCumulative adds a cumulative constraint to the model that ensures that for any integer // point, the sum of the demands of the intervals containging that point does not exceed the // capacity. func (cp *Builder) AddCumulative(capacity LinearArgument) CumulativeConstraint { - return CumulativeConstraint{cp.appendConstraint(cmpb.ConstraintProto_builder{ - Cumulative: cmpb.CumulativeConstraintProto_builder{ - Capacity: capacity.asLinearExpressionProto(), - }.Build(), - }.Build())} + return CumulativeConstraint{cp.appendConstraint(&cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Cumulative{ + &cmpb.CumulativeConstraintProto{ + Capacity: capacity.asLinearExpressionProto(), + }, + }})} } // Minimize adds a linear minimization objective. 
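The scheduling-style wrappers above (reservoir, automaton, cumulative) are designed to be populated incrementally after construction, e.g. via AddDemand on the returned CumulativeConstraint. A usage sketch under the same assumptions as the previous example (inferred cpmodel package name and import path; not part of the patch):

    // Sketch, not part of the patch: one task on a cumulative resource.
    package main

    import "github.com/google/or-tools/ortools/sat/go/cpmodel"

    func main() {
        model := cpmodel.NewCpModelBuilder()

        // A task of fixed duration 5 whose start lies in [0, 20].
        start := model.NewIntVar(0, 20)
        task := model.NewFixedSizeIntervalVar(start, 5)

        // Resource capacity 4; the task consumes 3 units while it runs.
        cumulative := model.AddCumulative(model.NewConstant(4))
        cumulative.AddDemand(task, model.NewConstant(3))

        model.Minimize(start) // prefer the earliest start
    }
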
@@ -1014,12 +1028,12 @@ func (cp *Builder) Minimize(obj LinearArgument) { opb := &cmpb.CpObjectiveProto{} for _, varCoeff := range o.varCoeffs { - opb.SetVars(append(opb.GetVars(), int32_t(varCoeff.ind))) - opb.SetCoeffs(append(opb.GetCoeffs(), varCoeff.coeff)) + opb.Vars = append(opb.GetVars(), int32(varCoeff.ind)) + opb.Coeffs = append(opb.GetCoeffs(), varCoeff.coeff) } - opb.SetOffset(float64(o.offset)) + opb.Offset = float64(o.offset) - cp.cmpb.SetObjective(opb) + cp.cmpb.Objective = opb } // Maximize adds a linear maximization objective. @@ -1028,13 +1042,13 @@ func (cp *Builder) Maximize(obj LinearArgument) { opb := &cmpb.CpObjectiveProto{} for _, varCoeff := range o.varCoeffs { - opb.SetVars(append(opb.GetVars(), int32_t(varCoeff.ind))) - opb.SetCoeffs(append(opb.GetCoeffs(), -varCoeff.coeff)) + opb.Vars = append(opb.GetVars(), int32(varCoeff.ind)) + opb.Coeffs = append(opb.GetCoeffs(), -varCoeff.coeff) } - opb.SetOffset(float64(-o.offset)) - opb.SetScalingFactor(-1) + opb.Offset = float64(-o.offset) + opb.ScalingFactor = -1 - cp.cmpb.SetObjective(opb) + cp.cmpb.Objective = opb } // Hint is a container for IntVar and BoolVar hints to the CP model. @@ -1069,33 +1083,33 @@ func (h *Hint) proto() *cmpb.PartialVariableAssignment { var vars []int32 var hints []int64 for iv, hint := range h.Ints { - vars = append(vars, int32_t(iv.ind)) + vars = append(vars, int32(iv.ind)) hints = append(hints, hint) } for bv, hint := range h.Bools { - var hintInt int64_t + var hintInt int64 if hint { hintInt = 1 } if bv.ind < 0 { hintInt = 1 - hintInt } - vars = append(vars, int32_t(bv.ind.positiveIndex())) + vars = append(vars, int32(bv.ind.positiveIndex())) hints = append(hints, hintInt) } sort.Sort(indexValueSlices{vars, hints}) - return cmpb.PartialVariableAssignment_builder{Vars: vars, Values: hints}.Build() + return &cmpb.PartialVariableAssignment{Vars: vars, Values: hints} } // SetHint sets the hint on the model. func (cp *Builder) SetHint(hint *Hint) { - cp.cmpb.SetSolutionHint(hint.proto()) + cp.cmpb.SolutionHint = hint.proto() } // ClearHint clears any hints on the model. func (cp *Builder) ClearHint() { - cp.cmpb.ClearSolutionHint() + cp.cmpb.SolutionHint = nil } // AddAssumption adds the literals to the model as assumptions. @@ -1104,13 +1118,13 @@ func (cp *Builder) AddAssumption(lits ...BoolVar) { if !cp.checkSameModelAndSetErrorf(lit.cpb, "BoolVar %v added as an Assumption", lit.Index()) { return } - cp.cmpb.SetAssumptions(append(cp.cmpb.GetAssumptions(), int32_t(lit.ind))) + cp.cmpb.Assumptions = append(cp.cmpb.GetAssumptions(), int32(lit.ind)) } } // ClearAssumption clears all the assumptions on the model. func (cp *Builder) ClearAssumption() { - cp.cmpb.SetAssumptions(nil) + cp.cmpb.Assumptions = nil } // AddDecisionStrategy adds a decision strategy on a list of integer variables. @@ -1120,14 +1134,14 @@ func (cp *Builder) AddDecisionStrategy(vars []IntVar, vs cmpb.DecisionStrategyPr if !cp.checkSameModelAndSetErrorf(v.cpb, "invalid parameter var %v added to the DecisionStrategy", v.Index()) { return } - indices = append(indices, int32_t(v.ind)) + indices = append(indices, int32(v.ind)) } - cp.cmpb.SetSearchStrategy(append(cp.cmpb.GetSearchStrategy(), cmpb.DecisionStrategyProto_builder{ + cp.cmpb.SearchStrategy = append(cp.cmpb.GetSearchStrategy(), &cmpb.DecisionStrategyProto{ Variables: indices, VariableSelectionStrategy: vs, DomainReductionStrategy: ds, - }.Build())) + }) } // Model returns the built CP model proto. 
The proto returned is a pointer to the proto in Builder, diff --git a/ortools/sat/go/cp_model_test.go b/ortools/sat/go/cpmodel/cp_model_test.go similarity index 70% rename from ortools/sat/go/cp_model_test.go rename to ortools/sat/go/cpmodel/cp_model_test.go index cb52343d4d1..ad91fefff05 100644 --- a/ortools/sat/go/cp_model_test.go +++ b/ortools/sat/go/cpmodel/cp_model_test.go @@ -20,10 +20,11 @@ import ( "sort" "testing" - "github.com/golang/glog" - "golang/cmp/cmp" - "golang/protobuf/v2/testing/protocmp/protocmp" - cmpb "ortools/sat/cp_model_go_proto" + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/testing/protocmp" + + log "github.com/golang/glog" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" ) func Example() { @@ -186,9 +187,9 @@ func TestVar_IntegerVariableProto(t *testing.T) { bv := model.NewBoolVar() return bv.Index() }, - want: cmpb.IntegerVariableProto_builder{ + want: &cmpb.IntegerVariableProto{ Domain: []int64{0, 1}, - }.Build(), + }, }, { name: "IntVar", @@ -196,9 +197,9 @@ func TestVar_IntegerVariableProto(t *testing.T) { iv := model.NewIntVar(-10, 10) return iv.Index() }, - want: cmpb.IntegerVariableProto_builder{ + want: &cmpb.IntegerVariableProto{ Domain: []int64{-10, 10}, - }.Build(), + }, }, { name: "IntVarFromDomain", @@ -206,9 +207,9 @@ func TestVar_IntegerVariableProto(t *testing.T) { iv := model.NewIntVarFromDomain(FromValues([]int64{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8})) return iv.Index() }, - want: cmpb.IntegerVariableProto_builder{ + want: &cmpb.IntegerVariableProto{ Domain: FromValues([]int64{1, 2, 3, 5, 4, 6, 10, 12, 11, 15, 8}).FlattenedIntervals(), - }.Build(), + }, }, { name: "ConstVar", @@ -216,9 +217,9 @@ func TestVar_IntegerVariableProto(t *testing.T) { cv := model.NewConstant(10) return cv.Index() }, - want: cmpb.IntegerVariableProto_builder{ + want: &cmpb.IntegerVariableProto{ Domain: []int64{10, 10}, - }.Build(), + }, }, { name: "TrueVar", @@ -226,9 +227,9 @@ func TestVar_IntegerVariableProto(t *testing.T) { tv := model.TrueVar() return tv.Index() }, - want: cmpb.IntegerVariableProto_builder{ + want: &cmpb.IntegerVariableProto{ Domain: []int64{1, 1}, - }.Build(), + }, }, { name: "FalseVar", @@ -236,9 +237,9 @@ func TestVar_IntegerVariableProto(t *testing.T) { fv := model.FalseVar() return fv.Index() }, - want: cmpb.IntegerVariableProto_builder{ + want: &cmpb.IntegerVariableProto{ Domain: []int64{0, 0}, - }.Build(), + }, }, } @@ -258,55 +259,55 @@ func TestVar_IntegerVariableProto(t *testing.T) { func TestVar_EvaluateSolutionValue(t *testing.T) { testCases := []struct { name string - evaluateSolutionValue func() int64_t - want int64_t + evaluateSolutionValue func() int64 + want int64 }{ { name: "IntVarEvaluateSolutionValue", - evaluateSolutionValue: func() int64_t { + evaluateSolutionValue: func() int64 { model := NewCpModelBuilder() iv := model.NewIntVar(0, 10) - response := cmpb.CpSolverResponse_builder{ + response := &cmpb.CpSolverResponse{ Solution: []int64{5}, - }.Build() + } return iv.evaluateSolutionValue(response) }, want: 5, }, { name: "BoolVarEvaluateSolutionValue", - evaluateSolutionValue: func() int64_t { + evaluateSolutionValue: func() int64 { model := NewCpModelBuilder() bv := model.NewBoolVar() - response := cmpb.CpSolverResponse_builder{ + response := &cmpb.CpSolverResponse{ Solution: []int64{0}, - }.Build() + } return bv.evaluateSolutionValue(response) }, want: 0, }, { name: "BoolVarNotEvaluateSolutionValue", - evaluateSolutionValue: func() int64_t { + evaluateSolutionValue: func() int64 { model := 
NewCpModelBuilder() bv := model.NewBoolVar() - response := cmpb.CpSolverResponse_builder{ + response := &cmpb.CpSolverResponse{ Solution: []int64{0}, - }.Build() + } return bv.Not().evaluateSolutionValue(response) }, want: 1, }, { name: "AddLinExpr", - evaluateSolutionValue: func() int64_t { + evaluateSolutionValue: func() int64 { model := NewCpModelBuilder() iv := model.NewIntVar(0, 10) bv := model.NewBoolVar() le := NewLinearExpr().AddTerm(iv, 10).AddTerm(bv, 20).AddConstant(5) - response := cmpb.CpSolverResponse_builder{ + response := &cmpb.CpSolverResponse{ Solution: []int64{5, 1}, - }.Build() + } return le.evaluateSolutionValue(response) }, want: 75, @@ -427,42 +428,42 @@ func TestVar_AsLinearExpressionProto(t *testing.T) { buildProto: func() *cmpb.LinearExpressionProto { return iv.asLinearExpressionProto() }, - want: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv.Index())}, + want: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv.Index())}, Coeffs: []int64{1}, - }.Build(), + }, }, { name: "BoolVar", buildProto: func() *cmpb.LinearExpressionProto { return bv.asLinearExpressionProto() }, - want: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(bv.Index())}, + want: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(bv.Index())}, Coeffs: []int64{1}, - }.Build(), + }, }, { name: "BoolVarNot", buildProto: func() *cmpb.LinearExpressionProto { return bv.Not().asLinearExpressionProto() }, - want: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(bv.Index())}, + want: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(bv.Index())}, Coeffs: []int64{-1}, Offset: 1, - }.Build(), + }, }, { name: "LinearExpr", buildProto: func() *cmpb.LinearExpressionProto { return linExpr.asLinearExpressionProto() }, - want: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv.Index()), int32_t(bv.Index())}, + want: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv.Index()), int32(bv.Index())}, Coeffs: []int64{10, 20}, Offset: 5, - }.Build(), + }, }, } @@ -495,42 +496,42 @@ func TestVar_AsNegatedLinearExpressionProto(t *testing.T) { buildNegatedProto: func() *cmpb.LinearExpressionProto { return asNegatedLinearExpressionProto(iv) }, - want: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv.Index())}, + want: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv.Index())}, Coeffs: []int64{-1}, - }.Build(), + }, }, { name: "BoolVar", buildNegatedProto: func() *cmpb.LinearExpressionProto { return asNegatedLinearExpressionProto(bv) }, - want: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(bv.Index())}, + want: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(bv.Index())}, Coeffs: []int64{-1}, - }.Build(), + }, }, { name: "BoolVarNot", buildNegatedProto: func() *cmpb.LinearExpressionProto { return asNegatedLinearExpressionProto(bv.Not()) }, - want: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(bv.Index())}, + want: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(bv.Index())}, Coeffs: []int64{1}, Offset: -1, - }.Build(), + }, }, { name: "LinearExpr", buildNegatedProto: func() *cmpb.LinearExpressionProto { return asNegatedLinearExpressionProto(linExpr) }, - want: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv.Index()), int32_t(bv.Index())}, + want: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv.Index()), int32(bv.Index())}, Coeffs: []int64{-10, -20}, Offset: -5, - }.Build(), + }, }, } @@ -732,20 +733,22 @@ func TestIntervalVar(t *testing.T) { m := mustModel(t, model) return 
m.GetConstraints()[iv.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(trueVar.Index())}, - Interval: cmpb.IntervalConstraintProto_builder{ - Start: cmpb.LinearExpressionProto_builder{Offset: 1}.Build(), - Size: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - End: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64{1}, - }.Build(), - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(trueVar.Index())}, + Constraint: &cmpb.ConstraintProto_Interval{ + &cmpb.IntervalConstraintProto{ + Start: &cmpb.LinearExpressionProto{Offset: 1}, + Size: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + End: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + }, + }, + }, }, { name: "NewFixedSizeIntervalVar", @@ -754,21 +757,23 @@ func TestIntervalVar(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[iv.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(trueVar.Index())}, - Interval: cmpb.IntervalConstraintProto_builder{ - Start: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - Size: cmpb.LinearExpressionProto_builder{Offset: 5}.Build(), - End: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - Offset: 5, - }.Build(), - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(trueVar.Index())}, + Constraint: &cmpb.ConstraintProto_Interval{ + &cmpb.IntervalConstraintProto{ + Start: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + Size: &cmpb.LinearExpressionProto{Offset: 5}, + End: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Offset: 5, + }, + }, + }, + }, }, { name: "NewOptionalIntervalVar", @@ -777,20 +782,22 @@ func TestIntervalVar(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[iv.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(bv1.Index())}, - Interval: cmpb.IntervalConstraintProto_builder{ - Start: cmpb.LinearExpressionProto_builder{Offset: 1}.Build(), - Size: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - End: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64{1}, - }.Build(), - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(bv1.Index())}, + Constraint: &cmpb.ConstraintProto_Interval{ + &cmpb.IntervalConstraintProto{ + Start: &cmpb.LinearExpressionProto{Offset: 1}, + Size: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + End: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + }, + }, + }, }, { name: "NewOptionalFixedSizeIntervalVar", @@ -799,21 +806,23 @@ func TestIntervalVar(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[iv.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(bv1.Index())}, - Interval: cmpb.IntervalConstraintProto_builder{ - Start: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - Size: 
cmpb.LinearExpressionProto_builder{Offset: 5}.Build(), - End: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - Offset: 5, - }.Build(), - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(bv1.Index())}, + Constraint: &cmpb.ConstraintProto_Interval{ + &cmpb.IntervalConstraintProto{ + Start: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + Size: &cmpb.LinearExpressionProto{Offset: 5}, + End: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Offset: 5, + }, + }, + }, + }, }, } @@ -869,12 +878,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(bv3.Index())}, - BoolOr: cmpb.BoolArgumentProto_builder{ - Literals: []int32{int32_t(bv1.Index()), int32_t(bv2.Not().Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(bv3.Index())}, + Constraint: &cmpb.ConstraintProto_BoolOr{ + &cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }, + }, + }, }, { name: "AddBoolAnd", @@ -883,12 +894,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(bv3.Index())}, - BoolAnd: cmpb.BoolArgumentProto_builder{ - Literals: []int32{int32_t(bv1.Index()), int32_t(bv2.Not().Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(bv3.Index())}, + Constraint: &cmpb.ConstraintProto_BoolAnd{ + &cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }, + }, + }, }, { name: "AddBoolXor", @@ -897,12 +910,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(bv3.Index())}, - BoolXor: cmpb.BoolArgumentProto_builder{ - Literals: []int32{int32_t(bv1.Index()), int32_t(bv2.Not().Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(bv3.Index())}, + Constraint: &cmpb.ConstraintProto_BoolXor{ + &cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }, + }, + }, }, { name: "AddAtLeastOne", @@ -911,12 +926,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(bv3.Index())}, - BoolOr: cmpb.BoolArgumentProto_builder{ - Literals: []int32{int32_t(bv1.Index()), int32_t(bv2.Not().Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(bv3.Index())}, + Constraint: &cmpb.ConstraintProto_BoolOr{ + &cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }, + }, + }, }, { name: "AddAtMostOne", @@ -925,12 +942,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(bv3.Index())}, - AtMostOne: cmpb.BoolArgumentProto_builder{ - Literals: []int32{int32_t(bv1.Index()), int32_t(bv2.Not().Index())}, - }.Build(), - }.Build(), + 
want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(bv3.Index())}, + Constraint: &cmpb.ConstraintProto_AtMostOne{ + &cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }, + }, + }, }, { name: "AddExactlyOne", @@ -939,12 +958,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - EnforcementLiteral: []int32{int32_t(bv3.Index())}, - ExactlyOne: cmpb.BoolArgumentProto_builder{ - Literals: []int32{int32_t(bv1.Index()), int32_t(bv2.Not().Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + EnforcementLiteral: []int32{int32(bv3.Index())}, + Constraint: &cmpb.ConstraintProto_ExactlyOne{ + &cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Index()), int32(bv2.Not().Index())}, + }, + }, + }, }, { name: "AddImplication", @@ -953,11 +974,13 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - BoolOr: cmpb.BoolArgumentProto_builder{ - Literals: []int32{int32_t(bv1.Not().Index()), int32_t(bv2.Not().Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_BoolOr{ + &cmpb.BoolArgumentProto{ + Literals: []int32{int32(bv1.Not().Index()), int32(bv2.Not().Index())}, + }, + }, + }, }, { name: "AddLinearConstraintForDomain", @@ -967,13 +990,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Linear: cmpb.LinearConstraintProto_builder{ - Vars: []int32{int32_t(iv1.Index()), int32_t(bv1.Index())}, - Coeffs: []int64{1, 1}, - Domain: []int64{-5, -4, -2, -1, 6, 15}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Linear{ + &cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index()), int32(bv1.Index())}, + Coeffs: []int64{1, 1}, + Domain: []int64{-5, -4, -2, -1, 6, 15}, + }, + }, + }, }, { name: "AddLinearConstraint", @@ -982,13 +1007,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Linear: cmpb.LinearConstraintProto_builder{ - Vars: []int32{int32_t(iv1.Index()), int32_t(bv1.Index())}, - Coeffs: []int64{1, 1}, - Domain: []int64{2, 6}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Linear{ + &cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index()), int32(bv1.Index())}, + Coeffs: []int64{1, 1}, + Domain: []int64{2, 6}, + }, + }, + }, }, { name: "AddEquality", @@ -997,13 +1024,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Linear: cmpb.LinearConstraintProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{10, 10}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Linear{ + &cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{10, 10}, + }, + }, + }, }, { name: "AddLessOrEqual", @@ -1012,13 +1041,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Linear: cmpb.LinearConstraintProto_builder{ - Vars: 
[]int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{math.MinInt64, 10}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Linear{ + &cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{math.MinInt64, 10}, + }, + }, + }, }, { name: "AddLessThan", @@ -1027,13 +1058,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Linear: cmpb.LinearConstraintProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{math.MinInt64, 9}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Linear{ + &cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{math.MinInt64, 9}, + }, + }, + }, }, { name: "AddGreaterOrEqual", @@ -1042,13 +1075,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Linear: cmpb.LinearConstraintProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{10, math.MaxInt64}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Linear{ + &cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{10, math.MaxInt64}, + }, + }, + }, }, { name: "AddGreaterThan", @@ -1057,13 +1092,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Linear: cmpb.LinearConstraintProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{11, math.MaxInt64}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Linear{ + &cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{11, math.MaxInt64}, + }, + }, + }, }, { name: "AddNotEqual", @@ -1072,13 +1109,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Linear: cmpb.LinearConstraintProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - Domain: []int64{math.MinInt64, 9, 11, math.MaxInt64}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Linear{ + &cmpb.LinearConstraintProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + Domain: []int64{math.MinInt64, 9, 11, math.MaxInt64}, + }, + }, + }, }, { name: "AddAllDifferent", @@ -1087,30 +1126,32 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - AllDiff: cmpb.AllDifferentConstraintProto_builder{ - Exprs: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(bv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(bv2.Index())}, - Coeffs: []int64{-1}, - Offset: 1, - }.Build(), - cmpb.LinearExpressionProto_builder{ - Vars: []int32{}, - Coeffs: []int64{}, - Offset: 10, - }.Build(), + want: &cmpb.ConstraintProto{ + 
Constraint: &cmpb.ConstraintProto_AllDiff{ + &cmpb.AllDifferentConstraintProto{ + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{1}, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(bv1.Index())}, + Coeffs: []int64{1}, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(bv2.Index())}, + Coeffs: []int64{-1}, + Offset: 1, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{}, + Coeffs: []int64{}, + Offset: 10, + }, + }, }, - }.Build(), - }.Build(), + }, + }, }, { name: "AddVariableElement", @@ -1119,13 +1160,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Element: cmpb.ElementConstraintProto_builder{ - Index: int32_t(iv1.Index()), - Target: int32_t(iv4.Index()), - Vars: []int32{int32_t(iv2.Index()), int32_t(iv3.Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Element{ + &cmpb.ElementConstraintProto{ + Index: int32(iv1.Index()), + Target: int32(iv4.Index()), + Vars: []int32{int32(iv2.Index()), int32(iv3.Index())}, + }, + }, + }, }, { name: "AddElement", @@ -1134,16 +1177,18 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Element: cmpb.ElementConstraintProto_builder{ - Index: int32_t(iv1.Index()), - Target: int32_t(iv4.Index()), - Vars: []int32{ - int32_t(model.NewConstant(10).Index()), - int32_t(model.NewConstant(20).Index()), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Element{ + &cmpb.ElementConstraintProto{ + Index: int32(iv1.Index()), + Target: int32(iv4.Index()), + Vars: []int32{ + int32(model.NewConstant(10).Index()), + int32(model.NewConstant(20).Index()), + }, }, - }.Build(), - }.Build(), + }, + }, }, { name: "AddInverseConstraint", @@ -1152,12 +1197,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Inverse: cmpb.InverseConstraintProto_builder{ - FDirect: []int32{int32_t(iv1.Index()), int32_t(iv2.Index())}, - FInverse: []int32{int32_t(iv3.Index()), int32_t(iv4.Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Inverse{ + &cmpb.InverseConstraintProto{ + FDirect: []int32{int32(iv1.Index()), int32(iv2.Index())}, + FInverse: []int32{int32(iv3.Index()), int32(iv4.Index())}, + }, + }, + }, }, { name: "AddMinEquality", @@ -1166,24 +1213,26 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - LinMax: cmpb.LinearArgumentProto_builder{ - Target: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{-1}, - }.Build(), - Exprs: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64{-1}, - }.Build(), - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv3.Index())}, + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_LinMax{ + &cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, Coeffs: []int64{-1}, - }.Build(), + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{-1}, + }, + 
&cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{-1}, + }, + }, }, - }.Build(), - }.Build(), + }, + }, }, { name: "AddMaxEquality", @@ -1192,24 +1241,26 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - LinMax: cmpb.LinearArgumentProto_builder{ - Target: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - Exprs: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64{1}, - }.Build(), - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv3.Index())}, + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_LinMax{ + &cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, Coeffs: []int64{1}, - }.Build(), + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{1}, + }, + }, }, - }.Build(), - }.Build(), + }, + }, }, { name: "AddMultiplicationEquality", @@ -1218,24 +1269,26 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - IntProd: cmpb.LinearArgumentProto_builder{ - Target: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - Exprs: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_IntProd{ + &cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, Coeffs: []int64{1}, - }.Build(), - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv3.Index())}, - Coeffs: []int64{1}, - }.Build(), + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{1}, + }, + }, }, - }.Build(), - }.Build(), + }, + }, }, { name: "AddDivisionEquality", @@ -1244,24 +1297,26 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - IntDiv: cmpb.LinearArgumentProto_builder{ - Target: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - Exprs: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64{1}, - }.Build(), - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv3.Index())}, + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_IntDiv{ + &cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, Coeffs: []int64{1}, - }.Build(), + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{1}, + }, + }, }, - }.Build(), - }.Build(), + }, + }, }, { name: "AddAbsEquality", @@ -1270,24 +1325,26 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := 
mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - LinMax: cmpb.LinearArgumentProto_builder{ - Target: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - Exprs: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_LinMax{ + &cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, Coeffs: []int64{1}, - }.Build(), - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64{-1}, - }.Build(), + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{-1}, + }, + }, }, - }.Build(), - }.Build(), + }, + }, }, { name: "AddModuloEquality", @@ -1296,24 +1353,26 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - IntMod: cmpb.LinearArgumentProto_builder{ - Target: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - Exprs: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, - Coeffs: []int64{1}, - }.Build(), - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv3.Index())}, + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_IntMod{ + &cmpb.LinearArgumentProto{ + Target: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, Coeffs: []int64{1}, - }.Build(), + }, + Exprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv3.Index())}, + Coeffs: []int64{1}, + }, + }, }, - }.Build(), - }.Build(), + }, + }, }, { name: "AddNoOverlap", @@ -1322,11 +1381,13 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - NoOverlap: cmpb.NoOverlapConstraintProto_builder{ - Intervals: []int32{int32_t(interval1.Index()), int32_t(interval2.Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_NoOverlap{ + &cmpb.NoOverlapConstraintProto{ + Intervals: []int32{int32(interval1.Index()), int32(interval2.Index())}, + }, + }, + }, }, { name: "AddNoOverlap2D", @@ -1337,12 +1398,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - NoOverlap_2D: cmpb.NoOverlap2DConstraintProto_builder{ - XIntervals: []int32{int32_t(interval1.Index()), int32_t(interval3.Index())}, - YIntervals: []int32{int32_t(interval2.Index()), int32_t(interval4.Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_NoOverlap_2D{ + &cmpb.NoOverlap2DConstraintProto{ + XIntervals: []int32{int32(interval1.Index()), int32(interval3.Index())}, + YIntervals: []int32{int32(interval2.Index()), int32(interval4.Index())}, + }, + }, + }, }, { name: "AddCircuitConstraint", @@ -1352,13 +1415,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - 
want: cmpb.ConstraintProto_builder{ - Circuit: cmpb.CircuitConstraintProto_builder{ - Tails: []int32{0}, - Heads: []int32{1}, - Literals: []int32{int32_t(bv1.Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Circuit{ + &cmpb.CircuitConstraintProto{ + Tails: []int32{0}, + Heads: []int32{1}, + Literals: []int32{int32(bv1.Index())}, + }, + }, + }, }, { name: "AddMultipleCircuitConstraint", @@ -1368,13 +1433,15 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Routes: cmpb.RoutesConstraintProto_builder{ - Tails: []int32{0}, - Heads: []int32{1}, - Literals: []int32{int32_t(bv1.Index())}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Routes{ + &cmpb.RoutesConstraintProto{ + Tails: []int32{0}, + Heads: []int32{1}, + Literals: []int32{int32(bv1.Index())}, + }, + }, + }, }, { name: "AddAllowedAssignments", @@ -1385,12 +1452,14 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Table: cmpb.TableConstraintProto_builder{ - Vars: []int32{int32_t(iv1.Index()), int32_t(iv2.Index())}, - Values: []int64{0, 2, 1, 3}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Table{ + &cmpb.TableConstraintProto{ + Vars: []int32{int32(iv1.Index()), int32(iv2.Index())}, + Values: []int64{0, 2, 1, 3}, + }, + }, + }, }, { name: "AddReservoirConstraint", @@ -1400,24 +1469,26 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Reservoir: cmpb.ReservoirConstraintProto_builder{ - MinLevel: 10, - MaxLevel: 20, - TimeExprs: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{2}, - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Reservoir{ + &cmpb.ReservoirConstraintProto{ + MinLevel: 10, + MaxLevel: 20, + TimeExprs: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, + Coeffs: []int64{2}, + }, + }, + LevelChanges: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Offset: 15, + }, + }, + ActiveLiterals: []int32{int32(one.Index())}, }, - LevelChanges: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Offset: 15, - }.Build(), - }, - ActiveLiterals: []int32{int32_t(one.Index())}, - }.Build(), - }.Build(), + }, + }, }, { name: "AddAutomaton", @@ -1428,16 +1499,18 @@ func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Automaton: cmpb.AutomatonConstraintProto_builder{ - Vars: []int32{int32_t(iv1.Index()), int32_t(iv2.Index())}, - StartingState: 0, - FinalStates: []int64{5, 10}, - TransitionTail: []int64{0, 2}, - TransitionHead: []int64{1, 3}, - TransitionLabel: []int64{10, 15}, - }.Build(), - }.Build(), + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Automaton{ + &cmpb.AutomatonConstraintProto{ + Vars: []int32{int32(iv1.Index()), int32(iv2.Index())}, + StartingState: 0, + FinalStates: []int64{5, 10}, + TransitionTail: []int64{0, 2}, + TransitionHead: []int64{1, 3}, + TransitionLabel: []int64{10, 15}, + }, + }, + }, }, { name: "AddCumulative", @@ -1447,21 +1520,23 @@ 
func TestCpModelBuilder_Constraints(t *testing.T) { m := mustModel(t, model) return m.GetConstraints()[c.Index()] }, - want: cmpb.ConstraintProto_builder{ - Cumulative: cmpb.CumulativeConstraintProto_builder{ - Capacity: cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv1.Index())}, - Coeffs: []int64{1}, - }.Build(), - Intervals: []int32{int32_t(interval1.Index())}, - Demands: []*cmpb.LinearExpressionProto{ - cmpb.LinearExpressionProto_builder{ - Vars: []int32{int32_t(iv2.Index())}, + want: &cmpb.ConstraintProto{ + Constraint: &cmpb.ConstraintProto_Cumulative{ + &cmpb.CumulativeConstraintProto{ + Capacity: &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv1.Index())}, Coeffs: []int64{1}, - }.Build(), + }, + Intervals: []int32{int32(interval1.Index())}, + Demands: []*cmpb.LinearExpressionProto{ + &cmpb.LinearExpressionProto{ + Vars: []int32{int32(iv2.Index())}, + Coeffs: []int64{1}, + }, + }, }, - }.Build(), - }.Build(), + }, + }, }, } @@ -1484,10 +1559,10 @@ func TestCpModelBuilder_Minimize(t *testing.T) { model.Minimize(NewLinearExpr().AddTerm(iv1, 3).AddTerm(iv2, 5)) m := mustModel(t, model) - want := cmpb.CpObjectiveProto_builder{ - Vars: []int32{int32_t(iv1.Index()), int32_t(iv2.Index())}, + want := cmpb.CpObjectiveProto{ + Vars: []int32{int32(iv1.Index()), int32(iv2.Index())}, Coeffs: []int64{3, 5}, - }.Build() + } got := m.GetObjective() if diff := cmp.Diff(want, got, protocmp.Transform()); diff != "" { @@ -1502,12 +1577,12 @@ func TestCpModelBuilder_Maximize(t *testing.T) { iv2 := model.NewIntVar(-10, 10) model.Maximize(NewLinearExpr().AddTerm(iv1, 3).AddTerm(iv2, 5).AddConstant(7)) - want := cmpb.CpObjectiveProto_builder{ - Vars: []int32{int32_t(iv1.Index()), int32_t(iv2.Index())}, + want := cmpb.CpObjectiveProto{ + Vars: []int32{int32(iv1.Index()), int32(iv2.Index())}, Coeffs: []int64{-3, -5}, ScalingFactor: -1.0, Offset: -7, - }.Build() + } m := mustModel(t, model) got := m.GetObjective() @@ -1591,10 +1666,10 @@ func TestCpModelBuilder_SetHint(t *testing.T) { m := mustModel(t, model) got := m.GetSolutionHint() - want := cmpb.PartialVariableAssignment_builder{ - Vars: []int32{int32_t(iv.Index()), int32_t(bv1.Index()), int32_t(bv2.Index())}, + want := cmpb.PartialVariableAssignment{ + Vars: []int32{int32(iv.Index()), int32(bv1.Index()), int32(bv2.Index())}, Values: []int64{7, 1, 1}, - }.Build() + } if diff := cmp.Diff(want, got, protocmp.Transform()); diff != "" { t.Errorf("GetSolutionHint() returned unexpected diff (-want+got): %v", diff) @@ -1633,7 +1708,7 @@ func TestCpModelBuilder_AddAssumption(t *testing.T) { m := mustModel(t, model) got := m.GetAssumptions() - want := []int32{int32_t(bv1.Index()), int32_t(bv2.Index()), int32_t(bv3.Not().Index())} + want := []int32{int32(bv1.Index()), int32(bv2.Index()), int32(bv3.Not().Index())} if diff := cmp.Diff(want, got); diff != "" { t.Errorf("GetAssumptions() returned unexpected diff (-want+got): %v", diff) @@ -1670,16 +1745,16 @@ func TestCpModelBuilder_AddDecisionStrategy(t *testing.T) { m := mustModel(t, model) got := m.GetSearchStrategy() want := []*cmpb.DecisionStrategyProto{ - cmpb.DecisionStrategyProto_builder{ - Variables: []int32{int32_t(iv.Index()), int32_t(bv.Index())}, + &cmpb.DecisionStrategyProto{ + Variables: []int32{int32(iv.Index()), int32(bv.Index())}, VariableSelectionStrategy: cmpb.DecisionStrategyProto_CHOOSE_HIGHEST_MAX, DomainReductionStrategy: cmpb.DecisionStrategyProto_SELECT_LOWER_HALF, - }.Build(), - cmpb.DecisionStrategyProto_builder{ - Variables: []int32{int32_t(iv.Index())}, + }, + 
&cmpb.DecisionStrategyProto{ + Variables: []int32{int32(iv.Index())}, VariableSelectionStrategy: cmpb.DecisionStrategyProto_CHOOSE_LOWEST_MIN, DomainReductionStrategy: cmpb.DecisionStrategyProto_SELECT_UPPER_HALF, - }.Build(), + }, } if diff := cmp.Diff(want, got, protocmp.Transform()); diff != "" { diff --git a/ortools/sat/go/cp_solver.go b/ortools/sat/go/cpmodel/cp_solver.go similarity index 95% rename from ortools/sat/go/cp_solver.go rename to ortools/sat/go/cpmodel/cp_solver.go index 901f3d817d3..752b5d05624 100644 --- a/ortools/sat/go/cp_solver.go +++ b/ortools/sat/go/cpmodel/cp_solver.go @@ -18,16 +18,16 @@ import ( "sync" "unsafe" - "golang/protobuf/v2/proto/proto" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" - cmpb "ortools/sat/cp_model_go_proto" - sppb "ortools/sat/sat_parameters_go_proto" + "google.golang.org/protobuf/proto" ) /* #include // for free #include -#include "ortools/sat/go/cp_solver_c.h" +#include "ortools/sat/go/cpmodel/cp_solver_c.h" */ import "C" @@ -179,6 +179,6 @@ func SolutionBooleanValue(r *cmpb.CpSolverResponse, bv BoolVar) bool { } // SolutionIntegerValue returns the value of LinearArgument `la` in the response. -func SolutionIntegerValue(r *cmpb.CpSolverResponse, la LinearArgument) int64_t { +func SolutionIntegerValue(r *cmpb.CpSolverResponse, la LinearArgument) int64 { return la.evaluateSolutionValue(r) } diff --git a/ortools/sat/go/cp_solver_c.cc b/ortools/sat/go/cpmodel/cp_solver_c.cc similarity index 89% rename from ortools/sat/go/cp_solver_c.cc rename to ortools/sat/go/cpmodel/cp_solver_c.cc index b846cbd16a9..1f5b36808f4 100644 --- a/ortools/sat/go/cp_solver_c.cc +++ b/ortools/sat/go/cpmodel/cp_solver_c.cc @@ -11,24 +11,30 @@ // See the License for the specific language governing permissions and // limitations under the License. 
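For orientation, here is a minimal illustrative Go sketch, not part of the patch, showing how the relocated cpmodel package is typically driven end to end. The NewCpModelBuilder constructor and the exact SolveCpModel signature are assumptions inferred from the surrounding tests (which use NewIntVar, AddAllDifferent, Maximize, Model and SolutionIntegerValue) and may differ from the real API.

package main

import (
	"fmt"

	"github.com/google/or-tools/ortools/sat/go/cpmodel"
)

func main() {
	// Two distinct integers in [0, 10]; maximize their sum.
	model := cpmodel.NewCpModelBuilder() // assumed constructor name
	x := model.NewIntVar(0, 10)
	y := model.NewIntVar(0, 10)
	model.AddAllDifferent(x, y)
	model.Maximize(cpmodel.NewLinearExpr().AddTerm(x, 1).AddTerm(y, 1))

	m, err := model.Model()
	if err != nil {
		panic(err)
	}
	res, err := cpmodel.SolveCpModel(m) // assumed: takes the model proto, returns (response, error)
	if err != nil {
		panic(err)
	}
	fmt.Println("x =", cpmodel.SolutionIntegerValue(res, x))
	fmt.Println("y =", cpmodel.SolutionIntegerValue(res, y))
}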
-#include "ortools/sat/go/cp_solver_c.h" +#include "ortools/sat/go/cpmodel/cp_solver_c.h" #include #include #include "absl/status/status.h" +#include "absl/strings/internal/memutil.h" #include "ortools/base/logging.h" -#include "ortools/base/status.pb.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_solver.h" #include "ortools/sat/sat_parameters.pb.h" #include "ortools/util/time_limit.h" -#include "strings/memutil.h" namespace operations_research::sat { namespace { +char* memdup(const char* s, size_t slen) { + void* copy; + if ((copy = malloc(slen)) == nullptr) return nullptr; + memcpy(copy, s, slen); + return reinterpret_cast(copy); +} + CpSolverResponse solveWithParameters(std::atomic* const limit_reached, const CpModelProto& proto, const SatParameters& params) { @@ -75,7 +81,7 @@ void SolveCpInterruptible(void* const limit_reached, const void* creq, CHECK(res.SerializeToString(&res_str)); *cres_len = static_cast(res_str.size()); - *cres = strings::memdup(res_str.data(), *cres_len); + *cres = memdup(res_str.data(), *cres_len); CHECK(*cres != nullptr); } diff --git a/ortools/sat/go/cp_solver_c.h b/ortools/sat/go/cpmodel/cp_solver_c.h similarity index 100% rename from ortools/sat/go/cp_solver_c.h rename to ortools/sat/go/cpmodel/cp_solver_c.h diff --git a/ortools/sat/go/cp_solver_test.go b/ortools/sat/go/cpmodel/cp_solver_test.go similarity index 94% rename from ortools/sat/go/cp_solver_test.go rename to ortools/sat/go/cpmodel/cp_solver_test.go index 9b2e70f1f8e..dcdd0945ea6 100644 --- a/ortools/sat/go/cp_solver_test.go +++ b/ortools/sat/go/cpmodel/cp_solver_test.go @@ -16,10 +16,10 @@ package cpmodel import ( "testing" - "golang/protobuf/v2/proto/proto" + "google.golang.org/protobuf/proto" - cmpb "ortools/sat/cp_model_go_proto" - sppb "ortools/sat/sat_parameters_go_proto" + cmpb "github.com/google/or-tools/ortools/sat/proto/cpmodel" + sppb "github.com/google/or-tools/ortools/sat/proto/satparameters" ) func TestCpSolver_SolveIntVar(t *testing.T) { @@ -50,8 +50,8 @@ func TestCpSolver_SolveIntVar(t *testing.T) { if wantObj != gotObj { t.Errorf("SolveCpModel() returned objective = %v, got %v", gotObj, wantObj) } - wantX := int64_t(10) - wantY := int64_t(5) + wantX := int64(10) + wantY := int64(5) gotX := SolutionIntegerValue(res, x) gotY := SolutionIntegerValue(res, y) if wantX != gotX || wantY != gotY { @@ -97,15 +97,15 @@ func TestCpSolver_SolveBoolVar(t *testing.T) { if !gotNotX || !gotNotY { t.Errorf("SolutionBooleanValue() returned (x.Not(), y.Not()) = (%v, %v), want (true, true)", gotX, gotY) } - wantIntX := int64_t(0) - wantIntY := int64_t(0) + wantIntX := int64(0) + wantIntY := int64(0) gotIntX := SolutionIntegerValue(res, x) gotIntY := SolutionIntegerValue(res, y) if wantIntX != gotIntX || wantIntY != gotIntY { t.Errorf("SolutionIntegerValue() returned (x, y) = (%v, %v), want (%v, %v)", gotIntX, gotIntY, wantIntX, wantIntY) } - wantNotX := int64_t(1) - wantNotY := int64_t(1) + wantNotX := int64(1) + wantNotY := int64(1) gotIntNotX := SolutionIntegerValue(res, x.Not()) gotIntNotY := SolutionIntegerValue(res, y.Not()) if wantNotX != gotIntNotX || wantNotY != gotIntNotY { @@ -170,9 +170,9 @@ func TestCpSolver_SolveWithParameters(t *testing.T) { model.AddAllDifferent(x, y) model.Maximize(NewLinearExpr().AddTerm(x, 5).AddTerm(y, 6)) - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ MaxTimeInSeconds: proto.Float64(-1), - }.Build() + } m, err := model.Model() if err != nil { @@ -199,9 +199,9 @@ func TestCpSolver_SolveInterruptible(t *testing.T) { 
y := model.NewIntVar(0, 5) model.AddAllDifferent(x, y) model.Maximize(NewLinearExpr().AddTerm(x, 5).AddTerm(y, 6)) - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ MaxTimeInSeconds: proto.Float64(10), - }.Build() + } m, err := model.Model() if err != nil { @@ -226,9 +226,9 @@ func TestCpSolver_SolveInterruptible_NotCancelled(t *testing.T) { y := model.NewIntVar(0, 5) model.AddAllDifferent(x, y) model.Maximize(NewLinearExpr().AddTerm(x, 5).AddTerm(y, 6)) - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ MaxTimeInSeconds: proto.Float64(10), - }.Build() + } m, err := model.Model() if err != nil { @@ -253,9 +253,9 @@ func TestCpSolver_SolveInterruptible_BadParameters(t *testing.T) { y := model.NewIntVar(0, 5) model.AddAllDifferent(x, y) model.Maximize(NewLinearExpr().AddTerm(x, 5).AddTerm(y, 6)) - params := sppb.SatParameters_builder{ + params := &sppb.SatParameters{ MaxTimeInSeconds: proto.Float64(-1), - }.Build() + } m, err := model.Model() if err != nil { diff --git a/ortools/sat/go/domain.go b/ortools/sat/go/cpmodel/domain.go similarity index 94% rename from ortools/sat/go/domain.go rename to ortools/sat/go/cpmodel/domain.go index 901e42614a0..0910be97c8d 100644 --- a/ortools/sat/go/domain.go +++ b/ortools/sat/go/cpmodel/domain.go @@ -22,14 +22,14 @@ import ( // ClosedInterval stores the closed interval `[start,end]`. If the `Start` is greater // than the `End`, the interval is considered empty. type ClosedInterval struct { - Start int64_t - End int64_t + Start int64 + End int64 } // checkOverflowAndAdd first checks if adding `delta` to `i` will cause an integer overflow. // It will return the value of the summation if there is no overflow. Otherwise, it will // return MaxInt64 or MinInt64 depending on the direction of the overflow. -func checkOverflowAndAdd(i, delta int64_t) int64_t { +func checkOverflowAndAdd(i, delta int64) int64 { if i == math.MinInt64 || i == math.MaxInt64 { return i } @@ -49,7 +49,7 @@ func checkOverflowAndAdd(i, delta int64_t) int64_t { // is equal to MinInt or if `End` is equal to MaxInt, the offset does not get added since those // values represent an unbounded domain. Both `Start` and `End` are clamped at math.MinInt64 and // Math.MaxInt64. -func (c ClosedInterval) Offset(delta int64_t) ClosedInterval { +func (c ClosedInterval) Offset(delta int64) ClosedInterval { return ClosedInterval{checkOverflowAndAdd(c.Start, delta), checkOverflowAndAdd(c.End, delta)} } @@ -101,13 +101,13 @@ func NewEmptyDomain() Domain { } // NewSingleDomain creates a new singleton domain `[val]`. -func NewSingleDomain(val int64_t) Domain { +func NewSingleDomain(val int64) Domain { return Domain{[]ClosedInterval{{val, val}}} } // NewDomain creates a new domain of a single interval `[left,right]`. // If `left > right`, an empty domain is returned. -func NewDomain(left, right int64_t) Domain { +func NewDomain(left, right int64) Domain { if left > right { return NewEmptyDomain() } @@ -166,7 +166,7 @@ func (d Domain) FlattenedIntervals() []int64 { // Min returns the minimum value of the domain, and returns false if no minimum exists, // i.e. if the domain is empty. -func (d Domain) Min() (int64_t, bool) { +func (d Domain) Min() (int64, bool) { if len(d.intervals) == 0 { return 0, false } @@ -175,7 +175,7 @@ func (d Domain) Min() (int64_t, bool) { // Max returns the maximum value of the domain, and returns false if no maximum exists, // i.e. if the domain is empty. 
-func (d Domain) Max() (int64_t, bool) { +func (d Domain) Max() (int64, bool) { if len(d.intervals) == 0 { return 0, false } diff --git a/ortools/sat/go/domain_test.go b/ortools/sat/go/cpmodel/domain_test.go similarity index 98% rename from ortools/sat/go/domain_test.go rename to ortools/sat/go/cpmodel/domain_test.go index bfe0b13b97d..526475bb9c6 100644 --- a/ortools/sat/go/domain_test.go +++ b/ortools/sat/go/cpmodel/domain_test.go @@ -18,7 +18,7 @@ import ( "strings" "testing" - "golang/cmp/cmp" + "github.com/google/go-cmp/cmp" ) func TestDomain_NewEmptyDomain(t *testing.T) { @@ -41,8 +41,8 @@ func TestDomain_NewSingleDomain(t *testing.T) { func TestDomain_NewDomain(t *testing.T) { testCases := []struct { - left int64_t - right int64_t + left int64 + right int64 want Domain }{ { @@ -191,7 +191,7 @@ func TestDomain_FlattenedIntervals(t *testing.T) { func TestDomain_Min(t *testing.T) { d := Domain{[]ClosedInterval{{-1, 1}, {3, 3}, {5, 10}}} - want := int64_t(-1) + want := int64(-1) if got, ok := d.Min(); got != want || !ok { t.Errorf("Min() returned with unexpected value (%v, %v), want (%v, %v)", got, ok, want, true) } @@ -208,7 +208,7 @@ func TestDomain_MinEmptyDomain(t *testing.T) { func TestDomain_Max(t *testing.T) { d := Domain{[]ClosedInterval{{-1, 1}, {3, 3}, {5, 10}}} - want := int64_t(10) + want := int64(10) if got, ok := d.Max(); got != want || !ok { t.Errorf("Max() returned with unexpected value (%v, %v), want (%v, %v)", got, ok, want, true) } @@ -225,7 +225,7 @@ func TestDomain_MaxEmptyDomain(t *testing.T) { func TestDomain_Offset(t *testing.T) { testCases := []struct { interval ClosedInterval - delta int64_t + delta int64 want ClosedInterval }{ { diff --git a/ortools/sat/integer_search.cc b/ortools/sat/integer_search.cc index 1305be482e6..67e0e4632a2 100644 --- a/ortools/sat/integer_search.cc +++ b/ortools/sat/integer_search.cc @@ -1336,12 +1336,17 @@ bool IntegerSearchHelper::BeforeTakingDecision() { // the level zero first ! otherwise, the new deductions will not be // incorporated and the solver will loop forever. if (integer_trail_->HasPendingRootLevelDeduction()) { - if (!sat_solver_->ResetToLevelZero()) return false; + sat_solver_->Backtrack(0); } // The rest only trigger at level zero. if (sat_solver_->CurrentDecisionLevel() != 0) return true; + // Rather than doing it in each callback, we detect newly fixed variables or + // tighter bounds, and propagate just once when everything was added. + const int saved_bool_index = sat_solver_->LiteralTrail().Index(); + const int saved_integer_index = integer_trail_->num_enqueues(); + auto* level_zero_callbacks = model_->GetOrCreate(); for (const auto& cb : level_zero_callbacks->callbacks) { if (!cb()) { @@ -1350,6 +1355,13 @@ bool IntegerSearchHelper::BeforeTakingDecision() { } } + // We propagate if needed. 
+ if (sat_solver_->LiteralTrail().Index() > saved_bool_index || + integer_trail_->num_enqueues() > saved_integer_index || + integer_trail_->HasPendingRootLevelDeduction()) { + if (!sat_solver_->ResetToLevelZero()) return false; + } + if (parameters_.use_sat_inprocessing() && !inprocessing_->InprocessingRound()) { sat_solver_->NotifyThatModelIsUnsat(); diff --git a/ortools/sat/linear_programming_constraint.cc b/ortools/sat/linear_programming_constraint.cc index 2d0ffd9d2ef..aa1a79cef31 100644 --- a/ortools/sat/linear_programming_constraint.cc +++ b/ortools/sat/linear_programming_constraint.cc @@ -733,6 +733,18 @@ bool LinearProgrammingConstraint::SolveLp() { if (lp_solution_level_ == 0) { level_zero_lp_solution_ = lp_solution_; } + } else { + // If this parameter is true, we still copy whatever we have as these + // values will be used for the local-branching lns heuristic. + if (parameters_.stop_after_root_propagation()) { + const int num_vars = integer_variables_.size(); + for (int i = 0; i < num_vars; i++) { + const glop::Fractional value = + GetVariableValueAtCpScale(glop::ColIndex(i)); + expanded_lp_solution_[integer_variables_[i]] = value; + expanded_lp_solution_[NegationOf(integer_variables_[i])] = -value; + } + } } return true; @@ -2367,15 +2379,12 @@ bool LinearProgrammingConstraint::PropagateExactDualRay() { } int64_t LinearProgrammingConstraint::CalculateDegeneracy() { - const glop::ColIndex num_vars = simplex_.GetProblemNumCols(); int num_non_basic_with_zero_rc = 0; - for (glop::ColIndex i(0); i < num_vars; ++i) { - const double rc = simplex_.GetReducedCost(i); - if (rc != 0.0) continue; - if (simplex_.GetVariableStatus(i) == glop::VariableStatus::BASIC) { - continue; + const auto reduced_costs = simplex_.GetReducedCosts().const_view(); + for (const glop::ColIndex i : simplex_.GetNotBasicBitRow()) { + if (reduced_costs[i] == 0.0) { + num_non_basic_with_zero_rc++; } - num_non_basic_with_zero_rc++; } const int64_t num_cols = simplex_.GetProblemNumCols().value(); is_degenerate_ = num_non_basic_with_zero_rc >= 0.3 * num_cols; diff --git a/ortools/sat/presolve_context.cc b/ortools/sat/presolve_context.cc index 722f530cdfc..7377477eee1 100644 --- a/ortools/sat/presolve_context.cc +++ b/ortools/sat/presolve_context.cc @@ -37,6 +37,7 @@ #include "ortools/base/mathutil.h" #include "ortools/port/proto_utils.h" #include "ortools/sat/cp_model.pb.h" +#include "ortools/sat/cp_model_checker.h" #include "ortools/sat/cp_model_loader.h" #include "ortools/sat/cp_model_mapping.h" #include "ortools/sat/cp_model_utils.h" @@ -522,7 +523,7 @@ ABSL_MUST_USE_RESULT bool PresolveContext::IntersectDomainWith( if (!domains[var].Contains(hint_[var])) { LOG(FATAL) << "Hint with value " << hint_[var] << " infeasible when changing domain of " << var << " to " - << domain[var]; + << domains[var]; } #endif @@ -1411,9 +1412,9 @@ void PresolveContext::CanonicalizeDomainOfSizeTwo(int var) { void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, int64_t value, bool add_constraints) { - CHECK(RefIsPositive(var)); - CHECK(!VariableWasRemoved(literal)); - CHECK(!VariableWasRemoved(var)); + DCHECK(RefIsPositive(var)); + DCHECK(!VariableWasRemoved(literal)); + DCHECK(!VariableWasRemoved(var)); absl::flat_hash_map& var_map = encoding_[var]; // The code below is not 100% correct if this is not the case. @@ -1445,16 +1446,10 @@ void PresolveContext::InsertVarValueEncodingInternal(int literal, int var, // TODO(user): There is a bug here if the var == value was not in the // domain, it will just be ignored. 
CanonicalizeDomainOfSizeTwo(var); - } else { - VLOG(2) << "Insert lit(" << literal << ") <=> var(" << var - << ") == " << value; - eq_half_encoding_[var][value].insert(literal); - neq_half_encoding_[var][value].insert(NegatedRef(literal)); - if (add_constraints) { - UpdateRuleStats("variables: add encoding constraint"); - AddImplyInDomain(literal, var, Domain(value)); - AddImplyInDomain(NegatedRef(literal), var, Domain(value).Complement()); - } + } else if (add_constraints) { + UpdateRuleStats("variables: add encoding constraint"); + AddImplyInDomain(literal, var, Domain(value)); + AddImplyInDomain(NegatedRef(literal), var, Domain(value).Complement()); } } @@ -1462,32 +1457,25 @@ bool PresolveContext::InsertHalfVarValueEncoding(int literal, int var, int64_t value, bool imply_eq) { if (is_unsat_) return false; DCHECK(RefIsPositive(var)); - if (!CanonicalizeEncoding(&var, &value) || !DomainOf(var).Contains(value)) { - return SetLiteralToFalse(literal); - } // Creates the linking sets on demand. // Insert the enforcement literal in the half encoding map. - auto& direct_set = - imply_eq ? eq_half_encoding_[var][value] : neq_half_encoding_[var][value]; - if (!direct_set.insert(literal).second) return false; // Already there. - + auto& direct_set = imply_eq ? eq_half_encoding_ : neq_half_encoding_; + if (!direct_set.insert({literal, var, value}).second) { + return false; // Already there. + } VLOG(2) << "Collect lit(" << literal << ") implies var(" << var << (imply_eq ? ") == " : ") != ") << value; UpdateRuleStats("variables: detect half reified value encoding"); // Note(user): We don't expect a lot of literals in these sets, so doing // a scan should be okay. - auto& other_set = - imply_eq ? neq_half_encoding_[var][value] : eq_half_encoding_[var][value]; - for (const int other : other_set) { - if (GetLiteralRepresentative(other) != NegatedRef(literal)) continue; - + auto& other_set = imply_eq ? neq_half_encoding_ : eq_half_encoding_; + if (other_set.contains({NegatedRef(literal), var, value})) { UpdateRuleStats("variables: detect fully reified value encoding"); const int imply_eq_literal = imply_eq ? literal : NegatedRef(literal); InsertVarValueEncodingInternal(imply_eq_literal, var, value, /*add_constraints=*/false); - break; } return true; @@ -1508,6 +1496,8 @@ bool PresolveContext::InsertVarValueEncoding(int literal, int var, } literal = GetLiteralRepresentative(literal); InsertVarValueEncodingInternal(literal, var, value, /*add_constraints=*/true); + eq_half_encoding_.insert({literal, var, value}); + neq_half_encoding_.insert({NegatedRef(literal), var, value}); if (hint_is_loaded_) { const int bool_var = PositiveRef(literal); @@ -1524,6 +1514,7 @@ bool PresolveContext::InsertVarValueEncoding(int literal, int var, bool PresolveContext::StoreLiteralImpliesVarEqValue(int literal, int var, int64_t value) { if (!CanonicalizeEncoding(&var, &value) || !DomainOf(var).Contains(value)) { + // The literal cannot be true. return SetLiteralToFalse(literal); } literal = GetLiteralRepresentative(literal); @@ -1532,7 +1523,10 @@ bool PresolveContext::StoreLiteralImpliesVarEqValue(int literal, int var, bool PresolveContext::StoreLiteralImpliesVarNEqValue(int literal, int var, int64_t value) { - if (!CanonicalizeEncoding(&var, &value)) return false; + if (!CanonicalizeEncoding(&var, &value) || !DomainOf(var).Contains(value)) { + // The constraint is trivial. 
+ return false; + } literal = GetLiteralRepresentative(literal); return InsertHalfVarValueEncoding(literal, var, value, /*imply_eq=*/false); } @@ -1541,16 +1535,16 @@ bool PresolveContext::HasVarValueEncoding(int ref, int64_t value, int* literal) { CHECK(!VariableWasRemoved(ref)); if (!CanonicalizeEncoding(&ref, &value)) return false; - const absl::flat_hash_map& var_map = encoding_[ref]; - const auto it = var_map.find(value); - if (it != var_map.end()) { - if (VariableWasRemoved(it->second.Get(this))) return false; - if (literal != nullptr) { - *literal = it->second.Get(this); - } - return true; + const auto first_it = encoding_.find(ref); + if (first_it == encoding_.end()) return false; + const auto it = first_it->second.find(value); + if (it == first_it->second.end()) return false; + + if (VariableWasRemoved(it->second.Get(this))) return false; + if (literal != nullptr) { + *literal = it->second.Get(this); } - return false; + return true; } bool PresolveContext::IsFullyEncoded(int ref) const { diff --git a/ortools/sat/presolve_context.h b/ortools/sat/presolve_context.h index 2244435d21e..c2e581289df 100644 --- a/ortools/sat/presolve_context.h +++ b/ortools/sat/presolve_context.h @@ -714,15 +714,11 @@ class PresolveContext { encoding_; // Contains the currently collected half value encodings: - // i.e.: literal => var ==/!= value + // (literal, var, value), i.e.: literal => var ==/!= value // The state is accumulated (adding x => var == value then !x => var != value) // will deduce that x equivalent to var == value. - absl::flat_hash_map>> - eq_half_encoding_; - absl::flat_hash_map>> - neq_half_encoding_; + absl::flat_hash_set> eq_half_encoding_; + absl::flat_hash_set> neq_half_encoding_; // This regroups all the affine relations between variables. 
Note that the // constraints used to detect such relations will be removed from the model at diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index 78fd14971fa..cea258dca56 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -465,7 +465,6 @@ def __sub__(self, arg): if cmh.is_zero(arg): return self if isinstance(arg, NumberTypes): - arg = cmh.assert_is_a_number(arg) return _Sum(self, -arg) else: return _Sum(self, -arg) @@ -566,7 +565,6 @@ def __eq__(self, arg: LinearExprT) -> BoundedLinearExprT: # type: ignore[overri if arg is None: return False if isinstance(arg, IntegralTypes): - arg = cmh.assert_is_int64(arg) return BoundedLinearExpression(self, [arg, arg]) elif isinstance(arg, LinearExpr): return BoundedLinearExpression(self - arg, [0, 0]) @@ -575,22 +573,23 @@ def __eq__(self, arg: LinearExprT) -> BoundedLinearExprT: # type: ignore[overri def __ge__(self, arg: LinearExprT) -> "BoundedLinearExpression": if isinstance(arg, IntegralTypes): - arg = cmh.assert_is_int64(arg) + if arg >= INT_MAX: + raise ArithmeticError(">= INT_MAX is not supported") return BoundedLinearExpression(self, [arg, INT_MAX]) else: return BoundedLinearExpression(self - arg, [0, INT_MAX]) def __le__(self, arg: LinearExprT) -> "BoundedLinearExpression": if isinstance(arg, IntegralTypes): - arg = cmh.assert_is_int64(arg) + if arg <= INT_MIN: + raise ArithmeticError("<= INT_MIN is not supported") return BoundedLinearExpression(self, [INT_MIN, arg]) else: return BoundedLinearExpression(self - arg, [INT_MIN, 0]) def __lt__(self, arg: LinearExprT) -> "BoundedLinearExpression": if isinstance(arg, IntegralTypes): - arg = cmh.assert_is_int64(arg) - if arg == INT_MIN: + if arg <= INT_MIN: raise ArithmeticError("< INT_MIN is not supported") return BoundedLinearExpression(self, [INT_MIN, arg - 1]) else: @@ -598,8 +597,7 @@ def __lt__(self, arg: LinearExprT) -> "BoundedLinearExpression": def __gt__(self, arg: LinearExprT) -> "BoundedLinearExpression": if isinstance(arg, IntegralTypes): - arg = cmh.assert_is_int64(arg) - if arg == INT_MAX: + if arg >= INT_MAX: raise ArithmeticError("> INT_MAX is not supported") return BoundedLinearExpression(self, [arg + 1, INT_MAX]) else: @@ -609,10 +607,9 @@ def __ne__(self, arg: LinearExprT) -> BoundedLinearExprT: # type: ignore[overri if arg is None: return True if isinstance(arg, IntegralTypes): - arg = cmh.assert_is_int64(arg) - if arg == INT_MAX: + if arg >= INT_MAX: return BoundedLinearExpression(self, [INT_MIN, INT_MAX - 1]) - elif arg == INT_MIN: + elif arg <= INT_MIN: return BoundedLinearExpression(self, [INT_MIN + 1, INT_MAX]) else: return BoundedLinearExpression( @@ -702,7 +699,6 @@ class _ProductCst(LinearExpr): """Represents the product of a LinearExpr by a constant.""" def __init__(self, expr, coeff) -> None: - coeff = cmh.assert_is_a_number(coeff) if isinstance(expr, _ProductCst): self.__expr = expr.expression() self.__coef = expr.coefficient() * coeff @@ -736,7 +732,6 @@ def __init__(self, expressions, constant=0) -> None: if isinstance(x, NumberTypes): if cmh.is_zero(x): continue - x = cmh.assert_is_a_number(x) self.__constant += x elif isinstance(x, LinearExpr): self.__expressions.append(x) @@ -776,11 +771,9 @@ def __init__(self, expressions, coefficients, constant=0) -> None: " coefficient array must have the same length." 
) for e, c in zip(expressions, coefficients): - c = cmh.assert_is_a_number(c) if cmh.is_zero(c): continue if isinstance(e, NumberTypes): - e = cmh.assert_is_a_number(e) self.__constant += e * c elif isinstance(e, LinearExpr): self.__expressions.append(e) @@ -1509,9 +1502,8 @@ def add_linear_expression_in_domain( for t in coeffs_map.items(): if not isinstance(t[0], IntVar): raise TypeError("Wrong argument" + str(t)) - c = cmh.assert_is_int64(t[1]) model_ct.linear.vars.append(t[0].index) - model_ct.linear.coeffs.append(c) + model_ct.linear.coeffs.append(t[1]) model_ct.linear.domain.extend( [ cmh.capped_subtraction(x, constant) @@ -1640,12 +1632,9 @@ def add_circuit(self, arcs: Sequence[ArcT]) -> Constraint: ct = Constraint(self) model_ct = self.__model.constraints[ct.index] for arc in arcs: - tail = cmh.assert_is_int32(arc[0]) - head = cmh.assert_is_int32(arc[1]) - lit = self.get_or_make_boolean_index(arc[2]) - model_ct.circuit.tails.append(tail) - model_ct.circuit.heads.append(head) - model_ct.circuit.literals.append(lit) + model_ct.circuit.tails.append(arc[0]) + model_ct.circuit.heads.append(arc[1]) + model_ct.circuit.literals.append(self.get_or_make_boolean_index(arc[2])) return ct def add_multiple_circuit(self, arcs: Sequence[ArcT]) -> Constraint: @@ -1677,12 +1666,9 @@ def add_multiple_circuit(self, arcs: Sequence[ArcT]) -> Constraint: ct = Constraint(self) model_ct = self.__model.constraints[ct.index] for arc in arcs: - tail = cmh.assert_is_int32(arc[0]) - head = cmh.assert_is_int32(arc[1]) - lit = self.get_or_make_boolean_index(arc[2]) - model_ct.routes.tails.append(tail) - model_ct.routes.heads.append(head) - model_ct.routes.literals.append(lit) + model_ct.routes.tails.append(arc[0]) + model_ct.routes.heads.append(arc[1]) + model_ct.routes.literals.append(self.get_or_make_boolean_index(arc[2])) return ct def add_allowed_assignments( @@ -1720,15 +1706,19 @@ def add_allowed_assignments( model_ct = self.__model.constraints[ct.index] model_ct.table.vars.extend([self.get_or_make_index(x) for x in variables]) arity: int = len(variables) - for t in tuples_list: - if len(t) != arity: - raise TypeError("Tuple " + str(t) + " has the wrong arity") + for one_tuple in tuples_list: + if len(one_tuple) != arity: + raise TypeError("Tuple " + str(one_tuple) + " has the wrong arity") # duck-typing (no explicit type checks here) try: - model_ct.table.values.extend(a for b in tuples_list for a in b) + for one_tuple in tuples_list: + model_ct.table.values.extend(one_tuple) except ValueError as ex: - raise TypeError(f"add_xxx_assignment: Not an integer or does not fit in an int64_t: {ex.args}") from ex + raise TypeError( + "add_xxx_assignment: Not an integer or does not fit in an int64_t:" + f" {ex.args}" + ) from ex return ct @@ -1762,7 +1752,7 @@ def add_forbidden_assignments( "add_forbidden_assignments expects a non-empty variables array" ) - index = len(self.__model.constraints) + index: int = len(self.__model.constraints) ct: Constraint = self.add_allowed_assignments(variables, tuples_list) self.__model.constraints[index].table.negated = True return ct @@ -1829,20 +1819,15 @@ def add_automaton( model_ct.automaton.vars.extend( [self.get_or_make_index(x) for x in transition_variables] ) - starting_state = cmh.assert_is_int64(starting_state) model_ct.automaton.starting_state = starting_state for v in final_states: - v = cmh.assert_is_int64(v) model_ct.automaton.final_states.append(v) for t in transition_triples: if len(t) != 3: raise TypeError("Tuple " + str(t) + " has the wrong arity (!= 3)") - tail = 
cmh.assert_is_int64(t[0]) - label = cmh.assert_is_int64(t[1]) - head = cmh.assert_is_int64(t[2]) - model_ct.automaton.transition_tail.append(tail) - model_ct.automaton.transition_label.append(label) - model_ct.automaton.transition_head.append(head) + model_ct.automaton.transition_tail.append(t[0]) + model_ct.automaton.transition_label.append(t[1]) + model_ct.automaton.transition_head.append(t[2]) return ct def add_inverse( @@ -2358,7 +2343,6 @@ def new_fixed_size_interval_var( Returns: An `IntervalVar` object. """ - size = cmh.assert_is_int64(size) start_expr = self.parse_linear_expression(start) size_expr = self.parse_linear_expression(size) end_expr = self.parse_linear_expression(start + size) @@ -2545,7 +2529,6 @@ def new_optional_fixed_size_interval_var( Returns: An `IntervalVar` object. """ - size = cmh.assert_is_int64(size) start_expr = self.parse_linear_expression(start) size_expr = self.parse_linear_expression(size) end_expr = self.parse_linear_expression(start + size) @@ -2776,7 +2759,6 @@ def get_or_make_index(self, arg: VariableT) -> int: ): return -arg.expression().index - 1 if isinstance(arg, IntegralTypes): - arg = cmh.assert_is_int64(arg) return self.get_or_make_index_from_constant(arg) raise TypeError("NotSupported: model.get_or_make_index(" + str(arg) + ")") @@ -2842,9 +2824,8 @@ def parse_linear_expression( for t in coeffs_map.items(): if not isinstance(t[0], IntVar): raise TypeError("Wrong argument" + str(t)) - c = cmh.assert_is_int64(t[1]) result.vars.append(t[0].index) - result.coeffs.append(c * mult) + result.coeffs.append(t[1] * mult) return result def _set_objective(self, obj: ObjLinearExprT, minimize: bool): diff --git a/ortools/sat/python/cp_model_helper.py b/ortools/sat/python/cp_model_helper.py index 364aea8485c..8abae4cf1a0 100644 --- a/ortools/sat/python/cp_model_helper.py +++ b/ortools/sat/python/cp_model_helper.py @@ -60,26 +60,6 @@ def is_minus_one(x: Any) -> bool: return False -def assert_is_int64(x: Any) -> int: - """Asserts that x is integer and x is in [min_int_64, max_int_64] and returns it casted to an int.""" - if not isinstance(x, numbers.Integral): - raise TypeError(f"Not an integer: {x} of type {type(x)}") - x_as_int = int(x) - if x_as_int < INT_MIN or x_as_int > INT_MAX: - raise OverflowError(f"Does not fit in an int64_t: {x}") - return x_as_int - - -def assert_is_int32(x: Any) -> int: - """Asserts that x is integer and x is in [min_int_32, max_int_32] and returns it casted to an int.""" - if not isinstance(x, numbers.Integral): - raise TypeError(f"Not an integer: {x} of type {type(x)}") - x_as_int = int(x) - if x_as_int < INT32_MIN or x_as_int > INT32_MAX: - raise OverflowError(f"Does not fit in an int32_t: {x}") - return x_as_int - - def assert_is_zero_or_one(x: Any) -> int: """Asserts that x is 0 or 1 and returns it as an int.""" if not isinstance(x, numbers.Integral): @@ -110,8 +90,6 @@ def to_capped_int64(v: int) -> int: def capped_subtraction(x: int, y: int) -> int: """Saturated arithmetics. 
Returns x - y truncated to the int64_t range.""" - assert_is_int64(x) - assert_is_int64(y) if y == 0: return x if x == y: diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index 40fe7b2c25c..62c4b5c8d5a 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ b/ortools/sat/python/cp_model_helper_test.py @@ -30,16 +30,6 @@ def test_is_boolean(self): self.assertTrue(cp_model_helper.is_boolean(np.bool_(1))) self.assertTrue(cp_model_helper.is_boolean(np.bool_(0))) - def testassert_is_int64(self): - print("testassert_is_int64") - self.assertRaises(TypeError, cp_model_helper.assert_is_int64, "Hello") - self.assertRaises(TypeError, cp_model_helper.assert_is_int64, 1.2) - self.assertRaises(OverflowError, cp_model_helper.assert_is_int64, 2**63) - self.assertRaises(OverflowError, cp_model_helper.assert_is_int64, -(2**63) - 1) - cp_model_helper.assert_is_int64(123) - cp_model_helper.assert_is_int64(2**63 - 1) - cp_model_helper.assert_is_int64(-(2**63)) - def testto_capped_int64(self): print("testto_capped_int64") self.assertEqual( diff --git a/ortools/sat/stat_tables.cc b/ortools/sat/stat_tables.cc index e5b96d133de..957d1a7e2e5 100644 --- a/ortools/sat/stat_tables.cc +++ b/ortools/sat/stat_tables.cc @@ -26,7 +26,6 @@ #include "absl/synchronization/mutex.h" #include "ortools/lp_data/lp_types.h" #include "ortools/sat/cp_model.pb.h" -#include "ortools/sat/cp_model_lns.h" #include "ortools/sat/linear_programming_constraint.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_solver.h" @@ -228,18 +227,20 @@ void SharedStatTables::AddLpStat(absl::string_view name, Model* model) { } void SharedStatTables::AddLnsStat(absl::string_view name, - const NeighborhoodGenerator& generator) { + int64_t num_fully_solved_calls, + int64_t num_calls, + int64_t num_improving_calls, + double difficulty, + double deterministic_limit) { absl::MutexLock mutex_lock(&mutex_); const double fully_solved_proportion = - static_cast(generator.num_fully_solved_calls()) / - static_cast(std::max(int64_t{1}, generator.num_calls())); + static_cast(num_fully_solved_calls) / + static_cast(std::max(int64_t{1}, num_calls)); lns_table_.push_back( - {FormatName(name), - absl::StrCat(generator.num_improving_calls(), "/", - generator.num_calls()), + {FormatName(name), absl::StrCat(num_improving_calls, "/", num_calls), absl::StrFormat("%2.0f%%", 100 * fully_solved_proportion), - absl::StrFormat("%0.2f", generator.difficulty()), - absl::StrFormat("%0.2f", generator.deterministic_limit())}); + absl::StrFormat("%0.2f", difficulty), + absl::StrFormat("%0.2f", deterministic_limit)}); } void SharedStatTables::AddLsStat(absl::string_view name, int64_t num_batches, diff --git a/ortools/sat/stat_tables.h b/ortools/sat/stat_tables.h index 18b0ffde46b..f3b233909ed 100644 --- a/ortools/sat/stat_tables.h +++ b/ortools/sat/stat_tables.h @@ -18,9 +18,9 @@ #include #include +#include "absl/container/btree_map.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" -#include "ortools/sat/cp_model_lns.h" #include "ortools/sat/model.h" #include "ortools/sat/subsolver.h" #include "ortools/sat/util.h" @@ -42,13 +42,14 @@ class SharedStatTables { void AddLpStat(absl::string_view name, Model* model); - void AddLnsStat(absl::string_view name, - const NeighborhoodGenerator& generator); + void AddLnsStat(absl::string_view name, int64_t num_fully_solved_calls, + int64_t num_calls, int64_t num_improving_calls, + double difficulty, double deterministic_limit); void 
AddLsStat(absl::string_view name, int64_t num_batches, int64_t num_restarts, int64_t num_linear_moves, int64_t num_general_moves, int64_t num_compound_moves, - int64_t num_bactracks, int64_t num_weight_updates, + int64_t num_backtracks, int64_t num_weight_updates, int64_t num_scores_computed); // Display the set of table at the end. diff --git a/ortools/sat/symmetry_util_test.cc b/ortools/sat/symmetry_util_test.cc new file mode 100644 index 00000000000..85a7d674811 --- /dev/null +++ b/ortools/sat/symmetry_util_test.cc @@ -0,0 +1,134 @@ +// Copyright 2010-2024 Google LLC +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "ortools/sat/symmetry_util.h" + +#include +#include + +#include "gtest/gtest.h" +#include "ortools/algorithms/sparse_permutation.h" +#include "ortools/base/gmock.h" + +namespace operations_research { +namespace sat { +namespace { + +using ::testing::ElementsAre; + +TEST(GetOrbitsTest, BasicExample) { + const int n = 10; + std::vector> generators; + generators.push_back(std::make_unique(n)); + generators[0]->AddToCurrentCycle(0); + generators[0]->AddToCurrentCycle(1); + generators[0]->AddToCurrentCycle(2); + generators[0]->CloseCurrentCycle(); + generators[0]->AddToCurrentCycle(7); + generators[0]->AddToCurrentCycle(8); + generators[0]->CloseCurrentCycle(); + + generators.push_back(std::make_unique(n)); + generators[1]->AddToCurrentCycle(3); + generators[1]->AddToCurrentCycle(2); + generators[1]->AddToCurrentCycle(7); + generators[1]->CloseCurrentCycle(); + const std::vector orbits = GetOrbits(n, generators); + for (const int i : std::vector{0, 1, 2, 3, 7, 8}) { + EXPECT_EQ(orbits[i], 0); + } + for (const int i : std::vector{4, 5, 6, 9}) { + EXPECT_EQ(orbits[i], -1); + } +} + +// Recover for generators (in a particular form) +// [0, 1, 2] +// [4, 5, 3] +// [8, 7, 6] +TEST(BasicOrbitopeExtractionTest, BasicExample) { + const int n = 10; + std::vector> generators; + + generators.push_back(std::make_unique(n)); + generators[0]->AddToCurrentCycle(0); + generators[0]->AddToCurrentCycle(1); + generators[0]->CloseCurrentCycle(); + generators[0]->AddToCurrentCycle(4); + generators[0]->AddToCurrentCycle(5); + generators[0]->CloseCurrentCycle(); + generators[0]->AddToCurrentCycle(8); + generators[0]->AddToCurrentCycle(7); + generators[0]->CloseCurrentCycle(); + + generators.push_back(std::make_unique(n)); + generators[1]->AddToCurrentCycle(2); + generators[1]->AddToCurrentCycle(1); + generators[1]->CloseCurrentCycle(); + generators[1]->AddToCurrentCycle(5); + generators[1]->AddToCurrentCycle(3); + generators[1]->CloseCurrentCycle(); + generators[1]->AddToCurrentCycle(6); + generators[1]->AddToCurrentCycle(7); + generators[1]->CloseCurrentCycle(); + + const std::vector> orbitope = + BasicOrbitopeExtraction(generators); + ASSERT_EQ(orbitope.size(), 3); + EXPECT_THAT(orbitope[0], ElementsAre(0, 1, 2)); + EXPECT_THAT(orbitope[1], ElementsAre(4, 5, 3)); + EXPECT_THAT(orbitope[2], ElementsAre(8, 7, 6)); +} + +// This one is trickier and is not an orbitope because 8 appear 
twice. So it +// would be incorrect to "grow" the first two columns with the 3rd one. +// [0, 1, 2] +// [4, 5, 8] +// [8, 7, 9] +TEST(BasicOrbitopeExtractionTest, NotAnOrbitopeBecauseOfDuplicates) { + const int n = 10; + std::vector> generators; + + generators.push_back(std::make_unique(n)); + generators[0]->AddToCurrentCycle(0); + generators[0]->AddToCurrentCycle(1); + generators[0]->CloseCurrentCycle(); + generators[0]->AddToCurrentCycle(4); + generators[0]->AddToCurrentCycle(5); + generators[0]->CloseCurrentCycle(); + generators[0]->AddToCurrentCycle(8); + generators[0]->AddToCurrentCycle(7); + generators[0]->CloseCurrentCycle(); + + generators.push_back(std::make_unique(n)); + generators[1]->AddToCurrentCycle(1); + generators[1]->AddToCurrentCycle(2); + generators[1]->CloseCurrentCycle(); + generators[1]->AddToCurrentCycle(5); + generators[1]->AddToCurrentCycle(8); + generators[1]->CloseCurrentCycle(); + generators[1]->AddToCurrentCycle(6); + generators[1]->AddToCurrentCycle(9); + generators[1]->CloseCurrentCycle(); + + const std::vector> orbitope = + BasicOrbitopeExtraction(generators); + ASSERT_EQ(orbitope.size(), 3); + EXPECT_THAT(orbitope[0], ElementsAre(0, 1)); + EXPECT_THAT(orbitope[1], ElementsAre(4, 5)); + EXPECT_THAT(orbitope[2], ElementsAre(8, 7)); +} + +} // namespace +} // namespace sat +} // namespace operations_research diff --git a/ortools/sat/util.cc b/ortools/sat/util.cc index edbc813bb1b..8bc821226e9 100644 --- a/ortools/sat/util.cc +++ b/ortools/sat/util.cc @@ -465,7 +465,7 @@ void CompressTuples(absl::Span domain_sizes, for (int i = 0; i < num_vars; ++i) { const int domain_size = domain_sizes[i]; if (domain_size == 1) continue; - absl::flat_hash_map, std::vector> + absl::flat_hash_map, std::vector> masked_tuples_to_indices; for (int t = 0; t < tuples->size(); ++t) { int out = 0; diff --git a/ortools/sat/util.h b/ortools/sat/util.h index 9ccdf7d3095..36d9e8d9966 100644 --- a/ortools/sat/util.h +++ b/ortools/sat/util.h @@ -428,10 +428,15 @@ class MaxBoundedSubsetSum { template class FirstFewValues { public: - FirstFewValues() { Reset(); } + FirstFewValues() + : reachable_(new int64_t[n]), new_reachable_(new int64_t[n]) { + Reset(); + } void Reset() { - reachable_.fill(std::numeric_limits::max()); + for (int i = 0; i < n; ++i) { + reachable_[i] = std::numeric_limits::max(); + } reachable_[0] = 0; new_reachable_[0] = 0; } @@ -441,23 +446,25 @@ class FirstFewValues { // TODO(user): Implement Add() with an upper bound on the multiplicity. void Add(const int64_t positive_value) { DCHECK_GT(positive_value, 0); - if (positive_value >= reachable_.back()) return; + const int64_t* reachable = reachable_.get(); + if (positive_value >= reachable[n - 1]) return; // We copy from reachable_[i] to new_reachable_[j]. // The position zero is already copied. int i = 1; int j = 1; + int64_t* new_reachable = new_reachable_.get(); for (int base = 0; j < n && base < n; ++base) { - const int64_t candidate = CapAdd(new_reachable_[base], positive_value); - while (j < n && i < n && reachable_[i] < candidate) { - new_reachable_[j++] = reachable_[i++]; + const int64_t candidate = CapAdd(new_reachable[base], positive_value); + while (j < n && i < n && reachable[i] < candidate) { + new_reachable[j++] = reachable[i++]; } if (j < n) { // Eliminate duplicates. - while (i < n && reachable_[i] == candidate) i++; + while (i < n && reachable[i] == candidate) i++; // insert candidate in its final place. 
- new_reachable_[j++] = candidate; + new_reachable[j++] = candidate; } } std::swap(reachable_, new_reachable_); @@ -466,16 +473,19 @@ class FirstFewValues { // Returns true iff sum might be expressible as a weighted sum of the added // value. Any sum >= LastValue() is always considered potentially reachable. bool MightBeReachable(int64_t sum) const { - if (sum >= reachable_.back()) return true; - return std::binary_search(reachable_.begin(), reachable_.end(), sum); + if (sum >= reachable_[n - 1]) return true; + return std::binary_search(&reachable_[0], &reachable_[0] + n, sum); } - const std::array& reachable() const { return reachable_; } - int64_t LastValue() const { return reachable_.back(); } + int64_t LastValue() const { return reachable_[n - 1]; } + + absl::Span reachable() { + return absl::MakeSpan(reachable_.get(), n); + } private: - std::array reachable_; - std::array new_reachable_; + std::unique_ptr reachable_; + std::unique_ptr new_reachable_; }; // Use Dynamic programming to solve a single knapsack. This is used by the @@ -700,37 +710,16 @@ inline bool IsNegatableInt64(absl::int128 x) { x > absl::int128(std::numeric_limits::min()); } -// These functions are copied from MathUtils. However, the original ones are -// incompatible with absl::int128 as MathLimits::kIsInteger == -// false. -template -IntType CeilOrFloorOfRatio(IntType numerator, IntType denominator) { - static_assert(std::numeric_limits::is_integer, - "CeilOfRatio is only defined for integral types"); - DCHECK_NE(0, denominator) << "Division by zero is not supported."; - DCHECK(numerator != std::numeric_limits::min() || denominator != -1) - << "Dividing " << numerator << "by -1 is not supported: it would SIGFPE"; - - const IntType rounded_toward_zero = numerator / denominator; - const bool needs_round = (numerator % denominator) != 0; - const bool same_sign = (numerator >= 0) == (denominator >= 0); - - if (ceil) { // Compile-time condition: not an actual branching - return rounded_toward_zero + static_cast(same_sign && needs_round); - } else { - return rounded_toward_zero - - static_cast(!same_sign && needs_round); - } -} - template +ABSL_DEPRECATE_AND_INLINE() IntType CeilOfRatio(IntType numerator, IntType denominator) { - return CeilOrFloorOfRatio(numerator, denominator); + return MathUtil::CeilOfRatio(numerator, denominator); } template +ABSL_DEPRECATE_AND_INLINE() IntType FloorOfRatio(IntType numerator, IntType denominator) { - return CeilOrFloorOfRatio(numerator, denominator); + return MathUtil::FloorOfRatio(numerator, denominator); } template diff --git a/ortools/util/BUILD.bazel b/ortools/util/BUILD.bazel index b2ee31597e1..38c67dd3ad6 100644 --- a/ortools/util/BUILD.bazel +++ b/ortools/util/BUILD.bazel @@ -129,6 +129,7 @@ cc_library( deps = [ ":saturated_arithmetic", "//ortools/base", + "//ortools/base:dump_vars", "//ortools/base:types", "@com_google_absl//absl/container:btree", "@com_google_absl//absl/strings", diff --git a/ortools/util/bitset.h b/ortools/util/bitset.h index 82299b66d98..7f2f3a4932a 100644 --- a/ortools/util/bitset.h +++ b/ortools/util/bitset.h @@ -815,6 +815,10 @@ inline int Bitset64::Value(int64_t input) { DCHECK_GE(input, 0); return input; } +template <> +inline int Bitset64::Value(size_t input) { + return input; +} // A simple utility class to set/unset integer in a range [0, size). // This is optimized for sparsity. 
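As a side note on the FirstFewValues<n> change a few hunks above (std::array storage replaced by heap-allocated buffers), here is a small illustrative C++ sketch, not part of the patch, of how the class is exercised; the names and namespace follow ortools/sat/util.h as shown in the diff, and the example function itself is hypothetical.

#include <cstdint>

#include "ortools/sat/util.h"

namespace operations_research::sat {

void FirstFewValuesExample() {
  FirstFewValues<8> values;  // Tracks the 8 smallest reachable subset sums.
  values.Add(3);
  values.Add(5);
  // The tracked sums are now {0, 3, 5, 6, 8, 9, 10, 11}: 7 is below
  // LastValue() and absent, so it is reported as unreachable, while any
  // sum >= LastValue() is conservatively considered reachable.
  const bool seven = values.MightBeReachable(7);     // false
  const bool large = values.MightBeReachable(1000);  // true
  (void)seven;
  (void)large;
}

}  // namespace operations_research::sat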
diff --git a/ortools/util/saturated_arithmetic.h b/ortools/util/saturated_arithmetic.h index e2065ec3cca..321426143d8 100644 --- a/ortools/util/saturated_arithmetic.h +++ b/ortools/util/saturated_arithmetic.h @@ -311,6 +311,11 @@ inline int64_t CapSub(int64_t x, int64_t y) { #endif } +// Updates *target with CapSub(*target, amount). +inline void CapSubFrom(int64_t amount, int64_t* target) { + *target = CapSub(*target, amount); +} + inline int64_t CapProd(int64_t x, int64_t y) { #if defined(__GNUC__) && defined(__x86_64__) // On x86_64, the product of two 64-bit registeres is a 128-bit integer, diff --git a/ortools/util/sorted_interval_list.cc b/ortools/util/sorted_interval_list.cc index 7511e8ec551..6343b19ab13 100644 --- a/ortools/util/sorted_interval_list.cc +++ b/ortools/util/sorted_interval_list.cc @@ -15,9 +15,9 @@ #include #include +#include #include #include -#include #include #include #include
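Finally, for the saturated_arithmetic.h hunk above, a brief illustrative C++ sketch (not part of the patch) of the new CapSubFrom() helper, which rewrites *target with the saturating difference; the example function is hypothetical.

#include <cstdint>
#include <limits>

#include "ortools/util/saturated_arithmetic.h"

void CapSubFromExample() {
  int64_t budget = std::numeric_limits<int64_t>::min() + 10;
  // Equivalent to budget = CapSub(budget, 100): the result saturates at the
  // int64_t minimum instead of overflowing.
  operations_research::CapSubFrom(100, &budget);
}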