forked from google/or-tools
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: sat_parameters.proto
803 lines (668 loc) · 38.5 KB
/
sat_parameters.proto
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
// Copyright 2010-2018 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
package operations_research.sat;
option java_package = "com.google.ortools.sat";
option java_multiple_files = true;
// Contains the definitions for all the sat algorithm parameters and their
// default values.
//
// NEXT TAG: 139
message SatParameters {
// ==========================================================================
// Branching and polarity
// ==========================================================================
// Variables without activity (i.e. at the beginning of the search) will be
// tried in this preferred order.
enum VariableOrder {
IN_ORDER = 0; // As specified by the problem.
IN_REVERSE_ORDER = 1;
IN_RANDOM_ORDER = 2;
}
optional VariableOrder preferred_variable_order = 1 [default = IN_ORDER];
// Specifies the initial polarity (true/false) when the solver branches on a
// variable. This can be modified later by the user, or the phase saving
// heuristic.
//
// Note(user): POLARITY_FALSE is usually a good choice because of the
// "natural" way to express a linear boolean problem.
enum Polarity {
POLARITY_TRUE = 0;
POLARITY_FALSE = 1;
POLARITY_RANDOM = 2;
// Choose the sign that tends to satisfy the most constraints. This is
// computed using a weighted sum: if a literal l appears in a constraint of
// the form: ... + coeff * l +... <= rhs with positive coefficients and
// rhs, then -sign(l) * coeff / rhs is added to the weight of l.variable().
POLARITY_WEIGHTED_SIGN = 3;
// The opposite choice of POLARITY_WEIGHTED_SIGN.
POLARITY_REVERSE_WEIGHTED_SIGN = 4;
}
optional Polarity initial_polarity = 2 [default = POLARITY_FALSE];
// If this is true, then the polarity of a variable will be the last value it
// was assigned to, or its default polarity if it was never assigned since the
// call to ResetDecisionHeuristic().
//
// This is called 'literal phase saving'. For details see 'A Lightweight
// Component Caching Scheme for Satisfiability Solvers' K. Pipatsrisawat and
// A.Darwiche, In 10th International Conference on Theory and Applications of
// Satisfiability Testing, 2007.
optional bool use_phase_saving = 44 [default = true];
// The proportion of polarities chosen at random. Note that this takes
// precedence over the phase saving heuristic. This is different from
// initial_polarity:POLARITY_RANDOM because it will select a new random
// polarity each time the variable is branched upon instead of selecting one
// initially and then always taking this choice.
optional double random_polarity_ratio = 45 [default = 0.0];
// A number between 0 and 1 that indicates the proportion of branching
// variables that are selected randomly instead of choosing the first variable
// from the given variable_ordering strategy.
optional double random_branches_ratio = 32 [default = 0.0];
// Whether we use the ERWA (Exponential Recency Weighted Average) heuristic as
// described in "Learning Rate Based Branching Heuristic for SAT solvers",
// J.H.Liang, V. Ganesh, P. Poupart, K.Czarnecki, SAT 2016.
optional bool use_erwa_heuristic = 75 [default = false];
// The initial value of the variables' activity. A non-zero value only makes
// sense when use_erwa_heuristic is true. Experiments with a value of 1e-2
// together with the ERWA heuristic showed slightly better results than simply
// using zero. The idea is that when the "learning rate" of a variable becomes
// lower than this value, then we prefer to branch on variables that were
// never explored before. This is not in the ERWA paper.
optional double initial_variables_activity = 76 [default = 0.0];
// When this is true, the variables that appear in any of the reasons of the
// variables in a conflict have their activity bumped. This is in addition to
// the variables in the conflict, and the ones that were used during conflict
// resolution.
optional bool also_bump_variables_in_conflict_reasons = 77 [default = false];
// ==========================================================================
// Conflict analysis
// ==========================================================================
// Do we try to minimize conflicts (greedily) when creating them.
enum ConflictMinimizationAlgorithm {
NONE = 0;
SIMPLE = 1;
RECURSIVE = 2;
EXPERIMENTAL = 3;
}
optional ConflictMinimizationAlgorithm minimization_algorithm = 4
[default = RECURSIVE];
// Whether to exploit the binary clauses to minimize learned clauses further.
// This will have an effect only if treat_binary_clauses_separately is true.
//
// NOTE(review): the type name has a typo ("Minization" instead of
// "Minimization"); it is kept as-is because renaming a published type would
// break generated-code compatibility.
enum BinaryMinizationAlgorithm {
NO_BINARY_MINIMIZATION = 0;
BINARY_MINIMIZATION_FIRST = 1;
BINARY_MINIMIZATION_FIRST_WITH_TRANSITIVE_REDUCTION = 4;
BINARY_MINIMIZATION_WITH_REACHABILITY = 2;
EXPERIMENTAL_BINARY_MINIMIZATION = 3;
}
optional BinaryMinizationAlgorithm binary_minimization_algorithm = 34
[default = BINARY_MINIMIZATION_FIRST];
// At a really low cost, during the 1-UIP conflict computation, it is easy to
// detect if some of the involved reasons are subsumed by the current
// conflict. When this is true, such clauses are detached and later removed
// from the problem.
optional bool subsumption_during_conflict_analysis = 56 [default = true];
// ==========================================================================
// Clause database management
// ==========================================================================
// Trigger a cleanup when this number of "deletable" clauses is learned.
optional int32 clause_cleanup_period = 11 [default = 10000];
// During a cleanup, we will always keep that number of "deletable" clauses.
// Note that this doesn't include the "protected" clauses.
optional int32 clause_cleanup_target = 13 [default = 10000];
// Each time a clause activity is bumped, the clause has a chance to be
// protected during the next cleanup phase. Note that clauses used as a reason
// are always protected.
enum ClauseProtection {
PROTECTION_NONE = 0; // No protection.
PROTECTION_ALWAYS = 1; // Protect all clauses whose activity is bumped.
PROTECTION_LBD = 2; // Only protect clause with a better LBD.
}
optional ClauseProtection clause_cleanup_protection = 58
[default = PROTECTION_NONE];
// All the clauses with a LBD (literal blocks distance) lower or equal to this
// parameters will always be kept.
optional int32 clause_cleanup_lbd_bound = 59 [default = 5];
// The clauses that will be kept during a cleanup are the ones that come
// first under this order. We always keep or exclude ties together.
enum ClauseOrdering {
// Order clause by decreasing activity, then by increasing LBD.
CLAUSE_ACTIVITY = 0;
// Order clause by increasing LBD, then by decreasing activity.
CLAUSE_LBD = 1;
}
optional ClauseOrdering clause_cleanup_ordering = 60
[default = CLAUSE_ACTIVITY];
// Same as for the clauses, but for the learned pseudo-Boolean constraints.
optional int32 pb_cleanup_increment = 46 [default = 200];
optional double pb_cleanup_ratio = 47 [default = 0.5];
// Parameters for a heuristic similar to the one described in "An effective
// learnt clause minimization approach for CDCL Sat Solvers",
// https://www.ijcai.org/proceedings/2017/0098.pdf
//
// For now, we have a somewhat simpler implementation where every x restarts we
// spend y decisions on clause minimization. The minimization technique is the
// same as the one used to minimize cores in max-sat. We also minimize problem
// clauses and not just the learned clauses that we keep forever like in the
// paper.
//
// Changing these parameters or the kind of clauses we minimize seems to have
// a big impact on the overall perf on our benchmarks. So this technique seems
// definitely useful, but it is hard to tune properly.
optional int32 minimize_with_propagation_restart_period = 96 [default = 10];
optional int32 minimize_with_propagation_num_decisions = 97 [default = 1000];
// ==========================================================================
// Variable and clause activities
// ==========================================================================
// Each time a conflict is found, the activities of some variables are
// increased by one. Then, the activity of all variables are multiplied by
// variable_activity_decay.
//
// To implement this efficiently, the activity of all the variables is not
// decayed at each conflict. Instead, the activity increment is multiplied by
// 1 / decay. When an activity reach max_variable_activity_value, all the
// activity are multiplied by 1 / max_variable_activity_value.
optional double variable_activity_decay = 15 [default = 0.8];
optional double max_variable_activity_value = 16 [default = 1e100];
// The activity starts at 0.8 and increment by 0.01 every 5000 conflicts until
// 0.95. This "hack" seems to work well and comes from:
//
// Glucose 2.3 in the SAT 2013 Competition - SAT Competition 2013
// http://edacc4.informatik.uni-ulm.de/SC13/solver-description-download/136
optional double glucose_max_decay = 22 [default = 0.95];
optional double glucose_decay_increment = 23 [default = 0.01];
optional int32 glucose_decay_increment_period = 24 [default = 5000];
// Clause activity parameters (same effect as the one on the variables).
optional double clause_activity_decay = 17 [default = 0.999];
optional double max_clause_activity_value = 18 [default = 1e20];
// ==========================================================================
// Restart
// ==========================================================================
// Restart algorithms.
//
// A reference for the more advanced ones is:
// Gilles Audemard, Laurent Simon, "Refining Restarts Strategies for SAT
// and UNSAT", Principles and Practice of Constraint Programming Lecture
// Notes in Computer Science 2012, pp 118-126
enum RestartAlgorithm {
NO_RESTART = 0;
// Just follow a Luby sequence times restart_period.
LUBY_RESTART = 1;
// Moving average restart based on the decision level of conflicts.
DL_MOVING_AVERAGE_RESTART = 2;
// Moving average restart based on the LBD of conflicts.
LBD_MOVING_AVERAGE_RESTART = 3;
// Fixed period restart every restart period.
FIXED_RESTART = 4;
}
// The restart strategies will change each time the strategy_counter is
// increased. The current strategy will simply be the one at index
// strategy_counter modulo the number of strategies. Note that if this list
// includes a NO_RESTART, nothing will change when it is reached because the
// strategy_counter will only increment after a restart.
//
// The idea of switching search strategies tailored for SAT/UNSAT comes from
// Chanseok Oh with his COMiniSatPS solver, see http://cs.nyu.edu/~chanseok/.
// But more generally, it seems REALLY beneficial to try different strategies.
repeated RestartAlgorithm restart_algorithms = 61;
optional string default_restart_algorithms = 70
[default =
"LUBY_RESTART,LBD_MOVING_AVERAGE_RESTART,DL_MOVING_AVERAGE_RESTART"];
// Restart period for the FIXED_RESTART strategy. This is also the multiplier
// used by the LUBY_RESTART strategy.
optional int32 restart_period = 30 [default = 50];
// Size of the window for the moving average restarts.
optional int32 restart_running_window_size = 62 [default = 50];
// In the moving average restart algorithms, a restart is triggered if the
// window average times this ratio is greater than the global average.
optional double restart_dl_average_ratio = 63 [default = 1.0];
optional double restart_lbd_average_ratio = 71 [default = 1.0];
// Block a moving restart algorithm if the trail size of the current conflict
// is greater than the multiplier times the moving average of the trail size
// at the previous conflicts.
optional bool use_blocking_restart = 64 [default = false];
optional int32 blocking_restart_window_size = 65 [default = 5000];
optional double blocking_restart_multiplier = 66 [default = 1.4];
// After each restart, if the number of conflicts since the last strategy
// change is greater than this, then we increment a "strategy_counter" that
// can be used to change the search strategy used by the following restarts.
optional int32 num_conflicts_before_strategy_changes = 68 [default = 0];
// The parameter num_conflicts_before_strategy_changes is increased by that
// much after each strategy change.
optional double strategy_change_increase_ratio = 69 [default = 0.0];
// ==========================================================================
// Limits
// ==========================================================================
// Maximum time allowed in seconds to solve a problem.
// The counter will start as soon as SetParameters() or SolveWithTimeLimit()
// is called.
optional double max_time_in_seconds = 36 [default = inf];
// Maximum time allowed in deterministic time to solve a problem.
// The deterministic time should be correlated with the real time used by the
// solver, the time unit being as close as possible to a second.
// The counter will start as soon as SetParameters() or SolveWithTimeLimit()
// is called.
optional double max_deterministic_time = 67 [default = inf];
// Maximum number of conflicts allowed to solve a problem.
//
// TODO(user,user): Maybe change the way the conflict limit is enforced?
// currently it is enforced on each independent internal SAT solve, rather
// than on the overall number of conflicts across all solves. So in the
// context of an optimization problem, this is not really usable directly by a
// client.
optional int64 max_number_of_conflicts = 37
[default = 0x7FFFFFFFFFFFFFFF]; // kint64max
// Maximum memory allowed for the whole thread containing the solver. The
// solver will abort as soon as it detects that this limit is crossed. As a
// result, this limit is approximate, but usually the solver will not go too
// much over.
optional int64 max_memory_in_mb = 40 [default = 10000];
// ==========================================================================
// Other parameters
// ==========================================================================
// If true, the binary clauses are treated separately from the others. This
// should be faster and uses less memory. However it changes the propagation
// order.
optional bool treat_binary_clauses_separately = 33 [default = true];
// At the beginning of each solve, the random number generator used in some
// part of the solver is reinitialized to this seed. If you change the random
// seed, the solver may make different choices during the solving process.
//
// For some problems, the running time may vary a lot depending on small
// change in the solving algorithm. Running the solver with different seeds
// enables to have more robust benchmarks when evaluating new features.
optional int32 random_seed = 31 [default = 1];
// Whether the solver should log the search progress to LOG(INFO).
optional bool log_search_progress = 41 [default = false];
// Whether to use pseudo-Boolean resolution to analyze a conflict. Note that
// this option only makes sense if your problem is modeled using
// pseudo-Boolean constraints. If you only have clauses, this shouldn't change
// anything (except slow the solver down).
optional bool use_pb_resolution = 43 [default = false];
// A different algorithm during PB resolution. It minimizes the number of
// calls to ReduceCoefficients() which can be time consuming. However, the
// search space will be different and if the coefficients are large, this may
// lead to integer overflows that could otherwise be prevented.
optional bool minimize_reduction_during_pb_resolution = 48 [default = false];
// Whether or not the assumption levels are taken into account during the LBD
// computation. According to the reference below, not counting them improves
// the solver in some situation. Note that this only impact solves under
// assumptions.
//
// Gilles Audemard, Jean-Marie Lagniez, Laurent Simon, "Improving Glucose for
// Incremental SAT Solving with Assumptions: Application to MUS Extraction"
// Theory and Applications of Satisfiability Testing - SAT 2013, Lecture Notes
// in Computer Science Volume 7962, 2013, pp 309-317.
optional bool count_assumption_levels_in_lbd = 49 [default = true];
// ==========================================================================
// Presolve
// ==========================================================================
// During presolve, only try to perform the bounded variable elimination (BVE)
// of a variable x if the number of occurrences of x times the number of
// occurrences of not(x) is not greater than this parameter.
optional int32 presolve_bve_threshold = 54 [default = 500];
// During presolve, we apply BVE only if this weight times the number of
// clauses plus the number of clause literals is not increased.
optional int32 presolve_bve_clause_weight = 55 [default = 3];
// The maximum "deterministic" time limit to spend in probing. A value of
// zero will disable the probing.
optional double presolve_probing_deterministic_time_limit = 57
[default = 30.0];
// Whether we use a heuristic to detect some basic cases of blocked clauses
// in the SAT presolve.
optional bool presolve_blocked_clause = 88 [default = true];
// Whether or not we use Bounded Variable Addition (BVA) in the presolve.
optional bool presolve_use_bva = 72 [default = true];
// Apply Bounded Variable Addition (BVA) if the number of clauses is reduced
// by strictly more than this threshold. The algorithm described in the paper
// uses 0, but quick experiments showed that 1 is a good value. It may not be
// worth it to add a new variable just to remove one clause.
optional int32 presolve_bva_threshold = 73 [default = 1];
// In case of large reduction in a presolve iteration, we perform multiple
// presolve iterations. This parameter controls the maximum number of such
// presolve iterations.
optional int32 max_presolve_iterations = 138 [default = 3];
// ==========================================================================
// Max-sat parameters
// ==========================================================================
// For an optimization problem, whether we follow some hints in order to find
// a better first solution. For a variable with hint, the solver will always
// try to follow the hint. It will revert to the variable_branching default
// otherwise.
optional bool use_optimization_hints = 35 [default = true];
// Whether we use a simple heuristic to try to minimize an UNSAT core.
optional bool minimize_core = 50 [default = true];
// Whether we try to find more independent cores for a given set of
// assumptions in the core based max-SAT algorithms.
optional bool find_multiple_cores = 84 [default = true];
// If true, when the max-sat algo find a core, we compute the minimal number
// of literals in the core that needs to be true to have a feasible solution.
optional bool cover_optimization = 89 [default = true];
// In what order do we add the assumptions in a core-based max-sat algorithm
enum MaxSatAssumptionOrder {
DEFAULT_ASSUMPTION_ORDER = 0;
ORDER_ASSUMPTION_BY_DEPTH = 1;
ORDER_ASSUMPTION_BY_WEIGHT = 2;
}
optional MaxSatAssumptionOrder max_sat_assumption_order = 51
[default = DEFAULT_ASSUMPTION_ORDER];
// If true, adds the assumption in the reverse order of the one defined by
// max_sat_assumption_order.
optional bool max_sat_reverse_assumption_order = 52 [default = false];
// What stratification algorithm we use in the presence of weight.
enum MaxSatStratificationAlgorithm {
// No stratification of the problem.
STRATIFICATION_NONE = 0;
// Start with literals with the highest weight, and when SAT, add the
// literals with the next highest weight and so on.
STRATIFICATION_DESCENT = 1;
// Start with all literals. Each time a core is found with a given minimum
// weight, do not consider literals with a lower weight for the next core
// computation. If the subproblem is SAT, do like in STRATIFICATION_DESCENT
// and just add the literals with the next highest weight.
STRATIFICATION_ASCENT = 2;
}
optional MaxSatStratificationAlgorithm max_sat_stratification = 53
[default = STRATIFICATION_DESCENT];
// ==========================================================================
// Constraint programming parameters
// ==========================================================================
// When this is true, then a disjunctive constraint will try to use the
// precedence relations between time intervals to propagate their bounds
// further. For instance if tasks A and B are both before C and tasks A and B
// are in disjunction, then we can deduce that task C must start after
// duration(A) + duration(B) instead of simply max(duration(A), duration(B)),
// provided that the start time for all tasks was currently zero.
//
// This always results in better propagation, but it is usually slow, so
// depending on the problem, turning this off may lead to a faster solution.
optional bool use_precedences_in_disjunctive_constraint = 74 [default = true];
// When this is true, the cumulative constraint is reinforced with overload
// checking, i.e., an additional level of reasoning based on energy. This
// additional level supplements the default level of reasoning as well as
// timetable edge finding.
//
// This always results in better propagation, but it is usually slow, so
// depending on the problem, turning this off may lead to a faster solution.
optional bool use_overload_checker_in_cumulative_constraint = 78
[default = false];
// When this is true, the cumulative constraint is reinforced with timetable
// edge finding, i.e., an additional level of reasoning based on the
// conjunction of energy and mandatory parts. This additional level
// supplements the default level of reasoning as well as overload_checker.
//
// This always results in better propagation, but it is usually slow, so
// depending on the problem, turning this off may lead to a faster solution.
optional bool use_timetable_edge_finding_in_cumulative_constraint = 79
[default = false];
// When this is true, the cumulative constraint is reinforced with propagators
// from the disjunctive constraint to improve the inference on a set of tasks
// that are disjunctive at the root of the problem. This additional level
// supplements the default level of reasoning.
//
// Propagators of the cumulative constraint will not be used at all if all the
// tasks are disjunctive at root node.
//
// This always results in better propagation, but it is usually slow, so
// depending on the problem, turning this off may lead to a faster solution.
optional bool use_disjunctive_constraint_in_cumulative_constraint = 80
[default = true];
// A non-negative level indicating the type of constraints we consider in the
// LP relaxation. At level zero, no LP relaxation is used. At level 1, only
// the linear constraint and full encoding are added. At level 2, we also add
// all the Boolean constraints.
optional int32 linearization_level = 90 [default = 1];
// A non-negative level indicating how much we should try to fully encode
// Integer variables as Boolean.
optional int32 boolean_encoding_level = 107 [default = 1];
// The limit on the number of cuts in our cut pool. When this is reached we do
// not generate cuts anymore.
//
// TODO(user): We should probably remove this parameters, and just always
// generate cuts but only keep the best n or something.
optional int32 max_num_cuts = 91 [default = 5000];
// For the cuts that can be generated at any level, this controls whether we
// only try to generate them at the root node.
optional bool only_add_cuts_at_level_zero = 92 [default = false];
// Whether we generate knapsack cuts. Note that in our setting where all
// variables are integer and bounded on both side, such a cut could be applied
// to any constraint.
optional bool add_knapsack_cuts = 111 [default = false];
// Whether we generate and add Chvatal-Gomory cuts to the LP at root node.
// Note that for now, this is not heavily tuned.
optional bool add_cg_cuts = 117 [default = false];
// Whether we generate MIR cuts at root node.
// Note that for now, this is not heavily tuned.
optional bool add_mir_cuts = 120 [default = true];
// Whether we use the classical MIR rounding function when computing a cut
// using integer rounding. If false we use our own variant based on the strong
// fractional rounding of Letchford and Lodi.
optional bool use_mir_rounding = 118 [default = true];
// In the integer rounding procedure used for MIR and Gomory cuts, the maximum
// "scaling" we use (must be positive). The lower this is, the lower the
// integer coefficients of the cut will be. Note that cuts generated by lower
// values are not necessarily worse than cuts generated by larger values. There
// is no strict dominance relationship.
optional int32 max_integer_rounding_scaling = 119 [default = 600];
// If true, we start by an empty LP, and only add constraints not satisfied
// by the current LP solution batch by batch. A constraint that is only added
// like this is known as a "lazy" constraint in the literature, except that we
// currently consider all constraints as lazy here.
optional bool add_lp_constraints_lazily = 112 [default = true];
// While adding constraints, skip the constraints which have orthogonality
// less than 'min_orthogonality_for_lp_constraints' with already added
// constraints during the current call. Orthogonality is defined as 1 -
// cosine(vector angle between constraints). A value of zero disables this
// feature.
optional double min_orthogonality_for_lp_constraints = 115 [default = 0.0];
// If a constraint/cut in LP is not active for the 'max_inactive_count',
// remove it from the LP.
optional int64 max_inactive_count = 121 [default = 1000];
// Remove constraints only if at least 'constraint_removal_batch_size'
// constraints are marked for removal.
optional int64 constraint_removal_batch_size = 122 [default = 100];
// The search branching will be used to decide how to branch on unfixed nodes.
enum SearchBranching {
// Try to fix all literals using the underlying SAT solver's heuristics,
// then generate and fix literals until integer variables are fixed.
AUTOMATIC_SEARCH = 0;
// If used then all decisions taken by the solver are made using a fixed
// order as specified in the API or in the CpModelProto search_strategy
// field.
FIXED_SEARCH = 1;
// If used, the solver will use various generic heuristics in turn.
PORTFOLIO_SEARCH = 2;
// If used, the solver will use heuristics from the LP relaxation. This
// exploit the reduced costs of the variables in the relaxation.
//
// TODO(user): Maybe rename REDUCED_COST_SEARCH?
LP_SEARCH = 3;
// If used, the solver uses the pseudo costs for branching. Pseudo costs
// are computed using the historical change in objective bounds when some
// decision are taken.
PSEUDO_COST_SEARCH = 4;
// Mainly exposed here for testing. This quickly tries a lot of randomized
// heuristics with a low conflict limit. It usually provides a good first
// solution.
PORTFOLIO_WITH_QUICK_RESTART_SEARCH = 5;
// Mainly used internally. This is like FIXED_SEARCH, except we follow the
// solution_hint field of the CpModelProto rather than using the information
// provided in the search_strategy.
HINT_SEARCH = 6;
}
optional SearchBranching search_branching = 82 [default = AUTOMATIC_SEARCH];
// All the "exploit_*" parameters below work in the same way: when branching
// on an IntegerVariable, these parameters affect the value the variable is
// branched on. Currently the first heuristic that triggers win in the order
// in which they appear below.
//
// TODO(user): Maybe do like for the restart algorithm, introduce an enum
// and a repeated field that control the order on which these are applied?
// If true and the Lp relaxation of the problem has an integer optimal
// solution, try to exploit it. Note that since the LP relaxation may not
// contain all the constraints, such a solution is not necessarily a solution
// of the full problem.
optional bool exploit_integer_lp_solution = 94 [default = true];
// If true and the Lp relaxation of the problem has a solution, try to exploit
// it. This is same as above except in this case the lp solution might not be
// an integer solution.
optional bool exploit_all_lp_solution = 116 [default = true];
// When branching on a variable, follow the last best solution value.
optional bool exploit_best_solution = 130 [default = false];
// When branching on a variable that directly affects the objective,
// branch on the value that leads to the best objective first.
optional bool exploit_objective = 131 [default = true];
// The solver ignores the pseudo costs of variables with number of recordings
// less than this threshold.
optional int64 pseudo_cost_reliability_threshold = 123 [default = 100];
// The default optimization method is a simple "linear scan", each time trying
// to find a better solution than the previous one. If this is true, then we
// use a core-based approach (like in max-SAT) when we try to increase the
// lower bound instead.
optional bool optimize_with_core = 83 [default = false];
// If non-negative, perform a binary search on the objective variable in order
// to find a [min, max] interval outside of which the solver proved unsat/sat
// under this number of conflicts. This can quickly reduce the objective
// domain on some problems.
optional int32 binary_search_num_conflicts = 99 [default = -1];
// This has no effect if optimize_with_core is false. If true, use a different
// core-based algorithm similar to the max-HS algo for max-SAT. This is a
// hybrid MIP/CP approach and it uses a MIP solver in addition to the CP/SAT
// one. This is also related to the PhD work of tobyodavies@
// "Automatic Logic-Based Benders Decomposition with MiniZinc"
// http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14489
optional bool optimize_with_max_hs = 85 [default = false];
// Whether we presolve the cp_model before solving it.
optional bool cp_model_presolve = 86 [default = true];
// How much effort we spend on probing. 0 disables it completely.
optional int32 cp_model_probing_level = 110 [default = 2];
// Whether we also use the SAT presolve when cp_model_presolve is true.
optional bool cp_model_use_sat_presolve = 93 [default = true];
// Whether we enumerate all solutions of a problem without objective. Note
// that setting this to true automatically disables the presolve. This is
// because the presolve rules only guarantee the existence of one feasible
// solution to the presolved problem.
//
// TODO(user): Activate the presolve but with just the rules that do not
// change the set of feasible solutions.
optional bool enumerate_all_solutions = 87 [default = false];
// If true, add information about the derived variable domains to the
// CpSolverResponse. It is an option because it makes the response slightly
// bigger and there is a bit more work involved during the postsolve to
// construct it, but it should still have a low overhead. See the
// tightened_variables field in CpSolverResponse for more details.
optional bool fill_tightened_domains_in_response = 132 [default = false];
// If true, the solver will add a default integer branching strategy to the
// already defined search strategy.
optional bool instantiate_all_variables = 106 [default = true];
// If true, then the precedences propagator tries to detect for each variable
// if it has a set of "optional incoming arcs" for which at least one of them
// is present. This is usually useful to have but can be slow on models with a
// lot of precedences.
optional bool auto_detect_greater_than_at_least_one_of = 95 [default = true];
// For an optimization problem, stop the solver as soon as we have a solution.
optional bool stop_after_first_solution = 98 [default = false];
// Specify the number of parallel workers to use during search.
// A number <= 1 means no parallelism.
optional int32 num_search_workers = 100 [default = 1];
// Experimental. If this is true, then we interleave all our major search
// strategies and distribute the work amongst num_search_workers. This also
// works with just one worker, in which case the solver is deterministic even
// if deterministic_parallel_search is false.
optional bool interleave_search = 136 [default = false];
// Make the parallelization deterministic. Currently, this only works with
// use_lns_only().
//
// TODO(user): make it work without use_lns_only().
optional bool deterministic_parallel_search = 134 [default = false];
// Allows objective sharing between workers.
optional bool share_objective_bounds = 113 [default = true];
// Allows sharing of the bounds of modified variables at level 0.
optional bool share_level_zero_bounds = 114 [default = true];
// LNS (Large Neighborhood Search) parameters.
optional bool use_lns_only = 101 [default = false];
// If true, the LNS neighborhoods are generated from the decision variables
// of the search strategy only.
optional bool lns_focus_on_decision_variables = 105 [default = false];
// Turns on the relaxation-induced neighborhood generator.
optional bool use_rins_lns = 129 [default = true];
// If true, registers more LNS subsolvers with different parameters.
optional bool diversify_lns_params = 137 [default = false];
// Randomize fixed search.
optional bool randomize_search = 103 [default = false];
// Search randomization will collect equivalent 'max valued' variables, and
// pick one randomly. For instance, if the variable strategy is CHOOSE_FIRST,
// all unassigned variables are equivalent. If the variable strategy is
// CHOOSE_LOWEST_MIN, and `lm` is the current lowest min of all unassigned
// variables, then the set of max valued variables will be all unassigned
// variables where
//   lm <= variable min <= lm + search_randomization_tolerance
optional int64 search_randomization_tolerance = 104 [default = 0];
// If true, we automatically detect variables whose constraints are always
// enforced by the same literal and we mark them as optional. This allows
// us to propagate them as if they were present in some situations.
optional bool use_optional_variables = 108 [default = true];
// The solver usually exploits the LP relaxation of a model. If this option is
// true, then whatever is inferred by the LP will be used like a heuristic to
// compute EXACT propagation on the IP. So with this option, there are no
// numerical imprecision issues.
optional bool use_exact_lp_reason = 109 [default = true];
// This can be beneficial if there are a lot of no-overlap constraints but a
// relatively low number of different intervals in the problem. Like 1000
// intervals, but 1M intervals in the no-overlap constraints covering them.
optional bool use_combined_no_overlap = 133 [default = false];
// ==========================================================================
// MIP -> CP-SAT (i.e. IP with integer coefficients) conversion parameters
// that are used by our automatic "scaling" algorithm.
//
// Note that it is hard to do a meaningful conversion automatically, and if
// you have a model with continuous variables, it is best if you scale the
// domain of the variables yourself so that you have a relevant precision for
// the application at hand. Same for the coefficients and constraint bounds.
// ==========================================================================
// We need to bound the maximum magnitude of the variables for CP-SAT, and
// this is the bound we use. If the MIP model expects larger variable values
// in the solution, then the converted model will likely not be relevant.
optional double mip_max_bound = 124 [default = 1e7];
// All continuous variables of the problem will be multiplied by this factor.
// By default, we don't do any variable scaling and rely on the MIP model to
// specify continuous variable domains with the wanted precision.
optional double mip_var_scaling = 125 [default = 1.0];
// When scaling constraints with double coefficients to integer coefficients,
// we will multiply by a power of 2 and round the coefficients. We will choose
// the lowest power such that we have this relative precision on each of the
// constraint (resp. objective) coefficients.
//
// We also use this to decide by how much we relax the constraint bounds so
// that we can have a feasible integer solution of constraints involving
// continuous variables. This is required for instance when you have an == rhs
// constraint, as in many situations you cannot have a perfect equality with
// integer variables and coefficients.
optional double mip_wanted_precision = 126 [default = 1e-6];
// To avoid integer overflow, we always force the maximum possible constraint
// activity (and objective value) according to the initial variable domains to
// be smaller than 2 to this given power. Because of this, we cannot always
// reach the "mip_wanted_precision" parameter above.
//
// This can go as high as 62, but some internal algos currently abort early if
// they might run into integer overflow, so it is better to keep it a bit
// lower than this.
optional int32 mip_max_activity_exponent = 127 [default = 53];
// As explained in mip_wanted_precision and mip_max_activity_exponent, we
// cannot always reach the wanted coefficient precision during scaling. When
// we cannot, we will report MODEL_INVALID if the relative precision is larger
// than this parameter.
optional double mip_check_precision = 128 [default = 1e-4];
// Indicates if the CP-SAT layer should catch Control-C (SIGINT) signals
// when calling solve. If set, catching the SIGINT signal will terminate the
// search gracefully, as if a time limit was reached.
optional bool catch_sigint_signal = 135 [default = true];
}