minimize_constraint.cpp
00001 // 
00002 // Copyright (c) 2010-2012, Benjamin Kaufmann
00003 // 
00004 // This file is part of Clasp. See http://www.cs.uni-potsdam.de/clasp/ 
00005 // 
00006 // Clasp is free software; you can redistribute it and/or modify
00007 // it under the terms of the GNU General Public License as published by
00008 // the Free Software Foundation; either version 2 of the License, or
00009 // (at your option) any later version.
00010 // 
00011 // Clasp is distributed in the hope that it will be useful,
00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00014 // GNU General Public License for more details.
00015 // 
00016 // You should have received a copy of the GNU General Public License
00017 // along with Clasp; if not, write to the Free Software
00018 // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
00019 //
00020 #include <clasp/minimize_constraint.h>
00021 #include <clasp/solver.h>
00022 #include <clasp/weight_constraint.h>
00023 namespace Clasp { 
00025 // SharedMinimizeData
00027 SharedMinimizeData::SharedMinimizeData(const SumVec& lhsAdjust, MinimizeMode m) : mode_(m) {
00028         adjust_  = lhsAdjust;
00029         count_   = 1;
00030         resetBounds();
00031         setMode(MinimizeMode_t::optimize);
00032 }
00033 SharedMinimizeData::~SharedMinimizeData() {}
00034 
00035 void SharedMinimizeData::destroy() const {
00036         this->~SharedMinimizeData();
00037         ::operator delete(const_cast<SharedMinimizeData*>(this));
00038 }
00039 
00040 void SharedMinimizeData::resetBounds() {
00041         gCount_  = 0;
00042         optGen_  = 0;
00043         lower_.assign(numRules(), 0);
00044         opt_[0].assign(numRules(), maxBound());
00045         opt_[1] = opt_[0];
00046         const WeightLiteral* lit = lits;
00047         for (weight_t wPos = 0, end = (weight_t)weights.size(), x; wPos != end; wPos = x+1) {
00048                 for (x = wPos; weights[x].next; ++x) { ; }
00049                 if (weights[x].weight < 0) {
00050                         while (lit->second != wPos) { ++lit; }
00051                         while (lit->second == wPos) {
00052                                 lower_[weights[x].level] += weights[x].weight;
00053                                 ++lit;
00054                         }
00055                 }
00056         }
00057 }
00058 
00059 bool SharedMinimizeData::setMode(MinimizeMode m, const wsum_t* bound, uint32 boundSize)  {
00060         mode_ = m;
00061         if (boundSize && bound) {
00062                 SumVec& opt = opt_[0];
00063                 bool    ok  = false;
00064                 gCount_     = 0;
00065                 optGen_     = 0;
00066                 boundSize   = std::min(boundSize, numRules());
00067                 for (uint32 i = 0, end = boundSize; i != end; ++i) {
00068                         wsum_t B = bound[i], a = adjust(i);
00069                         B        = a >= 0 || (maxBound() + a) >= B ? B - a : maxBound();
00070                         wsum_t d = B - lower_[i];
00071                         if (d < 0 && !ok) { return false; }
00072                         opt[i]   = B;
00073                         ok       = ok || d > 0;
00074                 }
00075                 for (uint32 i = boundSize, end = (uint32)opt.size(); i != end; ++i) { opt[i] = maxBound(); }
00076         }
00077         return true;
00078 }
00079 
00080 MinimizeConstraint* SharedMinimizeData::attach(Solver& s, uint32 strat, bool addRef) {
00081         if (addRef) this->share();
00082         const SolverParams& c = s.configuration();
00083         if (strat == UINT32_MAX) {
00084                 strat = c.optStrat;
00085         }
00086         if ((c.optHeu & SolverStrategies::opt_sign) != 0) {
00087                 for (const WeightLiteral* it = lits; !isSentinel(it->first); ++it) {
00088                         s.setPref(it->first.var(), ValueSet::pref_value, falseValue(it->first));
00089                 }
00090         }
00091         MinimizeConstraint* ret;
00092         if (strat < SolverStrategies::opt_unsat || mode() == MinimizeMode_t::enumerate) {
00093                 ret = new DefaultMinimize(this, strat);
00094         }
00095         else {
00096                 ret = new UncoreMinimize(this, strat == SolverStrategies::opt_unsat_pre);
00097         }
00098         ret->attach(s);
00099         return ret;
00100 }
00101 
00102 const SumVec* SharedMinimizeData::setOptimum(const wsum_t* newOpt) {
00103         if (mode() == MinimizeMode_t::enumerate) {
00104                 opt_[1].assign(newOpt, newOpt+numRules());
00105                 return opt_ + 1;
00106         }
00107         if (optGen_) { return opt_ + (optGen_&1u); }
00108         uint32 g = gCount_;
00109         uint32 n = 1u - (g & 1u);
00110         opt_[n].assign(newOpt, newOpt+numRules());
00111         if (++g == 0) { g = 2; }
00112         gCount_  = g;
00113         return opt_ + n;
00114 }
00115 void SharedMinimizeData::setLower(uint32 lev, wsum_t low) {
00116         lower_[lev] = low;
00117 }
00118 void SharedMinimizeData::markOptimal() {
00119         optGen_ = generation();
00120 }
00121 bool SharedMinimizeData::heuristic(Solver& s, bool full) const {
00122         const bool heuristic = full || (s.queueSize() == 0 && s.decisionLevel() == s.rootLevel());
00123         if (heuristic && s.propagate()) {
00124                 for (const WeightLiteral* w = lits; !isSentinel(w->first); ++w) {
00125                         if (s.value(w->first.var()) == value_free) {
00126                                 s.assume(~w->first);
00127                                 if (!full || !s.propagate()) { break; }
00128                         }
00129                 }
00130         }
00131         return !s.hasConflict();
00132 }
00133 void SharedMinimizeData::sub(wsum_t* lhs, const LevelWeight* w, uint32& aLev) const {
00134         if (w->level < aLev) { aLev = w->level; }
00135         do { lhs[w->level] -= w->weight; } while (w++->next);
00136 }
00137 bool SharedMinimizeData::imp(wsum_t* lhs, const LevelWeight* w, const wsum_t* rhs, uint32& lev) const {
00138         assert(lev <= w->level && std::equal(lhs, lhs+lev, rhs));
00139         while (lev != w->level && lhs[lev] == rhs[lev]) { ++lev; }
00140         for (uint32 i = lev, end = numRules(); i != end; ++i) {
00141                 wsum_t temp = lhs[i];
00142                 if (i == w->level) { temp += w->weight; if (w->next) ++w; }
00143                 if (temp != rhs[i]){ return temp > rhs[i]; }
00144         }
00145         return false;
00146 }
00148 // MinimizeConstraint
00150 MinimizeConstraint::MinimizeConstraint(SharedData* d) : shared_(d) {}
00151 
00152 MinimizeConstraint::~MinimizeConstraint() {
00153         assert(shared_ == 0 && "MinimizeConstraint not destroyed!");
00154 }
00155 bool MinimizeConstraint::prepare(Solver& s, bool useTag) {
00156         CLASP_ASSERT_CONTRACT_MSG(!s.isFalse(tag_), "Tag literal must not be false!");
00157         if (useTag && tag_ == posLit(0))      { tag_ = posLit(s.pushTagVar(false)); }
00158         if (s.isTrue(tag_) || s.hasConflict()){ return !s.hasConflict(); }
00159         return useTag ? s.pushRoot(tag_) : s.force(tag_, 0);
00160 }
00161 void MinimizeConstraint::destroy(Solver* s, bool d) {
00162         shared_->release();
00163         shared_ = 0;
00164         Constraint::destroy(s, d);
00165 }
00166 Constraint* MinimizeConstraint::cloneAttach(Solver& s) {
00167         return shared_->attach(s);
00168 }
00170 // DefaultMinimize
00172 union DefaultMinimize::UndoInfo {
00173         UndoInfo() : rep(0) {}
00174         struct {
00175                 uint32 idx    : 30; // index of literal on stack
00176                 uint32 newDL  :  1; // first literal of new DL?
00177                 uint32 idxSeen:  1; // literal with idx already propagated?
00178         }      data;
00179         uint32 rep;
00180         uint32 index() const { return data.idx; }
00181         bool   newDL() const { return data.newDL != 0u; }
00182 };
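// Note (editorial addition): overlaying the bit-field struct with the plain
// uint32 'rep' allows a whole undo record to be cleared or copied as a
// single 32-bit value (cf. the UndoInfo() : rep(0) constructor above).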
00183 DefaultMinimize::DefaultMinimize(SharedData* d, uint32 strat)
00184         : MinimizeConstraint(d)
00185         , bounds_(0)
00186         , pos_(d->lits)
00187         , undo_(0)
00188         , undoTop_(0)
00189         , size_(d->numRules()) {
00190         step_.type = strat;
00191         if (step_.type == SolverStrategies::opt_hier && d->numRules() == 1) {
00192                 step_.type = SolverStrategies::opt_def;
00193         }
00194 }
00195 
00196 DefaultMinimize::~DefaultMinimize() {
00197         delete [] bounds_;
00198         delete [] undo_;
00199 }
00200 
00201 void DefaultMinimize::destroy(Solver* s, bool detach) {
00202         if (s && detach) {
00203                 for (const WeightLiteral* it = shared_->lits; !isSentinel(it->first); ++it) {
00204                         s->removeWatch(it->first, this);
00205                 }
00206                 for (uint32 dl = 0; (dl = lastUndoLevel(*s)) != 0; ) {
00207                         s->removeUndoWatch(dl, this);
00208                         DefaultMinimize::undoLevel(*s);
00209                 }
00210         }
00211         MinimizeConstraint::destroy(s, detach);
00212 }
00213 
00214 bool DefaultMinimize::attach(Solver& s) {
00215         assert(s.decisionLevel() == 0 && !bounds_);
00216         uint32 numL = 0;
00217         VarVec up;
00218         for (const WeightLiteral* it = shared_->lits; !isSentinel(it->first); ++it, ++numL) {
00219                 if (s.value(it->first.var()) == value_free) {
00220                         s.addWatch(it->first, this, numL);
00221                 }
00222                 else if (s.isTrue(it->first)) {
00223                         up.push_back(numL);
00224                 }
00225         }
00226         bounds_ = new wsum_t[(numRules() * (3 + uint32(step_.type != 0)))]; // upper, sum, temp, lower
00227         std::fill(this->opt(), this->sum(), SharedData::maxBound());
00228         std::fill(this->sum(), this->end(), wsum_t(0));
00229         stepInit(0);
00230         // [0,numL+1)      : undo stack
00231         // [numL+1, numL*2): pos  stack
00232         undo_    = new UndoInfo[(numL*2)+1];
00233         undoTop_ = 0;
00234         posTop_  = numL+1;
00235         actLev_  = 0;
00236         for (WeightVec::size_type i = 0; i != up.size(); ++i) {
00237                 DefaultMinimize::propagate(s, shared_->lits[up[i]].first, up[i]);
00238         }
00239         return true;
00240 }
00241 
00242 // Returns the numerically highest decision level watched by this constraint.
00243 uint32 DefaultMinimize::lastUndoLevel(const Solver& s) const {
00244         return undoTop_ != 0
00245                 ? s.level(shared_->lits[undo_[undoTop_-1].index()].first.var())
00246                 : 0;
00247 }
00248 bool DefaultMinimize::litSeen(uint32 i) const { return undo_[i].data.idxSeen != 0; }
00249 
00250 // Pushes the literal at index idx onto the undo stack
00251 // and marks it as seen; if the literal is the first one of the current
00252 // decision level, a new undo watch is added.
00253 void DefaultMinimize::pushUndo(Solver& s, uint32 idx) {
00254         assert(idx >= static_cast<uint32>(pos_ - shared_->lits));
00255         undo_[undoTop_].data.idx  = idx;
00256         undo_[undoTop_].data.newDL= 0;
00257         if (lastUndoLevel(s) != s.decisionLevel()) {
00258                 // remember current "look at" position and start
00259                 // a new decision level on the undo stack
00260                 undo_[posTop_++].data.idx = static_cast<uint32>(pos_-shared_->lits);
00261                 s.addUndoWatch(s.decisionLevel(), this);
00262                 undo_[undoTop_].data.newDL = 1;
00263         }
00264         undo_[idx].data.idxSeen   = 1;
00265         ++undoTop_;
00266 }
00268 // MinimizeConstraint - arithmetic strategy implementation
00269 //
00270 // For now we use a simple "switch-on-type" approach. 
00271 // In the future, if new strategies emerge, we may want to use a strategy hierarchy.
00273 #define STRATEGY(x) shared_->x
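// Illustrative expansion (editorial addition): the call
//   STRATEGY(add(sum(), shared_->lits[data]));
// in propagate() below expands to
//   shared_->add(sum(), shared_->lits[data]);
// i.e. the arithmetic is forwarded to the shared data object, which is where
// the "switch on type" (single vs. multiple optimization levels) presumably
// takes place.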
00274 // set *lhs = *rhs, where lhs != rhs
00275 void DefaultMinimize::assign(wsum_t* lhs, wsum_t* rhs) const {
00276         std::memcpy(lhs, rhs, size_*sizeof(wsum_t));
00277 }
00278 // returns lhs > rhs
00279 bool DefaultMinimize::greater(wsum_t* lhs, wsum_t* rhs, uint32 len, uint32& aLev) const {
00280         while (*lhs == *rhs && --len) { ++lhs, ++rhs; ++aLev; }
00281         return *lhs > *rhs;
00282 }
00284 Constraint::PropResult DefaultMinimize::propagate(Solver& s, Literal, uint32& data) {
00285         pushUndo(s, data);
00286         STRATEGY(add(sum(), shared_->lits[data]));
00287         return PropResult(propagateImpl(s, propagate_new_sum), true);
00288 }
00289 
00290 // Computes the set of literals implying p and returns 
00291 // the highest decision level of that set.
00292 // PRE: p is implied on highest undo level
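// Sketch (editorial addition): the undo stack is walked backwards while
// subtracting each literal's weight from a temporary copy of the current sum;
// once p is no longer implied, the stack prefix up to and including the
// literal just removed is the implying set, and the maximum of that literal's
// decision level and the tag/step-literal levels is returned.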
00293 uint32 DefaultMinimize::computeImplicationSet(const Solver& s, const WeightLiteral& p, uint32& undoPos) {
00294         wsum_t* temp    = this->temp(), *opt = this->opt();
00295         uint32 up       = undoTop_, lev = actLev_;
00296         uint32 minLevel = std::max(s.level(tag_.var()), s.level(s.sharedContext()->stepLiteral().var()));
00297         // start from current sum
00298         assign(temp, sum());
00299         // start with full set
00300         for (UndoInfo u; up != 0; --up) {
00301                 u = undo_[up-1];
00302                 // subtract last element from set
00303                 STRATEGY(sub(temp, shared_->lits[u.index()], lev));
00304                 if (!STRATEGY(imp(temp, p, opt, lev))) {
00305                         // p is no longer implied after we removed last literal,
00306                         // hence [0, up) implies p @ level of last literal
00307                         undoPos = up;
00308                         return std::max(s.level(shared_->lits[u.index()].first.var()), minLevel);
00309                 }
00310         }
00311         undoPos = 0; 
00312         return minLevel;
00313 }
00314 
00315 bool DefaultMinimize::propagateImpl(Solver& s, PropMode m) {
00316         Iter it    = pos_;
00317         uint32 idx = static_cast<uint32>(it - shared_->lits);
00318         uint32 DL  = s.decisionLevel();
00319         // current implication level or "unknown" if
00320         // we propagate a new optimum 
00321         uint32 impLevel = DL + (m == propagate_new_opt);
00322         weight_t lastW  = -1;
00323         uint32 undoPos  = undoTop_;
00324         bool ok         = true;
00325         actLev_         = std::min(actLev_, shared_->level(idx));
00326         for (wsum_t* sum= this->sum(), *opt = this->opt(); ok && !isSentinel(it->first); ++it, ++idx) {
00327                 // skip propagated/false literals
00328                 if (litSeen(idx) || (m == propagate_new_sum && s.isFalse(it->first))) {
00329                         continue;
00330                 }
00331                 if (lastW != it->second) {
00332                         // check if the current weight is implied
00333                         if (!STRATEGY(imp(sum, *it, opt, actLev_))) {
00334                                 // all good - current optimum is safe
00335                                 pos_    = it;
00336                                 return true;
00337                         }
00338                         // compute implication set and level of current weight
00339                         if (m == propagate_new_opt) {
00340                                 impLevel = computeImplicationSet(s, *it, undoPos);
00341                         }
00342                         lastW = it->second;
00343                 }
00344                 assert(active());
00345                 // force implied literals
00346                 if (!s.isFalse(it->first) || (impLevel < DL && s.level(it->first.var()) > impLevel)) {
00347                         if (impLevel != DL) { DL = s.undoUntil(impLevel, true); }
00348                         ok = s.force(~it->first, impLevel, this, undoPos);
00349                 }
00350         }
00351         return ok;
00352 }
00353 
00354 // pops free literals from the undo stack and decreases the current sum
00355 void DefaultMinimize::undoLevel(Solver&) {
00356         assert(undoTop_ != 0 && posTop_ > undoTop_);
00357         uint32 up  = undoTop_;
00358         uint32 idx = undo_[--posTop_].index();
00359         for (wsum_t* sum = this->sum();;) {
00360                 UndoInfo& u = undo_[--up];
00361                 undo_[u.index()].data.idxSeen = 0;
00362                 STRATEGY(sub(sum, shared_->lits[u.index()], actLev_));
00363                 if (u.newDL()) { break; }
00364         }
00365         undoTop_ = up;
00366         Iter temp= shared_->lits + idx;
00367         if (temp < pos_) {
00368                 pos_    = temp;
00369                 actLev_ = std::min(actLev_, shared_->level(idx));
00370         }
00371 }
00372 
00373 // computes the reason for p, i.e.
00374 // all literals that were propagated before p
00375 void DefaultMinimize::reason(Solver& s, Literal p, LitVec& lits) {
00376         assert(s.isTrue(tag_));
00377         uint32 stop = s.reasonData(p);
00378         Literal   x = s.sharedContext()->stepLiteral();
00379         assert(stop <= undoTop_);
00380         if (!isSentinel(x) && s.isTrue(x)) { lits.push_back(x); }
00381         if (s.level(tag_.var()))           { lits.push_back(tag_); }
00382         for (uint32 i = 0; i != stop; ++i) {
00383                 UndoInfo u = undo_[i];
00384                 x = shared_->lits[u.index()].first;
00385                 lits.push_back(x);
00386         }
00387 }
00388 
00389 bool DefaultMinimize::minimize(Solver& s, Literal p, CCMinRecursive* rec) {
00390         assert(s.isTrue(tag_));
00391         uint32 stop = s.reasonData(p);
00392         Literal   x = s.sharedContext()->stepLiteral();
00393         assert(stop <= undoTop_);
00394         if (!s.ccMinimize(x, rec) || !s.ccMinimize(tag_, rec)) { return false; }
00395         for (uint32 i = 0; i != stop; ++i) {
00396                 UndoInfo u = undo_[i];
00397                 x = shared_->lits[u.index()].first;
00398                 if (!s.ccMinimize(x, rec)) {
00399                         return false;
00400                 }
00401         }
00402         return true;
00403 }
00405 // DefaultMinimize - bound management
00407 // Stores the current sum as the shared optimum.
00408 void DefaultMinimize::commitUpperBound(const Solver&)  {
00409         shared_->setOptimum(sum());
00410         if (step_.type == SolverStrategies::opt_inc) { step_.size *= 2; }
00411 }
00412 bool DefaultMinimize::commitLowerBound(const Solver&, bool upShared) {
00413         bool act   = active() && shared_->checkNext();
00414         bool more  = step_.lev < size_ && (step_.size > 1 || step_.lev != size_-1);
00415         if (act && step_.type && step_.lev < size_) {
00416                 uint32 x = step_.lev;
00417                 wsum_t L = opt()[x] + 1;
00418                 stepLow()= L;
00419                 while (upShared && shared_->lower(x) < L)   { shared_->setLower(x, L); }
00420                 if (step_.type == SolverStrategies::opt_inc){ step_.size = 1; }
00421         }
00422         return more;
00423 }
00424 bool DefaultMinimize::handleUnsat(Solver& s, bool up, LitVec& out) {
00425         bool more = shared_->optimize() && commitLowerBound(s, up);
00426         uint32 dl = s.isTrue(tag_) ? s.level(tag_.var()) : 0;
00427         relaxBound(false);
00428         if (more && dl && dl <= s.rootLevel()) {
00429                 s.popRootLevel(s.rootLevel()-dl, &out); // pop and remember new path
00430                 return s.popRootLevel(1);               // pop tag - disable constraint
00431         }
00432         return false;
00433 }
00434 
00435 // Disables the minimize constraint by clearing its upper bound.
00436 bool DefaultMinimize::relaxBound(bool full) {
00437         if (active()) { std::fill(opt(), opt()+size_, SharedData::maxBound()); }
00438         pos_       = shared_->lits;
00439         actLev_    = 0;
00440         if (full || !shared_->optimize()) { stepInit(0); }
00441         return true;
00442 }
00443 
00444 void DefaultMinimize::stepInit(uint32 n) {
00445         step_.size = uint32(step_.type != SolverStrategies::opt_dec);
00446         if (step_.type) { step_.lev = n; if (n != size_) stepLow() = 0-shared_->maxBound(); }
00447         else            { step_.lev = shared_->maxLevel(); }
00448 }
00449 
00450 // Integrates new (tentative) bounds based on the ones stored in the shared data object.
00451 bool DefaultMinimize::integrateBound(Solver& s) {
00452         bool useTag = shared_->optimize() && (step_.type != 0 || shared_->mode() == MinimizeMode_t::enumOpt);
00453         if (!prepare(s, useTag)) { return false; }
00454         if (useTag && s.level(tag_.var()) == 0) {
00455                 step_.type = 0;
00456                 stepInit(0);
00457         }
00458         if ((active() && !shared_->checkNext())) { return !s.hasConflict(); }
00459         WeightLiteral min(posLit(0), shared_->weights.empty() ? uint32(0) : (uint32)shared_->weights.size()-1);
00460         while (!s.hasConflict() && updateBounds(shared_->checkNext())) {
00461                 uint32 x = 0;
00462                 uint32 dl= s.decisionLevel() + 1;
00463                 if (!STRATEGY(imp(sum(), min, opt(), actLev_)) || (dl = computeImplicationSet(s, min, x)) > s.rootLevel()) {
00464                         for (--dl; !s.hasConflict() || s.resolveConflict(); ) {
00465                                 if      (s.undoUntil(dl, true) > dl)         { s.backtrack(); }
00466                                 else if (propagateImpl(s, propagate_new_opt)){ return true;   }
00467                         }
00468                 }
00469                 if (!shared_->checkNext()) {
00470                         break;
00471                 }
00472                 if (!step_.type) { ++step_.lev; }
00473                 else             { stepLow() = ++opt()[step_.lev]; }
00474         }
00475         relaxBound(false);
00476         if (!s.hasConflict()) { 
00477                 s.undoUntil(0);
00478                 s.setStopConflict();
00479         }
00480         return false;
00481 }
00482 
00483 // Loads bounds from the shared data object and (optionally) applies
00484 // the current step to get the next tentative bound to check.
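// Worked example (editorial addition): with upper bound U = 10 and lower
// bound L = 0 on the active level, diff = 10 and half = 5; opt_inc caps its
// growing step size at this half, whereas opt_dec probes the full distance
// first and then continues with (roughly) half-sized steps.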
00485 bool DefaultMinimize::updateBounds(bool applyStep) {
00486         for (;;) {
00487                 const uint32  seq   = shared_->generation();
00488                 const wsum_t* upper = shared_->upper();
00489                 const wsum_t* myLow = step_.type ? end() : shared_->lower();
00490                 const wsum_t* sLow  = shared_->lower();
00491                 wsum_t*       bound = opt();
00492                 uint32        appLev= applyStep ? step_.lev : size_;
00493                 for (uint32 i = 0; i != size_; ++i) {
00494                         wsum_t U = upper[i], B = bound[i];
00495                         if (i != appLev) {
00496                                 if (myLow != sLow && (i > step_.lev || sLow[i] > myLow[i])) {
00497                                         end()[i] = sLow[i];
00498                                 }
00499                                 if      (i > appLev)   { bound[i] = SharedData::maxBound(); }
00500                                 else if (U >= myLow[i]){ bound[i] = U; }
00501                                 else                   { stepInit(size_); return false; }
00502                                 continue;
00503                         }
00504                         if (step_.type) {
00505                                 wsum_t L = (stepLow() = std::max(myLow[i], sLow[i]));
00506                                 if (U < L) { // path exhausted?
00507                                         stepInit(size_);
00508                                         return false;
00509                                 }
00510                                 if (B < L) { // tentative bound too strong?
00511                                         return false;
00512                                 }
00513                                 if (B < U) { // existing step?
00514                                         assert(std::count(bound+i+1, bound+size_, SharedData::maxBound()) == int(size_ - (i+1)));
00515                                         return true;
00516                                 }
00517                                 if (U == L) {// done with current level?
00518                                         bound[i] = U;
00519                                         stepInit(++appLev);
00520                                         continue;
00521                                 }
00522                                 assert(U > L && B > L);
00523                                 wsum_t diff = U - L;
00524                                 uint32 half = static_cast<uint32>( (diff>>1) | (diff&1) );
00525                                 if (step_.type == SolverStrategies::opt_inc) {
00526                                         step_.size = std::min(step_.size, half);
00527                                 }
00528                                 else if (step_.type == SolverStrategies::opt_dec) {
00529                                         if (!step_.size) { step_.size = static_cast<uint32>(diff); }
00530                                         else             { step_.size = half; }
00531                                 }
00532                         }
00533                         assert(step_.size != 0);
00534                         bound[i] = U - step_.size;
00535                         actLev_  = 0;
00536                         pos_     = shared_->lits;
00537                 }
00538                 if (seq == shared_->generation()) { break; }
00539         }
00540         return step_.lev != size_ || !applyStep;
00541 }
00542 #undef STRATEGY
00543 
00544 // MinimizeBuilder
00546 MinimizeBuilder::MinimizeBuilder() : ready_(false) { }
00547 MinimizeBuilder::~MinimizeBuilder() { clear(); }
00548 void MinimizeBuilder::clear() {
00549         for (LitVec::size_type i = 0; i != lits_.size(); ++i) {
00550                 Weight::free(lits_[i].second);
00551         }
00552         LitRepVec().swap(lits_);
00553         SumVec().swap(adjust_);
00554         ready_ = false;
00555 }
00556 
00557 // adds a new minimize statement
00558 MinimizeBuilder& MinimizeBuilder::addRule(const WeightLitVec& lits, wsum_t initSum) {
00559         unfreeze();
00560         uint32 lev = (uint32)adjust_.size();
00561         adjust_.push_back(initSum);
00562         for (WeightLitVec::const_iterator it = lits.begin(); it != lits.end(); ++it) {
00563                 adjust_[lev] += addLitImpl(lev, *it);
00564         }
00565         return *this;
00566 }
00567 MinimizeBuilder& MinimizeBuilder::addLit(uint32 lev, WeightLiteral lit) {
00568         unfreeze();
00569         if (lev >= adjust_.size()) { adjust_.resize(lev + 1, 0); }
00570         adjust_[lev] += addLitImpl(lev, lit);
00571         return *this;
00572 }
00573 
00574 // adds the weights of the given lit to the appropriate levels in vec
00575 void MinimizeBuilder::addTo(LitRep lit, SumVec& vec) {
00576         vec.resize(numRules());
00577         for (Weight* r = lit.second; r; r = r->next) {
00578                 vec[r->level] += r->weight;
00579         }
00580 }
00581 
00582 void MinimizeBuilder::unfreeze() {
00583         if (ready_) {
00584                 assert(isSentinel(lits_.back().first));
00585                 lits_.pop_back();
00586                 ready_ = false;
00587         }
00588 }
00589 
00590 // merges duplicate literals and removes literals that are already assigned
00591 // POST: the literals in lits_ are unique and sorted by decreasing weight
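// Example (editorial addition): a literal x added twice on the same level
// with weights 2 and 3 is kept once with weight 5; if both x and ~x occur,
// the smaller of the two weights is booked as a fixed adjustment (exactly one
// of the two literals is true in any model) and subtracted from the larger.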
00592 bool MinimizeBuilder::prepare(SharedContext& ctx) {
00593         std::sort(lits_.begin(), lits_.end(), CmpByLit());
00594         LitVec::size_type j = 0;
00595         Solver& s           = *ctx.master();
00596         Weight* w           = 0;
00597         for (LitVec::size_type i = 0, k = 0, end = lits_.size(); i != end;) {
00598                 w    = lits_[i].second;
00599                 if (s.value(lits_[i].first.var()) == value_free) {
00600                         for (k = i+1; k < end && lits_[i].first == lits_[k].first; ++k) {
00601                                 // duplicate literal - merge weights
00602                                 if (w->level == lits_[k].second->level) {
00603                                         // add up weights from same level
00604                                         w->weight += lits_[k].second->weight;
00605                                 }
00606                                 else {
00607                                         // extend weight vector with new level
00608                                         w->next         = lits_[k].second;
00609                                         w               = w->next;
00610                                         lits_[k].second = 0;
00611                                 }
00612                                 Weight::free(lits_[k].second);
00613                         }       
00614                         // exempt from variable elimination
00615                         ctx.setFrozen(lits_[i].first.var(), true);
00616                         lits_[j++] = lits_[i];
00617                         i = k;
00618                 }
00619                 else {
00620                         if (s.isTrue(lits_[i].first)) {
00621                                 addTo(lits_[i], adjust_);
00622                         }
00623                         Weight::free(lits_[i].second);
00624                         ++i;    
00625                 }
00626         }
00627         shrinkVecTo(lits_, j);
00628         // allocate enough reason data for all our vars
00629         ctx.requestData(!lits_.empty() ? lits_.back().first.var() : 0);
00630         // now literals are unique - merge any complementary literals
00631         j = 0; CmpByWeight greaterW; int cmp;
00632         for (LitVec::size_type i = 0, k = 1; i < lits_.size(); ) {
00633                 if (k == lits_.size() || lits_[i].first.var() != lits_[k].first.var()) {
00634                         lits_[j++] = lits_[i];
00635                         ++i, ++k;
00636                 }
00637                 else if ( (cmp = greaterW.compare(lits_[i], lits_[k])) != 0 ) {
00638                         LitVec::size_type wMin = cmp > 0 ? k : i;
00639                         LitVec::size_type wMax = cmp > 0 ? i : k;
00640                         addTo(lits_[wMin], adjust_);
00641                         mergeReduceWeight(lits_[wMax], lits_[wMin]);
00642                         assert(lits_[wMin].second == 0);
00643                         lits_[j++] = lits_[wMax];
00644                         i += 2;
00645                         k += 2;
00646                 }
00647                 else {
00648                         // weights are equal
00649                         addTo(lits_[i], adjust_);
00650                         Weight::free(lits_[i].second);
00651                         Weight::free(lits_[k].second);
00652                         i += 2;
00653                         k += 2;
00654                 }
00655         }
00656         shrinkVecTo(lits_, j);
00657         std::stable_sort(lits_.begin(), lits_.end(), greaterW);
00658         if (adjust_.empty()) {
00659                 adjust_.push_back(0);
00660         }
00661         // add terminating sentinel literal
00662         lits_.push_back(LitRep(posLit(0), new Weight(static_cast<uint32>(adjust_.size()-1), 0)));
00663         return true;
00664 }
00665 
00666 // creates a suitable minimize constraint from the 
00667 // previously added minimize statements
00668 MinimizeBuilder::SharedData* MinimizeBuilder::build(SharedContext& ctx) {
00669         assert(!ctx.master()->hasConflict());
00670         if (!ctx.master()->propagate()) { return 0; }
00671         if (!ready_ && !prepare(ctx))   { return 0; }
00672         SharedData* srep = new (::operator new(sizeof(SharedData) + (lits_.size()*sizeof(WeightLiteral)))) SharedData(adjust_);
00673         if (adjust_.size() == 1) {
00674                 for (LitVec::size_type i = 0; i != lits_.size(); ++i) {
00675                         srep->lits[i] = WeightLiteral(lits_[i].first, lits_[i].second->weight);
00676                 }
00677         }
00678         else {
00679                 // The weights of a multi-level constraint are stored in a flattened way,
00680                 // i.e. we store all weights in one vector and each literal stores
00681                 // an index into that vector. For a (weight) literal i, weights[i.second]
00682                 // is the first weight of literal i and weights[i.second].next denotes
00683                 // whether i has more than one weight.
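                // Illustrative layout (editorial addition): for a literal a with
                // weights {level 0: 2, level 1: 1} and a literal b with weight
                // {level 1: 3}, the flattened data would look like
                //   weights = [(lev 0, 2, next=1), (lev 1, 1, next=0), (lev 1, 3, next=0)]
                //   lits    = [(a, 0), (b, 2)]
                // i.e. a's weights start at index 0 and b's single weight at index 2.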
00684                 srep->lits[0].first  = lits_[0].first;
00685                 srep->lits[0].second = addFlattened(srep->weights, *lits_[0].second);
00686                 for (LitVec::size_type i = 1; i < lits_.size(); ++i) {
00687                         srep->lits[i].first = lits_[i].first;
00688                         if (eqWeight(&srep->weights[srep->lits[i-1].second], *lits_[i].second)) {
00689                                 // reuse existing weight
00690                                 srep->lits[i].second = srep->lits[i-1].second;
00691                         }
00692                         else {
00693                                 // add a new flattened list of weights to srep->weights
00694                                 srep->lits[i].second = addFlattened(srep->weights, *lits_[i].second);
00695                         }
00696                 }
00697         }
00698         srep->resetBounds();
00699         ready_ = true;
00700         return srep;
00701 }
00702 
00703 // computes x.weight -= by.weight
00704 // PRE: x.weight > by.weight
00705 void MinimizeBuilder::mergeReduceWeight(LitRep& x, LitRep& by) {
00706         assert(x.second->level <= by.second->level);
00707         Weight dummy(0,0);
00708         dummy.next  = x.second;
00709         Weight* ins = &dummy;
00710         for (;by.second;) {
00711                 // unlink head
00712                 Weight* t = by.second;
00713                 by.second = by.second->next;
00714                 // prepare for subtraction
00715                 t->weight*= -1;
00716                 // find correct insert location
00717                 while (ins->next && ins->next->level < t->level) {
00718                         ins = ins->next;
00719                 }
00720                 if (!ins->next || ins->next->level > t->level) {
00721                         t->next   = ins->next ? ins->next : 0;
00722                         ins->next = t;
00723                 }
00724                 else if ( (ins->next->weight += t->weight) != 0 ) {
00725                         delete t;
00726                 }
00727                 else {
00728                         Weight* t2 = ins->next;
00729                         ins->next  = t2->next;
00730                         delete t2;
00731                         delete t;
00732                 }
00733         }
00734         x.second = dummy.next;
00735 }
00736 
00737 // sort by literal id followed by weight
00738 bool MinimizeBuilder::CmpByLit::operator()(const LitRep& lhs, const LitRep& rhs) const {
00739         return lhs.first < rhs.first ||
00740                 (lhs.first == rhs.first && lhs.second->level < rhs.second->level);
00741 }
00742 // sort by final weight
00743 bool MinimizeBuilder::CmpByWeight::operator()(const LitRep& lhs, const LitRep& rhs) const {
00744         Weight* wLhs = lhs.second;
00745         Weight* wRhs = rhs.second;
00746         while (wLhs && wRhs) {
00747                 if (wLhs->level != wRhs->level) {
00748                         return wLhs->level < wRhs->level;
00749                 }
00750                 if (wLhs->weight != wRhs->weight) {
00751                         return wLhs->weight > wRhs->weight;
00752                 }
00753                 wLhs = wLhs->next;
00754                 wRhs = wRhs->next;
00755         }
00756         return (wLhs && wLhs->weight > 0)
00757           ||   (wRhs && wRhs->weight < 0);
00758 }
00759 int MinimizeBuilder::CmpByWeight::compare(const LitRep& lhs, const LitRep& rhs) const {
00760         if (this->operator()(lhs, rhs)) return 1;
00761         if (this->operator()(rhs, lhs)) return -1;
00762         return 0;
00763 }
00764 
00765 // frees the given weight list
00766 void MinimizeBuilder::Weight::free(Weight*& head) {
00767         for (Weight* r = head; r;) {
00768                 Weight* t = r;
00769                 r         = r->next;
00770                 delete t;
00771         }
00772         head = 0;
00773 }
00774 
00775 // flattens the given weight w and adds the flattened representation to x
00776 // RETURN: starting position of w in x
00777 weight_t MinimizeBuilder::addFlattened(SharedData::WeightVec& x, const Weight& w) {
00778         typedef SharedData::LevelWeight WT;
00779         uint32 idx       = static_cast<uint32>(x.size());
00780         const Weight* r  = &w;
00781         while (r) {
00782                 x.push_back(WT(r->level, r->weight));
00783                 x.back().next  = (r->next != 0);
00784                 r              = r->next;
00785         }
00786         return idx;
00787 }
00788 // returns true if lhs is equal to w
00789 bool MinimizeBuilder::eqWeight(const SharedData::LevelWeight* lhs, const Weight& w) {
00790         const Weight* r = &w;
00791         do {
00792                 if (lhs->level != r->level || lhs->weight != r->weight) {
00793                         return false;
00794                 }
00795                 r = r->next;
00796                 if (lhs->next == 0) return r == 0;
00797                 ++lhs;
00798         } while(r);
00799         return false;
00800 }
00801 
00803 // UncoreMinimize
00805 UncoreMinimize::UncoreMinimize(SharedMinimizeData* d, bool pre) 
00806         : MinimizeConstraint(d)
00807         , enum_(0)
00808         , sum_(new wsum_t[d->numRules()])
00809         , auxInit_(UINT32_MAX)
00810         , auxAdd_(0)
00811         , freeOpen_(0) {
00812         hasPre_ = static_cast<uint32>(pre);
00813 }
00814 void UncoreMinimize::init() {
00815         releaseLits();
00816         fix_.clear();
00817         eRoot_ = 0;
00818         aTop_  = 0;
00819         upper_ = shared_->upper(0);
00820         lower_ = 0;
00821         gen_   = 0;
00822         level_ = 0;
00823         valid_ = 0;
00824         sat_   = 0;
00825         pre_   = 0;
00826         path_  = 1;
00827         next_  = 0;
00828         init_  = 1;
00829 }
00830 bool UncoreMinimize::attach(Solver& s) {
00831         init();
00832         initRoot(s);
00833         auxInit_ = UINT32_MAX;
00834         auxAdd_  = 0;
00835         if (s.sharedContext()->concurrency() > 1 && shared_->mode() == MinimizeMode_t::enumOpt) {
00836                 enum_ = new DefaultMinimize(shared_->share(), 0);
00837                 enum_->attach(s);
00838                 enum_->relaxBound(true);
00839         }
00840         return true;
00841 }
00842 // Detaches the constraint and all cores from s and removes 
00843 // any introduced aux vars.
00844 void UncoreMinimize::detach(Solver* s, bool b) {
00845         releaseLits();
00846         for (ConTable::iterator it = closed_.begin(), end = closed_.end(); it != end; ++it) {
00847                 (*it)->destroy(s, b);
00848         }
00849         closed_.clear();
00850         if (s && s->numAuxVars() == (auxInit_ + auxAdd_)) {
00851                 s->popAuxVar(auxAdd_);
00852                 auxAdd_ = 0;
00853         }
00854         fix_.clear();
00855 }
00856 // Destroys this object and optionally detaches it from the given solver.
00857 void UncoreMinimize::destroy(Solver* s, bool b) {
00858         detach(s, b);
00859         delete [] sum_;
00860         if (enum_) { enum_->destroy(s, b); enum_ = 0; }
00861         MinimizeConstraint::destroy(s, b);
00862 }
00863 Constraint::PropResult UncoreMinimize::propagate(Solver&, Literal, uint32&) {
00864         return PropResult(true, true);
00865 }
00866 bool UncoreMinimize::simplify(Solver& s, bool) {
00867         if (s.decisionLevel() == 0) { simplifyDB(s, closed_, false); }
00868         return false;
00869 }
00870 void UncoreMinimize::reason(Solver& s, Literal, LitVec& out) {
00871         uint32 r = initRoot(s);
00872         for (uint32 i = 1; i <= r; ++i) {
00873                 out.push_back(s.decision(i));
00874         }
00875 }
00876 
00877 // Integrates any new information from this constraint into s.
00878 bool UncoreMinimize::integrate(Solver& s) {
00879         assert(!s.isFalse(tag_));
00880         bool useTag = (shared_->mode() == MinimizeMode_t::enumOpt) || s.sharedContext()->concurrency() > 1;
00881         if (!prepare(s, useTag)) {
00882                 return false;
00883         }
00884         if (enum_ && !shared_->optimize() && !enum_->integrateBound(s)) {
00885                 return false;
00886         }
00887         for (uint32 gGen = shared_->generation(); gGen != gen_; ) {
00888                 gen_   = gGen;
00889                 upper_ = shared_->upper(level_);
00890                 gGen   = shared_->generation();
00891                 valid_ = 0;
00892         }
00893         return pushPath(s);
00894 }
00895 
00896 // Initializes the next optimization level to look at.
00897 bool UncoreMinimize::initLevel(Solver& s) {
00898         assert(shared_->optimize() || !next_);
00899         initRoot(s);
00900         sat_   = 0;
00901         pre_   = 0;
00902         path_  = 1;
00903         init_  = 0;
00904         sum_[0]= -1;
00905         for (LitVec::const_iterator it = fix_.begin(), end = fix_.end(); it != end; ++it) {
00906                 if (!s.force(*it, eRoot_, this)) { return false; }
00907         }
00908         if (!shared_->optimize() || !assume_.empty()) {
00909                 return !s.hasConflict(); 
00910         }
00911         releaseLits();
00912         bool hasWeights = false;
00913         for (uint32 level = (level_ + next_), n = 1; level <= shared_->maxLevel() && assume_.empty(); ++level) {
00914                 level_ = level;
00915                 lower_ = std::min(shared_->lower(level), wsum_t(0));
00916                 upper_ = shared_->upper(level_);
00917                 for (const WeightLiteral* it = shared_->lits; !isSentinel(it->first); ++it) {
00918                         if (weight_t w = shared_->weight(*it, level_)){
00919                                 Literal x = it->first;
00920                                 if (w < 0) {
00921                                         w = -w;
00922                                         x = ~x;
00923                                 }
00924                                 if (s.value(x.var()) == value_free || s.level(x.var()) > eRoot_) {
00925                                         addLit(x, w);
00926                                         if (w != 1) { hasWeights = true; }
00927                                 }
00928                                 else if (s.isTrue(x)) {
00929                                         lower_ += w;
00930                                 }
00931                         }
00932                 }
00933                 if (n == 1) {
00934                         if      (lower_ > upper_){ next_ = 1; break; }
00935                         else if (lower_ < upper_){ next_ = (n = 0);  }
00936                         else                     {
00937                                 n     = shared_->checkNext() || level != shared_->maxLevel();
00938                                 next_ = n;
00939                                 while (!assume_.empty()) {
00940                                         fixLit(s, assume_.back().lit);
00941                                         assume_.pop_back();
00942                                 }
00943                                 litData_.clear();
00944                                 assume_.clear();
00945                         }
00946                 }
00947         }
00948         pre_  = hasPre_;
00949         valid_= (pre_ == 0 && !hasWeights);
00950         if (next_ && !s.hasConflict()) {
00951                 s.force(~tag_, Antecedent(0));
00952                 next_ = 0;
00953                 pre_  = 0;
00954         }
00955         return !s.hasConflict();
00956 }
00957 
00958 UncoreMinimize::LitData& UncoreMinimize::addLit(Literal p, weight_t w) {
00959         assert(w > 0);
00960         litData_.push_back(LitData(w, true, 0));
00961         assume_.push_back(LitPair(~p, litData_.size()));
00962         return litData_.back();
00963 }
00964 
00965 // Pushes the active assumptions from the active optimization level to
00966 // the root path.
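// Sketch (editorial addition): assumption literals (negations of the literals
// to be minimized) that are still unassigned are pushed as root-level
// decisions; assumptions already falsified at or below the enumeration root
// immediately yield a one-literal core via addCore().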
00967 bool UncoreMinimize::pushPath(Solver& s) {
00968         bool ok = !s.hasConflict() && !sat_ && (!path_ || (!next_ && !init_) || initLevel(s));
00969         if (path_) {
00970                 initRoot(s);
00971                 ok = ok && s.simplify();
00972                 uint32 j = 0;
00973                 for (uint32 i = 0, end = assume_.size(); i != end; ++i) {
00974                         LitData& x = getData(assume_[i].id);
00975                         if (x.assume) {
00976                                 assume_[j++] = assume_[i];
00977                                 Literal lit  = assume_[i].lit;
00978                                 if      (!ok || s.isTrue(lit)) { continue; }
00979                                 else if (s.value(lit.var()) == value_free) {
00980                                         ok    = s.pushRoot(lit);
00981                                         aTop_ = s.rootLevel();
00982                                 }
00983                                 else if (s.level(lit.var()) > eRoot_) {
00984                                         todo_.push_back(LitPair(~lit, assume_[i].id));
00985                                         ok   = s.force(lit, Antecedent(0));
00986                                 }
00987                                 else {
00988                                         LitPair core(~lit, assume_[i].id);
00989                                         ok = addCore(s, &core, 1, x.weight);
00990                                         end= assume_.size();
00991                                         --j;
00992                                 }
00993                         }
00994                 }
00995                 shrinkVecTo(assume_, j);
00996                 path_ = 0;
00997         }
00998         if (sat_ || (ok && !validLowerBound())) {
00999                 ok   = false;
01000                 sat_ = 1;
01001                 s.setStopConflict();
01002         }
01003         return ok;
01004 }
01005 
01006 // Removes invalid assumptions from the root path.
01007 bool UncoreMinimize::popPath(Solver& s, uint32 dl, LitVec& out) {
01008         CLASP_ASSERT_CONTRACT(dl <= aTop_ && eRoot_ <= aTop_ && "You must not mess with my root level!");
01009         if (dl < eRoot_) { dl = eRoot_; }
01010         if (s.rootLevel() > aTop_) {
01011                 // remember any assumptions added after us and
01012                 // force removal of our assumptions because we want
01013                 // them nicely arranged one after another
01014                 s.popRootLevel(s.rootLevel() - aTop_, &out, true);
01015                 dl    = eRoot_;
01016                 path_ = 1;
01017                 CLASP_FAIL_IF(true, "TODO: splitting not yet supported!");
01018         }
01019         return s.popRootLevel(s.rootLevel() - (aTop_ = dl));
01020 }
01021 
01022 bool UncoreMinimize::relax(Solver& s, bool reset) {
01023         if (sat_ && !reset) {
01024                 // commit cores of last model
01025                 s.setStopConflict();
01026                 LitVec ignore;
01027                 handleUnsat(s, false, ignore);
01028         }
01029         if ((reset && shared_->optimize()) || !assume_.empty() || level_ != shared_->maxLevel() || s.sharedContext()->concurrency() > 1) {
01030                 detach(&s, true);
01031                 init();
01032         }
01033         if (!shared_->optimize()) {
01034                 gen_  = shared_->generation();
01035                 next_ = 0;
01036                 valid_= 1;
01037         }
01038         init_ = 1;
01039         sat_  = 0;
01040         assert(assume_.empty());
01041         return !enum_ || enum_->relax(s, reset);
01042 }
01043 
01044 // Computes the costs of the current assignment.
01045 wsum_t* UncoreMinimize::computeSum(Solver& s) const {
01046         std::fill_n(sum_, shared_->numRules(), wsum_t(0));
01047         for (const WeightLiteral* it = shared_->lits; !isSentinel(it->first); ++it) {
01048                 if (s.isTrue(it->first)) { shared_->add(sum_, *it); }
01049         }
01050         return sum_;
01051 }
01052 
01053 // Checks whether the current assignment in s is valid w.r.t. the
01054 // bound stored in the shared data object.
01055 bool UncoreMinimize::valid(Solver& s) {
01056         if (shared_->upper(level_) == SharedData::maxBound()){ return true; }
01057         if (gen_ == shared_->generation() && valid_ == 1)    { return true; }
01058         if (sum_[0] < 0) { computeSum(s); }
01059         const wsum_t* rhs;
01060         uint32 end  = shared_->numRules();
01061         wsum_t cmp  = 0;
01062         do {
01063                 gen_  = shared_->generation();
01064                 rhs   = shared_->upper();
01065                 upper_= rhs[level_];
01066                 for (uint32 i = level_; i != end && (cmp = sum_[i]-rhs[i]) == 0; ++i) { ; }
01067         } while (gen_ != shared_->generation());
01068         wsum_t low = sum_[level_];
01069         if (s.numFreeVars() != 0) { sum_[0] = -1; }
01070         if (cmp < wsum_t(!shared_->checkNext())) {
01071                 valid_ = s.numFreeVars() == 0;
01072                 return true;
01073         }
01074         valid_ = 0;
01075         sat_   = 1;
01076         setLower(low);
01077         s.setStopConflict();
01078         return false;
01079 }
01080 // Sets the current sum as the current shared optimum.
01081 bool UncoreMinimize::handleModel(Solver& s) {
01082         if (!valid(s))  { return false; }
01083         if (sum_[0] < 0){ computeSum(s);}
01084         shared_->setOptimum(sum_);
01085         sat_  = shared_->checkNext();
01086         gen_  = shared_->generation();
01087         upper_= shared_->upper(level_);
01088         valid_= 1;
01089         if (sat_) { setLower(sum_[level_]); }
01090         return true;
01091 }
01092 
01093 // Tries to recover from either a model or a root-level conflict.
01094 bool UncoreMinimize::handleUnsat(Solver& s, bool up, LitVec& out) {
01095         assert(s.hasConflict());
01096         if (enum_) { enum_->relaxBound(true); }
01097         path_   = 1;
01098         sum_[0] = -1;
01099         do {
01100                 if (sat_ == 0) {
01101                         if (s.hasStopConflict()) { return false; }
01102                         conflict_ = s.conflict();
01103                         if (s.strategy.search == SolverStrategies::no_learning) {
01104                                 conflict_.clear();
01105                                 for (uint32 i = 1, end = s.decisionLevel(); i <= end; ++i) { conflict_.push_back(s.decision(i)); }
01106                         }
01107                         weight_t mw;
01108                         uint32 cs = analyze(s, conflict_, mw, out);
01109                         if (!cs) {
01110                                 todo_.clear();
01111                                 return false;
01112                         }
01113                         if (pre_ == 0) {
01114                                 addCore(s, &todo_[0], cs, mw);
01115                                 todo_.clear();
01116                         }
01117                         else { // preprocessing: remove assumptions and remember core
01118                                 todo_.push_back(LitPair(posLit(0), 0)); // null-terminate core
01119                                 lower_ += mw;
01120                                 for(LitSet::iterator it = todo_.end() - (cs + 1); it->id; ++it) {
01121                                         getData(it->id).assume = 0;
01122                                 }
01123                         }
01124                         sat_ = !validLowerBound();
01125                 }
01126                 else {
01127                         s.clearStopConflict();
01128                         popPath(s, 0, out);
01129                         sat_ = 0;
01130                         for (LitSet::iterator it = todo_.begin(), end = todo_.end(), cs; (cs = it) != end; ++it) {
01131                                 weight_t w = std::numeric_limits<weight_t>::max();
01132                                 while (it->id) { w = std::min(w, getData(it->id).weight); ++it; }
01133                                 lower_    -= w;
01134                                 if (!addCore(s, &*cs, uint32(it - cs), w)) { break; }
01135                         }
01136                         wsum_t cmp = (lower_ - upper_);
01137                         if (cmp >= 0) {
01138                                 fixLevel(s);
01139                                 if (cmp > 0) { s.hasConflict() || s.force(~tag_, Antecedent(0)); }
01140                                 else         { next_ = level_ != shared_->maxLevel() || shared_->checkNext(); }
01141                         }
01142                         if (pre_) { 
01143                                 LitSet().swap(todo_);
01144                                 pre_ = 0; 
01145                         }
01146                 }
01147                 if (up && shared_->lower(level_) < lower_) { shared_->setLower(level_, lower_); }
01148         } while (sat_ || s.hasConflict() || (next_ && out.empty() && !initLevel(s)));
01149         return true;
01150 }
01151 
01152 // Analyzes the current root level conflict and stores the set of our assumptions
01153 // that caused the conflict in todo_.
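// Sketch (editorial addition): starting from the conflict clause, literals are
// resolved against their reasons in reverse trail order; root decisions
// without a reason between the enumeration root and the assumption top are
// marked and afterwards mapped back to the corresponding entries in assume_,
// which form the new core collected in todo_.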
01154 uint32 UncoreMinimize::analyze(Solver& s, LitVec& rhs, weight_t& minW, LitVec& poppedOther) {
01155         uint32 marked = 0;                // number of literals to resolve out
01156         uint32 tPos   = s.trail().size(); // current position in trail
01157         uint32 cs     = 0, dl, roots = 0;
01158         minW          = std::numeric_limits<weight_t>::max();
01159         uint32 minDL  = s.decisionLevel();
01160         if (!todo_.empty() && todo_.back().id) { 
01161                 cs   = 1;
01162                 minW = getData(todo_.back().id).weight;
01163                 minDL= s.level(todo_.back().lit.var());
01164         }
01165         if (s.decisionLevel() <= eRoot_) {
01166                 return cs;
01167         }
01168         // resolve all-last uip
01169         Literal p;
01170         for (Var v;;) {
01171                 // process current rhs
01172                 for (LitVec::size_type i = 0; i != rhs.size(); ++i) {
01173                         if (!s.seen(v = rhs[i].var())) {
01174                                 assert(s.level(v) <= s.decisionLevel());
01175                                 s.markSeen(v);
01176                                 ++marked;
01177                         }
01178                 }
01179                 rhs.clear();
01180                 if (marked-- == 0) { break; }
01181                 // search for the last assigned literal that needs to be analyzed...
01182                 while (!s.seen(s.trail()[--tPos].var())) { ; }  
01183                 p  = s.trail()[tPos];
01184                 dl = s.level(p.var());
01185                 assert(dl);
01186                 s.clearSeen(p.var());
01187                 if      (!s.reason(p).isNull()) { s.reason(p, rhs); }
01188                 else if (p == s.decision(dl) && dl > eRoot_ && dl <= aTop_) {
01189                         s.markSeen(p);
01190                         ++roots;
01191                 }
01192         }
01193         // map marked root decisions back to our assumptions
01194         for (LitSet::iterator it = assume_.begin(), end = assume_.end(); it != end && roots; ++it) {
01195                 if (s.seen(p = it->lit) && (dl = s.level(p.var())) != 0) {
01196                         assert(p == s.decision(dl) && getData(it->id).assume);
01197                         if (dl < minDL) { minDL = dl; }
01198                         minW = std::min(getData(it->id).weight, minW);
01199                         todo_.push_back(LitPair(~p, it->id));
01200                         ++cs;
01201                         s.clearSeen(p.var());
01202                         --roots;
01203                 }
01204         }
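        // undo the assignment down to one level below the lowest decision level
        // involved in the new core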
01205         popPath(s, minDL - (minDL != 0), poppedOther);
01206         if (roots) { // clear remaining levels - can only happen if someone messed with our assumptions
01207                 for (uint32 i = s.decisionLevel(); i; --i) { s.clearSeen(s.decision(i).var()); }
01208         }
01209         return cs;
01210 }
01211 
01212 // Eliminates the given core by adding suitable constraints to the solver.
01213 bool UncoreMinimize::addCore(Solver& s, const LitPair* lits, uint32 cs, weight_t w) {
01214         assert(s.decisionLevel() == s.rootLevel());
01215         assert(w && cs);
01216         // apply weight and check for subcores
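        // the core is counted with the minimum weight w of its literals; each literal's
        // weight is reduced by w, and literals whose weight drops to zero are no longer
        // assumed individually (weight splitting)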
01217         lower_ += w;
01218         for (uint32 i = 0; i != cs; ++i) {
01219                 LitData& x = getData(lits[i].id);
01220                 if ( (x.weight -= w) <= 0) {
01221                         x.assume = 0;
01222                         x.weight = 0;
01223                 }
01224                 else if (pre_ && !x.assume) {
01225                         x.assume = 1;
01226                         assume_.push_back(LitPair(~lits[i].lit, lits[i].id));
01227                 }
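                // a literal that stems from an earlier core and just reached weight zero
                // triggers a relaxation of that core: the core is rebuilt with its bound
                // increased by one (in the style of the OLL algorithm), allowing one more
                // of its literals to be satisfied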
01228                 if (x.weight == 0 && hasCore(x)) {
01229                         Core& core = getCore(x);
01230                         temp_.start(core.bound + 1);
01231                         for (uint32 k = 0, end = core.size(); k != end; ++k) {
01232                                 Literal p = core.at(k);
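                                // literals of the old core that are currently assigned above the
                                // external root are freed again by popping root levels before they
                                // are re-added to the relaxed core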
01233                                 while (s.value(p.var()) != value_free && s.rootLevel() > eRoot_) {
01234                                         uint32 dl = s.level(p.var());
01235                                         if (dl > eRoot_){ --dl; s.popRootLevel(s.rootLevel() - dl); }
01236                                         else            { s.popRootLevel(s.rootLevel() - eRoot_); }
01237                                         aTop_ = std::min(aTop_, s.rootLevel());
01238                                 }
01239                                 temp_.add(s, p);
01240                         }
01241                         weight_t cw = core.weight;
01242                         if (!closeCore(s, x, temp_.bound <= 1) || !addCore(s, temp_, cw)) {
01243                                 return false;
01244                         }
01245                 }
01246         }
01247         // add new core
01248         temp_.start(2);
01249         for (uint32 i = 0; i != cs; ++i) { temp_.add(s, lits[i].lit); }
01250         if (!temp_.unsat()) { 
01251                 return addCore(s, temp_, w);
01252         }
01253         Literal fix = !temp_.lits.empty() ? temp_.lits[0].first : lits[0].lit;
01254         return temp_.bound < 2 || fixLit(s, fix);
01255 }
01256 bool UncoreMinimize::addCore(Solver& s, const WCTemp& wc, weight_t weight) {
01257         typedef WeightConstraint::CPair ResPair;
01258         weight_t B = wc.bound;
01259         if (B <= 0) { 
01260                 // constraint is already satisfied and hence conflicting w.r.t. the new
01261                 // assumption - relax the core
01262                 lower_ += ((1-B)*weight);
01263                 B       = 1;
01264         }
01265         if (static_cast<uint32>(B) > static_cast<uint32>(wc.lits.size())) {
01266                 // constraint is unsat and hence the new assumption is trivially satisfied
01267                 return true; 
01268         }
01269         // create new var for this core
01270         if (auxInit_ == UINT32_MAX) { auxInit_ = s.numAuxVars(); }
01271         Var newAux = s.pushAuxVar();
01272         ++auxAdd_;
01273         LitData& x = addLit(negLit(newAux), weight);
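        // negLit(newAux) acts as the relaxation literal of this core: the weight
        // constraint created below makes it true iff at least B of the literals in wc
        // are true, and violating it costs 'weight'; its complement is used as the new
        // assumption, bounding the number of satisfied core literals by B-1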
01274         WeightLitsRep rep = {&const_cast<WCTemp&>(wc).lits[0], (uint32)wc.lits.size(), B, (weight_t)wc.lits.size()};
01275         uint32       fset = WeightConstraint::create_explicit | WeightConstraint::create_no_add | WeightConstraint::create_no_freeze | WeightConstraint::create_no_share;
01276         ResPair       res = WeightConstraint::create(s, negLit(newAux), rep, fset);
01277         if (res.ok() && res.first()) {
01278                 x.coreId = allocCore(res.first(), B, weight, rep.bound != rep.reach);
01279         }
01280         return !s.hasConflict();
01281 }
01282 
01283 
01284 // Computes the solver's initial root level, i.e. all assumptions that are not from us.
01285 uint32 UncoreMinimize::initRoot(Solver& s) {
01286         if (eRoot_ == aTop_ && !s.hasStopConflict()) {
01287                 eRoot_ = s.rootLevel();
01288                 aTop_  = eRoot_;
01289         }
01290         return eRoot_;
01291 }
01292 
01293 // Assigns p at the solver's initial root level.
01294 bool UncoreMinimize::fixLit(Solver& s, Literal p) {
01295         assert(s.decisionLevel() >= eRoot_);
01296         if (!s.isTrue(p) || s.level(p.var()) != 0) {
01297                 if (s.decisionLevel() > eRoot_) {
01298                         s.popRootLevel(s.rootLevel() - eRoot_);
01299                         aTop_ = s.rootLevel();
01300                 }
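                // remember the literal - presumably so that it can be re-asserted if the
                // root level has to be re-established later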
01301                 if (eRoot_) { fix_.push_back(p); }
01302                 return !s.hasConflict() && s.force(p, eRoot_, this);
01303         }
01304         return !s.hasConflict();
01305 }
01306 // Fixes any remaining assumptions of the active optimization level.
01307 bool UncoreMinimize::fixLevel(Solver& s) {
01308         for (LitSet::iterator it = assume_.begin(), end = assume_.end(); it != end; ++it) {
01309                 if (getData(it->id).assume) { fixLit(s, it->lit); }
01310         }
01311         releaseLits();
01312         return !s.hasConflict();
01313 }
01314 void UncoreMinimize::releaseLits() {
01315         // remaining cores are no longer open - move to closed list
01316         for (CoreTable::iterator it = open_.begin(), end = open_.end(); it != end; ++it) {
01317                 if (it->con) { closed_.push_back(it->con); }
01318         }
01319         open_.clear();
01320         litData_.clear();
01321         assume_.clear();
01322         todo_.clear();
01323         freeOpen_ = 0;
01324 }
01325 
01326 uint32 UncoreMinimize::allocCore(WeightConstraint* con, weight_t bound, weight_t weight, bool open) {
01327         if (!open) {
01328                 closed_.push_back(con);
01329                 return 0;
01330         }
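        // open cores are stored in open_ and addressed by 1-based ids; empty slots are
        // chained into a free list whose next index lives in the bound field (see closeCore)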
01331         if (freeOpen_) { // pop next slot from free list
01332                 assert(open_[freeOpen_-1].con == 0);
01333                 uint32 id   = freeOpen_;
01334                 freeOpen_   = static_cast<uint32>(open_[id-1].bound);
01335                 open_[id-1] = Core(con, bound, weight);
01336                 return id;
01337         }
01338         open_.push_back(Core(con, bound, weight));
01339         return static_cast<uint32>(open_.size());
01340 }
01341 bool UncoreMinimize::closeCore(Solver& s, LitData& x, bool sat) {
01342         if (uint32 coreId = x.coreId) {
01343                 Core& core = open_[coreId-1];
01344                 x.coreId   = 0;
01345                 // close by moving to closed list
01346                 if (!sat) { closed_.push_back(core.con); }
01347                 else      { fixLit(s, core.tag()); core.con->destroy(&s, true); }
01348                 // link slot to free list (next free index is stored in the bound field)
01349                 core      = Core(0, static_cast<weight_t>(freeOpen_), 0);
01350                 freeOpen_ = coreId;
01351         }
01352         return !s.hasConflict();
01353 }
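// Literal 0 of the underlying weight constraint is the tag (the side connected to
// the core's aux variable); literals 1..size() are the core's own literals.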
01354 uint32  UncoreMinimize::Core::size()       const { return con->size() - 1; }
01355 Literal UncoreMinimize::Core::at(uint32 i) const { return con->lit(i+1, WeightConstraint::FFB_BTB); }
01356 Literal UncoreMinimize::Core::tag()        const { return con->lit(0, WeightConstraint::FTB_BFB); }
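// Adds p to the temporary core unless it is already decided at the top level;
// literals that are already true simply lower the required bound.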
01357 void    UncoreMinimize::WCTemp::add(Solver& s, Literal p) {
01358         if      (s.topValue(p.var()) == value_free) { lits.push_back(WeightLiteral(p, 1)); }
01359         else if (s.isTrue(p))                       { --bound; }
01360 }
01361 
01362 } // end namespace Clasp

