2 #ifndef OPENGM_FUSION_BASED_INF_HXX
3 #define OPENGM_FUSION_BASED_INF_HXX
61 namespace proposal_gen{
66 template<
class GM,
class ACC>
83 for(
size_t i=0; i<gm.numberOfVariables();++i){
84 if(gm.numberOfLabels(i)>maxLabel_){
85 maxLabel_ = gm.numberOfLabels(i);
96 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
98 for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
100 if (gm_.numberOfLabels(vi) > currentAlpha_ )
102 proposal[vi] = currentAlpha_;
106 proposal[vi] = current[vi];
110 if(currentAlpha_>=maxLabel_){
126 template<
class GM,
class ACC>
136 const std::string startDirection = std::string(
"up")
147 argBuffer_(gm.numberOfVariables(),0),
148 direction_(gm.numberOfVariables())
155 for(
size_t i=0; i<gm_.numberOfVariables();++i){
156 direction_[i]=rand()%2 == 0 ? -1:1;
160 for(
size_t i=0; i<gm_.numberOfVariables();++i){
165 for(
size_t i=0; i<gm_.numberOfVariables();++i){
176 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
178 for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
180 const size_t numL = gm_.numberOfLabels(vi);
185 std::copy(current.begin(), current.end(), argBuffer_.begin());
195 proposal[vi] = cl +1;
199 proposal[vi] = cl - 1 ;
204 proposal[vi] = cl - 1;
208 proposal[vi] = cl + 1 ;
216 std::vector<LabelType> argBuffer_;
217 std::vector<LabelType> direction_;
218 std::vector<LabelType> jumpSize_;
222 template<
class GM,
class ACC>
234 static size_t getMaxLabel(
const GM &gm){
236 for(
size_t i=0; i<gm.numberOfVariables();++i){
237 if(gm.numberOfLabels(i)>maxLabel ){
238 maxLabel = gm.numberOfLabels(i);
247 maxLabel_(getMaxLabel(gm)),
248 abShape_(2, maxLabel_),
249 abWalker_(abShape_.begin(), 2)
260 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
263 for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
264 proposal[vi] = current[vi];
270 while (abWalker_.coordinateTuple()[0] == abWalker_.coordinateTuple()[1])
275 const LabelType alpha = abWalker_.coordinateTuple()[0];
276 const LabelType beta = abWalker_.coordinateTuple()[1];
278 for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi)
280 if ( current[vi] == alpha && gm_.numberOfLabels(vi) > beta )
284 else if ( current[vi] == beta && gm_.numberOfLabels(vi) > alpha )
286 proposal[vi] = alpha;
290 proposal[vi] = current[vi];
298 return abWalker_.coordinateTuple()[0];
302 return abWalker_.coordinateTuple()[1];
309 std::vector<LabelType> abShape_;
310 ShapeWalker<typename std::vector<LabelType>::const_iterator> abWalker_;
314 template<
class GM,
class ACC>
336 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
338 for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
340 opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(vi),currentStep_+vi);
341 proposal[vi] = randomLabel();
352 template<
class GM,
class ACC>
374 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
376 for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
378 opengm::RandomUniform<size_t> randomLabel(0,3,currentStep_+vi);
379 proposal[vi] = std::min(randomLabel(),
size_t(1));
391 template<
class GM,
class ACC>
413 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
415 for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
417 opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(vi),currentStep_+vi);
418 proposal[vi] = randomLabel();
433 template<
class GM,
class ACC>
452 randomGens_(gm.numberOfVariables())
454 std::vector<bool> hasUnary(gm.numberOfVariables(),
false);
456 for(IndexType fi=0; fi<gm_.numberOfFactors(); ++fi){
458 if(gm_[fi].numberOfVariables()==1){
460 const IndexType vi = gm_[fi].variableIndex(0);
461 const LabelType numLabels = gm_.numberOfLabels(vi);
462 std::vector<ValueType> weights(numLabels);
463 gm_[fi].copyValues(&weights[0]);
464 const ValueType minValue = *std::min_element(weights.begin(),weights.end());
466 weights[l]-= minValue;
470 weights[l]=std::exp(-1.0*param_.
temp_*weights[l]);
472 randomGens_[vi]=GenType(weights.begin(),weights.end());
476 for(IndexType vi=0 ;vi<gm_.numberOfVariables(); ++vi){
478 const LabelType numLabels = gm_.numberOfLabels(vi);
479 std::vector<ValueType> weights(numLabels,1.0);
480 randomGens_[vi]=GenType(weights.begin(),weights.end());
494 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
496 for (IndexType vi = 0; vi < gm_.numberOfVariables(); ++vi){
497 proposal[vi]=randomGens_[vi]();
506 typedef RandomDiscreteWeighted<LabelType,ValueType> GenType;
508 std::vector < RandomDiscreteWeighted<LabelType,ValueType> > randomGens_;
512 template<
class GM,
class ACC>
531 const double pi = 3.1416;
532 const double oneOverSqrt2PiSigmaSquared = 1.0 / (std::sqrt(2.0 * pi) * param_.
sigma_);
533 const double oneOverTwoSigmaSquared = 1.0 / (2.0* param_.
sigma_ * param_.
sigma_);
534 const size_t kradius = std::ceil(3*param_.
sigma_);
535 kernel_.resize(2*kradius + 1);
537 for(
double i = 0; i <= kradius ; ++i) {
538 double value = oneOverSqrt2PiSigmaSquared * std::exp(-(i*i)*oneOverTwoSigmaSquared);
539 kernel_[kradius+i] = value;
540 kernel_[kradius-i] = value;
543 for(
double i = 0; i <= kradius ; ++i) {
544 kernel_[kradius+i] /= sum;
545 kernel_[kradius-i] /= sum;
548 size_t N = gm_.numberOfFactors(0);
549 for(
size_t i=1; i<gm_.numberOfVariables(); ++i){
550 if(N==gm_.numberOfFactors(i)){
556 width_ = gm_.numberOfVariables()/height_;
561 bluredLabel_.resize(gm_.numberOfVariables(),0);
562 std::vector<double> temp(gm_.numberOfVariables(),0.0);
563 std::vector<LabelType> localLabel(gm_.numberOfVariables(),0);
564 for (
size_t i=0; i<gm_.numberOfVariables(); ++i){
565 for(
typename GM::ConstFactorIterator it=gm_.factorsOfVariableBegin(i); it!=gm_.factorsOfVariableEnd(i);++it){
566 if(gm_[*it].numberOfVariables() == 1){
569 for(
LabelType l=0; l<gm_.numberOfLabels(i); ++l){
570 if(ACC::bop(gm_[*it](&l),v)){
579 const int radius = (kernel_.size()-1)/2;
580 const int h = height_-1;
581 const int w = width_ -1;
582 for (
int i = 0; i < height_; ++i) {
583 for (
int j = 0; j < width_; ++j) {
585 for (
int k = 0; k < 2*radius+1; ++k) {
586 int i2 = std::min( h,std::max(0,i-radius+k));
587 val += kernel_[k] * localLabel[ind(i2,j)];
589 temp[ind(i,j)] = val;
592 for (
int i = 0; i < height_; ++i) {
593 for (
int j = 0; j < width_; ++j) {
595 for (
int k = 0; k < 2*radius+1; ++k) {
596 int j2 = std::min(w,std::max(0,i-radius+k));
597 val += kernel_[k] * temp[ind(i, j2)];
599 bluredLabel_[ind(i,j)] = std::min(
double(gm_.numberOfLabels(ind(i,j))),(std::max(0.0,val)));
607 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
609 if ((currentStep_ % 2) == 0){
610 for (
int i = 0; i < height_; ++i) {
611 for (
int j = 0; j < width_; ++j) {
612 const size_t var = ind(i,j);
613 opengm::RandomUniform<size_t> randomLabel(0, gm_.numberOfLabels(var),currentStep_+i+j);
614 proposal[var] = (
LabelType)(randomLabel());
618 proposal.resize(gm_.numberOfVariables(),0.0);
619 opengm::RandomUniform<double> randomLabel(-param_.
sigma_*1.5, param_.
sigma_*1.5,currentStep_);
620 for(
size_t i=0; i<proposal.size();++i){
621 proposal[i] = std::min(gm_.numberOfLabels(i), (
LabelType)(std::max(0.0,bluredLabel_[i] + randomLabel())));
627 size_t ind(
int i,
int j){
return i+j*height_;}
632 std::vector<double> kernel_;
633 std::vector<double> bluredLabel_;
638 template<
class GM,
class ACC>
660 const double pi = 3.1416;
661 const double oneOverSqrt2PiSigmaSquared = 1.0 / (std::sqrt(2.0 * pi) * param_.
sigma_);
662 const double oneOverTwoSigmaSquared = 1.0 / (2.0* param_.
sigma_ * param_.
sigma_);
663 const size_t kradius = std::ceil(3*param_.
sigma_);
664 std::vector<double> kernel;
665 kernel.resize(2*kradius + 1);
667 for(
double i = 0; i <= kradius ; ++i) {
668 double value = oneOverSqrt2PiSigmaSquared * std::exp(-(i*i)*oneOverTwoSigmaSquared);
669 kernel[kradius+i] = value;
670 kernel[kradius-i] = value;
673 for(
double i = 0; i <= kradius ; ++i) {
674 kernel[kradius+i] /= sum;
675 kernel[kradius-i] /= sum;
678 size_t N = gm_.numberOfFactors(0);
679 for(
size_t i=1; i<gm_.numberOfVariables(); ++i){
680 if(N==gm_.numberOfFactors(i)){
686 width_ = gm_.numberOfVariables()/height_;
691 size_t numLabels =gm_.numberOfLabels(0);
692 std::vector<double> temp(gm_.numberOfVariables(),0.0);
693 std::vector<double> bluredEnergy(gm_.numberOfVariables(),1000000000000.0);
694 std::vector<double> bluredOpt(gm_.numberOfVariables(),0);
695 std::vector<double> energy(gm_.numberOfVariables(),0.0);
696 std::vector<IndexType> unaries(gm_.numberOfVariables());
697 std::vector<std::vector<double> > margs;;
699 margs.resize(gm_.numberOfVariables(),std::vector<double>(numLabels));
701 for (
size_t i=0; i<gm_.numberOfVariables(); ++i){
703 for(
typename GM::ConstFactorIterator it=gm_.factorsOfVariableBegin(i); it!=gm_.factorsOfVariableEnd(i);++it){
704 if(gm_[*it].numberOfVariables() == 1){
707 if(gm_[*it].numberOfLabels(0) != numLabels)
708 throw RuntimeError(
"number of labels are not equal for all variables");
717 for(
size_t l=0; l<numLabels; ++l){
718 for (
int i = 0; i < height_; ++i) {
719 for (
int j = 0; j < width_; ++j) {
720 const size_t var = ind(i, j);
721 energy[var] =gm_[unaries[ind(i, j)]](&l);
725 const int radius = (kernel.size()-1)/2;
726 const int h = height_-1;
727 const int w = width_ -1;
728 for (
int i = 0; i < height_; ++i) {
729 for (
int j = 0; j < width_; ++j) {
731 const size_t var = ind(i, j);
732 for (
int k = 0; k < 2*radius+1; ++k) {
733 int i2 = std::min( h,std::max(0,i-radius+k));
734 val += kernel[k] * energy[ind(i2,j)];
739 for (
int i = 0; i < height_; ++i) {
740 for (
int j = 0; j < width_; ++j) {
742 const size_t var = ind(i, j);
743 for (
int k = 0; k < 2*radius+1; ++k) {
744 int j2 = std::min(w,std::max(0,i-radius+k));
745 val += kernel[k] * temp[ind(i, j2)];
750 if(val < bluredEnergy[var]){
751 bluredEnergy[var] = val;
759 localMargGens_.reserve(bluredOpt.size());
760 for(
size_t var=0 ; var<bluredOpt.size(); ++var){
761 const ValueType minValue = *std::min_element(margs[var].begin(),margs[var].end());
763 margs[var][l]-= minValue;
766 margs[var][l]=std::exp(-1.0*param_.
temp_*margs[var][l]);
768 localMargGens_[var]=opengm::RandomDiscreteWeighted<LabelType,ValueType>(margs[var].begin(),margs[var].end(),var);
771 uniformGens_.reserve(bluredOpt.size());
772 for(
size_t var=0 ; var<bluredOpt.size(); ++var){
775 uniformGens_[var] = opengm::RandomUniform<LabelType>(minVal, maxVal+1, var);
783 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal)
785 proposal.resize(gm_.numberOfVariables());
787 for(
size_t i=0; i<proposal.size();++i){
788 proposal[i] = localMargGens_[i]();
792 opengm::RandomUniform<LabelType> randomLabel(0, gm_.numberOfLabels(0),currentStep_);
793 if ((currentStep_ % 2) == 0){
794 for(
size_t i=0; i<proposal.size();++i){
795 proposal[i] = randomLabel();
798 for(
size_t i=0; i<proposal.size();++i){
799 proposal[i] = uniformGens_[i]();
806 size_t ind(
int i,
int j){
return i+j*height_;}
814 std::vector<opengm::RandomDiscreteWeighted<LabelType,ValueType> > localMargGens_;
815 std::vector<opengm::RandomUniform<LabelType> > uniformGens_;
819 template<
class GM,
class ACC>
848 alphaExpansionGen_->reset();
850 alphaBetaSwapGen_->reset();
856 randomLFGen_->reset();
858 nonUniformRandomGen_->reset();
862 energyBlurGen_->reset();
869 return alphaExpansionGen_->defaultNumStopIt();
871 return alphaBetaSwapGen_->defaultNumStopIt();
873 return upDownGen_->defaultNumStopIt();
875 return randomGen_->defaultNumStopIt();
877 return randomLFGen_->defaultNumStopIt();
879 return nonUniformRandomGen_->defaultNumStopIt();
881 return blurGen_->defaultNumStopIt();
883 return energyBlurGen_->defaultNumStopIt();
888 void getProposal(
const std::vector<LabelType> ¤t , std::vector<LabelType> &proposal){
890 return alphaExpansionGen_->getProposal(current, proposal);
892 return alphaBetaSwapGen_->getProposal(current, proposal);
894 return upDownGen_->getProposal(current, proposal);
896 return randomGen_->getProposal(current, proposal);
898 return randomLFGen_->getProposal(current, proposal);
900 return nonUniformRandomGen_->getProposal(current, proposal);
902 return blurGen_->getProposal(current, proposal);
904 return energyBlurGen_->getProposal(current, proposal);
928 template<
class GM,
class PROPOSAL_GEN>
934 typedef AccumulationType
ACC;
958 const size_t numIt=1000,
959 const size_t numStopIt = 0
979 std::string
name()
const;
983 template<
class VisitorType>
991 const GraphicalModelType &gm_;
994 FusionMoverType * fusionMover_;
996 PROPOSAL_GEN proposalGen_;
998 std::vector<LabelType> bestArg_;
1005 template<
class GM,
class PROPOSAL_GEN>
1008 const GraphicalModelType &gm,
1016 bestArg_(gm_.numberOfVariables(), 0),
1017 maxOrder_(gm.factorOrder())
1019 ACC::neutral(bestValue_);
1020 fusionMover_ =
new FusionMoverType(gm_,parameter.
fusionParam_);
1022 std::vector<LabelType> conf(gm_.numberOfVariables(),0);
1023 for (
size_t i=0; i<gm_.numberOfVariables(); ++i){
1024 for(
typename GM::ConstFactorIterator it=gm_.factorsOfVariableBegin(i); it!=gm_.factorsOfVariableEnd(i);++it){
1025 if(gm_[*it].numberOfVariables() == 1){
1028 for(
LabelType l=0; l<gm_.numberOfLabels(i); ++l){
1029 if(ACC::bop(gm_[*it](&l),v)){
1038 setStartingPoint(conf.begin());
1040 template<
class GM,
class PROPOSAL_GEN>
1043 delete fusionMover_;
1047 template<
class GM,
class PROPOSAL_GEN>
1054 template<
class GM,
class PROPOSAL_GEN>
1061 std::copy(begin, begin + gm_.numberOfVariables(), bestArg_.begin());
1062 bestValue_ = gm_.evaluate(bestArg_.begin());
1065 template<
class GM,
class PROPOSAL_GEN>
1069 return "FusionBasedInf";
1072 template<
class GM,
class PROPOSAL_GEN>
1079 template<
class GM,
class PROPOSAL_GEN>
1088 template<
class GM,
class PROPOSAL_GEN>
1089 template<
class VisitorType>
1092 VisitorType &visitor
1096 bestValue_ = gm_.evaluate(bestArg_.begin());
1098 visitor.begin(*
this);
1101 if(param_.numStopIt_ == 0){
1102 param_.numStopIt_ = proposalGen_.defaultNumStopIt();
1105 std::vector<LabelType> proposedState(gm_.numberOfVariables());
1106 std::vector<LabelType> fusedState(gm_.numberOfVariables());
1108 size_t countRoundsWithNoImprovement = 0;
1110 for(
size_t iteration=0; iteration<param_.numIt_; ++iteration){
1112 const ValueType valueBeforeRound = bestValue_;
1114 proposalGen_.getProposal(bestArg_,proposedState);
1117 ValueType proposalValue = gm_.evaluate(proposedState);
1120 const bool anyVar = fusionMover_->fuse(bestArg_,proposedState, fusedState,
1121 bestValue_, proposalValue, bestValue_);
1125 if( !ACC::bop(bestValue_, valueBeforeRound)){
1126 ++countRoundsWithNoImprovement;
1130 countRoundsWithNoImprovement = 0;
1131 bestArg_ = fusedState;
1133 if(visitor(*
this)!=0){
1138 ++countRoundsWithNoImprovement;
1141 if(countRoundsWithNoImprovement==param_.numStopIt_ && param_.numStopIt_ !=0 )
1151 template<
class GM,
class PROPOSAL_GEN>
1155 std::vector<LabelType> &x,
1161 x.resize(gm_.numberOfVariables());
1162 for (
size_t j = 0; j < x.size(); ++j)
1176 #endif // #ifndef OPENGM_FUSION_BASED_INF_HXX
RandomGen(const GM &gm, const Parameter &param)
AlphaBetaSwapGen(const GM &gm, const Parameter &param)
EnergyBlurGen(const GM &gm, const Parameter &param)
size_t defaultNumStopIt()
virtual ValueType value() const
return the solution (value)
FusionParameter fusionParam_
opengm::visitors::VerboseVisitor< FusionBasedInf< GM, PROPOSAL_GEN > > VerboseVisitorType
Parameter(double sigma=20.0)
Parameter(const ProposalParameter &proposalParam=ProposalParameter(), const FusionParameter &fusionParam=FusionParameter(), const size_t numIt=1000, const size_t numStopIt=0)
UpDownGen(const GM &gm, const Parameter &param)
const GraphicalModelType & graphicalModel() const
#define OPENGM_ASSERT(expression)
ProposalGen::AccumulationType AccumulationType
size_t defaultNumStopIt()
AlphaExpansionGen(const GM &gm, const Parameter &param)
opengm::visitors::EmptyVisitor< FusionBasedInf< GM, PROPOSAL_GEN > > EmptyVisitorType
RandomLFGen(const GM &gm, const Parameter &param)
std::string startDirection_
void getProposal(const std::vector< LabelType > &current, std::vector< LabelType > &proposal)
Alpha-Beta-Swap Algorithm.
ProposalParameter proposalParam_
FusionMoverType::Parameter FusionParameter
GraphicalModelType::ValueType ValueType
Inference algorithm interface.
void setStartingPoint(typename std::vector< LabelType >::const_iterator)
set initial labeling
size_t defaultNumStopIt()
opengm::visitors::TimingVisitor< FusionBasedInf< GM, PROPOSAL_GEN > > TimingVisitorType
DynamincGen(const GM &gm, const Parameter ¶m)
size_t defaultNumStopIt()
Alpha-Expansion Algorithm.
HlFusionMover< GraphicalModelType, AccumulationType > FusionMover
Random2Gen(const GM &gm, const Parameter &param)
void getProposal(const std::vector< LabelType > &current, std::vector< LabelType > &proposal)
void getProposal(const std::vector< LabelType > &current, std::vector< LabelType > &proposal)
void getProposal(const std::vector< LabelType > &current, std::vector< LabelType > &proposal)
Parameter(double sigma=20.0, bool useLocalMargs=false, double temp=1)
size_t defaultNumStopIt()
InferenceTermination infer()
void getProposal(const std::vector< LabelType > &current, std::vector< LabelType > &proposal)
void getProposal(const std::vector< LabelType > &current, std::vector< LabelType > &proposal)
void getProposal(const std::vector< LabelType > &current, std::vector< LabelType > &proposal)
BlurGen(const GM &gm, const Parameter &param)
size_t defaultNumStopIt()
HlFusionMover< GraphicalModelType, AccumulationType > FusionMoverType
InferenceTermination arg(std::vector< LabelType > &, const size_t=1) const
output a solution
Parameter(const std::string startDirection=std::string("up"))
GraphicalModelType::LabelType LabelType
void getProposal(const std::vector< LabelType > &current, std::vector< LabelType > &proposal)
A generalization of ICM B. Andres, J. H. Kappes, U. Koethe and Hamprecht F. A., The Lazy Flipper: MA...
size_t defaultNumStopIt()
FusionBasedInf(const GraphicalModelType &, const Parameter &=Parameter())
ProposalGen::Parameter ProposalParameter
size_t defaultNumStopIt()
void getProposal(const std::vector< LabelType > &current, std::vector< LabelType > &proposal)
virtual InferenceTermination arg(std::vector< LabelType > &, const size_t=1) const
output a solution
InferenceTermination infer()
start the algorithm
size_t defaultNumStopIt()