Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions pumpkin-proof-processor/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ authors.workspace = true
[dependencies]
anyhow = "1.0.99"
clap = { version = "4.5.47", features = ["derive"] }
clap-verbosity-flag = "3.0.4"
drcp-format = { version = "0.3.1", path = "../drcp-format" }
fzn-rs = { version = "0.1.0", path = "../fzn-rs" }
flate2 = { version = "1.1.2" }
Expand All @@ -22,6 +23,7 @@ thiserror = "2.0.18"
assert_cmd = "2.1.2"
escargot = "0.5.15"
pumpkin-checker = { path = "../pumpkin-checker" }
regex = "1.12.3"

[lints]
workspace = true
15 changes: 13 additions & 2 deletions pumpkin-proof-processor/src/main.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
mod deduction_propagator;
mod model;
mod predicate_heap;
mod processor;
mod variables;

Expand All @@ -13,6 +14,8 @@ use std::path::PathBuf;
use std::rc::Rc;

use clap::Parser;
use clap_verbosity_flag::InfoLevel;
use clap_verbosity_flag::Verbosity;
use drcp_format::reader::ProofReader;
use drcp_format::writer::ProofWriter;
use pumpkin_core::containers::StorageKey;
Expand All @@ -32,6 +35,9 @@ use crate::variables::Variables;

#[derive(Parser)]
struct Cli {
#[command(flatten)]
verbose: Verbosity<InfoLevel>,
Comment thread
ImkoMarijnissen marked this conversation as resolved.

/// Path to the model file (.fzn).
model_path: PathBuf,

Expand All @@ -43,10 +49,15 @@ struct Cli {
}

fn main() -> anyhow::Result<()> {
env_logger::init();

let cli = Cli::parse();

env_logger::Builder::new()
.format_timestamp(None)
.format_target(false)
.filter_level(cli.verbose.log_level_filter())
.target(env_logger::Target::Stdout)
.init();

let proof_processor = parse_model(&cli.model_path)?;
let proof_reader = create_proof_reader(&cli.scaffold_path)?;
let proof_writer = create_proof_writer(&cli.full_proof_path)?;
Expand Down
70 changes: 70 additions & 0 deletions pumpkin-proof-processor/src/predicate_heap.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
use std::cmp::Ordering;
use std::collections::BinaryHeap;

use pumpkin_core::predicates::Predicate;
use pumpkin_core::state::State;

/// A max-heap of predicates. The keys are based on the trail positions of the predicates in the
/// state, meaning predicates are popped in reverse trail order. Implied predicates are popped
/// before the predicate on the trail that implies the predicate.
#[derive(Clone, Debug, Default)]
pub(crate) struct PredicateHeap {
    // Internal max-heap; `PredicateToExplain` orders by its `priority` key only,
    // so pop order is entirely determined by the priorities assigned in `push`.
    heap: BinaryHeap<PredicateToExplain>,
}

impl PredicateHeap {
/// See [`BinaryHeap::is_empty`].
pub(crate) fn is_empty(&self) -> bool {
self.heap.is_empty()
}

/// See [`BinaryHeap::pop`].
pub(crate) fn pop(&mut self) -> Option<Predicate> {
self.heap.pop().map(|to_explain| to_explain.predicate)
}

/// Push a new predicate onto the heap.
///
/// Its priority will be based on its trail position in the given `state`. This heap will
/// return elements through [`Self::pop`] by reverse-trail order.
Comment thread
ImkoMarijnissen marked this conversation as resolved.
///
/// If the predicate is not true in the given state, this method panics.
pub(crate) fn push(&mut self, predicate: Predicate, state: &State) {
let trail_position = state
.trail_position(predicate)
.expect("predicate must be true in given state");

let priority = if state.is_on_trail(predicate) {
trail_position * 2
} else {
trail_position * 2 + 1
};

self.heap.push(PredicateToExplain {
predicate,
priority,
});
}
}

/// Used to order the predicates in the [`PredicateHeap`].
///
/// The priority is calculated based on the trail position of the predicate and whether the
/// predicate is on the trail or implied: trail predicates get `2 * trail_position`, implied
/// predicates get `2 * trail_position + 1`, so in the max-heap an implied predicate is
/// popped before the trail entry sharing its position.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct PredicateToExplain {
    // The predicate to explain; intentionally not part of the heap ordering.
    predicate: Predicate,
    // Max-heap key derived from the trail position (assigned in `PredicateHeap::push`).
    priority: usize,
}

impl PartialOrd for PredicateToExplain {
    /// Delegates to [`Ord`], as required for a type with a total order.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(Ord::cmp(self, other))
    }
}

impl Ord for PredicateToExplain {
    /// Orders entries solely by their `priority` key; the `predicate` field is ignored.
    ///
    /// NOTE(review): because `predicate` is ignored here, two entries with equal priority
    /// but different predicates compare as `Equal` while the derived `PartialEq` says they
    /// differ. `BinaryHeap` only uses `cmp`, so this is harmless, but it is technically
    /// inconsistent with the derived `Eq` — confirm this is intentional.
    fn cmp(&self, other: &Self) -> Ordering {
        usize::cmp(&self.priority, &other.priority)
    }
}
115 changes: 22 additions & 93 deletions pumpkin-proof-processor/src/processor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,18 +19,13 @@ use drcp_format::writer::ProofWriter;
use log::debug;
use log::info;
use log::trace;
use pumpkin_core::asserts::pumpkin_assert_moderate;
use pumpkin_core::containers::KeyValueHeap;
use pumpkin_core::containers::KeyedVec;
use pumpkin_core::containers::StorageKey;
use pumpkin_core::predicate;
use pumpkin_core::predicates::Predicate;
use pumpkin_core::predicates::PredicateIdGenerator;
use pumpkin_core::predicates::PredicateType;
use pumpkin_core::predicates::PropositionalConjunction;
use pumpkin_core::proof::ConstraintTag;
use pumpkin_core::proof::InferenceCode;
use pumpkin_core::propagation::PredicateId;
use pumpkin_core::state::Conflict;
use pumpkin_core::state::CurrentNogood;
use pumpkin_core::state::PropagatorConflict;
Expand All @@ -39,6 +34,7 @@ use pumpkin_core::state::State;

use crate::deduction_propagator::DeductionPropagator;
use crate::deduction_propagator::DeductionPropagatorConstructor;
use crate::predicate_heap::PredicateHeap;
use crate::variables::Variables;

#[derive(Debug)]
Expand All @@ -50,14 +46,11 @@ pub(crate) struct ProofProcessor {
/// due to the backward trimming.
output_proof: Vec<ProofStage>,

/// A mapping from predicates to predicate IDs.
predicate_ids: PredicateIdGenerator,
/// Heap containing the predicates which still need to be processed; sorted non-increasing
/// based on trail-index where implied predicates are processed first.
/// A queue of predicates that should still be explained.
///
/// Note that two predicates implied by the same trail index do not have an order between each
/// other.
to_process_heap: KeyValueHeap<PredicateId, u32>,
/// This is a re-usable heap, but its contents are specific to the processing of an
/// individual conflict.
to_process_heap: PredicateHeap,
}

/// A single proof stage for the proof.
Expand Down Expand Up @@ -116,8 +109,7 @@ impl ProofProcessor {
state,
variables,
output_proof: vec![],
predicate_ids: PredicateIdGenerator::default(),
to_process_heap: KeyValueHeap::default(),
to_process_heap: PredicateHeap::default(),
}
}

Expand All @@ -140,6 +132,9 @@ impl ProofProcessor {
// constraint and process from there.
let (conclusion, mut nogood_stack) = self.initialise(proof_reader)?;

info!("Processing a proof with {} stages", nogood_stack.len());
let mut num_inferences = 0;

// Next, we will start the backward trimming procedure.
//
// Initialization will have marked the deductions that are used to derive the
Expand Down Expand Up @@ -184,6 +179,7 @@ impl ProofProcessor {
};

let inferences = self.explain_current_conflict(&mut nogood_stack, conflict);
num_inferences += inferences.len();
self.output_proof.push(ProofStage {
inferences,
constraint_id: tag.into(),
Expand All @@ -194,7 +190,11 @@ impl ProofProcessor {
});
}

info!("Writing final proof to file");
info!(
"Writing proof with {} stages and {} inferences",
self.output_proof.len(),
num_inferences,
);

// Finally, we write the output proof to the file. Since the proof stages are in
// reverse order, we iterate from the end to the beginning.
Expand Down Expand Up @@ -357,7 +357,8 @@ impl ProofProcessor {
// If the dual bound is the initial bound on the objective variable, write the
// correct proof in that situation and short-circuit.
if self.state.is_implied_by_initial_domain(predicate) {
self.add_predicate_to_explain(predicate);
self.to_process_heap.push(predicate, &self.state);

let inferences = self.explain_predicates(&mut nogood_stack, vec![]);
let deduction_id = self.state.new_constraint_tag();

Expand Down Expand Up @@ -403,7 +404,7 @@ impl ProofProcessor {
} else {
// In this case we have to explain by 'root propagation' and no deductions
// were used. The predicate is propagated by a propagator.
self.add_predicate_to_explain(predicate);
self.to_process_heap.push(predicate, &self.state);
let inferences = self.explain_predicates(&mut nogood_stack, vec![]);
let deduction_id = self.state.new_constraint_tag();

Expand Down Expand Up @@ -454,11 +455,7 @@ impl ProofProcessor {
nogood_stack: &mut DeductionStack,
conflict: Conflict,
) -> Vec<Inference<String, i32, Arc<str>>> {
assert_eq!(self.to_process_heap.num_nonremoved_elements(), 0);

// The implementation of key-value heap is buggy, but this `clear` seems to reset
// the internals properly.
self.to_process_heap.clear();
assert!(self.to_process_heap.is_empty());

// The inferences in the current conflict ordered by closeness to the conflict.
// Every element is an option, where `None` serves as a tombstone value.
Expand Down Expand Up @@ -526,7 +523,7 @@ impl ProofProcessor {
// Then we add the conflict to the queue of predicates that need to be explained
// through inferences.
for predicate in predicates_to_explain {
self.add_predicate_to_explain(predicate);
self.to_process_heap.push(predicate, &self.state);
}

self.explain_predicates(nogood_stack, inferences)
Expand All @@ -542,9 +539,7 @@ impl ProofProcessor {

// For every predicate in the queue, we will introduce appropriate inferences into
// the proof.
while let Some(predicate_id) = self.to_process_heap.pop_max() {
let predicate = self.predicate_ids.get_predicate(predicate_id);

while let Some(predicate) = self.to_process_heap.pop() {
// The predicate is either propagated or an initial bound. If it is an
// initial bound, we dispatch that here.
if self.state.is_implied_by_initial_domain(predicate) {
Expand Down Expand Up @@ -609,7 +604,7 @@ impl ProofProcessor {
}

for predicate in reason_buffer.drain(..) {
self.add_predicate_to_explain(predicate);
self.to_process_heap.push(predicate, &self.state);
}
}

Expand All @@ -619,72 +614,6 @@ impl ProofProcessor {
inferences.into_iter().rev().flatten().collect()
}

/// Add a predicate that still has to be explained.
fn add_predicate_to_explain(&mut self, predicate: Predicate) {
assert_eq!(
self.state.truth_value(predicate),
Some(true),
"predicate {predicate:?} does not evaluate to true",
);

// The first time we encounter the predicate, we initialise its value in the
// heap.
//
// Note that if the predicate is already in the heap, no action needs to be
// taken. It can happen that a predicate is returned
// multiple times as a reason for other predicates.

// Here we manually adjust the size of the heap to accommodate new elements.
let predicate_id = self.predicate_ids.get_id(predicate);
while self.to_process_heap.len() <= predicate_id.index() {
let next_id = PredicateId::create_from_index(self.to_process_heap.len());
self.to_process_heap.grow(next_id, 0);
self.to_process_heap.delete_key(next_id);
}

// Then we check whether the predicate was not already present in the heap, if
// this is not the case then we insert it
if !self.to_process_heap.is_key_present(predicate_id)
&& *self.to_process_heap.get_value(predicate_id) == 0
{
let trail_position = self.state.trail_position(predicate).unwrap();

// The goal is to traverse predicate in reverse order of the trail.
//
// However some predicates may share the trail position. For example, if a
// predicate that was posted to trail resulted in
// some other predicates being true, then all
// these predicates would have the same trail position.
//
// When considering the predicates in reverse order of the trail, the
// implicitly set predicates are posted after the
// explicitly set one, but they all have the same
// trail position.
//
// To remedy this, we make a tie-breaking scheme to prioritise implied
// predicates over explicit predicates. This is done
// by assigning explicitly set predicates the
// value `2 * trail_position`, whereas implied predicates get `2 *
// trail_position + 1`.
let heap_value = if self.state.is_on_trail(predicate) {
trail_position * 2
} else {
trail_position * 2 + 1
};

// We restore the key and since we know that the value is 0, we can safely
// increment with `heap_value`
self.to_process_heap.restore_key(predicate_id);
self.to_process_heap
.increment(predicate_id, heap_value as u32);

pumpkin_assert_moderate!(
*self.to_process_heap.get_value(predicate_id) == heap_value.try_into().unwrap(),
"The value in the heap should be the same as was added"
)
}
}

fn post_assumptions(&mut self, predicates: &[Predicate]) -> Result<(), Conflict> {
for predicate in predicates.iter() {
let _ = self.state.post(*predicate)?;
Expand Down
Loading
Loading