Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -19,24 +19,83 @@
package org.tweetyproject.causal.parser;

import org.tweetyproject.causal.syntax.CausalKnowledgeBase;
import org.tweetyproject.causal.syntax.StructuralCausalModel;
import org.tweetyproject.commons.Parser;
import org.tweetyproject.commons.ParserException;
import org.tweetyproject.logics.pl.parser.PlParserFactory;
import org.tweetyproject.logics.pl.syntax.PlBeliefSet;
import org.tweetyproject.logics.pl.syntax.PlFormula;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
* Parser for {@link CausalKnowledgeBase} and observation as consumed by {@link org.tweetyproject.causal.reasoner.AbstractCausalReasoner}.
* A causal knowledge base can be parsed with {@link #parseBeliefBase(Reader)}.
* Observations can be parsed with {@link #parseListOfFormulae(String, String)}.
*
* @author Lars Bengel
* @author Oleksandr Dzhychko
*/
public class CausalParser extends Parser<CausalKnowledgeBase, PlFormula> {
static final Pattern ASSUMPTIONS_PATTERN = Pattern.compile("^\\s*\\{(.*)\\}\\s*$");
static final String SYMBOL_COMMA = ",";
static Parser<PlBeliefSet, PlFormula> plParser = PlParserFactory.getParserForFormat(PlParserFactory.Format.TWEETY);

/**
* Parses data from the reader into a {@link CausalKnowledgeBase}.
* Each line must contain either assumptions or are an equation.
* Assumptions and equations are defined as following:
* <br>equation ::= formula '&lt;=&gt;' formula
* <br>assumptions ::= '{' assumption (',' assumption)* '}'
* <br>assumption ::= formula
* <br>formula ::= a propositional formula as parsable by {@link org.tweetyproject.logics.pl.parser.PlParser#parseFormula(String)}
*
* @param reader a reader
* @return the parsed causal knowledge base
* @throws IOException if some IO issue occurred.
* @throws ParserException some parsing exceptions may be added here.
*/
@Override
public CausalKnowledgeBase parseBeliefBase(Reader reader) throws IOException, ParserException {
return null;
// Implementation similar to AbaParser.parseBeliefBase.
// But it simplified by not allowing empty lines or comments.
// If needed, it can be added later.

List<PlFormula> assumptions = new ArrayList<>();
List<PlFormula> equations = new ArrayList<>();
BufferedReader br = new BufferedReader(reader);
while (true) {
String line = br.readLine();
if (line == null) break;
Matcher matcher = ASSUMPTIONS_PATTERN.matcher(line);
if (matcher.matches()) {
String[] assumptionStrings = matcher.group(1).split(SYMBOL_COMMA);
for (String assumptionString : assumptionStrings)
if (!assumptionString.isBlank()) {
assumptions.add(parseFormula(assumptionString));
}
} else {
equations.add(parseFormula(line));
}
}

StructuralCausalModel model;
try {
model = new StructuralCausalModel(equations);
} catch (IllegalArgumentException | StructuralCausalModel.CyclicDependencyException e) {
throw new ParserException(e);
}
return new CausalKnowledgeBase(model, assumptions);
}

@Override
public PlFormula parseFormula(Reader reader) throws IOException, ParserException {
return null;
return plParser.parseFormula(reader);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,9 @@
import org.tweetyproject.commons.util.SetTools;
import org.tweetyproject.logics.pl.syntax.*;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

Expand All @@ -44,12 +46,7 @@
public class ArgumentationBasedCausalReasoner extends AbstractArgumentationBasedCausalReasoner {
@Override
public DungTheory getInducedTheory(CausalKnowledgeBase cbase, Collection<PlFormula> observations, Map<Proposition,Boolean> interventions) {
StructuralCausalModel model = cbase.getCausalModel();
for (Proposition atom : interventions.keySet()) {
model = model.intervene(atom, interventions.get(atom));
}
PlBeliefSet base = new PlBeliefSet(model.getStructuralEquations());
base.addAll(observations);
PlBeliefSet base = createBeliefSetWithObservationsAndInterventions(cbase, observations, interventions);

Collection<PlFormula> literals = new HashSet<>();
for (Proposition atom : base.getSignature()) {
Expand Down Expand Up @@ -111,6 +108,19 @@ public DungTheory getInducedTheory(CausalKnowledgeBase cbase, Collection<PlFormu
return theory;
}

/**
 * Builds a belief set containing the structural equations of the causal model
 * after applying all given interventions, extended with the observations.
 *
 * @param cbase some causal knowledge base
 * @param observations some logical formulae representing the observations of causal atoms
 * @param interventions a set of interventions on causal atoms
 * @return the belief set of the intervened model plus the observations
 */
private static PlBeliefSet createBeliefSetWithObservationsAndInterventions(
        CausalKnowledgeBase cbase,
        Collection<PlFormula> observations,
        Map<Proposition, Boolean> interventions) {
    // Apply each intervention in turn; intervene() returns a new model.
    StructuralCausalModel intervenedModel = cbase.getCausalModel();
    for (Map.Entry<Proposition, Boolean> intervention : interventions.entrySet()) {
        intervenedModel = intervenedModel.intervene(intervention.getKey(), intervention.getValue());
    }
    PlBeliefSet beliefSet = new PlBeliefSet(intervenedModel.getStructuralEquations());
    beliefSet.addAll(observations);
    return beliefSet;
}

/**
* Determines whether the given causal statements holds under the causal knowledge base
*
Expand All @@ -132,4 +142,92 @@ public boolean query(CausalKnowledgeBase cbase, CausalStatement statement) {
public boolean query(CausalKnowledgeBase cbase, InterventionalStatement statement) {
return query(cbase, statement.getObservations(), statement.getInterventions(), statement.getConclusion());
}

/**
 * Computes, for each atom that appears in the knowledge base, the set of atoms that are
 * significant for establishing that conclusion under the given observations and interventions.
 * <p>
 * This method:
 * <ul>
 * <li>Induces an argumentation theory from the given knowledge base, observations,
 * and interventions.</li>
 * <li>Groups arguments by the (single) atom occurring in their conclusion (positive
 * or negated).</li>
 * <li>Collects, per atom, all arguments concluding that atom (or its negation) and
 * all their ancestors in the attack graph.</li>
 * <li>For each of these arguments, retrieves kernels for the argument's conclusion
 * under a belief set extended with the argument's premises, and gathers all
 * atoms that appear in those kernels.</li>
 * </ul>
 *
 * @param cbase some causal knowledge base
 * @param observations some logical formulae representing the observations of causal atoms
 * @param interventions a set of interventions on causal atoms
 * @param atomFilter atoms for which to get the significant atoms.
 * If {@code null}, the filter is not applied.
 * @return a map from each (filtered) atom to the set of atoms significant for it
 */
public Map<Proposition, Collection<Proposition>> getSignificantAtoms(
CausalKnowledgeBase cbase,
Collection<PlFormula> observations,
Map<Proposition, Boolean> interventions,
Collection<Proposition> atomFilter) {
var theory = getInducedTheory(cbase, observations, interventions);
var perAtomArgumentsWithAtomInConclusion = getPerAtomArgumentsWithAtomInConclusion(theory, atomFilter);
// Belief set of the intervened model plus observations; premises are added per argument below.
var beliefSetWithoutAssumptions = createBeliefSetWithObservationsAndInterventions(cbase, observations, interventions);

var perAtomSignificantAtoms = new HashMap<Proposition, Collection<Proposition>>();

for (var entry : perAtomArgumentsWithAtomInConclusion.entrySet()) {
var atom = entry.getKey();
var argumentsForAtom = entry.getValue();

// Arguments concluding the atom (or its negation) together with all their
// ancestors in the attack graph of the induced theory.
var significantArguments = new HashSet<>();
significantArguments.addAll(argumentsForAtom);
significantArguments.addAll(theory.getAncestors(argumentsForAtom));

var significantAtoms = new HashSet<Proposition>();
for (var argument : significantArguments) {
var causalArgument = (CausalArgument) argument;
// Extend the base belief set with this argument's premises before querying kernels.
// A fresh copy is needed so premises of different arguments do not accumulate.
var beliefSetWithAssumptions = new PlBeliefSet(beliefSetWithoutAssumptions);
beliefSetWithAssumptions.addAll(causalArgument.getPremises());
var kernels = reasoner.getKernels(beliefSetWithAssumptions, causalArgument.getConclusion());
// Every atom occurring in any kernel formula counts as significant.
for (var kernel : kernels) {
for (var formula : kernel) {
significantAtoms.addAll(formula.getAtoms());
}
}
}
perAtomSignificantAtoms.put(atom, significantAtoms);
}

return perAtomSignificantAtoms;
}


/**
 * Returns, for each atom, the set of arguments whose conclusion is the atom or its negation.
 *
 * @param theory the theory containing the arguments
 * @param atomFilter atoms to restrict the result to.
 * If {@code null}, the filter is not applied.
 * @return a map from atom to the collection of arguments concluding that atom or its negation
 * @throws IllegalStateException if an argument's conclusion contains more than one atom
 */
public Map<Proposition, Collection<CausalArgument>> getPerAtomArgumentsWithAtomInConclusion(DungTheory theory, Collection<Proposition> atomFilter) {
    var perAtomArguments = new HashMap<Proposition, Collection<CausalArgument>>();

    for (var argument : theory) {
        var causalArgument = (CausalArgument) argument;
        var conclusionAtoms = causalArgument.getConclusion().getAtoms();
        if (conclusionAtoms.size() != 1) {
            // Conclusions of causal arguments are expected to be literals over a single atom.
            throw new IllegalStateException("Encountered invalid argument with more than one atom in its conclusion: " + causalArgument);
        }
        // Exactly one atom is present; take it directly instead of going through a Stream/Optional.
        var atom = conclusionAtoms.iterator().next();
        if (atomFilter != null && !atomFilter.contains(atom)) continue;

        perAtomArguments.computeIfAbsent(atom, k -> new ArrayList<>()).add(causalArgument);
    }
    return perAtomArguments;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -379,7 +379,7 @@ public void clear() {
/**
* Thrown to indicate that the structural equations of a causal model contain a cyclic dependency
*/
public static class CyclicDependencyException extends Throwable {
public static class CyclicDependencyException extends Exception {
/**
* Constructs a CyclicDependencyException with the specified detail message
*
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
/*
* This file is part of "TweetyProject", a collection of Java libraries for
* logical aspects of artificial intelligence and knowledge representation.
*
* TweetyProject is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License version 3 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright 2025 The TweetyProject Team <http://tweetyproject.org/contact/>
*/
package org.tweetyproject.causal.parser;

import org.junit.jupiter.api.Test;
import org.tweetyproject.commons.ParserException;
import org.tweetyproject.logics.pl.syntax.Equivalence;
import org.tweetyproject.logics.pl.syntax.Negation;
import org.tweetyproject.logics.pl.syntax.Proposition;
import org.tweetyproject.logics.pl.syntax.Tautology;

import java.io.IOException;
import java.util.List;
import java.util.Set;

import static org.junit.jupiter.api.Assertions.*;
import static org.tweetyproject.causal.syntax.StructuralCausalModel.*;

/**
 * Unit tests for {@link CausalParser}: parsing of causal knowledge bases
 * (structural equations and assumptions) and of observation lists.
 *
 * @author Oleksandr Dzhychko
 */
public class CausalParserTest {

// Parser under test; stateless, so one shared instance suffices.
CausalParser parser = new CausalParser();

// Equations and a trailing assumptions line are parsed into beliefs and assumptions.
@Test
public void parseBeliefBase() throws IOException {
var a = new Proposition("a");
var b = new Proposition("b");
var c = new Proposition("c");
var d = new Proposition("d");
var input = """
a <=> b
c <=> d
{ d, !b }
""";

var knowledgeBase = parser.parseBeliefBase(input);

assertEquals(Set.of(new Equivalence(a, b), new Equivalence(c, d)), knowledgeBase.getBeliefs());
assertEquals(Set.of(new Negation(b), d), knowledgeBase.getAssumptions());
}

@Test
public void parseBeliefBaseWithMultipleAssumptionLines() throws IOException {
var a = new Proposition("a");
var b = new Proposition("b");
var c = new Proposition("c");
var d = new Proposition("d");
// Being able to break up assumptions to different lines
// allows to freely structure the knowledge base.
var input = """
{ !b }
a <=> b
c <=> d
{ d }
""";

var knowledgeBase = parser.parseBeliefBase(input);

assertEquals(Set.of(new Equivalence(a, b), new Equivalence(c, d)), knowledgeBase.getBeliefs());
assertEquals(Set.of(new Negation(b), d), knowledgeBase.getAssumptions());
}

// "{}" is a valid assumptions line contributing no assumptions;
// "+" is the tautology symbol of the TWEETY PL syntax.
@Test
public void parseBeliefBaseWithEmptyAssumptions() throws IOException {
var a = new Proposition("a");
var input = """
a <=> +
{}
""";

var knowledgeBase = parser.parseBeliefBase(input);

assertEquals(Set.of(new Equivalence(a, new Tautology())), knowledgeBase.getBeliefs());
}

// Malformed propositional syntax (unbalanced parenthesis) must be rejected.
@Test
public void throwsParserExceptionForInvalidSyntax() {
var input = """
(a
""";

assertThrows(ParserException.class, () -> parser.parseBeliefBase(input));
}

// Equation lines must be equivalences; a bare atom is not a structural equation.
@Test
public void throwsParserExceptionForFormulaThatIsNotAnEquivalence() {
var input = """
a
""";

assertThrows(ParserException.class, () -> parser.parseBeliefBase(input));
}

// A self-referential equation yields a cyclic model; the parser wraps the
// model's CyclicDependencyException as the cause of a ParserException.
@Test
public void throwsParserExceptionForCyclicDependency() {
var input = """
a <=> a
""";

var exception = assertThrows(ParserException.class, () -> parser.parseBeliefBase(input));
assertInstanceOf(CyclicDependencyException.class, exception.getCause());
}


// Observations are parsed as a delimiter-separated list of literals, order preserved.
@Test
public void parseObservations() throws IOException {
var a = new Proposition("a");
var b = new Proposition("b");
var input = "a, !b";

var observations = parser.parseListOfFormulae(input, ",");

assertEquals(List.of(a, new Negation(b)), observations);
}
}
Loading
Loading