Mirror of https://github.com/Brandon-Rozek/matmod.git, synced 2025-06-18 20:45:34 +00:00

Compare commits: 84e4d3d1e1 ... f057ba64fc (13 commits)

Commits (SHA1):

- f057ba64fc
- 8628107704
- 7b93a9ff35
- 76eeee701e
- af81342a74
- 9744f976dd
- 17fb542bd0
- e293e5ac3e
- 4e4e19e117
- bed3d09f4a
- 312e1aeda5
- 646202cb50
- 3b535fdfa5

5 changed files with 471 additions and 243 deletions
@@ -2,15 +2,15 @@

Interested in seeing which satisfiable models from [arranstewart/magic](https://github.com/arranstewart/magic) have the variable sharing property?

(1) Generate satisfiable matrix models using `magic`.
(1) Generate satisfiable matrix models using `MaGIC`.

- Use the `ugly` data format as the output
- Keep in mind not all logic features in magic are supported; check out the [GitHub issue tracker](https://github.com/Brandon-Rozek/matmod/issues) to see upcoming features or make your own requests

(2) Run our tool! It will first attempt to parse all the matrices in the output file and then check for the variable sharing property one by one.

```
python3 parse_magic.py < examples/R6
./vspursuer.py -i examples/R6
```

If you face any troubles, feel free to reach out. This tool also has capabilities to generate satisfiable models given a specification (see: R.py), however, it is much slower than magic so you're better off using that.
If you face any troubles, feel free to reach out. This tool can also generate satisfiable models given a specification (see: R.py). This is, however, much slower than MaGIC, so you're better off using that.

Check out the [GitHub issue tracker](https://github.com/Brandon-Rozek/matmod/issues) to see upcoming features or make your own requests.
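For reference, the same workflow can also be driven programmatically. The sketch below mirrors what the new `vspursuer.py` (added later in this diff) does, and assumes the `examples/R6` file mentioned in the README is present in the working directory:

```python
# Minimal sketch of the README workflow, paralleling vspursuer.py below.
# Assumes examples/R6 is a MaGIC "ugly" data file in the current directory.
from parse_magic import SourceFile, parse_matrices
from vsp import has_vsp

with open("examples/R6", "r") as data_file:
    solutions = parse_matrices(SourceFile(data_file))

for model, interpretation in solutions:
    result = has_vsp(model, interpretation)
    if result.has_vsp:
        print(model.name)
```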
model.py (36 changed lines)
@@ -219,15 +219,18 @@ def satisfiable(logic: Logic, model: Model, interpretation: Dict[Operation, Mode

def model_closure(initial_set: Set[ModelValue], mfunctions: Set[ModelFunction]):
def model_closure(initial_set: Set[ModelValue], mfunctions: Set[ModelFunction], top: Optional[ModelValue], bottom: Optional[ModelValue]) -> Set[ModelValue]:
    """
    Given an initial set of model values and a set of model functions,
    compute the complete set of model values that are closed
    under the operations.

    If top or bottom is encountered, then we end the saturation procedure early.
    """
    closure_set: Set[ModelValue] = initial_set
    last_new: Set[ModelValue] = initial_set
    changed: bool = True
    topbottom_found = False

    while changed:
        changed = False

@@ -251,6 +254,18 @@ def model_closure(initial_set: Set[ModelValue], mfunctions: Set[ModelFunction]):
                if element not in closure_set:
                    new_elements.add(element)

                # Optimization: Break out of computation
                # early when top or bottom element is found
                if top is not None and element == top:
                    topbottom_found = True
                if bottom is not None and element == bottom:
                    topbottom_found = True
                if topbottom_found:
                    break

            if topbottom_found:
                break

            # We don't need to compute the arguments
            # thanks to the cache, so move onto the
            # next function.

@@ -274,8 +289,27 @@ def model_closure(initial_set: Set[ModelValue], mfunctions: Set[ModelFunction]):
                    if element not in closure_set:
                        new_elements.add(element)

                    # Optimization: Break out of computation
                    # early when top or bottom element is found
                    if top is not None and element == top:
                        topbottom_found = True
                    if bottom is not None and element == bottom:
                        topbottom_found = True
                    if topbottom_found:
                        break

                if topbottom_found:
                    break

            if topbottom_found:
                break

        closure_set.update(new_elements)
        changed = len(new_elements) > 0
        last_new = new_elements

        if topbottom_found:
            break

    return closure_set
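For illustration, the saturation idea behind `model_closure` can be sketched in a few self-contained lines: keep applying every operation to everything generated so far until nothing new appears, and stop early if a designated top or bottom value shows up. This is a simplified stand-in rather than the repository's implementation; the function name and the dict-based operation encoding below are invented for the example.

```python
# Standalone sketch of a closure-with-early-exit loop (not matmod's own code).
# Operations are encoded as {argument_tuple: result} dicts keyed by arity.
from itertools import product
from typing import Dict, Optional, Set, Tuple

def closure_sketch(
    initial: Set[str],
    operations: Dict[int, Dict[Tuple[str, ...], str]],  # arity -> table
    top: Optional[str] = None,
    bottom: Optional[str] = None,
) -> Set[str]:
    closed = set(initial)
    changed = True
    while changed:
        changed = False
        new_elements = set()
        for arity, table in operations.items():
            for args in product(closed, repeat=arity):
                result = table.get(args)
                if result is not None and result not in closed:
                    new_elements.add(result)
                    if result == top or result == bottom:
                        return closed | new_elements  # early exit
        closed |= new_elements
        changed = bool(new_elements)
    return closed

# Example: closing {"a"} under a unary operation f(a)=b, f(b)=a
print(closure_sketch({"a"}, {1: {("a",): "b", ("b",): "a"}}))  # {'a', 'b'}
```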
parse_magic.py (542 changed lines)
@@ -3,8 +3,7 @@ Parses the Magic Ugly Data File Format

Assumes the base logic is R with no extra connectives
"""
import argparse
import sys
import re
from typing import TextIO, List, Optional, Tuple, Set, Dict

from model import Model, ModelValue, ModelFunction

@@ -13,169 +12,328 @@ from logic import (
    Conjunction,
    Negation,
    Necessitation,
    Disjunction
    Disjunction,
    Operation
)
from vsp import has_vsp

class SourceFile:
    def __init__(self, fileobj: TextIO):
        self.fileobj = fileobj
        self.current_line = 0
        self.reststr = ""

    def __next__(self):
        contents = next(self.fileobj)
    def next_line(self):
        """
        Grabs the next line.
        If reststr is populated return that, otherwise
        consume the generator.
        """
        if self.reststr != "":
            reststr = self.reststr
            self.reststr = ""
            return reststr

        contents = next(self.fileobj).strip()
        self.current_line += 1
        return contents

    def __next__(self):
        """
        Grabs the next word token from the stream.
        """
        if self.reststr == "":
            self.reststr = next(self.fileobj).strip()
            self.current_line += 1

        tokens = self.reststr.split(" ")
        next_token = tokens[0]
        self.reststr = " ".join(tokens[1:])

        return next_token

class UglyHeader:
    def __init__(self, negation: bool, necessitation: bool):
    def __init__(self, negation: bool, necessitation: bool, custom_model_functions: List[Tuple[int, str]]):
        # Booleans describing the logical fragment
        self.negation = negation
        self.necessitation = necessitation
        # List of custom model functions described as
        # a sequence of (adicity, symbol) pairs
        self.custom_model_functions = custom_model_functions

class ModelBuilder:
    def __init__(self):
        self.size : int = 0
        self.carrier_set : Set[ModelValue] = set()
        self.num_negation: int = 0
        self.mnegation: Optional[ModelFunction] = None
        self.num_order: int = 0
        self.mconjunction: Optional[ModelFunction] = None
        self.mdisjunction: Optional[ModelFunction] = None
        self.num_designated: int = 0
        self.designated_values: Set[ModelValue] = set()
        self.num_implication: int = 0
        self.mimplication: Optional[ModelFunction] = None
        self.num_necessitation: int = 0
        self.mnecessitation: Optional[ModelFunction] = None
        # Map symbol to model function
        self.custom_model_functions: Dict[str, ModelFunction] = {}

class Stage:
    def __init__(self, name: str):
        self.name = name
        self.next: Optional['Stage'] = None
        self.previous: Optional['Stage'] = None
        # This corresponds to a portion of the model name in MaGIC
        self.num = 0

    def increment(self):
        self.num += 1

    def reset(self):
        self.num = 0

    def __str__(self):
        return self.name

class Stages:
    def __init__(self):
        end_stage = Stage("end")
        self.stages: Dict[str, Stage] = {"end": end_stage}
        self.last_added_stage: Stage = end_stage
        self.first_stage: Optional[Stage] = None

    def add(self, name: str):
        stage = Stage(name)
        stage.next = stage

        stage.previous = self.last_added_stage

        # End stage is a sink so don't
        # mark any stages as next
        if self.last_added_stage.name != "end":
            self.last_added_stage.next = stage
        else:
            # If this is triggered, then this is the first
            # stage added
            self.first_stage = stage

        self.stages[name] = stage
        self.last_added_stage = stage

    def reset_after(self, name):
        """
        Resets the stage counters after a given stage.
        This is to accurately reflect the name of the
        model within MaGIC.
        """
        stage = self.stages[name]
        while stage.name != "process_model":
            stage.reset()
            stage = stage.next

    def get(self, name):
        return self.stages[name]

    def name(self):
        result = ""
        stage = self.first_stage
        if stage is None:
            return ""

        result = f"{stage.num}"
        if stage.next.name == "process_model":
            return result

        stage = stage.next
        while stage is not None:
            result += f".{stage.num}"
            if stage.next.name != "process_model":
                stage = stage.next
            else:
                stage = None

        return result


def derive_stages(header: UglyHeader) -> Stages:
    stages = Stages()
    stages.add("size")
    if header.negation:
        stages.add("negation")
    stages.add("order")
    stages.add("designated")
    stages.add("implication")
    if header.necessitation:
        stages.add("necessitation")
    for (adicity, symbol) in header.custom_model_functions:
        stages.add(f"custom--{adicity}--{symbol}")
    stages.add("process_model")

    # After processing the model, go to the previous stage
    stages.get("process_model").next = stages.get("process_model").previous

    return stages

def parse_matrices(infile: SourceFile) -> List[Tuple[Model, Dict]]:
    solutions = [] # Reset
    solutions = []
    header = parse_header(infile)
    stages = derive_stages(header)
    first_run = True
    current_model_parts = ModelBuilder()
    process_sizes(infile, header, current_model_parts, solutions)
    stage = stages.first_stage
    while True:
        match stage.name:
            case "end":
                break
            case "process_model":
                process_model(stages.name(), current_model_parts, solutions)
                stage = stage.next
            case "size":
                processed = process_sizes(infile, current_model_parts, first_run)
                first_run = False
                if processed:
                    stage.num = current_model_parts.size + 1
                    stage = stage.next
                else:
                    stages.reset_after(stage.name)
                    stage = stage.previous
            case "negation":
                processed = process_negations(infile, current_model_parts)
                if processed:
                    stage.increment()
                    stage = stage.next
                else:
                    stages.reset_after(stage.name)
                    stage = stage.previous
            case "order":
                processed = process_orders(infile, current_model_parts)
                if processed:
                    stage.increment()
                    stage = stage.next
                else:
                    stages.reset_after(stage.name)
                    stage = stage.previous
            case "designated":
                processed = process_designateds(infile, current_model_parts)
                if processed:
                    stage.increment()
                    stage = stage.next
                else:
                    stages.reset_after(stage.name)
                    stage = stage.previous
            case "implication":
                processed = process_implications(infile, current_model_parts)
                if processed:
                    stage.increment()
                    stage = stage.next
                else:
                    stages.reset_after(stage.name)
                    stage = stage.previous
            case "necessitation":
                processed = process_necessitations(infile, current_model_parts)
                if processed:
                    stage.increment()
                    stage = stage.next
                else:
                    stages.reset_after(stage.name)
                    stage = stage.previous
            case _:
                custom_stage = re.search(r"custom--(\d+)--(\S+)", stage.name)
                if custom_stage is None or len(custom_stage.groups()) != 2:
                    raise NotImplementedError(f"Unrecognized Stage: {stage.name}")
                adicity, symbol = custom_stage.groups()
                adicity = int(adicity)
                processed = process_custom_connective(infile, symbol, adicity, current_model_parts)

                if processed:
                    stage.increment()
                    stage = stage.next
                else:
                    stages.reset_after(stage.name)
                    stage = stage.previous

    return solutions
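For intuition, the loop in `parse_matrices` above is a cursor over the doubly linked list of stages: advance on a successful parse, fall back one stage on a `-1` terminator, and emit a model each time the cursor reaches `process_model`. Below is a toy, self-contained version of that control flow; the handler names and the sample return values are made up for illustration.

```python
# Toy model of parse_matrices' stage walk (stage names and handlers invented).
# Each handler returns True if it parsed one more table, False on a "-1".
def walk(stages, handlers):
    i = 0
    results = 0
    while i >= 0:
        name = stages[i]
        if name == "process_model":
            results += 1          # a complete model has been assembled
            i -= 1                # go back and look for the next table
        elif handlers[name]():
            i += 1                # advance to the next stage
        else:
            i -= 1                # backtrack to the previous stage
    return results

sizes = iter([True, False])               # one size block, then end of input
implications = iter([True, True, False])  # two implication tables for that size
print(walk(["size", "implication", "process_model"],
           {"size": lambda: next(sizes),
            "implication": lambda: next(implications)}))  # -> 2
```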
def process_sizes(infile: SourceFile, header: UglyHeader, current_model_parts: ModelBuilder, solutions: List[Tuple[Model, Dict]]):
    """Stage 1"""

    first_run = True

    while True:
        print("Processing next size")
def process_sizes(infile: SourceFile, current_model_parts: ModelBuilder, first_run: bool) -> bool:
    try:
        size = parse_size(infile, first_run)
        first_run = False
    except StopIteration:
        # For some reason, when necessitation is enabled this doesn't
        # have a -1 on the last line
        break
        return False
    if size is None:
        break
        return False

    carrier_set = carrier_set_from_size(size)
    current_model_parts.size = size
    current_model_parts.carrier_set = carrier_set
        process_negations(infile, header, current_model_parts, solutions)
    current_model_parts.size = size

def process_negations(infile: SourceFile, header: UglyHeader, current_model_parts: ModelBuilder, solutions: List[Tuple[Model, Dict]]):
    return True

def process_negations(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
    """Stage 2 (Optional)"""
    num_negation = 0
    while True:
        print("Processing next negation")
        mnegation = None
        if header.negation:
            mnegation = parse_single_negation(infile, current_model_parts.size)
    mnegation = parse_single_monadic_connective(infile, "¬", current_model_parts.size)
    if mnegation is None:
        break
        num_negation += 1
        return False

    current_model_parts.num_negation = num_negation
    current_model_parts.mnegation = mnegation
    return True

        process_orders(infile, header, current_model_parts, solutions)

        if not header.negation:
            break

def process_orders(infile: SourceFile, header: UglyHeader, current_model_parts: ModelBuilder, solutions: List[Tuple[Model, Dict]]):
def process_orders(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
    """Stage 3"""
    num_order = 0
    while True:
        print("Processing next order")
    result = parse_single_order(infile, current_model_parts.size)
    if result is None:
        break
        num_order += 1
        return False

    mconjunction, mdisjunction = result
    current_model_parts.num_order = num_order
    current_model_parts.mconjunction = mconjunction
    current_model_parts.mdisjunction = mdisjunction
        process_designateds(infile, header, current_model_parts, solutions)

def process_designateds(infile: SourceFile, header: UglyHeader, current_model_parts: ModelBuilder, solutions: List[Tuple[Model, Dict]]):
    return True

def process_designateds(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
    """Stage 4"""
    num_designated = 0
    while True:
        print("Processing next designated")
    designated_values = parse_single_designated(infile, current_model_parts.size)
    if designated_values is None:
        break
        num_designated += 1
        current_model_parts.num_designated = num_designated
        return False

    current_model_parts.designated_values = designated_values
        process_implications(infile, header, current_model_parts, solutions)
    return True

def process_implications(
        infile: SourceFile, header: UglyHeader, current_model_parts: ModelBuilder, solutions: List[Tuple[Model, Dict]]):
def process_implications(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
    """Stage 5"""
    if header.necessitation:
        num_implication = 0
        while True:
            print("Processing next implication")
            instr = next(infile).strip()
            mimplication, reststr = parse_single_implication(instr, infile.current_line, current_model_parts.size)
    mimplication = parse_single_dyadic_connective(infile, "→", current_model_parts.size)
    if mimplication is None:
            break
            num_implication += 1
            current_model_parts.num_implication = num_implication
        return False

    current_model_parts.mimplication = mimplication
            process_necessitations(infile, reststr, header, current_model_parts, solutions)
    else:
        results = parse_implications(infile, current_model_parts.size)
        for num_implication, mimplication in enumerate(results, 1):
            current_model_parts.num_implication = num_implication
            current_model_parts.mimplication = mimplication
            process_model(current_model_parts, solutions)
    return True

def process_necessitations(infile: SourceFile, instr: str, header: UglyHeader, current_model_parts: ModelBuilder, solutions: List[Tuple[Model, Dict]]):

    # NOTE: For some reason, one necessitation table will be on the same line as the implication table
    mnecessitation = parse_single_necessitation_from_str(instr, infile.current_line, current_model_parts.size)
    assert mnecessitation is not None, f"Expected Necessitation Table at line {infile.current_line}"
    num_necessitation = 1

    current_model_parts.num_necessitation = num_necessitation
    current_model_parts.mnecessitation = mnecessitation
    process_model(current_model_parts, solutions)

    while True:
        print("Processing next necessitation")
        mnecessitation = parse_single_necessitation(infile, current_model_parts.size)
def process_necessitations(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
    mnecessitation = parse_single_monadic_connective(infile, "!", current_model_parts.size)
    if mnecessitation is None:
        break
        num_necessitation += 1
        return False

    current_model_parts.num_necessitation = num_necessitation
    current_model_parts.mnecessitation = mnecessitation
    process_model(current_model_parts, solutions)
    return True

def process_model(mp: ModelBuilder, solutions: List[Tuple[Model, Dict]]):

def process_custom_connective(infile: SourceFile, symbol: str, adicity: int, current_model_parts: ModelBuilder) -> bool:
    if adicity == 0:
        mfunction = parse_single_nullary_connective(infile, symbol)
    elif adicity == 1:
        mfunction = parse_single_monadic_connective(infile, symbol, current_model_parts.size)
    elif adicity == 2:
        mfunction = parse_single_dyadic_connective(infile, symbol, current_model_parts.size)
    else:
        raise NotImplementedError("Unable to process connectives of adicity greater than 2")

    if mfunction is None:
        return False

    current_model_parts.custom_model_functions[symbol] = mfunction
    return True

def process_model(model_name: str, mp: ModelBuilder, solutions: List[Tuple[Model, Dict]]):
    """Create Model"""
    assert mp.size > 0
    assert mp.size + 1 == len(mp.carrier_set)
    assert len(mp.designated_values) <= len(mp.carrier_set)
    assert mp.mimplication is not None
    assert len(mp.carrier_set) > 0

    logical_operations = { mp.mimplication }
    model_name = f"{mp.size}{'.' + str(mp.num_negation) if mp.num_negation != 0 else ''}.{mp.num_order}.{mp.num_designated}.{mp.num_implication}{'.' + str(mp.num_necessitation) if mp.num_necessitation != 0 else ''}"
    model = Model(mp.carrier_set, logical_operations, mp.designated_values, name=model_name)
    interpretation = {
        Implication: mp.mimplication

@@ -193,23 +351,34 @@ def process_model(mp: ModelBuilder, solutions: List[Tuple[Model, Dict]]):
        logical_operations.add(mp.mnecessitation)
        interpretation[Necessitation] = mp.mnecessitation

    for custom_mf in mp.custom_model_functions.values():
        if custom_mf is not None:
            logical_operations.add(custom_mf)
            op = Operation(custom_mf.operation_name, custom_mf.arity)
            interpretation[op] = custom_mf

    solutions.append((model, interpretation))
    print(f"Parsed Matrix {model.name}")

def parse_header(infile: SourceFile) -> UglyHeader:
    """
    Parse the header line from the ugly data format.
    NOTE: Currently Incomplete.
    """
    header_line = next(infile).strip()
    header_line = infile.next_line()
    header_tokens = header_line.split(" ")
    assert header_tokens[0] in ["0", "1"]
    assert header_tokens[6] in ["0", "1"]
    assert len(header_tokens) >= 7
    negation_defined = bool(int(header_tokens[0]))
    necessitation_defined = bool(int(header_tokens[6]))
    return UglyHeader(negation_defined, necessitation_defined)
    num_custom_connectives = int(header_tokens[7])
    custom_model_functions: List[Tuple[int, str]] = []
    for i in range(num_custom_connectives):
        arity = int(header_tokens[7 + (2 * i) + 1])
        symbol = header_tokens[7 + (2 * i) + 2]
        custom_model_functions.append((arity, symbol))
    return UglyHeader(negation_defined, necessitation_defined, custom_model_functions)
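To make the header layout concrete: token 0 is the negation flag, token 6 the necessitation flag, token 7 the number of custom connectives, and each custom connective then contributes an (arity, symbol) pair. A small worked example on a made-up header line; positions 1 through 5 are other header fields that `parse_header` does not interpret, and the flag values and symbols here are illustrative only.

```python
# Hypothetical header line; only positions 0, 6, and 7 onward matter here.
header_line = "1 2 3 4 5 6 1 2 1 ? 2 +"
tokens = header_line.split(" ")

negation_defined = bool(int(tokens[0]))        # True
necessitation_defined = bool(int(tokens[6]))   # True
num_custom = int(tokens[7])                    # 2 custom connectives

custom = []
for i in range(num_custom):
    arity = int(tokens[7 + (2 * i) + 1])
    symbol = tokens[7 + (2 * i) + 2]
    custom.append((arity, symbol))

print(custom)  # [(1, '?'), (2, '+')]
```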
def carrier_set_from_size(size: int):
def carrier_set_from_size(size: int) -> Set[ModelValue]:
    """
    Construct a carrier set of model values
    based on the desired size.

@@ -222,37 +391,21 @@ def parse_size(infile: SourceFile, first_run: bool) -> Optional[int]:
    """
    Parse the line representing the matrix size.
    """
    size = int(next(infile))
    # HACK: The first size line may be -1 due to a bug. Skip it
    if size == -1 and first_run:
        size = int(next(infile))

    size = int(infile.next_line())

    # HACK: When necessitation and custom connectives are enabled
    # MaGIC may produce -1s at the beginning of the file
    if first_run:
        while size == -1:
            size = int(infile.next_line())

    if size == -1:
        return None
    assert size > 0, f"Unexpected size at line {infile.current_line}"
    return size

def parse_single_negation(infile: SourceFile, size: int) -> Optional[ModelFunction]:
    """
    Parse the line representing the negation table.
    """
    line = next(infile).strip()
    if line == '-1':
        return None

    row = line.split(" ")
    assert len(row) == size + 1, f"Negation table doesn't match size at line {infile.current_line}"
    mapping = {}

    for i, j in zip(range(size + 1), row):
        x = mvalue_from_index(i)
        y = parse_mvalue(j)
        mapping[(x, )] = y

    return ModelFunction(1, mapping, "¬")


def mvalue_from_index(i: int):
def mvalue_from_index(i: int) -> ModelValue:
    """
    Given an index, return the
    representation of the model value.

@@ -317,7 +470,7 @@ def parse_single_order(infile: SourceFile, size: int) -> Optional[Tuple[ModelFun
    """
    Parse the line representing the ordering table
    """
    line = next(infile).strip()
    line = infile.next_line()
    if line == '-1':
        return None

@@ -338,7 +491,6 @@ def parse_single_order(infile: SourceFile, size: int) -> Optional[Tuple[ModelFun
    cmapping = {}
    dmapping = {}

    for i in range(size + 1):
        x = mvalue_from_index(i)
        for j in range(size + 1):

@@ -368,7 +520,7 @@ def parse_single_designated(infile: SourceFile, size: int) -> Optional[Set[Model
    """
    Parse the line representing which model values are designated.
    """
    line = next(infile).strip()
    line = infile.next_line()
    if line == '-1':
        return None

@@ -385,77 +537,25 @@ def parse_single_designated(infile: SourceFile, size: int) -> Optional[Set[Model
    return designated_values


def parse_single_implication(instr: str, line: int, size: int) -> Tuple[ModelFunction, str]:
    """
    Take the current string, parse an implication table from it,
    and return along with it the remainder of the string
    """
    if instr == "-1":
        return None, ""

    table = instr.split(" ")

    assert len(table) >= (size + 1)**2, f"Implication table does not match expected size at line {line}"

    mapping = {}
    table_i = 0
    for i in range(size + 1):
        x = mvalue_from_index(i)
        for j in range(size + 1):
            y = mvalue_from_index(j)

            r = parse_mvalue(table[table_i])
            table_i += 1

            mapping[(x, y)] = r

    mimplication = ModelFunction(2, mapping, "→")
    reststr = " ".join(table[(size + 1)**2:])
    return mimplication, reststr


def parse_implications(infile: SourceFile, size: int) -> List[ModelFunction]:
    """
    Parse the line representing the list of implication
    tables.
    """
    line = next(infile).strip()

    # Split and remove the last '-1' character
    table = line.split(" ")[:-1]

    assert len(table) % (size + 1)**2 == 0, f"Implication table does not match expected size at line {infile.current_line}"

    table_i = 0
    mimplications: List[ModelFunction] = []

    for _ in range(len(table) // (size + 1)**2):
        mapping = {}

        for i in range(size + 1):
            x = mvalue_from_index(i)
            for j in range(size + 1):
                y = mvalue_from_index(j)

                r = parse_mvalue(table[table_i])
                table_i += 1

                mapping[(x, y)] = r

        mimplication = ModelFunction(2, mapping, "→")
        mimplications.append(mimplication)

    return mimplications

def parse_single_necessitation_from_str(instr: str, line: int, size: int) -> Optional[ModelFunction]:
    """
    Parse the line representing the necessitation table.
    """
    if instr == "-1":
def parse_single_nullary_connective(infile: SourceFile, symbol: str) -> Optional[ModelFunction]:
    line = infile.next_line()
    if line == "-1":
        return None

    row = instr.split(" ")
    assert len(row) == size + 1, f"Necessitation table doesn't match size at line {line}"
    row = line.split(" ")
    assert len(row) == 1, f"More than one assignment for a nullary connective was provided at line {infile.current_line}"

    mapping = {}
    mapping[()] = parse_mvalue(row[0])
    return ModelFunction(0, mapping, symbol)

def parse_single_monadic_connective(infile: SourceFile, symbol: str, size: int) -> Optional[ModelFunction]:
    line = infile.next_line()
    if line == '-1':
        return None

    row = line.split(" ")
    assert len(row) == size + 1, f"{symbol} table doesn't match size at line {infile.current_line}"
    mapping = {}

    for i, j in zip(range(size + 1), row):

@@ -463,27 +563,31 @@ def parse_single_necessitation_from_str(instr: str, line: int, size: int) -> Opt
        y = parse_mvalue(j)
        mapping[(x, )] = y

    return ModelFunction(1, mapping, "!")
    return ModelFunction(1, mapping, symbol)

def parse_single_necessitation(infile: SourceFile, size: int) -> Optional[ModelFunction]:
    line = next(infile).strip()
    return parse_single_necessitation_from_str(line, infile.current_line, size)
def parse_single_dyadic_connective(infile: SourceFile, symbol: str, size: int) -> Optional[ModelFunction]:
    first_token = next(infile)
    if first_token == "-1":
        return None

    table = []
    try:
        table = [first_token] + [next(infile) for _ in range((size + 1)**2 - 1)]
    except StopIteration:
        pass

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="VSP Checker")
    parser.add_argument("--verbose", action='store_true', help="Print out all parsed matrices")
    args = vars(parser.parse_args())
    solutions = parse_matrices(SourceFile(sys.stdin))
    print(f"Parsed {len(solutions)} matrices")
    num_has_vsp = 0
    for i, (model, interpretation) in enumerate(solutions):
        vsp_result = has_vsp(model, interpretation)
        print(vsp_result)
    assert len(table) == (size + 1)**2, f"{symbol} table does not match expected size at line {infile.current_line}"

        if args['verbose'] or vsp_result.has_vsp:
            print(model)
    mapping = {}
    table_i = 0
    for i in range(size + 1):
        x = mvalue_from_index(i)
        for j in range(size + 1):
            y = mvalue_from_index(j)

        if vsp_result.has_vsp:
            num_has_vsp += 1
    print(f"Tested {len(solutions)} models, {num_has_vsp} of which satisfy VSP")
            r = parse_mvalue(table[table_i])
            table_i += 1

            mapping[(x, y)] = r

    return ModelFunction(2, mapping, symbol)
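As a concrete illustration of the table format handled above: a monadic connective over a size-`n` model is one line of `n + 1` result values, where position `i` gives the value of the connective applied to the `i`-th model value. A standalone sketch of that index-to-value pairing follows; plain strings stand in for the `ModelValue`s that `mvalue_from_index` and `parse_mvalue` would build, and the sample row is invented.

```python
# Standalone sketch of how a monadic table line is read (size = 2, so 3 entries).
# "a0", "a1", "a2" are stand-ins for the parsed model values.
size = 2
line = "2 1 0"          # made-up table: f(a0)=a2, f(a1)=a1, f(a2)=a0
row = line.split(" ")
assert len(row) == size + 1

mapping = {}
for i, j in zip(range(size + 1), row):
    x = f"a{i}"          # stand-in for mvalue_from_index(i)
    y = f"a{int(j)}"     # stand-in for parse_mvalue(j)
    mapping[(x,)] = y

print(mapping)  # {('a0',): 'a2', ('a1',): 'a1', ('a2',): 'a0'}
```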
vsp.py (65 changed lines)
@@ -8,7 +8,7 @@ from common import set_to_str
from model import (
    Model, model_closure, ModelFunction, ModelValue
)
from logic import Implication, Operation
from logic import Conjunction, Disjunction, Implication, Operation

def preseed(
    initial_set: Set[ModelValue],

@@ -38,6 +38,40 @@ def preseed(
    same_set = candidate_preseed[1] == 0
    return candidate_preseed[0], same_set


def find_top(algebra: Set[ModelValue], mconjunction: Optional[ModelFunction], mdisjunction: Optional[ModelFunction]) -> Optional[ModelValue]:
    """
    Find the top of the order lattice.
    T || a = T, T && a = a for all a in the carrier set
    """
    if mconjunction is None or mdisjunction is None:
        return None

    for x in algebra:
        for y in algebra:
            if mdisjunction(x, y) == x and mconjunction(x, y) == y:
                return x

    print("[Warning] Failed to find the top of the lattice")
    return None

def find_bottom(algebra: Set[ModelValue], mconjunction: Optional[ModelFunction], mdisjunction: Optional[ModelFunction]) -> Optional[ModelValue]:
    """
    Find the bottom of the order lattice
    F || a = a, F && a = F for all a in the carrier set
    """
    if mconjunction is None or mdisjunction is None:
        return None

    for x in algebra:
        for y in algebra:
            if mdisjunction(x, y) == y and mconjunction(x, y) == x:
                return x

    print("[Warning] Failed to find the bottom of the lattice")
    return None
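The defining properties in those docstrings can be checked directly on a small example. Below is a self-contained sketch on a four-element diamond lattice; the lattice, the helper functions, and all names are made up, and the exhaustive `all(...)` check mirrors the "for all a" condition from the docstrings rather than the repository's pairwise search.

```python
# Toy 4-element diamond lattice 0 <= a, b <= 1 with hand-written join/meet.
elements = {"0", "a", "b", "1"}

def join(x, y):
    if x == y: return x
    if "1" in (x, y): return "1"
    if x == "0": return y
    if y == "0": return x
    return "1"          # a v b = 1

def meet(x, y):
    if x == y: return x
    if "0" in (x, y): return "0"
    if x == "1": return y
    if y == "1": return x
    return "0"          # a ^ b = 0

# Top: T v a = T and T ^ a = a for every a; bottom is the dual condition.
top = next(t for t in elements
           if all(join(t, a) == t and meet(t, a) == a for a in elements))
bottom = next(f for f in elements
              if all(join(f, a) == a and meet(f, a) == f for a in elements))
print(top, bottom)  # 1 0
```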
class VSP_Result:
    def __init__(
        self, has_vsp: bool, model_name: Optional[str] = None,

@@ -62,6 +96,10 @@ def has_vsp(model: Model, interpretation: Dict[Operation, ModelFunction]) -> VSP
    sharing property.
    """
    impfunction = interpretation[Implication]
    mconjunction = interpretation.get(Conjunction)
    mdisjunction = interpretation.get(Disjunction)
    top = find_top(model.carrier_set, mconjunction, mdisjunction)
    bottom = find_bottom(model.carrier_set, mconjunction, mdisjunction)

    # NOTE: No model with only one designated
    # value satisfies VSP

@@ -101,28 +139,45 @@ def has_vsp(model: Model, interpretation: Dict[Operation, ModelFunction]) -> VSP

        # NOTE: Optimization before model_closure
        # If the carrier set intersects, then move on to the next
        # subalgebra
        # If the two subalgebras intersect, move
        # onto the next pair
        if len(xs & ys) > 0:
            continue

        # NOTE: Optimization
        # if either subalgebra contains top or bottom, move
        # onto the next pair
        if top is not None and (top in xs or top in ys):
            continue
        if bottom is not None and (bottom in xs or bottom in ys):
            continue

        # Compute the closure of all operations
        # with just the xs
        carrier_set_left: Set[ModelValue] = model_closure(xs, model.logical_operations)
        carrier_set_left: Set[ModelValue] = model_closure(xs, model.logical_operations, top, bottom)

        # Save to cache
        if cached_xs[0] is not None and not cached_ys[1]:
            closure_cache.append((orig_xs, carrier_set_left))

        if top is not None and top in carrier_set_left:
            continue
        if bottom is not None and bottom in carrier_set_left:
            continue


        # Compute the closure of all operations
        # with just the ys
        carrier_set_right: Set[ModelValue] = model_closure(ys, model.logical_operations)
        carrier_set_right: Set[ModelValue] = model_closure(ys, model.logical_operations, top, bottom)

        # Save to cache
        if cached_ys[0] is not None and not cached_ys[1]:
            closure_cache.append((orig_ys, carrier_set_right))

        if top is not None and top in carrier_set_right:
            continue
        if bottom is not None and bottom in carrier_set_right:
            continue

        # If the carrier set intersects, then move on to the next
        # subalgebra
vspursuer.py (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env python3
import argparse
from parse_magic import (
    SourceFile,
    parse_matrices
)
from vsp import has_vsp

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="VSP Checker")
    parser.add_argument("--verbose", action='store_true', help="Print out all parsed matrices")
    parser.add_argument("-i", type=str, help="Path to MaGIC ugly data file")
    args = vars(parser.parse_args())

    data_file_path = args.get("i")
    if data_file_path is None:
        data_file_path = input("Path to MaGIC Ugly Data File: ")

    solutions = []
    with open(data_file_path, "r") as data_file:
        solutions = parse_matrices(SourceFile(data_file))
    print(f"Parsed {len(solutions)} matrices")

    num_has_vsp = 0
    for i, (model, interpretation) in enumerate(solutions):
        vsp_result = has_vsp(model, interpretation)
        print(vsp_result)

        if args['verbose'] or vsp_result.has_vsp:
            print(model)

        if vsp_result.has_vsp:
            num_has_vsp += 1

    print(f"Tested {len(solutions)} models, {num_has_vsp} of which satisfy VSP")