(#38) Redid parallel implementation

Queue-based parallel implementation with a dedicated file reader node and dedicated VSP verifier nodes.

Additionally, cleaned up the other files.
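In outline, the new pipeline is a single reader process feeding a bounded task queue, several verifier processes consuming from it, and None sentinels (one per worker) signaling shutdown in each direction. A minimal self-contained sketch of that pattern follows; the parse_items and check_item helpers are placeholders standing in for the real parser and VSP check, not code from this commit.

import multiprocessing as mp

def parse_items():
    # Placeholder producer; stands in for parse_matrices below.
    yield from range(10)

def check_item(item):
    # Placeholder work; stands in for has_vsp below.
    return item * item

def reader(task_queue, num_workers):
    # Dedicated file reader node: stream tasks, then one sentinel per worker.
    for item in parse_items():
        task_queue.put(item)
    for _ in range(num_workers):
        task_queue.put(None)

def worker(task_queue, result_queue):
    # Dedicated verifier node: consume until a sentinel arrives.
    try:
        while True:
            item = task_queue.get()
            if item is None:
                break
            result_queue.put(check_item(item))
    finally:
        result_queue.put(None)  # tell the main process this worker is done

if __name__ == "__main__":
    num_workers = 2
    tasks = mp.Queue(maxsize=1000)  # bounded, so the reader can't outrun the workers
    results = mp.Queue()
    mp.Process(target=reader, args=(tasks, num_workers)).start()
    workers = [mp.Process(target=worker, args=(tasks, results)) for _ in range(num_workers)]
    for w in workers:
        w.start()
    finished = 0
    while finished < num_workers:
        result = results.get()
        if result is None:
            finished += 1
        else:
            print(result)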
Brandon Rozek 2025-05-13 12:58:22 -04:00 committed by GitHub
commit 214e9ba658
4 changed files with 485 additions and 379 deletions

155
model.py

@@ -16,9 +16,9 @@ from typing import Dict, List, Optional, Set, Tuple
__all__ = ['ModelValue', 'ModelFunction', 'Model', 'Interpretation']
class ModelValue:
def __init__(self, name):
def __init__(self, name: str, hashed_value: Optional[int] = None):
self.name = name
self.hashed_value = hash(self.name)
self.hashed_value = hashed_value if hashed_value is not None else hash(self.name)
self.__setattr__ = immutable
def __str__(self):
return self.name
@@ -27,7 +27,7 @@ class ModelValue:
def __eq__(self, other):
return isinstance(other, ModelValue) and self.name == other.name
def __deepcopy__(self, _):
return ModelValue(self.name)
return ModelValue(self.name, self.hashed_value)
class ModelFunction:
def __init__(self, arity: int, mapping, operation_name = ""):
@@ -105,16 +105,76 @@ Interpretation = Dict[Operation, ModelFunction]
class OrderTable:
def __init__(self):
self.ordering = set()
# a : {x | x <= a }
self.le_map: Dict[ModelValue, Set[ModelValue]] = defaultdict(set)
# a : {x | x >= a}
self.ge_map: Dict[ModelValue, Set[ModelValue]] = defaultdict(set)
def add(self, x, y):
"""
Add x <= y
"""
self.ordering.add((x, y))
self.le_map[y].add(x)
self.ge_map[x].add(y)
def is_lt(self, x, y):
return (x, y) in self.ordering
return x in self.le_map[y]
def meet(self, x, y) -> Optional[ModelValue]:
X = self.le_map[x]
Y = self.le_map[y]
candidates = X.intersection(Y)
# Grab all elements greater than each of the candidates
candidate_ge_maps = (self.ge_map[candidate] for candidate in candidates)
common_ge_values = reduce(set.intersection, candidate_ge_maps)
# Intersect with candidates to get the values that satisfy
# the meet properties
result_set = candidates.intersection(common_ge_values)
# NOTE: The meet may not exist, in which case return None
result = next(iter(result_set), None)
return result
def join(self, x, y) -> Optional[ModelValue]:
X = self.ge_map[x]
Y = self.ge_map[y]
candidates = X.intersection(Y)
# Grab all elements smaller than each of the candidates
candidate_le_maps = (self.le_map[candidate] for candidate in candidates)
common_le_values = reduce(set.intersection, candidate_le_maps)
# Intersect with candidates to get the values that satisfy
# the join properties
result_set = candidates.intersection(common_le_values)
# NOTE: The join may not exist, in which case return None
result = next(iter(result_set), None)
return result
def top(self) -> Optional[ModelValue]:
ge_maps = (self.ge_map[candidate] for candidate in self.ge_map)
result_set = reduce(set.intersection, ge_maps)
# Either not unique or does not exist
if len(result_set) != 1:
return None
return next(iter(result_set))
def bottom(self) -> Optional[ModelValue]:
le_maps = (self.le_map[candidate] for candidate in self.le_map)
result_set = reduce(set.intersection, le_maps)
# Either not unique or does not exist
if len(result_set) != 1:
return None
return next(iter(result_set))
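For orientation, a small usage sketch of the new OrderTable API on an invented four-element diamond lattice (0 below a and b, both below 1). Reflexive pairs are added because meet and join rely on x <= x being present:

from model import ModelValue, OrderTable

zero, a, b, one = ModelValue("0"), ModelValue("a"), ModelValue("b"), ModelValue("1")
order = OrderTable()
for x in (zero, a, b, one):
    order.add(x, x)      # diagonal: x <= x
for x in (zero, a, b):
    order.add(x, one)    # everything lies below 1
for y in (a, b, one):
    order.add(zero, y)   # 0 lies below everything

assert order.is_lt(zero, one)
assert order.meet(a, b) == zero   # greatest lower bound of incomparable a, b
assert order.join(a, b) == one    # least upper bound
assert order.top() == one and order.bottom() == zero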
class Model:
@@ -231,86 +291,61 @@ def satisfiable(logic: Logic, model: Model, interpretation: Dict[Operation, Mode
return True
def model_closure(initial_set: Set[ModelValue], mfunctions: Set[ModelFunction], forbidden_element: Optional[ModelValue]) -> Set[ModelValue]:
"""
Given an initial set of model values and a set of model functions,
compute the complete set of model values that are closed
under the operations.
If top or bottom is encountered, then we end the saturation procedure early.
If the forbidden element is encountered, then we end the saturation procedure early.
"""
closure_set: Set[ModelValue] = initial_set
last_new: Set[ModelValue] = initial_set
changed: bool = True
forbidden_found = False
arities = set()
for mfun in mfunctions:
arities.add(mfun.arity)
while changed:
changed = False
new_elements: Set[ModelValue] = set()
old_closure: Set[ModelValue] = closure_set - last_new
# arity -> args
cached_args = defaultdict(list)
args_by_arity = defaultdict(list)
# Pass elements into each model function
for mfun in mfunctions:
# If a previous function shared the same arity,
# we'll use the same set of computed arguments
# to pass into the model functions.
if mfun.arity in cached_args:
for args in cached_args[mfun.arity]:
# Compute the new elements
# given the cached arguments.
element = mfun(*args)
if element not in closure_set:
new_elements.add(element)
# Optimization: Break out of computation
# early when forbidden element is found
if forbidden_element is not None and element == forbidden_element:
forbidden_found = True
break
if forbidden_found:
break
# We don't need to compute the arguments
# thanks to the cache, so move onto the
# next function.
continue
# At this point, we don't have cached arguments, so we need
# to compute this set.
# Each argument must have at least one new element to not repeat
# work. We'll range over the number of new model values within our
# argument.
for num_new in range(1, mfun.arity + 1):
# Motivation: We want to only compute arguments that we have not
# seen before
for arity in arities:
for num_new in range(1, arity + 1):
new_args = combinations_with_replacement(last_new, r=num_new)
old_args = combinations_with_replacement(old_closure, r=mfun.arity - num_new)
# Determine every possible ordering of the concatenated
# new and old model values.
old_args = combinations_with_replacement(old_closure, r=arity - num_new)
# Determine every possible ordering of the concatenated new and
# old model values.
for new_arg, old_arg in product(new_args, old_args):
for args in permutations(new_arg + old_arg):
cached_args[mfun.arity].append(args)
element = mfun(*args)
if element not in closure_set:
new_elements.add(element)
for combined_args in permutations(new_arg + old_arg):
args_by_arity[arity].append(combined_args)
# Optimization: Break out of computation
# early when forbidden element is found
if forbidden_element is not None and element == forbidden_element:
forbidden_found = True
break
if forbidden_found:
break
# Pass each argument into each model function
for mfun in mfunctions:
for args in args_by_arity[mfun.arity]:
# Compute the new elements
# given the cached arguments.
element = mfun(*args)
if element not in closure_set:
new_elements.add(element)
if forbidden_found:
# Optimization: Break out of computation
# early when forbidden element is found
if forbidden_element is not None and element == forbidden_element:
forbidden_found = True
break
if forbidden_found:
break
closure_set.update(new_elements)
changed = len(new_elements) > 0
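A sketch of calling model_closure directly, assuming (as the parser builds them) that a ModelFunction maps argument tuples to model values; the values and function here are invented:

from model import ModelValue, ModelFunction, model_closure

a, b, c = ModelValue("a"), ModelValue("b"), ModelValue("c")
# A unary function whose orbit from a is {a, b, c}.
step = ModelFunction(1, {(a,): b, (b,): c, (c,): c}, "step")
closed = model_closure({a}, {step}, None)  # None: no forbidden element
assert closed == {a, b, c}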

parse_magic.py

@@ -4,7 +4,8 @@ Parses the Magic Ugly Data File Format
Assumes the base logic is R with no extra connectives
"""
import re
from typing import TextIO, List, Optional, Tuple, Set, Dict
from typing import TextIO, List, Iterator, Optional, Tuple, Set, Dict
from itertools import product
from model import Model, ModelValue, ModelFunction, OrderTable
from logic import (
@@ -19,7 +20,7 @@ from logic import (
class SourceFile:
def __init__(self, fileobj: TextIO):
self.fileobj = fileobj
self.current_line = 0
self.line_in_file = 0
self.reststr = ""
def next_line(self):
@@ -34,7 +35,7 @@
return reststr
contents = next(self.fileobj).strip()
self.current_line += 1
self.line_in_file += 1
return contents
def __next__(self):
@@ -43,7 +44,7 @@
"""
if self.reststr == "":
self.reststr = next(self.fileobj).strip()
self.current_line += 1
self.line_in_file += 1
next_token, _, self.reststr = self.reststr.partition(" ")
return next_token
@@ -60,7 +61,7 @@ class UglyHeader:
class ModelBuilder:
def __init__(self):
self.size : int = 0
self.carrier_set : Set[ModelValue] = set()
self.carrier_list : List[ModelValue] = []
self.mnegation: Optional[ModelFunction] = None
self.ordering: Optional[OrderTable] = None
self.mconjunction: Optional[ModelFunction] = None
@@ -71,6 +72,45 @@ class ModelBuilder:
# Map symbol to model function
self.custom_model_functions: Dict[str, ModelFunction] = {}
def build(self, model_name: str) -> Tuple[Model, Dict[Operation, ModelFunction]]:
"""Create Model"""
assert self.size > 0
assert self.size + 1 == len(self.carrier_list)
assert len(self.designated_values) <= len(self.carrier_list)
assert self.mimplication is not None
# Implication is required to be present
logical_operations = { self.mimplication }
interpretation = {
Implication: self.mimplication
}
# Other model functions and logical
# operations are optional
if self.mnegation is not None:
logical_operations.add(self.mnegation)
interpretation[Negation] = self.mnegation
if self.mconjunction is not None:
logical_operations.add(self.mconjunction)
interpretation[Conjunction] = self.mconjunction
if self.mdisjunction is not None:
logical_operations.add(self.mdisjunction)
interpretation[Disjunction] = self.mdisjunction
if self.mnecessitation is not None:
logical_operations.add(self.mnecessitation)
interpretation[Necessitation] = self.mnecessitation
# Custom model function definitions
for custom_mf in self.custom_model_functions.values():
if custom_mf is not None:
logical_operations.add(custom_mf)
op = Operation(custom_mf.operation_name, custom_mf.arity)
interpretation[op] = custom_mf
model = Model(set(self.carrier_list), logical_operations, self.designated_values, ordering=self.ordering, name=model_name)
return (model, interpretation)
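A rough illustration of the new build path, using an invented two-element implication-only model:

from model import ModelValue, ModelFunction
from parse_magic import ModelBuilder

f, t = ModelValue("0"), ModelValue("1")
builder = ModelBuilder()
builder.size = 1                  # MaGIC size is one less than the carrier size
builder.carrier_list = [f, t]
builder.designated_values = {t}
builder.mimplication = ModelFunction(2, {
    (f, f): t, (f, t): t,
    (t, f): f, (t, t): t,
}, "→")
model, interpretation = builder.build("1")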
class Stage:
def __init__(self, name: str):
self.name = name
@@ -97,8 +137,6 @@ class Stages:
def add(self, name: str):
stage = Stage(name)
stage.next = stage
stage.previous = self.last_added_stage
# End stage is a sink so don't
@@ -115,11 +153,16 @@
def reset_after(self, name):
"""
Resets the stage counters after a given stage.
This is to accurately reflect the name of the
model within MaGIC.
Resets the counters of every stage after
a given stage.
This is to accurately reflect how names are
generated within MaGIC.
Example: 1.1, 1.2, (reset after 1), 2.1, 2.2
"""
stage = self.stages[name]
# NOTE: The process_model stage doesn't
# have a counter associated with it.
while stage.name != "process_model":
stage.reset()
stage = stage.next
@@ -128,15 +171,26 @@
return self.stages[name]
def name(self):
"""
Get the full name of where we are within
the parsing process. Takes into account
the stage number of all the stages seen
so far.
"""
# Stages haven't been added yet
result = ""
stage = self.first_stage
if stage is None:
return ""
# There's only one stage
result = f"{stage.num}"
if stage.next.name == "process_model":
return result
# Add every subsequent stage counter
# by appending .stage_num
stage = stage.next
while stage is not None:
result += f".{stage.num}"
@@ -167,19 +221,19 @@ def derive_stages(header: UglyHeader) -> Stages:
return stages
def parse_matrices(infile: SourceFile) -> List[Tuple[Model, Dict]]:
solutions = []
def parse_matrices(infile: SourceFile) -> Iterator[Tuple[Model, Dict[Operation, ModelFunction]]]:
header = parse_header(infile)
stages = derive_stages(header)
first_run = True
current_model_parts = ModelBuilder()
stage = stages.first_stage
while True:
match stage.name:
case "end":
break
case "process_model":
process_model(stages.name(), current_model_parts, solutions)
yield current_model_parts.build(stages.name())
stage = stage.next
case "size":
processed = process_sizes(infile, current_model_parts, first_run)
@@ -245,25 +299,25 @@ def parse_matrices(infile: SourceFile) -> List[Tuple[Model, Dict]]:
stages.reset_after(stage.name)
stage = stage.previous
return solutions
def process_sizes(infile: SourceFile, current_model_parts: ModelBuilder, first_run: bool) -> bool:
size: Optional[int] = None
try:
size = parse_size(infile, first_run)
except StopIteration:
return False
pass
if size is None:
return False
carrier_set = carrier_set_from_size(size)
current_model_parts.carrier_set = carrier_set
carrier_list = carrier_list_from_size(size)
current_model_parts.carrier_list = carrier_list
current_model_parts.size = size
return True
def process_negations(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
"""Stage 2 (Optional)"""
mnegation = parse_single_monadic_connective(infile, "¬", current_model_parts.size)
mnegation = parse_single_monadic_connective(infile, "¬", current_model_parts.size, current_model_parts.carrier_list)
if mnegation is None:
return False
@@ -272,7 +326,7 @@ def process_negations(infile: SourceFile, current_model_parts: ModelBuilder) ->
def process_orders(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
"""Stage 3"""
result = parse_single_order(infile, current_model_parts.size)
result = parse_single_order(infile, current_model_parts.size, current_model_parts.carrier_list)
if result is None:
return False
@@ -285,7 +339,7 @@ def process_orders(infile: SourceFile, current_model_parts: ModelBuilder) -> boo
def process_designateds(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
"""Stage 4"""
designated_values = parse_single_designated(infile, current_model_parts.size)
designated_values = parse_single_designated(infile, current_model_parts.size, current_model_parts.carrier_list)
if designated_values is None:
return False
@@ -294,7 +348,7 @@ def process_designateds(infile: SourceFile, current_model_parts: ModelBuilder) -
def process_implications(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
"""Stage 5"""
mimplication = parse_single_dyadic_connective(infile, "", current_model_parts.size)
mimplication = parse_single_dyadic_connective(infile, "", current_model_parts.size, current_model_parts.carrier_list)
if mimplication is None:
return False
@@ -302,7 +356,7 @@ def process_implications(infile: SourceFile, current_model_parts: ModelBuilder)
return True
def process_necessitations(infile: SourceFile, current_model_parts: ModelBuilder) -> bool:
mnecessitation = parse_single_monadic_connective(infile, "!", current_model_parts.size)
mnecessitation = parse_single_monadic_connective(infile, "!", current_model_parts.size, current_model_parts.carrier_list)
if mnecessitation is None:
return False
@@ -313,9 +367,9 @@ def process_custom_connective(infile: SourceFile, symbol: str, adicity: int, cur
if adicity == 0:
mfunction = parse_single_nullary_connective(infile, symbol)
elif adicity == 1:
mfunction = parse_single_monadic_connective(infile, symbol, current_model_parts.size)
mfunction = parse_single_monadic_connective(infile, symbol, current_model_parts.size, current_model_parts.carrier_list)
elif adicity == 2:
mfunction = parse_single_dyadic_connective(infile, symbol, current_model_parts.size)
mfunction = parse_single_dyadic_connective(infile, symbol, current_model_parts.size, current_model_parts.carrier_list)
else:
raise NotImplementedError("Unable to process connectives of adicity greater than 2")
@@ -325,38 +379,6 @@ def process_custom_connective(infile: SourceFile, symbol: str, adicity: int, cur
current_model_parts.custom_model_functions[symbol] = mfunction
return True
def process_model(model_name: str, mp: ModelBuilder, solutions: List[Tuple[Model, Dict]]):
"""Create Model"""
assert mp.size > 0
assert mp.size + 1 == len(mp.carrier_set)
assert len(mp.designated_values) <= len(mp.carrier_set)
assert mp.mimplication is not None
logical_operations = { mp.mimplication }
model = Model(mp.carrier_set, logical_operations, mp.designated_values, ordering=mp.ordering, name=model_name)
interpretation = {
Implication: mp.mimplication
}
if mp.mnegation is not None:
logical_operations.add(mp.mnegation)
interpretation[Negation] = mp.mnegation
if mp.mconjunction is not None:
logical_operations.add(mp.mconjunction)
interpretation[Conjunction] = mp.mconjunction
if mp.mdisjunction is not None:
logical_operations.add(mp.mdisjunction)
interpretation[Disjunction] = mp.mdisjunction
if mp.mnecessitation is not None:
logical_operations.add(mp.mnecessitation)
interpretation[Necessitation] = mp.mnecessitation
for custom_mf in mp.custom_model_functions.values():
if custom_mf is not None:
logical_operations.add(custom_mf)
op = Operation(custom_mf.operation_name, custom_mf.arity)
interpretation[op] = custom_mf
solutions.append((model, interpretation))
def parse_header(infile: SourceFile) -> UglyHeader:
"""
@@ -377,20 +399,19 @@ def parse_header(infile: SourceFile) -> UglyHeader:
custom_model_functions.append((arity, symbol))
return UglyHeader(negation_defined, necessitation_defined, custom_model_functions)
def carrier_set_from_size(size: int) -> Set[ModelValue]:
def carrier_list_from_size(size: int) -> List[ModelValue]:
"""
Construct a carrier list of model values
based on the desired size.
"""
return {
return [
mvalue_from_index(i) for i in range(size + 1)
}
]
def parse_size(infile: SourceFile, first_run: bool) -> Optional[int]:
"""
Parse the line representing the matrix size.
"""
size = int(infile.next_line())
# HACK: When necessitation and custom connectives are enabled
@@ -401,7 +422,9 @@ def parse_size(infile: SourceFile, first_run: bool) -> Optional[int]:
if size == -1:
return None
assert size > 0, f"Unexpected size at line {infile.current_line}"
assert size > 0, f"Unexpected size at line {infile.line_in_file}"
return size
def mvalue_from_index(i: int) -> ModelValue:
@@ -417,55 +440,9 @@ def parse_mvalue(x: str) -> ModelValue:
"""
return mvalue_from_index(int(x))
def determine_cresult(size: int, ordering: Dict[ModelValue, ModelValue], a: ModelValue, b: ModelValue) -> ModelValue:
"""
Determine what a ∧ b should be given the ordering table.
"""
for i in range(size + 1):
c = mvalue_from_index(i)
if not ordering[(c, a)]:
continue
if not ordering[(c, b)]:
continue
invalid = False
for j in range(size + 1):
d = mvalue_from_index(j)
if c == d:
continue
if ordering[(c, d)]:
if ordering[(d, a)] and ordering [(d, b)]:
invalid = True
if not invalid:
return c
def determine_dresult(size: int, ordering: Dict[ModelValue, ModelValue], a: ModelValue, b: ModelValue) -> ModelValue:
"""
Determine what a ∨ b should be given the ordering table.
"""
for i in range(size + 1):
c = mvalue_from_index(i)
if not ordering[(a, c)]:
continue
if not ordering[(b, c)]:
continue
invalid = False
for j in range(size + 1):
d = mvalue_from_index(j)
if d == c:
continue
if ordering[(d, c)]:
if ordering[(a, d)] and ordering[(b, d)]:
invalid = True
if not invalid:
return c
def parse_single_order(infile: SourceFile, size: int) -> Optional[Tuple[OrderTable, ModelFunction, ModelFunction]]:
def parse_single_order(infile: SourceFile, size: int, carrier_list: List[ModelValue]) -> Optional[
Tuple[OrderTable, Optional[ModelFunction], Optional[ModelFunction]]]:
"""
Parse the line representing the ordering table
"""
@@ -475,50 +452,38 @@ def parse_single_order(infile: SourceFile, size: int) -> Optional[Tuple[OrderTab
table = line.split(" ")
assert len(table) == (size + 1)**2, f"Order table doesn't match expected size at line {infile.current_line}"
assert len(table) == (size + 1)**2, f"Order table doesn't match expected size at line {infile.line_in_file}"
ordering = OrderTable()
omapping = {}
table_i = 0
for i in range(size + 1):
x = mvalue_from_index(i)
for j in range(size + 1):
y = mvalue_from_index(j)
omapping[(x, y)] = table[table_i] == '1'
if table[table_i] == '1':
ordering.add(x, y)
table_i += 1
for x, y in product(carrier_list, carrier_list):
omapping[(x, y)] = table[table_i] == '1'
if table[table_i] == '1':
ordering.add(x, y)
table_i += 1
cmapping = {}
dmapping = {}
for i in range(size + 1):
x = mvalue_from_index(i)
for j in range(size + 1):
y = mvalue_from_index(j)
cresult = determine_cresult(size, omapping, x, y)
if cresult is None:
print("[Warning] Conjunction and Disjunction are not well-defined")
print(f"{x}{y} = ??")
return None, None
cmapping[(x, y)] = cresult
dresult = determine_dresult(size, omapping, x, y)
if dresult is None:
print("[Warning] Conjunction and Disjunction are not well-defined")
print(f"{x} {y} = ??")
return None, None
dmapping[(x, y)] = dresult
for x, y in product(carrier_list, carrier_list):
cresult = ordering.meet(x, y)
if cresult is None:
return ordering, None, None
cmapping[(x, y)] = cresult
dresult = ordering.join(x, y)
if dresult is None:
return ordering, None, None
dmapping[(x, y)] = dresult
mconjunction = ModelFunction(2, cmapping, "∧")
mdisjunction = ModelFunction(2, dmapping, "∨")
return ordering, mconjunction, mdisjunction
def parse_single_designated(infile: SourceFile, size: int) -> Optional[Set[ModelValue]]:
def parse_single_designated(infile: SourceFile, size: int, carrier_list: List[ModelValue]) -> Optional[Set[ModelValue]]:
"""
Parse the line representing which model values are designated.
"""
@@ -527,13 +492,12 @@ def parse_single_designated(infile: SourceFile, size: int) -> Optional[Set[Model
return None
row = line.split(" ")
assert len(row) == size + 1, f"Designated table doesn't match expected size at line {infile.current_line}"
assert len(row) == size + 1, f"Designated table doesn't match expected size at line {infile.line_in_file}"
designated_values = set()
for i, j in zip(range(size + 1), row):
for x, j in zip(carrier_list, row):
if j == '1':
x = mvalue_from_index(i)
designated_values.add(x)
return designated_values
@@ -545,29 +509,28 @@ def parse_single_nullary_connective(infile: SourceFile, symbol: str) -> Optional
return None
row = line.split(" ")
assert len(row) == 1, f"More than one assignment for a nullary connective was provided at line {infile.current_line}"
assert len(row) == 1, f"More than one assignment for a nullary connective was provided at line {infile.line_in_file}"
mapping = {}
mapping[()] = parse_mvalue(row[0])
return ModelFunction(0, mapping, symbol)
def parse_single_monadic_connective(infile: SourceFile, symbol: str, size: int) -> Optional[ModelFunction]:
def parse_single_monadic_connective(infile: SourceFile, symbol: str, size: int, carrier_list: List[ModelValue]) -> Optional[ModelFunction]:
line = infile.next_line()
if line == '-1':
return None
row = line.split(" ")
assert len(row) == size + 1, f"{symbol} table doesn't match size at line {infile.current_line}"
assert len(row) == size + 1, f"{symbol} table doesn't match size at line {infile.line_in_file}"
mapping = {}
for i, j in zip(range(size + 1), row):
x = mvalue_from_index(i)
for x, j in zip(carrier_list, row):
y = parse_mvalue(j)
mapping[(x, )] = y
return ModelFunction(1, mapping, symbol)
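As a concrete example of this parsing shape: with size 1, a monadic table line "1 0" maps the first carrier element to the second and back (a classical-negation-like swap), since each column stores the image of the corresponding carrier element as a value index. A sketch using an in-memory file (the neg name is invented):

from io import StringIO
from parse_magic import SourceFile, parse_single_monadic_connective, mvalue_from_index

src = SourceFile(StringIO("1 0\n"))
carrier = [mvalue_from_index(0), mvalue_from_index(1)]
neg = parse_single_monadic_connective(src, "¬", 1, carrier)
assert neg(carrier[0]) == carrier[1]
assert neg(carrier[1]) == carrier[0]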
def parse_single_dyadic_connective(infile: SourceFile, symbol: str, size: int) -> Optional[ModelFunction]:
def parse_single_dyadic_connective(infile: SourceFile, symbol: str, size: int, carrier_list: List[ModelValue]) -> Optional[ModelFunction]:
first_token = next(infile)
if first_token == "-1":
return None
@@ -576,20 +539,14 @@ def parse_single_dyadic_connective(infile: SourceFile, symbol: str, size: int) -
try:
table = [first_token] + [next(infile) for _ in range((size + 1)**2 - 1)]
except StopIteration:
pass
assert len(table) == (size + 1)**2, f"{symbol} table does not match expected size at line {infile.current_line}"
raise Exception(f"{symbol} table does not match expected size at line {infile.line_in_file}")
mapping = {}
table_i = 0
for i in range(size + 1):
x = mvalue_from_index(i)
for j in range(size + 1):
y = mvalue_from_index(j)
r = parse_mvalue(table[table_i])
table_i += 1
mapping[(x, y)] = r
for x, y in product(carrier_list, carrier_list):
r = parse_mvalue(table[table_i])
table_i += 1
mapping[(x, y)] = r
return ModelFunction(2, mapping, symbol)

195
vsp.py

@@ -2,6 +2,7 @@
Check to see if the model has the variable
sharing property.
"""
from collections import defaultdict
from itertools import chain, combinations, product
from typing import List, Optional, Set, Tuple
from common import set_to_str
@@ -9,75 +10,36 @@ from model import (
Model, model_closure, ModelFunction, ModelValue, OrderTable
)
def preseed(
initial_set: Set[ModelValue],
cache:List[Tuple[Set[ModelValue], Set[ModelValue]]]):
"""
Given a cache of previous model_closure calls,
use this to compute an initial model closure
set based on the initial set.
Basic Idea:
Let {1, 2, 3} -> X be in the cache.
If {1,2,3} is a subset of initial set,
then X is the subset of the output of model_closure.
This is used to speed up subsequent calls to model_closure
"""
candidate_preseed: Tuple[Set[ModelValue], int] = (None, None)
for i, o in cache:
if i < initial_set:
cost = len(initial_set - i)
# If i is a subset with less missing elements than
# the previous candidate, then it's the new candidate.
if candidate_preseed[1] is None or cost < candidate_preseed[1]:
candidate_preseed = o, cost
same_set = candidate_preseed[1] == 0
return candidate_preseed[0], same_set
class Cache:
def __init__(self):
# input size -> cached (inputs, outputs)
self.c = defaultdict(list)
def add(self, i: Set[ModelValue], o: Set[ModelValue]):
self.c[len(i)].append((i, o))
def get_closest(self, initial_set: Set[ModelValue]) -> Optional[Tuple[Set[ModelValue], bool]]:
"""
Iterate through our cache starting with the cached
inputs closest in size to the initial_set and
find the one that's a subset of initial_set.
Returns cached_output, and whether the initial_set is the same
as the cached_input.
"""
initial_set_size = len(initial_set)
sizes = range(initial_set_size, 0, -1)
for size in sizes:
if size not in self.c:
continue
for cached_input, cached_output in self.c[size]:
if cached_input <= initial_set:
return cached_output, size == initial_set_size
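A small sketch of how this cache is used further down in has_vsp (the values here are invented):

from model import ModelValue
from vsp import Cache

a, b, c = ModelValue("a"), ModelValue("b"), ModelValue("c")
cache = Cache()
cache.add({a}, {a, b})            # remember that the closure of {a} was {a, b}
hit = cache.get_closest({a, c})   # {a} is the largest cached subset of {a, c}
assert hit == ({a, b}, False)     # False: the cached input wasn't the exact set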
def find_top(algebra: Set[ModelValue], mconjunction: Optional[ModelFunction], mdisjunction: Optional[ModelFunction]) -> Optional[ModelValue]:
"""
Find the top of the order lattice.
T || a = T, T && a = a for all a in the carrier set
"""
if mconjunction is None or mdisjunction is None:
return None
for x in algebra:
is_top = True
for y in algebra:
if mdisjunction(x, y) != x or mconjunction(x, y) != y:
is_top = False
break
if is_top:
return x
print("[Warning] Failed to find the top of the lattice")
return None
def find_bottom(algebra: Set[ModelValue], mconjunction: Optional[ModelFunction], mdisjunction: Optional[ModelFunction]) -> Optional[ModelValue]:
"""
Find the bottom of the order lattice
F || a = a, F && a = F for all a in the carrier set
"""
if mconjunction is None or mdisjunction is None:
return None
for x in algebra:
is_bottom = True
for y in algebra:
if mdisjunction(x, y) != y or mconjunction(x, y) != x:
is_bottom = False
break
if is_bottom:
return x
print("[Warning] Failed to find the bottom of the lattice")
return None
def order_dependent(subalgebra1: Set[ModelValue], subalgebra2: Set[ModelValue], ordering: OrderTable):
"""
Returns true if there exists a value in subalgebra1 that's less than a value in subalgebra2
@@ -106,17 +68,44 @@ Subalgebra 1: {set_to_str(self.subalgebra1)}
Subalgebra 2: {set_to_str(self.subalgebra2)}
"""
def quick_vsp_unsat_incomplete(xs, ys, model, top, bottom, negation_defined) -> bool:
"""
Return True if VSP cannot be satisfied
through some incomplete checks.
"""
# If the left subalgebra contains bottom
# or the right subalgebra contains top
# skip this pair
if top is not None and top in ys:
return True
if negation_defined and bottom is not None and bottom in ys:
return True
if bottom is not None and bottom in xs:
return True
if negation_defined and top is not None and top in xs:
return True
# If the two subalgebras intersect, move
# onto the next pair.
if not xs.isdisjoint(ys):
return True
# If the subalgebras are order-dependent, skip this pair
if order_dependent(xs, ys, model.ordering):
return True
if negation_defined and order_dependent(ys, xs, model.ordering):
return True
# We can't immediately rule out that these
# subalgebras witness VSP
return False
def has_vsp(model: Model, impfunction: ModelFunction,
mconjunction: Optional[ModelFunction] = None,
mdisjunction: Optional[ModelFunction] = None,
mnegation: Optional[ModelFunction] = None) -> VSP_Result:
negation_defined: bool) -> VSP_Result:
"""
Checks whether a model has the variable
sharing property.
"""
top = find_top(model.carrier_set, mconjunction, mdisjunction)
bottom = find_bottom(model.carrier_set, mconjunction, mdisjunction)
# NOTE: No model with only one designated
# value satisfies VSP
if len(model.designated_values) == 1:
@@ -124,68 +113,44 @@ def has_vsp(model: Model, impfunction: ModelFunction,
assert model.ordering is not None, "Expected ordering table in model"
top = model.ordering.top()
bottom = model.ordering.bottom()
# Compute I, the set of tuples (x, y) where
# x -> y does not take a designated value
I: Set[Tuple[ModelValue, ModelValue]] = set()
I: List[Tuple[ModelValue, ModelValue]] = []
for (x, y) in product(model.carrier_set, model.carrier_set):
if impfunction(x, y) not in model.designated_values:
I.add((x, y))
I.append((x, y))
# Construct the powerset of I without the empty set
s = list(I)
I_power = chain.from_iterable(combinations(s, r) for r in range(1, len(s) + 1))
I_power = chain.from_iterable(combinations(I, r) for r in range(1, len(I) + 1))
# ((x1, y1)), ((x1, y1), (x2, y2)), ...
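That construction in isolation, for illustration:

from itertools import chain, combinations

pairs = [("x1", "y1"), ("x2", "y2")]
powerset = chain.from_iterable(combinations(pairs, r) for r in range(1, len(pairs) + 1))
print(list(powerset))
# [(('x1', 'y1'),), (('x2', 'y2'),), (('x1', 'y1'), ('x2', 'y2'))]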
# Closure cache
closure_cache: List[Tuple[Set[ModelValue], Set[ModelValue]]] = []
closure_cache = Cache()
# Find the subalgebras which falsify implication
for xys in I_power:
xs = {xy[0] for xy in xys}
xs = { xy[0] for xy in xys }
ys = { xy[1] for xy in xys }
if quick_vsp_unsat_incomplete(xs, ys, model, top, bottom, negation_defined):
continue
orig_xs = xs
cached_xs = preseed(xs, closure_cache)
if cached_xs[0] is not None:
cached_xs = closure_cache.get_closest(xs)
if cached_xs is not None:
xs |= cached_xs[0]
ys = {xy[1] for xy in xys}
orig_ys = ys
cached_ys = preseed(ys, closure_cache)
if cached_ys[0] is not None:
cached_ys = closure_cache.get_closest(ys)
if cached_ys is not None:
ys |= cached_ys[0]
# NOTE: Optimization before model_closure
# If the two subalgebras intersect, move
# onto the next pair.
if len(xs & ys) > 0:
continue
# NOTE: Optimization
# If a subalgebra doesn't have at least one
# designated value, move onto the next pair.
# Depends on no intersection between xs and ys
if len(xs & model.designated_values) == 0:
continue
if len(ys & model.designated_values) == 0:
continue
# NOTE: Optimization
# If the left subalgebra contains bottom
# or the right subalgebra contains top
# skip this pair
if top is not None and top in ys:
continue
if bottom is not None and bottom in xs:
continue
# NOTE: Optimization
# If the subalgebras are order-dependent, skip this pair
if order_dependent(xs, ys, model.ordering):
continue
if mnegation is not None and order_dependent(ys, xs, model.ordering):
xs_ys_updated = cached_xs is not None or cached_ys is not None
if xs_ys_updated and quick_vsp_unsat_incomplete(xs, ys, model, top, bottom, negation_defined):
continue
# Compute the closure of all operations
@@ -193,8 +158,8 @@ def has_vsp(model: Model, impfunction: ModelFunction,
carrier_set_left: Set[ModelValue] = model_closure(xs, model.logical_operations, bottom)
# Save to cache
if cached_xs[0] is not None and not cached_ys[1]:
closure_cache.append((orig_xs, carrier_set_left))
if cached_xs is None or (cached_xs is not None and not cached_xs[1]):
closure_cache.add(orig_xs, carrier_set_left)
if bottom is not None and bottom in carrier_set_left:
continue
@@ -204,15 +169,15 @@ def has_vsp(model: Model, impfunction: ModelFunction,
carrier_set_right: Set[ModelValue] = model_closure(ys, model.logical_operations, top)
# Save to cache
if cached_ys[0] is not None and not cached_ys[1]:
closure_cache.append((orig_ys, carrier_set_right))
if cached_ys is None or (cached_ys is not None and not cached_ys[1]):
closure_cache.add(orig_ys, carrier_set_right)
if top is not None and top in carrier_set_right:
continue
# If the carrier set intersects, then move on to the next
# subalgebra
if len(carrier_set_left & carrier_set_right) > 0:
if not carrier_set_left.isdisjoint(carrier_set_right):
continue
# See if for all pairs in the subalgebras, that


@@ -1,17 +1,202 @@
#!/usr/bin/env python3
from os import cpu_count
from datetime import datetime
from typing import Dict, Iterator, Optional, Tuple
from queue import Empty as QueueEmpty
import argparse
import multiprocessing as mp
from logic import Conjunction, Disjunction, Negation, Implication
from logic import Negation, Implication, Operation
from model import Model, ModelFunction
from parse_magic import SourceFile, parse_matrices
from vsp import has_vsp, VSP_Result
def print_with_timestamp(message):
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{current_time}] {message}", flush=True)
def restructure_solutions(solutions: Iterator[Tuple[Model, Dict[Operation, ModelFunction]]], skip_to: Optional[str]) -> \
Iterator[Tuple[Model, ModelFunction, bool]]:
"""
When subprocess gets spawned, the logical operations will
have a different memory address than what's expected in interpretation.
Therefore, we need to pass the model functions directly instead.
While we're at it, filter out models until we get to --skip-to
if applicable.
"""
start_processing = skip_to is None
for model, interpretation in solutions:
# If skip_to is defined, then don't process models
# until then.
if not start_processing and model.name != skip_to:
continue
start_processing = True
# NOTE: Implication must be defined; the rest may not be
impfunction = interpretation[Implication]
negation_defined = Negation in interpretation
yield (model, impfunction, negation_defined)
def has_vsp_plus_model(model, impfunction, negation_defined) -> Tuple[Optional[Model], VSP_Result]:
"""
Wrapper which also returns the model along with its VSP result
"""
vsp_result = has_vsp(model, impfunction, negation_defined)
# NOTE: Memory optimization - Don't return model if it doesn't have VSP
model = model if vsp_result.has_vsp else None
return (model, vsp_result)
def worker_vsp(task_queue: mp.Queue, result_queue: mp.Queue):
"""
Worker process which processes models from the task
queue and adds the result to the result_queue.
Adds the sentinel value None when an exception occurs and when there's
nothing more to process.
"""
try:
while True:
task = task_queue.get()
# If sentinel value, break
if task is None:
break
(model, impfunction, negation_defined) = task
result = has_vsp_plus_model(model, impfunction, negation_defined)
result_queue.put(result)
finally:
# Either an exception occurred or the worker finished
# Push sentinel value
result_queue.put(None)
def worker_parser(data_file_path: str, num_sentinel_values: int, task_queue: mp.Queue, skip_to: Optional[str]):
"""
Function which parses the MaGIC file
and adds models to the task_queue.
Intended to be deployed with a dedicated process.
"""
try:
data_file = open(data_file_path, "r")
solutions = parse_matrices(SourceFile(data_file))
solutions = restructure_solutions(solutions, skip_to)
while True:
try:
item = next(solutions)
task_queue.put(item)
except StopIteration:
break
finally:
data_file.close()
for _ in range(num_sentinel_values):
task_queue.put(None)
def multi_process_runner(num_cpu: int, data_file_path: str, skip_to: Optional[str]):
"""
Run VSPursuer in a multi-process configuration.
"""
assert num_cpu > 1
num_tested = 0
num_has_vsp = 0
num_workers = num_cpu - 1
# Create queues
task_queue = mp.Queue(maxsize=1000)
result_queue = mp.Queue()
# Create dedicated process to parse the MaGIC file
process_parser = mp.Process(target=worker_parser, args=(data_file_path, num_workers, task_queue, skip_to))
process_parser.start()
# Create dedicated processes which check VSP
process_workers = []
for _ in range(num_workers):
p = mp.Process(target=worker_vsp, args=(task_queue, result_queue))
process_workers.append(p)
p.start()
# Check results and add new tasks until finished
result_sentinel_count = 0
while True:
# Read a result
try:
result = result_queue.get(True, 60)
except QueueEmpty:
if all((not p.is_alive() for p in process_workers)):
# All workers finished without us receiving all the
# sentinel values.
break
task_queue_size = 0
try:
task_queue_size = task_queue.qsize()
except NotImplementedError:
# macOS doesn't implement this
pass
if task_queue_size == 0 and not process_parser.is_alive():
# For Linux/Windows this means that the process_parser
# died before sending the sentinel values.
# For macOS, this doesn't guarantee anything but we might
# as well push more sentinel values.
for _ in range(num_workers):
task_queue.put(None)
# Don't do anymore work, wait again for a result
continue
# When we receive None, it means a child process has finished
if result is None:
result_sentinel_count += 1
# If all workers have finished, break
if result_sentinel_count == len(process_workers):
break
continue
# Process result
model, vsp_result = result
print_with_timestamp(vsp_result)
num_tested += 1
if vsp_result.has_vsp:
print(model)
if vsp_result.has_vsp:
num_has_vsp += 1
print_with_timestamp(f"Tested {num_tested} models, {num_has_vsp} of which satisfy VSP")
def single_process_runner(data_file_path: str, skip_to: Optional[str]):
num_tested = 0
num_has_vsp = 0
data_file = open(data_file_path, "r")
solutions = parse_matrices(SourceFile(data_file))
solutions = restructure_solutions(solutions, skip_to)
for model, impfunction, negation_defined in solutions:
model, vsp_result = has_vsp_plus_model(model, impfunction, negation_defined)
print_with_timestamp(vsp_result)
num_tested += 1
if vsp_result.has_vsp:
print(model)
if vsp_result.has_vsp:
num_has_vsp += 1
print_with_timestamp(f"Tested {num_tested} models, {num_has_vsp} of which satisfy VSP")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="VSP Checker")
parser.add_argument("--verbose", action='store_true', help="Print out all parsed matrices")
parser.add_argument("-i", type=str, help="Path to MaGIC ugly data file")
parser.add_argument("-c", type=int, help="Number of CPUs to use. Default: MAX - 2.")
parser.add_argument("-c", type=int, help="Number of CPUs to use. Default: 1")
parser.add_argument("--skip-to", type=str, help="Skip until a model name is found and process from then onwards.")
args = vars(parser.parse_args())
@@ -19,47 +204,11 @@ if __name__ == "__main__":
if data_file_path is None:
data_file_path = input("Path to MaGIC Ugly Data File: ")
solutions = []
with open(data_file_path, "r") as data_file:
solutions = parse_matrices(SourceFile(data_file))
print(f"Parsed {len(solutions)} matrices")
num_cpu = args.get("c")
if num_cpu is None:
num_cpu = 1
start_processing = args.get("skip_to") is None
# NOTE: When subprocess gets spawned, the logical operations will
# have a different memory address than what's expected in interpretation.
# Therefore, we need to pass the model functions directly instead.
solutions_expanded = []
for model, interpretation in solutions:
# If skip_to is defined, then don't process models
# until then.
if not start_processing and model.name == args.get("skip_to"):
start_processing = True
if not start_processing:
continue
impfunction = interpretation[Implication]
mconjunction = interpretation.get(Conjunction)
mdisjunction = interpretation.get(Disjunction)
mnegation = interpretation.get(Negation)
solutions_expanded.append((model, impfunction, mconjunction, mdisjunction, mnegation))
num_has_vsp = 0
num_cpu = args.get("c", max(cpu_count() - 2, 1))
with mp.Pool(processes=num_cpu) as pool:
results = [
pool.apply_async(has_vsp, (model, impfunction, mconjunction, mdisjunction, mnegation))
for model, impfunction, mconjunction, mdisjunction, mnegation in solutions_expanded
]
for i, result in enumerate(results):
vsp_result: VSP_Result = result.get()
print(vsp_result)
if args['verbose'] or vsp_result.has_vsp:
model = solutions_expanded[i][0]
print(model)
if vsp_result.has_vsp:
num_has_vsp += 1
print(f"Tested {len(solutions_expanded)} models, {num_has_vsp} of which satisfy VSP")
if num_cpu == 1:
single_process_runner(data_file_path, args.get("skip_to"))
else:
multi_process_runner(num_cpu, data_file_path, args.get("skip_to"))