# Source: https://git.tu-berlin.de/afigiel/undo-vc-drr
# Tip revision: 4e025b1d3b5ef68cd5804bf2712331b87c107c1e (4e025b1),
# authored by Aleksander Figiel on 04 June 2022, 12:32:09 UTC
# Commit message: "Added project files to git"
# File: shrink_instance.py
import sys
import argparse
import subprocess
def run_program(prog, args, graph_in, graph_out, timeout=None):
    """Run `prog` with the flag string `args`, feeding it `graph_in` on
    stdin and capturing stdout into `graph_out`.

    If `timeout` is given, the command is wrapped in the coreutils
    `timeout` utility (foreground, SIGINT) so the solver can be stopped
    softly after `timeout` seconds.
    """
    print("Running", prog, args, f"timeout={timeout}")
    # NOTE(review): the command line is assembled by string interpolation
    # and executed through the shell (needed for the < / > redirections);
    # do not pass untrusted strings for prog/args/paths.
    prefix = f"timeout --foreground --signal=SIGINT {timeout} " if timeout is not None else ""
    command = f"{prefix}{prog} {args} < {graph_in} > {graph_out}"
    subprocess.run([command], text=True, shell=True)
def apply_presolve(mode):
    """Run the presolver on the current graph with a rule set chosen by
    `mode` ("light", "medium", "medium+" or "heavy"), then rotate the
    next temporary file in as the new current graph.

    Mutates the module-level `current`, `tmps` and `used_tmps`.  An
    unrecognized mode runs no presolver but still rotates the files.
    """
    global current
    global tmps
    global used_tmps
    # reduction-rule sets, ordered from light to heavy presolving
    rrules_by_mode = {
        "light": "[[deg1,deg2],deg3,unconf,cn,lp,struction,magnet,desk,oe_sparsify]",
        "medium": "[[deg1,deg2],deg3,unconf,cn,lp,guided_struction,guided_unconf,desk,oe_sparsify]",
        "medium+": "[[deg1,deg2],deg3,unconf,cn,lp,guided_struction,guided_magnet,guided_unconf,desk,oe_sparsify]",
        "heavy": "[[deg1,deg2],deg3,unconf,cn,lp,guided_struction,guided_magnet,guided_unconf,relcrown,desk,oe_sparsify]",
    }
    rrules = rrules_by_mode.get(mode)
    if rrules is not None:
        run_program("./solver/main",
                    "--presolve-only --print-kernel --print-rec-instr --rrules=" + rrules,
                    current, tmps[0], timeout=None)
    # advance to the next temporary file regardless of the mode
    current = tmps[0]
    used_tmps.append(current)
    tmps = tmps[1:]
def return_word_in_match(file, grep_ptr, word_idx):
    """Return the whitespace-separated word at index `word_idx` from the
    first line of `file` matching the grep pattern `grep_ptr`.

    Returns -1 (an int, which callers pass through int()) when nothing
    matches or the matching line has fewer than word_idx+1 words.
    """
    # subprocess.run waits for grep and releases its pipes, unlike the
    # previous Popen-based version which leaked the process handles.
    # NOTE(review): pattern and path are interpolated into a shell
    # command; callers must not pass untrusted strings here.
    result = subprocess.run(['grep -m1 -e "' + grep_ptr + '" ' + file],
                            capture_output=True, text=True, shell=True)
    words = result.stdout.split()
    if word_idx >= len(words):
        return -1
    return words[word_idx]
# --- command-line interface ---------------------------------------------
parser = argparse.ArgumentParser(description="Shrink the size of a Vertex Cover instance")
# positional arguments: input graph and destination for the reduced graph
parser.add_argument('input', help="the file containing the graph (edge list format)")
parser.add_argument('output', help="the file to which the shrunk/reduced graph will be written to (edge list format)")
# soft time budget; split across phases when --mode=mixed (see below)
parser.add_argument('--timeout', default=600, type=int, help="soft timeout (in seconds) for the duration of the program (not exact) (default: 600)")
parser.add_argument('--mode', default="mixed", help="choose which method to use: ID, FAR or mixed (default: mixed)")
parser.add_argument('--presolve', default="medium", help="choose a class of data reduction rules to initially shrink the input: light, medium, medium+, heavy (default: medium)")
# tuning knobs for the two shrinking methods (FAR rule sequences, ID inflation)
parser.add_argument('--FAR', default=3, type=int, help="maximum length of a sequence of forward or backward rules used in FAR (default: 3)")
parser.add_argument('--ID', default=10, type=float, help="inflation percentage to use in Inflate Deflate (default: 10.0)")
args = parser.parse_args()
# pool of temporary file names, allocated next to the output file; each
# pipeline stage consumes one and appends it to used_tmps
tmps = [f"{args.output}_tmp{i}.dimacs" for i in range(20)]
used_tmps = []
current = args.input
# run presolving
apply_presolve(args.presolve)
# decide the schedule of shrinking phases and their time budgets
modes = []
times = []
if args.mode.upper() == "ID":
    modes, times = ["ID"], [args.timeout]
if args.mode.upper() == "FAR":
    modes, times = ["FAR"], [args.timeout]
if args.mode.lower() == "mixed":
    # mixed: bracket a long FAR phase with two shorter ID phases
    modes = ["ID", "FAR", "ID"]
    times = [args.timeout * 0.2, args.timeout * 0.6, args.timeout * 0.2]
# run undo-based shrinking, one temporary file per phase
for mode, time in zip(modes, times):
    if mode == "FAR":
        run_program("./rrfinder/main", "--mode=FAR --branch-depth=" + str(args.FAR), current, tmps[0], timeout=time)
    if mode == "ID":
        run_program("./rrfinder/main", "--mode=ID --max-inflate=" + str(args.ID), current, tmps[0], timeout=time)
    current = tmps[0]
    used_tmps.append(current)
    tmps = tmps[1:]
# run post-presolving
apply_presolve(args.presolve)
# produce final output
# gather some stats: original size from the first (presolve) stage,
# reduced size from the last stage's "# n/m: X -> Y" summary lines
n = int(return_word_in_match(used_tmps[0], "# vertices:", 2))
m = int(return_word_in_match(used_tmps[0], "# edges:", 2))
n_reduced = int(return_word_in_match(used_tmps[-1], "# n: [0-9]* -> ", 4))
m_reduced = int(return_word_in_match(used_tmps[-1], "# m: [0-9]* -> ", 4))
# kept open deliberately: the recovery-instruction and kernel sections
# below keep appending to this handle
output = open(args.output, "w")
print("# reduced graph:", file=output)
print("# n:", n, "->", n_reduced, file=output)
print("# m:", m, "->", m_reduced, file=output)
if n_reduced > 0:
    print("# average degree:", 2*m_reduced / n_reduced, file=output)
else:
    # emit the same token a C printf would for 0/0 so downstream
    # parsers see a consistent value
    print("# average degree: -nan", file=output)
# fixed: the header previously contained a stray ')' ("# graph history:)")
print("# graph history:", file=output)
# per-stage n before/after plus the surviving-vertex percentage
for i, file in enumerate(used_tmps):
    nn = int(return_word_in_match(file, "# n: [0-9]* -> ", 2))
    nr = int(return_word_in_match(file, "# n: [0-9]* -> ", 4))
    if nn > 0:
        ratio = str(nr / nn * 100)
    else:
        ratio = "-nan"
    print("# stage " + str(i) +" n:", nn, "->", nr, " ("+ratio+"%)", file=output)
print("# RECOVERY_INSTRUCTIONS START", file=output)
# concatenate recovery instructions, newest stage first, so undoing
# replays the reductions in reverse order of application
for file in used_tmps[::-1]:
    reading_instr = False
    # with-statement closes each stage file (the original leaked handles)
    with open(file, "r") as log:
        for line in log:
            if line.startswith("# RECOVERY_INSTRUCTIONS START"):
                reading_instr = True
            elif line.startswith("# RECOVERY_INSTRUCTIONS END"):
                reading_instr = False
            elif reading_instr:
                # lines keep their own '\n'; don't add another
                print(line, end='', file=output)
print("# RECOVERY_INSTRUCTIONS END", file=output)
# output last kernel: copy every non-comment line of the final stage
# (removed two dead leftover lines that re-set reading_instr and leaked
# an open handle on a stale loop variable)
with open(used_tmps[-1], "r") as kernel:
    for line in kernel:
        if not line.startswith("#"):
            print(line, end='', file=output)
# flush and close the result before deleting the intermediate files
output.close()
# delete temporary files; the argument-list form (no shell) is robust
# against spaces or shell metacharacters in the output path
for file in used_tmps:
    subprocess.run(["rm", "-f", file])