Commit fa409b85 authored by Pierre Donat-Bouillud's avatar Pierre Donat-Bouillud

More complex audio effects based on Faust

parent 9a78f2c2
Pipeline #970 failed in 7 minutes and 33 seconds
import("stdfaust.lib");
process = pm.brassModel(3, 0.6, 0.9, 0.8);
import("stdfaust.lib");
maxdel=16;
del=5.5;
aN=0.8;
process = fi.allpass_fcomb(maxdel,del,aN) ;
import("stdfaust.lib");
process = pm.guitar(2, 0.3, 0.8, 1);
import("stdfaust.lib");
process = sp.panner(0.8);
import("stdfaust.lib");
process = no.pink_noise_vm(10);
import("stdfaust.lib");
process = os.sawtooth(440);
import("stdfaust.lib");
process = sp.spat(4, 0.7, 0.9);
import("stdfaust.lib");
process = ef.transpose(128, 32, 5);
import("stdfaust.lib");
process = pm.violinModel(2, 0.4, 0.5, 0.6);
import("stdfaust.lib");
process = ve.wah4(893);
import("stdfaust.lib");
process = re.zita_rev1_stereo(5, 100, 600, 3, 6, 44100);
import("stdfaust.lib");
process = re.zita_rev1_stereo(
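Each of the snippets above is a self-contained Faust program; the Python script below converts such programs into Rust audio effects. A minimal sketch of the compilation step it presumably wraps, assuming the Faust compiler and its Rust backend ("faust -lang rust") are installed; the helper name and flags are illustrative, not the script's actual code:

import subprocess

def faust_to_rust(dsp_path, rust_path):
    # Compile one Faust .dsp file to Rust with the Faust compiler's Rust backend.
    # The exact options passed by the real script may differ.
    subprocess.run(["faust", "-lang", "rust", "-o", rust_path, dsp_path], check=True)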
#!/usr/bin/python3
"""
Convert a faust effect to an audio effect (in Rust)
For one file or for a bunch of files (and put them all in one Rust file)
"""
import argparse
import subprocess
import os
parser = argparse.ArgumentParser(description="Convert a faust effect to an audio effect (in Rust)")
parser.add_argument("-d", "--directory", help="Process all Faust dsp files in the given directory and bundle them in one Rust file.")
parser.add_argument("-o", "--output", help="Name of the output file")
args = parser.parse_args()
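The --directory option is documented as processing every Faust .dsp file in a directory and bundling the results into one Rust file. A hedged sketch of what that mode could look like, reusing the hypothetical faust_to_rust helper above; the module layout and naming are assumptions, not the script's actual output format:

import glob
import os

def bundle_directory(directory, output):
    # Compile each .dsp file, then concatenate the generated Rust code,
    # wrapping every effect in its own module (layout assumed).
    with open(output, "w") as out:
        for dsp in sorted(glob.glob(os.path.join(directory, "*.dsp"))):
            name = os.path.splitext(os.path.basename(dsp))[0]
            rust_file = name + ".rs"
            faust_to_rust(dsp, rust_file)
            with open(rust_file) as generated:
                out.write("pub mod " + name + " {\n" + generated.read() + "\n}\n")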
@@ -37,6 +37,7 @@ parser.add_argument("-a", "--all", help="Explore all sizes up to the one precise
 parser.add_argument("-d", "--draw", help="Draw graph of quality and cost.", action="store_true")
 parser.add_argument("--only-draw", help="Only draws graph", action="store_true")
 parser.add_argument("--no-generation", help="Only execute the graphs previously generated. Expects a precise naming of the graphs", action="store_true")
+parser.add_argument("--continue-exec", help="Continue execution where it was left.", action="store_true")
 gen_option = parser.add_mutually_exclusive_group()
 gen_option.add_argument("-r", "--random", help="Randomly generates the graphs", action="store_true")
 gen_option.add_argument("-z", "--from-graphs", help="Use a set of already generated graphs", action="store_true")
@@ -68,12 +69,18 @@ finally:
     print("Graph enumeration with: ", graph_enum)
     print("Using node dictionary: ", nodes_dic)
-def index(a, x):
-    'Locate the leftmost value exactly equal to x'
-    i = bisect.bisect_left(a, x)
-    if i != len(a) and a[i] == x:
-        return i
-    raise ValueError
+def find_last_file():
+    #each execution generates a -rt.csv file
+    # Pick the most recent one
+    last = list(sorted(glob.iglob("*-rt.csv"), reverse=True, key=lambda f: os.path.getmtime(f)))[0]
+    # Check if there is also the corresponding wav file
+    basename = last.split("_")[1].rsplit("-", maxsplit=1)[0]
+    if not os.path.exists(basename+".wav"):
+        print("Impossible to continue. No wav file associated to it. ", last)
+        exit(-1)
+    else:
+        return basename
 def get_costs(csvname):
     """Get csv file with execution times and compute average execution time
@@ -91,6 +98,8 @@ def get_costs(csvname):
 def execute_graph(graph):
     tqdm.write("Executing graph " + graph)
     subprocess.run([graph_exec, "-m", "-b", "-c", "10000", graph], check=True)
+
+def get_exec_times(graph):
     # Get execution times for reports (-m option)
     basename,_ = os.path.splitext(os.path.basename(graph))
     reports = glob.glob("*"+basename + "*.csv")
@@ -148,7 +157,7 @@ class GraphResults:
 def process_all_graphs(nb_nodes, dirname, random=False, from_graphs=False):
     """Process on all weakly connected Dags up to nb_nodes"""
-    if not args.no_generation:
+    if (not args.no_generation) and (not args.continue_exec):
         tqdm.write("Enumerating weakily DAGs", end=" ")
         if from_graphs:
             tqdm.write("with at least", end=" ")
@@ -164,11 +173,18 @@ def process_all_graphs(nb_nodes, dirname, random=False, from_graphs=False):
         command.extend(["-ewxr", "-n", str(nb_nodes), "--node-file="+nodes_dic])
         subprocess.run(command, check=True)
+    last_basename=""
+    doexec=True
+    if args.continue_exec:
+        last_basename=find_last_file()
+        doexec=False
     nb_errors=0
     results={}
     tqdm.write("Executing graphs")
     #Group them by non-degraded graphs
-    for non_degraded_graph in tqdm(glob.iglob("*-0.ag")):
+    #TODO: rather sort them by number? (right now, lexicographic order)
+    for non_degraded_graph in tqdm(sorted(glob.iglob("*-0.ag"))):
         # Get the prefix for this graph
         prefix = non_degraded_graph.rsplit("-", maxsplit=1)[0]
         tqdm.write(prefix)
@@ -177,8 +193,13 @@ def process_all_graphs(nb_nodes, dirname, random=False, from_graphs=False):
            for graph in tqdm(sorted(glob.iglob(prefix+"*.ag"))):
                basename,_ = os.path.splitext(graph)
                result= GraphResults(basename)
-                costs = execute_graph(graph)
-                result.costs = costs
+                if not args.continue_exec or (doexec and basename != last_basename):
+                    #We need to execute the graph
+                    result.costs = execute_graph(graph)
+                elif not doexec and basename == last_basename:
+                    doexec = True
+                    result.costs = get_exec_times(graph)
                result_graph.append(result)
        except subprocess.CalledProcessError as err:
            if args.no_error:
@@ -208,6 +229,8 @@ def process_all_graphs(nb_nodes, dirname, random=False, from_graphs=False):
        # Meaningless to compute rank correlation on a vector of size 1
        if nb_degraded_graphs > 0:
+            #TODO: save results of analysis to be used if need to continue?
            # Get audio files
            audiofiles = glob.glob(prefix+"*.wav")
            qualities = compare_audio_files(audiofiles)
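compare_audio_files is defined outside this excerpt, so the quality metric itself is not visible here. As a placeholder illustration only (not the script's actual metric), a comparison could be as simple as a root-mean-square error between a degraded rendering and the non-degraded reference:

import numpy as np
from scipy.io import wavfile

def rms_error(reference_wav, degraded_wav):
    # Placeholder quality measure: RMS difference between two files assumed
    # to share the same sample rate and roughly the same length.
    _, ref = wavfile.read(reference_wav)
    _, deg = wavfile.read(degraded_wav)
    n = min(len(ref), len(deg))
    diff = ref[:n].astype(np.float64) - deg[:n].astype(np.float64)
    return float(np.sqrt(np.mean(diff ** 2)))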
...