...
 
Commits (2)
@@ -76,7 +76,7 @@ def get_costs(csvname):
def execute_graph(graph):
tqdm.write("Executing graph " + graph)
subprocess.run([graph_exec, "-m", "-b", "-c", "60000", graph], check=True)
subprocess.run([graph_exec, "-m", "-b", "-c", "10000", graph], check=True)
# Get execution times for reports (-m option)
basename,_ = os.path.splitext(os.path.basename(graph))
reports = glob.glob("*"+basename + "*.csv")
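# Rough arithmetic behind the cycle-count change above (illustrative; it assumes the
# executable's constants from the Rust side of this changeset):
#   new: 10_000 cycles * 512 frames / 44_100 Hz ≈ 116 s of audio per graph
#   old: 60_000 cycles * 256 frames / 44_100 Hz ≈ 348 s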
@@ -89,10 +89,11 @@ def compare_audio_files(audiofiles):
audiofiles.sort()  # Number 0 is always the non-degraded file
non_degraded = audiofiles.pop(0)
tqdm.write("Non degraded file is: " + non_degraded)
tqdm.write("Degraded files are: " + str(audiofiles))
tqdm.write("Comparing degraded versions with non-degraded one.")
y_nd,sr_nd = quality.load_file(non_degraded, duration=2)
qualities = {}
for degraded in tqdm(audiofiles):
for degraded in tqdm(sorted(audiofiles)):
y,sr = quality.load_file(degraded, duration=2)
basename,_ = os.path.splitext(degraded)
qualities[basename] = quality.compare_specto(y_nd, sr_nd, y, sr)
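# Hedged sketch of what a spectral comparison like quality.compare_specto could do
# (illustrative only, assuming librosa; the project's actual metric may differ):
# compare magnitude spectrograms of the reference and the degraded signal.
import librosa
import numpy as np

def compare_specto_sketch(y_ref, sr_ref, y_deg, sr_deg):
    # Bring both signals to the same samplerate before comparing spectra.
    if sr_deg != sr_ref:
        y_deg = librosa.resample(y_deg, orig_sr=sr_deg, target_sr=sr_ref)
    n = min(len(y_ref), len(y_deg))
    spec_ref = np.abs(librosa.stft(y_ref[:n]))
    spec_deg = np.abs(librosa.stft(y_deg[:n]))
    # Relative spectral distance: 0 means identical, larger means more degraded.
    return np.linalg.norm(spec_ref - spec_deg) / max(np.linalg.norm(spec_ref), 1e-12)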
@@ -128,6 +129,9 @@ class GraphResults:
self.costs=None
self.quality=None
def __repr__(self):
return "{}: {}, {}".format(self.name, self.costs, self.quality)
def process_all_graphs(nb_nodes, dirname):
"""Process on all weakly connected Dags up to nb_nodes"""
tqdm.write("Enumerating weakily DAGs up to " + str(nb_nodes) + " nodes with result in " + dirname)
@@ -149,6 +153,20 @@ def process_all_graphs(nb_nodes, dirname):
result.costs = costs
result_graph.append(result)
# We also want to get the following measures:
# - are the worst/best graphs in terms of costs and quality the same in
# the theoretical models and in the experiments? How close are the two rankings (in inversions? in position distances? both are sketched below)?
# - are costs and qualities correlated? In the experimental model first. And in the theoretical one? (We could even prove it)
# - are all the degraded graphs faster than the non-degraded one? Is at least one? How many, and what percentage?
# Shape questions:
# - how many degraded graphs on average for one graph?
# - how many resamplers have been inserted? Downsamplers? Upsamplers?
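# Hedged sketch of the two ranking comparisons asked about above (illustrative
# code, not the project's implementation): given the measured and theoretical
# orderings of the same graph names, count pairwise inversions (what Kendall's
# tau normalises) and sum position distances (the Spearman footrule).
def count_inversions(ranking_a, ranking_b):
    # Pairs of graphs ordered one way by ranking_a and the other way by ranking_b.
    pos_b = {name: i for i, name in enumerate(ranking_b)}
    inversions = 0
    for i, x in enumerate(ranking_a):
        for y in ranking_a[i + 1:]:
            if pos_b[x] > pos_b[y]:
                inversions += 1
    return inversions

def position_distance(ranking_a, ranking_b):
    # Sum over all graphs of how far each one moved between the two rankings.
    pos_b = {name: i for i, name in enumerate(ranking_b)}
    return sum(abs(i - pos_b[name]) for i, name in enumerate(ranking_a))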
# TODO later: try to degrade in same order as heuristics and see if it correlates with the order in quality and in cost
# TODO: case of a source => use a real audio file? Or generate a sin wave? Or just noise? Or don't generate sources here?
# Because for now, sources just output a 0 signal, so we get the same quality for each version and
# it does not give a useful ranking for the measured quality.
# TODO: apply merge operation for resampler (the one that inserts a mixer and then a resampler instead of several resamplers)
# Meaningless to compute rank correlation on a vector of size 1
if len(result_graph) > 1:
# Get audio files
@@ -172,7 +190,14 @@ def process_all_graphs(nb_nodes, dirname):
# We should get them in the same graph order as in the measured one (non-degraded first)
csvname = prefix.rsplit("-", maxsplit=1)[0] + "-theo.csv"
costs_th, qualities_th = load_csv(csvname)
qualities_th, costs_th = load_csv(csvname)
# print("Results:", result_graph)
#
# print("Theoretical costs: ", costs_th)
# print("Measured costs: ", costs_mes)
# print("Theoretical qualities: ", qualities_th)
# print("Measured qualities: ", qualities_mes)
kendalltau = GraphResults(prefix)
kendalltau.costs = stats.kendalltau(costs_mes, costs_th, nan_policy='raise')
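# Hedged illustration with made-up numbers (not project data): the correlation is
# only meaningful if costs_mes and costs_th list the graphs in the same order,
# non-degraded graph first, which is why load_csv's return order matters above.
from scipy import stats
costs_mes_example = [1.00, 0.62, 0.75, 0.58]  # hypothetical measured costs
costs_th_example = [1.00, 0.60, 0.80, 0.55]   # hypothetical theoretical costs, same graph order
tau, p_value = stats.kendalltau(costs_mes_example, costs_th_example, nan_policy='raise')
# Both vectors rank the graphs identically, so tau == 1.0 (perfect agreement).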
@@ -185,8 +210,15 @@ def process_all_graphs(nb_nodes, dirname):
print(kendalltau.name, " Kendall's tau: cost=", kendalltau.costs, " and quality=", kendalltau.quality)
print(spearmanr.name, " Spearman's r: cost=", spearmanr.costs, " and quality=", spearmanr.quality)
#input("Press a key to continue.")
results[prefix] = (kendalltau, spearmanr)
# We remove the audio files here as they can take a lot of space
audiofiles = glob.glob(prefix+"*.wav")
for audiofile in audiofiles:
os.remove(audiofile)
return results
@@ -270,7 +302,6 @@ def q_c_dict_to_list(qualities, costs):
q = []
c_cycle = []
# graph 0
c_cycle.append
q.append(1.)
name = list(sorted(costs.keys()))[0]
cost, total = costs[name]
......
@@ -536,6 +536,9 @@ impl AudioEffect for AudioGraph {
let mut edges = self.outputs_mut(self.input_node_index);
let mut i = 0;
while let Some(edge) = edges.next_edge(&self.graph) {
self.output_edges[i].resize(self.graph.edge_weight(edge).unwrap().buffer().len());
self.output_edges[i].samplerate = self.graph.edge_weight(edge).unwrap().samplerate;
debug_assert_eq!(self.graph.edge_weight(edge).unwrap().buffer().len(), self.output_edges[i].buffer().len());
self.graph.edge_weight_mut(edge).unwrap().buffer_mut().copy_from_slice(self.output_edges[i].buffer());
i += 1;
}
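// Hedged sketch of the DspEdge-like buffer manipulated above (the crate's real
// DspEdge has more to it; names and fields here are illustrative only):
// resize() keeps the buffer length in sync with the graph edge it mirrors, and
// buffer()/buffer_mut() expose the samples for copy_from_slice.
struct DspEdgeSketch {
    buffer: Vec<f32>,
    samplerate: u32,
}

impl DspEdgeSketch {
    fn resize(&mut self, new_len: usize) {
        // Keep existing samples, zero-fill any newly added ones.
        self.buffer.resize(new_len, 0.);
    }
    fn buffer(&self) -> &[f32] {
        &self.buffer
    }
    fn buffer_mut(&mut self) -> &mut [f32] {
        &mut self.buffer
    }
}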
@@ -595,11 +598,15 @@ impl AudioEffect for AudioGraph {
// Sink
//println!("Executing {}", self.graph.node_weight(self.output_node_index).unwrap().node_processor);
//Prepare inputs
//Quite inefficient, since it allocates every cycle. Rather use a fixed vec sized for the max number of inputs and outputs plus a buffer pool (a minimal pool is sketched after this hunk).
// Or just use &[&DspEdge]??
let mut edges = self.inputs_mut(self.output_node_index);
let mut i = 0;
while let Some(edge) = edges.next_edge(&self.graph) {
self.input_edges[i].resize(self.graph.edge_weight(edge).unwrap().buffer().len());
self.input_edges[i].samplerate = self.graph.edge_weight(edge).unwrap().samplerate;
debug_assert_eq!(self.graph.edge_weight(edge).unwrap().buffer().len(), self.input_edges[i].buffer().len());
self.input_edges[i].buffer_mut().copy_from_slice(self.graph.edge_weight(edge).unwrap().buffer());
i += 1;
}
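// Hedged sketch of the buffer-pool idea from the comment above (names such as
// BufferPool, take and give_back are illustrative, not the crate's API): keep a
// stash of preallocated buffers and hand them out per cycle instead of allocating.
struct BufferPool {
    free: Vec<Vec<f32>>,
    buffer_size: usize,
}

impl BufferPool {
    fn with_capacity(nb_buffers: usize, buffer_size: usize) -> BufferPool {
        BufferPool {
            free: (0..nb_buffers).map(|_| vec![0.; buffer_size]).collect(),
            buffer_size,
        }
    }
    // Hand out a buffer, allocating only if the pool is exhausted.
    fn take(&mut self) -> Vec<f32> {
        self.free.pop().unwrap_or_else(|| vec![0.; self.buffer_size])
    }
    // Clear a buffer and put it back so the next cycle can reuse it.
    fn give_back(&mut self, mut buf: Vec<f32>) {
        for sample in buf.iter_mut() {
            *sample = 0.;
        }
        self.free.push(buf);
    }
}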
......
@@ -30,7 +30,7 @@ use audio_adaptive::sndfile;
const CHANNELS: i32 = 1;
const SAMPLE_RATE: u32 = 44_100;
const NB_CYCLES : u32 = 12000;
const FRAMES_PER_BUFFER : usize = 256;
const FRAMES_PER_BUFFER : usize = 512;
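// Hedged aside on what the new constants imply (not part of the build):
// one callback now spans FRAMES_PER_BUFFER / SAMPLE_RATE = 512 / 44_100 ≈ 11.6 ms
// (it was 256 / 44_100 ≈ 5.8 ms), and a default bounce of NB_CYCLES = 12_000
// cycles renders 12_000 * 512 / 44_100 ≈ 139 s of audio.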
#[derive(Clone, Copy, Debug)]
pub struct TimeMonitor {
@@ -227,6 +227,9 @@ fn main() {
.short("m")
.long("monitor")
.help("Monitor execution and save it as a csv file."))
.arg(Arg::with_name("silent")
.long("silent")
.help("No output at all on the terminal."))
.group(ArgGroup::with_name("execution-mode")
.args(&["real-time", "bounce"])
.required(true))
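// Illustrative invocation combining the new flag with the existing ones; the
// binary and graph-file names below are placeholders, but the flags mirror what
// the Python harness passes (-m -b -c <cycles>) plus --silent:
//     graph_exec -m -b -c 10000 --silent some_graph
// i.e. bounce the graph for 10_000 cycles, write the monitoring CSV, and print
// nothing to the terminal.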
@@ -238,13 +241,16 @@ fn main() {
let bounce = matches.is_present("bounce");
let nb_cycles : u32 = matches.value_of("cycles").map_or(NB_CYCLES, |v| v.parse().unwrap_or(NB_CYCLES));
let monitor = matches.is_present("monitor");
let silent = matches.is_present("silent");
let mut audiograph = parse_audiograph_from_file(filename, FRAMES_PER_BUFFER, 1, SAMPLE_RATE).unwrap();
audiograph.update_schedule().expect(&format!("Audio graph in {} is cyclic!!", filename));
let basename = Path::new(filename).file_stem().and_then(OsStr::to_str).unwrap();
println!("Starting processing");
if !silent {
println!("Starting processing");
}
let start = PreciseTime::now();
if real_time {
real_time_run(audiograph, basename.to_string(), nb_cycles, monitor).unwrap();
@@ -254,5 +260,5 @@ fn main() {
bounce_run(audiograph, basename.to_string(), audio_input, nb_cycles, monitor).unwrap();
}
let execution_time = start.to(PreciseTime::now()).num_microseconds().unwrap();
println!("End processing in {}s", execution_time as f64 / 1_000_000.0);
if !silent { println!("End processing in {}s", execution_time as f64 / 1_000_000.0); }
}
@@ -92,7 +92,6 @@ pub fn parse_audiograph(audiograph : &str, buffer_size: usize, nb_channels: usiz
port_ident = inner_rule.into_inner().next().unwrap().into_inner();
let dst_id = port_ident.next().unwrap().as_str().to_string();
let dst_port = port_ident.next().unwrap().as_str().parse().unwrap();
println!("dst_id = {}; dst_port = {}", dst_id, dst_port);
edges.push(Edge {src_id, src_port, dst_id : dst_id.clone(), dst_port});
src_id = dst_id;
src_port = dst_port;
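// Illustrative note (the concrete syntax is assumed, not quoted from the grammar):
// because src_id/src_port are overwritten with dst_id/dst_port after each edge,
// a chained connection such as `a.0 -> b.0 -> c.0` flattens into the two edges
// (a.0 -> b.0) and (b.0 -> c.0).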
......