Making it available for SBT
Rojods committed Jul 10, 2024
1 parent f022be0 commit 62bc30b
Showing 7 changed files with 170 additions and 26 deletions.
2 changes: 1 addition & 1 deletion java-bridge-forsyde-io/build.gradle
@@ -2,7 +2,7 @@ plugins {
id 'idesyde.java-standalone-module'
}

def forsydeioVersion = "develop-SNAPSHOT"
def forsydeioVersion = "master-SNAPSHOT"

dependencies {
testImplementation 'org.junit.jupiter:junit-jupiter-api:5.8.1'
4 changes: 3 additions & 1 deletion rust-bridge-java/src/lib.rs
@@ -846,7 +846,9 @@ impl ReverseIdentificationRuleLike for JavaModuleReverseIdentificationRule {
JValue::Object(jdecisions.as_ref()),
JValue::Object(jdesigns.as_ref()),
],
)?.l()?;
)
.inspect_err(|_| {let _ = env.exception_describe();})
?.l()?;
HashSet::from_java(env, set_obj)
});
match jresult {
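Note on the hunk above: when a JNI call fails because Java threw, the exception is still pending on the attached JVM thread, and the Rust error alone carries no Java stack trace. The added inspect_err prints that trace via exception_describe before the ? operator propagates the error. A minimal sketch of the pattern with the jni crate; the method name and signature here are hypothetical, not from this commit:

use jni::errors::Result as JniResult;
use jni::objects::JObject;
use jni::JNIEnv;

// Calls a Java method and, if it throws, prints the pending Java stack
// trace to stderr before propagating the JNI error to the Rust caller.
fn call_with_exception_trace<'a>(
    env: &mut JNIEnv<'a>,
    module: &JObject<'a>,
) -> JniResult<JObject<'a>> {
    env.call_method(module, "reverseIdentify", "()Ljava/util/Set;", &[])
        .inspect_err(|_| {
            // Best effort: ignore a failure of the describe call itself.
            let _ = env.exception_describe();
        })?
        .l()
}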
@@ -55,6 +55,12 @@ constraint forall(p in Processes) (durationRead[p] >= 0);
constraint forall(p in Processes) (durationWrite[p] >= 0);
constraint forall(p in Processes) (duration[p] >= min([e | e in executionTime[p, ..] where e >= 0]));

int: maxTh = sum(p in Processes) (2 * sum(f in Firings where firingsActor[f] = p) (1) * max([e | e in executionTime[p, ..] where e >= 0]));
constraint forall(p in Processes) (invThroughput[p] <= maxTh);
constraint forall(p in Processes) (invThroughput[p] >= min([e | e in executionTime[p, ..] where e >= 0]));
constraint forall(f in Firings, l in Mappable) (maxPath[f, l] >= -maxTh);
constraint forall(f in Firings, l in Mappable) (maxPath[f, l] <= maxTh);
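For reference, the bound added above reads, in math form (a restatement of the MiniZinc, not part of the diff):

\[ \text{maxTh} = \sum_{p \in \text{Processes}} 2 \cdot \bigl|\{\, f \in \text{Firings} : \text{firingsActor}[f] = p \,\}\bigr| \cdot \max\{\, e \in \text{executionTime}[p,\cdot] : e \geq 0 \,\} \]

Each process contributes twice its firing count times its worst non-negative execution time, so maxTh safely dominates every feasible invThroughput[p] and |maxPath[f, l]|, making it a valid big-M for the invThroughput and maxPath bracketing constraints above. The analogous bound in the buffer-based model further below gains the same per-firing multiplier in this commit.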

constraint forall(p in Processes, pe in ListSchedulers, me in Memories where absent(interconnectFromMemories[me, pe])) (
processesExecution[p] != pe \/ processesMapping[p] != me
);
@@ -126,29 +132,29 @@ constraint forall(p in Processes)(duration[p] = durationFetch[p] + executionTime[p

constraint forall(f, ff in Firings where f != ff /\ ff in follows[f]) (processesExecution[firingsActor[f]] = processesExecution[firingsActor[ff]] -> firingsOrdering[f] < firingsOrdering[ff]);

constraint diffn([processMapping[firingsActor[f]]| f in Firings], [firingsOrdering[f] | f in Firings], [1 | f in Firings], [1 | f in Firings]);
constraint diffn([processesMapping[firingsActor[f]]| f in Firings], [firingsOrdering[f] | f in Firings], [1 | f in Firings], [1 | f in Firings]);

constraint forall(pe in ListSchedulers, f in Firings) (processesExecution[firingsActor[f]] = pe -> count_lt([processesExecution[firingsActor[ff]] | ff in Firings], pe, firingsOrdering[f]));

constraint nvalue(nUsedPEs, processesExecution);

constraint forall(f in Firings, pe in Tiles where firingsOrdering[f] = 0 /\ processMapping[firingsActor[f]] = pe) (
constraint forall(f in Firings, pe in Mappable where firingsOrdering[f] = 0 /\ processesMapping[firingsActor[f]] = pe) (
maxPath[f, pe] = duration[firingsActor[f]]
);
constraint forall(f in Firings, pe in Tiles where firingsOrdering[f] = 0 /\ processMapping[firingsActor[f]] != pe) (
constraint forall(f in Firings, pe in Mappable where firingsOrdering[f] = 0 /\ processesMapping[firingsActor[f]] != pe) (
maxPath[f, pe] = duration[firingsActor[f]] + max([-maxTh] ++ [maxPath[ff, pe] | ff in Firings where f in follows[ff]])
);
constraint forall(f in Firings, pe in Tiles where firingsOrdering[f] > 0) (
constraint forall(f in Firings, pe in Mappable where firingsOrdering[f] > 0) (
maxPath[f, pe] = duration[firingsActor[f]] + max(
[maxPath[ff, pe] | ff in Firings where f != ff /\ processMapping[firingsActor[f]] = processMapping[firingsActor[ff]] /\ firingsOrdering[f] > firingsOrdering[ff]] ++
[maxPath[ff, pe] | ff in Firings where f != ff /\ processMapping[firingsActor[f]] != processMapping[firingsActor[ff]] /\ f in follows[ff]]
[maxPath[ff, pe] | ff in Firings where f != ff /\ processesMapping[firingsActor[f]] = processesMapping[firingsActor[ff]] /\ firingsOrdering[f] > firingsOrdering[ff]] ++
[maxPath[ff, pe] | ff in Firings where f != ff /\ processesMapping[firingsActor[f]] != processesMapping[firingsActor[ff]] /\ f in follows[ff]]
)
);

constraint forall(a in Processes) (
let {
var int: maxCycleConnected = max([maxPath[f, processMapping[firingsActor[f]]] | f in Firings where connected[firingsActor[f], a]]);
var int: maxCycleComapped = max([maxPath[f, processMapping[firingsActor[f]]] | f in Firings where processMapping[firingsActor[f]] = processMapping[a]])
var int: maxCycleConnected = max([maxPath[f, processesMapping[firingsActor[f]]] | f in Firings where connected[firingsActor[f], a]]);
var int: maxCycleComapped = max([maxPath[f, processesMapping[firingsActor[f]]] | f in Firings where processesMapping[firingsActor[f]] = processesMapping[a]])
} in
invThroughput[a] = max([duration[a], maxCycleConnected, maxCycleComapped])
);
@@ -63,7 +63,7 @@ constraint forall(b in Buffers) (durationBufferMessage[b] <= max([0] ++ [
sum(ce in path) (frameSize[ce]) + processesWriteBuffer[src, b] * min(ce in path) (invBandwidthPerChannel[ce])
| src in Processes, mSrc, mDst in Tiles where mSrc != mDst /\ hasInterconnectTo[mSrc, mDst] /\ processesWriteBuffer[src, b] > 0
]));
int: maxTh = sum(p in Processes) (max([e | e in executionTime[p, ..] where e >= 0]) + sum(b in Buffers, mSrc, mDst in Tiles where mSrc != mDst /\ hasInterconnectTo[mSrc, mDst] /\ processesWriteBuffer[p, b] > 0) (
int: maxTh = sum(p in Processes) (sum(f in Firings where firingsActor[f] = p) (1) * max([e | e in executionTime[p, ..] where e >= 0]) + sum(b in Buffers, mSrc, mDst in Tiles where mSrc != mDst /\ hasInterconnectTo[mSrc, mDst] /\ processesWriteBuffer[p, b] > 0) (
let { set of int: path = interconnectTo[mSrc, mDst]; } in
sum(ce in path) (frameSize[ce]) + processesWriteBuffer[p, b] * min(ce in path) (invBandwidthPerChannel[ce])
));
93 changes: 88 additions & 5 deletions rust-common/src/irules.rs
@@ -13,10 +13,12 @@ use crate::models::{
AperiodicAsynchronousDataflowToPartitionedMemoryMappableMulticore,
AperiodicAsynchronousDataflowToPartitionedMemoryMappableMulticoreAndPL,
AperiodicAsynchronousDataflowToPartitionedTiledMulticore, HardwareImplementationArea,
InstrumentedComputationTimes, InstrumentedMemoryRequirements, MemoryMappableMulticoreWithPL,
MemoryMappableMultiCore, PartitionedMemoryMappableMulticore,
PartitionedMemoryMappableMulticoreAndPL, PartitionedTiledMulticore, RuntimesAndProcessors,
SDFApplication, TiledMultiCore,
InstrumentedComputationTimes, InstrumentedMemoryRequirements, MemoryMappableMultiCore,
MemoryMappableMulticoreWithPL, PartitionedMemoryMappableMulticore,
PartitionedMemoryMappableMulticoreAndPL, PartitionedTiledMulticore,
PeriodicWorkloadAndAperiodicAsynchronousDataflowToPartitionedMemoryMappable,
PeriodicWorkloadToPartitionedSharedMultiCore, RuntimesAndProcessors, SDFApplication,
TiledMultiCore,
};

pub fn identify_partitioned_mem_mapped_multicore(
@@ -100,7 +102,8 @@ pub fn identify_partitioned_mem_mapped_multicore_and_pl(
}
if one_proc_per_scheduler && one_scheduler_per_proc {
for m1 in decision_models {
if let Some(plat) = cast_dyn_decision_model!(m1, MemoryMappableMulticoreWithPL) {
if let Some(plat) = cast_dyn_decision_model!(m1, MemoryMappableMulticoreWithPL)
{
let potential = Arc::new(PartitionedMemoryMappableMulticoreAndPL {
hardware: plat.to_owned(),
runtimes: runt.to_owned(),
@@ -804,6 +807,86 @@ pub fn identify_aperiodic_asynchronous_dataflow_to_partitioned_mem_mappable_mult
(identified, errors)
}

pub fn identify_combined_periodic_workload_and_aad_mem_mappable(
_design_models: &[Arc<dyn DesignModel>],
decision_models: &[Arc<dyn DecisionModel>],
) -> IdentificationResult {
let mut identified: Vec<Arc<dyn DecisionModel>> = Vec::new();
let mut errors: Vec<String> = Vec::new();
for workload_dse in decision_models
.iter()
.flat_map(|m| cast_dyn_decision_model!(m, PeriodicWorkloadToPartitionedSharedMultiCore))
{
for aad_dse in decision_models.iter().flat_map(|m| {
cast_dyn_decision_model!(
m,
AperiodicAsynchronousDataflowToPartitionedMemoryMappableMulticore
)
}) {
let workload_dse = workload_dse.clone();
if workload_dse.platform == aad_dse.partitioned_mem_mappable_multicore
&& workload_dse.instrumented_computation_times
== aad_dse.instrumented_computation_times
&& workload_dse.instrumented_memory_requirements
== aad_dse.instrumented_memory_requirements
{
let proc_mappings: Vec<(String, String)> = workload_dse
.process_mapping
.into_iter()
.chain(aad_dse.processes_to_memory_mapping.into_iter())
.collect();
let channel_mappings: Vec<(String, String)> = workload_dse
.channel_mappings
.into_iter()
.chain(aad_dse.buffer_to_memory_mappings.into_iter())
.collect();
let proc_schedulings: Vec<(String, String)> = workload_dse
.process_schedulings
.into_iter()
.chain(aad_dse.processes_to_runtime_scheduling.into_iter())
.collect();
let comm_reservations: HashMap<String, HashMap<String, u16>> = aad_dse
.processing_elements_to_routers_reservations
.into_iter()
.chain(
workload_dse
.channel_slot_allocations
.into_iter()
.map(|(k, v)| {
(
k,
v.into_iter()
.map(|(k, v)| {
(k, v.into_iter().filter(|x| *x).count() as u16)
})
.collect(),
)
}),
)
.collect();
identified.push(Arc::new(
PeriodicWorkloadAndAperiodicAsynchronousDataflowToPartitionedMemoryMappable {
periodic_workload: workload_dse.workload,
aperiodic_asynchronous_dataflows: aad_dse.aperiodic_asynchronous_dataflows,
instrumented_computation_times: workload_dse.instrumented_computation_times,
instrumented_memory_requirements: workload_dse
.instrumented_memory_requirements,
platform: workload_dse.platform,
process_mapping: proc_mappings,
process_schedulings: proc_schedulings,
channel_mappings: channel_mappings,
communication_slot_allocations: comm_reservations,
super_loop_schedules: aad_dse.super_loop_schedules,
},
) as Arc<dyn DecisionModel>);
} else {
errors.push("identify_combined_periodic_workload_and_aad_mem_mappable: partitioned multicore platforms or instrumentation data do not match".to_string());
}
}
}
(identified, errors)
}
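The subtlest step in the rule above is merging the two communication-reservation encodings: the dataflow side already stores per-router slot counts (u16), while the workload side stores per-slot booleans. A self-contained sketch of just that conversion, with types assumed from the field shapes used above:

use std::collections::HashMap;

// Collapses per-slot boolean allocations (workload encoding) into slot
// counts (dataflow encoding): for each communication element and target,
// count the slots marked true.
fn collapse_slots(
    alloc: HashMap<String, HashMap<String, Vec<bool>>>,
) -> HashMap<String, HashMap<String, u16>> {
    alloc
        .into_iter()
        .map(|(ce, per_target)| {
            let counts = per_target
                .into_iter()
                .map(|(t, slots)| (t, slots.into_iter().filter(|x| *x).count() as u16))
                .collect();
            (ce, counts)
        })
        .collect()
}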

/// Finds the weakly connected components (WCCs) of a directed graph
///
/// This auxiliary function exists because petgraph does not return
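(The helper's body is cut off by the diff view.) As context for its doc comment: petgraph's connected_components returns only the number of weakly connected components, not their membership, which is presumably the gap this auxiliary function fills. A standard union-find sketch (not necessarily the function's actual body):

use std::collections::HashMap;

use petgraph::graph::{DiGraph, NodeIndex};
use petgraph::unionfind::UnionFind;
use petgraph::visit::EdgeRef;

// Groups the nodes of a directed graph into weakly connected components by
// unioning the endpoints of every edge (ignoring direction) and bucketing
// nodes by their set representative.
fn weakly_connected_components<N, E>(g: &DiGraph<N, E>) -> Vec<Vec<NodeIndex>> {
    let mut uf: UnionFind<usize> = UnionFind::new(g.node_count());
    for e in g.edge_references() {
        uf.union(e.source().index(), e.target().index());
    }
    let mut groups: HashMap<usize, Vec<NodeIndex>> = HashMap::new();
    for n in g.node_indices() {
        groups.entry(uf.find(n.index())).or_default().push(n);
    }
    groups.into_values().collect()
}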
50 changes: 50 additions & 0 deletions rust-common/src/models.rs
@@ -1392,3 +1392,53 @@ impl DecisionModel for PeriodicWorkloadToPartitionedSharedMultiCore {
elems
}
}

#[derive(Debug, PartialEq, Serialize, Deserialize, Clone, JsonSchema)]
pub struct PeriodicWorkloadAndAperiodicAsynchronousDataflowToPartitionedMemoryMappable {
pub periodic_workload: CommunicatingAndTriggeredReactiveWorkload,
pub aperiodic_asynchronous_dataflows: Vec<AperiodicAsynchronousDataflow>,
pub platform: PartitionedMemoryMappableMulticore,
pub instrumented_computation_times: InstrumentedComputationTimes,
pub instrumented_memory_requirements: InstrumentedMemoryRequirements,
pub process_mapping: Vec<(String, String)>,
pub process_schedulings: Vec<(String, String)>,
pub channel_mappings: Vec<(String, String)>,
pub communication_slot_allocations: HashMap<String, HashMap<String, u16>>,
pub super_loop_schedules: HashMap<String, Vec<String>>,
}

impl_decision_model_conversion!(PeriodicWorkloadAndAperiodicAsynchronousDataflowToPartitionedMemoryMappable);
impl DecisionModel for PeriodicWorkloadAndAperiodicAsynchronousDataflowToPartitionedMemoryMappable {
impl_decision_model_standard_parts!(PeriodicWorkloadAndAperiodicAsynchronousDataflowToPartitionedMemoryMappable);

fn part(&self) -> HashSet<String> {
let mut elems: HashSet<String> = HashSet::new();
elems.extend(self.periodic_workload.part().iter().map(|x| x.to_owned()));
for app in &self.aperiodic_asynchronous_dataflows {
elems.extend(app.part().iter().map(|x| x.to_owned()));
}
elems.extend(self.platform.part().iter().map(|x| x.to_owned()));
elems.extend(
self.instrumented_computation_times
.part()
.iter()
.map(|x| x.to_owned()),
);
elems.extend(
self.instrumented_memory_requirements
.part()
.iter()
.map(|x| x.to_owned()),
);
for (pe, sched) in &self.process_schedulings {
elems.insert(format!("{}={}:{}-{}:{}", "scheduling", pe, "", sched, ""));
}
for (pe, mem) in &self.process_mapping {
elems.insert(format!("{}={}:{}-{}:{}", "mapping", pe, "", mem, ""));
}
for (buf, mem) in &self.channel_mappings {
elems.insert(format!("{}={}:{}-{}:{}", "mapping", buf, "", mem, ""));
}
elems
}
}
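The part() entries above encode mappings and schedulings as labeled relation strings; the empty fields between the colons are assumed placeholders in this encoding. A quick check of what one scheduling pair expands to (names are hypothetical):

fn main() {
    // One (process, scheduler) pair as stored in `process_schedulings`.
    let (p, sched) = ("Actor1", "StaticCyclicScheduler1");
    let elem = format!("{}={}:{}-{}:{}", "scheduling", p, "", sched, "");
    assert_eq!(elem, "scheduling=Actor1:-StaticCyclicScheduler1:");
}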
@@ -474,15 +474,15 @@ final class CanSolvePeriodicWorkloadAndSDFServersToMulticore
)*
)

// chocoModel
// .getSolver()
// .plugMonitor(new IMonitorContradiction {
// def onContradiction(cex: ContradictionException): Unit = {
// println(cex.toString())
// println(chocoModel.getVars().filter(_.getName().contains("utilization")).mkString(", "))
// println(chocoModel.getSolver().getDecisionPath().toString())
// }
// })
chocoModel
.getSolver()
.plugMonitor(new IMonitorContradiction {
def onContradiction(cex: ContradictionException): Unit = {
println(cex.toString())
// println(chocoModel.getVars().filter(_.getName().contains("utilization")).mkString(", "))
println(chocoModel.getSolver().getDecisionPath().toString())
}
})
(chocoModel, objs.map(o => o.getName() -> o).toMap)
}

@@ -491,6 +491,7 @@ final class CanSolvePeriodicWorkloadAndSDFServersToMulticore
solution: Solution,
configuration: Explorer.Configuration
): ExplorationSolution = {
println("Rebulding")
val timeValues =
m.wcets.flatten ++ m.platform.hardware.maxTraversalTimePerBit.flatten
.map(
@@ -517,7 +518,7 @@
// if (configuration.memoryDiscretizationFactor > Int.MaxValue) Int.MaxValue else configuration.memoryDiscretizationFactor.toInt
// )
val intVars = solution.retrieveIntVars(true).asScala
// println(intVars.filter(v => v.getName().contains("effect") || v.getName().contains("utilization")).mkString(", "))
println(intVars.filter(v => v.getName().contains("effect") || v.getName().contains("utilization")).mkString(", "))
val tasksMemoryMapping: Vector[Int] =
m.tasksAndSDFs.workload.processes.zipWithIndex.map((_, i) =>
intVars
@@ -583,6 +584,7 @@
)
)
val numMappedElements = intVars.find(_.getName() == "nUsedPEs").get
println("Here")
val invThs = intVars.filter(_.getName().startsWith("invTh"))
val dataChannelsSlotAllocations = m.tasksAndSDFs.workload.data_channels.zipWithIndex
.map((c, ci) =>
@@ -645,6 +647,7 @@
.map((s, i) =>
intVars.find(_.getName().startsWith("utilization(" + i + ")")).get.getLB().toDouble / 100.0
)
println("Before solution")
ExplorationSolution(
(Map(
"nUsedPEs" -> numMappedElements.getValue().toDouble.asInstanceOf[java.lang.Double]
