-
Notifications
You must be signed in to change notification settings - Fork 4
/
PartitionPtscotch.h
125 lines (97 loc) · 3.52 KB
/
PartitionPtscotch.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
/**
* @file
* This file is part of PUML
*
* For conditions of distribution and use, please see the copyright
* notice in the file 'COPYING' at the root directory of this package
* and the copyright notice at https://github.com/TUM-I5/PUMGen
*
* @copyright 2019-2023 Technische Universitaet Muenchen
* @author David Schneller <[email protected]>
*/
#ifndef PUML_PARTITIONPTSCOTCH_H
#define PUML_PARTITIONPTSCOTCH_H
#ifdef USE_MPI
#include <mpi.h>
#endif // USE_MPI
#ifndef USE_PTSCOTCH
#warning PTSCOTCH is not enabled.
#endif
#include <ptscotch.h>

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#include <algorithm>
#include <cmath>
#include <vector>

#include "PartitionBase.h"
#include "PartitionGraph.h"
#include "Topology.h"
namespace PUML
{
template<TopoType Topo>
class PartitionPtscotch : public PartitionBase<Topo>
{
public:
	/**
	 * @param mode PT-SCOTCH strategy flag forwarded to SCOTCH_stratDgraphMapBuild
	 *             (presumably one of SCOTCH_STRATDEFAULT/SCOTCH_STRATQUALITY/... — TODO confirm at call sites).
	 */
	PartitionPtscotch(int mode) : mode(mode) {
	}

#ifdef USE_MPI
	/**
	 * Partitions the distributed graph using PT-SCOTCH.
	 *
	 * @param partition Output array of size graph.localVertexCount(); receives the target
	 *                  part id of each local vertex.
	 * @param graph The distributed graph to partition.
	 * @param target The partitioning target (part count, optional per-part weights, imbalance).
	 * @param seed Seed for PT-SCOTCH's pseudo-random generator (default: 1), making results reproducible.
	 * @return PartitioningResult::SUCCESS on success; an error result if any PT-SCOTCH call fails.
	 */
	virtual PartitioningResult partition(int* partition, const PartitionGraph<Topo>& graph, const PartitionTarget& target, int seed = 1)
	{
		int rank;
		MPI_Comm_rank(graph.comm(), &rank);

		// PT-SCOTCH only supports a single weight per vertex; extra weights are dropped.
		if (graph.vertexWeights().size() > graph.localVertexCount()) {
			logWarning(rank) << "Multiple vertex weights are currently ignored by PTSCOTCH.";
		}
		if (!graph.edgeWeights().empty()) {
			logWarning(rank) << "The existence of edge weights may make PTSCOTCH very slow.";
		}

		auto comm = graph.comm();

		// Copy the CSR arrays element-wise, since the graph's index type may differ from SCOTCH_Num.
		std::vector<SCOTCH_Num> adjDisp(graph.adjDisp().begin(), graph.adjDisp().end());
		std::vector<SCOTCH_Num> adj(graph.adj().begin(), graph.adj().end());
		std::vector<SCOTCH_Num> vertexWeights(graph.vertexWeights().begin(), graph.vertexWeights().end());
		std::vector<SCOTCH_Num> edgeWeights(graph.edgeWeights().begin(), graph.edgeWeights().end());

		auto cellCount = graph.localVertexCount();
		auto nparts = target.vertexCount();

		std::vector<SCOTCH_Num> weights(nparts, 1);
		if (!target.vertexWeightsUniform()) {
			// we need to convert from double node weights to integer node weights
			// (that is due to the interface still being oriented at ParMETIS right now)
			double scale = (double)(1ULL<<24); // if this is not enough (or too much), adjust it
			for (int i = 0; i < nparts; ++i) {
				// important: the weights should be non-negative
				weights[i] = std::max(static_cast<SCOTCH_Num>(1), static_cast<SCOTCH_Num>(std::round(target.vertexWeights()[i] * scale)));
			}
		}

		std::vector<SCOTCH_Num> part(cellCount);

		SCOTCH_Dgraph dgraph;
		SCOTCH_Strat strategy;
		SCOTCH_Arch arch;

		SCOTCH_Num processCount = graph.processCount();
		SCOTCH_Num partCount = nparts;
		SCOTCH_Num stratflag = mode;

		// Seed the generator per rank for deterministic, reproducible partitions.
		SCOTCH_randomProc(rank);
		SCOTCH_randomSeed(seed);
		SCOTCH_randomReset();

		// All SCOTCH_* functions return 0 on success; stop at the first failure
		// instead of silently continuing with an invalid graph/strategy/arch.
		int err = 0;
		err |= SCOTCH_dgraphInit(&dgraph, comm);
		err |= SCOTCH_stratInit(&strategy);
		err |= SCOTCH_archInit(&arch);

		if (err == 0) {
			err = SCOTCH_dgraphBuild(&dgraph, 0, graph.localVertexCount(), graph.localVertexCount(), adjDisp.data(), nullptr,
				vertexWeights.empty() ? nullptr : vertexWeights.data(), nullptr, graph.localEdgeCount(), graph.localEdgeCount(),
				adj.data(), nullptr, edgeWeights.empty() ? nullptr : edgeWeights.data());
		}
		if (err == 0) {
			err = SCOTCH_stratDgraphMapBuild(&strategy, stratflag, processCount, partCount, target.imbalance());
		}
		if (err == 0) {
			// Map onto a weighted complete graph: nparts targets with the given part weights.
			err = SCOTCH_archCmpltw(&arch, partCount, weights.data());
		}
		if (err == 0) {
			err = SCOTCH_dgraphMap(&dgraph, &arch, &strategy, part.data());
		}

		// Release SCOTCH resources on all paths (init'ed structures may be exited safely).
		SCOTCH_archExit(&arch);
		SCOTCH_stratExit(&strategy);
		SCOTCH_dgraphExit(&dgraph);

		if (err != 0) {
			logWarning(rank) << "PTSCOTCH reported an error during partitioning.";
			// NOTE(review): assumes PartitioningResult declares ERROR next to SUCCESS
			// (see PartitionBase.h) — confirm the enumerator name.
			return PartitioningResult::ERROR;
		}

		// Convert SCOTCH_Num back to the caller's int partition ids.
		for (int i = 0; i < cellCount; i++) {
			partition[i] = part[i];
		}

		return PartitioningResult::SUCCESS;
	}
#endif // USE_MPI

private:
	int mode;
};
}
#endif // PUML_PARTITIONPTSCOTCH_H