-
Notifications
You must be signed in to change notification settings - Fork 1
/
3la-ir-example.py
96 lines (84 loc) · 3.83 KB
/
3la-ir-example.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
'''Example showing multiple levels of TVM IR being printed.
LSTM code by Steven.'''
import tvm
from tvm import relay
from tvm.relay.prelude import Prelude
from tvm.relay.testing.lstm import lstm_cell
import numpy as np
def generate_random_tensor(ty):
    """Build a TVM NDArray of uniform random values matching *ty*.

    The entries of ``ty.shape`` are TVM integer immediates, so each one is
    coerced to a plain Python ``int`` before being handed to NumPy.
    """
    dims = tuple(int(dim) for dim in ty.shape)
    values = np.random.rand(*dims).astype(ty.dtype)
    return tvm.nd.array(values)
def get_lstm(batch_size: int, num_hidden: int, dtype: str) -> tvm.IRModule:
    '''Returns a module where the main() function is an LSTM RNN,
    returning a tuple of two items where the first is the
    list of outputs and the second is the final hidden state'''
    mod = tvm.IRModule()
    # The Prelude registers list ADT definitions in the module; p.l, p.cons,
    # p.nil, p.foldl and p.rev used below all come from it.
    p = Prelude(mod)
    # Per-step activation tensor: (batch_size, num_hidden).
    input_type = relay.TensorType((batch_size, num_hidden), dtype)
    # Weights/biases stacked for all four LSTM gates, hence 4 * num_hidden rows.
    weight_type = relay.TensorType((4 * num_hidden, num_hidden), dtype)
    bias_type = relay.TensorType((4 * num_hidden, ), dtype)
    # State pair — presumably (hidden, cell); verify against lstm_cell.
    state_type = relay.TupleType([input_type, input_type])
    # lstm_cell's result type: (output, state) — assumed from its use below.
    cell_type = relay.TupleType([input_type, state_type])
    # Fold accumulator: (list of outputs so far, current state).
    state_var_type = relay.TupleType([p.l(input_type), state_type])
    input_list = relay.Var('input_list', p.l(input_type))
    init_states = relay.Var('init_states', state_type)
    # Single LSTM cell function provided by tvm.relay.testing.lstm.
    cell_fn = lstm_cell(num_hidden, batch_size, dtype, "lstm_cell")
    i2h_weight = relay.Var('i2h_weight', weight_type)
    i2h_bias = relay.Var('i2h_bias', bias_type)
    h2h_weight = relay.Var('h2h_weight', weight_type)
    h2h_bias = relay.Var('h2h_bias', bias_type)
    state_var = relay.Var('state_var', state_var_type)
    input_var = relay.Var('input_var', input_type)
    cell_out = relay.Var('cell_out', cell_type)
    # Fold step: run the cell on the current input and the carried state
    # (element 1 of the accumulator), then cons the cell's output onto the
    # accumulated output list and carry the new state forward.
    # NOTE: this function closes over the weight/bias Vars, which are bound
    # as parameters of 'rnn' below.
    iteration = relay.Function(
        [state_var, input_var],
        relay.Let(
            cell_out,
            cell_fn(input_var, relay.TupleGetItem(state_var, 1),
                    i2h_weight, i2h_bias, h2h_weight, h2h_bias),
            relay.Tuple([
                p.cons(relay.TupleGetItem(cell_out, 0),
                       relay.TupleGetItem(state_var, 0)),
                relay.TupleGetItem(cell_out, 1)
            ])), state_var_type)
    fold_res = relay.Var('fold_res', state_var_type)
    # rnn: left-fold the iteration over the input list starting from
    # (empty list, init_states). The output list was built by consing, so it
    # is reversed to restore input order before returning.
    mod['rnn'] = relay.Function(
        [i2h_weight, i2h_bias, h2h_weight, h2h_bias, init_states, input_list],
        relay.Let(
            fold_res,
            p.foldl(iteration, relay.Tuple([p.nil(), init_states]),
                    input_list),
            relay.Tuple([
                p.rev(relay.TupleGetItem(fold_res, 0)),
                relay.TupleGetItem(fold_res, 1)
            ])), state_var_type)
    # main: call rnn with random constant weights/biases/initial states and a
    # single-element input list.
    mod['main'] = relay.Function(
        [],
        relay.Call(mod.get_global_var('rnn'), [
            relay.const(generate_random_tensor(weight_type)),
            relay.const(generate_random_tensor(bias_type)),
            relay.const(generate_random_tensor(weight_type)),
            relay.const(generate_random_tensor(bias_type)),
            relay.Tuple([
                relay.const(generate_random_tensor(input_type)),
                relay.const(generate_random_tensor(input_type))
            ]),
            p.cons(relay.const(generate_random_tensor(input_type)), p.nil())
        ]))
    return mod
def main() -> None:
    """Build the LSTM example module, print its Relay-level IR, then run
    main() with the Relay interpreter and print the result.

    The module docstring advertises IR "being printed", but the original
    computed the result and discarded it without printing anything — fixed
    here by printing both the module and the evaluation output.
    """
    mod = get_lstm(1, 1, 'float32')
    # Show the Relay-level IR of the constructed module.
    print(mod)
    # main() takes no parameters (all arguments are baked-in constants),
    # so the input dict stays empty.
    input_dict = {}
    use_executor = True
    if use_executor:
        ex = relay.create_executor(mod=mod)
        out = ex.evaluate()(**input_dict)
        print(out)
    else:
        # TODO manually scheduling, lowering, building the code; do people still
        # do this from Relay? or does everyone just use the evaluators?
        pass


if __name__ == '__main__':
    main()