/*
* Copyright 2012 INRIA Paris-Rocquencourt
* Copyright 2012 Ecole Normale Superieure
*
* Use of this software is governed by the MIT license
*
* Written by Tobias Grosser, INRIA Paris-Rocquencourt,
 * Domaine de Voluceau, Rocquencourt, B.P. 105,
* 78153 Le Chesnay Cedex France
* and Sven Verdoolaege,
* Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
*/
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <isl/aff.h>
#include <isl/ctx.h>
#include <isl/flow.h>
#include <isl/map.h>
#include <isl/ast_build.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <pet.h>
#include "ppcg.h"
#include "ppcg_options.h"
#include "cpu.h"
#include "print.h"
#include "schedule.h"
#include "util.h"
/* Representation of a statement inside a generated AST.
*
* "stmt" refers to the original statement.
* "ref2expr" maps the reference identifier of each access in
* the statement to an AST expression that should be printed
* at the place of the access.
*/
struct ppcg_stmt {
struct pet_stmt *stmt;
isl_id_to_ast_expr *ref2expr;
};
static void ppcg_stmt_free(void *user)
{
struct ppcg_stmt *stmt = user;
if (!stmt)
return;
isl_id_to_ast_expr_free(stmt->ref2expr);
free(stmt);
}
/* Derive the output file name from the input file name.
 * 'input' is the entire path of the input file. If no explicit
 * 'output' name is given, the output name is derived from 'input'
 * by inserting '.ppcg' before the extension of its base name.
 *
 * That is, file.c becomes file.ppcg.c. If 'input' has no extension,
 * '.c' is appended instead.
*/
static FILE *get_output_file(const char *input, const char *output)
{
char name[PATH_MAX];
const char *ext;
const char ppcg_marker[] = ".ppcg";
int len;
FILE *file;
len = ppcg_extract_base_name(name, input);
strcpy(name + len, ppcg_marker);
ext = strrchr(input, '.');
strcpy(name + len + sizeof(ppcg_marker) - 1, ext ? ext : ".c");
if (!output)
output = name;
file = fopen(output, "w");
if (!file) {
fprintf(stderr, "Unable to open '%s' for writing\n", output);
return NULL;
}
return file;
}
/* Data used to annotate the for nodes in the AST.
*/
struct ast_node_userinfo {
/* The for node is an openmp parallel for node. */
int is_openmp;
};
/* Information used while building the ast.
*/
struct ast_build_userinfo {
/* The current ppcg scop. */
struct ppcg_scop *scop;
/* Are we currently in a parallel for loop? */
int in_parallel_for;
};
/* Check if the current scheduling dimension is parallel.
*
* We check for parallelism by verifying that the loop does not carry any
* dependences.
* If the live_range_reordering option is set, then this currently
* includes the order dependences. In principle, non-zero order dependences
* could be allowed, but this would require privatization and/or expansion.
*
* Parallelism test: if the distance is zero in all outer dimensions, then it
* has to be zero in the current dimension as well.
* Implementation: first, translate dependences into time space, then force
* outer dimensions to be equal. If the distance is zero in the current
* dimension, then the loop is parallel.
* The distance is zero in the current dimension if it is a subset of a map
* with equal values for the current dimension.
*/
static int ast_schedule_dim_is_parallel(__isl_keep isl_ast_build *build,
struct ppcg_scop *scop)
{
isl_union_map *schedule, *deps;
isl_map *schedule_deps, *test;
isl_space *schedule_space;
unsigned i, dimension, is_parallel;
schedule = isl_ast_build_get_schedule(build);
schedule_space = isl_ast_build_get_schedule_space(build);
dimension = isl_space_dim(schedule_space, isl_dim_out) - 1;
deps = isl_union_map_copy(scop->dep_flow);
deps = isl_union_map_union(deps, isl_union_map_copy(scop->dep_false));
if (scop->options->live_range_reordering) {
isl_union_map *order = isl_union_map_copy(scop->dep_order);
deps = isl_union_map_union(deps, order);
}
deps = isl_union_map_apply_range(deps, isl_union_map_copy(schedule));
deps = isl_union_map_apply_domain(deps, schedule);
if (isl_union_map_is_empty(deps)) {
isl_union_map_free(deps);
isl_space_free(schedule_space);
return 1;
}
schedule_deps = isl_map_from_union_map(deps);
for (i = 0; i < dimension; i++)
schedule_deps = isl_map_equate(schedule_deps, isl_dim_out, i,
isl_dim_in, i);
test = isl_map_universe(isl_map_get_space(schedule_deps));
test = isl_map_equate(test, isl_dim_out, dimension, isl_dim_in,
dimension);
is_parallel = isl_map_is_subset(schedule_deps, test);
isl_space_free(schedule_space);
isl_map_free(test);
isl_map_free(schedule_deps);
return is_parallel;
}
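/* As an illustration of the test above, consider a single dependence
 * { S[i, j] -> S[i, j + 1] } with S scheduled by the identity [i, j].
 *
 * While the outer loop is being generated, the partial schedule is [i]
 * and the dependence mapped to schedule space is { [i] -> [i] }, which
 * is a subset of { [i] -> [i'] : i' = i }, so the i loop is detected
 * as parallel.
 *
 * While the inner loop is being generated, the partial schedule is
 * [i, j] and the dependence becomes { [i, j] -> [i, j + 1] }.  After
 * equating the outer dimension, it still relates j to j + 1 and is
 * therefore not a subset of { [i, j] -> [i', j'] : j' = j }, so the
 * j loop is not parallel.
 */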
/* Mark a for node as openmp parallel if it is the outermost parallel for node.
*/
static void mark_openmp_parallel(__isl_keep isl_ast_build *build,
struct ast_build_userinfo *build_info,
struct ast_node_userinfo *node_info)
{
if (build_info->in_parallel_for)
return;
if (ast_schedule_dim_is_parallel(build, build_info->scop)) {
build_info->in_parallel_for = 1;
node_info->is_openmp = 1;
}
}
/* Allocate an ast_node_userinfo structure and initialize it
 * with default values.
 */
static struct ast_node_userinfo *allocate_ast_node_userinfo(void)
{
struct ast_node_userinfo *node_info;
node_info = (struct ast_node_userinfo *)
malloc(sizeof(struct ast_node_userinfo));
node_info->is_openmp = 0;
return node_info;
}
/* Free an ast_node_userinfo structure.
*/
static void free_ast_node_userinfo(void *ptr)
{
struct ast_node_userinfo *info;
info = (struct ast_node_userinfo *) ptr;
free(info);
}
/* This function is called before the construction of a for node. It creates
 * an isl_id that is used to annotate the subsequently generated AST for node.
*
* In this function we also run the following analyses:
*
* - Detection of openmp parallel loops
*/
static __isl_give isl_id *ast_build_before_for(
__isl_keep isl_ast_build *build, void *user)
{
isl_id *id;
struct ast_build_userinfo *build_info;
struct ast_node_userinfo *node_info;
build_info = (struct ast_build_userinfo *) user;
node_info = allocate_ast_node_userinfo();
id = isl_id_alloc(isl_ast_build_get_ctx(build), "", node_info);
id = isl_id_set_free_user(id, free_ast_node_userinfo);
mark_openmp_parallel(build, build_info, node_info);
return id;
}
/* This function is called after the construction of a for node.
 *
 * It resets the 'in_parallel_for' flag as soon as we leave a for node
 * that is marked as openmp parallel, so that a later outermost parallel
 * for node can again be annotated.
 */
static __isl_give isl_ast_node *ast_build_after_for(
__isl_take isl_ast_node *node, __isl_keep isl_ast_build *build,
void *user)
{
isl_id *id;
struct ast_build_userinfo *build_info;
struct ast_node_userinfo *info;
id = isl_ast_node_get_annotation(node);
info = isl_id_get_user(id);
if (info && info->is_openmp) {
build_info = (struct ast_build_userinfo *) user;
build_info->in_parallel_for = 0;
}
isl_id_free(id);
return node;
}
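/* Taken together, ast_build_before_for and ast_build_after_for ensure
 * that only the outermost parallel loop of a nest carries the pragma.
 * As a sketch, if both loops of a nest are detected as parallel,
 * the generated code has the form
 *
 *	#pragma omp parallel for
 *	for (c0 = ...; ...; ...)
 *		for (c1 = ...; ...; ...)
 *			S(c0, c1);
 *
 * because 'in_parallel_for' is still set while the inner loop is built
 * and is only reset after the outer for node has been constructed.
 */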
/* Find the element in scop->pet->stmts that has the given "id".
*/
static struct pet_stmt *find_stmt(struct ppcg_scop *scop, __isl_keep isl_id *id)
{
int i;
for (i = 0; i < scop->pet->n_stmt; ++i) {
struct pet_stmt *stmt = scop->pet->stmts[i];
isl_id *id_i;
id_i = isl_set_get_tuple_id(stmt->domain);
isl_id_free(id_i);
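		/* The tuple id of the statement domain and "id" originate
		 * from the same isl_id object, so a pointer comparison
		 * suffices.  Dropping the local reference above is safe
		 * because the domain retains its own reference.
		 */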
if (id_i == id)
return stmt;
}
isl_die(isl_id_get_ctx(id), isl_error_internal,
"statement not found", return NULL);
}
/* Print a user statement in the generated AST.
* The ppcg_stmt has been attached to the node in at_each_domain.
*/
static __isl_give isl_printer *print_user(__isl_take isl_printer *p,
__isl_take isl_ast_print_options *print_options,
__isl_keep isl_ast_node *node, void *user)
{
struct ppcg_stmt *stmt;
isl_id *id;
id = isl_ast_node_get_annotation(node);
stmt = isl_id_get_user(id);
isl_id_free(id);
p = pet_stmt_print_body(stmt->stmt, p, stmt->ref2expr);
isl_ast_print_options_free(print_options);
return p;
}
/* Print a for loop node as an openmp parallel loop.
*
 * To print an openmp parallel loop, we print a normal for loop and add
 * "#pragma omp parallel for" in front of it.
 *
 * Variables that are declared within the body of this for loop are
 * automatically openmp 'private'. Iterators declared outside of the
 * for loop are automatically openmp 'shared'. As ppcg declares all
 * iterators at the position where they are first assigned, there is
 * no need to explicitly mark any variables; their automatically
 * assigned sharing attributes are already correct.
 *
 * This function only generates valid OpenMP code if the AST was generated
 * with the 'atomic-bounds' option enabled.
*/
static __isl_give isl_printer *print_for_with_openmp(
__isl_keep isl_ast_node *node, __isl_take isl_printer *p,
__isl_take isl_ast_print_options *print_options)
{
p = isl_printer_start_line(p);
p = isl_printer_print_str(p, "#pragma omp parallel for");
p = isl_printer_end_line(p);
p = isl_ast_node_for_print(node, p, print_options);
return p;
}
/* Print a for node.
*
* Depending on how the node is annotated, we either print a normal
* for node or an openmp parallel for node.
*/
static __isl_give isl_printer *print_for(__isl_take isl_printer *p,
__isl_take isl_ast_print_options *print_options,
__isl_keep isl_ast_node *node, void *user)
{
isl_id *id;
int openmp;
openmp = 0;
id = isl_ast_node_get_annotation(node);
if (id) {
struct ast_node_userinfo *info;
info = (struct ast_node_userinfo *) isl_id_get_user(id);
if (info && info->is_openmp)
openmp = 1;
}
if (openmp)
p = print_for_with_openmp(node, p, print_options);
else
p = isl_ast_node_for_print(node, p, print_options);
isl_id_free(id);
return p;
}
/* Index transformation callback for pet_stmt_build_ast_exprs.
*
* "index" expresses the array indices in terms of statement iterators
* "iterator_map" expresses the statement iterators in terms of
* AST loop iterators.
*
* The result expresses the array indices in terms of
* AST loop iterators.
*/
static __isl_give isl_multi_pw_aff *pullback_index(
__isl_take isl_multi_pw_aff *index, __isl_keep isl_id *id, void *user)
{
isl_pw_multi_aff *iterator_map = user;
iterator_map = isl_pw_multi_aff_copy(iterator_map);
return isl_multi_pw_aff_pullback_pw_multi_aff(index, iterator_map);
}
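/* For example, if the access { S[i] -> A[i + 1] } expresses the array
 * index in terms of the statement iterator i, and "iterator_map" is
 * { [c0] -> S[c0] }, expressing that statement iterator in terms of
 * the AST iterator c0, then the pullback computed above is
 * { [c0] -> A[c0 + 1] }, i.e., the array index expressed directly
 * in terms of the AST iterator.
 */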
/* Transform the accesses in the statement associated with the domain
* called by "node" to refer to the AST loop iterators, construct
* corresponding AST expressions using "build",
* collect them in a ppcg_stmt and annotate the node with the ppcg_stmt.
*/
static __isl_give isl_ast_node *at_each_domain(__isl_take isl_ast_node *node,
__isl_keep isl_ast_build *build, void *user)
{
struct ppcg_scop *scop = user;
isl_ast_expr *expr, *arg;
isl_ctx *ctx;
isl_id *id;
isl_map *map;
isl_pw_multi_aff *iterator_map;
struct ppcg_stmt *stmt;
ctx = isl_ast_node_get_ctx(node);
stmt = isl_calloc_type(ctx, struct ppcg_stmt);
if (!stmt)
goto error;
expr = isl_ast_node_user_get_expr(node);
arg = isl_ast_expr_get_op_arg(expr, 0);
isl_ast_expr_free(expr);
id = isl_ast_expr_get_id(arg);
isl_ast_expr_free(arg);
stmt->stmt = find_stmt(scop, id);
isl_id_free(id);
if (!stmt->stmt)
goto error;
map = isl_map_from_union_map(isl_ast_build_get_schedule(build));
map = isl_map_reverse(map);
iterator_map = isl_pw_multi_aff_from_map(map);
stmt->ref2expr = pet_stmt_build_ast_exprs(stmt->stmt, build,
&pullback_index, iterator_map, NULL, NULL);
isl_pw_multi_aff_free(iterator_map);
id = isl_id_alloc(isl_ast_node_get_ctx(node), NULL, stmt);
id = isl_id_set_free_user(id, &ppcg_stmt_free);
return isl_ast_node_set_annotation(node, id);
error:
ppcg_stmt_free(stmt);
return isl_ast_node_free(node);
}
/* Set *depth (initialized to 0 by the caller) to the maximum
* of the schedule depths of the leaf nodes for which this function is called.
*/
static isl_bool update_depth(__isl_keep isl_schedule_node *node, void *user)
{
int *depth = user;
int node_depth;
if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
return isl_bool_true;
node_depth = isl_schedule_node_get_schedule_depth(node);
if (node_depth > *depth)
*depth = node_depth;
return isl_bool_false;
}
/* This function is called for each node in a CPU AST.
* In case of a user node, print the macro definitions required
* for printing the AST expressions in the annotation, if any.
* For other nodes, return true such that descendants are also
* visited.
*
* In particular, print the macro definitions needed for the substitutions
* of the original user statements.
*/
static isl_bool at_node(__isl_keep isl_ast_node *node, void *user)
{
struct ppcg_stmt *stmt;
isl_id *id;
isl_printer **p = user;
if (isl_ast_node_get_type(node) != isl_ast_node_user)
return isl_bool_true;
id = isl_ast_node_get_annotation(node);
stmt = isl_id_get_user(id);
isl_id_free(id);
if (!stmt)
return isl_bool_error;
*p = ppcg_print_body_macros(*p, stmt->ref2expr);
if (!*p)
return isl_bool_error;
return isl_bool_false;
}
/* Print the required macros for the CPU AST "node" to "p",
* including those needed for the user statements inside the AST.
*/
static __isl_give isl_printer *cpu_print_macros(__isl_take isl_printer *p,
__isl_keep isl_ast_node *node)
{
if (isl_ast_node_foreach_descendant_top_down(node, &at_node, &p) < 0)
return isl_printer_free(p);
p = ppcg_print_macros(p, node);
return p;
}
/* Code generate the scop 'scop' using "schedule"
* and print the corresponding C code to 'p'.
*/
static __isl_give isl_printer *print_scop(struct ppcg_scop *scop,
__isl_take isl_schedule *schedule, __isl_take isl_printer *p,
struct ppcg_options *options)
{
isl_ctx *ctx = isl_printer_get_ctx(p);
isl_ast_build *build;
isl_ast_print_options *print_options;
isl_ast_node *tree;
isl_id_list *iterators;
struct ast_build_userinfo build_info;
int depth;
depth = 0;
if (isl_schedule_foreach_schedule_node_top_down(schedule, &update_depth,
&depth) < 0)
goto error;
build = isl_ast_build_alloc(ctx);
iterators = ppcg_scop_generate_names(scop, depth, "c");
build = isl_ast_build_set_iterators(build, iterators);
build = isl_ast_build_set_at_each_domain(build, &at_each_domain, scop);
if (options->openmp) {
build_info.scop = scop;
build_info.in_parallel_for = 0;
build = isl_ast_build_set_before_each_for(build,
&ast_build_before_for,
&build_info);
build = isl_ast_build_set_after_each_for(build,
&ast_build_after_for,
&build_info);
}
tree = isl_ast_build_node_from_schedule(build, schedule);
isl_ast_build_free(build);
print_options = isl_ast_print_options_alloc(ctx);
print_options = isl_ast_print_options_set_print_user(print_options,
&print_user, NULL);
print_options = isl_ast_print_options_set_print_for(print_options,
&print_for, NULL);
p = cpu_print_macros(p, tree);
p = isl_ast_node_print(tree, p, print_options);
isl_ast_node_free(tree);
return p;
error:
isl_schedule_free(schedule);
isl_printer_free(p);
return NULL;
}
/* Tile the band node "node" with tile sizes "sizes" and
* mark all members of the resulting tile node as "atomic".
*/
static __isl_give isl_schedule_node *tile(__isl_take isl_schedule_node *node,
__isl_take isl_multi_val *sizes)
{
node = isl_schedule_node_band_tile(node, sizes);
node = ppcg_set_schedule_node_type(node, isl_ast_loop_atomic);
return node;
}
/* Tile "node", if it is a band node with at least 2 members.
* The tile sizes are set from the "tile_size" option.
*/
static __isl_give isl_schedule_node *tile_band(
__isl_take isl_schedule_node *node, void *user)
{
struct ppcg_scop *scop = user;
int n;
isl_space *space;
isl_multi_val *sizes;
if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
return node;
n = isl_schedule_node_band_n_member(node);
if (n <= 1)
return node;
space = isl_schedule_node_band_get_space(node);
sizes = ppcg_multi_val_from_int(space, scop->options->tile_size);
return tile(node, sizes);
}
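/* As a sketch of the effect of tile_band, a band with members [i, j]
 * and tile_size 4 is split into a tile band and a point band, so the
 * generated code has the shape (the exact bounds and strides depend
 * on the isl tiling options)
 *
 *	for (c0 = ...; ...; c0 += 4)        // tile loop for i
 *	  for (c1 = ...; ...; c1 += 4)      // tile loop for j
 *	    for (c2 = ...; ...; c2 += 1)    // point loop within the tile
 *	      for (c3 = ...; ...; c3 += 1)
 *	        S(...);
 *
 * Marking the tile band "atomic" tells the AST generator to produce a
 * single loop per tile dimension rather than splitting it into
 * special cases.
 */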
/* Construct schedule constraints from the dependences in ps
* for the purpose of computing a schedule for a CPU.
*
* The proximity constraints are set to the flow dependences.
*
* If live-range reordering is allowed then the conditional validity
* constraints are set to the order dependences with the flow dependences
* as condition. That is, a live-range (flow dependence) will be either
* local to an iteration of a band or all adjacent order dependences
* will be respected by the band.
* The validity constraints are set to the union of the flow dependences
* and the forced dependences, while the coincidence constraints
* are set to the union of the flow dependences, the forced dependences and
* the order dependences.
*
* If live-range reordering is not allowed, then both the validity
* and the coincidence constraints are set to the union of the flow
* dependences and the false dependences.
*
* Note that the coincidence constraints are only set when the "openmp"
 * option is set. Even though the way openmp pragmas are introduced
* does not rely on the coincident property of the schedule band members,
* the coincidence constraints do affect the way the schedule is constructed,
* such that more schedule dimensions should be detected as parallel
* by ast_schedule_dim_is_parallel.
* Since the order dependences are also taken into account by
* ast_schedule_dim_is_parallel, they are also added to
* the coincidence constraints. If the openmp handling learns
* how to privatize some memory, then the corresponding order
* dependences can be removed from the coincidence constraints.
*/
static __isl_give isl_schedule_constraints *construct_cpu_schedule_constraints(
struct ppcg_scop *ps)
{
isl_schedule_constraints *sc;
isl_union_map *validity, *coincidence;
sc = isl_schedule_constraints_on_domain(isl_union_set_copy(ps->domain));
if (ps->options->live_range_reordering) {
sc = isl_schedule_constraints_set_conditional_validity(sc,
isl_union_map_copy(ps->tagged_dep_flow),
isl_union_map_copy(ps->tagged_dep_order));
validity = isl_union_map_copy(ps->dep_flow);
validity = isl_union_map_union(validity,
isl_union_map_copy(ps->dep_forced));
if (ps->options->openmp) {
coincidence = isl_union_map_copy(validity);
coincidence = isl_union_map_union(coincidence,
isl_union_map_copy(ps->dep_order));
}
} else {
validity = isl_union_map_copy(ps->dep_flow);
validity = isl_union_map_union(validity,
isl_union_map_copy(ps->dep_false));
if (ps->options->openmp)
coincidence = isl_union_map_copy(validity);
}
if (ps->options->openmp)
sc = isl_schedule_constraints_set_coincidence(sc, coincidence);
sc = isl_schedule_constraints_set_validity(sc, validity);
sc = isl_schedule_constraints_set_proximity(sc,
isl_union_map_copy(ps->dep_flow));
return sc;
}
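/* In summary, the constraints constructed above are
 *
 *	live-range reordering:
 *		validity             = flow + forced
 *		conditional validity = order, with flow as condition
 *		coincidence          = flow + forced + order  (openmp only)
 *	otherwise:
 *		validity             = flow + false
 *		coincidence          = flow + false           (openmp only)
 *
 * with the proximity constraints set to the flow dependences in both
 * cases.
 */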
/* Compute a schedule for the scop "ps".
*
* First derive the appropriate schedule constraints from the dependences
* in "ps" and then compute a schedule from those schedule constraints,
* possibly grouping statement instances based on the input schedule.
*/
static __isl_give isl_schedule *compute_cpu_schedule(struct ppcg_scop *ps)
{
isl_schedule_constraints *sc;
isl_schedule *schedule;
if (!ps)
return NULL;
sc = construct_cpu_schedule_constraints(ps);
if (ps->options->debug->dump_schedule_constraints)
isl_schedule_constraints_dump(sc);
schedule = ppcg_compute_schedule(sc, ps->schedule, ps->options);
return schedule;
}
/* Compute a new schedule for the scop "ps" if the reschedule option is set.
* Otherwise, return a copy of the original schedule.
*/
static __isl_give isl_schedule *optionally_compute_schedule(void *user)
{
struct ppcg_scop *ps = user;
if (!ps)
return NULL;
if (!ps->options->reschedule)
return isl_schedule_copy(ps->schedule);
return compute_cpu_schedule(ps);
}
/* Compute a schedule based on the dependences in "ps" and
* tile it if requested by the user.
*/
static __isl_give isl_schedule *get_schedule(struct ppcg_scop *ps,
struct ppcg_options *options)
{
isl_ctx *ctx;
isl_schedule *schedule;
if (!ps)
return NULL;
ctx = isl_union_set_get_ctx(ps->domain);
schedule = ppcg_get_schedule(ctx, options,
&optionally_compute_schedule, ps);
if (ps->options->tile)
schedule = isl_schedule_map_schedule_node_bottom_up(schedule,
&tile_band, ps);
return schedule;
}
/* Generate CPU code for the scop "ps" using "schedule" and
* print the corresponding C code to "p", including variable declarations.
*/
static __isl_give isl_printer *print_cpu_with_schedule(
__isl_take isl_printer *p, struct ppcg_scop *ps,
__isl_take isl_schedule *schedule, struct ppcg_options *options)
{
int hidden;
isl_set *context;
p = isl_printer_start_line(p);
p = isl_printer_print_str(p, "/* ppcg generated CPU code */");
p = isl_printer_end_line(p);
p = isl_printer_start_line(p);
p = isl_printer_end_line(p);
p = ppcg_set_macro_names(p);
p = ppcg_print_exposed_declarations(p, ps);
hidden = ppcg_scop_any_hidden_declarations(ps);
if (hidden) {
p = ppcg_start_block(p);
p = ppcg_print_hidden_declarations(p, ps);
}
context = isl_set_copy(ps->context);
context = isl_set_from_params(context);
schedule = isl_schedule_insert_context(schedule, context);
if (options->debug->dump_final_schedule)
isl_schedule_dump(schedule);
p = print_scop(ps, schedule, p, options);
if (hidden)
p = ppcg_end_block(p);
return p;
}
/* Generate CPU code for the scop "ps" and print the corresponding C code
* to "p", including variable declarations.
*/
__isl_give isl_printer *print_cpu(__isl_take isl_printer *p,
struct ppcg_scop *ps, struct ppcg_options *options)
{
isl_schedule *schedule;
schedule = isl_schedule_copy(ps->schedule);
return print_cpu_with_schedule(p, ps, schedule, options);
}
/* Generate CPU code for "scop" and print it to "p".
*
* First obtain a schedule for "scop" and then print code for "scop"
* using that schedule.
*/
static __isl_give isl_printer *generate(__isl_take isl_printer *p,
struct ppcg_scop *scop, struct ppcg_options *options)
{
isl_schedule *schedule;
schedule = get_schedule(scop, options);
return print_cpu_with_schedule(p, scop, schedule, options);
}
/* Wrapper around generate for use as a ppcg_transform callback.
*/
static __isl_give isl_printer *print_cpu_wrap(__isl_take isl_printer *p,
struct ppcg_scop *scop, void *user)
{
struct ppcg_options *options = user;
return generate(p, scop, options);
}
/* Transform the code in the file called "input" by replacing
* all scops by corresponding CPU code and write the results to a file
* called "output".
*/
int generate_cpu(isl_ctx *ctx, struct ppcg_options *options,
const char *input, const char *output)
{
FILE *output_file;
int r;
output_file = get_output_file(input, output);
if (!output_file)
return -1;
r = ppcg_transform(ctx, input, output_file, options,
&print_cpu_wrap, options);
fclose(output_file);
return r;
}
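/* Minimal usage sketch of this entry point (the real driver lives in
 * ppcg.c; the ctx and options set-up below is assumed):
 *
 *	isl_ctx *ctx = ...;                   // ctx with pet/ppcg options registered
 *	struct ppcg_options *options = ...;   // parsed command line options
 *
 *	if (generate_cpu(ctx, options, "input.c", NULL) < 0)
 *		return EXIT_FAILURE;          // or other error handling
 *	isl_ctx_free(ctx);
 *
 * With a NULL "output" argument, the result is written to
 * "input.ppcg.c" (see get_output_file above).
 */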