VPR-7.0
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros
route_common.c
Go to the documentation of this file.
1 #include <math.h>
2 #include <stdio.h>
3 #include <assert.h>
4 #include <time.h>
5 #include "util.h"
6 #include "vpr_types.h"
7 #include "vpr_utils.h"
8 #include "globals.h"
9 #include "route_export.h"
10 #include "route_common.h"
11 #include "route_tree_timing.h"
12 #include "route_timing.h"
13 #include "route_breadth_first.h"
14 #include "place_and_route.h"
15 #include "rr_graph.h"
16 #include "read_xml_arch_file.h"
17 #include "ReadOptions.h"
18 
19 /***************** Variables shared only by route modules *******************/
20 
21 t_rr_node_route_inf *rr_node_route_inf = NULL; /* [0..num_rr_nodes-1] */
22 
23 struct s_bb *route_bb = NULL; /* [0..num_nets-1]. Limits area in which each */
24 
25 /* net must be routed. */
26 
27 /**************** Static variables local to route_common.c ******************/
28 
29 static struct s_heap **heap; /* Indexed from [1..heap_size] */
30 static int heap_size; /* Number of slots in the heap array */
31 static int heap_tail; /* Index of first unused slot in the heap array */
32 
33 /* For managing my own list of currently free heap data structures. */
34 static struct s_heap *heap_free_head = NULL;
/* For keeping track of the pseudo (chunk) malloc memory for the heap */
36 static t_chunk heap_ch = {NULL, 0, NULL};
37 
38 /* For managing my own list of currently free trace data structures. */
39 static struct s_trace *trace_free_head = NULL;
/* For keeping track of the pseudo (chunk) malloc memory for the trace */
41 static t_chunk trace_ch = {NULL, 0, NULL};
42 
43 #ifdef DEBUG
44 static int num_trace_allocated = 0; /* To watch for memory leaks. */
45 static int num_heap_allocated = 0;
46 static int num_linked_f_pointer_allocated = 0;
47 #endif
48 
49 static struct s_linked_f_pointer *rr_modified_head = NULL;
51 
52 static t_chunk linked_f_pointer_ch = {NULL, 0, NULL};
53 
54 /* The numbering relation between the channels and clbs is: *
55  * *
56  * | IO | chan_ | CLB | chan_ | CLB | *
57  * |grid[0][2] | y[0][2] |grid[1][2] | y[1][2] | grid[2][2]| *
58  * +-----------+ +-----------+ +-----------+ *
59  * } capacity in *
60  * No channel chan_x[1][1] chan_x[2][1] } chan_width *
61  * } _x[1] *
62  * +-----------+ +-----------+ +-----------+ *
63  * | | chan_ | | chan_ | | *
64  * | IO | y[0][1] | CLB | y[1][1] | CLB | *
65  * |grid[0][1] | |grid[1][1] | |grid[2][1] | *
66  * | | | | | | *
67  * +-----------+ +-----------+ +-----------+ *
68  * } capacity in *
69  * chan_x[1][0] chan_x[2][0] } chan_width *
70  * } _x[0] *
71  * +-----------+ +-----------+ *
72  * No | | No | | *
73  * Channel | IO | Channel | IO | *
74  * |grid[1][0] | |grid[2][0] | *
75  * | | | | *
76  * +-----------+ +-----------+ *
77  * *
78  * {=======} {=======} *
79  * Capacity in Capacity in *
80  * chan_width_y[0] chan_width_y[1] *
81  * */
82 
83 /******************** Subroutines local to route_common.c *******************/
84 
85 static void free_trace_data(struct s_trace *tptr);
86 static void load_route_bb(int bb_factor);
87 
88 static struct s_trace *alloc_trace_data(void);
89 static void add_to_heap(struct s_heap *hptr);
90 static struct s_heap *alloc_heap_data(void);
91 static struct s_linked_f_pointer *alloc_linked_f_pointer(void);
92 
94 static void adjust_one_rr_occ_and_pcost(int inode, int add_or_sub,
95  float pres_fac);
96 
97 /************************** Subroutine definitions ***************************/
98 
 t_ivec ** saved_clb_opins_used_locally) {

	/* NOTE(review): the start of this definition's signature is missing from
	 * this listing -- presumably
	 * void save_routing(struct s_trace **best_routing,
	 *                   t_ivec **clb_opins_used_locally, ...).
	 * Confirm against the original source file. */

	/* This routine frees any routing currently held in best_routing,       *
	 * then copies over the current routing (held in trace_head), and       *
	 * finally sets trace_head and trace_tail to all NULLs so that the      *
	 * connection to the saved routing is broken.  This is necessary so     *
	 * that the next iteration of the router does not free the saved        *
	 * routing elements.  Also saves any data about locally used clb_opins, *
	 * since this is also part of the routing.                              */

	int inet, iblk, iclass, ipin, num_local_opins;
	struct s_trace *tptr, *tempptr;
	t_type_ptr type;

	for (inet = 0; inet < num_nets; inet++) {

		/* Free any previously saved routing.  It is no longer best. */
		tptr = best_routing[inet];
		while (tptr != NULL) {
			tempptr = tptr->next;
			free_trace_data(tptr);
			tptr = tempptr;
		}

		/* Save a pointer to the current routing in best_routing. */
		best_routing[inet] = trace_head[inet];

		/* Set the current (working) routing to NULL so the current trace *
		 * elements won't be reused by the memory allocator.              */
		trace_head[inet] = NULL;
		trace_tail[inet] = NULL;
	}

	/* Save which OPINs are locally used (also part of the routing state). */

	for (iblk = 0; iblk < num_blocks; iblk++) {
		type = block[iblk].type;
		for (iclass = 0; iclass < type->num_class; iclass++) {
			num_local_opins = clb_opins_used_locally[iblk][iclass].nelem;
			for (ipin = 0; ipin < num_local_opins; ipin++) {
				saved_clb_opins_used_locally[iblk][iclass].list[ipin] =
						clb_opins_used_locally[iblk][iclass].list[ipin];
			}
		}
	}
}
148 
 t_ivec ** saved_clb_opins_used_locally) {

	/* NOTE(review): the start of this definition's signature is missing from
	 * this listing -- presumably
	 * void restore_routing(struct s_trace **best_routing,
	 *                      t_ivec **clb_opins_used_locally, ...).
	 * Confirm against the original source file. */

	/* Deallocates any current routing in trace_head, and replaces it with  *
	 * the routing in best_routing.  best_routing is set to NULL to show    *
	 * that it no longer points to a valid routing.  NOTE: trace_tail is    *
	 * not restored -- it is set to all NULLs since it is only used in      *
	 * update_traceback.  If you need trace_tail restored, modify this      *
	 * routine.  Also restores the locally used opin data.                  */

	int inet, iblk, ipin, iclass, num_local_opins;
	t_type_ptr type;

	for (inet = 0; inet < num_nets; inet++) {

		/* Free any current routing. */
		free_traceback(inet);

		/* Set the current routing to the saved one. */
		trace_head[inet] = best_routing[inet];
		best_routing[inet] = NULL; /* No stored routing. */
	}

	/* Restore which OPINs are locally used. */

	for (iblk = 0; iblk < num_blocks; iblk++) {
		type = block[iblk].type;
		for (iclass = 0; iclass < type->num_class; iclass++) {
			num_local_opins = clb_opins_used_locally[iblk][iclass].nelem;
			for (ipin = 0; ipin < num_local_opins; ipin++) {
				clb_opins_used_locally[iblk][iclass].list[ipin] =
						saved_clb_opins_used_locally[iblk][iclass].list[ipin];
			}
		}

	}
}
187 
188 void get_serial_num(void) {
189 
190  /* This routine finds a "magic cookie" for the routing and prints it. *
191  * Use this number as a routing serial number to ensure that programming *
192  * changes do not break the router. */
193 
194  int inet, serial_num, inode;
195  struct s_trace *tptr;
196 
197  serial_num = 0;
198 
199  for (inet = 0; inet < num_nets; inet++) {
200 
201  /* Global nets will have null trace_heads (never routed) so they *
202  * are not included in the serial number calculation. */
203 
204  tptr = trace_head[inet];
205  while (tptr != NULL) {
206  inode = tptr->index;
207  serial_num += (inet + 1)
208  * (rr_node[inode].xlow * (nx + 1) - rr_node[inode].yhigh);
209 
210  serial_num -= rr_node[inode].ptc_num * (inet + 1) * 10;
211 
212  serial_num -= rr_node[inode].type * (inet + 1) * 100;
213  serial_num %= 2000000000; /* Prevent overflow */
214  tptr = tptr->next;
215  }
216  }
217  vpr_printf(TIO_MESSAGE_INFO, "Serial number (magic cookie) for the routing is: %d\n", serial_num);
218 }
219 
boolean try_route(int width_fac, struct s_router_opts router_opts,
		struct s_det_routing_arch det_routing_arch, t_segment_inf * segment_inf,
		t_timing_inf timing_inf, float **net_delay, t_slack * slacks,
		t_chan_width_dist chan_width_dist, t_ivec ** clb_opins_used_locally,
		boolean * Fc_clipped, t_direct_inf *directs, int num_directs) {

	/* Attempts a routing via an iterated maze router algorithm.  Width_fac *
	 * specifies the relative width of the channels, while the members of   *
	 * router_opts determine the value of the costs assigned to routing     *
	 * resource node, etc.  det_routing_arch describes the detailed routing *
	 * architecture (connection and switch boxes) of the FPGA; it is used   *
	 * only if a DETAILED routing has been selected.                        */

	int tmp;
	clock_t begin, end;
	boolean success;
	t_graph_type graph_type;

	if (router_opts.route_type == GLOBAL) {
		graph_type = GRAPH_GLOBAL;
	} else {
		graph_type = (
				det_routing_arch.directionality == BI_DIRECTIONAL ?
		/* NOTE(review): the tail of this conditional expression (original
		 * line 243, presumably "GRAPH_BIDIR : GRAPH_UNIDIR);") is missing
		 * from this listing.  Confirm against the original source. */
	}

	/* Set the channel widths */

	init_chan(width_fac, chan_width_dist);

	/* Free any old routing graph, if one exists. */

	free_rr_graph();

	begin = clock();

	/* Set up the routing resource graph defined by this FPGA architecture. */

	/* NOTE(review): the first line of this call (original line 258, the
	 * function name and leading arguments -- presumably build_rr_graph(...))
	 * is missing from this listing. */
			chan_width_x[0], NULL, det_routing_arch.switch_block_type,
			det_routing_arch.Fs, det_routing_arch.num_segment,
			det_routing_arch.num_switch, segment_inf,
			det_routing_arch.global_route_switch,
			det_routing_arch.delayless_switch, timing_inf,
			det_routing_arch.wire_to_ipin_switch, router_opts.base_cost_type,
			directs, num_directs, FALSE,
			&tmp);

	end = clock();
#ifdef CLOCKS_PER_SEC
	vpr_printf(TIO_MESSAGE_INFO, "Build rr_graph took %g seconds.\n", (float)(end - begin) / CLOCKS_PER_SEC);
#else
	vpr_printf(TIO_MESSAGE_INFO, "Build rr_graph took %g seconds.\n", (float)(end - begin) / CLK_PER_SEC);
#endif

	/* Allocate and load some additional rr_graph information needed only by *
	 * the router.                                                           */

	/* NOTE(review): a statement is missing here (original line 279),
	 * presumably alloc_and_load_rr_node_route_structs();.  Confirm. */

	init_route_structs(router_opts.bb_factor);

	if (router_opts.router_algorithm == BREADTH_FIRST) {
		vpr_printf(TIO_MESSAGE_INFO, "Confirming Router Algorithm: BREADTH_FIRST.\n");
		success = try_breadth_first_route(router_opts, clb_opins_used_locally,
				width_fac);
	} else { /* TIMING_DRIVEN route */
		vpr_printf(TIO_MESSAGE_INFO, "Confirming Router Algorithm: TIMING_DRIVEN.\n");
		assert(router_opts.route_type != GLOBAL);
		success = try_timing_driven_route(router_opts, net_delay, slacks,
				clb_opins_used_locally,timing_inf.timing_analysis_enabled);
	}

	/* NOTE(review): a statement is missing here (original line 293). */

	return (success);
}
297 
298 boolean feasible_routing(void) {
299 
300  /* This routine checks to see if this is a resource-feasible routing. *
301  * That is, are all rr_node capacity limitations respected? It assumes *
302  * that the occupancy arrays are up to date when it is called. */
303 
304  int inode;
305 
306  for (inode = 0; inode < num_rr_nodes; inode++) {
307  if (rr_node[inode].occ > rr_node[inode].capacity) {
308  return (FALSE);
309  }
310  }
311 
312  return (TRUE);
313 }
314 
315 void pathfinder_update_one_cost(struct s_trace *route_segment_start,
316  int add_or_sub, float pres_fac) {
317 
318  /* This routine updates the occupancy and pres_cost of the rr_nodes that are *
319  * affected by the portion of the routing of one net that starts at *
320  * route_segment_start. If route_segment_start is trace_head[inet], the *
321  * cost of all the nodes in the routing of net inet are updated. If *
322  * add_or_sub is -1 the net (or net portion) is ripped up, if it is 1 the *
323  * net is added to the routing. The size of pres_fac determines how severly *
324  * oversubscribed rr_nodes are penalized. */
325 
326  struct s_trace *tptr;
327  int inode, occ, capacity;
328 
329  tptr = route_segment_start;
330  if (tptr == NULL) /* No routing yet. */
331  return;
332 
333  for (;;) {
334  inode = tptr->index;
335 
336  occ = rr_node[inode].occ + add_or_sub;
337  capacity = rr_node[inode].capacity;
338 
339  rr_node[inode].occ = occ;
340 
341  /* pres_cost is Pn in the Pathfinder paper. I set my pres_cost according to *
342  * the overuse that would result from having ONE MORE net use this routing *
343  * node. */
344 
345  if (occ < capacity) {
346  rr_node_route_inf[inode].pres_cost = 1.;
347  } else {
348  rr_node_route_inf[inode].pres_cost = 1.
349  + (occ + 1 - capacity) * pres_fac;
350  }
351 
352  if (rr_node[inode].type == SINK) {
353  tptr = tptr->next; /* Skip next segment. */
354  if (tptr == NULL)
355  break;
356  }
357 
358  tptr = tptr->next;
359 
360  } /* End while loop -- did an entire traceback. */
361 }
362 
363 void pathfinder_update_cost(float pres_fac, float acc_fac) {
364 
365  /* This routine recomputes the pres_cost and acc_cost of each routing *
366  * resource for the pathfinder algorithm after all nets have been routed. *
367  * It updates the accumulated cost to by adding in the number of extra *
368  * signals sharing a resource right now (i.e. after each complete iteration) *
369  * times acc_fac. It also updates pres_cost, since pres_fac may have *
370  * changed. THIS ROUTINE ASSUMES THE OCCUPANCY VALUES IN RR_NODE ARE UP TO *
371  * DATE. */
372 
373  int inode, occ, capacity;
374 
375  for (inode = 0; inode < num_rr_nodes; inode++) {
376  occ = rr_node[inode].occ;
377  capacity = rr_node[inode].capacity;
378 
379  if (occ > capacity) {
380  rr_node_route_inf[inode].acc_cost += (occ - capacity) * acc_fac;
381  rr_node_route_inf[inode].pres_cost = 1.
382  + (occ + 1 - capacity) * pres_fac;
383  }
384 
385  /* If occ == capacity, we don't need to increase acc_cost, but a change *
386  * in pres_fac could have made it necessary to recompute the cost anyway. */
387 
388  else if (occ == capacity) {
389  rr_node_route_inf[inode].pres_cost = 1. + pres_fac;
390  }
391  }
392 }
393 
394 void init_route_structs(int bb_factor) {
395 
396  /* Call this before you route any nets. It frees any old traceback and *
397  * sets the list of rr_nodes touched to empty. */
398 
399  int i;
400 
401  for (i = 0; i < num_nets; i++)
402  free_traceback(i);
403 
404  load_route_bb(bb_factor);
405 
406  /* Check that things that should have been emptied after the last routing *
407  * really were. */
408 
409  if (rr_modified_head != NULL) {
410  vpr_printf(TIO_MESSAGE_ERROR, "in init_route_structs. List of modified rr nodes is not empty.\n");
411  exit(1);
412  }
413 
414  if (heap_tail != 1) {
415  vpr_printf(TIO_MESSAGE_ERROR, "in init_route_structs. Heap is not empty.\n");
416  exit(1);
417  }
418 }
419 
struct s_trace *
update_traceback(struct s_heap *hptr, int inet) {

	/* This routine adds the most recently finished wire segment to the      *
	 * traceback linked list.  The first connection starts with the net      *
	 * SOURCE and begins at the structure pointed to by trace_head[inet].    *
	 * Each connection ends with a SINK.  After each SINK, the next          *
	 * connection begins (if the net has more than 2 pins).  The first       *
	 * element after the SINK gives the routing node on a previous piece of  *
	 * the routing, which is the link from the existing net to this new      *
	 * piece of the net.                                                     *
	 * In each traceback I start at the end of a path (the SINK in hptr) and *
	 * trace back through its predecessors to the beginning.  Predecessor    *
	 * information is stored per node to make traceback easy -- this         *
	 * sacrifices some memory for easier code maintenance.  Returns a        *
	 * pointer to the first "new" node in the traceback (node not            *
	 * previously in the trace).                                             */

	struct s_trace *tptr, *prevptr, *temptail, *ret_ptr;
	int inode;
	short iedge;

#ifdef DEBUG
	t_rr_type rr_type;
#endif

	inode = hptr->index;

#ifdef DEBUG
	/* A finished connection must terminate at a SINK. */
	rr_type = rr_node[inode].type;
	if (rr_type != SINK) {
		vpr_printf(TIO_MESSAGE_ERROR, "in update_traceback. Expected type = SINK (%d).\n", SINK);
		vpr_printf(TIO_MESSAGE_ERROR, "\tGot type = %d while tracing back net %d.\n", rr_type, inet);
		exit(1);
	}
#endif

	tptr = alloc_trace_data(); /* SINK on the end of the connection */
	tptr->index = inode;
	tptr->iswitch = OPEN;      /* No switch out of a SINK. */
	tptr->next = NULL;
	temptail = tptr; /* This will become the new tail at the end */
	/* of the routine. */

	/* Now walk the predecessor chain back to the start of this connection, *
	 * prepending each node so the list ends up in forward order.           */

	inode = hptr->u.prev_node;
	iedge = hptr->prev_edge;

	while (inode != NO_PREVIOUS) {
		prevptr = alloc_trace_data();
		prevptr->index = inode;
		prevptr->iswitch = rr_node[inode].switches[iedge];
		prevptr->next = tptr;
		tptr = prevptr;

		iedge = rr_node_route_inf[inode].prev_edge;
		inode = rr_node_route_inf[inode].prev_node;
	}

	if (trace_tail[inet] != NULL) {
		/* Appending to an existing net: the first node of this chunk        *
		 * duplicates a node already in the trace, so the first NEW segment  *
		 * is tptr->next.                                                    */
		trace_tail[inet]->next = tptr; /* Traceback ends with tptr */
		ret_ptr = tptr->next; /* First new segment. */
	} else { /* This was the first "chunk" of the net's routing */
		trace_head[inet] = tptr;
		ret_ptr = tptr; /* Whole traceback is new. */
	}

	trace_tail[inet] = temptail;
	return (ret_ptr);
}
489 
490 void reset_path_costs(void) {
491 
492  /* The routine sets the path_cost to HUGE_POSITIVE_FLOAT for all channel segments *
493  * touched by previous routing phases. */
494 
495  struct s_linked_f_pointer *mod_ptr;
496 
497 #ifdef DEBUG
498  int num_mod_ptrs;
499 #endif
500 
501  /* The traversal method below is slightly painful to make it faster. */
502 
503  if (rr_modified_head != NULL) {
504  mod_ptr = rr_modified_head;
505 
506 #ifdef DEBUG
507  num_mod_ptrs = 1;
508 #endif
509 
510  while (mod_ptr->next != NULL) {
511  *(mod_ptr->fptr) = HUGE_POSITIVE_FLOAT;
512  mod_ptr = mod_ptr->next;
513 #ifdef DEBUG
514  num_mod_ptrs++;
515 #endif
516  }
517  *(mod_ptr->fptr) = HUGE_POSITIVE_FLOAT; /* Do last one. */
518 
519  /* Reset the modified list and put all the elements back in the free *
520  * list. */
521 
522  mod_ptr->next = linked_f_pointer_free_head;
523  linked_f_pointer_free_head = rr_modified_head;
524  rr_modified_head = NULL;
525 
526 #ifdef DEBUG
527  num_linked_f_pointer_allocated -= num_mod_ptrs;
528 #endif
529  }
530 }
531 
532 float get_rr_cong_cost(int inode) {
533 
534  /* Returns the *congestion* cost of using this rr_node. */
535 
536  short cost_index;
537  float cost;
538 
539  cost_index = rr_node[inode].cost_index;
540  cost = rr_indexed_data[cost_index].base_cost
541  * rr_node_route_inf[inode].acc_cost
542  * rr_node_route_inf[inode].pres_cost;
543  return (cost);
544 }
545 
546 void mark_ends(int inet) {
547 
548  /* Mark all the SINKs of this net as targets by setting their target flags *
549  * to the number of times the net must connect to each SINK. Note that *
550  * this number can occassionally be greater than 1 -- think of connecting *
551  * the same net to two inputs of an and-gate (and-gate inputs are logically *
552  * equivalent, so both will connect to the same SINK). */
553 
554  int ipin, inode;
555 
556  for (ipin = 1; ipin <= clb_net[inet].num_sinks; ipin++) {
557  inode = net_rr_terminals[inet][ipin];
558  rr_node_route_inf[inode].target_flag++;
559  }
560 }
561 
562 void node_to_heap(int inode, float cost, int prev_node, int prev_edge,
563  float backward_path_cost, float R_upstream) {
564 
565  /* Puts an rr_node on the heap, if the new cost given is lower than the *
566  * current path_cost to this channel segment. The index of its predecessor *
567  * is stored to make traceback easy. The index of the edge used to get *
568  * from its predecessor to it is also stored to make timing analysis, etc. *
569  * easy. The backward_path_cost and R_upstream values are used only by the *
570  * timing-driven router -- the breadth-first router ignores them. */
571 
572  struct s_heap *hptr;
573 
574  if (cost >= rr_node_route_inf[inode].path_cost)
575  return;
576 
577  hptr = alloc_heap_data();
578  hptr->index = inode;
579  hptr->cost = cost;
580  hptr->u.prev_node = prev_node;
581  hptr->prev_edge = prev_edge;
583  hptr->R_upstream = R_upstream;
584  add_to_heap(hptr);
585 }
586 
587 void free_traceback(int inet) {
588 
589  /* Puts the entire traceback (old routing) for this net on the free list *
590  * and sets the trace_head pointers etc. for the net to NULL. */
591 
592  struct s_trace *tptr, *tempptr;
593 
594  if(trace_head == NULL) {
595  return;
596  }
597 
598  tptr = trace_head[inet];
599 
600  while (tptr != NULL) {
601  tempptr = tptr->next;
602  free_trace_data(tptr);
603  tptr = tempptr;
604  }
605 
606  trace_head[inet] = NULL;
607  trace_tail[inet] = NULL;
608 }
609 
t_ivec **
/* NOTE(review): this listing is missing the definition line (original line
 * 611, presumably "alloc_route_structs(void) {"), the declaration of
 * clb_opins_used_locally (line 615), and a call on line 617 (presumably
 * alloc_route_static_structs()).  Confirm against the original source. */

	/* Allocates the data structures needed for routing. */

	clb_opins_used_locally = alloc_and_load_clb_opins_used_locally();

	return (clb_opins_used_locally);
}
622 
/* NOTE(review): the definition line (original line 623, presumably
 * "void alloc_route_static_structs(void) {") is missing from this listing. */
	trace_head = (struct s_trace **) my_calloc(num_nets,
			sizeof(struct s_trace *));
	trace_tail = (struct s_trace **) my_malloc(
			num_nets * sizeof(struct s_trace *));

	/* Initial heap sized to the grid; add_to_heap() doubles it on demand. */
	heap_size = nx * ny;
	heap = (struct s_heap **) my_malloc(heap_size * sizeof(struct s_heap *));
	heap--; /* heap stores from [1..heap_size] */
	heap_tail = 1;

	route_bb = (struct s_bb *) my_malloc(num_nets * sizeof(struct s_bb));
}
636 
struct s_trace **
/* NOTE(review): the definition line (original line 638, presumably
 * "alloc_saved_routing(t_ivec **clb_opins_used_locally,") is missing from
 * this listing.  Confirm against the original source. */
		t_ivec *** saved_clb_opins_used_locally_ptr) {

	/* Allocates data structures into which the key routing data can be     *
	 * saved, allowing the routing to be recovered later (e.g. after        *
	 * another routing is attempted).                                       */

	struct s_trace **best_routing;
	t_ivec **saved_clb_opins_used_locally;
	int iblk, iclass, num_local_opins;
	t_type_ptr type;

	best_routing = (struct s_trace **) my_calloc(num_nets,
			sizeof(struct s_trace *));

	saved_clb_opins_used_locally = (t_ivec **) my_malloc(
			num_blocks * sizeof(t_ivec *));

	/* Mirror the shape of clb_opins_used_locally so a straight copy works. */
	for (iblk = 0; iblk < num_blocks; iblk++) {
		type = block[iblk].type;
		saved_clb_opins_used_locally[iblk] = (t_ivec *) my_malloc(
				type->num_class * sizeof(t_ivec));
		for (iclass = 0; iclass < type->num_class; iclass++) {
			num_local_opins = clb_opins_used_locally[iblk][iclass].nelem;
			saved_clb_opins_used_locally[iblk][iclass].nelem = num_local_opins;

			if (num_local_opins == 0) {
				saved_clb_opins_used_locally[iblk][iclass].list = NULL;
			} else {
				saved_clb_opins_used_locally[iblk][iclass].list =
						(int *) my_malloc(num_local_opins * sizeof(int));
			}
		}
	}

	*saved_clb_opins_used_locally_ptr = saved_clb_opins_used_locally;
	return (best_routing);
}
676 
/* TODO: super hacky, jluu comment, I need to rethink this whole function, without it, logically equivalent output pins incorrectly use more pins than needed. I force that CLB output pin uses at most one output pin */
static t_ivec **
/* NOTE(review): the definition line (original line 679, presumably
 * "alloc_and_load_clb_opins_used_locally(void) {") and the declaration of
 * clb_opins_used_locally (line 685) are missing from this listing. */

	/* Allocates and loads the data needed to make the router reserve some  *
	 * CLB output pins for connections made locally within a CLB (if the    *
	 * netlist specifies that this is necessary).                           */

	int iblk, clb_pin, iclass, num_local_opins;
	int class_low, class_high;
	t_type_ptr type;

	clb_opins_used_locally = (t_ivec **) my_malloc(
			num_blocks * sizeof(t_ivec *));

	for (iblk = 0; iblk < num_blocks; iblk++) {
		type = block[iblk].type;
		get_class_range_for_block(iblk, &class_low, &class_high);
		clb_opins_used_locally[iblk] = (t_ivec *) my_malloc(
				type->num_class * sizeof(t_ivec));
		for (iclass = 0; iclass < type->num_class; iclass++)
			clb_opins_used_locally[iblk][iclass].nelem = 0;

		/* Count, per pin class, the driver pins that feed no external net. */
		for (clb_pin = 0; clb_pin < type->num_pins; clb_pin++) {
			// another hack to avoid I/Os, whole function needs a rethink
			if(type == IO_TYPE) {
				continue;
			}

			/* Pin is unconnected, or drives a net with no sinks. */
			if ((block[iblk].nets[clb_pin] != OPEN
					&& clb_net[block[iblk].nets[clb_pin]].num_sinks == 0) || block[iblk].nets[clb_pin] == OPEN
					) {
				iclass = type->pin_class[clb_pin];
				if(type->class_inf[iclass].type == DRIVER) {
					/* Check to make sure class is in same range as that assigned to block */
					assert(iclass >= class_low && iclass <= class_high);
					clb_opins_used_locally[iblk][iclass].nelem++;
				}
			}
		}

		/* Allocate the per-class lists now that the counts are known. */
		for (iclass = 0; iclass < type->num_class; iclass++) {
			num_local_opins = clb_opins_used_locally[iblk][iclass].nelem;

			if (num_local_opins == 0)
				clb_opins_used_locally[iblk][iclass].list = NULL;
			else
				clb_opins_used_locally[iblk][iclass].list = (int *) my_malloc(
						num_local_opins * sizeof(int));
		}
	}

	return (clb_opins_used_locally);
}
732 
733 void free_trace_structs(void) {
734  /*the trace lists are only freed after use by the timing-driven placer */
735  /*Do not free them after use by the router, since stats, and draw */
736  /*routines use the trace values */
737  int i;
738 
739  for (i = 0; i < num_nets; i++)
740  free_traceback(i);
741 
742  if(trace_head) {
743  free(trace_head);
744  free(trace_tail);
745  }
746  trace_head = NULL;
747  trace_tail = NULL;
748 }
749 
/* NOTE(review): the definition line (original line 750, presumably
 * "void free_route_structs() {") is missing from this listing. */

	/* Frees the temporary storage needed only during the routing.  The *
	 * final routing result is not freed. */
	if(heap != NULL) {
		/* heap was decremented after allocation to make it 1-indexed; *
		 * undo that offset before freeing.                            */
		free(heap + 1);
	}
	if(route_bb != NULL) {
		free(route_bb);
	}

	heap = NULL; /* Defensive coding: crash hard if I use these. */
	route_bb = NULL;

	/*free the memory chunks that were used by heap and linked f pointer */
	free_chunk_memory(&heap_ch);
	free_chunk_memory(&linked_f_pointer_ch);
	heap_free_head = NULL;
	linked_f_pointer_free_head = NULL;
}
770 
/* NOTE(review): the start of this signature (original line 771, presumably
 * "void free_saved_routing(struct s_trace **best_routing,") is missing
 * from this listing. */
 t_ivec ** saved_clb_opins_used_locally) {

	/* Frees the data structures needed to save a routing. */
	int i;

	free(best_routing);
	for (i = 0; i < num_blocks; i++) {
		/* Free each block's saved opin vectors for classes [0..num_class-1]. */
		free_ivec_vector(saved_clb_opins_used_locally[i], 0,
				block[i].type->num_class - 1);
	}
	free(saved_clb_opins_used_locally);
}
784 
/* NOTE(review): the definition line (original line 785, presumably
 * "void alloc_and_load_rr_node_route_structs(void) {") is missing from
 * this listing. */

	/* Allocates some extra information about each rr_node that is used only *
	 * during routing. */

	int inode;

	if (rr_node_route_inf != NULL) {
		vpr_printf(TIO_MESSAGE_ERROR, "in alloc_and_load_rr_node_route_structs: old rr_node_route_inf array exists.\n");
		exit(1);
	}

	rr_node_route_inf = (t_rr_node_route_inf *) my_malloc(num_rr_nodes * sizeof(t_rr_node_route_inf));

	/* No routing yet: no predecessors, unit congestion costs, effectively *
	 * infinite path cost, and no targets marked.                          */
	for (inode = 0; inode < num_rr_nodes; inode++) {
		rr_node_route_inf[inode].prev_node = NO_PREVIOUS;
		rr_node_route_inf[inode].prev_edge = NO_PREVIOUS;
		rr_node_route_inf[inode].pres_cost = 1.;
		rr_node_route_inf[inode].acc_cost = 1.;
		rr_node_route_inf[inode].path_cost = HUGE_POSITIVE_FLOAT;
		rr_node_route_inf[inode].target_flag = 0;
	}
}
808 
/* NOTE(review): the definition line (original line 809, presumably
 * "void reset_rr_node_route_structs(void) {") is missing from this
 * listing. */

	/* Resets (rather than allocates) the extra per-rr_node information     *
	 * used only during routing; the array must already exist.              */

	int inode;

	assert(rr_node_route_inf != NULL);

	/* Same initial state as alloc_and_load_rr_node_route_structs(). */
	for (inode = 0; inode < num_rr_nodes; inode++) {
		rr_node_route_inf[inode].prev_node = NO_PREVIOUS;
		rr_node_route_inf[inode].prev_edge = NO_PREVIOUS;
		rr_node_route_inf[inode].pres_cost = 1.;
		rr_node_route_inf[inode].acc_cost = 1.;
		rr_node_route_inf[inode].path_cost = HUGE_POSITIVE_FLOAT;
		rr_node_route_inf[inode].target_flag = 0;
	}
}
827 
/* NOTE(review): the definition line (original line 828, presumably
 * "void free_rr_node_route_structs(void) {") is missing from this
 * listing. */

	/* Frees the extra information about each rr_node that is needed only *
	 * during routing. */

	free(rr_node_route_inf);
	rr_node_route_inf = NULL; /* Mark as free */
}
836 
/* RESEARCH TODO: Bounding box heuristic needs to be redone for heterogeneous blocks */
static void load_route_bb(int bb_factor) {

	/* This routine loads the bounding box arrays used to limit the space  *
	 * searched by the maze router when routing each net.  The search is   *
	 * limited to channels contained with the net bounding box expanded    *
	 * by bb_factor channels on each side.  For example, if bb_factor is   *
	 * 0, the maze router must route each net within its bounding box.     *
	 * If bb_factor = nx, the maze router will search every channel in     *
	 * the FPGA if necessary.  The bounding boxes returned by this routine *
	 * are different from the ones used by the placer in that they are     *
	 * clipped to lie within (0,0) and (nx+1,ny+1) rather than (1,1) and   *
	 * (nx,ny).                                                            */

	int k, xmax, ymax, xmin, ymin, x, y, inet;

	for (inet = 0; inet < num_nets; inet++) {
		/* Seed the bounding box with the driver block's location. */
		x = block[clb_net[inet].node_block[0]].x;
		y =
				block[clb_net[inet].node_block[0]].y
		/* NOTE(review): the continuation of this expression (original line
		 * 857) is missing from this listing; confirm against the source. */

		xmin = x;
		ymin = y;
		xmax = x;
		ymax = y;

		/* Expand the box to cover every sink block of the net. */
		for (k = 1; k <= clb_net[inet].num_sinks; k++) {
			x = block[clb_net[inet].node_block[k]].x;
			y =
					block[clb_net[inet].node_block[k]].y
			/* NOTE(review): the continuation of this expression (original
			 * line 868) is missing from this listing. */

			if (x < xmin) {
				xmin = x;
			} else if (x > xmax) {
				xmax = x;
			}

			if (y < ymin) {
				ymin = y;
			} else if (y > ymax) {
				ymax = y;
			}
		}

		/* Want the channels on all 4 sides to be usable, even if bb_factor = 0. */

		xmin -= 1;
		ymin -= 1;

		/* Expand the net bounding box by bb_factor, then clip to the physical *
		 * chip area. */

		route_bb[inet].xmin = std::max(xmin - bb_factor, 0);
		route_bb[inet].xmax = std::min(xmax + bb_factor, nx + 1);
		route_bb[inet].ymin = std::max(ymin - bb_factor, 0);
		route_bb[inet].ymax = std::min(ymax + bb_factor, ny + 1);
	}
}
897 
898 void add_to_mod_list(float *fptr) {
899 
900  /* This routine adds the floating point pointer (fptr) into a *
901  * linked list that indicates all the pathcosts that have been *
902  * modified thus far. */
903 
904  struct s_linked_f_pointer *mod_ptr;
905 
906  mod_ptr = alloc_linked_f_pointer();
907 
908  /* Add this element to the start of the modified list. */
909 
910  mod_ptr->next = rr_modified_head;
911  mod_ptr->fptr = fptr;
912  rr_modified_head = mod_ptr;
913 }
914 
915 static void add_to_heap(struct s_heap *hptr) {
916 
917  /* Adds an item to the heap, expanding the heap if necessary. */
918 
919  int ito, ifrom;
920  struct s_heap *temp_ptr;
921 
922  if (heap_tail > heap_size) { /* Heap is full */
923  heap_size *= 2;
924  heap = (struct s_heap **) my_realloc((void *) (heap + 1),
925  heap_size * sizeof(struct s_heap *));
926  heap--; /* heap goes from [1..heap_size] */
927  }
928 
929  heap[heap_tail] = hptr;
930  ifrom = heap_tail;
931  ito = ifrom / 2;
932  heap_tail++;
933 
934  while ((ito >= 1) && (heap[ifrom]->cost < heap[ito]->cost)) {
935  temp_ptr = heap[ito];
936  heap[ito] = heap[ifrom];
937  heap[ifrom] = temp_ptr;
938  ifrom = ito;
939  ito = ifrom / 2;
940  }
941 }
942 
943 /*WMF: peeking accessor :) */
944 boolean is_empty_heap(void) {
945  return (boolean)(heap_tail == 1);
946 }
947 
struct s_heap *
/* NOTE(review): the definition line (original line 950, presumably
 * "get_heap_head(void) {") is missing from this listing. */

	/* Returns a pointer to the smallest element on the heap, or NULL if the *
	 * heap is empty.  Invalid (index == OPEN) entries on the heap are never *
	 * returned -- they are just skipped over. */

	int ito, ifrom;
	struct s_heap *heap_head, *temp_ptr;

	do {
		if (heap_tail == 1) { /* Empty heap. */
			vpr_printf(TIO_MESSAGE_WARNING, "Empty heap occurred in get_heap_head.\n");
			vpr_printf(TIO_MESSAGE_WARNING, "Some blocks are impossible to connect in this architecture.\n");
			return (NULL);
		}

		heap_head = heap[1]; /* Smallest element. */

		/* Now fix up the heap: move the last element to the root and sift *
		 * it down until the heap property is restored.                    */

		heap_tail--;
		heap[1] = heap[heap_tail];
		ifrom = 1;
		ito = 2 * ifrom; /* Left child of ifrom. */

		while (ito < heap_tail) {
			/* NOTE(review): when ito == heap_tail - 1 the next line reads
			 * heap[heap_tail], a slot just vacated above; it still holds
			 * the element being sifted, so the comparison appears harmless,
			 * but confirm before restructuring this loop. */
			if (heap[ito + 1]->cost < heap[ito]->cost)
				ito++; /* Pick the cheaper child. */
			if (heap[ito]->cost > heap[ifrom]->cost)
				break;
			temp_ptr = heap[ito];
			heap[ito] = heap[ifrom];
			heap[ifrom] = temp_ptr;
			ifrom = ito;
			ito = 2 * ifrom;
		}

	} while (heap_head->index == OPEN); /* Get another one if invalid entry. */

	return (heap_head);
}
990 
991 void empty_heap(void) {
992 
993  int i;
994 
995  for (i = 1; i < heap_tail; i++)
996  free_heap_data(heap[i]);
997 
998  heap_tail = 1;
999 }
1000 
1001 static struct s_heap *
1003 
1004  struct s_heap *temp_ptr;
1005 
1006  if (heap_free_head == NULL) { /* No elements on the free list */
1007  heap_free_head = (struct s_heap *) my_chunk_malloc(sizeof(struct s_heap),&heap_ch);
1008  heap_free_head->u.next = NULL;
1009  }
1010 
1011  temp_ptr = heap_free_head;
1012  heap_free_head = heap_free_head->u.next;
1013 #ifdef DEBUG
1014  num_heap_allocated++;
1015 #endif
1016  return (temp_ptr);
1017 }
1018 
1019 void free_heap_data(struct s_heap *hptr) {
1020 
1021  hptr->u.next = heap_free_head;
1022  heap_free_head = hptr;
1023 #ifdef DEBUG
1024  num_heap_allocated--;
1025 #endif
1026 }
1027 
1028 void invalidate_heap_entries(int sink_node, int ipin_node) {
1029 
1030  /* Marks all the heap entries consisting of sink_node, where it was reached *
1031  * via ipin_node, as invalid (OPEN). Used only by the breadth_first router *
1032  * and even then only in rare circumstances. */
1033 
1034  int i;
1035 
1036  for (i = 1; i < heap_tail; i++) {
1037  if (heap[i]->index == sink_node && heap[i]->u.prev_node == ipin_node)
1038  heap[i]->index = OPEN; /* Invalid. */
1039  }
1040 }
1041 
1042 static struct s_trace *
1044 
1045  struct s_trace *temp_ptr;
1046 
1047  if (trace_free_head == NULL) { /* No elements on the free list */
1048  trace_free_head = (struct s_trace *) my_chunk_malloc(sizeof(struct s_trace),&trace_ch);
1049  trace_free_head->next = NULL;
1050  }
1051  temp_ptr = trace_free_head;
1052  trace_free_head = trace_free_head->next;
1053 #ifdef DEBUG
1054  num_trace_allocated++;
1055 #endif
1056  return (temp_ptr);
1057 }
1058 
1059 static void free_trace_data(struct s_trace *tptr) {
1060 
1061  /* Puts the traceback structure pointed to by tptr on the free list. */
1062 
1063  tptr->next = trace_free_head;
1064  trace_free_head = tptr;
1065 #ifdef DEBUG
1066  num_trace_allocated--;
1067 #endif
1068 }
1069 
1070 static struct s_linked_f_pointer *
1072 
1073  /* This routine returns a linked list element with a float pointer as *
1074  * the node data. */
1075 
1076  /*int i;*/
1077  struct s_linked_f_pointer *temp_ptr;
1078 
1079  if (linked_f_pointer_free_head == NULL) {
1080  /* No elements on the free list */
1081  linked_f_pointer_free_head = (struct s_linked_f_pointer *) my_chunk_malloc(sizeof(struct s_linked_f_pointer),&linked_f_pointer_ch);
1082  linked_f_pointer_free_head->next = NULL;
1083  }
1084 
1085  temp_ptr = linked_f_pointer_free_head;
1086  linked_f_pointer_free_head = linked_f_pointer_free_head->next;
1087 
1088 #ifdef DEBUG
1089  num_linked_f_pointer_allocated++;
1090 #endif
1091 
1092  return (temp_ptr);
1093 }
1094 
1095 void print_route(char *route_file) {
1096 
1097  /* Prints out the routing to file route_file. */
1098 
1099  int inet, inode, ipin, bnum, ilow, jlow, node_block_pin, iclass;
1100  t_rr_type rr_type;
1101  struct s_trace *tptr;
1102  const char *name_type[] = { "SOURCE", "SINK", "IPIN", "OPIN", "CHANX", "CHANY",
1103  "INTRA_CLUSTER_EDGE" };
1104  FILE *fp;
1105 
1106  fp = fopen(route_file, "w");
1107 
1108  fprintf(fp, "Array size: %d x %d logic blocks.\n", nx, ny);
1109  fprintf(fp, "\nRouting:");
1110  for (inet = 0; inet < num_nets; inet++) {
1111  if (clb_net[inet].is_global == FALSE) {
1112  if (clb_net[inet].num_sinks == FALSE) {
1113  fprintf(fp, "\n\nNet %d (%s)\n\n", inet, clb_net[inet].name);
1114  fprintf(fp, "\n\nUsed in local cluster only, reserved one CLB pin\n\n");
1115  } else {
1116  fprintf(fp, "\n\nNet %d (%s)\n\n", inet, clb_net[inet].name);
1117  tptr = trace_head[inet];
1118 
1119  while (tptr != NULL) {
1120  inode = tptr->index;
1121  rr_type = rr_node[inode].type;
1122  ilow = rr_node[inode].xlow;
1123  jlow = rr_node[inode].ylow;
1124 
1125  fprintf(fp, "Node:\t%d\t%6s (%d,%d) ", inode, name_type[rr_type], ilow, jlow);
1126 
1127  if ((ilow != rr_node[inode].xhigh)
1128  || (jlow != rr_node[inode].yhigh))
1129  fprintf(fp, "to (%d,%d) ", rr_node[inode].xhigh,
1130  rr_node[inode].yhigh);
1131 
1132  switch (rr_type) {
1133 
1134  case IPIN:
1135  case OPIN:
1136  if (grid[ilow][jlow].type == IO_TYPE) {
1137  fprintf(fp, " Pad: ");
1138  } else { /* IO Pad. */
1139  fprintf(fp, " Pin: ");
1140  }
1141  break;
1142 
1143  case CHANX:
1144  case CHANY:
1145  fprintf(fp, " Track: ");
1146  break;
1147 
1148  case SOURCE:
1149  case SINK:
1150  if (grid[ilow][jlow].type == IO_TYPE) {
1151  fprintf(fp, " Pad: ");
1152  } else { /* IO Pad. */
1153  fprintf(fp, " Class: ");
1154  }
1155  break;
1156 
1157  default:
1158  vpr_printf(TIO_MESSAGE_ERROR, "in print_route: Unexpected traceback element type: %d (%s).\n",
1159  rr_type, name_type[rr_type]);
1160  exit(1);
1161  break;
1162  }
1163 
1164  fprintf(fp, "%d ", rr_node[inode].ptc_num);
1165 
1166  /* Uncomment line below if you're debugging and want to see the switch types *
1167  * used in the routing. */
1168  /* fprintf (fp, "Switch: %d", tptr->iswitch); */
1169 
1170  fprintf(fp, "\n");
1171 
1172  tptr = tptr->next;
1173  }
1174  }
1175  }
1176 
1177  else { /* Global net. Never routed. */
1178  fprintf(fp, "\n\nNet %d (%s): global net connecting:\n\n", inet,
1179  clb_net[inet].name);
1180 
1181  for (ipin = 0; ipin <= clb_net[inet].num_sinks; ipin++) {
1182  bnum = clb_net[inet].node_block[ipin];
1183 
1184  node_block_pin = clb_net[inet].node_block_pin[ipin];
1185  iclass = block[bnum].type->pin_class[node_block_pin];
1186 
1187  fprintf(fp, "Block %s (#%d) at (%d, %d), Pin class %d.\n",
1188  block[bnum].name, bnum, block[bnum].x, block[bnum].y,
1189  iclass);
1190  }
1191  }
1192  }
1193 
1194  fclose(fp);
1195 
1197  fp = my_fopen(getEchoFileName(E_ECHO_MEM), "w", 0);
1198  fprintf(fp, "\nNum_heap_allocated: %d Num_trace_allocated: %d\n",
1199  num_heap_allocated, num_trace_allocated);
1200  fprintf(fp, "Num_linked_f_pointer_allocated: %d\n",
1201  num_linked_f_pointer_allocated);
1202  fclose(fp);
1203  }
1204 
1205 }
1206 
1207 /* TODO: check if this is still necessary for speed */
1208 void reserve_locally_used_opins(float pres_fac, boolean rip_up_local_opins,
1210 
1211  /* In the past, this function implicitly allowed LUT duplication when there are free LUTs.
1212  This was especially important for logical equivalence; however, now that we have a very general
1213  logic cluster, it does not make sense to allow LUT duplication implicitly. we'll need to look into how we want to handle this case
1214 
1215  */
1216 
1217  int iblk, num_local_opin, inode, from_node, iconn, num_edges, to_node;
1218  int iclass, ipin;
1219  float cost;
1220  struct s_heap *heap_head_ptr;
1221  t_type_ptr type;
1222 
1223  if (rip_up_local_opins) {
1224  for (iblk = 0; iblk < num_blocks; iblk++) {
1225  type = block[iblk].type;
1226  for (iclass = 0; iclass < type->num_class; iclass++) {
1227  num_local_opin = clb_opins_used_locally[iblk][iclass].nelem;
1228  /* Always 0 for pads and for RECEIVER (IPIN) classes */
1229  for (ipin = 0; ipin < num_local_opin; ipin++) {
1230  inode = clb_opins_used_locally[iblk][iclass].list[ipin];
1231  adjust_one_rr_occ_and_pcost(inode, -1, pres_fac);
1232  }
1233  }
1234  }
1235  }
1236 
1237  for (iblk = 0; iblk < num_blocks; iblk++) {
1238  type = block[iblk].type;
1239  for (iclass = 0; iclass < type->num_class; iclass++) {
1240  num_local_opin = clb_opins_used_locally[iblk][iclass].nelem;
1241  /* Always 0 for pads and for RECEIVER (IPIN) classes */
1242 
1243  if (num_local_opin != 0) { /* Have to reserve (use) some OPINs */
1244  from_node = rr_blk_source[iblk][iclass];
1245  num_edges = rr_node[from_node].num_edges;
1246  for (iconn = 0; iconn < num_edges; iconn++) {
1247  to_node = rr_node[from_node].edges[iconn];
1248  cost = get_rr_cong_cost(to_node);
1249  node_to_heap(to_node, cost, OPEN, OPEN, 0., 0.);
1250  }
1251 
1252  for (ipin = 0; ipin < num_local_opin; ipin++) {
1253  heap_head_ptr = get_heap_head();
1254  inode = heap_head_ptr->index;
1255  adjust_one_rr_occ_and_pcost(inode, 1, pres_fac);
1256  clb_opins_used_locally[iblk][iclass].list[ipin] = inode;
1257  free_heap_data(heap_head_ptr);
1258  }
1259 
1260  empty_heap();
1261  }
1262  }
1263  }
1264 }
1265 
1266 static void adjust_one_rr_occ_and_pcost(int inode, int add_or_sub,
1267  float pres_fac) {
1268 
1269  /* Increments or decrements (depending on add_or_sub) the occupancy of *
1270  * one rr_node, and adjusts the present cost of that node appropriately. */
1271 
1272  int occ, capacity;
1273 
1274  occ = rr_node[inode].occ + add_or_sub;
1275  capacity = rr_node[inode].capacity;
1276  rr_node[inode].occ = occ;
1277 
1278  if (occ < capacity) {
1279  rr_node_route_inf[inode].pres_cost = 1.;
1280  } else {
1281  rr_node_route_inf[inode].pres_cost = 1.
1282  + (occ + 1 - capacity) * pres_fac;
1283  }
1284 }
1285 
1286 
1288  if (trace_ch.chunk_ptr_head != NULL) {
1289  free_chunk_memory(&trace_ch);
1290  }
1291 }
1292 
1293 
int * node_block_pin
Definition: vpr_types.h:509
boolean feasible_routing(void)
Definition: route_common.c:298
static void load_route_bb(int bb_factor)
Definition: route_common.c:838
Definition: util.h:57
short num_edges
Definition: vpr_types.h:901
int index
Definition: route_common.h:4
static void free_trace_data(struct s_trace *tptr)
int index
Definition: vpr_types.h:866
float get_rr_cong_cost(int inode)
Definition: route_common.c:532
short cost_index
Definition: vpr_types.h:897
FILE * my_fopen(const char *fname, const char *flag, int prompt)
Definition: util.c:54
int * edges
Definition: vpr_types.h:903
enum e_router_algorithm router_algorithm
Definition: vpr_types.h:705
void free_rr_graph(void)
Definition: rr_graph.c:798
float backward_path_cost
Definition: route_common.h:11
struct s_trace ** trace_tail
Definition: globals.c:65
struct s_heap * get_heap_head(void)
Definition: route_common.c:949
t_rr_node * rr_node
Definition: globals.c:70
void free_traceback(int inet)
Definition: route_common.c:587
int prev_node
Definition: route_common.h:7
void mark_ends(int inet)
Definition: route_common.c:546
t_rr_node_route_inf * rr_node_route_inf
Definition: route_common.c:21
short ptc_num
Definition: vpr_types.h:895
void get_class_range_for_block(INP int iblk, OUTP int *class_low, OUTP int *class_high)
Definition: vpr_utils.c:162
struct s_class * class_inf
static struct s_linked_f_pointer * rr_modified_head
Definition: route_common.c:49
void save_routing(struct s_trace **best_routing, t_ivec **clb_opins_used_locally, t_ivec **saved_clb_opins_used_locally)
Definition: route_common.c:99
short iswitch
Definition: vpr_types.h:867
int xmax
Definition: vpr_types.h:533
t_rr_indexed_data * rr_indexed_data
Definition: globals.c:74
short delayless_switch
Definition: vpr_types.h:764
short global_route_switch
Definition: vpr_types.h:763
int ymin
Definition: vpr_types.h:534
static t_chunk heap_ch
Definition: route_common.c:36
void invalidate_heap_entries(int sink_node, int ipin_node)
#define NO_PREVIOUS
Definition: vpr_types.h:887
short ylow
Definition: vpr_types.h:892
int * list
Definition: util.h:49
void free_heap_data(struct s_heap *hptr)
void free_chunk_memory_trace(void)
static float ** net_delay
int x
Definition: vpr_types.h:563
enum e_graph_type t_graph_type
Definition: rr_graph.h:11
int * chan_width_x
Definition: globals.c:56
void * my_chunk_malloc(size_t size, t_chunk *chunk_info)
Definition: util.c:184
boolean try_route(int width_fac, struct s_router_opts router_opts, struct s_det_routing_arch det_routing_arch, t_segment_inf *segment_inf, t_timing_inf timing_inf, float **net_delay, t_slack *slacks, t_chan_width_dist chan_width_dist, t_ivec **clb_opins_used_locally, boolean *Fc_clipped, t_direct_inf *directs, int num_directs)
Definition: route_common.c:220
boolean timing_analysis_enabled
void free_saved_routing(struct s_trace **best_routing, t_ivec **saved_clb_opins_used_locally)
Definition: route_common.c:771
void * my_calloc(size_t nelem, size_t size)
Definition: util.c:132
void free_trace_structs(void)
Definition: route_common.c:733
struct s_heap * next
Definition: route_common.h:8
void pathfinder_update_cost(float pres_fac, float acc_fac)
Definition: route_common.c:363
void get_serial_num(void)
Definition: route_common.c:188
static struct s_heap * alloc_heap_data(void)
int num_nets
Definition: globals.c:27
int * node_block
Definition: vpr_types.h:507
float R_upstream
Definition: route_common.h:12
static void add_to_heap(struct s_heap *hptr)
Definition: route_common.c:915
struct s_linked_vptr * chunk_ptr_head
Definition: util.h:58
boolean try_breadth_first_route(struct s_router_opts router_opts, t_ivec **clb_opins_used_locally, int width_fac)
t_type_ptr type
Definition: vpr_types.h:561
static struct s_linked_f_pointer * alloc_linked_f_pointer(void)
int num_blocks
Definition: globals.c:30
boolean getEchoEnabled(void)
Definition: ReadOptions.c:67
static const char * name_type[]
Definition: draw.c:87
Definition: util.h:12
void reset_path_costs(void)
Definition: route_common.c:490
static struct s_heap ** heap
Definition: route_common.c:29
static float pres_fac
int y
Definition: vpr_types.h:564
#define min(a, b)
Definition: graphics.c:174
static t_ivec ** clb_opins_used_locally
union s_heap::@0 u
#define HUGE_POSITIVE_FLOAT
Definition: vpr_types.h:79
static struct s_linked_f_pointer * linked_f_pointer_free_head
Definition: route_common.c:50
struct s_trace * next
Definition: vpr_types.h:870
static void * my_malloc(int ibytes)
Definition: graphics.c:499
boolean * is_global
void alloc_and_load_rr_node_route_structs(void)
Definition: route_common.c:785
void init_route_structs(int bb_factor)
Definition: route_common.c:394
enum e_directionality directionality
Definition: vpr_types.h:758
int prev_edge
Definition: route_common.h:10
#define max(a, b)
Definition: graphics.c:171
void build_rr_graph(INP t_graph_type graph_type, INP int L_num_types, INP t_type_ptr types, INP int L_nx, INP int L_ny, INP struct s_grid_tile **L_grid, INP int chan_width, INP struct s_chan_width_dist *chan_capacity_inf, INP enum e_switch_block_type sb_type, INP int Fs, INP int num_seg_types, INP int num_switches, INP t_segment_inf *segment_inf, INP int global_route_switch, INP int delayless_switch, INP t_timing_inf timing_inf, INP int wire_to_ipin_switch, INP enum e_base_cost_type base_cost_type, INP t_direct_inf *directs, INP int num_directs, INP boolean ignore_Fc_0, OUTP int *Warnings)
Definition: rr_graph.c:192
struct s_block * block
Definition: globals.c:31
struct s_net * clb_net
Definition: globals.c:28
int nelem
Definition: util.h:48
int num_rr_nodes
Definition: globals.c:69
int nx
Definition: globals.c:46
void init_chan(int cfactor, t_chan_width_dist chan_width_dist)
void free_ivec_vector(struct s_ivec *ivec_vector, int nrmin, int nrmax)
Definition: util.c:498
t_ivec ** alloc_route_structs(void)
Definition: route_common.c:611
struct s_trace ** trace_head
Definition: globals.c:64
void print_route(char *route_file)
boolean isEchoFileEnabled(enum e_echo_files echo_option)
Definition: ReadOptions.c:115
short capacity
Definition: vpr_types.h:899
void restore_routing(struct s_trace **best_routing, t_ivec **clb_opins_used_locally, t_ivec **saved_clb_opins_used_locally)
Definition: route_common.c:149
enum e_route_type route_type
Definition: vpr_types.h:703
float cost
Definition: route_common.h:5
void node_to_heap(int inode, float cost, int prev_node, int prev_edge, float backward_path_cost, float R_upstream)
Definition: route_common.c:562
struct s_grid_tile ** grid
Definition: globals.c:59
boolean is_empty_heap(void)
Definition: route_common.c:944
void alloc_route_static_structs(void)
Definition: route_common.c:623
struct s_linked_f_pointer * next
Definition: vpr_types.h:842
Definition: util.h:47
static t_ivec ** alloc_and_load_clb_opins_used_locally(void)
Definition: route_common.c:679
static struct s_heap * heap_free_head
Definition: route_common.c:34
void pathfinder_update_one_cost(struct s_trace *route_segment_start, int add_or_sub, float pres_fac)
Definition: route_common.c:315
short yhigh
Definition: vpr_types.h:893
static t_chunk trace_ch
Definition: route_common.c:41
void reset_rr_node_route_structs(void)
Definition: route_common.c:809
struct s_trace * update_traceback(struct s_heap *hptr, int inet)
Definition: route_common.c:421
enum e_rr_type t_rr_type
struct s_trace ** alloc_saved_routing(t_ivec **clb_opins_used_locally, t_ivec ***saved_clb_opins_used_locally_ptr)
Definition: route_common.c:638
enum e_switch_block_type switch_block_type
Definition: vpr_types.h:760
int num_types
Definition: globals.c:37
short * switches
Definition: vpr_types.h:904
static void * my_realloc(void *memblk, int ibytes)
Definition: graphics.c:512
t_type_ptr IO_TYPE
Definition: globals.c:40
static void adjust_one_rr_occ_and_pcost(int inode, int add_or_sub, float pres_fac)
int ** rr_blk_source
Definition: globals.c:87
void add_to_mod_list(float *fptr)
Definition: route_common.c:898
short xlow
Definition: vpr_types.h:890
Definition: slre.c:50
int * nets
Definition: vpr_types.h:562
static struct s_trace * alloc_trace_data(void)
void reserve_locally_used_opins(float pres_fac, boolean rip_up_local_opins, t_ivec **clb_opins_used_locally)
void free_rr_node_route_structs(void)
Definition: route_common.c:828
int ** net_rr_terminals
Definition: globals.c:78
struct s_type_descriptor * type_descriptors
Definition: globals.c:38
char * getEchoFileName(enum e_echo_files echo_option)
Definition: ReadOptions.c:122
enum e_pin_type type
static t_chunk linked_f_pointer_ch
Definition: route_common.c:52
struct s_bb * route_bb
Definition: route_common.c:23
void empty_heap(void)
Definition: route_common.c:991
static int heap_tail
Definition: route_common.c:31
void free_chunk_memory(t_chunk *chunk_info)
Definition: util.c:270
int xmin
Definition: vpr_types.h:532
int ny
Definition: globals.c:47
void free_route_structs()
Definition: route_common.c:750
messagelogger vpr_printf
Definition: util.c:17
int num_sinks
Definition: vpr_types.h:506
short wire_to_ipin_switch
Definition: vpr_types.h:765
boolean try_timing_driven_route(struct s_router_opts router_opts, float **net_delay, t_slack *slacks, t_ivec **clb_opins_used_locally, boolean timing_analysis_enabled)
Definition: route_timing.c:44
enum e_base_cost_type base_cost_type
Definition: vpr_types.h:706
static struct s_trace * trace_free_head
Definition: route_common.c:39
short occ
Definition: vpr_types.h:898
t_rr_type type
Definition: vpr_types.h:902
Definition: util.h:12
static int heap_size
Definition: route_common.c:30
static struct s_trace ** best_routing
int ymax
Definition: vpr_types.h:535