source: vis_dev/glu-2.3/src/cmuPort/cmuPortIter.c @ 26

Last change on this file since 26 was 13, checked in by cecile, 13 years ago

library glu 2.3

File size: 21.1 KB
Line 
1/**CFile***********************************************************************
2
3  FileName    [cmuPortIter.c]
4
5  PackageName [cmu_port]
6
7  Synopsis    [Port routines for CMU package.]
8
9  Description [optional]
10
11  SeeAlso     [optional]
12
13  Author      [Thomas R. Shiple. Some changes by Rajeev K. Ranjan.]
14
15  Copyright   [Copyright (c) 1994-1996 The Regents of the Univ. of California.
16  All rights reserved.
17
18  Permission is hereby granted, without written agreement and without license
19  or royalty fees, to use, copy, modify, and distribute this software and its
20  documentation for any purpose, provided that the above copyright notice and
21  the following two paragraphs appear in all copies of this software.
22
23  IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
24  DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
25  OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF
26  CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28  THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
29  INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
30  FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS ON AN
31  "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO PROVIDE
32  MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.]
33
34  Revision    [$Id: cmuPortIter.c,v 1.4 2005/04/15 23:24:39 fabio Exp $]
35
36******************************************************************************/
37#include "cmuPortInt.h"
38
39/*---------------------------------------------------------------------------*/
40/* Constant declarations                                                     */
41/*---------------------------------------------------------------------------*/
42
43
44/*---------------------------------------------------------------------------*/
45/* Type declarations                                                         */
46/*---------------------------------------------------------------------------*/
47/*
48 * Traversal of BDD Formulas
49 */
50
typedef enum {
    bdd_gen_cubes,              /* generator enumerates onset cubes */
    bdd_gen_nodes               /* generator enumerates BDD nodes */
} bdd_gen_type;
55
56
57
58/*---------------------------------------------------------------------------*/
59/* Structure declarations                                                    */
60/*---------------------------------------------------------------------------*/
typedef struct {
  cmu_bdd_manager manager;      /* manager that owns the BDD being traversed */
    bdd_gen_status status;      /* bdd_EMPTY once exhausted, else bdd_NONEMPTY */
    bdd_gen_type type;          /* selects which member of the union below is active */
    union {
        struct {
            array_t *cube;      /* of bdd_literal */
            /* ... expansion ... */
        } cubes;
        struct {
            st_table *visited;  /* of bdd_node* */
            /* ... expansion ... */
        } nodes;
    } gen;
    struct {
        int sp;                 /* index of the first free slot (0 == empty) */
        bdd_node **stack;       /* nodes whose THEN branch is still unexplored */
    } stack;
    bdd_node *node;             /* current node, used by node generators only */
} cmu_bdd_gen;
81
82
83/*---------------------------------------------------------------------------*/
84/* Variable declarations                                                     */
85/*---------------------------------------------------------------------------*/
86
87
88/*---------------------------------------------------------------------------*/
89/* Macro declarations                                                        */
90/*---------------------------------------------------------------------------*/
91
92
93/**AutomaticStart*************************************************************/
94
95/*---------------------------------------------------------------------------*/
96/* Static function prototypes                                                */
97/*---------------------------------------------------------------------------*/
98static void pop_cube_stack(cmu_bdd_gen *gen);
99static void pop_node_stack(cmu_bdd_gen *gen);
100static void push_cube_stack(bdd_node *f, cmu_bdd_gen *gen);
101static void push_node_stack(bdd_node *f, cmu_bdd_gen *gen);
102
103/**AutomaticEnd***************************************************************/
104
105
106/*---------------------------------------------------------------------------*/
107/* Definition of exported functions                                          */
108/*---------------------------------------------------------------------------*/
109bdd_gen_status
110bdd_gen_read_status(bdd_gen *gen)
111{
112  return ((cmu_bdd_gen *)gen)->status;
113}
114
115/*
116 *    Defines an iterator on the onset of a BDD.  Two routines are
117 *    provided: bdd_first_cube, which extracts one cube from a BDD and
118 *    returns a bdd_gen structure containing the information necessary to
119 *    continue the enumeration; and bdd_next_cube, which returns 1 if another cube was
120 *    found, and 0 otherwise. A cube is represented
121 *    as an array of bdd_literal (which are integers in {0, 1, 2}), where 0 represents
122 *    negated literal, 1 for literal, and 2 for don't care.  Returns a disjoint
123 *    cover.  A third routine is there to clean up.
124 */
125
126/*
127 *    bdd_first_cube - return the first cube of the function.
128 *    A generator is returned that will iterate over the rest.
129 *    Return the generator.
130 */
131bdd_gen *
132bdd_first_cube(bdd_t *fn, array_t **cube /* of bdd_literal */)
133{
134    struct bdd_manager_ *manager;
135    cmu_bdd_gen *gen;
136    int i;
137    long num_vars;
138    bdd_node *f;
139
140    if (fn == NIL(bdd_t)) {
141        cmu_bdd_fatal("bdd_first_cube: invalid BDD");
142    }
143
144    manager = fn->mgr;
145
146    /*
147     *    Allocate a new generator structure and fill it in; the stack and the
148     *    cube will be used, but the visited table and the node will not be used.
149     */
150    gen = ALLOC(cmu_bdd_gen, 1);
151    if (gen == NIL(cmu_bdd_gen)) {
152        cmu_bdd_fatal("bdd_first_cube: failed on memory allocation, location 1");
153    }
154
155    /*
156     *    first - init all the members to a rational value for cube iteration
157     */
158    gen->manager = manager;
159    gen->status = bdd_EMPTY;
160    gen->type = bdd_gen_cubes;
161    gen->gen.cubes.cube = NIL(array_t);
162    gen->stack.sp = 0;
163    gen->stack.stack = NIL(bdd_node *);
164    gen->node = NIL(bdd_node);
165
166    num_vars = cmu_bdd_vars(manager);
167    gen->gen.cubes.cube = array_alloc(bdd_literal, num_vars);
168    if (gen->gen.cubes.cube == NIL(array_t)) {
169        cmu_bdd_fatal("bdd_first_cube: failed on memory allocation, location 2");
170    }
171   
172    /*
173     * Initialize each literal to 2 (don't care).
174     */
175    for (i = 0; i < num_vars; i++) {
176        array_insert(bdd_literal, gen->gen.cubes.cube, i, 2);
177    }
178
179    /*
180     * The stack size will never exceed the number of variables in the BDD, since
181     * the longest possible path from root to constant 1 is the number of variables
182     * in the BDD.
183     */
184    gen->stack.sp = 0;
185    gen->stack.stack = ALLOC(bdd_node *, num_vars);
186    if (gen->stack.stack == NIL(bdd_node *)) {
187        cmu_bdd_fatal("bdd_first_cube: failed on memory allocation, location 3");
188    }
189    /*
190     * Clear out the stack so that in bdd_gen_free, we can decrement the ref count
191     * of those nodes still on the stack.
192     */
193    for (i = 0; i < num_vars; i++) {
194        gen->stack.stack[i] = NIL(bdd_node);
195    }
196
197    if (bdd_is_tautology(fn, 0)) {
198        /*
199         *    All done, for this was but the zero constant ...
200         *    We are enumerating the onset, (which is vacuous).
201         *    gen->status initialized to bdd_EMPTY above, so this
202         *    appears to be redundant.
203         */
204        gen->status = bdd_EMPTY;
205    } else {
206        /*
207         *    Get to work enumerating the onset.  Get the first cube.  Note that
208         *    if fn is just the constant 1, push_cube_stack will properly handle this.
209         *    Get a new pointer to fn->node beforehand: this increments
210         *    the reference count of fn->node; this is necessary, because when fn->node
211         *    is popped from the stack at the very end, it's ref count is decremented.
212         */
213        gen->status = bdd_NONEMPTY;
214        f = cmu_bdd_identity(manager, fn->node);
215        push_cube_stack(f, gen);
216    }
217
218    *cube = gen->gen.cubes.cube;
219    return (bdd_gen *)(gen);
220}
221
222/*
223 *    bdd_next_cube - get the next cube on the generator.
224 *    Returns {TRUE, FALSE} when {more, no more}.
225 */
226boolean
227bdd_next_cube(bdd_gen *gen_, array_t **cube /* of bdd_literal */)
228{
229  cmu_bdd_gen *gen = (cmu_bdd_gen *) gen_;
230  pop_cube_stack(gen);
231  if (gen->status == bdd_EMPTY) {
232    return (FALSE);
233  }
234  *cube = gen->gen.cubes.cube;
235  return (TRUE);
236}
237
238bdd_gen *
239bdd_first_disjoint_cube(bdd_t *fn,array_t **cube)
240{
241  return(bdd_first_cube(fn,cube));
242}
243
244boolean
245bdd_next_disjoint_cube(bdd_gen *gen_, array_t **cube)
246{
247  return(bdd_next_cube(gen_,cube));
248}
249
250/*
251 *    bdd_first_node - enumerates all bdd_node * in fn.
252 *    Return the generator.
253 */
254bdd_gen *
255bdd_first_node(bdd_t *fn, bdd_node **node /* return */)
256{
257    struct bdd_manager_ *manager;
258    cmu_bdd_gen *gen;
259    long num_vars;
260    bdd_node *f;
261    int i;
262
263    if (fn == NIL(bdd_t)) {
264        cmu_bdd_fatal("bdd_first_node: invalid BDD");
265    }
266
267    manager = fn->mgr;
268
269    /*
270     *    Allocate a new generator structure and fill it in; the
271     *    visited table will be used, as will the stack, but the
272     *    cube array will not be used.
273     */
274    gen = ALLOC(cmu_bdd_gen, 1);
275    if (gen == NIL(bdd_gen)) {
276        cmu_bdd_fatal("bdd_first_node: failed on memory allocation, location 1");
277    }
278
279    /*
280     *    first - init all the members to a rational value for node iteration.
281     */
282    gen->manager = manager;
283    gen->status = bdd_NONEMPTY;
284    gen->type = bdd_gen_nodes;
285    gen->gen.nodes.visited = NIL(st_table);
286    gen->stack.sp = 0;
287    gen->stack.stack = NIL(bdd_node *);
288    gen->node = NIL(bdd_node);
289 
290    /*
291     * Set up the hash table for visited nodes.  Every time we visit a node,
292     * we insert it into the table.
293     */
294    gen->gen.nodes.visited = st_init_table(st_ptrcmp, st_ptrhash);
295    if (gen->gen.nodes.visited == NIL(st_table)) {
296        cmu_bdd_fatal("bdd_first_node: failed on memory allocation, location 2");
297    }
298
299    /*
300     * The stack size will never exceed the number of variables in the BDD, since
301     * the longest possible path from root to constant 1 is the number of variables
302     * in the BDD.
303     */
304    gen->stack.sp = 0;
305    num_vars = cmu_bdd_vars(manager);
306    gen->stack.stack = ALLOC(bdd_node *, num_vars);
307    if (gen->stack.stack == NIL(bdd_node *)) {
308        cmu_bdd_fatal("bdd_first_node: failed on memory allocation, location 3");
309    }
310    /*
311     * Clear out the stack so that in bdd_gen_free, we can decrement the ref count
312     * of those nodes still on the stack.
313     */
314    for (i = 0; i < num_vars; i++) {
315        gen->stack.stack[i] = NIL(bdd_node);
316    }
317
318    /*
319     * Get the first node.  Get a new pointer to fn->node beforehand: this increments
320     * the reference count of fn->node; this is necessary, because when fn->node
321     * is popped from the stack at the very end, it's ref count is decremented.
322     */
323    f = cmu_bdd_identity(manager, fn->node);
324    push_node_stack(f, gen);
325    gen->status = bdd_NONEMPTY;
326
327    *node = gen->node;  /* return the node */
328    return (bdd_gen *) (gen);   /* and the new generator */
329}
330
331/*
332 *    bdd_next_node - get the next node in the BDD.
333 *    Return {TRUE, FALSE} when {more, no more}.
334 */
335boolean
336bdd_next_node(bdd_gen *gen_, bdd_node **node /* return */)
337{
338  cmu_bdd_gen *gen = (cmu_bdd_gen *) gen_;
339 
340    pop_node_stack(gen);
341    if (gen->status == bdd_EMPTY) {
342        return (FALSE);
343    }
344    *node = gen->node;
345    return (TRUE);
346}
347
348/*
349 *    bdd_gen_free - frees up the space used by the generator.
350 *    Return an int so that it is easier to fit in a foreach macro.
351 *    Return 0 (to make it easy to put in expressions).
352 */
353int
354bdd_gen_free(bdd_gen *gen_)
355{
356    long num_vars;
357    int i;
358    struct bdd_manager_ *mgr;
359    bdd_node *f;
360    st_table *visited_table;
361    st_generator *visited_gen;
362    cmu_bdd_gen *gen = (cmu_bdd_gen *) gen_;
363
364    mgr = gen->manager;
365
366    switch (gen->type) {
367    case bdd_gen_cubes:
368        array_free(gen->gen.cubes.cube);
369        gen->gen.cubes.cube = NIL(array_t);
370        break;
371    case bdd_gen_nodes:
372        visited_table = gen->gen.nodes.visited;
373        st_foreach_item(visited_table, visited_gen, &f, NIL(refany)) {
374            cmu_bdd_free(mgr, (bdd) f);
375        }
376        st_free_table(visited_table);
377        visited_table = NIL(st_table);
378        break;
379    }
380
381    /*
382     * Free the data associated with this generator.  If there are any nodes remaining
383     * on the stack, we must free them, to get their ref counts back to what they were before.
384     */
385    num_vars = cmu_bdd_vars(mgr);
386    for (i = 0; i < num_vars; i++) {
387        f = gen->stack.stack[i];
388        if (f != NIL(bdd_node)) {
389            cmu_bdd_free(mgr, (bdd) f);
390        }
391    }
392    FREE(gen->stack.stack);
393
394    FREE(gen);
395
396    return (0); /* make it return some sort of an int */
397}
398
399/*
400 *    INTERNAL INTERFACE
401 *
402 *    Invariants:
403 *
404 *    gen->stack.stack contains nodes that remain to be explored.
405 *
406 *    For a cube generator,
407 *        gen->gen.cubes.cube reflects the choices made to reach node at top of the stack.
408 *    For a node generator,
409 *        gen->gen.nodes.visited reflects the nodes already visited in the BDD dag.
410 */
411
412/*---------------------------------------------------------------------------*/
413/* Definition of static functions                                            */
414/*---------------------------------------------------------------------------*/
415/*
416 *    push_cube_stack - push a cube onto the stack to visit.
417 *    Return nothing, just do it.
418 *
419 *    The BDD is traversed using depth-first search, with the ELSE branch
420 *    searched before the THEN branch.
421 *
422 *    Caution: If you are creating new BDD's while iterating through the
423 *    cubes, and a garbage collection happens to be performed during this
424 *    process, then the BDD generator will get lost and an error will result.
425 *
426 */
static void
push_cube_stack(bdd_node *f, cmu_bdd_gen *gen)
{
    bdd_variableId topf_id;
    bdd_node *f0, *f1;
    struct bdd_manager_ *mgr;

    mgr = gen->manager;

    /* Reached the constant 1: the cube accumulated so far is complete. */
    if (f == cmu_bdd_one(mgr)) {
        return;
    }

    /* CMU ids number variables from 1 (id 0 is the constant); subtract 1
     * to get the UCB-style id used to index the cube array. */
    topf_id = (bdd_variableId) (cmu_bdd_if_id(mgr, (bdd) f) - 1);

    /*
     * Get the then and else branches of f. Note that cmu_bdd_then and cmu_bdd_else
     * automatically take care of inverted pointers.
     */
    f0 = cmu_bdd_else(mgr, (bdd) f);
    f1 = cmu_bdd_then(mgr, (bdd) f);

    if (f1 == cmu_bdd_zero(mgr)) {
        /*
         *    No choice: take the 0 branch.  Since there is only one branch to
         *    explore from f, there is no need to push f onto the stack, because
         *    after exploring this branch we are done with f.  A consequence of
         *    this is that there will be no f to pop either.  Same goes for the
         *    next case.  Decrement the ref count of f and of the branch leading
         *    to zero, since we will no longer need to access these nodes.
         */
        array_insert(bdd_literal, gen->gen.cubes.cube, topf_id, 0);
        push_cube_stack(f0, gen);
        cmu_bdd_free(mgr, (bdd) f1);
        cmu_bdd_free(mgr, (bdd) f);
    } else if (f0 == cmu_bdd_zero(mgr)) {
        /*
         *    No choice: take the 1 branch.  Record the positive literal,
         *    then release f and the zero branch as in the previous case.
         */
        array_insert(bdd_literal, gen->gen.cubes.cube, topf_id, 1);
        push_cube_stack(f1, gen);
        cmu_bdd_free(mgr, (bdd) f0);
        cmu_bdd_free(mgr, (bdd) f);
    } else {
        /*
         * In this case, we must explore both branches of f.  We always choose
         * to explore the 0 branch first.  We must push f on the stack, so that
         * we can later pop it and explore its 1 branch. Decrement the ref count
         * of f1 since we will no longer need to access this node.  Note that
         * the parent of f1 was bdd_freed above or in pop_cube_stack.
         */
        gen->stack.stack[gen->stack.sp++] = f;
        array_insert(bdd_literal, gen->gen.cubes.cube, topf_id, 0);
        push_cube_stack(f0, gen);
        cmu_bdd_free(mgr, (bdd) f1);
    }
}
484
/*
 *    pop_cube_stack - advance to the next cube after the current one is consumed.
 *
 *    Pops the top node (whose 0 branch has already been explored), records
 *    its positive literal, resets all variables below it to don't care, and
 *    descends into its 1 branch.  Sets gen->status to bdd_EMPTY when the
 *    stack is exhausted.
 */
static void
pop_cube_stack(cmu_bdd_gen *gen)
{
    bdd_variableId topf_id, level_i_id;
    bdd_node *branch_f;
    bdd_node *f1;
    int i;
    long topf_level;
    struct bdd_manager_ *mgr;
    struct bdd_ *var_bdd;

    mgr = gen->manager;

    if (gen->stack.sp == 0) {
        /*
         * Stack is empty.  Have already explored both the 0 and 1 branches of
         * the root of the BDD.
         */
        gen->status = bdd_EMPTY;
    } else {
        /*
         * Explore the 1 branch of the node at the top of the stack (since it is
         * on the stack, this means we have already explored the 0 branch).  We
         * permanently pop the top node, and bdd_free it, since there are no more edges left to
         * explore.
         */
        branch_f = gen->stack.stack[--gen->stack.sp];
        gen->stack.stack[gen->stack.sp] = NIL(bdd_node); /* overwrite with NIL */
        topf_id = (bdd_variableId) (cmu_bdd_if_id(mgr, (bdd) branch_f) - 1);
        array_insert(bdd_literal, gen->gen.cubes.cube, topf_id, 1);

        /*
         * We must set the variables with levels greater than the level of branch_f,
         * back to 2 (don't care).  This is because these variables are not
         * on the current path, and thus their values are don't care.
         *
         * Note the following correspondence:
         *   CMU          UCB
         *  index         level   (both start at zero)
         *  indexindex    id      (CMU has id 0 for constant, thus really start numbering at 1;
         *                                           UCB starts numbering at 0)
         */
        topf_level = cmu_bdd_if_index(mgr, (bdd) branch_f);
        for (i = topf_level + 1; i < array_n(gen->gen.cubes.cube); i++) {
            var_bdd = cmu_bdd_var_with_index(mgr, i);
            level_i_id = (bdd_variableId) (cmu_bdd_if_id(mgr, var_bdd) - 1);
            /*
             * No need to free var_bdd, since single variable BDDs are never garbage collected.
             * Note that level_i_id is just (mgr->indexindexes[i] - 1); however, wanted
             * to avoid using CMU internals.
             */
            array_insert(bdd_literal, gen->gen.cubes.cube, level_i_id, 2);
        }
        f1 = cmu_bdd_then(mgr, (bdd) branch_f);
        push_cube_stack(f1, gen);
        cmu_bdd_free(mgr, (bdd) branch_f);
    }
}
543
544/*
545 *    push_node_stack - push a node onto the stack.
546 *
547 *    The same as push_cube_stack but for enumerating nodes instead of cubes.
548 *    The BDD is traversed using depth-first search, with the ELSE branch searched
549 *    before the THEN branch, and a node returned only after its children have been
550 *    returned.  Note that the returned bdd_node pointer has the complement
551 *    bit zeroed out.
552 *
553 *    Caution: If you are creating new BDD's while iterating through the
554 *    nodes, and a garbage collection happens to be performed during this
555 *    process, then the BDD generator will get lost and an error will result.
556 *
557 *    Return nothing, just do it.
558 */
static void
push_node_stack(bdd_node *f, cmu_bdd_gen *gen)
{
    bdd_node *f0, *f1;
    bdd_node *reg_f, *reg_f0, *reg_f1;
    struct bdd_manager_ *mgr;

    mgr = gen->manager;

    /* Strip the complement bit: the visited table (and gen->node) always
     * hold regularized pointers. */
    reg_f = (bdd_node *) BDD_POINTER(f);  /* use of bddint.h */
    if (st_lookup(gen->gen.nodes.visited, (refany) reg_f, NIL(refany))) {
        /*
         * Already been visited.
         */
        return;
    }

    if (f == cmu_bdd_one(mgr) || f == cmu_bdd_zero(mgr)) {
        /*
         * If f is the constant node and it has not been visited yet, then put it in the visited table
         * and set the gen->node pointer.  There is no need to put it in the stack because
         * the constant node does not have any branches, and there is no need to free f because
         * constant nodes have a saturated reference count.
         */
        st_insert(gen->gen.nodes.visited, (refany) reg_f, NIL(any));
        gen->node = reg_f;
    } else {
        /*
         * f has not been marked as visited.  We don't know yet if any of its branches
         * remain to be explored.  First get its branches.  Note that cmu_bdd_then and
         * cmu_bdd_else automatically take care of inverted pointers.
         */
        f0 = cmu_bdd_else(mgr, (bdd) f);
        f1 = cmu_bdd_then(mgr, (bdd) f);

        reg_f0 = (bdd_node *) BDD_POINTER(f0);  /* use of bddint.h */
        reg_f1 = (bdd_node *) BDD_POINTER(f1);
        if (! st_lookup(gen->gen.nodes.visited, (refany) reg_f0, NIL(refany))) {
            /*
             * The 0 child has not been visited, so explore the 0 branch.  First push f on
             * the stack.  Bdd_free f1 since we will not need to access this exact pointer
             * any more.
             */
            gen->stack.stack[gen->stack.sp++] = f;
            push_node_stack(f0, gen);
            cmu_bdd_free(mgr, (bdd) f1);
        } else if (! st_lookup(gen->gen.nodes.visited, (refany) reg_f1, NIL(refany))) {
            /*
             * The 0 child has been visited, but the 1 child has not been visited, so
             * explore the 1 branch.  First push f on the stack. We are done with f0,
             * so bdd_free it.
             */
            gen->stack.stack[gen->stack.sp++] = f;
            push_node_stack(f1, gen);
            cmu_bdd_free(mgr, (bdd) f0);
        } else {
            /*
             * Both the 0 and 1 children have been visited. Thus we are done exploring from f.
             * Mark f as visited (put it in the visited table), and set the gen->node pointer.
             * We will no longer need to refer to f0 and f1, so bdd_free them.  f will be
             * bdd_freed when the visited table is freed.
             */
            st_insert(gen->gen.nodes.visited, (refany) reg_f, NIL(any));
            gen->node = reg_f;
            cmu_bdd_free(mgr, (bdd) f0);
            cmu_bdd_free(mgr, (bdd) f1);
        }
    }
}
628
/*
 *    pop_node_stack - continue the depth-first node enumeration.
 *
 *    Pops the top node and re-pushes it; push_node_stack then explores
 *    whichever of its branches is still unvisited, or yields the node
 *    itself once both children have been returned.  Sets gen->status to
 *    bdd_EMPTY when the stack is exhausted.
 */
static void
pop_node_stack(cmu_bdd_gen *gen)
{
    bdd_node *branch_f;

    if (gen->stack.sp == 0) {
        gen->status = bdd_EMPTY;
    } else {
        branch_f = gen->stack.stack[--gen->stack.sp];
        /* overwrite the popped slot with NIL so bdd_gen_free does not
         * bdd_free this node a second time */
        gen->stack.stack[gen->stack.sp] = NIL(bdd_node);
        push_node_stack(branch_f, gen);
    }
}
642
Note: See TracBrowser for help on using the repository browser.