ceptr
 All Data Structures Files Functions Variables Typedefs Macros Modules Pages
mtree.c
Go to the documentation of this file.
1 
12 #include "mtree.h"
13 #include "ceptr_error.h"
14 #include "hashfn.h"
15 #include "def.h"
16 
// sentinel "null" handle: no matrix pointer and a NULL_ADDR address;
// used as the no-parent marker when creating roots (see _m_new_root)
// and as a not-found result (_m_next_sibling returns null_H.a)
const H null_H = {0,{NULL_ADDR,NULL_ADDR}};
18 
19 // low-level function to allocate a new tree level to an mtree
21 void __m_add_level(M *m) {
22  if (!m->levels++) {
23  m->lP = malloc(sizeof(L));
24  }
25  else {
26  m->lP = realloc(m->lP,sizeof(L)*m->levels);
27  }
28  int i = m->levels-1;
29  m->lP[i].nodes = 0;
30 }
31 
32 // low-level function to add c nodes to given level
34 N *__m_add_nodes(H h,L *l,int c) {
35  N *n;
36  Mindex i = l->nodes;
37  if (!i) {
38  size_t s = sizeof(N)*c;
39  l->nP = malloc(s);
40  memset(l->nP,0,s);
41  l->nodes = c;
42  }
43  else {
44  // size_t os = sizeof(N)*l->nodes;
45  l->nodes += c;
46  size_t ns = sizeof(N)*l->nodes;
47  l->nP = realloc(l->nP,ns);
48  // memset(l->nP+ns,0,sizeof(N)*c);
49  }
50  n = _GET_NODE(h,l,i);
51  return n;
52 }
53 
// low level function to initialize a new node under a parent
// sets *h to the address the new node will occupy (one level below the
// parent) and *l to the level it will live on, growing the matrix by a
// level when the parent sits on the deepest existing level
void __m_new_init(H parent,H *h,L **l) {
    h->m = parent.m;
    h->a.l = parent.a.l+1;   // child lives one level below its parent

    if (parent.a.l >= parent.m->levels) {
        raise_error("address too deep!");
    }
    else if (parent.a.l == parent.m->levels-1) {
        // parent is on the deepest level: the child starts a brand new level,
        // which must be added before GET_LEVEL can resolve it
        h->a.i = 0;
        __m_add_level(h->m);
        *l = GET_LEVEL(*h);
    }
    else {
        // the child's level already exists: the node will be appended at its end
        *l = GET_LEVEL(*h);
        h->a.i = (*l)->nodes;
    }
}
72 
73 // low level function to initialize a new node as a root node
74 void __m_new_root(H *h, L **l) {
75  M *m = h->m = malloc(sizeof(M));
76  m->magic = matrixImpl;
77  m->levels = 0;
78  h->a.l = 0;
79  h->a.i = 0;
80  __m_add_level(m);
81  *l = GET_LEVEL(*h);
82 }
83 
94 H __m_new(H parent,Symbol symbol,void *surface,size_t size,uint32_t flags) {
95  H h;
96  L *l = 0;
97 
98  if (parent.m) {
99  __m_new_init(parent,&h,&l);
100  }
101  else {
102  __m_new_root(&h,&l);
103  }
104 
105  // add a node
106  N *n,*nl;
107  n = __m_add_nodes(h,l,1);
108 
109  n->symbol = symbol;
110  n->size = size;
111  n->parenti = parent.m ? parent.a.i : NULL_ADDR;
112  n->flags = flags;
113  if (size) {
114  if (size <= sizeof(void *)) {
115  memcpy(&n->surface,surface,size);
116  }
117  else {
118  n->flags |= TFLAG_ALLOCATED;
119  n->surface = malloc(size);
120  if (surface)
121  memcpy(n->surface,surface,size);
122  }
123  }
124 
125  return h;
126 }
127 
135  return _m_new(null_H,s,0,0);
136 }
137 
145 H _m_newr(H parent,Symbol s) {
146  return _m_new(parent,s,0,0);
147 }
148 
157 H _m_newi(H parent,Symbol symbol,int surface) {
158  return _m_new(parent,symbol,&surface,sizeof(int));
159 }
160 
// helper function to recursively traverse a ttree and build an mtree out of it
// used by _m_new_from_t; returns the handle of the mtree node created for t
H __mnft(H parent,T *t) {
    int i, c = _t_children(t);

    // clear the allocated flag, because that will get recalculated in __m_new
    uint32_t flags = t->context.flags & ~TFLAG_ALLOCATED;
    // if the ttree points to a type that has an allocated c structure as its surface
    // it must be copied into the mtree as reference, otherwise it would get freed twice
    // when the mtree is freed

    if (flags & (TFLAG_SURFACE_IS_RECEPTOR+TFLAG_SURFACE_IS_SCAPE+TFLAG_SURFACE_IS_CPTR)) flags |= TFLAG_REFERENCE;
    void *surface = _t_surface(t);
    void *sp;
    H h;

    if (flags & TFLAG_SURFACE_IS_TREE && !(flags & TFLAG_SURFACE_IS_RECEPTOR)) {
        // orthogonal (embedded) tree: convert it to its own mtree first and
        // store the resulting handle as this node's surface
        H sh = _m_new_from_t((T *)surface);
        h = _m_newt(parent,_t_symbol(t),sh);
    }
    else {
        if (flags & (TFLAG_SURFACE_IS_RECEPTOR+TFLAG_SURFACE_IS_SCAPE+TFLAG_SURFACE_IS_CPTR)) {
            // reference surfaces: store the pointer value itself, not what it points to
            sp = surface;
            surface = &sp;
        }
        h = __m_new(parent,_t_symbol(t),surface,_t_size(t),flags);
    }
    if (flags&TFLAG_RUN_NODE) {
        // @todo, make this more efficient, ie we shouldn't have to
        // do an _m_get, instead there should be a way for mtrees to create run_nodes
        N *n = __m_get(h);
        n->cur_child = ((rT *)t)->cur_child;
    }
    // recurse over all the children of t, attaching them under the new node
    for(i=1;i<=c;i++) {
        __mnft(h,_t_child(t,i));
    }
    return h;
}
199 
207  H h = null_H;
208  h = __mnft(h,t);
209  h.a.l = 0;
210  h.a.i = 0;
211  return h;
212 }
213 
// mtree walk function for creating ttree nodes
// used by _t_new_from_m
void _m_2tfn(H h,N *n,void *data,MwalkState *s,Maddr ap) {

    // data holds a pointer to the most recently created ttree node
    T **tP = (T**) &(((struct {T *t;} *)data)->t);
    // the parent ttree node was stashed in the walk state one level up;
    // NULL when we are creating the root
    T *t = h.a.l ? (s[h.a.l-1].user.t) : NULL;
    int is_run_node = (n->flags&TFLAG_RUN_NODE);

    T *nt;

    if (n->flags & TFLAG_SURFACE_IS_TREE && !(n->flags & TFLAG_SURFACE_IS_RECEPTOR)) {
        // orthogonal tree: the surface is an H; convert it back into a ttree
        if (is_run_node) raise_error("not implemented");
        nt = _t_newt(t,n->symbol,_t_new_from_m(*(H *)n->surface));
    }
    else if (n->flags & TFLAG_ALLOCATED) {
        // out-of-line surface: pass the stored pointer
        nt = __t_new(t,n->symbol,n->surface,n->size,is_run_node);
    }
    else {
        // inline surface lives in the pointer slot: pass its address
        nt = __t_new(t,n->symbol,&n->surface,n->size,is_run_node);
    }
    // carry over all flags except ALLOCATED (recomputed by the ttree side)
    nt->context.flags |= (~TFLAG_ALLOCATED)&(n->flags);

    if (is_run_node) {
        ((rT *)nt)->cur_child = n->cur_child;
    }
    *tP = nt;

    // remember this ttree node at our level so children can attach to it
    s[h.a.l].user.t = nt;
}
243 
251  struct {T *t;} d = {NULL};
252  Maddr ac = {0,0};
253  _m_walk(h,_m_2tfn,&d);
254  return _t_root(d.t);
255 }
256 
264 N *__m_get(H h) {
265  L *l = GET_LEVEL(h);
266  N *n = GET_NODE(h,l);
267  return n;
268 }
269 
276 size_t _m_size(H h) {return __m_get(h)->size;}
277 
/**
 * Free an mtree's storage.
 *
 * @param h handle of the tree to free
 * @param free_surface if non-zero, node surfaces are freed too (allocated
 *        surfaces, and embedded orthogonal trees recursively); if zero only
 *        the matrix structure itself is released
 */
void __m_free(H h,int free_surface) {
    int i = h.m->levels;
    // walk the levels from deepest to shallowest
    while(i--) {
        L *l = _GET_LEVEL(h,i);
        Mindex j = l->nodes;
        if (free_surface) {
            while(j--) {
                N *n = _GET_NODE(h,l,j);
                // reference surfaces are owned elsewhere; never free those
                if (!(n->flags & TFLAG_REFERENCE)) {
                    if (n->flags & TFLAG_SURFACE_IS_RECEPTOR) raise_error("mtree can't free receptor!");
                    if (n->flags & TFLAG_SURFACE_IS_TREE && !(n->flags & TFLAG_SURFACE_IS_RECEPTOR)) {
                        // the surface is the handle of an embedded orthogonal tree
                        _m_free(*(H *)n->surface);
                    }
                    if (n->flags & TFLAG_ALLOCATED) {
                        free(n->surface);
                    }
                }
            }
        }
        free(l->nP);
    }
    free(h.m->lP);
    free(h.m);
}
308 
/**
 * Get the number of (non-deleted) children of a node.
 *
 * @param h handle of the node
 * @returns child count; 0 for nodes on the deepest level
 */
int _m_children(H h) {
    Mlevel levels = h.m->levels;

    if (h.a.l >= levels) {
        raise_error("address too deep!");
    }
    else if (h.a.l == levels-1) {
        // nodes on the deepest level cannot have children
        return 0;
    }
    L *l = _GET_LEVEL(h,h.a.l+1);
    Mindex c = 0;
    Mindex i = 0,pi = h.a.i;
    Mindex max = l->nodes;
    N *n = &l->nP[0];

    /* this works if nodes are sorted
    while (i < max && n->parenti != h.a.i) {
        n++;i++;
    }
    while (i < max && n->parenti == h.a.i) {
        n++;i++;c++;
    }
    */
    // scan the entire next level counting live nodes whose parent is this node
    while (i < max) {
        if (!(n->flags & TFLAG_DELETED) && pi == n->parenti) c++;
        n++;i++;
    }

    return c;
}
345 
352 void * _m_surface(H h) {
353  N *n = __m_get(h);
354  if (n->flags & TFLAG_ALLOCATED)
355  return n->surface;
356  else
357  return &n->surface;
358 }
359 
367  Maddr a = {NULL_ADDR,NULL_ADDR};
368  if (h.a.l > 0) {
369  N *n = __m_get(h);
370  a.l = h.a.l-1;
371  a.i = n->parenti;
372  }
373  return a;
374 }
375 
/**
 * Get the address of the cth child of a node.
 *
 * @param h handle of the parent node
 * @param c 1-based child index, or NULL_ADDR to request the last child
 * @returns the child's Maddr, or {NULL_ADDR,NULL_ADDR} if no such child
 *
 * NOTE(review): unlike _m_children, this scan does not skip nodes flagged
 * TFLAG_DELETED — child indices may disagree between the two after a
 * detach; confirm whether that is intentional.
 */
Maddr _m_child(H h,Mindex c) {
    Maddr a = {NULL_ADDR,NULL_ADDR};
    Mlevel levels = h.m->levels;
    if (h.a.l >= levels) {
        raise_error("address too deep!");
    }
    else if (h.a.l == levels-1) {
        // deepest level: no children possible
        return a;
    }
    a.l = h.a.l+1;
    L *l = &h.m->lP[a.l];
    Mindex ci = 0,max = l->nodes;
    N *n = &l->nP[0];
    if (c == NULL_ADDR) {
        // NULL_ADDR means: return the last child; remember the latest match
        a.i = NULL_ADDR;
        while(ci < max) {
            if (n->parenti == h.a.i) a.i = ci;
            ci++;
            n++;
        }
        if (a.i == NULL_ADDR)
            a.l = NULL_ADDR;
    }
    else {
        // count children of this node until we reach the cth one
        a.i = 0;
        while(a.i < max) {
            if (n->parenti == h.a.i) ci++;
            if (ci == c) return a;
            a.i++;n++;
        }
        a.l = NULL_ADDR;
        a.i = NULL_ADDR;
    }
    /* works if nodes are sorted
    //skip past nodes of children of parents before our parent
    while (a.i < max && n->parenti < h.a.i) {
        a.i++;n++;
    }

    // if you pass in NULL_ADDR for the child,
    // this routine returns the last child address
    if (c == NULL_ADDR) {
        while (a.i < max && n->parenti == h.a.i) {
            a.i++;n++;
        }
        a.i--;
        if (a.i == -1) a.l = NULL_ADDR;
    }
    else {
        if (a.i+c > l->nodes) {
            raise_error("address too deep!");
        }
        a.i += c-1;
    }
    */

    return a;
}
441 
449  N *n = __m_get(h);
450  return n->symbol;
451 }
452 
460  L *l = GET_LEVEL(h);
461  int i = h.a.i+1;
462  Maddr r;
463  N *n = GET_NODE(h,l);
464  Mindex pi = n->parenti;
465  while (i<l->nodes) {
466  n++;
467  if (n->parenti == pi) {
468  r.l = h.a.l;
469  r.i = i;
470  return r;
471  }
472  i++;
473  }
474  return null_H.a;
475 }
476 
/**
 * Add an mtree as a child of a node in another mtree.
 *
 * Copies h's nodes level-by-level into parent's matrix, rebasing each
 * node's parent index as it goes, then frees h's matrix structure (but
 * not the surfaces, which the parent tree now owns).
 *
 * @param parent handle of the node to add under
 * @param h handle of the tree being added (invalid after this call)
 * @returns handle of the newly attached child inside the parent tree
 */
H _m_add(H parent,H h) {
    L *pl,*l;
    H r;
    int x = _m_children(parent)+1;   // child index the new subtree will occupy
    int i,levels = h.m->levels;
    H p = parent;
    Mindex d = parent.a.i;           // per-level offset used to rebase parenti
    for (i=0;i<levels;i++) {
        __m_new_init(p,&r,&pl);
        l = _GET_LEVEL(h,i);
        N *np = __m_add_nodes(r,pl,l->nodes);
        N *n = &l->nP[0];
        Mindex j = l->nodes;
        while (j--) {
            *np = *n;
            // the source tree's root had no parent; it now hangs off `parent`
            if (np->parenti == NULL_ADDR) np->parenti = 0;
            np->parenti += d;
            np++; n++;
        }
        // next level's indices shift by the nodes already present there
        d = pl->nodes-l->nodes;
        p.a.l++;
    }
    r.a = _m_child(parent,x);

    __m_free(h,0); //free h but not it's surfaces which were copied into the parent
    return r;
}
515 
/**
 * Walk an mtree depth-first, invoking walkfn on each non-deleted node.
 *
 * @param h handle of the (sub)tree to walk
 * @param walkfn callback receiving the node's handle, its N pointer, the
 *        user data, the per-level state array, and the parent's address
 * @param user_data opaque pointer passed through to walkfn
 */
void _m_walk(H h,void (*walkfn)(H ,N *,void *,MwalkState *,Maddr),void *user_data) {
    int levels = h.m->levels;
    MwalkState state[levels];   // per-level state: visited index plus user slot
    int i,root;
    // for(i=0;i<levels;i++) {state[i]=0;};
    Maddr ap = _m_parent(h);
    root = h.a.l;   // level the walk started at; backing up past it ends it
    int done = 0;
    L *l = GET_LEVEL(h);
    N *n;
    int backup,nodes = h.a.i+1; // at root level pretend we are at last node
    // raise(SIGINT);

    while(!done) {
        backup = 0;
        n = GET_NODE(h,l);
        // look for child of parent at this level (may be node at current handle address)
        while ((h.a.i < nodes) && ((n->flags & TFLAG_DELETED) || (n->parenti != ap.i))) {
            n++;h.a.i++;
        };

        // if we got one, then call the walk function
        if (h.a.i != nodes) {
            (*walkfn)(h,n,user_data,state,ap);
            // and go down looking for children
            if (h.a.l+1 < levels) {
                state[h.a.l].i = h.a.i;   // remember where we were at this level
                ap = h.a;
                h.a.l++;
                h.a.i = 0;
                l = GET_LEVEL(h);
                nodes = l->nodes;
            }
            else {
                // if no more levels, then backup if no more nodes
                if (++h.a.i == nodes)
                    backup = 1;
            }
        }
        else backup = 1;

        while(backup) {
            // if current node is at root level we are done
            if (h.a.l == root) {backup = 0;done = 1;}
            else {
                // otherwise move up a level
                h.a.l--;
                // new node index is next node at that level from stored state
                h.a.i = state[h.a.l].i+1;
                l = GET_LEVEL(h);
                nodes = l->nodes;
                // if we still have nodes to check then we have finished backing up otherwise
                // we'll loop to go back up another level
                if (h.a.i < nodes) {
                    backup = 0;
                    ap.l = h.a.l -1;
                    ap.i = state[ap.l].i;
                }
            }
        }
    }
}
587 
// walkfn used by _m_detach to detach a branch of a tree
// data->m is the matrix being built (NULL until the first node is walked);
// data->l is the level of the detached root within the original tree
void _m_detatchfn(H oh,N *on,void *data,MwalkState *s,Maddr ap) {
    struct {M *m;int l;} *d = data;

    H parent;
    H h;
    L *l;
    if (!d->m) {
        // first node walked becomes the root of the new tree
        __m_new_root(&h,&l);
        parent.m = 0;
        d->m = h.m;
    }
    else {
        parent.m = d->m;
        // parent index in the NEW tree was stashed in the walk state by the
        // call that copied the parent node (see the last line below)
        parent.a.i = s[oh.a.l-1].user.pi;
        // we use d->l here to give us a level offset because the root of
        // the detached tree is not necessarily the same as the root of
        // the whole tree.
        parent.a.l = oh.a.l-1-d->l;
        __m_new_init(parent,&h,&l);
    }

    N *n,*nl;
    n = __m_add_nodes(h,l,1);

    // everything in the node is the same except the parenti
    *n = *on;
    // mark the original node deleted and drop its surface (ownership moved)
    on->flags = TFLAG_DELETED;
    on->surface = 0;
    on->size = 0;
    // which we got from the user portion of the state data
    n->parenti = parent.m ? parent.a.i : NULL_ADDR;
    s[oh.a.l].user.pi = l->nodes-1;

}
623 
632  struct {M *m;int l;} d = {NULL,oh.a.l};
633  _m_walk(oh,_m_detatchfn,&d);
634  H h = {d.m,{0,0}};
635  return h;
636 }
637 
646 
647  uint32_t s_size = SERIALIZED_HEADER_SIZE(m->levels);
648  uint32_t levels_size = 0;
649  size_t blob_size = 0;
650 
651  Mindex i;
652  H h = {m,{0,0}};
653 
654  // calculate level and blob sizes so we can allocate
655  for(h.a.l=0; h.a.l<m->levels; h.a.l++) {
656  L *l = GET_LEVEL(h);
657 
658  levels_size += SERIALIZED_LEVEL_SIZE(l);
659 
660  for(h.a.i=0;h.a.i < l->nodes;h.a.i++) {
661  N *n = GET_NODE(h,l);
662  blob_size+=n->size;
663  }
664  }
665 
666  size_t total_size = s_size+levels_size+blob_size;
667  S *s = malloc(total_size);
668  memset(s,0,total_size);
669  s->magic = m->magic;
670  s->total_size = total_size;
671  s->levels = m->levels;
672  s->blob_offset = s_size+levels_size;
673 
674  void *blob = s->blob_offset + (void *)s;
675 
676  levels_size = 0;
677  blob_size = 0;
678 
679  for(h.a.l=0; h.a.l<m->levels; h.a.l++) {
680  s->level_offsets[h.a.l] = levels_size;
681  L *sl = (L *) (((void *)s) + s_size + levels_size);
682 
683  L *l = GET_LEVEL(h);
684  levels_size += SERIALIZED_LEVEL_SIZE(l);
685 
686  sl->nodes = l->nodes;
687 
688  N *sn = sizeof(Mindex)+(void *)sl;
689  for(h.a.i=0;h.a.i < l->nodes;h.a.i++) {
690  N *n = GET_NODE(h,l);
691  *sn = *n;
692  // raise(SIGINT);
693 
694  if (n->flags & TFLAG_SURFACE_IS_RECEPTOR) {
695  raise_error("can't serialize receptors");
696  }
697 
698  if (n->flags & TFLAG_SURFACE_IS_TREE && !(n->flags & TFLAG_SURFACE_IS_RECEPTOR)) {
699  H sh = *(H *)n->surface;
700  S *ss = _m_serialize(sh.m);
701  *(size_t *)&sn->surface = blob_size;
702  // orth tree size wasn't included in the original total size so
703  // we have to realloc the buffer and increase the size
704  // @todo, a better way to do this would have been to serialize the orthogonal
705  // trees ahead of time in the size calculation loop so as not to have to
706  // realloc here, instead we could just copy the tree in
707  size_t new_total_size = s->total_size + ss->total_size;
708  s = realloc(s,new_total_size);
709  s->total_size = new_total_size;
710  // reset pointers into the serialized block because of the realloc:
711  // blob, sl and sn
712  blob = s->blob_offset + (void *)s;
713  sl = (L *) (((void *)s) + s_size + levels_size);
714  sn = sizeof(Mindex)+(void *)sl + SERIALIZED_NODE_SIZE*h.a.i;
715 
716  memcpy(blob+blob_size,ss,ss->total_size);
717  blob_size+=ss->total_size;
718  free(ss);
719  }
720  else if (n->flags & TFLAG_ALLOCATED) {
721  *(size_t *)&sn->surface = blob_size;
722  memcpy(blob+blob_size,n->surface,n->size);
723  blob_size+=n->size;
724  }
725  else {
726  memcpy(&sn->surface,&n->surface,n->size);
727  }
728 
729  sn = (N *) (SERIALIZED_NODE_SIZE + ((void*)sn));
730  }
731  }
732  return s;
733 }
734 
735 
744  M *m = malloc(sizeof(M));
745  m->magic = s->magic;
746  m->levels = s->levels;
747  m->lP = malloc(sizeof(L)*m->levels);
748  H h = {m,{0,0}};
749  void *blob = s->blob_offset + (void *)s;
750 
751  uint32_t s_size = SERIALIZED_HEADER_SIZE(m->levels);
752  for(h.a.l=0; h.a.l<m->levels; h.a.l++) {
753  L *sl = (L *) (((void *)s) + s_size + ((S *)s)->level_offsets[h.a.l]);
754  L *l = GET_LEVEL(h);
755  l->nodes = sl->nodes;
756  l->nP = malloc(sizeof(N)*l->nodes);
757  N *sn = sizeof(Mindex)+(void *)sl;
758  for(h.a.i=0;h.a.i < l->nodes;h.a.i++) {
759  N *n = GET_NODE(h,l);
760  *n = *sn;
761  void *surface = blob+*(size_t *)&sn->surface;
762  if (n->flags & TFLAG_SURFACE_IS_TREE && !(n->flags & TFLAG_SURFACE_IS_RECEPTOR)) {
763  if (!(n->flags & TFLAG_ALLOCATED)) {
764  raise_error("whoa! orthogonal tree handles are supposed to be allocated!");
765  }
766  H sh = _m_unserialize((S *)surface);
767  n->surface = malloc(sizeof(H));
768  memcpy(n->surface,&sh,sn->size);
769  }
770  else if (n->flags & TFLAG_ALLOCATED) {
771  n->surface = malloc(sn->size);
772  memcpy(n->surface,surface,sn->size);
773  }
774  else {
775  memcpy(&n->surface,&sn->surface,sn->size);
776  }
777  sn = (N *) (SERIALIZED_NODE_SIZE + ((void*)sn));
778  }
779  }
780  h.a.i = h.a.l = 0;
781  return h;
782 }
783 
784 
void _m_walk(H h, void(*walkfn)(H, N *, void *, MwalkState *, Maddr), void *user_data)
Definition: mtree.c:524
Definition: ceptr_types.h:114
Definition: ceptr_types.h:42
H _m_unserialize(S *s)
Definition: mtree.c:743
Symbol _m_symbol(H h)
Definition: mtree.c:448
void * _m_surface(H h)
Definition: mtree.c:352
H _m_detatch(H oh)
Definition: mtree.c:631
header file for symbol and structure definition functions
S * _m_serialize(M *m)
Definition: mtree.c:645
Definition: ceptr_types.h:68
Definition: ceptr_types.h:56
T * _t_root(T *t)
Definition: tree.c:1272
T * __t_new(T *parent, Symbol symbol, void *surface, size_t size, bool is_run_node)
Definition: tree.c:59
Symbol _t_symbol(T *t)
Definition: tree.c:1228
T * _t_child(T *t, int i)
Definition: tree.c:1251
void __m_add_level(M *m)
Definition: mtree.c:21
void * _t_surface(T *t)
Definition: tree.c:1215
Maddr _m_child(H h, Mindex c)
Definition: mtree.c:383
T * _t_newt(T *parent, Symbol symbol, T *surface)
Definition: tree.c:133
SState * state(StateType type, int *statesP, int level)
Definition: semtrex.c:103
size_t _m_size(H h)
Definition: mtree.c:276
N * __m_get(H h)
Definition: mtree.c:264
T * _t_new_from_m(H h)
Definition: mtree.c:250
H __m_new(H parent, Symbol symbol, void *surface, size_t size, uint32_t flags)
Definition: mtree.c:94
Definition: ceptr_types.h:51
H _m_new_root(Symbol s)
Definition: mtree.c:134
H _m_new_from_t(T *t)
Definition: mtree.c:206
void __m_free(H h, int free_surface)
Definition: mtree.c:284
H _m_add(H parent, H h)
Definition: mtree.c:488
H _m_newi(H parent, Symbol symbol, int surface)
Definition: mtree.c:157
Definition: ceptr_types.h:83
Maddr _m_parent(H h)
Definition: mtree.c:366
int _m_children(H h)
Definition: mtree.c:315
semantic tree matrix header file ``
int _t_children(T *t)
Definition: tree.c:1205
Maddr _m_next_sibling(H h)
Definition: mtree.c:459
H _m_newr(H parent, Symbol s)
Definition: mtree.c:145
size_t _t_size(T *t)
Definition: tree.c:1238
N * __m_add_nodes(H h, L *l, int c)
Definition: mtree.c:34