13 #include "ceptr_error.h"
// Sentinel tree handle meaning "no tree / no parent": a null matrix pointer
// and an invalid address ({NULL_ADDR,NULL_ADDR}). Passed as the parent handle
// when creating a root node (see the _m_new(null_H,...) call later in this file).
17 const H null_H = {0,{NULL_ADDR,NULL_ADDR}};
23 m->lP = malloc(
sizeof(
L));
26 m->lP = realloc(m->lP,
sizeof(
L)*m->levels);
38 size_t s =
sizeof(
N)*c;
46 size_t ns =
sizeof(
N)*l->nodes;
47 l->nP = realloc(l->nP,ns);
55 void __m_new_init(
H parent,
H *h,
L **l) {
57 h->a.l = parent.a.l+1;
59 if (parent.a.l >= parent.m->levels) {
60 raise_error(
"address too deep!");
62 else if (parent.a.l == parent.m->levels-1) {
74 void __m_new_root(
H *h,
L **l) {
75 M *m = h->m = malloc(
sizeof(
M));
76 m->magic = matrixImpl;
99 __m_new_init(parent,&h,&l);
111 n->parenti = parent.m ? parent.a.i : NULL_ADDR;
114 if (size <=
sizeof(
void *)) {
115 memcpy(&n->surface,surface,size);
118 n->flags |= TFLAG_ALLOCATED;
119 n->surface = malloc(size);
121 memcpy(n->surface,surface,size);
135 return _m_new(null_H,s,0,0);
146 return _m_new(parent,s,0,0);
158 return _m_new(parent,symbol,&surface,
sizeof(
int));
163 H __mnft(
H parent,
T *t) {
167 uint32_t flags = t->context.flags & ~TFLAG_ALLOCATED;
172 if (flags & (TFLAG_SURFACE_IS_RECEPTOR+TFLAG_SURFACE_IS_SCAPE+TFLAG_SURFACE_IS_CPTR)) flags |= TFLAG_REFERENCE;
177 if (flags & TFLAG_SURFACE_IS_TREE && !(flags & TFLAG_SURFACE_IS_RECEPTOR)) {
182 if (flags & (TFLAG_SURFACE_IS_RECEPTOR+TFLAG_SURFACE_IS_SCAPE+TFLAG_SURFACE_IS_CPTR)) {
188 if (flags&TFLAG_RUN_NODE) {
192 n->cur_child = ((
rT *)t)->cur_child;
218 T **tP = (
T**) &(((
struct {
T *t;} *)data)->t);
219 T *t = h.a.l ? (s[h.a.l-1].user.t) : NULL;
220 int is_run_node = (n->flags&TFLAG_RUN_NODE);
224 if (n->flags & TFLAG_SURFACE_IS_TREE && !(n->flags & TFLAG_SURFACE_IS_RECEPTOR)) {
225 if (is_run_node) raise_error(
"not implemented");
228 else if (n->flags & TFLAG_ALLOCATED) {
229 nt =
__t_new(t,n->symbol,n->surface,n->size,is_run_node);
232 nt =
__t_new(t,n->symbol,&n->surface,n->size,is_run_node);
234 nt->context.flags |= (~TFLAG_ALLOCATED)&(n->flags);
237 ((
rT *)nt)->cur_child = n->cur_child;
241 s[h.a.l].user.t = nt;
251 struct {
T *t;} d = {NULL};
266 N *n = GET_NODE(h,l);
287 L *l = _GET_LEVEL(h,i);
291 N *n = _GET_NODE(h,l,j);
292 if (!(n->flags & TFLAG_REFERENCE)) {
293 if (n->flags & TFLAG_SURFACE_IS_RECEPTOR) raise_error(
"mtree can't free receptor!");
294 if (n->flags & TFLAG_SURFACE_IS_TREE && !(n->flags & TFLAG_SURFACE_IS_RECEPTOR)) {
295 _m_free(*(
H *)n->surface);
297 if (n->flags & TFLAG_ALLOCATED) {
316 Mlevel levels = h.m->levels;
318 if (h.a.l >= levels) {
319 raise_error(
"address too deep!");
321 else if (h.a.l == levels-1) {
324 L *l = _GET_LEVEL(h,h.a.l+1);
326 Mindex i = 0,pi = h.a.i;
327 Mindex max = l->nodes;
339 if (!(n->flags & TFLAG_DELETED) && pi == n->parenti) c++;
354 if (n->flags & TFLAG_ALLOCATED)
367 Maddr a = {NULL_ADDR,NULL_ADDR};
384 Maddr a = {NULL_ADDR,NULL_ADDR};
385 Mlevel levels = h.m->levels;
386 if (h.a.l >= levels) {
387 raise_error(
"address too deep!");
389 else if (h.a.l == levels-1) {
393 L *l = &h.m->lP[a.l];
394 Mindex ci = 0,max = l->nodes;
396 if (c == NULL_ADDR) {
399 if (n->parenti == h.a.i) a.i = ci;
403 if (a.i == NULL_ADDR)
409 if (n->parenti == h.a.i) ci++;
410 if (ci == c)
return a;
463 N *n = GET_NODE(h,l);
464 Mindex pi = n->parenti;
467 if (n->parenti == pi) {
492 int i,levels = h.m->levels;
494 Mindex d = parent.a.i;
495 for (i=0;i<levels;i++) {
496 __m_new_init(p,&r,&pl);
503 if (np->parenti == NULL_ADDR) np->parenti = 0;
507 d = pl->nodes-l->nodes;
525 int levels = h.m->levels;
535 int backup,nodes = h.a.i+1;
542 while ((h.a.i < nodes) && ((n->flags & TFLAG_DELETED) || (n->parenti != ap.i))) {
547 if (h.a.i != nodes) {
548 (*walkfn)(h,n,user_data,
state,ap);
550 if (h.a.l+1 < levels) {
551 state[h.a.l].i = h.a.i;
560 if (++h.a.i == nodes)
568 if (h.a.l == root) {backup = 0;done = 1;}
573 h.a.i = state[h.a.l].i+1;
581 ap.i = state[ap.l].i;
590 struct {
M *m;
int l;} *d = data;
602 parent.a.i = s[oh.a.l-1].user.pi;
606 parent.a.l = oh.a.l-1-d->l;
607 __m_new_init(parent,&h,&l);
615 on->flags = TFLAG_DELETED;
619 n->parenti = parent.m ? parent.a.i : NULL_ADDR;
620 s[oh.a.l].user.pi = l->nodes-1;
632 struct {
M *m;
int l;} d = {NULL,oh.a.l};
647 uint32_t s_size = SERIALIZED_HEADER_SIZE(m->levels);
648 uint32_t levels_size = 0;
649 size_t blob_size = 0;
655 for(h.a.l=0; h.a.l<m->levels; h.a.l++) {
658 levels_size += SERIALIZED_LEVEL_SIZE(l);
660 for(h.a.i=0;h.a.i < l->nodes;h.a.i++) {
661 N *n = GET_NODE(h,l);
666 size_t total_size = s_size+levels_size+blob_size;
667 S *s = malloc(total_size);
668 memset(s,0,total_size);
670 s->total_size = total_size;
671 s->levels = m->levels;
672 s->blob_offset = s_size+levels_size;
674 void *blob = s->blob_offset + (
void *)s;
679 for(h.a.l=0; h.a.l<m->levels; h.a.l++) {
680 s->level_offsets[h.a.l] = levels_size;
681 L *sl = (
L *) (((
void *)s) + s_size + levels_size);
684 levels_size += SERIALIZED_LEVEL_SIZE(l);
686 sl->nodes = l->nodes;
688 N *sn =
sizeof(Mindex)+(
void *)sl;
689 for(h.a.i=0;h.a.i < l->nodes;h.a.i++) {
690 N *n = GET_NODE(h,l);
694 if (n->flags & TFLAG_SURFACE_IS_RECEPTOR) {
695 raise_error(
"can't serialize receptors");
698 if (n->flags & TFLAG_SURFACE_IS_TREE && !(n->flags & TFLAG_SURFACE_IS_RECEPTOR)) {
699 H sh = *(
H *)n->surface;
701 *(
size_t *)&sn->surface = blob_size;
707 size_t new_total_size = s->total_size + ss->total_size;
708 s = realloc(s,new_total_size);
709 s->total_size = new_total_size;
712 blob = s->blob_offset + (
void *)s;
713 sl = (
L *) (((
void *)s) + s_size + levels_size);
714 sn =
sizeof(Mindex)+(
void *)sl + SERIALIZED_NODE_SIZE*h.a.i;
716 memcpy(blob+blob_size,ss,ss->total_size);
717 blob_size+=ss->total_size;
720 else if (n->flags & TFLAG_ALLOCATED) {
721 *(
size_t *)&sn->surface = blob_size;
722 memcpy(blob+blob_size,n->surface,n->size);
726 memcpy(&sn->surface,&n->surface,n->size);
729 sn = (
N *) (SERIALIZED_NODE_SIZE + ((
void*)sn));
744 M *m = malloc(
sizeof(
M));
746 m->levels = s->levels;
747 m->lP = malloc(
sizeof(
L)*m->levels);
749 void *blob = s->blob_offset + (
void *)s;
751 uint32_t s_size = SERIALIZED_HEADER_SIZE(m->levels);
752 for(h.a.l=0; h.a.l<m->levels; h.a.l++) {
753 L *sl = (
L *) (((
void *)s) + s_size + ((
S *)s)->level_offsets[h.a.l]);
755 l->nodes = sl->nodes;
756 l->nP = malloc(
sizeof(
N)*l->nodes);
757 N *sn =
sizeof(Mindex)+(
void *)sl;
758 for(h.a.i=0;h.a.i < l->nodes;h.a.i++) {
759 N *n = GET_NODE(h,l);
761 void *surface = blob+*(
size_t *)&sn->surface;
762 if (n->flags & TFLAG_SURFACE_IS_TREE && !(n->flags & TFLAG_SURFACE_IS_RECEPTOR)) {
763 if (!(n->flags & TFLAG_ALLOCATED)) {
764 raise_error(
"whoa! orthogonal tree handles are supposed to be allocated!");
767 n->surface = malloc(
sizeof(
H));
768 memcpy(n->surface,&sh,sn->size);
770 else if (n->flags & TFLAG_ALLOCATED) {
771 n->surface = malloc(sn->size);
772 memcpy(n->surface,surface,sn->size);
775 memcpy(&n->surface,&sn->surface,sn->size);
777 sn = (
N *) (SERIALIZED_NODE_SIZE + ((
void*)sn));
void _m_walk(H h, void(*walkfn)(H, N *, void *, MwalkState *, Maddr), void *user_data)
header file for symbol and structure definition functions
T * __t_new(T *parent, Symbol symbol, void *surface, size_t size, bool is_run_node)
T * _t_child(T *t, int i)
Maddr _m_child(H h, Mindex c)
T * _t_newt(T *parent, Symbol symbol, T *surface)
SState * state(StateType type, int *statesP, int level)
H __m_new(H parent, Symbol symbol, void *surface, size_t size, uint32_t flags)
void __m_free(H h, int free_surface)
H _m_newi(H parent, Symbol symbol, int surface)
semantic tree matrix header file
Maddr _m_next_sibling(H h)
H _m_newr(H parent, Symbol s)
N * __m_add_nodes(H h, L *l, int c)