OpenSS7 SS7 for the Common Man |
© Copyright 1997–2004, OpenSS7 Corporation. All Rights Reserved. |
||||||||||||||||||||||||||
Home | Overview | Status | News | Documentation | Resources | About | |||||||||||||||||||||
File /code/strss7/drivers/sigtran/ua.c#ident "@(#) $RCSfile: ua.c,v $ $Name: $($Revision: 0.8.2.4 $) $Date: 2003/04/14 12:13:20 $" static char const ident[] = "$RCSfile: ua.c,v $ $Name: $($Revision: 0.8.2.4 $) $Date: 2003/04/14 12:13:20 $"; #define EXPORT_SYMTAB #ifdef _DEBUG #define DEBUG_SYMBOLS #endif #include <linux/config.h> #include <linux/version.h> #ifdef MODVERSIONS #include <linux/modversions.h> #endif #include <linux/module.h> #include <sys/stream.h> #include <sys/stropts.h> #include <sys/cmn_err.h> #include <sys/dki.h> #include <ss7/ua_lm.h> #include <ss7/ua_lm_ioctl.h> #include "../debug.h" #include "../bufq.h" #include "ua_data.h" #include "ua.h" #include "ua_msgs.h" #define UA_DESCRIP "SIGTRAN UA STREAMS MULTIPLEXING DRIVER." #define UA_COPYRIGHT "Copyright (c) 1997-2002 OpenSS7 Corporation. All Rights Reserved." #define UA_DEVICE "Part of the OpenSS7 Stack for LiS STREAMS." #define UA_CONTACT "Brian Bidulock <bidulock@openss7.org>" #define UA_LICENSE "GPL" #define UA_BANNER UA_DESCRIP "\n" \ UA_COPYRIGHT "\n" \ UA_DEVICE "\n" \ UA_CONTACT "\n" MODULE_AUTHOR(UA_CONTACT); MODULE_DESCRIPTION(UA_DESCRIP); MODULE_SUPPORTED_DEVICE(UA_DEVICE); #ifdef MODULE_LICENSE MODULE_LICENSE(UA_LICENSE); #endif #ifndef UALM_CMAJOR #define UALM_CMAJOR 220 #endif #undef INT #ifdef LINUX_2_4 #define INT int #else #define INT void #endif /* * ========================================================================= * * STREAMS Definitions * * ========================================================================= */ static int ua_open(queue_t *, dev_t *, int, int, cred_t *); static int ua_close(queue_t *, int, cred_t *); static INT ua_rput(queue_t *, mblk_t *); static INT ua_rsrv(queue_t *); static INT ua_wput(queue_t *, mblk_t *); static INT ua_wsrv(queue_t *); static struct module_info ua_minfo = { 0, /* Module ID number */ "ua", /* Module name */ 0, /* Min packet size accepted */ INFPSZ, /* Max packet size accepted */ 1, /* Hi water mark */ 0 /* Lo water mark */ }; 
static struct qinit ua_rinit = { ua_rput, /* Read put (msg from below) */ ua_rsrv, /* Read queue service */ ua_open, /* Each open */ ua_close, /* Last close */ NULL, /* Admin (not used) */ &ua_minfo, /* Information */ NULL /* Statistics */ }; static struct qinit ua_winit = { ua_wput, /* Write put (msg from above) */ ua_wsrv, /* Write queue service */ NULL, /* Each open */ NULL, /* Last close */ NULL, /* Admin (not used) */ &ua_minfo, /* Information */ NULL /* Statistics */ }; static struct streamtab ua_info = { &ua_rinit, /* Upper read queue */ &ua_winit, /* Upper write queue */ &ua_rinit, /* Lower read queue */ &ua_winit /* Lower write queue */ }; /* * ========================================================================= * * Buffer Allocation * * ========================================================================= */ /* * BUFSRV calls service routine * ------------------------------------ * Buffer call service routine, simply enables the queue against which the * buffer call was originally scheduled. */ static void ua_bufsrv(long data) { queue_t *q = (queue_t *) data; ua_t *ua = (ua_t *) q->q_ptr; if (q == ua->rq) if (ua->rbid) ua->rbid = 0; if (q == ua->wq) if (ua->wbid) ua->wbid = 0; qenable(q); } /* * BUFCALL for enobufs * ------------------------------------ * Generate a buffer call against a queue. */ void ua_bufcall(queue_t * q, size_t size, int prior) { ua_t *ua = (ua_t *) q->q_ptr; if (q == ua->rq) if (!ua->rbid) ua->rbid = bufcall(size, prior, &ua_bufsrv, (long) q); if (q == ua->wq) if (!ua->wbid) ua->wbid = bufcall(size, prior, &ua_bufsrv, (long) q); return; } /* * ========================================================================= * * STATE MACHINES * * ========================================================================= */ /* * SG ACTIVATE * ----------------------------------- * This function is for requesting activation of all of the SGP/SS7-P * associated with an SG. 
It also handles the situation where the SGP/SS7-P * is already activating or is in the process of deactivating. */ int sg_activate_req(queue_t * q, as_t * ps) { int err; gp_t *gp; for (gp = ps->gp; gp; gp = gp->xp.next) { xp_t *xp = gp->xp.xp; lp_t *lp = xp->pp.pp; if ((1 << gp->state) & ((ASM_INAC_STATES & ~ASF_WACK_ASPAC) | ASF_WACK_ASPIA)) { if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); if ((err = ua_send_aspac_req(q, lp->wq, ps->tmode, gp->rc))) return (err); gp->t_ack = timeout(ua_gp_ack_timeout, (caddr_t) gp, ua_t_ack_val); if (gp->state == AS_WACK_ASPIA) ps->xpir_count--; ps->xpar_count++; gp->state = AS_WACK_ASPAC; } } return (0); } /* * SG INACTIVE * ----------------------------------- * This function is for requesting deactivation of all the SGP/SS7-P * sasociated with an SG. It also handles the stuation where the SGP/SS7-P * is already deactivating or is in the process of activating. */ int sg_inactive_req(queue_t * q, as_t * ps) { int err; gp_t *gp; for (gp = ps->gp; gp; gp = gp->xp.next) { xp_t *xp = gp->xp.xp; lp_t *lp = xp->pp.pp; if ((1 << gp->state) & ((ASM_ACTV_STATES & ~ASF_WACK_ASPIA) | ASF_WACK_ASPAC)) { if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); if (gp->t_hbt) untimeout(xchg(&gp->t_hbt, 0)); if ((err = ua_send_aspia_req(q, lp->wq, gp->rc))) return (err); gp->t_ack = timeout(ua_gp_ack_timeout, (caddr_t) gp, ua_t_ack_val); if (gp->state == AS_WACK_ASPAC) ps->xpar_count--; ps->xpir_count++; gp->state = AS_WACK_ASPIA; } } return (0); } /* * AS ACTIVATE * ----------------------------------- * This function is for requesting activation of all the SG associated with * an AS. It also handles the sitation where SGs are already activating or * deactivating as well as the situation where all activation attempts fail. 
*/ int as_active_req(queue_t * q, as_t * as) { int err; ap_t *ap; /* activate each SG */ for (ap = as->ap; ap; ap = ap->user.next) { as_t *ps = ap->prov.as; if ((err = sg_activate_req(q, ps))) return (err); if (ps->xpar_count) { as->spar_count++; ps->state = AS_WACK_ASPAC; } } if (as->spar_count) { as->state = AS_WACK_ASPAC; return (0); } /* failed to activate anything */ return UA_ECODE_REFUSED_MANAGEMENT_BLOCKING; } /* * AS INACTIVE * ----------------------------------- * This function is for requesting deactivation of all the SG associated with * an AS. It also handles the situation where the SGs are already * deactivating or activating, as well as the situation where all * deactivation attempts fail. */ int as_inactive_req(queue_t * q, as_t * as) { int err; ap_t *ap; /* deactivate each SG */ for (ap = as->ap; ap; ap = ap->user.next) { as_t *ps = ap->prov.as; if ((err = sg_inactive_req(q, ps))) return (err); if (ps->xpir_count) { as->spir_count++; ps->state = AS_WACK_ASPIA; } } if (as->spir_count) { as->state = AS_WACK_ASPIA; return (0); } /* failed to deactivate anything */ return UA_ECODE_REFUSED_MANAGEMENT_BLOCKING; } /* * ASP ACTIVE REQ * ----------------------------------- * This function is for requesting activation of a specific ASP within an AS. 
*/ int asp_active_req(queue_t * q, gp_t * gp) { int err; as_t *as = gp->as.as; xp_t *xp = gp->xp.xp; lp_t *lp = xp->pp.pp; if ((1 << gp->state) & (ASM_DOWN_STATES)) { return UA_ECODE_UNEXPECTED_MESSAGE; } if ((1 << gp->state) & (ASM_UREG_STATES)) { return UA_ECODE_UNEXPECTED_MESSAGE; } if ((1 << gp->state) & (ASM_INAC_STATES)) { if (as->xpac_count) { if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); if ((err = ua_send_aspac_ack(q, lp->wq, as->tmode, gp->rc))) return (err); xp->asia_count--; xp->asac_count++; as->xpia_count--; as->xpac_count++; gp->state = AS_ACTIVE; } else { if ((err = as_active_req(q, as))) return (err); as->xpar_count++; gp->state = AS_WACK_ASPAC; } return (0); } if ((1 << gp->state) & (ASM_ACTV_STATES)) { if ((err = ua_send_aspac_ack(q, lp->wq, as->tmode, gp->rc))) return (err); return (0); } if ((1 << gp->state) & (ASF_BLOCKED)) { return UA_ECODE_REFUSED_MANAGEMENT_BLOCKING; } return UA_ECODE_UNEXPECTED_MESSAGE; } /* * ASP ACTIVE ACK * ----------------------------------- * This function acknowledges activation of the AS to all ASP/SS7-Us * expecting acknowledgement. It also provides AS state change notification * when sufficient ASP/SS7-Us become active. */ int asp_active_ack(queue_t * q, as_t * as) { int err; gp_t *gp; for (gp = as->gp; gp; gp = gp->xp.next) { if (gp->state == AS_WACK_ASPAC) { xp_t *xp = gp->xp.xp; lp_t *lp = xp->pp.pp; if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); if ((err = ua_send_aspac_ack(q, lp->wq, as->tmode, gp->rc))) return (err); xp->asia_count--; xp->asac_count++; as->xpia_count--; as->xpac_count++; gp->state = AS_ACTIVE; } } return (0); } /* * ASP ACTIVE NAK * ----------------------------------- * This function negatively acknowledges (indicates failure) activation of * the AS to all ASP/SS7-Us expecting acknowledgement. 
*/ int asp_active_nack(queue_t * q, as_t * as) { int err; gp_t *gp; uint32_t rc, *rcp = &rc; for (gp = as->gp; gp; gp = gp->xp.next) { if (gp->state == AS_WACK_ASPAC) { xp_t *xp = gp->xp.xp; pp_t *pp = xp->pp.pp; if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); fixme(("Either this or protocol error\n")); err = UA_ECODE_REFUSED_MANAGEMENT_BLOCKING; rc = htonl(gp->rc); if ((err = ua_reply_err(q, pp->rq, err, rcp, NULL, NULL, NULL))) return (err); gp->state = AS_INACTIVE; } } return (0); } /* * ASP INACTIVE ACK * ----------------------------------- * This function acknowledges deactivation of the AS to all ASP/SS7-Us * expecting acknowledgement. It also provides AS state change notification * when sufficient ASP/SS7-Us become inactive. */ int asp_inactive_ack(queue_t * q, as_t * as) { int err; gp_t *gp; for (gp = as->gp; gp; gp = gp->xp.next) { /* any active ASP has to go */ if ((1 << gp->state) & (ASM_ACTV_STATES)) { xp_t *xp = gp->xp.xp; lp_t *lp = xp->pp.pp; /* solicited or unsolicited */ if ((err = ua_send_aspia_ack(q, lp->wq, gp->rc))) return (err); if (as->xpac_count <= as->mina_count) if ((err = ua_ntfy_as_change(q, as, UA_STATUS_AS_INSUFFICIENT_ASPS))) return (err); if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); if (gp->t_hbt) untimeout(xchg(&gp->t_hbt, 0)); xp->asac_count--; xp->asia_count++; as->xpac_count--; as->xpia_count++; gp->state = AS_INACTIVE; } } return (0); } /* * ASP OVERRIDE * ----------------------------------- * This function overrides one ASP with another when another ASP is active or * activating. 
*/ int asp_override(queue_t * q, as_t * as) { int err; gp_t *gp; for (gp = as->gp; gp; gp = gp->xp.next) { if ((1 << gp->state) & (ASF_WACK_ASPAC | ASF_ACTIVE)) { xp_t *xp = gp->xp.xp; lp_t *lp = xp->pp.pp; if (gp->state == AS_WACK_ASPAC) { if ((err = ua_send_aspac_ack(q, lp->wq, as->tmode, gp->rc))) return (err); if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); as->xpar_count--; } /* FIXME: needs aspid */ if ((err = ua_send_ntfy(q, lp->wq, &gp->rc, NULL, UA_STATUS_ALTERNATE_ASP_ACTIVE))) return (err); xp->asia_count--; xp->asac_count++; /* XXX Huh? */ as->xpia_count--; as->xpac_count++; /* XXX Huh? */ gp->state = AS_INACTIVE; break; } } if (as->xpac_count == as->mina_count) if ((err = ua_ntfy_as_change(q, as, UA_STATUS_AS_ACTIVE))) return (err); return (0); } /* * AS ACTIVE ACK * ----------------------------------- * If the AS is expecting an acknowledgement and we are the first successfully * responding SG, then acknowledge activation to all ASP/SS7-Us in the AS and * enter the active state. Provide notifications as appropriate. */ int as_active_ack(queue_t * q, as_t * as) { int err; if (as->state == AS_WACK_ASPAC) { if (as->spac_count == 0) { if ((err = asp_active_ack(q, as))) return (err); if (as->xpac_count == as->mina_count) if ((err = ua_ntfy_as_change(q, as, UA_STATUS_AS_ACTIVE))) return (err); if (as->xpac_count == as->mina_count) if ((err = ua_ntfy_as_change(q, as, UA_STATUS_AS_MINIMUM_ASPS))) return (err); as->state = AS_ACTIVE; } as->spia_count--; as->spac_count++; } if (as->spar_count) as->spar_count--; return (0); } /* * AS ACTIVE NACK * ----------------------------------- * If the AS is expecting an acknowledgement and we are the last responding SG, * the negatively acknowledge activation to all ASP/SS7-Us in the AS and enter * the inactive state. 
*/ int as_active_nack(queue_t * q, as_t * as) { int err; if (as->state == AS_WACK_ASPAC) { if (as->spar_count == 1) { if ((err = asp_active_nack(q, as))) return (err); as->state = AS_INACTIVE; } if (as->spar_count) as->spar_count--; } return (0); } /* * AS INACTIVE ACK * ----------------------------------- * If the AS is in any active state and we are the last responding or last * active SG, then acknowledge deactivation to all ASP/SS7-Us in the AS and * enter the inactive state. Provide notifications as appropriate. */ int as_inactive_ack(queue_t * q, as_t * as) { int err; if ((1 << as->state) && (ASM_ACTV_STATES)) { if (as->spir_count == 1 || as->spac_count == 1) { if ((err = asp_inactive_ack(q, as))) return (err); if (as->xpac_count && as->xpac_count < as->mina_count) if ((err = ua_ntfy_as_change(q, as, UA_STATUS_AS_INSUFFICIENT_ASPS))) return (err); if (as->xpac_count && as->xpac_count == as->mina_count) if ((err = ua_ntfy_as_change(q, as, UA_STATUS_AS_MINIMUM_ASPS))) return (err); if (!as->xpac_count) if ((err = ua_ntfy_as_change(q, as, UA_STATUS_AS_INACTIVE))) return (err); as->state = AS_INACTIVE; } as->spac_count--; as->spia_count++; if (as->spir_count) as->spir_count--; } return (0); } /* * SG INACTIVE ACK * ----------------------------------- * If the SG is in any active state and either we are the last responding * SGP/SS7-P or we are the last active SGP/SS7-P, then we will acknolwedge * deactivation for the AS and enter the inactive state. 
* */ int sg_inactive_ack(queue_t * q, as_t * ps) { int err; switch (ps->state) { case AS_WACK_HBEAT: case AS_ACTIVE: if (ps->xpac_count == 1) { if ((err = as_inactive_ack(q, ps->ap->user.as))) return (err); ps->state = AS_INACTIVE; } ps->xpac_count--; ps->xpia_count++; return (0); case AS_WACK_ASPIA: if (ps->xpir_count == 1) { if ((err = as_inactive_ack(q, ps->ap->user.as))) return (err); ps->state = AS_INACTIVE; } ps->xpac_count--; ps->xpia_count++; ps->xpir_count--; return (0); case AS_WACK_ASPAC: if (ps->xpar_count == 1) { if ((err = as_inactive_ack(q, ps->ap->user.as))) return (err); ps->state = AS_INACTIVE; } ps->xpar_count--; return (0); case AS_INACTIVE: return (0); } return (UA_ECODE_UNEXPECTED_MESSAGE); } /* * SG ACTIVE ACK * ----------------------------------- * If the AS is expecting an activation acknowledgement from this SG, and no * SGP/SS7-Ps are currently active, then we will acknowledge activation for the * AS and enter the active state. */ int sg_active_ack(queue_t * q, as_t * ps) { int err; if (ps->state == AS_WACK_ASPAC) { if (ps->xpac_count == 0) { if ((err = as_active_ack(q, ps->ap->user.as))) return (err); ps->state = AS_ACTIVE; } ps->xpia_count--; ps->xpac_count++; } if (ps->xpar_count) ps->xpar_count--; return (0); } /* * SG ACTIVE NACK * ----------------------------------- * If the AS is expecting an activation acknowledgement from this SG, and no * SGPs are currently active and we are the last responding SGP/SS7-P in the * SG, then we will negatively acknowledge activation to the AS and enter the * inactive state. 
*/ int sg_active_nack(queue_t * q, as_t * ps) { int err; if (ps->state == AS_WACK_ASPAC) { if (ps->xpac_count == 0 && ps->xpar_count == 1) { if ((err = as_active_nack(q, ps->ap->user.as))) return (err); ps->state = AS_INACTIVE; } } if (ps->xpar_count) ps->xpar_count--; return (0); } /* * SGP INACTIVE ACK * ----------------------------------- */ int sgp_inactive_ack(queue_t * q, gp_t * gp) { int err; xp_t *xp = gp->xp.xp; as_t *ps = gp->as.as; switch (gp->state) { case AS_WACK_ASPUP: case AS_WACK_ASPDN: case AS_DOWN: case AS_WRSP_REG: case AS_WRSP_DEREG: case AS_UNREG: case AS_PENDING: case AS_BLOCKED: default: ptrace(("ERROR: SGP inactive ack in unexpected state\n")); return (UA_ECODE_UNEXPECTED_MESSAGE); case AS_INACTIVE: case AS_WACK_ASPAC: if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); if ((err = sg_inactive_ack(q, ps))) return (err); gp->state = AS_INACTIVE; return (0); case AS_WACK_HBEAT: if (gp->t_hbt) untimeout(xchg(&gp->t_hbt, 0)); case AS_WACK_ASPIA: if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); case AS_ACTIVE: if ((err = sg_inactive_ack(q, ps))) return (err); ps->xpac_count--; xp->asac_count--; ps->xpia_count++; xp->asia_count++; gp->state = AS_INACTIVE; return (0); } } /* * SGP ACTIVE ACK * ----------------------------------- * Acknowledge activation of a specific SGP for an AS. 
*/ int sgp_active_ack(queue_t * q, gp_t * gp) { int err; xp_t *xp = gp->xp.xp; as_t *ps = gp->as.as; switch (gp->state) { case AS_WACK_ASPUP: case AS_WACK_ASPDN: case AS_DOWN: case AS_WRSP_REG: case AS_WRSP_DEREG: case AS_UNREG: case AS_PENDING: case AS_BLOCKED: default: ptrace(("ERROR: SGP active ack in unexpected state\n")); return (UA_ECODE_UNEXPECTED_MESSAGE); case AS_WACK_ASPAC: if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); if ((err = sg_active_ack(q, ps))) return (err); ps->xpia_count--; xp->asia_count--; ps->xpac_count++; xp->asac_count++; gp->state = AS_ACTIVE; return (0); case AS_WACK_ASPIA: if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); if ((err = sg_active_ack(q, ps))) return (err); gp->state = AS_ACTIVE; return (0); case AS_INACTIVE: case AS_WACK_HBEAT: case AS_ACTIVE: return (0); } } /* * SGP ACTIVE NACK * ----------------------------------- * Negatively acknowledge activation of a specific SGP for an AS. */ int sgp_active_nack(queue_t * q, gp_t * gp) { int err; xp_t *xp = gp->xp.xp; as_t *ps = gp->as.as; (void) err; (void) xp; (void) ps; switch (gp->state) { case AS_WACK_ASPUP: case AS_WACK_ASPDN: case AS_DOWN: case AS_WRSP_REG: case AS_WRSP_DEREG: case AS_UNREG: case AS_PENDING: case AS_BLOCKED: default: ptrace(("ERROR: SGP active ack in unexpected state\n")); return (UA_ECODE_UNEXPECTED_MESSAGE); case AS_WACK_ASPAC: if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); case AS_INACTIVE: case AS_WACK_HBEAT: if (gp->t_hbt) untimeout(xchg(&gp->t_hbt, 0)); case AS_WACK_ASPIA: if (gp->t_ack) untimeout(xchg(&gp->t_ack, 0)); case AS_ACTIVE: } return (-EFAULT); } /* * ========================================================================= * * Timeouts * * ========================================================================= * These are timeout functions. There are three levels of timeouts: one for * applicatoin servers, one for ASPs and SGPs, and one for Application Server * requests to a specific ASP or SGP. 
*/ void ua_as_ack_timeout(caddr_t data) { /* * This timeout function is used for sending ASPAC and ASPIA. */ as_t *as = (as_t *) data; (void) as; switch (as->state) { case AS_PENDING: case AS_WACK_HBEAT: } fixme(("write this function\n")); return; } void ua_xp_ack_timeout(caddr_t data) { /* * This timeout function is used for sending ASPUP and ASPDN. */ xp_t *xp = (xp_t *) data; (void) xp; switch (xp->state) { case ASP_WACK_ASPUP: case ASP_WACK_ASPDN: case ASP_WACK_HBEAT: } fixme(("write this function\n")); return; } void ua_gp_ack_timeout(caddr_t data) { /* * This timeout function is used for sending ASPAC and ASPIA * as well as REG REQ and DEREG REQ. */ gp_t *gp = (gp_t *) data; (void) gp; switch (gp->state) { case ASP_WACK_ASPIA: case ASP_WACK_ASPAC: case ASP_WACK_HBEAT: } fixme(("write this function\n")); return; } /* * ========================================================================= * * Cache initialization and termination. * * ========================================================================= */ kmem_cache_t *ua_pp_cachep = NULL; kmem_cache_t *ua_xp_cachep = NULL; kmem_cache_t *ua_gp_cachep = NULL; kmem_cache_t *ua_as_cachep = NULL; kmem_cache_t *ua_ap_cachep = NULL; kmem_cache_t *ua_sp_cachep = NULL; kmem_cache_t *ua_np_cachep = NULL; static int ua_pp_cache_allocated = 0; static int ua_xp_cache_allocated = 0; static int ua_gp_cache_allocated = 0; static int ua_as_cache_allocated = 0; static int ua_ap_cache_allocated = 0; static int ua_sp_cache_allocated = 0; static int ua_np_cache_allocated = 0; static void ua_term_caches(void) { if (ua_pp_cachep) { if (ua_pp_cache_allocated) kmem_cache_destroy(ua_pp_cachep); ua_pp_cachep = NULL; } if (ua_xp_cachep) { if (ua_xp_cache_allocated) kmem_cache_destroy(ua_xp_cachep); ua_xp_cachep = NULL; } if (ua_gp_cachep) { if (ua_gp_cache_allocated) kmem_cache_destroy(ua_gp_cachep); ua_gp_cachep = NULL; } if (ua_sp_cachep) { if (ua_sp_cache_allocated) kmem_cache_destroy(ua_sp_cachep); ua_sp_cachep = NULL; } if 
(ua_as_cachep) { if (ua_as_cache_allocated) kmem_cache_destroy(ua_as_cachep); ua_as_cachep = NULL; } if (ua_ap_cachep) { if (ua_ap_cache_allocated) kmem_cache_destroy(ua_ap_cachep); ua_ap_cachep = NULL; } if (ua_np_cachep) { if (ua_np_cache_allocated) kmem_cache_destroy(ua_np_cachep); ua_np_cachep = NULL; } return; } static int ua_init_caches(void) { if (!(ua_pp_cachep)) if (!(ua_pp_cachep = kmem_cache_create("ua_pp_cachep", sizeof(pp_t), 0, SLAB_HWCACHE_ALIGN, NULL, NULL))) goto ua_init_caches_failed; else ua_pp_cache_allocated = 1; if (!(ua_xp_cachep)) if (!(ua_xp_cachep = kmem_cache_create("ua_xp_cachep", sizeof(xp_t), 0, SLAB_HWCACHE_ALIGN, NULL, NULL))) goto ua_init_caches_failed; else ua_xp_cache_allocated = 1; if (!(ua_gp_cachep)) if (!(ua_gp_cachep = kmem_cache_create("ua_gp_cachep", sizeof(gp_t), 0, SLAB_HWCACHE_ALIGN, NULL, NULL))) goto ua_init_caches_failed; else ua_gp_cache_allocated = 1; if (!(ua_sp_cachep)) if (!(ua_sp_cachep = kmem_cache_create("ua_sp_cachep", sizeof(sp_t), 0, SLAB_HWCACHE_ALIGN, NULL, NULL))) goto ua_init_caches_failed; else ua_sp_cache_allocated = 1; if (!(ua_as_cachep)) if (!(ua_as_cachep = kmem_cache_create("ua_as_cachep", sizeof(as_t), 0, SLAB_HWCACHE_ALIGN, NULL, NULL))) goto ua_init_caches_failed; else ua_as_cache_allocated = 1; if (!(ua_ap_cachep)) if (!(ua_ap_cachep = kmem_cache_create("ua_ap_cachep", sizeof(ap_t), 0, SLAB_HWCACHE_ALIGN, NULL, NULL))) goto ua_init_caches_failed; else ua_ap_cache_allocated = 1; if (!(ua_np_cachep)) if (!(ua_np_cachep = kmem_cache_create("ua_np_cachep", sizeof(np_t), 0, SLAB_HWCACHE_ALIGN, NULL, NULL))) goto ua_init_caches_failed; else ua_np_cache_allocated = 1; return (0); ua_init_caches_failed: ua_term_caches(); return (ENOMEM); } /* * ========================================================================= * * Struture Allocation and Deallocation * * ========================================================================= */ pp_t *ua_pp_alloc(void) { pp_t *pp; if ((pp = 
kmem_cache_alloc(ua_pp_cachep, SLAB_ATOMIC))) bzero(pp, sizeof(*pp)); return (pp); } xp_t *ua_xp_alloc(void) { xp_t *xp; if ((xp = kmem_cache_alloc(ua_xp_cachep, SLAB_ATOMIC))) bzero(xp, sizeof(*xp)); return (xp); } gp_t *ua_gp_alloc(void) { gp_t *gp; if ((gp = kmem_cache_alloc(ua_gp_cachep, SLAB_ATOMIC))) bzero(gp, sizeof(*gp)); return (gp); } as_t *ua_as_alloc(void) { as_t *as; if ((as = kmem_cache_alloc(ua_as_cachep, SLAB_ATOMIC))) bzero(as, sizeof(*as)); return (as); } ap_t *ua_ap_alloc(void) { ap_t *ap; if ((ap = kmem_cache_alloc(ua_ap_cachep, SLAB_ATOMIC))) bzero(ap, sizeof(*ap)); return (ap); } sp_t *ua_sp_alloc(void) { sp_t *sp; if ((sp = kmem_cache_alloc(ua_sp_cachep, SLAB_ATOMIC))) bzero(sp, sizeof(*sp)); return (sp); } np_t *ua_np_alloc(void) { np_t *np; if ((np = kmem_cache_alloc(ua_np_cachep, SLAB_ATOMIC))) bzero(np, sizeof(*np)); return (np); } void ua_pp_free(pp_t * pp) { kmem_cache_free(ua_pp_cachep, pp); } void ua_xp_free(xp_t * xp) { kmem_cache_free(ua_xp_cachep, xp); } void ua_gp_free(gp_t * gp) { kmem_cache_free(ua_gp_cachep, gp); } void ua_as_free(as_t * as) { kmem_cache_free(ua_as_cachep, as); } void ua_ap_free(ap_t * ap) { kmem_cache_free(ua_ap_cachep, ap); } void ua_sp_free(sp_t * sp) { kmem_cache_free(ua_sp_cachep, sp); } void ua_np_free(np_t * np) { kmem_cache_free(ua_np_cachep, np); } /* * ========================================================================= * * STREAMS Message Handling * * ========================================================================= * * Management Stream (LM) Message Handling * * ------------------------------------------------------------------------- * * UA --> LM Primitives * * ------------------------------------------------------------------------- */ static int lm_ok_ack(queue_t * q, int prim) { mblk_t *mp; lm_ok_ack_t *p; if ((mp = ua_allocb(q, sizeof(*p), BPRI_MED))) { mp->b_datap->db_type = M_PCPROTO; p = ((typeof(p)) mp->b_wptr)++; p->prim = LM_OK_ACK; p->correct_prim = prim; qreply(q, mp); 
return (0); } return (-ENOBUFS); } static int lm_error_ack(queue_t * q, int prim, int err) { mblk_t *mp; lm_error_ack_t *p; if ((mp = ua_allocb(q, sizeof(*p), BPRI_MED))) { mp->b_datap->db_type = M_PCPROTO; p = ((typeof(p)) mp->b_wptr)++; p->prim = LM_ERROR_ACK; p->error_prim = prim; p->lm_error = err < 0 ? LMSYSERR : err; p->unix_error = err < 0 ? -err : 0; qreply(q, mp); return (0); } return (-ENOBUFS); } static int lm_info_ack(queue_t * q, uint spid) { mblk_t *mp; lm_info_ack_t *p; if ((mp = ua_allocb(q, sizeof(*p), BPRI_MED))) { mp->b_datap->db_type = M_PCPROTO; p = ((typeof(p)) mp->b_wptr)++; p->prim = LM_INFO_ACK; p->spid = spid; qreply(q, mp); return (0); } return (-ENOBUFS); } /* * ------------------------------------------------------------------------- * * LM --> UA Primitives * * ------------------------------------------------------------------------- */ /* * LM_INFO_REQ * ------------------------------------------------------------------------- * Request information about the SP associated with the multiplexor instance. */ static int lm_info_req(queue_t * q, mblk_t * mp) { int err; sp_t *sp = ((dp_t *) q->q_ptr)->sp; lm_info_req_t *p = (typeof(p)) mp->b_rptr; size_t mlen = mp->b_wptr - mp->b_rptr; if (mlen < sizeof(*p)) { err = -EMSGSIZE; ptrace(("ERROR: M_PROTO to short\n")); goto lm_error_ack; } if (!sp) { err = -EFAULT; ptrace(("ERROR: Control stream without ASP\n")); goto lm_error_ack; } return lm_info_ack(q, sp->spid); lm_error_ack: return lm_error_ack(q, LM_INFO_REQ, err); } /* * LM_SG_ADD_REQ * ------------------------------------------------------------------------- * Requests that the driver add or agument the signalling gateway for the * protocol on whose user or control queue the request was issued. The SG * identifier is for further reference (such as in the LM_LINK_ADD_REQ). The * layer or local management queue normally uses this primitive to add a * signalling gateway which does not yet exist. 
This is normally only done on * a node acting as an ASP. The traffic mode describes the traffic mode to be * used across the SGP on the SG. */ static int lm_sg_add_req(queue_t * q, mblk_t * mp) { int err; sg_t *sg; np_t *np; sp_t *sp = ((pp_t *) q->q_ptr)->sp; lm_sg_add_req_t *p = (typeof(p)) mp->b_rptr; size_t mlen = mp->b_wptr - mp->b_rptr; if (mlen < sizeof(*p)) { err = -EMSGSIZE; ptrace(("ERROR: bad message size\n")); } if (!sp) { err = -EFAULT; ptrace(("ERROR: no SP pointer\n")); } /* check for existing sgid */ for (np = sp->np; np; np = np->sg.next) if (np->sg.sg->spid == p->sgid) break; if (!np && (np = ua_np_alloc())) { if ((sg = ua_sp_alloc())) { /* create sp linkage */ np->sp.sp = sp; if ((np->sp.next = sp->np)) np->sp.next->sp.prev = &np->sp.next; np->sp.prev = &sp->np; sp->np = np; /* create sg linkage */ np->sg.sg = sg; if ((np->sg.next = sg->np)) np->sg.next->sg.prev = &np->sg.next; np->sg.prev = &sg->np; sg->np = np; sg->spid = p->sgid; } else { ua_np_free(np); np = NULL; } } if (!np) { err = -ENOMEM; ptrace(("ERROR: cannot allocate memory\n")); goto lm_error_ack; } fixme(("We must do more for a change in traffic mode parameters\n")); /* * When we change the traffic mode, priority or costs * parameters, it will be necessary to determine which SGPs * are configured for the SG and rearrange how SGP are * treated. */ fixme(("We must do more for a change ASPID\n")); /* * When we change an ASPID parameter, we will have to * propagate that change down to the SGP associated with the * SG. If there are any SGP that have failed to come up due * to an Invalid ASPID cause should be reattempted. */ sg = np->sg.sg; sg->tmode = p->tmode; sg->prio = p->prio; sg->cost = p->cost; sg->aspid = p->aspid; return lm_ok_ack(q, LM_LINK_ADD_REQ); lm_error_ack: return lm_error_ack(q, LM_SG_ADD_REQ, err); } /* * LM_SG_DEL_REQ * ------------------------------------------------------------------------- * Deletes an SG datastructure (including all related datastructures). 
* * FIXME: Rather than returning an error when AS are allocated, we probably * want to walk the list of AS-Ps deleting each one as we go. We should * protect this operation with a FORCE flag. Any AS-Us that are active would * be deactivated if we are the only AS-P supporting the AS-U. Also, if we * have SGPs allocated to this SG, we want to walk the list and delete any * XPs if the FORCE flag is set. We will also have to unlink the XP from any * AS-Ps to which it is associated, but if we remove the AS-P list from the * SG first, we should not encounter any. Until this is done, SG cannot be * deleted until each of their AS-Ps and XPs are deleted. */ static int lm_sg_del_req(queue_t * q, mblk_t * mp) { int err; sg_t *sg; np_t *np; sp_t *sp = ((pp_t *) q->q_ptr)->sp; lm_sg_del_req_t *p = (typeof(p)) mp->b_rptr; size_t mlen = mp->b_wptr - mp->b_rptr; if (mlen < sizeof(*p)) { err = -EMSGSIZE; ptrace(("ERROR: bad message size\n")); goto lm_error_ack; } if (!sp) { /* look up the sg */ err = -EFAULT; ptrace(("ERROR: no SP pointer\n")); goto lm_error_ack; } for (np = sp->np; np; np = np->sg.next) if (np->sg.sg->spid == p->sgid) break; if (!np || !(sg = np->sg.sg)) { err = -EINVAL; ptrace(("ERROR: SG does not exist\n")); goto lm_error_ack; } if (sg->as) { err = -EPROTO; ptrace(("ERROR: SG has allocated AS-P\n")); goto lm_error_ack; } if (sg->xp) { err = -EPROTO; ptrace(("ERROR: SG has allocated XP\n")); goto lm_error_ack; } if (np->sp.prev) if ((*(np->sp.prev) = np->sp.next)) np->sp.next->sp.prev = np->sp.prev; if (np->sg.prev) if ((*(np->sg.prev) = np->sg.next)) np->sg.next->sg.prev = np->sg.prev; ua_np_free(np); ua_sp_free(sg); return lm_ok_ack(q, LM_SG_DEL_REQ); lm_error_ack: return lm_error_ack(q, LM_SG_DEL_REQ, err); } /* * LM_AS_ADD_REQ * ------------------------------------------------------------------------- * This command adds both an AS-U (if not already exists) and an AS-P. 
The
 * AS-U is associated with the ASP and the AS-P is associated with an SG that
 * was previously created with the LM_SG_ADD_REQ.  The command looks like
 * this:
 *
 * typedef struct
 * {
 *      ulong prim;             * always LM_AS_ADD_REQ *
 *      ulong asid;             * Application Server Index *
 *      ulong flags;            * Allocation flags *
 *      ulong rc;               * RC or IID for the AS *
 *      ulong tmode;            * Traffic Mode *
 *      ulong sgid;             * SG identifier *
 *      ulong muxid;            * multiplexor ID *
 *      ulong KEY_offset;       * Routing (Link) Key offset *
 *      ulong KEY_length;       * Routing (Link) Key length *
 * }
 * lm_as_add_req_t;
 *
 * ASID is an index that specifies the AS-U to create or augment.  FLAGS
 * specifies whether the AS should be statically configured (AS_STATIC) or
 * registered (AS_DYNAMIC) with this SG.  RC provides the routing context to
 * use when the AS is to be statically configured; it is ignored for
 * registered AS.  TMODE provides the traffic mode type to use for the
 * Application Server on this Signalling Gateway.  SGID provides the
 * identifier of the Signalling Gateway previously added with LM_SG_ADD_REQ
 * that provides service to this AS.  MUXID optionally specifies the
 * multiplexor id of an SS7-P (SS7 Provider) stream that provides service to
 * this AS; when MUXID is specified (non-zero) the flags, rc, and tmode
 * fields are ignored.  KEY_OFFSET and KEY_LENGTH specify the offset and
 * length of the Routing Key associated with this AS.
* */ static int lm_as_add_req(queue_t * q, mblk_t * mp) { int err; lp_t *lp = NULL; sg_t *sg = NULL; as_t *us, *ps; np_t *np; ap_t *ap; gp_t *gp; xp_t *xp; dp_t *dp = (dp_t *) q->q_ptr; sp_t *sp = dp->sp; lm_as_add_req_t *p = (typeof(p)) mp->b_rptr; size_t mlen = mp->b_wptr - mp->b_rptr; if (mlen < sizeof(*p)) { err = -EMSGSIZE; ptrace(("ERROR: bad message size\n")); goto lm_error_ack; } if (!sp) { err = -EFAULT; ptrace(("ERROR: no SP pointer\n")); goto lm_error_ack; } if (p->tmode && p->tmode != AS_TMODE_OVERRIDE && p->tmode != AS_TMODE_LOADSHARE && p->tmode != AS_TMODE_BROADCAST) { err = -EINVAL; ptrace(("ERROR: unsupported Traffic Mode\n")); goto lm_error_ack; } { size_t kmax = dp->drv->max_alen; size_t kmin = dp->drv->min_alen; size_t klen = p->KEY_length; size_t koff = p->KEY_offset; unsigned char *kptr = mp->b_rptr + koff; unsigned char *rk; if (klen > kmax || klen < kmin || koff + klen > mlen) { err = -EINVAL; ptrace(("ERROR: invalid RK format\n")); goto lm_error_ack; } if (p->muxid) { for (lp = ua_links_list; lp; lp = (lp_t *) lp->next) if (lp->id.mux == p->muxid) break; if (!lp) { err = -EINVAL; ptrace(("ERROR: bad MUXID\n")); goto lm_error_ack; } if (lp->as) { err = -EINVAL; ptrace(("ERROR: already AS for LP\n")); goto lm_error_ack; } } if (p->sgid) { for (np = sp->np; np; np = np->sg.next) if ((sg = np->sg.sg)) { if (sg->spid == p->sgid) break; else sg = NULL; } if (!sg) { err = -EINVAL; ptrace(("ERROR: bad SGID\n")); goto lm_error_ack; } if (!sg->xp) { err = -EINVAL; ptrace(("ERROR: no XP for SG\n")); goto lm_error_ack; } } if (!p->asid) { err = -EINVAL; ptrace(("ERROR: invalid ASID\n")); goto lm_error_ack; } for (us = sp->as; us; us = us->sp.next) if (us->rc == p->asid) break; if (!us) { /* create an AS */ if (!(us = ua_as_alloc())) { err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } if ((rk = kmalloc(klen, GFP_ATOMIC))) { ua_as_free(us); err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } bcopy(kptr, rk, klen); { 
us->flags = AS_STATIC; us->rc = p->asid; us->rk = rk; us->state = AS_DOWN; us->tmode = p->tmode ? AS_TMODE_DEFAULT : p->tmode; us->mina_count = 1; us->maxa_count = -1UL; us->sp.sp = sp; if ((us->sp.next = sp->as)) us->sp.next->sp.prev = &us->sp.next; us->sp.prev = &sp->as; sp->as = us; for (xp = sp->xp; xp; xp = xp->sp.next) { if ((gp = ua_gp_alloc())) { /* fill out GP */ gp->rc = p->asid; gp->flags = AS_STATIC; gp->state = AS_DOWN; /* link to AS */ gp->as.as = us; if ((gp->as.next = us->gp)) gp->as.next->as.prev = &gp->as.next; gp->as.prev = &us->gp; us->gp = gp; /* link to XP */ gp->xp.xp = xp; if ((gp->xp.next = xp->gp)) gp->xp.next->xp.prev = &gp->xp.next; gp->xp.prev = &xp->gp; xp->gp = gp; us->xpat_count++; us->xpdn_count++; xp->asat_count++; xp->asdn_count++; } /* FIXME handle allocation errors */ } } } } if (!(ps = ua_as_alloc())) { err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } if ((ap = ua_ap_alloc())) { ua_as_free(ps); err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } ps->flags = p->flags ? p->flags : AS_STATIC; ps->rc = p->rc; ps->rk = us->rk; ps->state = AS_DOWN; ps->tmode = p->tmode; ps->mina_count = 1; ps->maxa_count = -1UL; /* link AS-U */ ap->user.as = us; if ((ap->user.next = us->ap)) ap->user.next->user.prev = &ap->user.next; ap->user.prev = &us->ap; us->ap = ap; /* link AS-P */ ap->prov.as = ps; if ((ap->prov.next = ps->ap)) ap->prov.next->prov.prev = &ap->prov.next; ap->prov.prev = &ps->ap; ps->ap = ap; us->spat_count++; us->spdn_count++; ps->spat_count++; ps->spdn_count++; if (p->muxid) { /* we are creating an SS7-P */ if (!(xp = ua_xp_alloc())) { err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } if (!(gp = ua_gp_alloc())) { ua_xp_free(xp); err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } /* fill out XP */ xp->type = LM_SGP | LM_SS7; xp->aspid = 0; xp->prio = 0; xp->cost = 0; xp->state = ASP_DOWN; /* fill out GP */ gp->rc = p->rc; gp->flags = p->flags ? 
p->flags : AS_STATIC; gp->state = AS_DOWN; /* link to AS */ gp->as.as = ps; if ((gp->as.next = ps->gp)) gp->as.next->as.prev = &gp->as.next; gp->as.prev = &ps->gp; ps->gp = gp; /* link to XP */ gp->xp.xp = xp; if ((gp->xp.next = xp->gp)) gp->xp.next->xp.prev = &gp->xp.next; gp->xp.prev = &xp->gp; xp->gp = gp; ps->xpat_count++; ps->xpdn_count++; xp->asat_count++; xp->asdn_count++; /* link to LP */ lp->xp = xp; xp->pp.pp = lp; /* link to AS as well */ ps->pp = lp; lp->as = ps; /* enable queues */ lp->ops = dp->drv->l_ops_ss7; enableok(lp->rq); enableok(lp->wq); qenable(lp->rq); qenable(lp->wq); return lm_ok_ack(q, LM_AS_ADD_REQ); } else if (p->sgid) { /* we are creating an SG AS */ ps->sp.sp = sg; if ((ps->sp.next = sg->as)) ps->sp.next->sp.prev = &ps->sp.next; ps->sp.prev = &sg->as; sg->as = ps; for (xp = sg->xp; xp; xp = xp->sp.next) { if ((gp = ua_gp_alloc())) { /* fill out GP */ gp->rc = p->rc; gp->flags = p->flags ? p->flags : AS_STATIC; gp->state = AS_DOWN; /* link to AS */ gp->as.as = ps; if ((gp->as.next = ps->gp)) gp->as.next->as.prev = &gp->as.next; gp->as.prev = &ps->gp; ps->gp = gp; /* link to XP */ gp->xp.xp = xp; if ((gp->xp.next = xp->gp)) gp->xp.next->xp.prev = &gp->xp.next; gp->xp.prev = &xp->gp; xp->gp = gp; ps->xpat_count++; ps->xpdn_count++; xp->asat_count++; xp->asdn_count++; } /* FIXME handle allocation errors */ } return lm_ok_ack(q, LM_AS_ADD_REQ); } else { err = -EINVAL; ptrace(("ERROR: no SGID or MUXID\n")); } lm_error_ack: return lm_error_ack(q, LM_AS_ADD_REQ, err); } /* * LM_AS_DEL_REQ * ------------------------------------------------------------------------- * This function deletes an AS-P and possibly and AS-U from the UA * datastructures. It currently does not handle the situation where the AS * is in an active state. 
*/ static int lm_as_del_req(queue_t * q, mblk_t * mp) { int err; gp_t *gp; ap_t *ap; as_t *us, *ps; sp_t *sp = ((pp_t *) q->q_ptr)->sp; lm_as_del_req_t *p = (typeof(p)) mp->b_rptr; size_t mlen = mp->b_wptr - mp->b_rptr; if (mlen > sizeof(*p)) { err = -EMSGSIZE; ptrace(("ERROR: bad message size\n")); goto lm_error_ack; } if (!sp) { err = -EFAULT; ptrace(("ERROR: no SP pointer\n")); goto lm_error_ack; } if (!p->asid) { err = -EINVAL; ptrace(("ERROR: invalid ASID\n")); goto lm_error_ack; } for (us = sp->as; us; us = us->sp.next) if (us->rc == p->asid) break; if (!us) { err = -EINVAL; ptrace(("ERROR: couldn't find AS\n")); goto lm_error_ack; } { ap_t *ap_next; gp_t *gp_next; for (ap = us->ap; ap; ap = ap_next) { ap_next = ap->user.next; ps = ap->prov.as; for (gp = ps->gp; gp; gp = gp_next) { gp_next = gp->xp.next; if (gp->as.prev) if ((*(gp->as.prev) = gp->as.next)) gp->as.next->as.prev = gp->as.prev; if (gp->xp.prev) if ((*(gp->xp.prev) = gp->xp.next)) gp->xp.next->xp.prev = gp->xp.prev; if (gp->t_ack) untimeout(gp->t_ack); if (gp->t_hbt) untimeout(gp->t_hbt); ua_gp_free(gp); } if (ap->user.prev) if ((*(ap->user.prev) = ap->user.next)) ap->user.next->user.prev = ap->user.prev; if (ap->prov.prev) if ((*(ap->prov.prev) = ap->prov.next)) ap->prov.next->prov.prev = ap->prov.prev; { as_t *us = ap->user.as; as_t *ps = ap->prov.as; us->spat_count--; us->spdn_count--; ps->spat_count--; ps->spdn_count--; } ua_ap_free(ap); if (ps->sp.prev) if ((*(ps->sp.prev) = ps->sp.next)) ps->sp.next->sp.prev = ps->sp.prev; if (ps->t_pnd) untimeout(ps->t_pnd); ua_as_free(ps); } for (gp = us->gp; gp; gp = gp_next) { gp_next = gp->xp.next; if (gp->as.prev) if ((*(gp->as.prev) = gp->as.next)) gp->as.next->as.prev = gp->as.prev; if (gp->xp.prev) if ((*(gp->xp.prev) = gp->xp.next)) gp->xp.next->xp.prev = gp->xp.prev; if (gp->t_ack) untimeout(gp->t_ack); if (gp->t_hbt) untimeout(gp->t_hbt); ua_gp_free(gp); } if (us->rk) kfree(us->rk); if (us->sp.prev) if ((*(us->sp.prev) = us->sp.next)) 
us->sp.next->sp.prev = us->sp.prev; if (us->t_pnd) untimeout(us->t_pnd); ua_as_free(us); return lm_ok_ack(q, LM_AS_DEL_REQ); } lm_error_ack: return lm_error_ack(q, LM_AS_ADD_REQ, err); } /* * LM_PROC_ADD_REQ * ------------------------------------------------------------------------- * This request adds either an ASP to an SG, or an SGP to an SP. It * currently does not handle the situation where the SP or SG has provisioned * Application Servers. For now, XPs must be added before AS. */ static int lm_proc_add_req(queue_t * q, mblk_t * mp) { int err; xp_t *xp; gp_t *gp; sg_t *sg = NULL; np_t *np; sp_t *sp = ((pp_t *) q->q_ptr)->sp; lm_proc_add_req_t *p = (typeof(p)) mp->b_rptr; size_t mlen = mp->b_wptr - mp->b_rptr; if (mlen < sizeof(*p)) { err = -EMSGSIZE; ptrace(("ERROR: bad message size\n")); goto lm_error_ack; } if (!sp) { err = -EFAULT; ptrace(("ERROR: no SP pointer\n")); goto lm_error_ack; } switch (p->type) { case LM_ASP | LM_UAP: for (xp = sp->xp; xp; xp = xp->sp.next) if (xp->type == p->type && xp->aspid == p->aspid) break; if (xp) { err = -EINVAL; ptrace(("ERROR: ASP exists\n")); goto lm_error_ack; } if (!(xp = ua_xp_alloc())) { err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } if (!(gp = ua_gp_alloc())) { ua_xp_free(xp); err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } xp->type = p->type; xp->aspid = p->aspid; xp->prio = p->prio; xp->cost = p->cost; xp->state = ASP_DOWN; sp->xpdn_count++; /* link to sp */ xp->sp.sp = sp; if ((xp->sp.next = sp->xp)) xp->sp.next->sp.prev = &xp->sp.next; xp->sp.prev = &sp->xp; xp->pp.pp = NULL; xp->pp.next = NULL; xp->pp.prev = &xp->pp.next; sp->xp = xp; sp->xpat_count++; return lm_ok_ack(q, LM_PROC_ADD_REQ); case LM_SGP | LM_UAP: for (np = sp->np; np; np = np->sg.next) { if ((sg = np->sg.sg)) { if (sg->spid == p->spid) break; else sg = NULL; } } if (sg) { err = -EINVAL; ptrace(("ERROR: couldn't find SG\n")); goto lm_error_ack; } for (xp = sg->xp; xp; xp = xp->sp.next) if (xp->type == 
p->type && xp->aspid == p->aspid) break; if (xp) { err = -EINVAL; ptrace(("ERROR: SGP exists\n")); goto lm_error_ack; } if (!(xp = ua_xp_alloc())) { err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } if (!(gp = ua_gp_alloc())) { ua_xp_free(xp); err = -ENOMEM; ptrace(("ERROR: no memory\n")); goto lm_error_ack; } xp->type = p->type; xp->aspid = p->aspid; xp->prio = p->prio; xp->cost = p->cost; xp->state = ASP_DOWN; sg->xpdn_count++; /* link to sg */ xp->sp.sp = sg; if ((xp->sp.next = sg->xp)) xp->sp.next->sp.prev = &xp->sp.next; xp->sp.prev = &sg->xp; xp->pp.pp = NULL; xp->pp.next = NULL; xp->pp.prev = &xp->pp.next; sg->xp = xp; sg->xpat_count++; return lm_ok_ack(q, LM_PROC_ADD_REQ); } err = -EINVAL; ptrace(("ERROR: invalid or unsupported XP type\n")); lm_error_ack: return lm_error_ack(q, LM_PROC_ADD_REQ, err); } /* * LM_PROC_DEL_REQ * ------------------------------------------------------------------------- * This request deletes an ASP from an SG or an SGP from the SP. It does not * currently handle the case where the XP is active or is already associated * with an AS. 
*/ static int lm_proc_del_req(queue_t * q, mblk_t * mp) { int err; xp_t *xp; sg_t *sg = NULL; np_t *np; sp_t *sp = ((pp_t *) q->q_ptr)->sp; lm_proc_del_req_t *p = (typeof(p)) mp->b_rptr; size_t mlen = mp->b_wptr - mp->b_rptr; if (mlen < sizeof(*p)) { err = -EMSGSIZE; ptrace(("ERROR: bad message size\n")); goto lm_error_ack; } if (!sp) { err = -EFAULT; ptrace(("ERROR: no SP pointer\n")); goto lm_error_ack; } switch (p->type) { case LM_ASP | LM_UAP: for (xp = sp->xp; xp; xp = xp->sp.next) if (xp->type == p->type && xp->aspid == p->aspid) break; if (xp) { err = -EINVAL; ptrace(("ERROR: ASP not found\n")); goto lm_error_ack; } if (xp->pp.pp) { err = -EINVAL; ptrace(("ERROR: ASP has LP\n")); goto lm_error_ack; } if (xp->gp) { err = -EINVAL; ptrace(("ERROR: ASP has allocated AS\n")); goto lm_error_ack; } // if ( xp->state == // ASP_DOWN ) { sp->xpdn_count--; sp->xpat_count--; if (xp->sp.prev) if ((*(xp->sp.prev) = xp->sp.next)) xp->sp.next->sp.prev = xp->sp.prev; if (xp->pp.prev) if ((*(xp->pp.prev) = xp->pp.next)) xp->pp.next->pp.prev = xp->pp.prev; if (xp->t_ack) untimeout(xp->t_ack); if (xp->t_hbt) untimeout(xp->t_hbt); ua_xp_free(xp); return lm_ok_ack(q, LM_PROC_DEL_REQ); // } err = -EINVAL; // ptrace(("ERROR: ASP // // // // not ASP_DOWN\n" )); // break; case LM_SGP | LM_UAP: for (np = sp->np; np; np = np->sg.next) if ((sg = np->sg.sg)) { if (sg->spid == p->spid) break; else sg = NULL; } if (!sg) { err = -EINVAL; ptrace(("ERROR: couldn't find SG\n")); goto lm_error_ack; } for (xp = sg->xp; xp; xp = xp->sp.next) if (xp->type == p->type && xp->aspid == p->aspid) break; if (!xp) { err = -EINVAL; ptrace(("ERROR: SGP not found\n")); goto lm_error_ack; } if (xp->pp.pp) { err = -EINVAL; ptrace(("ERROR: SGP has LP\n")); goto lm_error_ack; } if (xp->gp) { err = -EINVAL; ptrace(("ERROR: SGP has allocated AS\n")); goto lm_error_ack; } // if ( // xp->state // == ASP_DOWN // // // // ) { sg->xpdn_count--; sg->xpat_count--; if (xp->sp.prev) if ((*(xp->sp.prev) = xp->sp.next)) 
xp->sp.next->sp.prev = xp->sp.prev; if (xp->pp.prev) if ((*(xp->pp.prev) = xp->pp.next)) xp->pp.next->pp.prev = xp->pp.prev; if (xp->t_ack) untimeout(xp->t_ack); if (xp->t_hbt) untimeout(xp->t_hbt); ua_xp_free(xp); return lm_ok_ack(q, LM_PROC_DEL_REQ); // } err = // -EINVAL; // ptrace(("ERROR: // // // // SGP not // ASP_DOWN\n" // )); break; } err = -EINVAL; ptrace(("ERROR: invalid or unsupported XP type\n")); lm_error_ack: return lm_error_ack(q, LM_PROC_DEL_REQ, err); } /* * LM_LINK_ADD_REQ * ----------------------------------- * This request associates an active SCTP STREAM to an ASP or SGP. If we are * adding an SGP, sending an ASPUP REQ is automatic. */ static int lm_link_add_req(queue_t * q, mblk_t * mp) { int err; lp_t *lp; np_t *np; xp_t *xp; sp_t *sg = NULL; dp_t *dp = (dp_t *) q->q_ptr; sp_t *sp = dp->sp; size_t mlen = mp->b_wptr - mp->b_rptr; lm_link_add_req_t *p = (typeof(p)) mp->b_rptr; if (mlen < sizeof(*p)) { err = -EMSGSIZE; ptrace(("ERROR: bad message size\n")); goto lm_error_ack; } if (!sp) { err = -EFAULT; ptrace(("ERROR: no SP pointer\n")); goto lm_error_ack; } for (lp = ua_links_list; lp; lp = (lp_t *) lp->next) if (lp->id.mux == p->muxid) break; if (!lp) { err = -EINVAL; ptrace(("ERROR: can't find muxid\n")); goto lm_error_ack; } switch (p->type) { case LM_ASP | LM_UAP: if (p->aspid) { for (xp = sp->xp; xp; xp = xp->sp.next) if (xp->type == p->type && xp->aspid == p->aspid) break; if (xp) { err = -EINVAL; ptrace(("ERROR: ASP not found\n")); goto lm_error_ack; } if (xp->pp.pp) { err = -EINVAL; ptrace(("ERROR: ASP has LP\n")); goto lm_error_ack; } xp->pp.pp = lp; if ((xp->pp.next = lp->xp)) xp->pp.next->pp.prev = &xp->pp.next; xp->pp.prev = &lp->xp; lp->xp = xp; lp->ops = dp->drv->l_ops_asp; enableok(lp->rq); enableok(lp->wq); qenable(lp->rq); qenable(lp->wq); return lm_ok_ack(q, LM_LINK_ADD_REQ); } /* let's find ASP on ASPUP w/ ASP Id */ lp->ops = dp->drv->l_ops_asp; enableok(lp->rq); enableok(lp->wq); qenable(lp->rq); qenable(lp->wq); return 
lm_ok_ack(q, LM_LINK_ADD_REQ); case LM_SGP | LM_UAP: for (np = sp->np; np; np = np->sg.next) if ((sg = np->sg.sg)) { if (sg->spid == p->spid) break; else sg = NULL; } if (!sg) { err = -EINVAL; ptrace(("ERROR: couldn't find SG\n")); goto lm_error_ack; } for (xp = sg->xp; xp; xp = xp->sp.next) if (xp->type == p->type && xp->aspid == p->aspid) break; if (!xp) { err = -EINVAL; ptrace(("ERROR: SGP not found\n")); goto lm_error_ack; } if (xp->pp.pp) { err = -EINVAL; ptrace(("ERROR: SGP has LP\n")); goto lm_error_ack; } enableok(lp->rq); enableok(lp->wq); /* We send an ASPUP as soon as an SGP is linked. */ if ((err = ua_send_aspup_req(lp->wq, xp->aspid))) return (err); xp->t_ack = timeout(ua_xp_ack_timeout, (caddr_t) xp, ua_t_ack_val); xp->state = ASP_WACK_ASPUP; xp->pp.pp = lp; if ((xp->pp.next = lp->xp)) xp->pp.next->pp.prev = &xp->pp.next; xp->pp.prev = &lp->xp; lp->xp = xp; lp->ops = dp->drv->l_ops_sgp; qenable(lp->rq); qenable(lp->wq); return lm_ok_ack(q, LM_LINK_ADD_REQ); } err = -EINVAL; ptrace(("ERROR: invalid or unsupported LP type\n")); lm_error_ack: return lm_error_ack(q, LM_LINK_ADD_REQ, err); } /* * LM_LINK_DEL_REQ * ------------------------------------------------------------------------- * This request removes an SCTP STREAM from an SGP or ASP. Currently we do * not hanlde the situation where the SGP or ASP is active. * * TODO: Handle the situation where the SGP or ASP is active. In this case * we want to deactivate for all the AS associated with the XP and then move * the XP to the down state before removeal. We should implement a FORCE * flag, so that the operation will fail unless the XP is in the DOWN state * or if the FORCE flag is set. 
 */
static int lm_link_del_req(queue_t * q, mblk_t * mp)
{
	int err;
	lp_t *lp;
	xp_t *xp;
	sp_t *sp = ((pp_t *) q->q_ptr)->sp;
	size_t mlen = mp->b_wptr - mp->b_rptr;
	lm_link_del_req_t *p = (typeof(p)) mp->b_rptr;

	if (mlen < sizeof(*p)) {
		err = -EMSGSIZE;
		ptrace(("ERROR: bad message size\n"));
		goto lm_error_ack;
	}
	if (!sp) {
		err = -EFAULT;
		ptrace(("ERROR: no SP pointer\n"));
		goto lm_error_ack;
	}
	/* locate the lower stream by its multiplexor id */
	for (lp = ua_links_list; lp; lp = (lp_t *) lp->next)
		if (lp->id.mux == p->muxid)
			break;
	if (!lp) {
		err = -EINVAL;
		ptrace(("ERROR: can't find muxid\n"));
		goto lm_error_ack;
	}
	if (!(xp = lp->xp)) {
		err = -EINVAL;
		ptrace(("ERROR: no XP linked\n"));
		goto lm_error_ack;
	}
	/* refuse to unlink while the XP is not in the down state */
	if (xp->state != ASP_DOWN) {
		err = -EPROTO;
		ptrace(("ERROR: XP is ASPUP\n"));
		goto lm_error_ack;
	}
	{
		ops_t nullops = { NULL, NULL };

		/* detach the XP from the lower stream and reset its
		   pp linkage to the empty-list state */
		xp->pp.pp = NULL;
		if (xp->pp.prev)
			if ((*(xp->pp.prev) = xp->pp.next))
				xp->pp.next->pp.prev = xp->pp.prev;
		xp->pp.next = NULL;
		xp->pp.prev = &xp->pp.next;
		lp->xp = NULL;
		lp->ops = nullops;	/* lower stream no longer has handlers */
		return lm_ok_ack(q, LM_LINK_DEL_REQ);
	}
      lm_error_ack:
	return lm_error_ack(q, LM_LINK_DEL_REQ, err);
}

/*
 * LM_ROUTE_ADD_REQ
 * -------------------------------------------------------------------------
 * This request is unnecessary and will be removed.
 */
static int lm_route_add_req(queue_t * q, mblk_t * mp)
{
	/* unimplemented stub */
	fixme(("Not complete yet\n"));
	return (-EFAULT);
}

/*
 * LM_ROUTE_DEL_REQ
 * -------------------------------------------------------------------------
 * This request is unnecessary and will be removed.
 */
static int lm_route_del_req(queue_t * q, mblk_t * mp)
{
	/* unimplemented stub */
	fixme(("Not complete yet\n"));
	return (-EFAULT);
}

/*
 * LM_REG_RES
 * -------------------------------------------------------------------------
 * This primitive is a response to a previous LM_REG_REQ from the SP.  When
 * an SP acting as an SG receives a registration request from an ASP or a
 * bind request from an SS7-U which cannot be fulfilled, the SP sends a
 * LM_REG_REQ to layer management requesting the addition of a corresponding
 * AS-U structure.  Layer management responds with either a LM_REG_RES
 * indicating the AS-U against which to register or bind, or a LM_REG_REF
 * refusing the registration request.
 *
 * This capability has not been completed yet.
 */
static int lm_reg_res(queue_t * q, mblk_t * mp)
{
	/* unimplemented stub */
	fixme(("Not complete yet\n"));
	return (-EFAULT);
}

/*
 * LM_REG_REF
 * -------------------------------------------------------------------------
 * This primitive is a refusal by layer management of a previous LM_REG_REQ.
 *
 * This capability has not been completed yet.
 */
static int lm_reg_ref(queue_t * q, mblk_t * mp)
{
	/* unimplemented stub */
	fixme(("Not complete yet\n"));
	return (-EFAULT);
}

/*
 * M_IOCTL Handling
 * -------------------------------------------------------------------------
 * This function is responsible for linking transport and SS7 provider
 * STREAMS under the multiplexor.  We do not characterize these STREAMS yet,
 * we simply link them.  They will be later characterized with an
 * LM_LINK_ADD_REQ or with an LM_AS_ADD_REQ, both of which contain a muxid
 * that will refer to one of these linked STREAMS.
 *
 * TODO: Currently we are not handling the case where the STREAM to be
 * unlinked is associated with an XP in an ACTIVE state, or is associated
 * with an AS-P in an ACTIVE state.  It should be fairly straightforward to
 * call one of our inactive_req state machine functions before unlinking to
 * ensure that our STREAM is not active before unlinking.
*/ extern int lm_w_ioctl(queue_t * q, mblk_t * mp) { int err = EOPNOTSUPP; struct iocblk *iocp = (struct iocblk *) mp->b_rptr; switch (_IOC_TYPE(iocp->ioc_cmd) << 8) { case __SID: { lp_t *lp; dp_t *dp = (dp_t *) q->q_ptr; sp_t *sp = dp->sp; struct linkblk *lb; if (!mp->b_cont) { err = EINVAL; break; } lb = (struct linkblk *) mp->b_cont->b_rptr; switch (iocp->ioc_cmd) { case I_PLINK: err = EPERM; if (iocp->ioc_cr->cr_uid != 0) { ptrace(("ERROR: Non-root attempt to I_PLINK\n")); break; } case I_LINK: err = 0; if ((lp = ua_pp_alloc())) { if ((lp->next = ua_links_list)) lp->next->prev = &lp->next; lp->prev = &ua_links_list; ua_links_list = lp; lp->id.mux = lb->l_index; lp->rq = RD(lb->l_qbot); lp->wq = WR(lb->l_qbot); lp->rq->q_ptr = lp; lp->wq->q_ptr = lp; lp->sp = sp; break; } err = ENOMEM; break; case I_PUNLINK: err = EPERM; if (iocp->ioc_cr->cr_uid != 0) { ptrace(("ERROR: Non-root attempt to I_PUNLINK\n")); break; } case I_UNLINK: err = 0; for (lp = ua_links_list; lp; lp = (lp_t *) lp->next) if (lp->id.mux == lb->l_index) break; if (!lp) { err = EINVAL; ptrace(("ERROR: Couldn't find I_UNLINK muxid\n")); break; } if (lp->xp) { xp_t *xp = lp->xp; xp->pp.pp = NULL; if (xp->pp.prev) if ((*(xp->pp.prev) = xp->pp.next)) xp->pp.next->pp.prev = xp->pp.prev; xp->pp.next = NULL; xp->pp.prev = &xp->pp.next; lp->xp = NULL; } fixme(("DELETE THE CORRECT STRUCTURES\n")); if (lp->xp) { lp->xp->pp.pp = NULL; lp->xp = NULL; } if (lp->prev) if ((*(lp->prev) = lp->next)) lp->next->prev = lp->prev; lp->prev = NULL; lp->next = NULL; if (lp->rbid) unbufcall(lp->rbid); if (lp->wbid) unbufcall(lp->wbid); lp->rq->q_ptr = NULL; lp->wq->q_ptr = NULL; ua_pp_free(lp); enableok(lp->rq); enableok(lp->wq); qenable(lp->rq); qenable(lp->wq); break; } } } if (err) { mp->b_datap->db_type = M_IOCNAK; iocp->ioc_error = err; iocp->ioc_rval = -1; iocp->ioc_count = 0; } else { mp->b_datap->db_type = M_IOCACK; iocp->ioc_error = 0; iocp->ioc_rval = 0; iocp->ioc_count = 0; } qreply(q, mp); return (1); } 
/* dispatch an LM write-side M_PROTO message to its primitive handler */
static int lm_w_proto(queue_t * q, mblk_t * mp)
{
	switch (*((long *) mp->b_rptr)) {
	case LM_INFO_REQ:
		return lm_info_req(q, mp);
	case LM_SG_ADD_REQ:
		return lm_sg_add_req(q, mp);
	case LM_SG_DEL_REQ:
		return lm_sg_del_req(q, mp);
	case LM_AS_ADD_REQ:
		return lm_as_add_req(q, mp);
	case LM_AS_DEL_REQ:
		return lm_as_del_req(q, mp);
	case LM_PROC_ADD_REQ:
		return lm_proc_add_req(q, mp);
	case LM_PROC_DEL_REQ:
		return lm_proc_del_req(q, mp);
	case LM_LINK_ADD_REQ:
		return lm_link_add_req(q, mp);
	case LM_LINK_DEL_REQ:
		return lm_link_del_req(q, mp);
	case LM_ROUTE_ADD_REQ:
		return lm_route_add_req(q, mp);
	case LM_ROUTE_DEL_REQ:
		return lm_route_del_req(q, mp);
	case LM_REG_RES:
		return lm_reg_res(q, mp);
	case LM_REG_REF:
		return lm_reg_ref(q, mp);
	}
	return (-EOPNOTSUPP);
}

/* M_PCPROTO is handled identically to M_PROTO on the LM stream */
static int lm_w_pcproto(queue_t * q, mblk_t * mp)
{
	return lm_w_proto(q, mp);
}

/* write-side message-type dispatcher for the LM stream */
extern int lm_w_prim(queue_t * q, mblk_t * mp)
{
	switch (mp->b_datap->db_type) {
	case M_PROTO:
		return lm_w_proto(q, mp);
	case M_PCPROTO:
		return lm_w_pcproto(q, mp);
	case M_IOCTL:
		return lm_w_ioctl(q, mp);
	case M_FLUSH:
		return ua_w_flush(q, mp);
	}
	return (-EOPNOTSUPP);
}

/* read-side message-type dispatcher for the LM stream */
extern int lm_r_prim(queue_t * q, mblk_t * mp)
{
	switch (mp->b_datap->db_type) {
	case M_FLUSH:
		return ua_r_flush(q, mp);
	}
	/* NOTE(review): bare 5 - presumably the QR_PASSALONG return code
	   (pass unhandled messages along); confirm against the QR_* enum
	   and replace with the named constant. */
	return (5);
}

/*
 * -------------------------------------------------------------------------
 *
 * M_FLUSH Handling
 *
 * -------------------------------------------------------------------------
 */
/* common M_FLUSH processing; flag/oflag select this side vs the other side */
static int ua_m_flush(queue_t * q, mblk_t * mp, const uint8_t flag, const uint8_t oflag)
{
	if (mp->b_rptr[0] & flag) {
		/* flush this side (one band or all) */
		if (mp->b_rptr[0] & FLUSHBAND)
			flushband(q, mp->b_rptr[1], FLUSHALL);
		else
			flushq(q, FLUSHALL);
		if (q->q_next) {
			putnext(q, mp);
			return (1);
		}
		mp->b_rptr[0] &= ~flag;
	}
	if (mp->b_rptr[0] & oflag && !(mp->b_flag & MSGNOLOOP)) {
		/* flush the other side and loop the message back once */
		queue_t *oq = OTHERQ(q);

		if (mp->b_rptr[0] & FLUSHBAND)
			flushband(oq, mp->b_rptr[1], FLUSHALL);
		else
			flushq(oq, FLUSHALL);
		mp->b_flag |= MSGNOLOOP;
		qreply(q, mp);
		return (1);
	}
	return (0);
}

extern int ua_w_flush(queue_t * q, mblk_t * mp)
{
	return ua_m_flush(q, mp, FLUSHW, FLUSHR);
}

extern int ua_r_flush(queue_t * q, mblk_t * mp)
{
	return ua_m_flush(q, mp, FLUSHR, FLUSHW);
}

/*
 * =========================================================================
 *
 * PUTQ and SRVQ
 *
 * =========================================================================
 */
/*
 * UA PUTQ
 * -----------------------------------
 * Generic put procedure: process priority messages immediately when the
 * queue is empty, otherwise queue for the service procedure.  The switch
 * cases are deliberately out of order so that some of them can fall
 * through into the default/queueing actions.
 */
static int ua_putq(queue_t * q, mblk_t * mp, int (*proc) (queue_t *, mblk_t *))
{
	ensure(q, return (-EFAULT));
	ensure(mp, return (-EFAULT));
	if (mp->b_datap->db_type >= QPCTL && !q->q_count) {
		int rtn;

		switch ((rtn = (*proc) (q, mp))) {
		case QR_DONE:
			freemsg(mp);
		case QR_ABSORBED:
			break;
		case QR_TRIMMED:
			freeb(mp);
			break;
		case QR_LOOP:
			if (!q->q_next) {
				qreply(q, mp);
				break;
			}
		case QR_PASSALONG:
			if (q->q_next) {
				putnext(q, mp);
				break;
			}
			rtn = -EOPNOTSUPP;
		default:
			ptrace(("Error (dropping) %d\n", rtn));
			freemsg(mp);
			break;
		case QR_DISABLE:
			putq(q, mp);
			rtn = 0;
			break;
		case QR_PASSFLOW:
			if (mp->b_datap->db_type >= QPCTL || canputnext(q)) {
				putnext(q, mp);
				break;
			}
		case -ENOBUFS:	/* caller must schedule bufcall */
		case -EBUSY:	/* caller must have failed canput */
		case -EAGAIN:	/* caller must re-enable queue */
		case -ENOMEM:	/* caller must re-enable queue */
			putq(q, mp);
			break;
		}
		return (rtn);
	} else {
		seldom();
		putq(q, mp);
		return (0);
	}
}

/*
 * UA SRVQ
 * -----------------------------------
 * Generic service procedure: drain the queue through the supplied handler,
 * honouring the same QR_* return-code protocol as ua_putq above.
 */
static int ua_srvq(queue_t * q, int (*proc) (queue_t *, mblk_t *))
{
	ensure(q, return (-EFAULT));
	{
		int rtn;
		mblk_t *mp;

		while ((mp = getq(q))) {
			switch ((rtn = (*proc) (q, mp))) {
			case QR_DONE:
				freemsg(mp);
			case QR_ABSORBED:
				continue;
			case QR_TRIMMED:
				freeb(mp);
				continue;
			case QR_LOOP:
				if (!q->q_next) {
					qreply(q, mp);
					continue;
				}
			case QR_PASSALONG:
				if (q->q_next) {
					putnext(q, mp);
					continue;
				}
			default:
				ptrace(("Error (q dropping) %d\n", rtn));
				freemsg(mp);
				continue;
			case QR_DISABLE:
				ptrace(("Error (q disabling) %d\n", rtn));
				noenable(q);
				putbq(q, mp);
				break;
			case QR_PASSFLOW:
				if (mp->b_datap->db_type >= QPCTL || canputnext(q)) {
					putnext(q, mp);
					break;
				}
			case -ENOBUFS:	/* caller must schedule bufcall */
			case -EBUSY:	/* caller must have failed canput */
			case -EAGAIN:	/* caller must re-enable queue */
			case -ENOMEM:	/* caller must re-enable queue */
				ptrace(("Putting back, queue stalled\n"));
				if (mp->b_datap->db_type < QPCTL) {
					putbq(q, mp);
					return (rtn);
				}
				/* requeue a stalled M_PCPROTO as band-255
				   M_PROTO so it cannot block the queue */
				if (mp->b_datap->db_type == M_PCPROTO) {
					mp->b_datap->db_type = M_PROTO;
					mp->b_band = 255;
					putq(q, mp);
					break;
				}
				ptrace(("Error (q dropping) %d\n", rtn));
				freemsg(mp);
				continue;
			}
		}
		return (0);
	}
}

/* STREAMS entry points: delegate to the per-stream ops vector */
static INT ua_rput(queue_t * q, mblk_t * mp)
{
	pp_t *pp = (pp_t *) q->q_ptr;

	ensure(pp, return ((INT) (-EFAULT)));
	ensure(pp->ops.r_prim, return ((INT) (-EFAULT)));
	return (INT) ua_putq(q, mp, pp->ops.r_prim);
}

static INT ua_rsrv(queue_t * q)
{
	pp_t *pp = (pp_t *) q->q_ptr;

	ensure(pp, return ((INT) (-EFAULT)));
	ensure(pp->ops.r_prim, return ((INT) (-EFAULT)));
	return (INT) ua_srvq(q, pp->ops.r_prim);
}

static INT ua_wput(queue_t * q, mblk_t * mp)
{
	pp_t *pp = (pp_t *) q->q_ptr;

	ensure(pp, return ((INT) (-EFAULT)));
	ensure(pp->ops.w_prim, return ((INT) (-EFAULT)));
	return (INT) ua_putq(q, mp, pp->ops.w_prim);
}

static INT ua_wsrv(queue_t * q)
{
	pp_t *pp = (pp_t *) q->q_ptr;

	ensure(pp, return ((INT) (-EFAULT)));
	ensure(pp->ops.w_prim, return ((INT) (-EFAULT)));
	return (INT) ua_srvq(q, pp->ops.w_prim);
}

/*
 * =========================================================================
 *
 * OPEN and CLOSE
 *
 * =========================================================================
 */
/* list of open upper streams and of linked lower streams */
ua_t *ua_opens_list = NULL;
ua_t *ua_links_list = NULL;

/* table of UA sub-drivers serviced by this multiplexor; only M3UA is
   currently enabled */
drv_t *ua_drivers[] = {
	// &ualm_driver,
	// &m2ua_driver,
	&m3ua_driver,
	// &isua_driver,
	// &sua_driver,
	// &tua_driver,
	NULL
};

/*
 * UA OPEN
 * -------------------------------------------------------------------------
 */
static int ua_open(queue_t * q, dev_t * devp, int flag, int sflag, cred_t * crp)
{
	int cmajor = getmajor(*devp);
	int cminor = getminor(*devp);
	int spid = 0;
	xp_t *xp;
dp_t *dp, **dpp; drv_t **d; for (d = ua_drivers; *d; d++) if (cmajor >= (*d)->cmajor && cmajor < (*d)->cmajor + (*d)->nmajor) break; if (!(*d)) return (ENXIO); if (sflag == MODOPEN || WR(q)->q_next) { ptrace(("ERROR: Can't open as module\n")); return (EIO); } if (cmajor == UALM_CMAJOR && crp->cr_uid != 0) { ptrace(("ERROR: Can't open LM without r00t permission\n")); return (EPERM); } if (q->q_ptr != NULL) { // ptrace(("INFO: Device already open\n")); return (0); } if (sflag == CLONEOPEN) { // ptrace(("INFO: Clone open in effect\n")); cmajor = (*d)->cmajor; cminor = 0; } if (cmajor == (*d)->cmajor && cminor <= NUM_SPIDS) { // ptrace(("INFO: Clone minor opened\n")); if (cminor) spid = cminor - 1; else { for (spid = 0; spid < NUM_SPIDS && (*d)->sp[spid]; spid++); if (spid >= NUM_SPIDS) { ptrace(("ERROR: No SPIDs available\n")); return (ENXIO); } } sflag = CLONEOPEN; cminor = NUM_SPIDS + 1; } for (dpp = &ua_opens_list; *dpp; dpp = &(*dpp)->next) { ushort dmajor = getmajor((*dpp)->id.dev); // ptrace(("DEBUG: Testing against major %d\n", dmajor)); if (cmajor < dmajor) break; if (cmajor == dmajor) { ushort dminor = getminor((*dpp)->id.dev); // ptrace(("DEBUG: Testing against minor %d\n", dminor)); if (cminor < dminor) break; if (cminor == dminor) { if (sflag != CLONEOPEN) { ptrace(("ERROR: Requested device in use\n")); return (EIO); } if (++cminor > (*d)->nminor) { if (++cmajor >= (*d)->cmajor + (*d)->nmajor) break; cminor = 0; } continue; } } } if (cmajor >= (*d)->cmajor + (*d)->nmajor) { ptrace(("ERROR: No devices available\n")); return (ENXIO); } if (!(dp = ua_pp_alloc())) { ptrace(("ERROR: Could not allocate cache entry\n")); return (ENOMEM); } if ((xp = ua_xp_alloc())) { ua_pp_free(dp); ptrace(("ERROR: Could not allocate cache entry\n")); return (ENOMEM); } /* fill out XP */ xp->type = LM_ASP | LM_SS7; xp->aspid = 0; /* local ASP */ xp->prio = 0; /* priority (highest) */ xp->cost = 0; /* cost (lowest) */ xp->state = ASP_DOWN; /* starts life in down state */ /* link 
to DP */ if ((xp->pp.next = dp->xp)) xp->pp.next->pp.prev = &xp->pp.next; xp->pp.prev = &dp->xp; xp->pp.pp = dp; dp->xp = xp; { sp_t *sp; // ptrace(("DEBUG: Checking for allocated SP\n")); if ((sp = (*d)->sp[spid])) { // ptrace(("DEBUG: Allocating SS7-U for SPID = // // // // %d\n",spid)); dp->ops = (*d)->u_ops_ss7; } else { // ptrace(("DEBUG: Allocating SP for SPID = // %d\n",spid)); if (!(sp = ua_sp_alloc())) { ua_pp_free(dp); return (ENOMEM); } sp->spid = spid; (*d)->sp[spid] = sp; dp->ops = (*d)->u_ops_lm; } dp->sp = sp; sp->xpat_count++; sp->xpdn_count++; xp->asat_count++; xp->asdn_count++; } *devp = makedevice(cmajor, cminor); ptrace(("INFO: Making device major %d, minor %d\n", cmajor, cminor)); dp->id.dev = *devp; dp->rq = RD(q); dp->wq = WR(q); dp->rq->q_ptr = dp; dp->wq->q_ptr = dp; dp->drv = (*d); if ((dp->next = *dpp)) dp->next->prev = &dp->next; dp->prev = dpp; *dpp = dp; unusual(!*dpp); return (0); } /* * UA CLOSE * ------------------------------------------------------------------------- */ static int ua_close(queue_t * q, int flag, cred_t * crp) { dp_t *dp = (dp_t *) q->q_ptr; // ptrace(("Driver close\n")); if ((*(dp->prev) = dp->next)) dp->next->prev = dp->prev; dp->prev = NULL; dp->next = NULL; if (dp->rbid) unbufcall(dp->rbid); if (dp->wbid) unbufcall(dp->wbid); dp->rq->q_ptr = NULL; dp->wq->q_ptr = NULL; { /* we are closing a control stream */ sp_t *sp; if (!(sp = dp->sp)) { ptrace(("ERROR: Arrgh! No SP!\n")); } else { /* deallocate SP if we are the last stream using it */ // ptrace(("Check deallocation of SP\n")); if (!(--sp->xpat_count)) { drv_t *d = dp->drv; // ptrace(("Deallocating SP SPID = %d\n",sp->spid)); if (sp->as) { fixme(("ERROR: Arrgh! We have allocated AS! as = %p\n", sp->as)); } if (sp->np) { fixme(("ERROR: Arrgh! We have allocated SG! 
np = %p\n", sp->np)); } ua_sp_free(sp); d->sp[sp->spid] = NULL; } } } ua_pp_free(dp); return (0); } /* * ========================================================================= * * LiS Module Initialization * * ========================================================================= */ static int ua_initialized = 0; int ua_init(void) { int rtn; int err = 0; if (!ua_initialized) { drv_t **d; for (d = ua_drivers; *d; d++) { int cmajor, nminor; for (nminor = (*d)->nminor, cmajor = (*d)->cmajor; cmajor < (*d)->cmajor + (*d)->nmajor; cmajor++) { if ((rtn = lis_register_strdev(cmajor, &ua_info, nminor, (*d)->name)) < 0) { cmn_err(CE_WARN, "ua: couldn't register %s driver cmajor=%d\n", (*d)->name, cmajor); err = -rtn; } } } if (!err) err = ua_init_caches(); if (!err) ua_initialized = 1; } return (err); } void ua_terminate(void) { if (ua_initialized) { drv_t **d; for (d = ua_drivers; *d; d++) { int cmajor; for (cmajor = (*d)->cmajor; cmajor < (*d)->cmajor + (*d)->nmajor; cmajor++) { if (lis_unregister_strdev(cmajor)) { cmn_err(CE_WARN, "ua: couldn't unregister %s driver cmajor=%d\n", (*d)->name, cmajor); } } } ua_term_caches(); ua_initialized = 0; } return; } /* * ========================================================================= * * Kernel Module Initialization * * ========================================================================= */ int init_module(void) { cmn_err(CE_NOTE, UA_BANNER); return ua_init(); } void cleanup_module(void) { return ua_terminate(); }
|
|||||||||||||||||||||||||||
OpenSS7 SS7 for the Common Man |
Home | Overview | Status | News | Documentation | Resources | About | ||||||||||||||||||||
© Copyright 1997-2004,OpenSS7 Corporation, All Rights Reserved. |