/*
 * Copyright (C) 2010,2012 Avencall
 * Author: Guillaume Knispel
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/* GRUIK HACK UGLY grrrr: */
/* reference channel selection: REF_0_0 = port 0 / timeslot 0,
 * REF_1_1 = port 1 / timeslot 1 (see xivo_tdm_trigger_handler) */
#define REF_0_0
#undef REF_1_1

#define DBG_TRACE_ISR
// #undef DBG_TRACE_ISR

#ifdef DBG_TRACE_ISR

#include			// rdtscl

static ulong tstamp;
static int dump_count;
module_param(dump_count, int, 0664);
MODULE_PARM_DESC(dump_count, "number of traces to emit "
			     "(can be reset at runtime via /sys)");

#define DBG_TRACE(...)					\
	do {						\
		if (dump_count-- > 0) {			\
			printk(KERN_ERR __VA_ARGS__);	\
		}					\
	} while (0)

#else

#define DBG_TRACE(...)	/* nothing */

#endif /* DBG_TRACE_ISR */

#include
#include
#include
#include

// here starts the hacking festival...
#include
#include			// HssAccQueueIdGet
#include			// ICP_HSSACC_MAX_NUM_CHANNELS

#define DEBUGFS_MY_STUFF
// #undef DEBUGFS_MY_STUFF

#ifdef DEBUGFS_MY_STUFF
# include
#endif /* DEBUGFS_MY_STUFF */

#include "xivo_tdm_api.h"
#include "xivo_ep80579_p.h"

MODULE_LICENSE("GPL");

#if defined(REF_0_0) && defined(REF_1_1)
# error dont define both REF_0_0 and REF_1_1
#endif
#if !defined(REF_0_0) && !defined(REF_1_1)
# error please define one of REF_0_0 or REF_1_1
#endif

/* can be tuned */
#define MIN_HSS_PORTS_WANTED	(2)
#define MIN_HSS_CHANS_WANTED	(10)

/* fixed */
#define RX_BUF_NUM		(2048)
/* for Linux < 2.6.19; WARNING: DO NOT USE IN INTERFACES IN THIS CASE! */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
typedef unsigned char bool;
enum { false = 0, true = 1 };
#else
# warning "remember to not use bool in interfaces while Linux < 2.6.19 is supported"
#endif

struct xivo_tdm_callbacks_struct {
	/* process context only */
	void (*port0_started)(void *data);
	void *port0_started_data;

	/* process context only */
	void (*port0_configured)(void *data);
	void *port0_configured_data;
};
static struct xivo_tdm_callbacks_struct xivo_tdm_callbacks;

#define DATA_QUEUE_DEPTH_LOG2	(2)
#define DATA_QUEUE_DEPTH	(1u << DATA_QUEUE_DEPTH_LOG2)
#define DATA_QUEUE_MASK		(DATA_QUEUE_DEPTH - 1)

/* The multiplication by 2 is for Rx and Tx */
#define DATA_CH_SIZE		(2 * DATA_QUEUE_DEPTH * XIVO_TDM_VOICE_CHUNK_SZ)

/* our local counters are coded on one byte: */
#define LOCAL_CPT_MASK		(0xffu)

static u8 __iomem *g_leb_cs_debug;

struct local_cpt {
	u8 tail;
	u8 head;
};

struct xivo_tdm_port {
	int port_num;
	bool allocated;

	u32 ts_rx_unsync;
	u32 ts_tx_unsync;
	u32 ts_started;
	u32 ts_allocated;

	u8 *data_zone;
	unsigned data_zone_order;

	unsigned err;

	unsigned int hss_chan_ids[XIVO_TDM_TS_NUM] ____cacheline_aligned;

	u8 **readchunk_ptrs[XIVO_TDM_TS_NUM];
	u8 **writechunk_ptrs[XIVO_TDM_TS_NUM];

	struct local_cpt tx_data_cpt[XIVO_TDM_TS_NUM];
	struct local_cpt rx_data_cpt[XIVO_TDM_TS_NUM];

	void (*txrx)(void *data);
	void *txrx_data;

	spinlock_t lock ____cacheline_aligned;

	u32 port_rx_ent;

	u8 _pad[0x7c];	/* so that the size is a nice 0x400 bytes */
} ____cacheline_aligned;

static struct xivo_tdm_port xivo_tdm_ports[MIN_HSS_PORTS_WANTED] __cacheline_aligned;

static u32 levels[5];
static u32 invalid_channels;
static u32 global_count;
static u32 rxq_empty;
static u32 rxq_entries;
static u32 ref_ts_not_started;

static DECLARE_MUTEX(xivo_tdm_mutex);

/* port/ts by icp channel id */
#define XIVO_TDM_PORT_TS_FREE	0xFF
struct xivo_tdm_port_ts {
	u8 port;	// port number or XIVO_TDM_PORT_TS_FREE
	u8 ts;		// timeslot number or XIVO_TDM_PORT_TS_FREE
};
static struct xivo_tdm_port_ts port_ts[ICP_HSSACC_MAX_NUM_CHANNELS];

#if XIVO_TDM_VOICE_CHUNK_SZ > 32
#error XIVO_TDM_VOICE_CHUNK_SZ > 32: unsupported
#else
struct xivo_tdm_rx_buf {
	struct hss_piu_entry piu_entry;
	u8 data[32];
} __attribute__ ((aligned (64)));
#endif

static unsigned xivo_tdm_rx_buf_table_order;
static struct xivo_tdm_rx_buf *xivo_tdm_rx_buf_table;	/* RX_BUF_NUM entries */


/* INTERNAL FUNCS */

static void dealloc_one_chan(
	struct xivo_tdm_port* xtp,
	const int cnum)
{
	const u32 chid = xtp->hss_chan_ids[cnum];
	icp_status_t status = icp_HssAccChannelDelete(chid);
	if (status != ICP_STATUS_SUCCESS)
		printk(KERN_ERR "%s: icp_HssAccChannelDelete returned "
				"error %d\n", __func__, (int) status);

	port_ts[chid].port = XIVO_TDM_PORT_TS_FREE;
	port_ts[chid].ts = XIVO_TDM_PORT_TS_FREE;

	xtp->ts_allocated &= ~(1u << cnum);
	xtp->hss_chan_ids[cnum] = 0;
}
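
/*
 * Per-timeslot channel lifecycle used below and by the exported
 * xivo_tdm_start_chans()/xivo_tdm_stop_chans():
 *
 *   add_one_chan()     icp_HssAccChannelAllocate + ChannelConfigure +
 *                      ChannelVoiceServiceConfigure, and records the
 *                      chid -> (port, ts) mapping in port_ts[]
 *   start_one_chan()   icp_HssAccChannelUp, then sets the ts bit in ts_started
 *   stop_one_chan()    clears the ts bit in ts_started, then icp_HssAccChannelDown
 *   dealloc_one_chan() icp_HssAccChannelDelete, releases the port_ts[] slot
 *
 * ts_allocated and ts_started are per-port bitmaps indexed by timeslot.
 */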
static int add_one_chan(
	struct xivo_tdm_port* xtp,
	const int cnum)
{
	u32 chid;
	icp_status_t status;
	icp_hssacc_timeslot_map_t ts_map = {0};

	if (xtp->ts_allocated & (1u << cnum)) {
		printk(KERN_ERR "%s: EVIL BUG, DIE!\n", __func__);
		return -42;
	}

	ts_map.line0_timeslot_bit_map = (1u << cnum);

	/* Allocate the channel */
	status = icp_HssAccChannelAllocate(
			&xtp->hss_chan_ids[cnum],
			xtp->port_num,
			ts_map,
			ICP_HSSACC_CHAN_TYPE_VOICE);
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccChannelAllocate returned "
				"error %d\n", __func__, (int) status);
		return -EIO;
	}

	chid = xtp->hss_chan_ids[cnum];
	port_ts[chid].port = xtp->port_num;
	port_ts[chid].ts = cnum;

	status = icp_HssAccChannelConfigure(
			chid,
			/* channelDataInvert */ 0,
			/* channelBitEndianness */ 1,	/* MSb first */
			/* channelByteSwap */ 0,
			ICP_FALSE, ICP_FALSE, ICP_FALSE);	/* no rbs */
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccChannelConfigure returned "
				"error %d\n", __func__, (int) status);
		dealloc_one_chan(xtp, cnum);
		return -EIO;
	}

	status = icp_HssAccChannelVoiceServiceConfigure(
			chid,
			ICP_HSSACC_VOICE_TX_IDLE_REPEAT_LAST_FRAME,
			0xD5,
			XIVO_TDM_VOICE_CHUNK_SZ);
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccChannelVoiceServiceConfigure "
				"returned error %d\n", __func__, (int) status);
		dealloc_one_chan(xtp, cnum);
		return -EIO;
	}

	xtp->ts_allocated |= (1u << cnum);

	return 0;
}

static int start_one_chan(
	struct xivo_tdm_port* xtp,
	const int cnum)
{
	const unsigned int hss_chan_id = xtp->hss_chan_ids[cnum];
	unsigned long flags;
	icp_status_t status;

	status = icp_HssAccChannelUp(hss_chan_id);
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccChannelUp returned error %d\n",
		       __func__, status);
		return -EIO;
	}

	spin_lock_irqsave(&xtp->lock, flags);
	xtp->ts_started |= (1u << cnum);
	spin_unlock_irqrestore(&xtp->lock, flags);

	return 0;
}

static int stop_one_chan(
	struct xivo_tdm_port* xtp,
	const int cnum)
{
	icp_status_t status;
	unsigned int hss_chan_id = xtp->hss_chan_ids[cnum];
	unsigned long flags;

	spin_lock_irqsave(&xtp->lock, flags);
	xtp->ts_started &= ~(1u << cnum);
	spin_unlock_irqrestore(&xtp->lock, flags);

	status = icp_HssAccChannelDown(hss_chan_id);
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccChannelDown returned %d\n",
		       __func__, (int) status);
		return -EIO;
	}

	return 0;
}

static void xivo_hss_port_error_cb(
	icp_user_context_t user_context,
	icp_hssacc_port_error_t error_type)
{
	struct xivo_tdm_port* xtp = (struct xivo_tdm_port*) user_context;
	unsigned port_num = xtp->port_num;

	switch (error_type) {
	case ICP_HSSACC_PORT_ERROR_TX_LOS:
		printk(KERN_ERR "%s - Port error for port %u :"
				" Transmit loss of sync\n",
				__func__, port_num);
		break;
	case ICP_HSSACC_PORT_ERROR_RX_LOS:
		printk(KERN_ERR "%s - Port error for port %u :"
				" Receive loss of sync\n",
				__func__, port_num);
		break;
	case ICP_HSSACC_PORT_ERROR_TX_UNDERFLOW:
		printk(KERN_ERR "%s - Port error for port %u :"
				" Transmit underflow\n",
				__func__, port_num);
		break;
	case ICP_HSSACC_PORT_ERROR_RX_OVERFLOW:
		printk(KERN_ERR "%s - Port error for port %u :"
				" Receive overflow\n",
				__func__, port_num);
		break;
	case ICP_HSSACC_PORT_ERROR_TX_PARITY:
		printk(KERN_ERR "%s - Port error for port %u :"
				" Tx Parity Error\n",
				__func__, port_num);
		break;
	case ICP_HSSACC_PORT_ERROR_RX_PARITY:
		printk(KERN_ERR "%s - Port error for port %u :"
				" Rx Parity Error\n",
				__func__, port_num);
		break;
	default:
		printk(KERN_ERR "%s - Port error for port %u :"
				" Unidentified error %u\n",
				__func__, port_num, (unsigned) error_type);
		break;
	}
}

static void xivo_hss_error_cb(
	icp_user_context_t user_context,
	icp_hssacc_error_t error_type)
{
	(void) user_context;

	switch (error_type) {
	case ICP_HSSACC_ERROR_RX_OVERFLOW:
		printk(KERN_ERR "%s - Error: Receive Queue Overflow "
				"reported\n", __func__);
		break;
	case ICP_HSSACC_ERROR_RX_FREE_UNDERFLOW:
		printk(KERN_ERR "%s - Error: Receive Free Queue Underflow "
				" reported\n", __func__);
		break;
	case ICP_HSSACC_ERROR_MESSAGE_FIFO_OVERFLOW:
		printk(KERN_ERR "%s - Error: Messaging FIFO Overflow "
				"reported\n", __func__);
		break;
	default:
		printk(KERN_ERR "%s - Unidentified Error %u\n",
		       __func__, (unsigned) error_type);
		break;
	}
}
/* EXPORTED FUNCS */

struct xivo_tdm_port *xivo_tdm_get_port(int tdm_port_num)
{
	struct xivo_tdm_port *xtp;

	if (tdm_port_num < 0 || tdm_port_num >= MIN_HSS_PORTS_WANTED) {
		printk(KERN_ERR "%s: attempt to allocate an out of range TDM "
				"port %d\n", __func__, tdm_port_num);
		return NULL;
	}

	down(&xivo_tdm_mutex);
	if (xivo_tdm_ports[tdm_port_num].allocated) {
		xtp = NULL;
		printk(KERN_ERR "%s: attempt to allocate TDM port %d, which "
				"is already allocated\n",
				__func__, tdm_port_num);
	} else {
		xivo_tdm_ports[tdm_port_num].allocated = true;
		xtp = &xivo_tdm_ports[tdm_port_num];
	}
	up(&xivo_tdm_mutex);

	return xtp;
}
EXPORT_SYMBOL(xivo_tdm_get_port);

void xivo_tdm_put_port(struct xivo_tdm_port *xtp)
{
	unsigned long xtp_addr = (unsigned long)xtp;
	unsigned idx = (xtp_addr - (unsigned long)xivo_tdm_ports)
		       / sizeof(*xtp);

	if (xtp_addr == 0
	    || xtp_addr < (unsigned long)xivo_tdm_ports
	    || xtp_addr > (unsigned long)(xivo_tdm_ports
					  + MIN_HSS_PORTS_WANTED - 1)
	    || xtp_addr != (unsigned long)(xivo_tdm_ports + idx)) {
		printk(KERN_ERR "%s: attempt to free an invalid struct "
				"xivo_tdm_port pointer: %p\n",
				__func__, (void *) xtp);
		return;
	}

	down(&xivo_tdm_mutex);
	if (!xtp->allocated) {
		printk(KERN_ERR "%s: attempt to free TDM port %u, which is not "
				"allocated\n", __func__, idx);
	} else {
		xivo_tdm_stop_chans(xtp);
		xtp->allocated = false;
	}
	up(&xivo_tdm_mutex);
}
EXPORT_SYMBOL(xivo_tdm_put_port);
int xivo_tdm_config_port(
	struct xivo_tdm_port* xtp,
	unsigned int port_config,
	u8 __iomem *leb_cs_debug)
{
	icp_status_t status;
	icp_hssacc_port_config_params_t hss_port_config;

	g_leb_cs_debug = leb_cs_debug;

	if (port_config >= ICP_HSSDRV_PORT_CONFIG_DELIMITER) {
		printk(KERN_ERR "%s: invalid port config %u\n",
		       __func__, port_config);
		return -EINVAL;
	}

	port_config_create(port_config, ICP_HSSDRV_NO_LOOPBACK,
			   &hss_port_config);

	status = icp_HssAccPortConfig(xtp->port_num, &hss_port_config);
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccPortConfig returned error %d\n",
		       __func__, (int) status);
		return -EIO;
	}

	status = icp_HssAccPortErrorCallbackRegister(	/* XXX to unreg? */
			xtp->port_num,
			ICP_HSSACC_CHAN_TYPE_VOICE,
			(icp_user_context_t) xtp,
			xivo_hss_port_error_cb);
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccPortErrorCallbackRegister "
				"returned error %d\n", __func__, (int) status);
		return -EIO;
	}

	status = icp_HssAccPortUp(xtp->port_num);
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccPortUp returned error %d\n",
		       __func__, (int) status);
		return -EIO;
	}

	if (xtp->port_num == 0 && xivo_tdm_callbacks.port0_configured) {
		msleep(3);
		xivo_tdm_callbacks.port0_configured(
				xivo_tdm_callbacks.port0_configured_data);
	}

	return 0;
}
EXPORT_SYMBOL(xivo_tdm_config_port);

void xivo_tdm_register_port0_started(
	struct xivo_tdm_port* xtp,
	void (*port0_started)(void *),
	void *port0_started_data)
{
	unsigned long flags;

	(void) xtp;

	spin_lock_irqsave(&xivo_tdm_ports[0].lock, flags);
	xivo_tdm_callbacks.port0_started = port0_started;
	xivo_tdm_callbacks.port0_started_data = port0_started_data;
	spin_unlock_irqrestore(&xivo_tdm_ports[0].lock, flags);
}
EXPORT_SYMBOL(xivo_tdm_register_port0_started);

void xivo_tdm_register_port0_configured(
	struct xivo_tdm_port* xtp,
	void (*port0_configured)(void *),
	void *port0_configured_data)
{
	unsigned long flags;

	(void) xtp;

	spin_lock_irqsave(&xivo_tdm_ports[0].lock, flags);
	xivo_tdm_callbacks.port0_configured = port0_configured;
	xivo_tdm_callbacks.port0_configured_data = port0_configured_data;
	spin_unlock_irqrestore(&xivo_tdm_ports[0].lock, flags);
}
EXPORT_SYMBOL(xivo_tdm_register_port0_configured);

int xivo_tdm_start_chans(
	struct xivo_tdm_port* const xtp,
	u8 ** const readchunk_ptrs[XIVO_TDM_TS_NUM],
	u8 ** const writechunk_ptrs[XIVO_TDM_TS_NUM],
	void (*txrx)(void *),
	void *txrx_data)
{
	int cnum;
	int past_last_ch = 0;

	memcpy(xtp->readchunk_ptrs, readchunk_ptrs,
	       sizeof xtp->readchunk_ptrs);
	memcpy(xtp->writechunk_ptrs, writechunk_ptrs,
	       sizeof xtp->writechunk_ptrs);
	xtp->txrx_data = txrx_data;
	smp_wmb();
	xtp->txrx = txrx;

	for (cnum = 0; cnum < XIVO_TDM_TS_NUM; cnum++) {
		if (xtp->readchunk_ptrs[cnum] || xtp->writechunk_ptrs[cnum]) {
			if (xtp->writechunk_ptrs[cnum])
				past_last_ch = cnum + 1;
			printk(KERN_INFO "%s: adding chan %d\n",
			       __func__, cnum);
			/* XXX retval */
			(void) add_one_chan(xtp, cnum);
		}
	}

	if (past_last_ch) {
		xtp->data_zone_order = get_order(past_last_ch * DATA_CH_SIZE);
		xtp->data_zone = (u8 *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO,
						xtp->data_zone_order);
	}

	for (cnum = 0; cnum < XIVO_TDM_TS_NUM; cnum++) {
		if (xtp->readchunk_ptrs[cnum] || xtp->writechunk_ptrs[cnum]) {
			printk(KERN_INFO "%s: starting chan %d\n",
			       __func__, cnum);
			/* XXX retval */
			(void) start_one_chan(xtp, cnum);
		}
	}

	printk(KERN_INFO "%s: DONE\n", __func__);

	if (xtp->port_num == 0 && xivo_tdm_callbacks.port0_started)
		xivo_tdm_callbacks.port0_started(
				xivo_tdm_callbacks.port0_started_data);

	return 0;
}
EXPORT_SYMBOL(xivo_tdm_start_chans);

void xivo_tdm_stop_chans(
	struct xivo_tdm_port* xtp)
{
	u32 scan;
	int cnum;

	for (cnum = 0, scan = 1; scan; scan <<= 1, cnum++) {
		if (scan & xtp->ts_started) {
			printk(KERN_INFO "%s: stopping chan %d\n",
			       __func__, cnum);
			(void) stop_one_chan(xtp, cnum);
		}
	}

	for (cnum = 0, scan = 1; scan; scan <<= 1, cnum++) {
		if (scan & xtp->ts_allocated) {
			printk(KERN_INFO "%s: removing chan %d\n",
			       __func__, cnum);
			(void) dealloc_one_chan(xtp, cnum);
		}
	}
}
EXPORT_SYMBOL(xivo_tdm_stop_chans);
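
/*
 * Illustrative sketch (not part of this module) of how a client driver is
 * expected to use the API exported above.  The names my_txrx, my_read_bufs,
 * my_write_bufs, my_readchunks, my_writechunks and MY_PORT_CONFIG are
 * hypothetical; the real port_config value is board specific (see
 * xivo_tdm_config_port() and the icp_hssdrv port configurations).
 *
 *	static u8 *my_read_bufs[XIVO_TDM_TS_NUM];
 *	static u8 *my_write_bufs[XIVO_TDM_TS_NUM];
 *	static u8 **my_readchunks[XIVO_TDM_TS_NUM];	// NULL => timeslot unused
 *	static u8 **my_writechunks[XIVO_TDM_TS_NUM];
 *
 *	static void my_txrx(void *data)
 *	{
 *		// hardirq context: consume *my_readchunks[ts] and refill
 *		// *my_writechunks[ts] for every timeslot in use
 *	}
 *
 *	static int my_attach(void)
 *	{
 *		struct xivo_tdm_port *xtp = xivo_tdm_get_port(0);
 *		if (!xtp)
 *			return -EBUSY;
 *		my_readchunks[3] = &my_read_bufs[3];	// use timeslot 3
 *		my_writechunks[3] = &my_write_bufs[3];
 *		if (xivo_tdm_config_port(xtp, MY_PORT_CONFIG, NULL) < 0) {
 *			xivo_tdm_put_port(xtp);
 *			return -EIO;
 *		}
 *		return xivo_tdm_start_chans(xtp, my_readchunks, my_writechunks,
 *					    my_txrx, NULL);
 *	}
 *
 * Teardown is xivo_tdm_stop_chans() followed by xivo_tdm_put_port() (the
 * latter also stops the channels if needed).  Note that with REF_0_0 defined
 * above, port 0 / timeslot 0 is the reference channel: the pacing logic in
 * xivo_tdm_trigger_handler() only runs once that timeslot is started.
 */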
/* hacky sort of shutdown... */
void xivo_tdm_shutdown(void)
{
	tdm_register_trigger_hardirq_handler(NULL);
	msleep(10);	// BUGBUG racy hack
}
EXPORT_SYMBOL(xivo_tdm_shutdown);


/* HANDLER */

typedef enum {
	LOCAL_VOICE_RX_Q = 0,
	LOCAL_VOICE_RX_FREE_Q,
} local_rx_q_e;
#define LOCAL_NUM_RX	(LOCAL_VOICE_RX_FREE_Q + 1)

static inline uint32_t hssacc_from_local_rx(local_rx_q_e lrxq)
{
	switch (lrxq) {
#define trans2hssacc(suffix) case LOCAL_ ## suffix : return ICP_HSSACC_ ## suffix ;
	trans2hssacc(VOICE_RX_Q);
	trans2hssacc(VOICE_RX_FREE_Q);
#undef trans2hssacc
	}
	return 42;
}

// typedef UINT32 IxQMgrQId;
static IxQMgrQId local_rx_ixQ[LOCAL_NUM_RX];

#define WORD_CPT_Q_MASK		0xFFFFu
#define BYTE_CPT_Q_MASK		0xFFu

#define CPT_PRINT_NB		0
#define SPURIOUS_PRINT_NB	32

/* used by xivo_tdm_trigger_handler (hardirq) and its children */
static struct {
	u32 wordHead;
	u32 wordTail;
} cpt[LOCAL_NUM_RX];

static void xivo_tdm_load_counters(void)
{
	int i;
	for (i = 0; i < LOCAL_NUM_RX; i++) {
		IxQMgrQueue *info = &ixQMgrQueues[local_rx_ixQ[i]];
		cpt[i].wordHead = *info->queue.wordHead;	// volatile read
		cpt[i].wordTail = *info->queue.wordTail;	// volatile read
	}
}

static void xivo_tdm_save_our_counters(void)
{
	IxQMgrQueue *vrxq_info = &ixQMgrQueues[local_rx_ixQ[LOCAL_VOICE_RX_Q]];
	IxQMgrQueue *vrxfq_info = &ixQMgrQueues[
					local_rx_ixQ[LOCAL_VOICE_RX_FREE_Q]];
	*vrxq_info->queue.wordTail = cpt[LOCAL_VOICE_RX_Q].wordTail;
	*vrxfq_info->queue.wordHead = cpt[LOCAL_VOICE_RX_FREE_Q].wordHead;
}

static void xivo_tdm_rxfq_replenish(
	struct hss_piu_entry * const pe)
{
	IxQMgrQueue * const vrxfq_info =
		&ixQMgrQueues[local_rx_ixQ[LOCAL_VOICE_RX_FREE_Q]];

	if (likely(((cpt[LOCAL_VOICE_RX_FREE_Q].wordHead
		     - cpt[LOCAL_VOICE_RX_FREE_Q].wordTail)
		    & WORD_CPT_Q_MASK) < RX_BUF_NUM)) {
		volatile struct hss_queue_entry *qe =
			(volatile struct hss_queue_entry *)
				&vrxfq_info->queue.content[
					(cpt[LOCAL_VOICE_RX_FREE_Q].wordHead
					 & vrxfq_info->queue.mask) << 2];
		qe->lsc = len_status_cid(XIVO_TDM_VOICE_CHUNK_SZ, 0, 0);
		qe->data_ptr = virt_to_phys((void*)pe->q_entry.data_ptr);
		qe->packet_len = 0;
		qe->entry_ptr = virt_to_phys(pe);
		cpt[LOCAL_VOICE_RX_FREE_Q].wordHead++;
	} else {
		static unsigned ml_num;
		printk(KERN_ERR "ML %u\n", ++ml_num);
	}
}

static inline unsigned local_rxq_level(unsigned port, unsigned ts)
{
	return (xivo_tdm_ports[port].rx_data_cpt[ts].head
		- xivo_tdm_ports[port].rx_data_cpt[ts].tail) & LOCAL_CPT_MASK;
}

static inline unsigned local_rxq_full(unsigned port, unsigned ts)
{
	return ((xivo_tdm_ports[port].rx_data_cpt[ts].head
		 - xivo_tdm_ports[port].rx_data_cpt[ts].tail)
		& DATA_QUEUE_DEPTH) >> DATA_QUEUE_DEPTH_LOG2;
}

static inline u8 *local_rxq_data_head(unsigned port, unsigned ts)
{
	const unsigned head = xivo_tdm_ports[port].rx_data_cpt[ts].head
			      & DATA_QUEUE_MASK;
	return xivo_tdm_ports[port].data_zone
	       + (ts * DATA_CH_SIZE)
	       + ((DATA_QUEUE_DEPTH + head) * XIVO_TDM_VOICE_CHUNK_SZ);
}

static inline u8 *local_rxq_data_tail(unsigned port, unsigned ts)
{
	const unsigned tail = xivo_tdm_ports[port].rx_data_cpt[ts].tail
			      & DATA_QUEUE_MASK;
	return xivo_tdm_ports[port].data_zone
	       + (ts * DATA_CH_SIZE)
	       + ((DATA_QUEUE_DEPTH + tail) * XIVO_TDM_VOICE_CHUNK_SZ);
}

static inline u8 *local_txq_data_head(unsigned port, unsigned ts)
{
	const unsigned head = xivo_tdm_ports[port].tx_data_cpt[ts].head
			      & DATA_QUEUE_MASK;
	return xivo_tdm_ports[port].data_zone
	       + (ts * DATA_CH_SIZE)
	       + (head * XIVO_TDM_VOICE_CHUNK_SZ);
}

static inline unsigned local_txq_level(unsigned port, unsigned ts)
{
	return (xivo_tdm_ports[port].tx_data_cpt[ts].head
		- xivo_tdm_ports[port].tx_data_cpt[ts].tail) & LOCAL_CPT_MASK;
}
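
/*
 * Layout of a port's data_zone, as implied by the helpers above: each
 * timeslot owns DATA_CH_SIZE bytes, split into a TX ring (chunk indexes
 * 0 .. DATA_QUEUE_DEPTH-1) followed by an RX ring (chunk indexes
 * DATA_QUEUE_DEPTH .. 2*DATA_QUEUE_DEPTH-1), each chunk holding
 * XIVO_TDM_VOICE_CHUNK_SZ bytes.
 *
 * The head/tail members of struct local_cpt are free-running u8 counters,
 * only masked at use time.  Worked example with DATA_QUEUE_DEPTH == 4:
 * head == 0x02 and tail == 0xff give a level of (0x02 - 0xff) & 0xff == 3
 * pending chunks; local_rxq_full() computes
 * ((head - tail) & DATA_QUEUE_DEPTH) >> DATA_QUEUE_DEPTH_LOG2, which is 1
 * exactly when the ring already holds DATA_QUEUE_DEPTH chunks.
 */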
static inline unsigned xivo_tdm_qid(unsigned port, unsigned ts)
{
	return HssAccQueueIdGet(xivo_tdm_ports[port].hss_chan_ids[ts]);
}

static inline bool xivo_tdm_is_reference_channel(unsigned port, unsigned ts)
{
	/* specific to our current timeslots allocation */
	return port == ts;
}

/*
 * NOTE: the EP80579 has an IA-32 core (a Pentium M).
 *
 * It seems that the memory model for WB regions is as described below.
 * (This is the standard published memory model of IA-32 as of 2010/11/16.
 * At this date, it has not changed for more than 2 years and is used as
 * such in Linux.)
 *
 *   Load then Load:   in-order
 *   Load then Store:  in-order
 *   Store then Load:  OUT OF ORDER (the store can be delayed,
 *                     living for a while in a write buffer)
 *   Store then Store: in-order
 *
 * Also see this post and the whole thread:
 * http://lkml.org/lkml/2007/10/12/211
 *
 * The only problem that could remain would be that the memory model for
 * communication between the PIU and the IA-32 core through the queues in WB
 * memory does not follow the IA-32 memory model, which is IMHO unlikely.
 */

static int xivo_tdm_port_txrx(
	const unsigned port,
	const int tick)
{
	void (*txrx)(void *);
	int ts;
	u32 ts_tx_enqueue = 0;

	if (!(xivo_tdm_ports[port].ts_started & (1u << port)))	// XXX hack ref channel
		return 0;

	for (ts = 0; ts < XIVO_TDM_TS_NUM; ts++) {
		int fill, low;
		u8 ** readchunk_ptr;
		u8 ** writechunk_ptr;
		unsigned qid;
		u32 num_entries = 0;

		if (!(xivo_tdm_ports[port].ts_started & (1u << ts)))
			continue;

		readchunk_ptr = xivo_tdm_ports[port].readchunk_ptrs[ts];
		if (readchunk_ptr) {
			fill = (int)local_rxq_level(port, ts);
			low = (-((xivo_tdm_ports[port].ts_rx_unsync >> ts) & 1))
			      & tick;
			if (likely(fill > low)) {
				*readchunk_ptr = local_rxq_data_tail(port, ts);
				xivo_tdm_ports[port].rx_data_cpt[ts].tail++;
				xivo_tdm_ports[port].ts_rx_unsync &= ~(1u << ts);
			}
		}

		writechunk_ptr = xivo_tdm_ports[port].writechunk_ptrs[ts];
		if (writechunk_ptr) {
			qid = xivo_tdm_qid(port, ts);
			if (unlikely(qid >= IX_QMGR_MAX_NUM_QUEUES))
				panic("WHERE IS MY QUEUE GRRRRRR!\n");

			/* can't fail: */
			(void)ixQMgrShadowDeltaGet(qid,
						   IX_QMGR_Q_SHADOW_TAIL_ONLY,
						   &num_entries);
			/* can't fail because we just read num_entries, and the
			 * delta can't decrease behind our back: */
			(void)ixQMgrShadowAdvance(qid,
						  IX_QMGR_Q_SHADOW_TAIL_ONLY,
						  num_entries);

			if (unlikely(num_entries > local_txq_level(port, ts))) {
				printk(KERN_CRIT "port %d ts %d: mismatch "
						 "local/PIU TXQ!!!\n",
						 port, ts);
				xivo_tdm_ports[port].tx_data_cpt[ts].tail =
					xivo_tdm_ports[port].tx_data_cpt[ts].head;
			} else
				xivo_tdm_ports[port].tx_data_cpt[ts].tail +=
								num_entries;

			fill = (int)local_txq_level(port, ts);
			if (fill == 0)	/* underflow */
				xivo_tdm_ports[port].ts_tx_unsync |= (1u << ts);

			if (fill <= 3 - tick) {	// XXX X-Ref 3
				if (xivo_tdm_ports[port].ts_tx_unsync
				    & (1u << ts))
					xivo_tdm_ports[port].tx_data_cpt[ts].head
								+= 3 - tick;
				*writechunk_ptr = local_txq_data_head(port, ts);
				xivo_tdm_ports[port].tx_data_cpt[ts].head++;
				ts_tx_enqueue |= (1u << ts);
			}
		}
	}

	txrx = xivo_tdm_ports[port].txrx;
	if (txrx) {
		smp_rmb();
		txrx(xivo_tdm_ports[port].txrx_data);
	}
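	/*
	 * Second pass: for every timeslot for which the first pass reserved
	 * a fresh TX chunk (bit set in ts_tx_enqueue) and which the txrx
	 * callback has just filled, push a queue entry to that channel's PIU
	 * TX queue.  When the timeslot has just been re-synchronized,
	 * suppl_sync extra copies of the same chunk are enqueued (the
	 * "3 - tick" / X-Ref 3 constant) to rebuild the TX queue fill level
	 * after an underflow.
	 */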
	for (ts = 0; ts < XIVO_TDM_TS_NUM; ts++) {
		IxQMgrQueue *txq_info;
		unsigned chid;
		unsigned qid;
		int suppl_sync;
		u8 ** const writechunk_ptr =
				xivo_tdm_ports[port].writechunk_ptrs[ts];

		if (!writechunk_ptr || unlikely(!(ts_tx_enqueue & (1u << ts))))
			continue;

		chid = xivo_tdm_ports[port].hss_chan_ids[ts];
		qid = xivo_tdm_qid(port, ts);
		if (unlikely(qid >= IX_QMGR_MAX_NUM_QUEUES))
			panic("WHERE IS MY QUEUE GRRRRRR!\n");
		txq_info = &ixQMgrQueues[qid];

		suppl_sync = 0;
		if (unlikely((xivo_tdm_ports[port].ts_tx_unsync) & (1u << ts))) {
			suppl_sync = 3 - tick;	// XXX X-Ref 3
			xivo_tdm_ports[port].ts_tx_unsync &= ~(1u << ts);
		}

		do {
			volatile struct hss_queue_entry * const qe =
				(volatile struct hss_queue_entry *)
					&txq_info->queue.content[
						(*txq_info->queue.byteHead
						 & txq_info->queue.mask) << 2];
			qe->lsc = len_status_cid(XIVO_TDM_VOICE_CHUNK_SZ,
						 0, chid);
			qe->data_ptr = virt_to_phys(*writechunk_ptr);
			qe->packet_len = 0;
			qe->entry_ptr = 0;
			(*txq_info->queue.byteHead)++;	// volatile real counter
		} while (unlikely(suppl_sync-- > 0));
	}

	// XXX hack ref channel:
	return (int)local_rxq_level(port, port) - 1 > 0;
}

/* hardirq */
void xivo_tdm_trigger_handler(void)
{
	int port, ts, tick;
	IxQMgrQueue * const vrxq_info =
			&ixQMgrQueues[local_rx_ixQ[LOCAL_VOICE_RX_Q]];
#ifdef DBG_TRACE_ISR
	ulong tsc;
	ulong delta_tsc;
	rdtscl(tsc);
	delta_tsc = tsc - tstamp;
	tstamp = tsc;
#endif /* DBG_TRACE_ISR */

	xivo_tdm_load_counters();

#define RXQ_EMPTY \
	(!((cpt[LOCAL_VOICE_RX_Q].wordHead \
	    - cpt[LOCAL_VOICE_RX_Q].wordTail) & WORD_CPT_Q_MASK))

	global_count++;

	if (unlikely(RXQ_EMPTY)) {
		DBG_TRACE("-- empty -- %lu\n", delta_tsc);
		rxq_empty++;
		return;
	} else {
		DBG_TRACE("-- rx -- %lu\n", delta_tsc);
	}

	if (g_leb_cs_debug)
		writeb(0, g_leb_cs_debug);

	do {
		const u32 rx_entry = vrxq_info->queue.content[	// volatile
				(cpt[LOCAL_VOICE_RX_Q].wordTail++)
				& (RX_BUF_NUM - 1)];
		struct hss_piu_entry * const piu_entry =
				phys_to_virt((unsigned long)rx_entry);
		struct xivo_tdm_rx_buf * const rx_buf =
				container_of(piu_entry,
					     struct xivo_tdm_rx_buf,
					     piu_entry);
		const unsigned chid = HSS_PIU_ENTRY_CHANNEL_ID(piu_entry);

		rxq_entries++;

		if (likely(chid < ICP_HSSACC_MAX_NUM_CHANNELS
			   && port_ts[chid].port < MIN_HSS_PORTS_WANTED)) {
			unsigned full;
			u32 *dest;
			u32 *src;

			port = port_ts[chid].port;
			xivo_tdm_ports[port].port_rx_ent++;
			ts = port_ts[chid].ts;
			full = local_rxq_full(port, ts);
			dest = (u32 *)local_rxq_data_head(port, ts);
			src = (u32 *)rx_buf->data;
#if XIVO_TDM_VOICE_CHUNK_SZ == 8
			dest[0] = src[0];
			dest[1] = src[1];
#else
# error XIVO_TDM_VOICE_CHUNK_SZ != 8: update code
#endif
			xivo_tdm_ports[port].rx_data_cpt[ts].head++;
			xivo_tdm_ports[port].rx_data_cpt[ts].tail += full;
			xivo_tdm_ports[port].err += (full & ~(port ^ ts));	// XXX hack ref channel
		} else {
			invalid_channels++;
		}

		if (0) {
			static int lol = 5000;
			if (lol-- > 0)
				printk(KERN_ERR "all glory to the "
						"hypnotoad!\n");
		}

		xivo_tdm_rxfq_replenish(piu_entry);
	} while (!RXQ_EMPTY);

	xivo_tdm_save_our_counters();

#undef RXQ_EMPTY

	// XXX big hack
#if defined(REF_0_0)
	if (!(xivo_tdm_ports[0].ts_started & 1))
#elif defined(REF_1_1)
	if (!(xivo_tdm_ports[1].ts_started & 2))
#endif
	{
		ref_ts_not_started++;
		return;
	}

#if defined(REF_0_0)
	tick = (int)local_rxq_level(0, 0);
#elif defined(REF_1_1)
	tick = (int)local_rxq_level(1, 1);
#endif
	if (tick < 0)
		tick = 0;
	if (tick > 4)
		tick = 4;
	levels[tick]++;

	do {
		// XXX ultimate fucking big hack:
#if defined(REF_0_0)
		tick = (int)local_rxq_level(0, 0) - 1;
#elif defined(REF_1_1)
		tick = (int)local_rxq_level(1, 1) - 1;
#endif
		if (tick <= 0)
			return;

		for (port = 0; port < MIN_HSS_PORTS_WANTED; port++)
			xivo_tdm_port_txrx(port, tick);
	} while (1);
}


/* INIT / CLEANUP */

static void xivo_internal_cleanup(void)
{
	int port;

	for (port = 0; port < (int)ARRAY_SIZE(xivo_tdm_ports); port++) {
		if (xivo_tdm_ports[port].data_zone) {
			free_pages((unsigned long)
					xivo_tdm_ports[port].data_zone,
				   xivo_tdm_ports[port].data_zone_order);
			xivo_tdm_ports[port].data_zone = NULL;
			xivo_tdm_ports[port].data_zone_order = 0;
		}
	}

	if (xivo_tdm_rx_buf_table) {
		free_pages((unsigned long)xivo_tdm_rx_buf_table,
			   xivo_tdm_rx_buf_table_order);
		xivo_tdm_rx_buf_table = NULL;
		xivo_tdm_rx_buf_table_order = 0;
	}
}
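
/*
 * xivo_internal_init() below resets every per-port descriptor and allocates
 * the zeroed RX buffer table (RX_BUF_NUM entries).  xivo_internal_cleanup()
 * above is its counterpart; every free is guarded by a NULL/zero check, so
 * it is safe to call on a partially initialized module.
 */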
static int __init xivo_internal_init(void)
{
	int i;

	memset(port_ts, XIVO_TDM_PORT_TS_FREE, sizeof port_ts);

	xivo_tdm_rx_buf_table_order =
		get_order(sizeof (struct xivo_tdm_rx_buf) * RX_BUF_NUM);
	xivo_tdm_rx_buf_table = (struct xivo_tdm_rx_buf *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				 xivo_tdm_rx_buf_table_order);
	if (!xivo_tdm_rx_buf_table)
		panic("ALL GLORY TO THE HYPNOTOAD!\n");

	for (i = 0; i < (int)ARRAY_SIZE(xivo_tdm_ports); i++) {
		xivo_tdm_ports[i].port_num = i;
		xivo_tdm_ports[i].allocated = false;
		xivo_tdm_ports[i].ts_rx_unsync = 0xffffffff;
		xivo_tdm_ports[i].ts_tx_unsync = 0xffffffff;
		xivo_tdm_ports[i].ts_started = 0;
		xivo_tdm_ports[i].ts_allocated = 0;
		memset(xivo_tdm_ports[i].hss_chan_ids, 0,
		       sizeof xivo_tdm_ports[i].hss_chan_ids);
		spin_lock_init(&xivo_tdm_ports[i].lock);
		xivo_tdm_ports[i].data_zone = NULL;
		xivo_tdm_ports[i].data_zone_order = 0;
		memset(xivo_tdm_ports[i].readchunk_ptrs, 0,
		       sizeof xivo_tdm_ports[i].readchunk_ptrs);
		memset(xivo_tdm_ports[i].writechunk_ptrs, 0,
		       sizeof xivo_tdm_ports[i].writechunk_ptrs);
		memset(xivo_tdm_ports[i].tx_data_cpt, 0,
		       sizeof xivo_tdm_ports[i].tx_data_cpt);
		memset(xivo_tdm_ports[i].rx_data_cpt, 0,
		       sizeof xivo_tdm_ports[i].rx_data_cpt);
		xivo_tdm_ports[i].txrx = NULL;
		xivo_tdm_ports[i].txrx_data = NULL;
	}

	return 0;
}

static int __init xivo_icp_Hss_init(void)
{
	icp_status_t status;
	int hss_port_number;
	int hss_chan_number;
	int i, err = 0;

#define RETURN_ERR(x, label) do { err = (x); goto label; } while (0)

	hss_port_number = icp_HssAccNumSupportedPortsGet();
	if (hss_port_number < MIN_HSS_PORTS_WANTED) {
		printk(KERN_ERR "%s: wants %d HSS (TDM) ports but the system "
				"has only %d\n", __func__,
				MIN_HSS_PORTS_WANTED, hss_port_number);
		RETURN_ERR(-ENXIO, ret_err);
	}

	hss_chan_number = icp_HssAccNumSupportedChannelsGet();
	if (hss_chan_number < MIN_HSS_CHANS_WANTED) {
		printk(KERN_ERR "%s: wants %d HSS (TDM) chans but the system "
				"can only use %d\n", __func__,
				MIN_HSS_CHANS_WANTED, hss_chan_number);
		RETURN_ERR(-ENXIO, ret_err);
	}

	status = icp_HssAccVoiceInit(XIVO_TDM_VOICE_CHUNK_SZ,
				     /* intGenerationEnable */ ICP_TRUE);
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccVoiceInit() returned error %d\n",
		       __func__, (int) status);
		RETURN_ERR(-EIO, ret_err);
	}

	status = icp_HssAccErrorCallbackRegister(ICP_HSSACC_CHAN_TYPE_VOICE,
						 NULL,
						 xivo_hss_error_cb);
	if (status != ICP_STATUS_SUCCESS) {
		printk(KERN_ERR "%s: icp_HssAccErrorCallbackRegister() "
				"returned error %d\n", __func__, (int) status);
		RETURN_ERR(-EIO, ret_err);
	}

	for (i = 0; i < LOCAL_NUM_RX; i++) {
		local_rx_ixQ[i] = HssAccQueueIdGet(hssacc_from_local_rx(i));
		printk(KERN_ERR "local_rx=%d ixQ=%u\n",
		       i, (unsigned)local_rx_ixQ[i]);
	}

	xivo_tdm_load_counters();

	/* GIVE ME A spare_wheel_osal_buf AND A PONEY (and LSD) */
	for (i = 0; i < RX_BUF_NUM; i++) {
		struct xivo_tdm_rx_buf *rx_buf = &xivo_tdm_rx_buf_table[i];
		memset(rx_buf->data, 0xD5, sizeof rx_buf->data);
		rx_buf->piu_entry.q_entry.lsc = len_status_cid(
						XIVO_TDM_VOICE_CHUNK_SZ, 0, 0);
		rx_buf->piu_entry.q_entry.data_ptr = (u32)rx_buf->data;
		rx_buf->piu_entry.q_entry.packet_len = 0;
		rx_buf->piu_entry.q_entry.entry_ptr = 0;
		xivo_tdm_rxfq_replenish(&rx_buf->piu_entry);
	}
	xivo_tdm_save_our_counters();

	tdm_register_trigger_hardirq_handler(xivo_tdm_trigger_handler);

	return 0;

ret_err:
	// there used to be some stuff here
	return err;
}
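
/*
 * Debug instrumentation: when DEBUGFS_MY_STUFF is defined, the module
 * exposes a "xivo_tdm" debugfs directory containing the per-port counters
 * as "1_<field>" / "2_<field>", the global counters and the levels[]
 * histogram under their own names, plus an aggregate text view in the
 * file "zoby".
 */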
#ifdef DEBUGFS_MY_STUFF

static struct dentry *dbg_dir;

#define DBG_FIELDS \
	F(u32, ts_rx_unsync) \
	F(u32, ts_tx_unsync) \
	F(u32, ts_started) \
	F(u32, ts_allocated) \
	F(u32, port_rx_ent)

#define DBG_GLOBAL_FIELDS \
	F(u32, rxq_empty) \
	F(u32, global_count) \
	F(u32, invalid_channels) \
	F(u32, rxq_entries) \
	F(u32, ref_ts_not_started)

#define DBG_GLOBAL_ENT \
	G(u32, levels, 0) \
	G(u32, levels, 1) \
	G(u32, levels, 2) \
	G(u32, levels, 3) \
	G(u32, levels, 4)

#define F(t, n) static struct dentry *dbg_1_ ## n;
DBG_FIELDS
#undef F

#define F(t, n) static struct dentry *dbg_2_ ## n;
DBG_FIELDS
#undef F

#define F(t, n) static struct dentry *dbg_g_ ## n;
DBG_GLOBAL_FIELDS
#undef F

#define G(t, n, i) static struct dentry *dbg_ge_ ## n ## _ ## i;
DBG_GLOBAL_ENT
#undef G

static struct dentry *dbg_all;

#define PORT_LIST \
	E(u32, ts_rx_unsync) \
	E(u32, ts_tx_unsync) \
	E(u32, ts_started) \
	E(u32, ts_allocated) \
	E(unsigned, err) \
	E(u32, port_rx_ent)

struct trucs {
#define E(t,n) t n;
	PORT_LIST
#undef E
};

#define GLOB_LIST \
	E(u32, rxq_empty) \
	E(u32, global_count) \
	E(u32, invalid_channels) \
	E(u32, rxq_entries) \
	E(u32, ref_ts_not_started)

struct trucs_globaux {
#define E(t,n) t n;
	GLOB_LIST
#undef E
};

static int dbg_all_show(struct seq_file *f, void *_d)
{
	static DEFINE_SPINLOCK(atom_lock);	/* gruik hack */

	/* some non-trivial allocations on stack */
	struct trucs deux[2];
	struct trucs_globaux blop;
	u32 levels_copy[5];

	int i;
	unsigned long flags;

	spin_lock_irqsave(&atom_lock, flags);	/* gruik hack */
	for (i = 0; i < 2; i++) {
#define E(t,n) deux[i].n = xivo_tdm_ports[i].n;
		PORT_LIST
#undef E
	}
	memcpy(levels_copy, levels, sizeof levels_copy);
#define E(t,n) blop.n = n;
	GLOB_LIST
#undef E
	spin_unlock_irqrestore(&atom_lock, flags);	/* gruik hack */

	/* DISPLAY */
#define E(t,n) seq_printf(f, "%s: %u\n", #n, blop.n);
	GLOB_LIST
#undef E
	seq_printf(f, "\n");
	for (i = 0; i < (int)ARRAY_SIZE(levels_copy); i++)
		seq_printf(f, "levels[%d]: %u\n", i, levels_copy[i]);
	seq_printf(f, "\n");
	for (i = 0; i < 2; i++) {
		seq_printf(f, "PORT %d:\n", i);
#define E(t,n) seq_printf(f, " %s: %u\n", #n, deux[i].n);
		PORT_LIST
#undef E
		seq_printf(f, "\n");
	}
	seq_printf(f, "LOOOOOOOOOOL\n");

	return 0;
}

static int dbg_all_open(struct inode *i, struct file *f)
{
	return single_open(f, dbg_all_show, NULL);
}

static const struct file_operations dbg_all_fops = {
	.owner		= THIS_MODULE,
	.open		= dbg_all_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#endif /* DEBUGFS_MY_STUFF */

static int __init xivo_tdm_init(void)
{
	int rc;

#ifdef DEBUGFS_MY_STUFF
	dbg_dir = debugfs_create_dir("xivo_tdm", NULL);
	if (dbg_dir) {
#define F(t, n) dbg_1_ ## n = debugfs_create_ ## t (	\
				"1_" #n,		\
				0444,			\
				dbg_dir,		\
				(t *) &xivo_tdm_ports[0].n);
		DBG_FIELDS
#undef F
#define F(t, n) dbg_2_ ## n = debugfs_create_ ## t (	\
				"2_" #n,		\
				0444,			\
				dbg_dir,		\
				(t *) &xivo_tdm_ports[1].n);
		DBG_FIELDS
#undef F
#define F(t, n) dbg_g_ ## n = debugfs_create_ ## t (	\
				#n,			\
				0444,			\
				dbg_dir,		\
				&n);
		DBG_GLOBAL_FIELDS
#undef F
#define G(t, n, i) dbg_ge_ ## n ## _ ## i =		\
			debugfs_create_ ## t (		\
				#n "_" #i,		\
				0444,			\
				dbg_dir,		\
				&n[i]);
		DBG_GLOBAL_ENT
#undef G
		dbg_all = debugfs_create_file("zoby", 0444, dbg_dir, NULL,
					      &dbg_all_fops);
	}
#endif /* DEBUGFS_MY_STUFF */

	if ((rc = xivo_internal_init()) < 0)
		return rc;

	if ((rc = xivo_icp_Hss_init()) < 0) {
		xivo_internal_cleanup();
		return rc;
	}

	printk(KERN_NOTICE "%s DONE\n", __func__);
	return 0;
}
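
/*
 * Init order above matters: debugfs entries are created first (a failure is
 * simply ignored), then xivo_internal_init() prepares the static state, and
 * only then does xivo_icp_Hss_init() register the hardirq trigger handler,
 * so the handler cannot run before the RX buffer table and the queue
 * counters exist.
 */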
static void __exit xivo_tdm_exit(void)
{
	printk(KERN_ERR "BUGBUG WARNING MEMORY LEAK YOU'LL DIE and probably "
			"crash too (triggered by module unload)\n");

	tdm_register_trigger_hardirq_handler(NULL);
	msleep(10);	// BUGBUG racy hack

	xivo_internal_cleanup();

#ifdef DEBUGFS_MY_STUFF
	if (dbg_dir) {
#define F(t, n) debugfs_remove(dbg_1_ ## n);
		DBG_FIELDS
#undef F
#define F(t, n) debugfs_remove(dbg_2_ ## n);
		DBG_FIELDS
#undef F
#define F(t, n) debugfs_remove(dbg_g_ ## n);
		DBG_GLOBAL_FIELDS
#undef F
#define G(t, n, i) debugfs_remove(dbg_ge_ ## n ## _ ## i);
		DBG_GLOBAL_ENT
#undef G
		debugfs_remove(dbg_all);
		debugfs_remove(dbg_dir);
	}
#endif /* DEBUGFS_MY_STUFF */
}

module_init(xivo_tdm_init);
module_exit(xivo_tdm_exit);