author    Noe Rubinstein <nrubinstein@proformatique.com>  2010-12-24 15:41:18 +0100
committer Noe Rubinstein <nrubinstein@proformatique.com>  2010-12-24 15:41:18 +0100
commit    85fd3be00b20552f8c8785f93e4dd25fe4669be6 (patch)
tree      8b92d4fb6d37b2f31733cf0fccd1902e2d19ed4b /tdm/xivo_tdm.c
parent    d3abe0006137f4c626e1ad27fe3eb76f9eacdcf8 (diff)
update TDM driver with xilun's changes
Diffstat (limited to 'tdm/xivo_tdm.c')
-rw-r--r--  tdm/xivo_tdm.c | 1103
1 file changed, 842 insertions(+), 261 deletions(-)
diff --git a/tdm/xivo_tdm.c b/tdm/xivo_tdm.c
index 151454b..718ce6e 100644
--- a/tdm/xivo_tdm.c
+++ b/tdm/xivo_tdm.c
@@ -4,22 +4,71 @@
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
+#include <linux/compiler.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/system.h>
+#include <asm/page.h>
+
+
+#define DBG_TRACE_ISR
+// #undef DBG_TRACE_ISR
+
+#ifdef DBG_TRACE_ISR
+#include <asm/msr.h> // rdtscl
+static ulong tstamp;
+static int dump_count;
+module_param(dump_count, int, 0666);
+MODULE_PARM_DESC(dump_count, "number of traces to emit "
+ "(can be reset at runtime via /sys)");
+#define DBG_TRACE(...) \
+ do { \
+ if (dump_count-- > 0) { \
+ printk(KERN_ERR __VA_ARGS__); \
+ } \
+ } while (0)
+#else
+#define DBG_TRACE(...) /* nothing */
+#endif /* DBG_TRACE_ISR */
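
For context: dump_count is a self-decrementing budget, so the knob can be re-armed while the module is loaded. A minimal usage sketch (the sysfs path follows from module_param() semantics; it is not spelled out in the patch):

    /* each expansion consumes one unit of the budget: */
    DBG_TRACE("-- rx -- %lu\n", delta_tsc);

    /* re-arm a burst of 100 traces from user space:
     *   echo 100 > /sys/module/xivo_tdm/parameters/dump_count
     */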
+
#include <IxOsal.h>
#include <icp.h>
#include <icp_hssdrv_common.h>
+#include <icp_tdmsetupdrv.h>
+
+// here starts the hacking festival...
+#include <IxQMgr.h>
+#include <icp_hssacc_queues_config.h> // HssAccQueueIdGet
+
+#include <icp_hssacc_common.h> // ICP_HSSACC_MAX_NUM_CHANNELS
+
+
+#define DEBUGFS_MY_STUFF
+// #undef DEBUGFS_MY_STUFF
+
+#ifdef DEBUGFS_MY_STUFF
+# include <linux/debugfs.h>
+#endif /* DEBUGFS_MY_STUFF */
+
#include "xivo_tdm_api.h"
+#include "xivo_ep80579_p.h"
+
MODULE_LICENSE("GPL");
+/* can be tuned */
#define MIN_HSS_PORTS_WANTED (2)
#define MIN_HSS_CHANS_WANTED (10)
-#define TDM_SAMPLE_SIZE_FOR_DAHDI (8)
-
+/* fixed */
#define RX_BUF_NUM (2048)
/* for Linux < 2.6.19; WARNING: DO NOT USE IN INTERFACES IN THIS CASE!
@@ -32,96 +81,107 @@ enum { false = 0, true = 1 };
struct xivo_tdm_callbacks_struct {
- /* all contexts including irq */
- void (*tick)(void *data);
- void *tick_data;
-
- /* process context */
+ /* process context only */
void (*port0_started)(void *data);
void *port0_started_data;
+
+ /* process context only */
+ void (*port0_configured)(void *data);
+ void *port0_configured_data;
};
static struct xivo_tdm_callbacks_struct xivo_tdm_callbacks;
-static void xivo_tdm_audio_work_func(void *data);
-static DECLARE_WORK(xivo_tdm_audio_work, xivo_tdm_audio_work_func, NULL);
+#define DATA_QUEUE_DEPTH_LOG2 (2)
+#define DATA_QUEUE_DEPTH (1u << DATA_QUEUE_DEPTH_LOG2)
+#define DATA_QUEUE_MASK (DATA_QUEUE_DEPTH - 1)
+
+/* The multiplication by 2 is for Rx and Tx */
+#define DATA_CH_SIZE (2 * DATA_QUEUE_DEPTH * XIVO_TDM_VOICE_CHUNK_SZ)
+
+/* our local counters are coded on one byte: */
+#define LOCAL_CPT_MASK (0xffu)
+
+struct local_cpt {
+ u8 tail;
+ u8 head;
+};
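
The u8 head/tail pair relies on unsigned wraparound: a fill level computed as (head - tail) & LOCAL_CPT_MASK stays correct across the 0xff -> 0x00 rollover as long as the real fill never exceeds 255. A worked sketch (values are hypothetical, not from the driver):

    struct local_cpt c = { .tail = 0xfe, .head = 0x01 }; /* head has wrapped */
    unsigned level = (c.head - c.tail) & LOCAL_CPT_MASK;
    /* 0x01 - 0xfe == -253; masked with 0xff that is 3 entries in flight */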
struct xivo_tdm_port {
int port_num;
bool allocated;
- u32 ts_allocated;
+ u32 ts_rx_unsync;
+ u32 ts_tx_unsync;
u32 ts_started;
- unsigned int hss_chan_ids[XIVO_TDM_TS_NUM];
+ u32 ts_allocated;
- spinlock_t lock;
+ u8 *data_zone;
+ unsigned data_zone_order;
- /* WARNING: this file is a big hack, coherency of following chunks is
- * not guaranteed. Blame Intel for their shitty nearly unusable code.
- */
- u8 tx_bufs[XIVO_TDM_TS_NUM][8];
- u8 rx_bufs[XIVO_TDM_TS_NUM][8];
+ unsigned err;
- unsigned tx_errs;
-};
+ unsigned int hss_chan_ids[XIVO_TDM_TS_NUM] ____cacheline_aligned;
+ u8 **readchunk_ptrs[XIVO_TDM_TS_NUM];
+ u8 **writechunk_ptrs[XIVO_TDM_TS_NUM];
-static struct xivo_tdm_port xivo_tdm_ports[MIN_HSS_PORTS_WANTED];
+ struct local_cpt tx_data_cpt[XIVO_TDM_TS_NUM];
+ struct local_cpt rx_data_cpt[XIVO_TDM_TS_NUM];
-static DECLARE_MUTEX(xivo_tdm_mutex);
-static struct workqueue_struct *xivo_tdm_wq;
+ void (*txrx)(void *data);
+ void *txrx_data;
-/* INTERNAL FUNCS */
+ spinlock_t lock ____cacheline_aligned;
-/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! USER CONTEXT ONLY !!!!!!!!!!!!!!!! */
-static IX_OSAL_MBUF *alloc_init_osal_buf(
- unsigned int data_buf_length)
-{
- IX_OSAL_MBUF *osal_buf;
- char *data_buf;
+ u32 port_rx_ent;
- osal_buf = kmalloc(sizeof(IX_OSAL_MBUF), GFP_KERNEL);
- if (!osal_buf)
- return NULL;
+ u8 _pad[0x7c]; /* so that the size is a nice 0x400 bytes */
+} ____cacheline_aligned;
- data_buf = kmalloc(data_buf_length, GFP_KERNEL);
- if (!data_buf) {
- kfree(osal_buf);
- return NULL;
- }
+static struct xivo_tdm_port xivo_tdm_ports[MIN_HSS_PORTS_WANTED] __cacheline_aligned;
- IX_OSAL_MBUF_ALLOCATED_BUFF_LEN(osal_buf) = data_buf_length;
+static u32 levels[5];
+static u32 invalid_channels;
+static u32 global_count;
+static u32 rxq_empty;
+static u32 rxq_entries;
+static u32 ref_ts_not_started;
- /*Fill in the allocated size of the data buf */
- IX_OSAL_MBUF_ALLOCATED_BUFF_LEN(osal_buf) = data_buf_length;
- IX_OSAL_MBUF_MLEN(osal_buf) = data_buf_length;
+static DECLARE_MUTEX(xivo_tdm_mutex);
- IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(osal_buf) = NULL;
- IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(osal_buf) = NULL;
+/* port/ts by icp channel id */
+#define XIVO_TDM_PORT_TS_FREE 0xFF
+struct xivo_tdm_port_ts {
+ u8 port; // port number or XIVO_TDM_PORT_TS_FREE
+ u8 ts; // timeslot number or XIVO_TDM_PORT_TS_FREE
+};
+static struct xivo_tdm_port_ts port_ts[ICP_HSSACC_MAX_NUM_CHANNELS];
- /*Attach the data buf to the OSAL buf */
- IX_OSAL_MBUF_MDATA(osal_buf) = data_buf;
+#if XIVO_TDM_VOICE_CHUNK_SZ > 32
+ #error XIVO_TDM_VOICE_CHUNK_SZ > 32: unsupported
+#else
+struct xivo_tdm_rx_buf {
+ struct hss_piu_entry piu_entry;
+ u8 data[32];
+} __attribute__ ((aligned (64)));
+#endif
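
The aligned(64) attribute pads each struct xivo_tdm_rx_buf to whole cache lines, so a PIU entry and its data never share a line with a neighbouring buffer. A compile-time guard one could add in an init function (a sketch, not in the patch; BUILD_BUG_ON from <linux/kernel.h> exists in this kernel era):

    BUILD_BUG_ON(sizeof(struct xivo_tdm_rx_buf) % 64 != 0);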
- return osal_buf;
-}
+static unsigned xivo_tdm_rx_buf_table_order;
+static struct xivo_tdm_rx_buf *xivo_tdm_rx_buf_table; /* RX_BUF_NUM entries */
-static void free_osal_buf(
- IX_OSAL_MBUF *osal_buf)
-{
- if (osal_buf) {
- char *data_buf = IX_OSAL_MBUF_MDATA(osal_buf);
- kfree(data_buf);
- kfree(osal_buf);
- }
-}
+/* INTERNAL FUNCS */
static void dealloc_one_chan(
struct xivo_tdm_port* xtp,
const int cnum)
{
- icp_status_t status = icp_HssAccChannelDelete(xtp->hss_chan_ids[cnum]);
+ const u32 chid = xtp->hss_chan_ids[cnum];
+ icp_status_t status = icp_HssAccChannelDelete(chid);
if (status != ICP_STATUS_SUCCESS)
printk(KERN_ERR "%s: icp_HssAccChannelDelete returned "
"error %d\n", __func__, (int) status);
+ port_ts[chid].port = XIVO_TDM_PORT_TS_FREE;
+ port_ts[chid].ts = XIVO_TDM_PORT_TS_FREE;
xtp->ts_allocated &= ~(1u << cnum);
xtp->hss_chan_ids[cnum] = 0;
}
@@ -130,6 +190,7 @@ static int add_one_chan(
struct xivo_tdm_port* xtp,
const int cnum)
{
+ u32 chid;
icp_status_t status;
icp_hssacc_timeslot_map_t ts_map = {0};
@@ -152,8 +213,12 @@ static int add_one_chan(
return -EIO;
}
+ chid = xtp->hss_chan_ids[cnum];
+ port_ts[chid].port = xtp->port_num;
+ port_ts[chid].ts = cnum;
+
status = icp_HssAccChannelConfigure(
- xtp->hss_chan_ids[cnum],
+ chid,
/* channelDataInvert */ 0,
/* channelBitEndianness */ 1, /* MSb first */
/* channelByteSwap */ 0,
@@ -166,10 +231,10 @@ static int add_one_chan(
}
status = icp_HssAccChannelVoiceServiceConfigure(
- xtp->hss_chan_ids[cnum],
+ chid,
ICP_HSSACC_VOICE_TX_IDLE_REPEAT_LAST_FRAME,
0xD5,
- TDM_SAMPLE_SIZE_FOR_DAHDI);
+ XIVO_TDM_VOICE_CHUNK_SZ);
if (status != ICP_STATUS_SUCCESS) {
printk(KERN_ERR "%s: icp_HssAccChannelVoiceServiceConfigure "
"returned error %d\n", __func__, (int) status);
@@ -182,86 +247,6 @@ static int add_one_chan(
return 0;
}
-static int xivo_tdm_common_rx(
- struct xivo_tdm_port* xtp,
- const unsigned int cnum,
- u8 *rx_buf)
-{
- IX_OSAL_MBUF *osal_buf = NULL;
- icp_status_t status;
- int nb_read = 0;
- const unsigned int hss_chan_id = xtp->hss_chan_ids[cnum];
-
- while (1) {
- char *data_buf;
- status = icp_HssAccReceive(hss_chan_id, &osal_buf);
- if (status != ICP_STATUS_SUCCESS || osal_buf == NULL)
- return nb_read;
- nb_read++;
- data_buf = IX_OSAL_MBUF_MDATA(osal_buf);
- if (likely(rx_buf))
- memcpy(rx_buf, data_buf, 8 /* XXX is this right? */ );
- IX_OSAL_MBUF_MLEN(osal_buf) = IX_OSAL_MBUF_ALLOCATED_BUFF_LEN(osal_buf);
- status = icp_HssAccRxFreeReplenish(ICP_HSSACC_CHAN_TYPE_VOICE, osal_buf);
- if (status != ICP_STATUS_SUCCESS) {
- if (printk_ratelimit())
- printk(KERN_ERR "%s: icp_HssAccRxFreeReplenish "
- "err %d cnum %u\n",
- __func__, (int) status, cnum);
- free_osal_buf(osal_buf);
- }
- osal_buf = NULL;
- }
-}
-
-static void xivo_tdm_common_tx(
- struct xivo_tdm_port* xtp,
- const unsigned int cnum,
- const u8 *tx_buf)
-{
- IX_OSAL_MBUF *osal_buf = NULL;
- icp_status_t status;
- const unsigned int hss_chan_id = xtp->hss_chan_ids[cnum];
-
- status = icp_HssAccTxDoneRetrieve(hss_chan_id, &osal_buf);
- if (status != ICP_STATUS_SUCCESS) {
- if (status == ICP_STATUS_UNDERFLOW) {
- osal_buf = alloc_init_osal_buf(TDM_SAMPLE_SIZE_FOR_DAHDI);
- if (!osal_buf && printk_ratelimit())
- printk(KERN_ERR
- "%s: osal_buf alloc failed - cnum %u\n",
- __func__, cnum);
- } else {
- osal_buf = NULL;
- if (printk_ratelimit())
- printk(KERN_ERR "%s: icp_HssAccTxDoneRetrieve "
- "err %d cnum %u\n",
- __func__, (int) status, cnum);
- }
- }
-
- if (!osal_buf)
- return;
-
- if (likely(tx_buf))
- memcpy(IX_OSAL_MBUF_MDATA(osal_buf), tx_buf, 8);
- IX_OSAL_MBUF_MLEN(osal_buf) = 8;
- IX_OSAL_MBUF_PKT_LEN(osal_buf) = 8;
-
- status = icp_HssAccTransmit(hss_chan_id, osal_buf);
- if (status != ICP_STATUS_SUCCESS) {
- xtp->tx_errs++;
- if (printk_ratelimit())
- printk("%s%s: icp_HssAccTransmit err %d cnum %u "
- "tx_errs %u\n",
- status == ICP_STATUS_OVERFLOW ?
- KERN_DEBUG :
- KERN_ERR,
- __func__, (int) status, cnum, xtp->tx_errs);
- free_osal_buf(osal_buf);
- }
-}
-
static int start_one_chan(
struct xivo_tdm_port* xtp,
const int cnum)
@@ -296,8 +281,6 @@ static int stop_one_chan(
xtp->ts_started &= ~(1u << cnum);
spin_unlock_irqrestore(&xtp->lock, flags);
- flush_workqueue(xivo_tdm_wq);
-
status = icp_HssAccChannelDown(hss_chan_id);
if (status != ICP_STATUS_SUCCESS) {
printk(KERN_ERR "%s: icp_HssAccChannelDown returned %d\n",
@@ -383,53 +366,6 @@ static void xivo_hss_error_cb(
}
}
-static int xivo_tdm_deferred_receive(
- struct xivo_tdm_port* xtp,
- const unsigned int cnum)
-{
- u8 *rx_buf = xtp->rx_bufs[cnum];
- return xivo_tdm_common_rx(xtp, cnum, rx_buf);
-}
-
-static void xivo_tdm_deferred_transmit(
- struct xivo_tdm_port* xtp,
- const unsigned int cnum)
-{
- const u8 *tx_buf = xtp->tx_bufs[cnum];
- xivo_tdm_common_tx(xtp, cnum, tx_buf);
-}
-
-static void xivo_tdm_work_port(struct xivo_tdm_port *xtp)
-{
- unsigned long flags;
- u32 ts_started;
- u32 scan;
- int cnum;
-
- spin_lock_irqsave(&xtp->lock, flags);
- ts_started = xtp->ts_started;
- spin_unlock_irqrestore(&xtp->lock, flags);
-
- for (cnum = 0, scan = 1; scan; scan <<= 1, cnum++) {
- if (ts_started & scan) {
- /* XXX use ret value: */
- (void) xivo_tdm_deferred_receive(xtp, cnum);
- xivo_tdm_deferred_transmit(xtp, cnum);
- }
- }
-}
-
-static void xivo_tdm_audio_work_func(void *data)
-{
- int i;
-
- (void) data;
-
- for (i = 0; i < (int)ARRAY_SIZE(xivo_tdm_ports); i++) {
- xivo_tdm_work_port(&xivo_tdm_ports[i]);
- }
-}
-
/* EXPORTED FUNCS */
@@ -531,6 +467,12 @@ int xivo_tdm_config_port(
return -EIO;
}
+ if (xtp->port_num == 0 && xivo_tdm_callbacks.port0_configured) {
+ msleep(3);
+ xivo_tdm_callbacks.port0_configured(
+ xivo_tdm_callbacks.port0_configured_data);
+ }
+
return 0;
}
EXPORT_SYMBOL(xivo_tdm_config_port);
@@ -551,32 +493,62 @@ void xivo_tdm_register_port0_started(
}
EXPORT_SYMBOL(xivo_tdm_register_port0_started);
-int xivo_tdm_start_chans(
+void xivo_tdm_register_port0_configured(
struct xivo_tdm_port* xtp,
- const u32 chans,
- struct xivo_tdm_tick_cb_struct *tick_cb)
+ void (*port0_configured)(void *),
+ void *port0_configured_data)
{
unsigned long flags;
- u32 scan;
+
+ (void) xtp;
+
+ spin_lock_irqsave(&xivo_tdm_ports[0].lock, flags);
+ xivo_tdm_callbacks.port0_configured = port0_configured;
+ xivo_tdm_callbacks.port0_configured_data = port0_configured_data;
+ spin_unlock_irqrestore(&xivo_tdm_ports[0].lock, flags);
+}
+EXPORT_SYMBOL(xivo_tdm_register_port0_configured);
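
A sketch of how a consumer module might use this hook (all consumer-side names are hypothetical, not part of this patch):

    static void my_port0_configured(void *data)
    {
            struct my_span *span = data; /* hypothetical private state */
            /* process context only: blocking bring-up work is safe here */
    }

    /* at init time, after obtaining a struct xivo_tdm_port *xtp: */
    xivo_tdm_register_port0_configured(xtp, my_port0_configured, span);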
+
+int xivo_tdm_start_chans(
+ struct xivo_tdm_port* const xtp,
+ u8 ** const readchunk_ptrs[XIVO_TDM_TS_NUM],
+ u8 ** const writechunk_ptrs[XIVO_TDM_TS_NUM],
+ void (*txrx)(void *),
+ void *txrx_data)
+{
int cnum;
+ int past_last_ch = 0;
- for (cnum = 0, scan = 1; scan; scan <<= 1, cnum++) {
- if (scan & chans) {
+ memcpy(xtp->readchunk_ptrs, readchunk_ptrs,
+ sizeof xtp->readchunk_ptrs);
+
+ memcpy(xtp->writechunk_ptrs, writechunk_ptrs,
+ sizeof xtp->writechunk_ptrs);
+
+ xtp->txrx_data = txrx_data;
+
+ smp_wmb();
+ xtp->txrx = txrx;
+
+ for (cnum = 0; cnum < XIVO_TDM_TS_NUM; cnum++) {
+ if (xtp->readchunk_ptrs[cnum] || xtp->writechunk_ptrs[cnum]) {
+ if (xtp->writechunk_ptrs[cnum])
+ past_last_ch = cnum + 1;
printk(KERN_INFO "%s: adding chan %d\n",
__func__, cnum);
/* XXX retval */ (void) add_one_chan(xtp, cnum);
}
}
- if (tick_cb) {
- spin_lock_irqsave(&xtp->lock, flags);
- xivo_tdm_callbacks.tick = tick_cb->tick;
- xivo_tdm_callbacks.tick_data = tick_cb->tick_data;
- spin_unlock_irqrestore(&xtp->lock, flags);
+ if (past_last_ch) {
+ xtp->data_zone_order = get_order(past_last_ch * DATA_CH_SIZE);
+ xtp->data_zone = (u8 *)__get_free_pages(
+ GFP_KERNEL | __GFP_ZERO,
+ xtp->data_zone_order);
}
- for (cnum = 0, scan = 1; scan; scan <<= 1, cnum++) {
- if (scan & chans) {
+ for (cnum = 0; cnum < XIVO_TDM_TS_NUM; cnum++) {
+ if (xtp->readchunk_ptrs[cnum] || xtp->writechunk_ptrs[cnum]) {
printk(KERN_INFO "%s: starting chan %d\n",
__func__, cnum);
/* XXX retval */ (void) start_one_chan(xtp, cnum);
@@ -615,44 +587,431 @@ void xivo_tdm_stop_chans(
}
EXPORT_SYMBOL(xivo_tdm_stop_chans);
-void xivo_tdm_receive(
- struct xivo_tdm_port* xtp,
- const unsigned int cnum,
- u8 samples[8])
+/* hacky sort of shutdown... */
+void xivo_tdm_shutdown(void)
{
- memcpy(samples, xtp->rx_bufs[cnum], 8);
+ tdm_register_trigger_hardirq_handler(NULL);
+ msleep(10); // BUGBUG racy hack
}
-EXPORT_SYMBOL(xivo_tdm_receive);
+EXPORT_SYMBOL(xivo_tdm_shutdown);
-void xivo_tdm_transmit(
- struct xivo_tdm_port* xtp,
- const unsigned int cnum,
- const u8 samples[8])
+
+/* HANDLER */
+
+typedef enum {
+ LOCAL_VOICE_RX_Q = 0,
+ LOCAL_VOICE_RX_FREE_Q,
+} local_rx_q_e;
+#define LOCAL_NUM_RX (LOCAL_VOICE_RX_FREE_Q + 1)
+
+static inline uint32_t hssacc_from_local_rx(local_rx_q_e lrxq)
{
- memcpy(xtp->tx_bufs[cnum], samples, 8);
+ switch (lrxq) {
+#define trans2hssacc(suffix) case LOCAL_ ## suffix : return ICP_HSSACC_ ## suffix ;
+ trans2hssacc(VOICE_RX_Q);
+ trans2hssacc(VOICE_RX_FREE_Q);
+#undef trans2hssacc
+ }
+ return 42; /* not reached: every local_rx_q_e value is handled above */
}
-EXPORT_SYMBOL(xivo_tdm_transmit);
-/* hardirq: yes -- softirq: yes -- user: yes */
-void xivo_tdm_tick(
- struct xivo_tdm_port* xtp)
+// typedef UINT32 IxQMgrQId;
+static IxQMgrQId local_rx_ixQ[LOCAL_NUM_RX];
+
+#define WORD_CPT_Q_MASK 0xFFFFu
+#define BYTE_CPT_Q_MASK 0xFFu
+
+
+#define CPT_PRINT_NB 0
+#define SPURIOUS_PRINT_NB 32
+
+/* used by xivo_tdm_trigger_handler (hardirq) and its children */
+static struct {
+ u32 wordHead;
+ u32 wordTail;
+} cpt[LOCAL_NUM_RX];
+
+static void xivo_tdm_load_counters(void)
+{
+ int i;
+ for (i = 0; i < LOCAL_NUM_RX; i++) {
+ IxQMgrQueue *info = &ixQMgrQueues[local_rx_ixQ[i]];
+ cpt[i].wordHead = *info->queue.wordHead; // volatile read
+ cpt[i].wordTail = *info->queue.wordTail; // volatile read
+ }
+}
+
+static void xivo_tdm_save_our_counters(void)
+{
+ IxQMgrQueue *vrxq_info = &ixQMgrQueues[local_rx_ixQ[LOCAL_VOICE_RX_Q]];
+ IxQMgrQueue *vrxfq_info = &ixQMgrQueues[
+ local_rx_ixQ[LOCAL_VOICE_RX_FREE_Q]];
+ *vrxq_info->queue.wordTail = cpt[LOCAL_VOICE_RX_Q].wordTail;
+ *vrxfq_info->queue.wordHead = cpt[LOCAL_VOICE_RX_FREE_Q].wordHead;
+}
+
+static void xivo_tdm_rxfq_replenish(
+ struct hss_piu_entry * const pe)
+{
+ IxQMgrQueue * const vrxfq_info =
+ &ixQMgrQueues[local_rx_ixQ[LOCAL_VOICE_RX_FREE_Q]];
+ if (likely(((cpt[LOCAL_VOICE_RX_FREE_Q].wordHead
+ - cpt[LOCAL_VOICE_RX_FREE_Q].wordTail) & WORD_CPT_Q_MASK)
+ < RX_BUF_NUM)) {
+ volatile struct hss_queue_entry *qe =
+ (volatile struct hss_queue_entry *)
+ &vrxfq_info->queue.content[
+ (cpt[LOCAL_VOICE_RX_FREE_Q].wordHead
+ & vrxfq_info->queue.mask) << 2];
+ qe->lsc = len_status_cid(XIVO_TDM_VOICE_CHUNK_SZ, 0, 0);
+ qe->data_ptr = virt_to_phys((void*)pe->q_entry.data_ptr);
+ qe->packet_len = 0;
+ qe->entry_ptr = virt_to_phys(pe);
+ cpt[LOCAL_VOICE_RX_FREE_Q].wordHead++;
+ } else {
+ static unsigned ml_num;
+ printk(KERN_ERR "ML %u\n", ++ml_num);
+ }
+}
+
+static inline unsigned local_rxq_level(unsigned port, unsigned ts)
+{
+ return (xivo_tdm_ports[port].rx_data_cpt[ts].head
+ - xivo_tdm_ports[port].rx_data_cpt[ts].tail)
+ & LOCAL_CPT_MASK;
+}
+
+static inline unsigned local_rxq_full(unsigned port, unsigned ts)
{
- if (xivo_tdm_callbacks.tick)
- xivo_tdm_callbacks.tick(xivo_tdm_callbacks.tick_data);
+ return ((xivo_tdm_ports[port].rx_data_cpt[ts].head
+ - xivo_tdm_ports[port].rx_data_cpt[ts].tail)
+ & DATA_QUEUE_DEPTH) >> DATA_QUEUE_DEPTH_LOG2;
+}
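
local_rxq_full() avoids a comparison: head and tail are kept at most DATA_QUEUE_DEPTH apart, so bit DATA_QUEUE_DEPTH_LOG2 of their difference is set exactly when the queue is full. With DEPTH == 4:

    /* head - tail == 3:  0b011 & 0b100 == 0       -> not full */
    /* head - tail == 4:  0b100 & 0b100 == 4, >> 2 -> 1, full  */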
+
+static inline u8 *local_rxq_data_head(unsigned port, unsigned ts)
+{
+ const unsigned head = xivo_tdm_ports[port].rx_data_cpt[ts].head
+ & DATA_QUEUE_MASK;
+ return xivo_tdm_ports[port].data_zone
+ + (ts * DATA_CH_SIZE)
+ + ((DATA_QUEUE_DEPTH + head) * XIVO_TDM_VOICE_CHUNK_SZ);
+}
+
+static inline u8 *local_rxq_data_tail(unsigned port, unsigned ts)
+{
+ const unsigned tail = xivo_tdm_ports[port].rx_data_cpt[ts].tail
+ & DATA_QUEUE_MASK;
+ return xivo_tdm_ports[port].data_zone
+ + (ts * DATA_CH_SIZE)
+ + ((DATA_QUEUE_DEPTH + tail) * XIVO_TDM_VOICE_CHUNK_SZ);
+}
+
+static inline u8 *local_txq_data_head(unsigned port, unsigned ts)
+{
+ const unsigned head = xivo_tdm_ports[port].tx_data_cpt[ts].head
+ & DATA_QUEUE_MASK;
+ return xivo_tdm_ports[port].data_zone
+ + (ts * DATA_CH_SIZE)
+ + (head * XIVO_TDM_VOICE_CHUNK_SZ);
+}
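
Taken together, the three helpers above imply the data_zone layout (a reading aid, not new behaviour): each timeslot owns DATA_CH_SIZE bytes, TX chunks first, RX chunks second.

    /* per-ts block, slots picked modulo DATA_QUEUE_DEPTH:
     *   ts*DATA_CH_SIZE + 0*CHUNK_SZ     .. (DEPTH-1)*CHUNK_SZ    TX chunks
     *   ts*DATA_CH_SIZE + DEPTH*CHUNK_SZ .. (2*DEPTH-1)*CHUNK_SZ  RX chunks
     */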
+
+static inline unsigned local_txq_level(unsigned port, unsigned ts)
+{
+ return (xivo_tdm_ports[port].tx_data_cpt[ts].head
+ - xivo_tdm_ports[port].tx_data_cpt[ts].tail)
+ & LOCAL_CPT_MASK;
+}
+
+static inline unsigned xivo_tdm_qid(unsigned port, unsigned ts)
+{
+ return HssAccQueueIdGet(xivo_tdm_ports[port].hss_chan_ids[ts]);
+}
+
+static inline bool xivo_tdm_is_reference_channel(unsigned port, unsigned ts)
+{
+ /* specific to our current timeslots allocation */
+ return port == ts;
+}
+
+/*
+ * NOTE: the EP80579 has an IA-32 core (a Pentium M).
+ *
+ * It seems that the memory model for WB regions is as described under.
+ * (This is the standard published memory model of IA-32 as of 2010/11/16.
+ * At this date, it has not changed for more than 2 years and is used as
+ * such in Linux.)
+ *
+ *
+ * Load then Load: in-order
+ *
+ * Load then Store: in-order
+ *
+ * Store then Load: OUT OF ORDER (the store can be delayed,
+ * living for a while in a write buffer)
+ * Store then Store: in-order
+ *
+ *
+ * Also see post and whole thread: http://lkml.org/lkml/2007/10/12/211
+ *
+ *
+ * The only problem that could remain would be that the memory model for
+ * communication between the PIU and the IA-32 core through the queues in WB
+ * memory indeed does not follow the IA-32 memory model, which is IMHO unlikely.
+ */
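
Concretely, this is the pairing the driver relies on: xivo_tdm_start_chans() publishes the chunk pointers and callback data before the txrx pointer, and the hardirq side reads in the opposite order. Condensed from the code in this patch:

    /* producer, process context (xivo_tdm_start_chans): */
    xtp->txrx_data = txrx_data;  /* payload first...              */
    smp_wmb();
    xtp->txrx = txrx;            /* ...then the published pointer */

    /* consumer, hardirq (xivo_tdm_port_txrx): */
    txrx = xivo_tdm_ports[port].txrx;
    if (txrx) {
            smp_rmb();           /* pairs with the smp_wmb() above */
            txrx(xivo_tdm_ports[port].txrx_data);
    }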
+
+static int xivo_tdm_port_txrx(
+ const unsigned port,
+ const int tick)
+{
+ void (*txrx)(void *);
+ int ts;
+ u32 ts_tx_enqueue = 0;
+
+ if (!(xivo_tdm_ports[port].ts_started & (1u << port))) // XXX hack
+ return 0;
+
+ for (ts = 0; ts < XIVO_TDM_TS_NUM; ts++) {
+ int fill, low;
+ u8 ** readchunk_ptr;
+ u8 ** writechunk_ptr;
+ unsigned qid;
+ u32 num_entries = 0;
+
+ if (!(xivo_tdm_ports[port].ts_started & (1u << ts)))
+ continue;
+
+ readchunk_ptr = xivo_tdm_ports[port].readchunk_ptrs[ts];
+ if (readchunk_ptr) {
+ fill = (int)local_rxq_level(port, ts);
+ low = (-((xivo_tdm_ports[port].ts_rx_unsync >> ts) & 1))
+ & tick;
+ if (likely(fill > low)) {
+ *readchunk_ptr = local_rxq_data_tail(port, ts);
+ xivo_tdm_ports[port].rx_data_cpt[ts].tail++;
+ xivo_tdm_ports[port].ts_rx_unsync &= ~(1u<<ts);
+ } else
+ xivo_tdm_ports[port].ts_rx_unsync |= (1u<<ts);
+ }
+
+ writechunk_ptr = xivo_tdm_ports[port].writechunk_ptrs[ts];
+ if (writechunk_ptr) {
+ qid = xivo_tdm_qid(port, ts);
+ if (unlikely(qid >= IX_QMGR_MAX_NUM_QUEUES))
+ panic("WHERE IS MY QUEUE GRRRRRR!\n");
+
+ /* can't fail: */
+ (void)ixQMgrShadowDeltaGet(qid,
+ IX_QMGR_Q_SHADOW_TAIL_ONLY,
+ &num_entries);
+ /* can't fail because we just read num_entries, and the
+ * delta can't decrease behind our back: */
+ (void)ixQMgrShadowAdvance(qid,
+ IX_QMGR_Q_SHADOW_TAIL_ONLY,
+ num_entries);
+
+ if (unlikely(num_entries > local_txq_level(port, ts))) {
+ printk(KERN_CRIT
+ "port %d ts %d: mismatch "
+ "local/PIU TXQ!!!\n",
+ port, ts);
+ xivo_tdm_ports[port].tx_data_cpt[ts].tail =
+ xivo_tdm_ports[port]
+ .tx_data_cpt[ts].head;
+ } else
+ xivo_tdm_ports[port].tx_data_cpt[ts].tail +=
+ num_entries;
+
+ fill = (int)local_txq_level(port, ts);
+ if (fill == 0) /* underflow */
+ xivo_tdm_ports[port].ts_tx_unsync |= (1u << ts);
+
+ if (fill <= 3 - tick) { // XXX X-Ref 3
+ if (xivo_tdm_ports[port].ts_tx_unsync
+ & (1u << ts))
+ xivo_tdm_ports[port]
+ .tx_data_cpt[ts].head += 3 - tick;
+ *writechunk_ptr = local_txq_data_head(port, ts);
+ xivo_tdm_ports[port].tx_data_cpt[ts].head++;
+ ts_tx_enqueue |= (1u << ts);
+ }
+ }
+ }
+
+ txrx = xivo_tdm_ports[port].txrx;
+ if (txrx) {
+ smp_rmb();
+ txrx(xivo_tdm_ports[port].txrx_data);
+ }
+
+ for (ts = 0; ts < XIVO_TDM_TS_NUM; ts++) {
+ IxQMgrQueue *txq_info;
+ unsigned chid;
+ unsigned qid;
+ int suppl_sync;
+ u8 ** const writechunk_ptr =
+ xivo_tdm_ports[port].writechunk_ptrs[ts];
+
+ if (!writechunk_ptr
+ || unlikely(!(ts_tx_enqueue & (1u << ts))))
+ continue;
+
+ chid = xivo_tdm_ports[port].hss_chan_ids[ts];
+ qid = xivo_tdm_qid(port, ts);
+ if (unlikely(qid >= IX_QMGR_MAX_NUM_QUEUES))
+ panic("WHERE IS MY QUEUE GRRRRRR!\n");
+ txq_info = &ixQMgrQueues[qid];
+
+ suppl_sync = 0;
+ if (unlikely((xivo_tdm_ports[port].ts_tx_unsync) & (1u << ts))){
+ suppl_sync = 3 - tick; // XXX X-Ref 3
+ xivo_tdm_ports[port].ts_tx_unsync &= ~(1u << ts);
+ }
+
+ do {
+ volatile struct hss_queue_entry * const qe =
+ (volatile struct hss_queue_entry *)
+ &txq_info->queue.content[
+ (*txq_info->queue.byteHead
+ & txq_info->queue.mask) << 2];
+ qe->lsc = len_status_cid(XIVO_TDM_VOICE_CHUNK_SZ,
+ 0, chid);
+ qe->data_ptr = virt_to_phys(*writechunk_ptr);
+ qe->packet_len = 0;
+ qe->entry_ptr = 0;
+ (*txq_info->queue.byteHead)++; // volatile real counter
+ } while (unlikely(suppl_sync-- > 0));
+ }
+
+ // XXX hack:
+ return (int)local_rxq_level(port, port) - 1 > 0;
+}
+
+/* hardirq */
+void xivo_tdm_trigger_handler(void)
+{
+ int port, ts, tick;
+ int again;
+ IxQMgrQueue * const vrxq_info =
+ &ixQMgrQueues[local_rx_ixQ[LOCAL_VOICE_RX_Q]];
+#ifdef DBG_TRACE_ISR
+ ulong tsc;
+ ulong delta_tsc;
+ rdtscl(tsc);
+ delta_tsc = tsc - tstamp;
+ tstamp = tsc;
+#endif /* DBG_TRACE_ISR */
+
+ xivo_tdm_load_counters();
+
+#define RXQ_EMPTY \
+ (!((cpt[LOCAL_VOICE_RX_Q].wordHead \
+ - cpt[LOCAL_VOICE_RX_Q].wordTail) & WORD_CPT_Q_MASK))
+
+ global_count++;
+
+ if (unlikely(RXQ_EMPTY)) {
+ DBG_TRACE("-- empty -- %lu\n", delta_tsc);
+ rxq_empty++;
+ return;
+ } else {
+ DBG_TRACE("-- rx -- %lu\n", delta_tsc);
+ }
+
+ do {
+ const u32 rx_entry = vrxq_info->queue.content[ // volatile
+ (cpt[LOCAL_VOICE_RX_Q].wordTail++)
+ & (RX_BUF_NUM - 1)];
+ struct hss_piu_entry * const piu_entry =
+ phys_to_virt((unsigned long)rx_entry);
+ struct xivo_tdm_rx_buf * const rx_buf =
+ container_of(piu_entry,
+ struct xivo_tdm_rx_buf,
+ piu_entry);
+ const unsigned chid = HSS_PIU_ENTRY_CHANNEL_ID(piu_entry);
+ rxq_entries++;
+ if (likely(chid < ICP_HSSACC_MAX_NUM_CHANNELS
+ && port_ts[chid].port < MIN_HSS_PORTS_WANTED)) {
+
+ unsigned full;
+ u32 *dest;
+ u32 *src;
+
+ port = port_ts[chid].port;
+
+ xivo_tdm_ports[port].port_rx_ent++;
+
+ ts = port_ts[chid].ts;
+ full = local_rxq_full(port, ts);
+ dest = (u32 *)local_rxq_data_head(port, ts);
+ src = (u32 *)rx_buf->data;
+#if XIVO_TDM_VOICE_CHUNK_SZ == 8
+ dest[0] = src[0];
+ dest[1] = src[1];
+#else
+# error XIVO_TDM_VOICE_CHUNK_SZ != 8: update code
+#endif
+ xivo_tdm_ports[port].rx_data_cpt[ts].head++;
+ xivo_tdm_ports[port].rx_data_cpt[ts].tail += full;
+ xivo_tdm_ports[port].err += (full & ~(port ^ ts)); // XXX hack
+ } else {
+ invalid_channels++;
+ }
+
+ if (0) {
+ static int lol = 5000;
+ if (lol-- > 0)
+ printk(KERN_ERR "all glory to the "
+ "hypnotoad!\n");
+ }
+
+ xivo_tdm_rxfq_replenish(piu_entry);
+ } while (!RXQ_EMPTY);
+ xivo_tdm_save_our_counters();
+#undef RXQ_EMPTY
+
+ if (!(xivo_tdm_ports[0].ts_started & 1)) // XXX big hack
+ {
+ ref_ts_not_started++;
+ return;
+ }
- queue_work(xivo_tdm_wq, &xivo_tdm_audio_work);
+ tick = (int)local_rxq_level(0, 0);
+ if (tick < 0) tick = 0;
+ if (tick > 4) tick = 4;
+ levels[tick]++;
+
+ do {
+ again = 0;
+ tick = (int)local_rxq_level(0, 0) - 1; // XXX ultimate hack
+ if (tick <= 0)
+ return;
+ for (port = 0; port < MIN_HSS_PORTS_WANTED; port++)
+ again = xivo_tdm_port_txrx(port, tick);
+ } while (1);
}
-EXPORT_SYMBOL(xivo_tdm_tick);
/* INIT / CLEANUP */
static void xivo_internal_cleanup(void)
{
- if (xivo_tdm_wq) {
- /* XXX we should prevent queueing here */
- destroy_workqueue(xivo_tdm_wq);
- xivo_tdm_wq = NULL;
+ int port;
+
+ for (port = 0; port < (int)ARRAY_SIZE(xivo_tdm_ports); port++) {
+ if (xivo_tdm_ports[port].data_zone) {
+ free_pages((unsigned long)
+ xivo_tdm_ports[port].data_zone,
+ xivo_tdm_ports[port].data_zone_order);
+ xivo_tdm_ports[port].data_zone = NULL;
+ xivo_tdm_ports[port].data_zone_order = 0;
+ }
+ }
+
+ if (xivo_tdm_rx_buf_table) {
+ free_pages((unsigned long)xivo_tdm_rx_buf_table,
+ xivo_tdm_rx_buf_table_order);
+ xivo_tdm_rx_buf_table = NULL;
+ xivo_tdm_rx_buf_table_order = 0;
}
}
@@ -660,30 +1019,45 @@ static int __init xivo_internal_init(void)
{
int i;
- xivo_tdm_wq = create_singlethread_workqueue("xivo_tdm");
- if (!xivo_tdm_wq) {
- printk(KERN_ERR "%s: create_singlethread_worqueue "
- "returned NULL\n", __func__);
- return -ENOMEM;
- }
+ memset(port_ts, XIVO_TDM_PORT_TS_FREE, sizeof port_ts);
+
+ xivo_tdm_rx_buf_table_order =
+ get_order(sizeof (struct xivo_tdm_rx_buf) * RX_BUF_NUM);
+ xivo_tdm_rx_buf_table =
+ (struct xivo_tdm_rx_buf *)
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ xivo_tdm_rx_buf_table_order);
+ if (!xivo_tdm_rx_buf_table)
+ panic("ALL GLORY TO THE HYPNOTOAD!\n");
for (i = 0; i < (int)ARRAY_SIZE(xivo_tdm_ports); i++) {
xivo_tdm_ports[i].port_num = i;
xivo_tdm_ports[i].allocated = false;
- xivo_tdm_ports[i].ts_allocated = 0;
+ xivo_tdm_ports[i].ts_rx_unsync = 0xffffffff;
+ xivo_tdm_ports[i].ts_tx_unsync = 0xffffffff;
xivo_tdm_ports[i].ts_started = 0;
+ xivo_tdm_ports[i].ts_allocated = 0;
memset(xivo_tdm_ports[i].hss_chan_ids, 0,
sizeof xivo_tdm_ports[i].hss_chan_ids);
spin_lock_init(&xivo_tdm_ports[i].lock);
- memset(xivo_tdm_ports[i].tx_bufs, 0xD5,
- sizeof xivo_tdm_ports[i].tx_bufs);
- memset(xivo_tdm_ports[i].rx_bufs, 0xD5,
- sizeof xivo_tdm_ports[i].rx_bufs);
+ xivo_tdm_ports[i].data_zone = NULL;
+ xivo_tdm_ports[i].data_zone_order = 0;
+
+ memset(xivo_tdm_ports[i].readchunk_ptrs, 0,
+ sizeof xivo_tdm_ports[i].readchunk_ptrs);
+ memset(xivo_tdm_ports[i].writechunk_ptrs, 0,
+ sizeof xivo_tdm_ports[i].writechunk_ptrs);
- xivo_tdm_ports[i].tx_errs = 0;
+ memset(xivo_tdm_ports[i].tx_data_cpt, 0,
+ sizeof xivo_tdm_ports[i].tx_data_cpt);
+ memset(xivo_tdm_ports[i].rx_data_cpt, 0,
+ sizeof xivo_tdm_ports[i].rx_data_cpt);
+
+ xivo_tdm_ports[i].txrx = NULL;
+ xivo_tdm_ports[i].txrx_data = NULL;
}
return 0;
@@ -694,29 +1068,31 @@ static int __init xivo_icp_Hss_init(void)
icp_status_t status;
int hss_port_number;
int hss_chan_number;
- int i;
+ int i, err = 0;
+
+#define RETURN_ERR(x, label) do { err = (x); goto label; } while (0)
hss_port_number = icp_HssAccNumSupportedPortsGet();
if (hss_port_number < MIN_HSS_PORTS_WANTED) {
printk(KERN_ERR "%s: wants %d HSS (TDM) ports but the system "
"has only %d\n",
__func__, MIN_HSS_PORTS_WANTED, hss_port_number);
- return -ENXIO;
+ RETURN_ERR(-ENXIO, ret_err);
}
hss_chan_number = icp_HssAccNumSupportedChannelsGet();
if (hss_chan_number < MIN_HSS_CHANS_WANTED) {
printk(KERN_ERR "%s: wants %d HSS (TDM) chans but the system "
"can only use %d\n",
__func__, MIN_HSS_CHANS_WANTED, hss_chan_number);
- return -ENXIO;
+ RETURN_ERR(-ENXIO, ret_err);
}
- status = icp_HssAccVoiceInit(TDM_SAMPLE_SIZE_FOR_DAHDI,
+ status = icp_HssAccVoiceInit(XIVO_TDM_VOICE_CHUNK_SZ,
/* intGenerationEnable */ ICP_TRUE);
if (status != ICP_STATUS_SUCCESS) {
printk(KERN_ERR "%s: icp_HssAccVoiceInit() returned error %d\n",
__func__, (int) status);
- return -EIO;
+ RETURN_ERR(-EIO, ret_err);
}
status = icp_HssAccErrorCallbackRegister(ICP_HSSACC_CHAN_TYPE_VOICE,
@@ -725,36 +1101,215 @@ static int __init xivo_icp_Hss_init(void)
printk(KERN_ERR "%s: icp_HssAccErrorCallbackRegister() "
"returned error %d\n",
__func__, (int) status);
- return -EIO;
+ RETURN_ERR(-EIO, ret_err);
}
+ for (i = 0; i < LOCAL_NUM_RX; i++) {
+ local_rx_ixQ[i] = HssAccQueueIdGet(hssacc_from_local_rx(i));
+ printk(KERN_ERR "local_rx=%d ixQ=%u\n", i, (unsigned)local_rx_ixQ[i]);
+ }
+
+ xivo_tdm_load_counters();
/* GIVE ME A spare_wheel_osal_buf AND A PONY (and LSD) */
for (i = 0; i < RX_BUF_NUM; i++) {
- IX_OSAL_MBUF* osal_buf =
- alloc_init_osal_buf(TDM_SAMPLE_SIZE_FOR_DAHDI);
- if (!osal_buf) {
- printk(KERN_ERR "BUGBUG WARNING MEMORY LEAK YOU'LL DIE "
- "(triggered by memory alloc failure in "
- "alloc_init_osal_buf)\n");
- return -ENOMEM;
- }
+ struct xivo_tdm_rx_buf *rx_buf = &xivo_tdm_rx_buf_table[i];
+ memset(rx_buf->data, 0xD5, sizeof rx_buf->data);
+ rx_buf->piu_entry.q_entry.lsc = len_status_cid(
+ XIVO_TDM_VOICE_CHUNK_SZ,
+ 0, 0);
+ rx_buf->piu_entry.q_entry.data_ptr = (u32)rx_buf->data;
+ rx_buf->piu_entry.q_entry.packet_len = 0;
+ rx_buf->piu_entry.q_entry.entry_ptr = 0;
+ xivo_tdm_rxfq_replenish(&rx_buf->piu_entry);
+ }
+ xivo_tdm_save_our_counters();
- status = icp_HssAccRxFreeReplenish(ICP_HSSACC_CHAN_TYPE_VOICE,
- osal_buf);
- if (status != ICP_STATUS_SUCCESS) {
- printk(KERN_WARNING "the hss soft is not hungry "
- "anymore, giving up feeding\n");
- break;
- }
+ tdm_register_trigger_hardirq_handler(xivo_tdm_trigger_handler);
+
+ return 0;
+
+ ret_err:
+ // there used to be some stuff here
+ return err;
+}
+
+#ifdef DEBUGFS_MY_STUFF
+static struct dentry *dbg_dir;
+#define DBG_FIELDS \
+ F(u32, ts_rx_unsync) \
+ F(u32, ts_tx_unsync) \
+ F(u32, ts_started) \
+ F(u32, ts_allocated) \
+ F(u32, port_rx_ent)
+
+#define DBG_GLOBAL_FIELDS \
+ F(u32, rxq_empty) \
+ F(u32, global_count) \
+ F(u32, invalid_channels) \
+ F(u32, rxq_entries) \
+ F(u32, ref_ts_not_started)
+
+#define DBG_GLOBAL_ENT \
+ G(u32, levels, 0) \
+ G(u32, levels, 1) \
+ G(u32, levels, 2) \
+ G(u32, levels, 3) \
+ G(u32, levels, 4)
+
+#define F(t, n) static struct dentry *dbg_1_ ## n;
+DBG_FIELDS
+#undef F
+#define F(t, n) static struct dentry *dbg_2_ ## n;
+DBG_FIELDS
+#undef F
+#define F(t, n) static struct dentry *dbg_g_ ## n;
+DBG_GLOBAL_FIELDS
+#undef F
+#define G(t, n, i) static struct dentry *dbg_ge_ ## n ## _ ## i;
+DBG_GLOBAL_ENT
+#undef G
+
+static struct dentry *dbg_all;
+
+#define PORT_LIST \
+ E(u32, ts_rx_unsync) \
+ E(u32, ts_tx_unsync) \
+ E(u32, ts_started) \
+ E(u32, ts_allocated) \
+ E(unsigned, err) \
+ E(u32, port_rx_ent)
+
+struct trucs {
+ #define E(t,n) t n;
+ PORT_LIST
+ #undef E
+};
+
+#define GLOB_LIST \
+ E(u32, rxq_empty) \
+ E(u32, global_count) \
+ E(u32, invalid_channels) \
+ E(u32, rxq_entries) \
+ E(u32, ref_ts_not_started)
+
+struct trucs_globaux {
+ #define E(t,n) t n;
+ GLOB_LIST
+ #undef E
+};
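
PORT_LIST and GLOB_LIST are X-macros: the same E() list expands once into the snapshot structs here and again into the copy loops and seq_printf dumps below, so adding a counter is a single added line. A standalone sketch of the pattern (field names hypothetical):

    #define STATS_LIST \
            E(u32, frames) \
            E(u32, drops)

    struct stats_snap {
    #define E(t, n) t n;
            STATS_LIST
    #undef E
    };

    static void stats_copy(struct stats_snap *dst, const struct stats_snap *src)
    {
    #define E(t, n) dst->n = src->n;
            STATS_LIST
    #undef E
    }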
+
+static int dbg_all_show(struct seq_file *f, void *_d)
+{
+ static DEFINE_SPINLOCK(atom_lock); /* gruik hack */
+
+ /* some non-trivial allocations on stack */
+ struct trucs deux[2];
+ struct trucs_globaux blop;
+ u32 levels_copy[5];
+
+ int i;
+
+ unsigned long flags;
+ spin_lock_irqsave(&atom_lock, flags); /* gruik hack */
+ for (i = 0; i < 2; i++) {
+ #define E(t,n) deux[i].n = xivo_tdm_ports[i].n;
+ PORT_LIST
+ #undef E
}
+ memcpy(levels_copy, levels, sizeof levels_copy);
+
+ #define E(t,n) blop.n = n;
+ GLOB_LIST
+ #undef E
+
+ spin_unlock_irqrestore(&atom_lock, flags); /* gruik hack */
+
+
+ /* DISPLAY */
+
+ #define E(t,n) seq_printf(f, "%s: %u\n", #n, blop.n);
+ GLOB_LIST
+ #undef E
+ seq_printf(f, "\n");
+
+ for (i = 0; i < (int)ARRAY_SIZE(levels_copy); i++)
+ seq_printf(f, "levels[%d]: %u\n", i, levels_copy[i]);
+ seq_printf(f, "\n");
+
+ for (i = 0; i < 2; i++) {
+ seq_printf(f, "PORT %d:\n", i);
+ #define E(t,n) seq_printf(f, " %s: %u\n", #n, deux[i].n);
+ PORT_LIST
+ #undef E
+ seq_printf(f, "\n");
+ }
+
+ seq_printf(f, "LOOOOOOOOOOL\n");
+
return 0;
}
+static int dbg_all_open(struct inode *i, struct file *f)
+{
+ return single_open(f, dbg_all_show, NULL);
+}
+
+static const struct file_operations dbg_all_fops = {
+ .owner = THIS_MODULE,
+ .open = dbg_all_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#endif /* DEBUGFS_MY_STUFF */
+
static int __init xivo_tdm_init(void)
{
int rc;
+#ifdef DEBUGFS_MY_STUFF
+ dbg_dir = debugfs_create_dir("xivo_tdm", NULL);
+ if (dbg_dir) {
+ #define F(t, n) dbg_1_ ## n = debugfs_create_ ## t ( \
+ "1_" #n, \
+ 0444, \
+ dbg_dir, \
+ (t *) &xivo_tdm_ports[0].n);
+ DBG_FIELDS
+ #undef F
+
+ #define F(t, n) dbg_2_ ## n = debugfs_create_ ## t ( \
+ "2_" #n, \
+ 0444, \
+ dbg_dir, \
+ (t *) &xivo_tdm_ports[1].n);
+ DBG_FIELDS
+ #undef F
+
+ #define F(t, n) dbg_g_ ## n = debugfs_create_ ## t ( \
+ #n, \
+ 0444, \
+ dbg_dir, \
+ &n);
+ DBG_GLOBAL_FIELDS
+ #undef F
+
+ #define G(t, n, i) dbg_ge_ ## n ## _ ## i = \
+ debugfs_create_ ## t ( \
+ #n "_" #i, \
+ 0444, \
+ dbg_dir, \
+ &n[i]);
+ DBG_GLOBAL_ENT
+ #undef G
+
+ dbg_all = debugfs_create_file("zoby", 0444, dbg_dir,
+ NULL, &dbg_all_fops);
+ }
+#endif /* DEBUGFS_MY_STUFF */
+
if ((rc = xivo_internal_init()) < 0)
return rc;
@@ -769,10 +1324,36 @@ static int __init xivo_tdm_init(void)
static void __exit xivo_tdm_exit(void)
{
- printk(KERN_ERR "BUGBUG WARNING MEMORY LEAK YOU'LL DIE "
- "(triggered by module unload)\n");
- /* XXX */
+ printk(KERN_ERR "BUGBUG WARNING MEMORY LEAK YOU'LL DIE and probably "
+ "crash too (triggered by module unload)\n");
+
+ tdm_register_trigger_hardirq_handler(NULL);
+ msleep(10); // BUGBUG racy hack
xivo_internal_cleanup();
+
+#ifdef DEBUGFS_MY_STUFF
+ if (dbg_dir) {
+ #define F(t, n) debugfs_remove(dbg_1_ ## n);
+ DBG_FIELDS
+ #undef F
+
+ #define F(t, n) debugfs_remove(dbg_2_ ## n);
+ DBG_FIELDS
+ #undef F
+
+ #define F(t, n) debugfs_remove(dbg_g_ ## n);
+ DBG_GLOBAL_FIELDS
+ #undef F
+
+ #define G(t, n, i) debugfs_remove(dbg_ge_ ## n ## _ ## i);
+ DBG_GLOBAL_ENT
+ #undef G
+
+ debugfs_remove(dbg_all);
+
+ debugfs_remove(dbg_dir);
+ }
+#endif /* DEBUGFS_MY_STUFF */
}
module_init(xivo_tdm_init);