summaryrefslogtreecommitdiff
path: root/tdm
diff options
context:
space:
mode:
Diffstat (limited to 'tdm')
-rw-r--r--tdm/Makefile29
-rw-r--r--tdm/xivo_tdm.c659
-rw-r--r--tdm/xivo_tdm_api.h45
3 files changed, 733 insertions, 0 deletions
diff --git a/tdm/Makefile b/tdm/Makefile
new file mode 100644
index 0000000..7393f89
--- /dev/null
+++ b/tdm/Makefile
@@ -0,0 +1,29 @@
+# Kbuild wrapper Makefile for the xivo_tdm out-of-tree module.
+# KSRC must point at a configured kernel source tree, ICP_ROOT at the
+# Intel EP80579 (Tolapai) software root; the "bad__" defaults force an
+# obvious failure if the caller forgets to set them.
+PWD := $(shell pwd)
+
+KSRC ?= /bad__ksrc__not_set
+ICP_ROOT ?= /bad__icp_root___not_set
+
+include $(ICP_ROOT)/build_system/build_files/includes.mk
+
+# XXX This is an ugly evil hack, but this is probably the only way to build
+# against Intel code without killing myself because of depression induced by
+# their overcomplicated makefile mess (which BTW is buggy anyway, and has most
+# probably been written under influence of much illegal drugs -- i don't have
+# that kind of drugs available)
+CFLAGS_MODULE += -O2 -DENABLE_IOMEM -DENABLE_BUFFERMGT
+
+# $(INCLUDES) comes from the Intel includes.mk pulled in above.
+CFLAGS_MODULE += $(INCLUDES)
+
+CFLAGS_MODULE += -DEP805XX -D__ep805xx -DTOLAPAI -D__tolapai \
+		 -DIX_HW_COHERENT_MEMORY=1
+
+
+obj-m := xivo_tdm.o
+
+# Default goal; the actual work for modules/modules_install/clean is
+# delegated to the kernel build system in the combined rule below.
+modules:
+
+modules modules_install clean:
+	$(MAKE) -C $(KSRC) M=$(PWD) $@
+
+distclean: clean
+	rm -f Module.symvers
diff --git a/tdm/xivo_tdm.c b/tdm/xivo_tdm.c
new file mode 100644
index 0000000..00cfae9
--- /dev/null
+++ b/tdm/xivo_tdm.c
@@ -0,0 +1,659 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <IxOsal.h>
+#include <icp.h>
+#include <icp_hssdrv_common.h>
+
+#include "xivo_tdm_api.h"
+
+
+MODULE_LICENSE("GPL");
+
+
+#define MIN_HSS_PORTS_WANTED (2)
+#define MIN_HSS_CHANS_WANTED (10)
+
+#define TDM_SAMPLE_SIZE_FOR_DAHDI (8)
+
+#define RX_BUF_NUM (2048)
+
+/* for Linux < 2.6.19; WARNING: DO NOT USE IN INTERFACES IN THIS CASE!
+ * (no ifdef so that this warning can be respected when porting
+ * to modern kernel) thanks to a clash between this definition of 'bool'
+ * and those of Linux
+ */
+typedef unsigned char bool;
+enum { false = 0, true = 1 };
+
+/* fixed for now: */
+#define TS_NUM 32
+
+struct xivo_tdm_port {
+ int port_num;
+ bool allocated;
+
+ u32 ts_allocated;
+ u32 ts_started;
+ unsigned int hss_chan_ids[TS_NUM];
+
+ spinlock_t lock;
+ struct work_struct work;
+
+ /* WARNING: this file is a big hack, coherency of following chunks is
+ * not guaranteed. Blame Intel for their shitty nearly unusable code.
+ */
+ u8 tx_bufs[TS_NUM][8];
+ u8 rx_bufs[TS_NUM][8];
+};
+
+static struct xivo_tdm_port xivo_tdm_ports[MIN_HSS_PORTS_WANTED];
+
+static DECLARE_MUTEX(xivo_tdm_mutex);
+static struct workqueue_struct *xivo_tdm_wq;
+
+/* INTERNAL FUNCS */
+
+/* !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! USER CONTEXT ONLY !!!!!!!!!!!!!!!! */
+/*
+ * Allocate an IX_OSAL_MBUF descriptor plus a data buffer of
+ * @data_buf_length bytes (both kmalloc GFP_KERNEL, hence user context
+ * only, per the banner above).  Returns NULL on any allocation failure.
+ * The caller owns the result and must release it with free_osal_buf().
+ */
+static IX_OSAL_MBUF *alloc_init_osal_buf(
+	unsigned int data_buf_length)
+{
+	IX_OSAL_MBUF *osal_buf;
+	char *data_buf;
+
+	osal_buf = kmalloc(sizeof(IX_OSAL_MBUF), GFP_KERNEL);
+	if (!osal_buf)
+		return NULL;
+
+	data_buf = kmalloc(data_buf_length, GFP_KERNEL);
+	if (!data_buf) {
+		kfree(osal_buf);
+		return NULL;
+	}
+
+	/* NOTE(review): this assignment is duplicated three lines below;
+	 * redundant but harmless. */
+	IX_OSAL_MBUF_ALLOCATED_BUFF_LEN(osal_buf) = data_buf_length;
+
+	/*Fill in the allocated size of the data buf */
+	IX_OSAL_MBUF_ALLOCATED_BUFF_LEN(osal_buf) = data_buf_length;
+	IX_OSAL_MBUF_MLEN(osal_buf) = data_buf_length;
+
+	/* single-buffer, single-packet mbuf: no chaining */
+	IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(osal_buf) = NULL;
+	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(osal_buf) = NULL;
+
+	/*Attach the data buf to the OSAL buf */
+	IX_OSAL_MBUF_MDATA(osal_buf) = data_buf;
+
+	return osal_buf;
+}
+
+/*
+ * Release an mbuf obtained from alloc_init_osal_buf(): frees the attached
+ * data buffer, then the descriptor itself.  NULL is accepted (no-op).
+ */
+static void free_osal_buf(
+	IX_OSAL_MBUF *osal_buf)
+{
+	if (osal_buf) {
+		char *data_buf = IX_OSAL_MBUF_MDATA(osal_buf);
+		kfree(data_buf);
+
+		kfree(osal_buf);
+	}
+}
+
+/*
+ * Delete the HSS channel bound to timeslot @cnum and clear the port's
+ * bookkeeping (ts_allocated bit, stored channel id).  A delete failure is
+ * only logged; the bookkeeping is cleared regardless.
+ */
+static void dealloc_one_chan(
+	struct xivo_tdm_port* xtp,
+	const int cnum)
+{
+	icp_status_t status = icp_HssAccChannelDelete(xtp->hss_chan_ids[cnum]);
+	if (status != ICP_STATUS_SUCCESS)
+		printk(KERN_ERR "%s: icp_HssAccChannelDelete returned "
+				"error %d\n", __func__, (int) status);
+	xtp->ts_allocated &= ~(1u << cnum);
+	xtp->hss_chan_ids[cnum] = 0;
+}
+
+/*
+ * Allocate and configure one HSS voice channel for timeslot @cnum of
+ * port @xtp (line 0 timeslot bitmap).  On success sets the matching bit
+ * in ts_allocated and returns 0; on Intel API failure returns -EIO after
+ * undoing the allocation.  Returns -42 if the slot is already allocated
+ * (internal invariant violation).
+ */
+static int add_one_chan(
+	struct xivo_tdm_port* xtp,
+	const int cnum)
+{
+	icp_status_t status;
+	icp_hssacc_timeslot_map_t ts_map = {0};
+
+	if (xtp->ts_allocated & (1u << cnum)) {
+		printk(KERN_ERR "%s: EVIL BUG, DIE!\n", __func__);
+		return -42;
+	}
+
+	ts_map.line0_timeslot_bit_map = (1u << cnum);
+
+	/*Allocate the channel */
+	status = icp_HssAccChannelAllocate(
+			&xtp->hss_chan_ids[cnum],
+			xtp->port_num,
+			ts_map,
+			ICP_HSSACC_CHAN_TYPE_VOICE);
+	if (status != ICP_STATUS_SUCCESS) {
+		printk(KERN_ERR "%s: icp_HssAccChannelAllocate returned "
+				"error %d\n", __func__, (int) status);
+		return -EIO;
+	}
+
+	status = icp_HssAccChannelConfigure(
+			xtp->hss_chan_ids[cnum],
+			/* channelDataInvert */ 0,
+			/* channelBitEndianness */ 1, /* MSb first */
+			/* channelByteSwap */ 0,
+			ICP_FALSE, ICP_FALSE, ICP_FALSE);
+	if (status != ICP_STATUS_SUCCESS) {
+		printk(KERN_ERR "%s: icp_HssAccChannelConfigure returned "
+				"error %d\n", __func__, (int) status);
+		dealloc_one_chan(xtp, cnum);
+		return -EIO;
+	}
+
+	/* 0xD5 idle pattern -- presumably A-law silence; confirm against
+	 * the line coding in use.  Frame size matches DAHDI's 8 samples. */
+	status = icp_HssAccChannelVoiceServiceConfigure(
+			xtp->hss_chan_ids[cnum],
+			ICP_HSSACC_VOICE_TX_IDLE_REPEAT_LAST_FRAME,
+			0xD5,
+			TDM_SAMPLE_SIZE_FOR_DAHDI);
+	if (status != ICP_STATUS_SUCCESS) {
+		printk(KERN_ERR "%s: icp_HssAccChannelVoiceServiceConfigure "
+				"returned error %d\n", __func__, (int) status);
+		dealloc_one_chan(xtp, cnum);
+		return -EIO;
+	}
+
+	xtp->ts_allocated |= (1u << cnum);
+
+	return 0;
+}
+
+/*
+ * Bring up the already-allocated channel for timeslot @cnum and mark it
+ * started under xtp->lock so the workqueue handler will service it.
+ * Returns 0 on success, -EIO if icp_HssAccChannelUp fails.
+ */
+static int start_one_chan(
+	struct xivo_tdm_port* xtp,
+	const int cnum)
+{
+	const unsigned int hss_chan_id = xtp->hss_chan_ids[cnum];
+	unsigned long flags;
+
+	icp_status_t status = icp_HssAccChannelUp(hss_chan_id);
+	if (status != ICP_STATUS_SUCCESS) {
+		printk(KERN_ERR "%s: icp_HssAccChannelUp returned error %d \n",
+		       __func__, status);
+		return -EIO;
+	}
+
+	spin_lock_irqsave(&xtp->lock, flags);
+	/* WARNING: only protect startup, not shutdown! */
+	xtp->ts_started |= (1u << cnum);
+	spin_unlock_irqrestore(&xtp->lock, flags);
+
+	return 0;
+}
+
+/*
+ * Per-port error callback registered with the Intel HSS access layer
+ * (see xivo_tdm_config_port).  user_context is the struct xivo_tdm_port.
+ * Pure logging: decodes error_type into a printk, no recovery attempted.
+ */
+static void xivo_hss_port_error_cb(
+	icp_user_context_t user_context,
+	icp_hssacc_port_error_t error_type)
+{
+	struct xivo_tdm_port* xtp = (struct xivo_tdm_port*) user_context;
+	unsigned port_num = xtp->port_num;
+
+	switch (error_type) {
+	case ICP_HSSACC_PORT_ERROR_TX_LOS:
+		printk(KERN_ERR "%s - Port error for port %u :"
+				" Transmit loss of sync\n",
+		       __func__, port_num);
+		break;
+	case ICP_HSSACC_PORT_ERROR_RX_LOS:
+		printk(KERN_ERR "%s - Port error for port %u :"
+				" Receive loss of sync\n",
+		       __func__, port_num);
+		break;
+	case ICP_HSSACC_PORT_ERROR_TX_UNDERFLOW:
+		printk(KERN_ERR "%s - Port error for port %u :"
+				" Transmit underflow\n",
+		       __func__, port_num);
+		break;
+	case ICP_HSSACC_PORT_ERROR_RX_OVERFLOW:
+		printk(KERN_ERR "%s - Port error for port %u :"
+				" Receive overflow\n",
+		       __func__, port_num);
+		break;
+	case ICP_HSSACC_PORT_ERROR_TX_PARITY:
+		printk(KERN_ERR "%s - Port error for port %u :"
+				" Tx Parity Error\n",
+		       __func__, port_num);
+		break;
+	case ICP_HSSACC_PORT_ERROR_RX_PARITY:
+		printk(KERN_ERR "%s - Port error for port %u :"
+				" Rx Parity Error\n",
+		       __func__, port_num);
+		break;
+	default:
+		printk(KERN_ERR "%s - Port error for port %u :"
+				" Unidentified error %u\n",
+		       __func__, port_num, (unsigned) error_type);
+		break;
+	}
+}
+
+/*
+ * Global (non-port-specific) error callback registered in
+ * xivo_icp_Hss_init with a NULL user_context.  Pure logging.
+ */
+static void xivo_hss_error_cb(
+	icp_user_context_t user_context,
+	icp_hssacc_error_t error_type)
+{
+	(void) user_context;
+
+	switch (error_type) {
+	case ICP_HSSACC_ERROR_RX_OVERFLOW:
+		printk(KERN_ERR "%s - Error: Receive Queue Overflow "
+				"reported\n",
+		       __func__);
+		break;
+	case ICP_HSSACC_ERROR_RX_FREE_UNDERFLOW:
+		printk(KERN_ERR "%s - Error: Receive Free Queue Underflow "
+				" reported\n",
+		       __func__);
+		break;
+	case ICP_HSSACC_ERROR_MESSAGE_FIFO_OVERFLOW:
+		printk(KERN_ERR "%s - Error: Messaging FIFO Overflow "
+				"reported\n",
+		       __func__);
+		break;
+	default:
+		printk(KERN_ERR "%s - Unidentified Error %u\n",
+		       __func__, (unsigned) error_type);
+		break;
+	}
+}
+
+/*
+ * Drain the RX queue of channel @cnum: each received mbuf's first 8 bytes
+ * are copied into xtp->rx_bufs[cnum] (later frames overwrite earlier ones,
+ * so only the last drained frame survives), then the mbuf is recycled back
+ * to the free queue (or freed if replenish fails).  Returns the number of
+ * frames drained.  Runs from the workqueue (xivo_tdm_work).
+ */
+static int xivo_tdm_deferred_receive(
+	struct xivo_tdm_port* xtp,
+	const unsigned int cnum)
+{
+	IX_OSAL_MBUF *osal_buf = NULL;
+	icp_status_t status;
+	int nb_read = 0;
+	const unsigned int hss_chan_id = xtp->hss_chan_ids[cnum];
+	u8 *rx_buf = xtp->rx_bufs[cnum];
+
+	while (1) {
+		char *data_buf;
+		status = icp_HssAccReceive(hss_chan_id, &osal_buf);
+		if (status != ICP_STATUS_SUCCESS || osal_buf == NULL)
+			return nb_read;
+		nb_read++;
+		data_buf = IX_OSAL_MBUF_MDATA(osal_buf);
+		memcpy(rx_buf, data_buf, 8 /* XXX is this right? */ );
+		/* restore the full buffer length before recycling */
+		IX_OSAL_MBUF_MLEN(osal_buf) = IX_OSAL_MBUF_ALLOCATED_BUFF_LEN(osal_buf);
+		status = icp_HssAccRxFreeReplenish(ICP_HSSACC_CHAN_TYPE_VOICE, osal_buf);
+		if (status != ICP_STATUS_SUCCESS) {
+			if (printk_ratelimit())
+				printk(KERN_ERR "%s: icp_HssAccRxFreeReplenish "
+						"err %d cnum %u\n",
+				       __func__, (int) status, cnum);
+			free_osal_buf(osal_buf);
+		}
+		osal_buf = NULL;
+	}
+}
+
+/*
+ * Transmit one 8-byte frame from xtp->tx_bufs[cnum] on channel @cnum.
+ * The mbuf is recycled from the TxDone queue; on ICP_STATUS_UNDERFLOW
+ * (no completed buffer available yet) a fresh one is allocated instead.
+ * If transmit fails the mbuf is freed and a rate-limited error logged.
+ * Runs from the workqueue (xivo_tdm_work).
+ *
+ * NOTE(review): tx_errs is a function-local static updated without any
+ * locking; counts may be lost if this ever runs concurrently for several
+ * ports (single-threaded workqueue makes that unlikely today).
+ */
+static void xivo_tdm_deferred_transmit(
+	struct xivo_tdm_port* xtp,
+	const unsigned int cnum)
+{
+	static unsigned tx_errs = 0;
+
+	IX_OSAL_MBUF *osal_buf = NULL;
+	icp_status_t status;
+	const unsigned int hss_chan_id = xtp->hss_chan_ids[cnum];
+	u8 *tx_buf = xtp->tx_bufs[cnum];
+
+	status = icp_HssAccTxDoneRetrieve(hss_chan_id, &osal_buf);
+	if (status != ICP_STATUS_SUCCESS) {
+		if (status == ICP_STATUS_UNDERFLOW) {
+			osal_buf = alloc_init_osal_buf(TDM_SAMPLE_SIZE_FOR_DAHDI);
+			if (!osal_buf && printk_ratelimit())
+				printk(KERN_ERR
+				       "%s: osal_buf alloc failed - cnum %u\n",
+				       __func__, cnum);
+		} else {
+			osal_buf = NULL;
+			if (printk_ratelimit())
+				printk(KERN_ERR "%s: icp_HssAccTxDoneRetrieve "
+						"err %d cnum %u\n",
+				       __func__, (int) status, cnum);
+		}
+	}
+
+	if (!osal_buf)
+		return;
+
+	memcpy(IX_OSAL_MBUF_MDATA(osal_buf), tx_buf, 8);
+	IX_OSAL_MBUF_MLEN(osal_buf) = 8;
+	IX_OSAL_MBUF_PKT_LEN(osal_buf) = 8;
+
+	status = icp_HssAccTransmit(hss_chan_id, osal_buf);
+	if (status != ICP_STATUS_SUCCESS) {
+		tx_errs++;
+		if (printk_ratelimit())
+			printk(KERN_ERR
+			       "%s: icp_HssAccTransmit err %d cnum %u "
+			       "tx_errs %u\n",
+			       __func__, (int) status, cnum, tx_errs);
+		free_osal_buf(osal_buf);
+	}
+}
+
+/*
+ * Workqueue handler (queued by xivo_tdm_tick).  Snapshots ts_started
+ * under the lock, then services receive + transmit for every started
+ * timeslot of the port.  Old-style (pre-2.6.20) work handler signature:
+ * @arg is the struct xivo_tdm_port passed at INIT_WORK time.
+ */
+static void xivo_tdm_work(void *arg)
+{
+	struct xivo_tdm_port *xtp = arg;
+
+	unsigned long flags;
+	u32 ts_started;
+	u32 scan;
+	int cnum;
+
+	spin_lock_irqsave(&xtp->lock, flags);
+	ts_started = xtp->ts_started;
+	spin_unlock_irqrestore(&xtp->lock, flags);
+
+	for (cnum = 0, scan = 1; scan; scan <<= 1, cnum++) {
+		if (ts_started & scan) {
+			/* XXX use ret value: */
+			(void) xivo_tdm_deferred_receive(xtp, cnum);
+			xivo_tdm_deferred_transmit(xtp, cnum);
+		}
+	}
+}
+
+
+/* EXPORTED FUNCS */
+
+/*
+ * Claim TDM port @tdm_port_num (exported API; user context only --
+ * sleeps on xivo_tdm_mutex).  Returns the port handle, or NULL if the
+ * number is out of range or the port is already allocated.
+ */
+struct xivo_tdm_port *xivo_tdm_get_port(int tdm_port_num)
+{
+	struct xivo_tdm_port *xtp;
+
+	if (tdm_port_num < 0 || tdm_port_num >= MIN_HSS_PORTS_WANTED) {
+		printk(KERN_ERR "%s: attempt to allocate an out of range TDM "
+				"port %d\n", __func__, tdm_port_num);
+		return NULL;
+	}
+
+	down(&xivo_tdm_mutex);
+	if (xivo_tdm_ports[tdm_port_num].allocated) {
+		xtp = NULL;
+		printk(KERN_ERR "%s: attempt to allocate TDM port %d, which "
+				"is already allocated\n",
+		       __func__, tdm_port_num);
+	} else {
+		xivo_tdm_ports[tdm_port_num].allocated = true;
+		xtp = &xivo_tdm_ports[tdm_port_num];
+	}
+	up(&xivo_tdm_mutex);
+
+	return xtp;
+}
+EXPORT_SYMBOL(xivo_tdm_get_port);
+
+/*
+ * Release a port obtained from xivo_tdm_get_port (user context only).
+ * Validates that @xtp really points at an element of xivo_tdm_ports[]
+ * (range check plus the idx round-trip, which rejects misaligned
+ * interior pointers) before clearing the allocated flag.
+ */
+void xivo_tdm_put_port(struct xivo_tdm_port *xtp)
+{
+	unsigned long xtp_addr = (unsigned long)xtp;
+
+	unsigned idx = (xtp_addr - (unsigned long)xivo_tdm_ports) / sizeof(*xtp);
+
+	if (xtp_addr == 0
+	    || xtp_addr < (unsigned long)xivo_tdm_ports
+	    || xtp_addr > (unsigned long)(xivo_tdm_ports + MIN_HSS_PORTS_WANTED - 1)
+	    || xtp_addr != (unsigned long)(xivo_tdm_ports + idx)) {
+		printk(KERN_ERR "%s: attempt to free an invalid struct "
+				"xivo_tdm_port pointer: %p\n",
+		       __func__, (void *) xtp);
+		return;
+	}
+
+	down(&xivo_tdm_mutex);
+	if (!xtp->allocated) {
+		printk(KERN_ERR "%s: attempt to free TDM port %u, which is not "
+				"allocated\n",
+		       __func__, idx);
+	} else {
+		xtp->allocated = false;
+	}
+	up(&xivo_tdm_mutex);
+}
+EXPORT_SYMBOL(xivo_tdm_put_port);
+
+/*
+ * Configure and bring up the HSS port behind @xtp (user context only).
+ * @port_config selects a predefined Intel HSS driver port configuration
+ * (must be below ICP_HSSDRV_PORT_CONFIG_DELIMITER).  Also registers the
+ * per-port error callback -- never unregistered (see XXX below).
+ * Returns 0, -EINVAL on bad @port_config, or -EIO on Intel API failure.
+ */
+int xivo_tdm_config_port(
+	struct xivo_tdm_port* xtp,
+	unsigned int port_config)
+{
+	icp_status_t status;
+	icp_hssacc_port_config_params_t hss_port_config;
+
+	if (port_config >= ICP_HSSDRV_PORT_CONFIG_DELIMITER) {
+		printk(KERN_ERR "%s: invalid port config %u\n",
+		       __func__, port_config);
+		return -EINVAL;
+	}
+
+	port_config_create(port_config,
+			   ICP_HSSDRV_NO_LOOPBACK,
+			   &hss_port_config);
+
+	status = icp_HssAccPortConfig(xtp->port_num, &hss_port_config);
+	if (status != ICP_STATUS_SUCCESS) {
+		printk(KERN_ERR "%s: icp_HssAccPortConfig returned error %d\n",
+		       __func__, (int) status);
+		return -EIO;
+	}
+
+	status = icp_HssAccPortErrorCallbackRegister( /* XXX to unreg? */
+			xtp->port_num,
+			ICP_HSSACC_CHAN_TYPE_VOICE,
+			(icp_user_context_t) xtp,
+			xivo_hss_port_error_cb);
+	if (status != ICP_STATUS_SUCCESS) {
+		printk(KERN_ERR "%s: icp_HssAccPortErrorCallbackRegister "
+				"returned error %d\n",
+		       __func__, (int) status);
+		return -EIO;
+	}
+
+	status = icp_HssAccPortUp(xtp->port_num);
+	if (status != ICP_STATUS_SUCCESS) {
+		printk(KERN_ERR "%s: icp_HssAccPortUp returned error %d\n",
+		       __func__, (int) status);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(xivo_tdm_config_port);
+
+/*
+ * Allocate then start a channel for every timeslot whose bit is set in
+ * @chans (user context only).  Always returns 0.
+ *
+ * NOTE(review): the return values of add_one_chan()/start_one_chan()
+ * are discarded (see XXX), so start_one_chan() can be called for a slot
+ * whose allocation failed and the caller never learns of any failure.
+ */
+int xivo_tdm_start_chans(
+	struct xivo_tdm_port* xtp,
+	const u32 chans)
+{
+	u32 scan;
+	int cnum;
+
+	for (cnum = 0, scan = 1; scan; scan <<= 1, cnum++) {
+		if (scan & chans) {
+			printk(KERN_INFO "%s: adding chan %d\n",
+			       __func__, cnum);
+			/* XXX retval */ (void) add_one_chan(xtp, cnum);
+		}
+	}
+
+	for (cnum = 0, scan = 1; scan; scan <<= 1, cnum++) {
+		if (scan & chans) {
+			printk(KERN_INFO "%s: starting chan %d\n",
+			       __func__, cnum);
+			/* XXX retval */ (void) start_one_chan(xtp, cnum);
+		}
+	}
+
+	printk(KERN_INFO "%s: DONE\n", __func__);
+
+	return 0;
+}
+EXPORT_SYMBOL(xivo_tdm_start_chans);
+
+/*
+ * Copy the latest 8-byte received frame of timeslot @cnum into @samples.
+ * Callable from any context (no locking; @cnum is not range-checked, and
+ * the copy may race with the workqueue writer -- see the struct comment).
+ */
+void xivo_tdm_receive(
+	struct xivo_tdm_port* xtp,
+	const unsigned int cnum,
+	u8 samples[8])
+{
+	memcpy(samples, xtp->rx_bufs[cnum], 8);
+}
+EXPORT_SYMBOL(xivo_tdm_receive);
+
+/*
+ * Stage an 8-byte frame for transmission on timeslot @cnum; picked up by
+ * the next workqueue pass.  Same caveats as xivo_tdm_receive: any
+ * context, no locking, no @cnum range check.
+ */
+void xivo_tdm_transmit(
+	struct xivo_tdm_port* xtp,
+	const unsigned int cnum,
+	const u8 samples[8])
+{
+	memcpy(xtp->tx_bufs[cnum], samples, 8);
+}
+EXPORT_SYMBOL(xivo_tdm_transmit);
+
+/*
+ * Schedule one service pass (xivo_tdm_work) for @xtp on the module's
+ * single-threaded workqueue.  Safe from hard/soft irq context.
+ */
+void xivo_tdm_tick(
+	struct xivo_tdm_port* xtp)
+{
+	queue_work(xivo_tdm_wq, &xtp->work);
+}
+EXPORT_SYMBOL(xivo_tdm_tick);
+
+
+/* INIT / CLEANUP */
+
+/*
+ * Tear down module-local resources (currently just the workqueue).
+ * Idempotent: the NULL reset makes repeated calls safe.
+ */
+static void xivo_internal_cleanup(void)
+{
+	if (xivo_tdm_wq) {
+		destroy_workqueue(xivo_tdm_wq);
+		xivo_tdm_wq = NULL;
+	}
+}
+
+/*
+ * Create the workqueue and reset every port descriptor: bookkeeping
+ * zeroed, spinlock + work item initialized, sample buffers filled with
+ * 0xD5 (the same idle pattern passed to the voice service config).
+ * Uses the legacy 2-argument-data INIT_WORK, i.e. pre-2.6.20 API.
+ * Returns 0 or -ENOMEM.
+ */
+static int __init xivo_internal_init(void)
+{
+	int i;
+
+	xivo_tdm_wq = create_singlethread_workqueue("xivo_tdm");
+	if (!xivo_tdm_wq) {
+		printk(KERN_ERR "%s: create_singlethread_worqueue "
+				"returned NULL\n", __func__);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < (int)ARRAY_SIZE(xivo_tdm_ports); i++) {
+		xivo_tdm_ports[i].port_num = i;
+		xivo_tdm_ports[i].allocated = false;
+
+		xivo_tdm_ports[i].ts_allocated = 0;
+		xivo_tdm_ports[i].ts_started = 0;
+		memset(xivo_tdm_ports[i].hss_chan_ids, 0,
+		       sizeof xivo_tdm_ports[i].hss_chan_ids);
+
+		spin_lock_init(&xivo_tdm_ports[i].lock);
+		INIT_WORK(&xivo_tdm_ports[i].work,
+			  xivo_tdm_work,
+			  &xivo_tdm_ports[i]);
+
+		memset(xivo_tdm_ports[i].tx_bufs, 0xD5,
+		       sizeof xivo_tdm_ports[i].tx_bufs);
+		memset(xivo_tdm_ports[i].rx_bufs, 0xD5,
+		       sizeof xivo_tdm_ports[i].rx_bufs);
+	}
+
+	return 0;
+}
+
+/*
+ * Initialize the Intel HSS voice service: verify the hardware exposes
+ * enough ports/channels, init the voice layer, register the global error
+ * callback, then pre-fill the RX free queue with up to RX_BUF_NUM mbufs.
+ * Returns 0, -ENXIO (insufficient hardware), -EIO (Intel API failure)
+ * or -ENOMEM.  Buffers handed to the HSS layer are never reclaimed on
+ * module unload (acknowledged leak, see xivo_tdm_exit).
+ */
+static int __init xivo_icp_Hss_init(void)
+{
+	icp_status_t status;
+	int hss_port_number;
+	int hss_chan_number;
+	int i;
+
+	hss_port_number = icp_HssAccNumSupportedPortsGet();
+	if (hss_port_number < MIN_HSS_PORTS_WANTED) {
+		printk(KERN_ERR "%s: wants %d HSS (TDM) ports but the system "
+				"has only %d\n",
+		       __func__, MIN_HSS_PORTS_WANTED, hss_port_number);
+		return -ENXIO;
+	}
+	hss_chan_number = icp_HssAccNumSupportedChannelsGet();
+	if (hss_chan_number < MIN_HSS_CHANS_WANTED) {
+		printk(KERN_ERR "%s: wants %d HSS (TDM) chans but the system "
+				"can only use %d\n",
+		       __func__, MIN_HSS_CHANS_WANTED, hss_chan_number);
+		return -ENXIO;
+	}
+
+	status = icp_HssAccVoiceInit(TDM_SAMPLE_SIZE_FOR_DAHDI,
+				     /* intGenerationEnable */ ICP_TRUE);
+	if (status != ICP_STATUS_SUCCESS) {
+		printk(KERN_ERR "%s: icp_HssAccVoiceInit() returned error %d\n",
+		       __func__, (int) status);
+		return -EIO;
+	}
+
+	status = icp_HssAccErrorCallbackRegister(ICP_HSSACC_CHAN_TYPE_VOICE,
+						 NULL, xivo_hss_error_cb);
+	if (status != ICP_STATUS_SUCCESS) {
+		printk(KERN_ERR "%s: icp_HssAccErrorCallbackRegister() "
+				"returned error %d\n",
+		       __func__, (int) status);
+		return -EIO;
+	}
+
+	/* GIVE ME A spare_wheel_osal_buf AND A PONEY (and LSD) */
+	for (i = 0; i < RX_BUF_NUM; i++) {
+		IX_OSAL_MBUF* osal_buf =
+			alloc_init_osal_buf(TDM_SAMPLE_SIZE_FOR_DAHDI);
+		if (!osal_buf) {
+			printk(KERN_ERR "BUGBUG WARNING MEMORY LEAK YOU'LL DIE "
+					"(triggered by memory alloc failure in "
+					"alloc_init_osal_buf)\n");
+			return -ENOMEM;
+		}
+
+		/* NOTE(review): if the replenish below fails, this
+		 * osal_buf is neither freed nor recorded -- it leaks. */
+		status = icp_HssAccRxFreeReplenish(ICP_HSSACC_CHAN_TYPE_VOICE,
+						   osal_buf);
+		if (status != ICP_STATUS_SUCCESS) {
+			printk(KERN_WARNING "the hss soft is not hungry "
+					    "anymore, giving up feeding\n");
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Module entry point: local state first, then the Intel HSS layer.
+ * On HSS failure the local state is rolled back before propagating the
+ * error code.
+ */
+static int __init xivo_tdm_init(void)
+{
+	int rc;
+
+	if ((rc = xivo_internal_init()) < 0)
+		return rc;
+
+	if ((rc = xivo_icp_Hss_init()) < 0) {
+		xivo_internal_cleanup();
+		return rc;
+	}
+
+	printk(KERN_NOTICE "%s DONE\n", __func__);
+	return 0;
+}
+
+/*
+ * Module exit: destroys the workqueue only.  The mbufs handed to the
+ * HSS layer at init time are knowingly leaked (hence the shouting),
+ * and channels/ports/callbacks are not torn down either (see XXX).
+ */
+static void __exit xivo_tdm_exit(void)
+{
+	printk(KERN_ERR "BUGBUG WARNING MEMORY LEAK YOU'LL DIE "
+			"(triggered by module unload)\n");
+	/* XXX */
+	xivo_internal_cleanup();
+}
+
+module_init(xivo_tdm_init);
+module_exit(xivo_tdm_exit);
diff --git a/tdm/xivo_tdm_api.h b/tdm/xivo_tdm_api.h
new file mode 100644
index 0000000..ce0d448
--- /dev/null
+++ b/tdm/xivo_tdm_api.h
@@ -0,0 +1,45 @@
+#ifndef XIVO_TDM_API_H
+#define XIVO_TDM_API_H
+
+/*
+ * Public kernel-side API of the xivo_tdm module: port allocation,
+ * configuration, channel startup, and per-tick 8-byte sample exchange.
+ * The "hardirq/softirq/user" lines document the contexts from which
+ * each function may be called.
+ */
+
+/* XXX matches with ICP_HSSDRV_PORT_XHFC_MEGREZ_PROTO_XIVO_CONFIG */
+#define XHFC_MEGREZ_PROTO_XIVO_CONFIG 4
+
+/* opaque to API users; defined in xivo_tdm.c */
+struct xivo_tdm_port;
+
+/* TDM port allocation
+ * Returns a xivo_tdm_port pointer if available, else NULL. */
+/* hardirq: no -- softirq: no -- user: yes */
+struct xivo_tdm_port *xivo_tdm_get_port(int tdm_port_num);
+
+/* TDM port release */
+/* hardirq: no -- softirq: no -- user: yes */
+void xivo_tdm_put_port(struct xivo_tdm_port *);
+
+/* TDM port config + startup */
+/* hardirq: no -- softirq: no -- user: yes */
+int xivo_tdm_config_port(
+	struct xivo_tdm_port* xtp,
+	unsigned int port_config);
+
+/* hardirq: no -- softirq: no -- user: yes */
+int xivo_tdm_start_chans(
+	struct xivo_tdm_port* xtp,
+	u32 chans);
+
+/* hardirq: yes -- softirq: yes -- user: yes */
+void xivo_tdm_receive(
+	struct xivo_tdm_port* xtp,
+	const unsigned int cnum,
+	u8 samples[8]);
+
+/* hardirq: yes -- softirq: yes -- user: yes */
+void xivo_tdm_transmit(
+	struct xivo_tdm_port* xtp,
+	const unsigned int cnum,
+	const u8 samples[8]);
+
+/* hardirq: yes -- softirq: yes -- user: yes */
+void xivo_tdm_tick(
+	struct xivo_tdm_port* xtp);
+
+#endif /* XIVO_TDM_API_H */