[openwrt.git] / target / linux / mpc83xx / patches-2.6.33 / 030-ucc_tdm.patch
1 --- /dev/null
2 +++ b/drivers/misc/ucc_tdm.h
3 @@ -0,0 +1,221 @@
4 +/*
5 + * drivers/misc/ucc_tdm.h
6 + *
7 + * UCC Based Linux TDM Driver
8 + * This driver is designed to support UCC based TDM for PowerPC processors.
9 + * This driver can interface with SLIC device to run VOIP kind of
10 + * applications.
11 + *
12 + * Author: Ashish Kalra & Poonam Aggrwal
13 + *
14 + * Copyright (c) 2007 Freescale Semiconductor, Inc.
15 + *
16 + * This program is free software; you can redistribute it and/or modify it
17 + * under the terms of the GNU General Public License as published by the
18 + * Free Software Foundation; either version 2 of the License, or (at your
19 + * option) any later version.
20 + */
21 +
22 +#ifndef TDM_H
23 +#define TDM_H
24 +
25 +#define NUM_TS 8
26 +#define ACTIVE_CH 8
27 +
28 +/* SAMPLE_DEPTH is the number of frames before
29 + * an interrupt is raised. Must be a multiple of 4.
30 + */
31 +#define SAMPLE_DEPTH 80
32 +
33 +/* define the number of Rx interrupts to go by for initial stuttering */
34 +#define STUTTER_INT_CNT 1
35 +
36 +/* BMRx Field Descriptions to specify tstate and rstate in UCC parameter RAM */
37 +#define EN_BUS_SNOOPING 0x20
38 +#define BE_BO 0x10
39 +
40 +/* UPSMR register bit definitions for the transparent UCC controller */
41 +#define NBO 0x00000000 /* Normal Mode 1 bit of data per clock */
42 +
43 +/* SI Mode register bit definitions */
44 +#define NORMAL_OPERATION 0x0000
45 +#define AUTO_ECHO 0x0400
46 +#define INTERNAL_LB 0x0800
47 +#define CONTROL_LB 0x0c00
48 +#define SIMODE_CRT (0x8000 >> 9)
49 +#define SIMODE_SL (0x8000 >> 10)
50 +#define SIMODE_CE (0x8000 >> 11)
51 +#define SIMODE_FE (0x8000 >> 12)
52 +#define SIMODE_GM (0x8000 >> 13)
53 +#define SIMODE_TFSD(val) (val)
54 +#define SIMODE_RFSD(val) ((val) << 8)
55 +
56 +#define SI_TDM_MODE_REGISTER_OFFSET 0
57 +
58 +#define R_CM 0x02000000
59 +#define T_CM 0x02000000
60 +
61 +#define SET_RX_SI_RAM(n, val) \
62 + out_be16((u16 *)&qe_immr->sir.rx[(n)*2], (u16)(val))
63 +
64 +#define SET_TX_SI_RAM(n, val) \
65 + out_be16((u16 *)&qe_immr->sir.tx[(n)*2], (u16)(val))
66 +
67 +/* SI RAM entries */
68 +#define SIR_LAST 0x0001
69 +#define SIR_CNT(n) ((n) << 2)
70 +#define SIR_BYTE 0x0002
71 +#define SIR_BIT 0x0000
72 +#define SIR_IDLE 0
73 +#define SIR_UCC(uccx) (((uccx) + 9) << 5)
74 +
75 +/* BRGC Register Bit definitions */
76 +#define BRGC_RESET (0x1<<17)
77 +#define BRGC_EN (0x1<<16)
78 +#define BRGC_EXTC_QE (0x00<<14)
79 +#define BRGC_EXTC_CLK3 (0x01<<14)
80 +#define BRGC_EXTC_CLK5 (0x01<<15)
81 +#define BRGC_EXTC_CLK9 (0x01<<14)
82 +#define BRGC_EXTC_CLK11 (0x01<<14)
83 +#define BRGC_EXTC_CLK13 (0x01<<14)
84 +#define BRGC_EXTC_CLK15 (0x01<<15)
85 +#define BRGC_ATB (0x1<<13)
86 +#define BRGC_DIV16 (0x1)
87 +
88 +/* structure representing UCC transparent parameter RAM */
89 +struct ucc_transparent_pram {
90 + __be16 riptr;
91 + __be16 tiptr;
92 + __be16 res0;
93 + __be16 mrblr;
94 + __be32 rstate;
95 + __be32 rbase;
96 + __be16 rbdstat;
97 + __be16 rbdlen;
98 + __be32 rdptr;
99 + __be32 tstate;
100 + __be32 tbase;
101 + __be16 tbdstat;
102 + __be16 tbdlen;
103 + __be32 tdptr;
104 + __be32 rbptr;
105 + __be32 tbptr;
106 + __be32 rcrc;
107 + __be32 res1;
108 + __be32 tcrc;
109 + __be32 res2;
110 + __be32 res3;
111 + __be32 c_mask;
112 + __be32 c_pres;
113 + __be16 disfc;
114 + __be16 crcec;
115 + __be32 res4[4];
116 + __be16 ts_tmp;
117 + __be16 tmp_mb;
118 +};
119 +
120 +#define UCC_TRANSPARENT_PRAM_SIZE 0x100
121 +
122 +struct tdm_cfg {
123 + u8 com_pin; /* Common receive and transmit pins
124 + * 0 = separate pins
125 + * 1 = common pins
126 + */
127 +
128 + u8 fr_sync_level; /* SLx bit Frame Sync Polarity
129 + * 0 = L1R/TSYNC active logic "1"
130 + * 1 = L1R/TSYNC active logic "0"
131 + */
132 +
133 + u8 clk_edge; /* CEx bit Tx Rx Clock Edge
134 + * 0 = TX data on rising edge of clock
135 + * RX data on falling edge
136 + * 1 = TX data on falling edge of clock
137 + * RX data on rising edge
138 + */
139 +
140 + u8 fr_sync_edge; /* FEx bit Frame sync edge
141 + * Determine when the sync pulses are sampled
142 + * 0 = Falling edge
143 + * 1 = Rising edge
144 + */
145 +
146 + u8 rx_fr_sync_delay; /* TFSDx/RFSDx bits Frame Sync Delay
147 + * 00 = no bit delay
148 + * 01 = 1 bit delay
149 + * 10 = 2 bit delay
150 + * 11 = 3 bit delay
151 + */
152 +
153 + u8 tx_fr_sync_delay; /* TFSDx/RFSDx bits Frame Sync Delay
154 + * 00 = no bit delay
155 + * 01 = 1 bit delay
156 + * 10 = 2 bit delay
157 + * 11 = 3 bit delay
158 + */
159 +
160 + u8 active_num_ts; /* Number of active time slots in TDM
161 + * assume same active Rx/Tx time slots
162 + */
163 +};
164 +
165 +struct ucc_tdm_info {
166 + struct ucc_fast_info uf_info;
167 + u32 ucc_busy;
168 +};
169 +
170 +struct tdm_ctrl {
171 + u32 device_busy;
172 + struct device *device;
173 + struct ucc_fast_private *uf_private;
174 + struct ucc_tdm_info *ut_info;
175 + u32 tdm_port; /* port for this tdm:TDMA,TDMB,TDMC,TDMD */
176 + u32 si; /* serial interface: 0 or 1 */
177 + struct ucc_fast __iomem *uf_regs; /* UCC Fast registers */
178 + u16 rx_mask[8]; /* Active Receive channels LSB is ch0 */
179 + u16 tx_mask[8]; /* Active Transmit channels LSB is ch0 */
180 +	/* Only channels numbered less than FRAME_SIZE are implemented */
181 + struct tdm_cfg cfg_ctrl; /* Signaling controls configuration */
182 + u8 *tdm_input_data; /* buffer used for Rx by the tdm */
183 + u8 *tdm_output_data; /* buffer used for Tx by the tdm */
184 +
185 + dma_addr_t dma_input_addr; /* dma mapped buffer for TDM Rx */
186 + dma_addr_t dma_output_addr; /* dma mapped buffer for TDM Tx */
187 + u16 physical_num_ts; /* physical number of timeslots in the tdm
188 + frame */
189 + u32 phase_rx; /* cycles through 0, 1, 2 */
190 + u32 phase_tx; /* cycles through 0, 1, 2 */
191 + /*
192 +	 * The following two variables deal with the "stutter" problem.
193 +	 * The "stutter" period is about 20 frames or so; it varies with the
194 +	 * number of active channels and the sample depth, so the code should
195 +	 * let a few Rx interrupts go by before processing data.
196 + */
197 + u32 tdm_icnt;
198 + u32 tdm_flag;
199 + struct ucc_transparent_pram __iomem *ucc_pram;
200 + struct qe_bd __iomem *tx_bd;
201 + struct qe_bd __iomem *rx_bd;
202 + u32 ucc_pram_offset;
203 + u32 tx_bd_offset;
204 + u32 rx_bd_offset;
205 + u32 rx_ucode_buf_offset;
206 + u32 tx_ucode_buf_offset;
207 + bool leg_slic;
208 + wait_queue_head_t wakeup_event;
209 +};
210 +
211 +struct tdm_client {
212 + u32 client_id;
213 + void (*tdm_read)(u32 client_id, short chn_id,
214 + short *pcm_buffer, short len);
215 + void (*tdm_write)(u32 client_id, short chn_id,
216 + short *pcm_buffer, short len);
217 + wait_queue_head_t *wakeup_event;
218 +};
219 +
220 +#define MAX_PHASE 1
221 +#define NR_BUFS 2
222 +#define EFF_ACTIVE_CH (ACTIVE_CH / 2)
223 +
224 +#endif
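For reference, a worked example (illustrative only, not part of the patch) of how the SI RAM entry macros defined above combine into one 16-bit entry; the UCC number 1 used below is an arbitrary assumed value:

/* Composing one SI RAM entry from the macros in ucc_tdm.h. */
u16 entry = SIR_UCC(1)	/* (1 + 9) << 5             = 0x0140 */
	  | SIR_CNT(0)	/* count field (0 << 2)     = 0x0000 */
	  | SIR_BYTE	/* byte resolution          = 0x0002 */
	  | SIR_LAST;	/* last entry in the SIxRAM = 0x0001 */
/* entry == 0x0143 */

set_siram() in ucc_tdm.c below writes entries of exactly this form into qe_immr->sir.rx[]/tx[] with out_be16(), one per physical timeslot.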
225 --- /dev/null
226 +++ b/drivers/misc/ucc_tdm.c
227 @@ -0,0 +1,1017 @@
228 +/*
229 + * drivers/misc/ucc_tdm.c
230 + *
231 + * UCC Based Linux TDM Driver
232 + * This driver is designed to support UCC based TDM for PowerPC processors.
233 + * This driver can interface with SLIC device to run VOIP kind of
234 + * applications.
235 + *
236 + * Author: Ashish Kalra & Poonam Aggrwal
237 + *
238 + * Copyright (c) 2007 Freescale Semiconductor, Inc.
239 + *
240 + * This program is free software; you can redistribute it and/or modify it
241 + * under the terms of the GNU General Public License as published by the
242 + * Free Software Foundation; either version 2 of the License, or (at your
243 + * option) any later version.
244 + */
245 +
246 +#include <generated/autoconf.h>
247 +#include <linux/module.h>
248 +#include <linux/sched.h>
249 +#include <linux/kernel.h>
250 +#include <linux/slab.h>
251 +#include <linux/errno.h>
252 +#include <linux/types.h>
253 +#include <linux/interrupt.h>
254 +#include <linux/time.h>
255 +#include <linux/skbuff.h>
256 +#include <linux/proc_fs.h>
257 +#include <linux/delay.h>
258 +#include <linux/dma-mapping.h>
259 +#include <linux/string.h>
260 +#include <linux/irq.h>
261 +#include <linux/of_platform.h>
262 +#include <linux/io.h>
263 +#include <linux/wait.h>
264 +#include <linux/timer.h>
265 +
266 +#include <asm/immap_qe.h>
267 +#include <asm/qe.h>
268 +#include <asm/ucc.h>
269 +#include <asm/ucc_fast.h>
270 +#include <asm/ucc_slow.h>
271 +
272 +#include "ucc_tdm.h"
273 +#define DRV_DESC "Freescale QE UCC TDM Driver"
274 +#define DRV_NAME "ucc_tdm"
275 +
276 +
277 +/*
278 + * Define UCC_CACHE_SNOOPING_DISABLED if snooping or hardware-based cache
279 + * coherency is disabled on the UCC transparent controller. This flag enables
280 + * software-based cache-coherency support by explicitly flushing data cache
281 + * contents after setting up the TDM output buffer(s) and invalidating the
282 + * data cache contents before the TDM input buffer(s) are read.
283 + */
284 +#undef UCC_CACHE_SNOOPING_DISABLED
285 +
286 +#define MAX_NUM_TDM_DEVICES 8
287 +
288 +static struct tdm_ctrl *tdm_ctrl[MAX_NUM_TDM_DEVICES];
289 +
290 +static int num_tdm_devices;
291 +static int num_tdm_clients;
292 +
293 +static struct ucc_tdm_info utdm_primary_info = {
294 + .uf_info = {
295 + .tsa = 1,
296 + .cdp = 1,
297 + .cds = 1,
298 + .ctsp = 1,
299 + .ctss = 1,
300 + .revd = 1,
301 + .urfs = 0x128,
302 + .utfs = 0x128,
303 + .utfet = 0,
304 + .utftt = 0x128,
305 + .ufpt = 256,
306 + .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_TRANSPARENT,
307 + .tenc = UCC_FAST_TX_ENCODING_NRZ,
308 + .renc = UCC_FAST_RX_ENCODING_NRZ,
309 + .tcrc = UCC_FAST_16_BIT_CRC,
310 + .synl = UCC_FAST_SYNC_LEN_NOT_USED,
311 + },
312 + .ucc_busy = 0,
313 +};
314 +
315 +static struct ucc_tdm_info utdm_info[8];
316 +
317 +static void dump_siram(struct tdm_ctrl *tdm_c)
318 +{
319 +#ifdef DEBUG
320 + int i;
321 + u16 phy_num_ts;
322 +
323 + phy_num_ts = tdm_c->physical_num_ts;
324 +
325 + pr_debug("SI TxRAM dump\n");
326 + /* each slot entry in SI RAM is of 2 bytes */
327 + for (i = 0; i < phy_num_ts * 2; i++)
328 + pr_debug("%x ", in_8(&qe_immr->sir.tx[i]));
329 + pr_debug("\nSI RxRAM dump\n");
330 + for (i = 0; i < phy_num_ts * 2; i++)
331 + pr_debug("%x ", in_8(&qe_immr->sir.rx[i]));
332 + pr_debug("\n");
333 +#endif
334 +}
335 +
336 +static void dump_ucc(struct tdm_ctrl *tdm_c)
337 +{
338 +#ifdef DEBUG
339 + struct ucc_transparent_pram *ucc_pram;
340 +
341 + ucc_pram = tdm_c->ucc_pram;
342 +
343 + pr_debug("%s Dumping UCC Registers\n", __FUNCTION__);
344 + ucc_fast_dump_regs(tdm_c->uf_private);
345 + pr_debug("%s Dumping UCC Parameter RAM\n", __FUNCTION__);
346 + pr_debug("rbase = 0x%x\n", in_be32(&ucc_pram->rbase));
347 + pr_debug("rbptr = 0x%x\n", in_be32(&ucc_pram->rbptr));
348 + pr_debug("mrblr = 0x%x\n", in_be16(&ucc_pram->mrblr));
349 + pr_debug("rbdlen = 0x%x\n", in_be16(&ucc_pram->rbdlen));
350 + pr_debug("rbdstat = 0x%x\n", in_be16(&ucc_pram->rbdstat));
351 + pr_debug("rstate = 0x%x\n", in_be32(&ucc_pram->rstate));
352 + pr_debug("rdptr = 0x%x\n", in_be32(&ucc_pram->rdptr));
353 + pr_debug("tbase = 0x%x\n", in_be32(&ucc_pram->tbase));
354 + pr_debug("tbptr = 0x%x\n", in_be32(&ucc_pram->tbptr));
355 + pr_debug("tbdlen = 0x%x\n", in_be16(&ucc_pram->tbdlen));
356 + pr_debug("tbdstat = 0x%x\n", in_be16(&ucc_pram->tbdstat));
357 + pr_debug("tstate = 0x%x\n", in_be32(&ucc_pram->tstate));
358 + pr_debug("tdptr = 0x%x\n", in_be32(&ucc_pram->tdptr));
359 +#endif
360 +}
361 +
362 +/*
363 + * For use when a framing bit is not present
364 + * Program current-route SI ram
365 + * Set SIxRAM TDMx
366 + * Entries must be in units of 8.
367 + * SIR_UCC -> Channel Select
368 + * SIR_CNT -> Number of bits or bytes
369 + * SIR_BYTE -> Byte or Bit resolution
370 + * SIR_LAST -> Indicates last entry in SIxRAM
371 + * SIR_IDLE -> The Tx data pin is Tri-stated and the Rx data pin is
372 + * ignored
373 + */
374 +static void set_siram(struct tdm_ctrl *tdm_c, enum comm_dir dir)
375 +{
376 + const u16 *mask;
377 + u16 temp_mask = 1;
378 + u16 siram_code = 0;
379 + u32 i, j, k;
380 + u32 ucc;
381 + u32 phy_num_ts;
382 +
383 + phy_num_ts = tdm_c->physical_num_ts;
384 + ucc = tdm_c->ut_info->uf_info.ucc_num;
385 +
386 + if (dir == COMM_DIR_RX)
387 + mask = tdm_c->rx_mask;
388 + else
389 + mask = tdm_c->tx_mask;
390 + k = 0;
391 + j = 0;
392 + for (i = 0; i < phy_num_ts; i++) {
393 + if ((mask[k] & temp_mask) == temp_mask)
394 + siram_code = SIR_UCC(ucc) | SIR_CNT(0) | SIR_BYTE;
395 + else
396 + siram_code = SIR_IDLE | SIR_CNT(0) | SIR_BYTE;
397 + if (dir == COMM_DIR_RX)
398 + out_be16((u16 *)&qe_immr->sir.rx[i * 2], siram_code);
399 + else
400 + out_be16((u16 *)&qe_immr->sir.tx[i * 2], siram_code);
401 + temp_mask = temp_mask << 1;
402 + j++;
403 + if (j >= 16) {
404 + j = 0;
405 + temp_mask = 0x0001;
406 + k++;
407 + }
408 + }
409 + siram_code = siram_code | SIR_LAST;
410 +
411 + if (dir == COMM_DIR_RX)
412 + out_be16((u16 *)&qe_immr->sir.rx[(phy_num_ts - 1) * 2],
413 + siram_code);
414 + else
415 + out_be16((u16 *)&qe_immr->sir.tx[(phy_num_ts - 1) * 2],
416 + siram_code);
417 +}
418 +
419 +static void config_si(struct tdm_ctrl *tdm_c)
420 +{
421 + u8 rxsyncdelay, txsyncdelay, tdm_port;
422 + u16 sixmr_val = 0;
423 + u32 tdma_mode_off;
424 + u16 *si1_tdm_mode_reg;
425 +
426 + tdm_port = tdm_c->tdm_port;
427 +
428 + set_siram(tdm_c, COMM_DIR_RX);
429 +
430 + set_siram(tdm_c, COMM_DIR_TX);
431 +
432 + rxsyncdelay = tdm_c->cfg_ctrl.rx_fr_sync_delay;
433 + txsyncdelay = tdm_c->cfg_ctrl.tx_fr_sync_delay;
434 + if (tdm_c->cfg_ctrl.com_pin)
435 + sixmr_val |= SIMODE_CRT;
436 + if (tdm_c->cfg_ctrl.fr_sync_level == 1)
437 + sixmr_val |= SIMODE_SL;
438 + if (tdm_c->cfg_ctrl.clk_edge == 1)
439 + sixmr_val |= SIMODE_CE;
440 + if (tdm_c->cfg_ctrl.fr_sync_edge == 1)
441 + sixmr_val |= SIMODE_FE;
442 + sixmr_val |= (SIMODE_TFSD(txsyncdelay) | SIMODE_RFSD(rxsyncdelay));
443 +
444 + tdma_mode_off = SI_TDM_MODE_REGISTER_OFFSET * tdm_c->tdm_port;
445 +
446 +	si1_tdm_mode_reg = (u16 *)((u8 *)&qe_immr->si1 + tdma_mode_off);
447 + out_be16(si1_tdm_mode_reg, sixmr_val);
448 +
449 + dump_siram(tdm_c);
450 +}
451 +
452 +static int tdm_init(struct tdm_ctrl *tdm_c)
453 +{
454 + u32 tdm_port, ucc, act_num_ts;
455 + int ret, i, err;
456 + u32 cecr_subblock;
457 + u32 pram_offset;
458 + u32 rxbdt_offset;
459 + u32 txbdt_offset;
460 + u32 rx_ucode_buf_offset, tx_ucode_buf_offset;
461 + u16 bd_status, bd_len;
462 + enum qe_clock clock;
463 + struct qe_bd __iomem *rx_bd, *tx_bd;
464 +
465 + tdm_port = tdm_c->tdm_port;
466 + ucc = tdm_c->ut_info->uf_info.ucc_num;
467 + act_num_ts = tdm_c->cfg_ctrl.active_num_ts;
468 +
469 + /*
470 +	 * TDM Tx and Rx CLKs = 2048 kHz.
471 + */
472 + if (strstr(tdm_c->ut_info->uf_info.tdm_tx_clk, "BRG")) {
473 + clock = qe_clock_source(tdm_c->ut_info->uf_info.tdm_tx_clk);
474 + err = qe_setbrg(clock, 2048000, 1);
475 + if (err < 0) {
476 + printk(KERN_ERR "%s: Failed to set %s\n", __FUNCTION__,
477 + tdm_c->ut_info->uf_info.tdm_tx_clk);
478 + return err;
479 + }
480 + }
481 + if (strstr(tdm_c->ut_info->uf_info.tdm_rx_clk, "BRG")) {
482 + clock = qe_clock_source(tdm_c->ut_info->uf_info.tdm_rx_clk);
483 + err = qe_setbrg(clock, 2048000, 1);
484 + if (err < 0) {
485 + printk(KERN_ERR "%s: Failed to set %s\n", __FUNCTION__,
486 + tdm_c->ut_info->uf_info.tdm_rx_clk);
487 + return err;
488 + }
489 + }
490 + /*
491 +	 * TDM FSyncs = 4 kHz.
492 + */
493 + if (strstr(tdm_c->ut_info->uf_info.tdm_tx_sync, "BRG")) {
494 + clock = qe_clock_source(tdm_c->ut_info->uf_info.tdm_tx_sync);
495 + err = qe_setbrg(clock, 4000, 1);
496 + if (err < 0) {
497 + printk(KERN_ERR "%s: Failed to set %s\n", __FUNCTION__,
498 + tdm_c->ut_info->uf_info.tdm_tx_sync);
499 + return err;
500 + }
501 + }
502 + if (strstr(tdm_c->ut_info->uf_info.tdm_rx_sync, "BRG")) {
503 + clock = qe_clock_source(tdm_c->ut_info->uf_info.tdm_rx_sync);
504 + err = qe_setbrg(clock, 4000, 1);
505 + if (err < 0) {
506 + printk(KERN_ERR "%s: Failed to set %s\n", __FUNCTION__,
507 + tdm_c->ut_info->uf_info.tdm_rx_sync);
508 + return err;
509 + }
510 + }
511 +
512 + tdm_c->ut_info->uf_info.uccm_mask = (u32)
513 + ((UCC_TRANS_UCCE_RXB | UCC_TRANS_UCCE_BSY) << 16);
514 +
515 + if (ucc_fast_init(&(tdm_c->ut_info->uf_info), &tdm_c->uf_private)) {
516 + printk(KERN_ERR "%s: Failed to init uccf\n", __FUNCTION__);
517 + return -ENOMEM;
518 + }
519 +
520 + ucc_fast_disable(tdm_c->uf_private, COMM_DIR_RX | COMM_DIR_TX);
521 +
522 + /* Write to QE CECR, UCCx channel to Stop Transmission */
523 + cecr_subblock = ucc_fast_get_qe_cr_subblock(ucc);
524 + qe_issue_cmd(QE_STOP_TX, cecr_subblock,
525 + (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0);
526 +
527 + pram_offset = qe_muram_alloc(UCC_TRANSPARENT_PRAM_SIZE,
528 + ALIGNMENT_OF_UCC_SLOW_PRAM);
529 + if (IS_ERR_VALUE(pram_offset)) {
530 + printk(KERN_ERR "%s: Cannot allocate MURAM memory for"
531 + " transparent UCC\n", __FUNCTION__);
532 + ret = -ENOMEM;
533 + goto pram_alloc_error;
534 + }
535 +
536 + cecr_subblock = ucc_fast_get_qe_cr_subblock(ucc);
537 + qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
538 + QE_CR_PROTOCOL_UNSPECIFIED, pram_offset);
539 +
540 + tdm_c->ucc_pram = qe_muram_addr(pram_offset);
541 + tdm_c->ucc_pram_offset = pram_offset;
542 +
543 + /*
544 + * zero-out pram, this will also ensure RSTATE, TSTATE are cleared, also
545 + * DISFC & CRCEC counters will be initialized.
546 + */
547 + memset(tdm_c->ucc_pram, 0, sizeof(struct ucc_transparent_pram));
548 +
549 + /* rbase, tbase alignment is 8. */
550 + rxbdt_offset = qe_muram_alloc(NR_BUFS * sizeof(struct qe_bd),
551 + QE_ALIGNMENT_OF_BD);
552 + if (IS_ERR_VALUE(rxbdt_offset)) {
553 + printk(KERN_ERR "%s: Cannot allocate MURAM memory for RxBDs\n",
554 + __FUNCTION__);
555 + ret = -ENOMEM;
556 + goto rxbd_alloc_error;
557 + }
558 + txbdt_offset = qe_muram_alloc(NR_BUFS * sizeof(struct qe_bd),
559 + QE_ALIGNMENT_OF_BD);
560 + if (IS_ERR_VALUE(txbdt_offset)) {
561 + printk(KERN_ERR "%s: Cannot allocate MURAM memory for TxBDs\n",
562 + __FUNCTION__);
563 + ret = -ENOMEM;
564 + goto txbd_alloc_error;
565 + }
566 + tdm_c->tx_bd = qe_muram_addr(txbdt_offset);
567 + tdm_c->rx_bd = qe_muram_addr(rxbdt_offset);
568 +
569 + tdm_c->tx_bd_offset = txbdt_offset;
570 + tdm_c->rx_bd_offset = rxbdt_offset;
571 +
572 + rx_bd = tdm_c->rx_bd;
573 + tx_bd = tdm_c->tx_bd;
574 +
575 + out_be32(&tdm_c->ucc_pram->rbase, (u32) immrbar_virt_to_phys(rx_bd));
576 + out_be32(&tdm_c->ucc_pram->tbase, (u32) immrbar_virt_to_phys(tx_bd));
577 +
578 + for (i = 0; i < NR_BUFS - 1; i++) {
579 + bd_status = (u16) ((R_E | R_CM | R_I) >> 16);
580 + bd_len = 0;
581 + out_be16(&rx_bd->length, bd_len);
582 + out_be16(&rx_bd->status, bd_status);
583 + out_be32(&rx_bd->buf,
584 + tdm_c->dma_input_addr + i * SAMPLE_DEPTH * act_num_ts);
585 + rx_bd += 1;
586 +
587 + bd_status = (u16) ((T_R | T_CM) >> 16);
588 + bd_len = SAMPLE_DEPTH * act_num_ts;
589 + out_be16(&tx_bd->length, bd_len);
590 + out_be16(&tx_bd->status, bd_status);
591 + out_be32(&tx_bd->buf,
592 + tdm_c->dma_output_addr + i * SAMPLE_DEPTH * act_num_ts);
593 + tx_bd += 1;
594 + }
595 +
596 + bd_status = (u16) ((R_E | R_CM | R_I | R_W) >> 16);
597 + bd_len = 0;
598 + out_be16(&rx_bd->length, bd_len);
599 + out_be16(&rx_bd->status, bd_status);
600 + out_be32(&rx_bd->buf,
601 + tdm_c->dma_input_addr + i * SAMPLE_DEPTH * act_num_ts);
602 +
603 + bd_status = (u16) ((T_R | T_CM | T_W) >> 16);
604 + bd_len = SAMPLE_DEPTH * act_num_ts;
605 + out_be16(&tx_bd->length, bd_len);
606 + out_be16(&tx_bd->status, bd_status);
607 + out_be32(&tx_bd->buf,
608 + tdm_c->dma_output_addr + i * SAMPLE_DEPTH * act_num_ts);
609 +
610 + config_si(tdm_c);
611 +
612 + setbits32(&qe_immr->ic.qimr, (0x80000000UL >> ucc));
613 +
614 + rx_ucode_buf_offset = qe_muram_alloc(32, 32);
615 + if (IS_ERR_VALUE(rx_ucode_buf_offset)) {
616 + printk(KERN_ERR "%s: Cannot allocate MURAM mem for Rx"
617 + " ucode buf\n", __FUNCTION__);
618 + ret = -ENOMEM;
619 + goto rxucode_buf_alloc_error;
620 + }
621 +
622 + tx_ucode_buf_offset = qe_muram_alloc(32, 32);
623 + if (IS_ERR_VALUE(tx_ucode_buf_offset)) {
624 + printk(KERN_ERR "%s: Cannot allocate MURAM mem for Tx"
625 + " ucode buf\n", __FUNCTION__);
626 + ret = -ENOMEM;
627 + goto txucode_buf_alloc_error;
628 + }
629 + out_be16(&tdm_c->ucc_pram->riptr, (u16) rx_ucode_buf_offset);
630 + out_be16(&tdm_c->ucc_pram->tiptr, (u16) tx_ucode_buf_offset);
631 +
632 + tdm_c->rx_ucode_buf_offset = rx_ucode_buf_offset;
633 + tdm_c->tx_ucode_buf_offset = tx_ucode_buf_offset;
634 +
635 + /*
636 + * set the receive buffer descriptor maximum size to be
637 + * SAMPLE_DEPTH * number of active RX channels
638 + */
639 + out_be16(&tdm_c->ucc_pram->mrblr, (u16) SAMPLE_DEPTH * act_num_ts);
640 +
641 + /*
642 + * enable snooping and BE byte ordering on the UCC pram's
643 + * tstate & rstate registers.
644 + */
645 + out_be32(&tdm_c->ucc_pram->tstate, 0x30000000UL);
646 + out_be32(&tdm_c->ucc_pram->rstate, 0x30000000UL);
647 +
648 +	/* Put UCC transparent controller into serial interface mode. */
649 + out_be32(&tdm_c->uf_regs->upsmr, 0);
650 +
651 + /* Reset TX and RX for UCCx */
652 + cecr_subblock = ucc_fast_get_qe_cr_subblock(ucc);
653 + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
654 + (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0);
655 +
656 + return 0;
657 +
658 +txucode_buf_alloc_error:
659 + qe_muram_free(rx_ucode_buf_offset);
660 +rxucode_buf_alloc_error:
661 + qe_muram_free(txbdt_offset);
662 +txbd_alloc_error:
663 + qe_muram_free(rxbdt_offset);
664 +rxbd_alloc_error:
665 + qe_muram_free(pram_offset);
666 +pram_alloc_error:
667 + ucc_fast_free(tdm_c->uf_private);
668 + return ret;
669 +}
670 +
671 +static void tdm_deinit(struct tdm_ctrl *tdm_c)
672 +{
673 + qe_muram_free(tdm_c->rx_ucode_buf_offset);
674 + qe_muram_free(tdm_c->tx_ucode_buf_offset);
675 +
676 + if (tdm_c->rx_bd_offset) {
677 + qe_muram_free(tdm_c->rx_bd_offset);
678 + tdm_c->rx_bd = NULL;
679 + tdm_c->rx_bd_offset = 0;
680 + }
681 + if (tdm_c->tx_bd_offset) {
682 + qe_muram_free(tdm_c->tx_bd_offset);
683 + tdm_c->tx_bd = NULL;
684 + tdm_c->tx_bd_offset = 0;
685 + }
686 + if (tdm_c->ucc_pram_offset) {
687 + qe_muram_free(tdm_c->ucc_pram_offset);
688 + tdm_c->ucc_pram = NULL;
689 + tdm_c->ucc_pram_offset = 0;
690 + }
691 +}
692 +
693 +
694 +static irqreturn_t tdm_isr(int irq, void *dev_id)
695 +{
696 + u8 *input_tdm_buffer, *output_tdm_buffer;
697 + u32 txb, rxb;
698 + u32 ucc;
699 + register u32 ucce = 0;
700 + struct tdm_ctrl *tdm_c;
701 + tdm_c = (struct tdm_ctrl *)dev_id;
702 +
703 + tdm_c->tdm_icnt++;
704 + ucc = tdm_c->ut_info->uf_info.ucc_num;
705 + input_tdm_buffer = tdm_c->tdm_input_data;
706 + output_tdm_buffer = tdm_c->tdm_output_data;
707 +
708 + if (in_be32(tdm_c->uf_private->p_ucce) &
709 + (UCC_TRANS_UCCE_BSY << 16)) {
710 + out_be32(tdm_c->uf_private->p_ucce,
711 + (UCC_TRANS_UCCE_BSY << 16));
712 + pr_info("%s: From tdm isr busy interrupt\n",
713 + __FUNCTION__);
714 + dump_ucc(tdm_c);
715 +
716 + return IRQ_HANDLED;
717 + }
718 +
719 + if (tdm_c->tdm_flag == 1) {
720 + /* track phases for Rx/Tx */
721 + tdm_c->phase_rx += 1;
722 + if (tdm_c->phase_rx == MAX_PHASE)
723 + tdm_c->phase_rx = 0;
724 +
725 + tdm_c->phase_tx += 1;
726 + if (tdm_c->phase_tx == MAX_PHASE)
727 + tdm_c->phase_tx = 0;
728 +
729 +#ifdef CONFIG_TDM_HW_LB_TSA_SLIC
730 + {
731 + u32 temp_rx, temp_tx, phase_tx, phase_rx;
732 + int i;
733 + phase_rx = tdm_c->phase_rx;
734 + phase_tx = tdm_c->phase_tx;
735 + if (phase_rx == 0)
736 + phase_rx = MAX_PHASE;
737 + else
738 + phase_rx -= 1;
739 + if (phase_tx == 0)
740 + phase_tx = MAX_PHASE;
741 + else
742 + phase_tx -= 1;
743 + temp_rx = phase_rx * SAMPLE_DEPTH * ACTIVE_CH;
744 + temp_tx = phase_tx * SAMPLE_DEPTH * ACTIVE_CH;
745 +
746 + /*check if loopback received data on TS0 is correct. */
747 + pr_debug("%s: check if loopback received data on TS0"
748 + " is correct\n", __FUNCTION__);
749 + pr_debug("%d,%d ", phase_rx, phase_tx);
750 + for (i = 0; i < 8; i++)
751 + pr_debug("%1d,%1d ",
752 + input_tdm_buffer[temp_rx + i],
753 + output_tdm_buffer[temp_tx + i]);
754 + pr_debug("\n");
755 + }
756 +#endif
757 +
758 + /* schedule BH */
759 + wake_up_interruptible(&tdm_c->wakeup_event);
760 + } else {
761 + if (tdm_c->tdm_icnt == STUTTER_INT_CNT) {
762 + txb = in_be32(&tdm_c->ucc_pram->tbptr) -
763 + in_be32(&tdm_c->ucc_pram->tbase);
764 + rxb = in_be32(&tdm_c->ucc_pram->rbptr) -
765 + in_be32(&tdm_c->ucc_pram->rbase);
766 + tdm_c->phase_tx = txb / sizeof(struct qe_bd);
767 + tdm_c->phase_rx = rxb / sizeof(struct qe_bd);
768 +
769 +#ifdef CONFIG_TDM_HW_LB_TSA_SLIC
770 + tdm_c->phase_tx = tdm_c->phase_rx;
771 +#endif
772 +
773 + /* signal "stuttering" period is over */
774 + tdm_c->tdm_flag = 1;
775 +
776 + pr_debug("%s: stuttering period is over\n",
777 + __FUNCTION__);
778 +
779 + if (in_be32(tdm_c->uf_private->p_ucce) &
780 + (UCC_TRANS_UCCE_TXE << 16)) {
781 + u32 cecr_subblock;
782 + out_be32(tdm_c->uf_private->p_ucce,
783 + (UCC_TRANS_UCCE_TXE << 16));
784 + pr_debug("%s: From tdm isr txe interrupt\n",
785 + __FUNCTION__);
786 +
787 + cecr_subblock =
788 + ucc_fast_get_qe_cr_subblock(ucc);
789 + qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
790 + (u8) QE_CR_PROTOCOL_UNSPECIFIED,
791 + 0);
792 + }
793 + }
794 + }
795 +
796 + ucce = (in_be32(tdm_c->uf_private->p_ucce)
797 + & in_be32(tdm_c->uf_private->p_uccm));
798 +
799 + out_be32(tdm_c->uf_private->p_ucce, ucce);
800 +
801 + return IRQ_HANDLED;
802 +}
803 +
804 +static int tdm_start(struct tdm_ctrl *tdm_c)
805 +{
806 + if (request_irq(tdm_c->ut_info->uf_info.irq, tdm_isr,
807 + 0, "tdm", tdm_c)) {
808 + printk(KERN_ERR "%s: request_irq for tdm_isr failed\n",
809 + __FUNCTION__);
810 + return -ENODEV;
811 + }
812 +
813 + ucc_fast_enable(tdm_c->uf_private, COMM_DIR_RX | COMM_DIR_TX);
814 +
815 + pr_info("%s 16-bit linear pcm mode active with"
816 + " slots 0 & 2\n", __FUNCTION__);
817 +
818 + dump_siram(tdm_c);
819 + dump_ucc(tdm_c);
820 +
821 + setbits8(&(qe_immr->si1.siglmr1_h), (0x1 << tdm_c->tdm_port));
822 + pr_info("%s UCC based TDM enabled\n", __FUNCTION__);
823 +
824 + return 0;
825 +}
826 +
827 +static void tdm_stop(struct tdm_ctrl *tdm_c)
828 +{
829 + u32 port, si;
830 + u32 ucc;
831 + u32 cecr_subblock;
832 +
833 + port = tdm_c->tdm_port;
834 + si = tdm_c->si;
835 + ucc = tdm_c->ut_info->uf_info.ucc_num;
836 + cecr_subblock = ucc_fast_get_qe_cr_subblock(ucc);
837 +
838 + qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
839 + (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0);
840 + qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
841 + (u8) QE_CR_PROTOCOL_UNSPECIFIED, 0);
842 +
843 + clrbits8(&qe_immr->si1.siglmr1_h, (0x1 << port));
844 + ucc_fast_disable(tdm_c->uf_private, COMM_DIR_RX);
845 + ucc_fast_disable(tdm_c->uf_private, COMM_DIR_TX);
846 + free_irq(tdm_c->ut_info->uf_info.irq, tdm_c);
847 +}
848 +
849 +
850 +static void config_tdm(struct tdm_ctrl *tdm_c)
851 +{
852 + u32 i, j, k;
853 +
854 + j = 0;
855 + k = 0;
856 +
857 + /* Set Mask Bits */
858 + for (i = 0; i < ACTIVE_CH; i++) {
859 + tdm_c->tx_mask[k] |= (1 << j);
860 + tdm_c->rx_mask[k] |= (1 << j);
861 + j++;
862 + if (j >= 16) {
863 + j = 0;
864 + k++;
865 + }
866 + }
867 + /* physical number of slots in a frame */
868 + tdm_c->physical_num_ts = NUM_TS;
869 +
870 + /* common receive and transmit pins */
871 + tdm_c->cfg_ctrl.com_pin = 1;
872 +
873 + /* L1R/TSYNC active logic "1" */
874 + tdm_c->cfg_ctrl.fr_sync_level = 0;
875 +
876 + /*
877 + * TX data on rising edge of clock
878 + * RX data on falling edge
879 + */
880 + tdm_c->cfg_ctrl.clk_edge = 0;
881 +
882 + /* Frame sync sampled on falling edge */
883 + tdm_c->cfg_ctrl.fr_sync_edge = 0;
884 +
885 + /* no bit delay */
886 + tdm_c->cfg_ctrl.rx_fr_sync_delay = 0;
887 +
888 + /* no bit delay */
889 + tdm_c->cfg_ctrl.tx_fr_sync_delay = 0;
890 +
891 +#ifndef CONFIG_TDM_HW_LB_TSA_SLIC
892 + if (tdm_c->leg_slic) {
893 +		/* Need 1 bit delay for Legerity SLIC */
894 + tdm_c->cfg_ctrl.rx_fr_sync_delay = 1;
895 + tdm_c->cfg_ctrl.tx_fr_sync_delay = 1;
896 + pr_info("%s Delay for Legerity!\n", __FUNCTION__);
897 + }
898 +#endif
899 +
900 + tdm_c->cfg_ctrl.active_num_ts = ACTIVE_CH;
901 +}
902 +
903 +static void tdm_read(u32 client_id, short chn_id, short *pcm_buffer,
904 + short len)
905 +{
906 + int i;
907 + u32 phase_rx;
908 + /* point to where to start for the current phase data processing */
909 + u32 temp_rx;
910 +
911 + struct tdm_ctrl *tdm_c = tdm_ctrl[client_id];
912 +
913 + u16 *input_tdm_buffer =
914 + (u16 *)tdm_c->tdm_input_data;
915 +
916 + phase_rx = tdm_c->phase_rx;
917 + if (phase_rx == 0)
918 + phase_rx = MAX_PHASE;
919 + else
920 + phase_rx -= 1;
921 +
922 + temp_rx = phase_rx * SAMPLE_DEPTH * EFF_ACTIVE_CH;
923 +
924 +#ifdef UCC_CACHE_SNOOPING_DISABLED
925 + flush_dcache_range((size_t) &input_tdm_buffer[temp_rx],
926 + (size_t) &input_tdm_buffer[temp_rx +
927 + SAMPLE_DEPTH * ACTIVE_CH]);
928 +#endif
929 + for (i = 0; i < len; i++)
930 + pcm_buffer[i] =
931 + input_tdm_buffer[i * EFF_ACTIVE_CH + temp_rx + chn_id];
932 +
933 +}
934 +
935 +static void tdm_write(u32 client_id, short chn_id, short *pcm_buffer,
936 + short len)
937 +{
938 + int i;
939 + int phase_tx;
940 + u32 txb;
941 + /* point to where to start for the current phase data processing */
942 + int temp_tx;
943 + struct tdm_ctrl *tdm_c = tdm_ctrl[client_id];
944 +
945 + u16 *output_tdm_buffer;
946 + output_tdm_buffer = (u16 *)tdm_c->tdm_output_data;
947 + txb = in_be32(&tdm_c->ucc_pram->tbptr) -
948 + in_be32(&tdm_c->ucc_pram->tbase);
949 + phase_tx = txb / sizeof(struct qe_bd);
950 +
951 + if (phase_tx == 0)
952 + phase_tx = MAX_PHASE;
953 + else
954 + phase_tx -= 1;
955 +
956 + temp_tx = phase_tx * SAMPLE_DEPTH * EFF_ACTIVE_CH;
957 +
958 + for (i = 0; i < len; i++)
959 + output_tdm_buffer[i * EFF_ACTIVE_CH + temp_tx + chn_id] =
960 + pcm_buffer[i];
961 +
962 +#ifdef UCC_CACHE_SNOOPING_DISABLED
963 + flush_dcache_range((size_t) &output_tdm_buffer[temp_tx],
964 + (size_t) &output_tdm_buffer[temp_tx + SAMPLE_DEPTH *
965 + ACTIVE_CH]);
966 +#endif
967 +}
968 +
969 +
970 +static int tdm_register_client(struct tdm_client *tdm_client)
971 +{
972 + u32 i;
973 + if (num_tdm_clients == num_tdm_devices) {
974 + printk(KERN_ERR "all TDM devices busy\n");
975 + return -EBUSY;
976 + }
977 +
978 + for (i = 0; i < num_tdm_devices; i++) {
979 + if (!tdm_ctrl[i]->device_busy) {
980 + tdm_ctrl[i]->device_busy = 1;
981 + break;
982 + }
983 + }
984 + num_tdm_clients++;
985 + tdm_client->client_id = i;
986 + tdm_client->tdm_read = tdm_read;
987 + tdm_client->tdm_write = tdm_write;
988 + tdm_client->wakeup_event =
989 + &(tdm_ctrl[i]->wakeup_event);
990 + return 0;
991 +}
992 +EXPORT_SYMBOL_GPL(tdm_register_client);
993 +
994 +static int tdm_deregister_client(struct tdm_client *tdm_client)
995 +{
996 + num_tdm_clients--;
997 + tdm_ctrl[tdm_client->client_id]->device_busy = 0;
998 + return 0;
999 +}
1000 +EXPORT_SYMBOL_GPL(tdm_deregister_client);
1001 +
1002 +static int ucc_tdm_probe(struct of_device *ofdev,
1003 + const struct of_device_id *match)
1004 +{
1005 + struct device_node *np = ofdev->node;
1006 + struct resource res;
1007 + const unsigned int *prop;
1008 + u32 ucc_num, device_num, err, ret = 0;
1009 + struct device_node *np_tmp;
1010 + dma_addr_t physaddr;
1011 + void *tdm_buff;
1012 + struct ucc_tdm_info *ut_info;
1013 +
1014 + prop = of_get_property(np, "device-id", NULL);
1015 + if (prop == NULL) {
1016 + printk(KERN_ERR "ucc_tdm: device-id missing\n");
1017 + return -ENODEV;
1018 + }
1019 +
1020 + ucc_num = *prop - 1;
1021 +	if (ucc_num > 7)
1022 + return -ENODEV;
1023 +
1024 + ut_info = &utdm_info[ucc_num];
1025 + if (ut_info->ucc_busy) {
1026 +		printk(KERN_ERR "ucc_tdm: UCC in use by another TDM driver "
1027 + "instance\n");
1028 + return -EBUSY;
1029 + }
1030 + if (num_tdm_devices == MAX_NUM_TDM_DEVICES) {
1031 + printk(KERN_ERR "ucc_tdm: All TDM devices already"
1032 + " initialized\n");
1033 + return -ENODEV;
1034 + }
1035 +
1036 + ut_info->ucc_busy = 1;
1037 + tdm_ctrl[num_tdm_devices++] =
1038 + kzalloc(sizeof(struct tdm_ctrl), GFP_KERNEL);
1039 + if (!tdm_ctrl[num_tdm_devices - 1]) {
1040 + printk(KERN_ERR "ucc_tdm: no memory to allocate for"
1041 + " tdm control structure\n");
1042 + num_tdm_devices--;
1043 + return -ENOMEM;
1044 + }
1045 + device_num = num_tdm_devices - 1;
1046 +
1047 + tdm_ctrl[device_num]->device = &ofdev->dev;
1048 + tdm_ctrl[device_num]->ut_info = ut_info;
1049 +
1050 + tdm_ctrl[device_num]->ut_info->uf_info.ucc_num = ucc_num;
1051 +
1052 + prop = of_get_property(np, "fsl,tdm-num", NULL);
1053 + if (prop == NULL) {
1054 + ret = -EINVAL;
1055 + goto get_property_error;
1056 + }
1057 +
1058 + tdm_ctrl[device_num]->tdm_port = *prop - 1;
1059 +
1060 + if (tdm_ctrl[device_num]->tdm_port > 3) {
1061 + ret = -EINVAL;
1062 + goto get_property_error;
1063 + }
1064 +
1065 + prop = of_get_property(np, "fsl,si-num", NULL);
1066 + if (prop == NULL) {
1067 + ret = -EINVAL;
1068 + goto get_property_error;
1069 + }
1070 +
1071 + tdm_ctrl[device_num]->si = *prop - 1;
1072 +
1073 + tdm_ctrl[device_num]->ut_info->uf_info.tdm_tx_clk =
1074 + of_get_property(np, "fsl,tdm-tx-clk", NULL);
1075 + if (tdm_ctrl[device_num]->ut_info->uf_info.tdm_tx_clk == NULL) {
1076 + ret = -EINVAL;
1077 + goto get_property_error;
1078 + }
1079 +
1080 + tdm_ctrl[device_num]->ut_info->uf_info.tdm_rx_clk =
1081 + of_get_property(np, "fsl,tdm-rx-clk", NULL);
1082 + if (tdm_ctrl[device_num]->ut_info->uf_info.tdm_rx_clk == NULL) {
1083 + ret = -EINVAL;
1084 + goto get_property_error;
1085 + }
1086 +
1087 + tdm_ctrl[device_num]->ut_info->uf_info.tdm_tx_sync =
1088 + of_get_property(np, "fsl,tdm-tx-sync", NULL);
1089 + if (tdm_ctrl[device_num]->ut_info->uf_info.tdm_tx_sync == NULL) {
1090 + ret = -EINVAL;
1091 + goto get_property_error;
1092 + }
1093 +
1094 + tdm_ctrl[device_num]->ut_info->uf_info.tdm_rx_sync =
1095 + of_get_property(np, "fsl,tdm-rx-sync", NULL);
1096 + if (tdm_ctrl[device_num]->ut_info->uf_info.tdm_rx_sync == NULL) {
1097 + ret = -EINVAL;
1098 + goto get_property_error;
1099 + }
1100 +
1101 + tdm_ctrl[device_num]->ut_info->uf_info.irq =
1102 + irq_of_parse_and_map(np, 0);
1103 + err = of_address_to_resource(np, 0, &res);
1104 + if (err) {
1105 + ret = -EINVAL;
1106 + goto get_property_error;
1107 + }
1108 + tdm_ctrl[device_num]->ut_info->uf_info.regs = res.start;
1109 + tdm_ctrl[device_num]->uf_regs = of_iomap(np, 0);
1110 +
1111 + np_tmp = NULL;
1112 + np_tmp = of_find_compatible_node(np_tmp, "slic", "legerity-slic");
1113 + if (np_tmp != NULL) {
1114 + tdm_ctrl[device_num]->leg_slic = 1;
1115 + of_node_put(np_tmp);
1116 + } else
1117 + tdm_ctrl[device_num]->leg_slic = 0;
1118 +
1119 + config_tdm(tdm_ctrl[device_num]);
1120 +
1121 + tdm_buff = dma_alloc_coherent(NULL, 2 * NR_BUFS * SAMPLE_DEPTH *
1122 + tdm_ctrl[device_num]->cfg_ctrl.active_num_ts,
1123 + &physaddr, GFP_KERNEL);
1124 + if (!tdm_buff) {
1125 +		printk(KERN_ERR "ucc-tdm: could not allocate buffer "
1126 + "descriptors\n");
1127 + ret = -ENOMEM;
1128 + goto alloc_error;
1129 + }
1130 +
1131 + tdm_ctrl[device_num]->tdm_input_data = tdm_buff;
1132 + tdm_ctrl[device_num]->dma_input_addr = physaddr;
1133 +
1134 + tdm_ctrl[device_num]->tdm_output_data = tdm_buff + NR_BUFS *
1135 + SAMPLE_DEPTH * tdm_ctrl[device_num]->cfg_ctrl.active_num_ts;
1136 + tdm_ctrl[device_num]->dma_output_addr = physaddr + NR_BUFS *
1137 + SAMPLE_DEPTH * tdm_ctrl[device_num]->cfg_ctrl.active_num_ts;
1138 +
1139 + init_waitqueue_head(&(tdm_ctrl[device_num]->wakeup_event));
1140 +
1141 + ret = tdm_init(tdm_ctrl[device_num]);
1142 + if (ret != 0)
1143 + goto tdm_init_error;
1144 +
1145 + ret = tdm_start(tdm_ctrl[device_num]);
1146 + if (ret != 0)
1147 + goto tdm_start_error;
1148 +
1149 + dev_set_drvdata(&(ofdev->dev), tdm_ctrl[device_num]);
1150 +
1151 + pr_info("%s UCC based tdm module installed\n", __FUNCTION__);
1152 + return 0;
1153 +
1154 +tdm_start_error:
1155 + tdm_deinit(tdm_ctrl[device_num]);
1156 +tdm_init_error:
1157 + dma_free_coherent(NULL, 2 * NR_BUFS * SAMPLE_DEPTH *
1158 + tdm_ctrl[device_num]->cfg_ctrl.active_num_ts,
1159 + tdm_ctrl[device_num]->tdm_input_data,
1160 + tdm_ctrl[device_num]->dma_input_addr);
1161 +
1162 +alloc_error:
1163 + irq_dispose_mapping(tdm_ctrl[device_num]->ut_info->uf_info.irq);
1164 + iounmap(tdm_ctrl[device_num]->uf_regs);
1165 +
1166 +get_property_error:
1167 + num_tdm_devices--;
1168 + kfree(tdm_ctrl[device_num]);
1169 + ut_info->ucc_busy = 0;
1170 + return ret;
1171 +}
1172 +
1173 +static int ucc_tdm_remove(struct of_device *ofdev)
1174 +{
1175 + struct tdm_ctrl *tdm_c;
1176 + struct ucc_tdm_info *ut_info;
1177 + u32 ucc_num;
1178 +
1179 + tdm_c = dev_get_drvdata(&(ofdev->dev));
1180 + dev_set_drvdata(&(ofdev->dev), NULL);
1181 + ucc_num = tdm_c->ut_info->uf_info.ucc_num;
1182 + ut_info = &utdm_info[ucc_num];
1183 + tdm_stop(tdm_c);
1184 + tdm_deinit(tdm_c);
1185 +
1186 + ucc_fast_free(tdm_c->uf_private);
1187 +
1188 + dma_free_coherent(NULL, 2 * NR_BUFS * SAMPLE_DEPTH *
1189 + tdm_c->cfg_ctrl.active_num_ts,
1190 + tdm_c->tdm_input_data,
1191 + tdm_c->dma_input_addr);
1192 +
1193 + irq_dispose_mapping(tdm_c->ut_info->uf_info.irq);
1194 + iounmap(tdm_c->uf_regs);
1195 +
1196 + num_tdm_devices--;
1197 + kfree(tdm_c);
1198 +
1199 + ut_info->ucc_busy = 0;
1200 +
1201 + pr_info("%s UCC based tdm module uninstalled\n", __FUNCTION__);
1202 + return 0;
1203 +}
1204 +
1205 +const struct of_device_id ucc_tdm_match[] = {
1206 + { .type = "tdm", .compatible = "fsl,ucc-tdm", },
1207 + {},
1208 +};
1209 +
1210 +MODULE_DEVICE_TABLE(of, ucc_tdm_match);
1211 +
1212 +static struct of_platform_driver ucc_tdm_driver = {
1213 + .name = DRV_NAME,
1214 + .match_table = ucc_tdm_match,
1215 + .probe = ucc_tdm_probe,
1216 + .remove = ucc_tdm_remove,
1217 + .driver = {
1218 + .name = DRV_NAME,
1219 + .owner = THIS_MODULE,
1220 + },
1221 +};
1222 +
1223 +static int __init ucc_tdm_init(void)
1224 +{
1225 + u32 i;
1226 +
1227 + pr_info("ucc_tdm: " DRV_DESC "\n");
1228 + for (i = 0; i < 8; i++)
1229 + memcpy(&(utdm_info[i]), &utdm_primary_info,
1230 + sizeof(utdm_primary_info));
1231 +
1232 + return of_register_platform_driver(&ucc_tdm_driver);
1233 +}
1234 +
1235 +static void __exit ucc_tdm_exit(void)
1236 +{
1237 + of_unregister_platform_driver(&ucc_tdm_driver);
1238 +}
1239 +
1240 +module_init(ucc_tdm_init);
1241 +module_exit(ucc_tdm_exit);
1242 +MODULE_AUTHOR("Freescale Semiconductor, Inc");
1243 +MODULE_DESCRIPTION(DRV_DESC);
1244 +MODULE_LICENSE("GPL");
1245 --- a/drivers/misc/Makefile
1246 +++ b/drivers/misc/Makefile
1247 @@ -8,6 +8,7 @@ obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot
1248 obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
1249 obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
1250 obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
1251 +obj-$(CONFIG_UCC_TDM) += ucc_tdm.o
1252 obj-$(CONFIG_ICS932S401) += ics932s401.o
1253 obj-$(CONFIG_LKDTM) += lkdtm.o
1254 obj-$(CONFIG_TIFM_CORE) += tifm_core.o
1255 --- a/drivers/misc/Kconfig
1256 +++ b/drivers/misc/Kconfig
1257 @@ -164,6 +164,20 @@ config ATMEL_SSC
1258
1259 If unsure, say N.
1260
1261 +config UCC_TDM
1262 + tristate "Freescale UCC TDM Driver"
1263 + depends on QUICC_ENGINE && UCC_FAST
1264 + default n
1265 + help
1266 +	  The TDM driver is for UCC-based TDM devices, for example the TDM
1267 +	  device on the MPC832x RDB; select it to run PowerVoIP on the
1268 +	  MPC832x RDB board. The TDM driver can interface with SLIC-type
1269 +	  devices to transmit and receive TDM samples. It receives
1270 +	  time-division-multiplexed samples (for different channels) from
1271 +	  the SLIC device, demultiplexes them and sends them to the upper
1272 +	  layers. At the transmit end it receives samples for the different
1273 +	  channels, multiplexes them and sends them to the SLIC device.
1274 +
1275 config ENCLOSURE_SERVICES
1276 tristate "Enclosure Services"
1277 default n
1278 --- a/arch/powerpc/include/asm/ucc_fast.h
1279 +++ b/arch/powerpc/include/asm/ucc_fast.h
1280 @@ -150,6 +150,10 @@ struct ucc_fast_info {
1281 enum ucc_fast_rx_decoding_method renc;
1282 enum ucc_fast_transparent_tcrc tcrc;
1283 enum ucc_fast_sync_len synl;
1284 + char *tdm_rx_clk;
1285 + char *tdm_tx_clk;
1286 + char *tdm_rx_sync;
1287 + char *tdm_tx_sync;
1288 };
1289
1290 struct ucc_fast_private {
1291 --- a/arch/powerpc/include/asm/qe.h
1292 +++ b/arch/powerpc/include/asm/qe.h
1293 @@ -669,6 +669,14 @@ struct ucc_slow_pram {
1294 #define UCC_GETH_UCCE_RXF1 0x00000002
1295 #define UCC_GETH_UCCE_RXF0 0x00000001
1296
1297 +/* Transparent UCC Event Register (UCCE) */
1298 +#define UCC_TRANS_UCCE_GRA 0x0080
1299 +#define UCC_TRANS_UCCE_TXE 0x0010
1300 +#define UCC_TRANS_UCCE_RXF 0x0008
1301 +#define UCC_TRANS_UCCE_BSY 0x0004
1302 +#define UCC_TRANS_UCCE_TXB 0x0002
1303 +#define UCC_TRANS_UCCE_RXB 0x0001
1304 +
1305 /* UCC Protocol Specific Mode Register (UPSMR), when used for UART */
1306 #define UCC_UART_UPSMR_FLC 0x8000
1307 #define UCC_UART_UPSMR_SL 0x4000
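To show how the exported interface above is meant to be consumed, the following is a minimal client sketch (illustrative only, not part of the patch). It assumes the client can include drivers/misc/ucc_tdm.h for struct tdm_client and SAMPLE_DEPTH, and that prototypes for tdm_register_client()/tdm_deregister_client() are visible to it; the patch exports those symbols but keeps them static and adds no public header, so the declarations below are assumptions. Module and function names are hypothetical.

/* Hypothetical SLIC client: register with ucc_tdm, then have a kthread
 * sleep on the wait queue that the ucc_tdm ISR wakes once per completed
 * buffer phase, and loop channel 0's PCM samples straight back.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/err.h>

#include "ucc_tdm.h"	/* struct tdm_client, SAMPLE_DEPTH (assumed reachable) */

/* Assumed declarations; the patch does not provide a public header. */
extern int tdm_register_client(struct tdm_client *tdm_client);
extern int tdm_deregister_client(struct tdm_client *tdm_client);

static struct tdm_client slic_client;
static struct task_struct *slic_task;
static short pcm_buf[SAMPLE_DEPTH];

static int slic_loopback_fn(void *unused)
{
	DEFINE_WAIT(wait);

	while (!kthread_should_stop()) {
		/* sleep until tdm_isr() wakes this queue */
		prepare_to_wait(slic_client.wakeup_event, &wait,
				TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule();
		finish_wait(slic_client.wakeup_event, &wait);
		if (kthread_should_stop())
			break;

		/* read the freshest samples for channel 0, write them back */
		slic_client.tdm_read(slic_client.client_id, 0,
				     pcm_buf, SAMPLE_DEPTH);
		slic_client.tdm_write(slic_client.client_id, 0,
				      pcm_buf, SAMPLE_DEPTH);
	}
	return 0;
}

static int __init slic_client_init(void)
{
	int ret = tdm_register_client(&slic_client);

	if (ret)
		return ret;

	slic_task = kthread_run(slic_loopback_fn, NULL, "slic_loopback");
	if (IS_ERR(slic_task)) {
		tdm_deregister_client(&slic_client);
		return PTR_ERR(slic_task);
	}
	return 0;
}

static void __exit slic_client_exit(void)
{
	kthread_stop(slic_task);
	tdm_deregister_client(&slic_client);
}

module_init(slic_client_init);
module_exit(slic_client_exit);
MODULE_LICENSE("GPL");

The kthread-plus-wait-queue pattern mirrors what the driver itself expects: tdm_isr() only calls wake_up_interruptible() on wakeup_event, so the client supplies its own sleeping context and pulls data with the tdm_read/tdm_write callbacks filled in by tdm_register_client().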