drivers: checkout serial drivers to LE.UM.3.2.3-43600-SA2150p

Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
This commit is contained in:
UtsavBalar1231
2020-07-24 14:23:56 +05:30
parent f9dcdee078
commit d09ba209fc
19 changed files with 1711 additions and 783 deletions

View File

@@ -29,6 +29,7 @@
#include <linux/dmaengine.h>
#include <linux/msm_gpi.h>
#include <soc/qcom/boot_stats.h>
#include <linux/slab.h>
#define SE_I2C_TX_TRANS_LEN (0x26C)
#define SE_I2C_RX_TRANS_LEN (0x270)
@@ -82,6 +83,8 @@
#define I2C_TIMEOUT_MIN_USEC 500000
#define MAX_SE 20
enum i2c_se_mode {
UNINITIALIZED,
FIFO_SE_DMA,
@@ -96,6 +99,16 @@ struct geni_i2c_clk_fld {
u8 t_cycle;
};
/* SSR (subsystem restart) bookkeeping for one GENI I2C controller */
struct geni_i2c_ssr {
struct mutex ssr_lock;	/* serializes transfers against SSR up/down transitions */
bool is_ssr_down;	/* true while the subsystem is down; transfers bail out */
};
/* Per-message debug record of the buffers used in a transfer */
struct dbg_buf_ctxt {
void *virt_buf;	/* client's virtual buffer for the message */
void *map_buf;	/* pointer to the DMA handle the buffer was mapped to */
};
struct geni_i2c_dev {
struct device *dev;
void __iomem *base;
@@ -131,8 +144,17 @@ struct geni_i2c_dev {
enum i2c_se_mode se_mode;
bool cmd_done;
struct geni_i2c_clk_fld geni_i2c_clk_param;
struct geni_i2c_ssr i2c_ssr;
u32 dbg_num;
struct dbg_buf_ctxt *dbg_buf_ptr;
};
static void ssr_i2c_force_suspend(struct device *dev);
static void ssr_i2c_force_resume(struct device *dev);
static struct geni_i2c_dev *gi2c_dev_dbg[MAX_SE];
static int arr_idx;
struct geni_i2c_err_log {
int err;
const char *msg;
@@ -210,12 +232,6 @@ static inline void qcom_geni_i2c_calc_timeout(struct geni_i2c_dev *gi2c)
static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
{
if (gi2c->cur)
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"len:%d, slv-addr:0x%x, RD/WR:%d timeout:%u\n",
gi2c->cur->len, gi2c->cur->addr, gi2c->cur->flags,
gi2c->xfer_timeout);
if (err == I2C_NACK || err == GENI_ABORT_DONE) {
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n",
gi2c_log[err].msg);
@@ -224,8 +240,6 @@ static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev, "%s\n",
gi2c_log[err].msg);
}
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s: se-mode:%d\n", __func__,
gi2c->se_mode);
geni_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base, gi2c->ipcl);
err_ret:
gi2c->err = gi2c_log[err].err;
@@ -235,14 +249,30 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
{
struct geni_i2c_dev *gi2c = dev;
int i, j;
u32 m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
u32 rx_st = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
u32 dm_tx_st = readl_relaxed(gi2c->base + SE_DMA_TX_IRQ_STAT);
u32 dm_rx_st = readl_relaxed(gi2c->base + SE_DMA_RX_IRQ_STAT);
u32 dma = readl_relaxed(gi2c->base + SE_GENI_DMA_MODE_EN);
u32 m_stat, rx_st, dm_tx_st, dm_rx_st, dma;
struct i2c_msg *cur = gi2c->cur;
if (!cur || (m_stat & M_CMD_FAILURE_EN) ||
if (gi2c->i2c_ssr.is_ssr_down) {
gi2c->cmd_done = false;
complete(&gi2c->xfer);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"%s: SSR down\n", __func__);
return IRQ_HANDLED;
}
m_stat = readl_relaxed(gi2c->base + SE_GENI_M_IRQ_STATUS);
rx_st = readl_relaxed(gi2c->base + SE_GENI_RX_FIFO_STATUS);
dm_tx_st = readl_relaxed(gi2c->base + SE_DMA_TX_IRQ_STAT);
dm_rx_st = readl_relaxed(gi2c->base + SE_DMA_RX_IRQ_STAT);
dma = readl_relaxed(gi2c->base + SE_GENI_DMA_MODE_EN);
if (!cur) {
geni_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base, gi2c->ipcl);
GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev, "Spurious irq\n");
goto irqret;
}
if ((m_stat & M_CMD_FAILURE_EN) ||
(dm_rx_st & (DM_I2C_CB_ERR)) ||
(m_stat & M_CMD_CANCEL_EN) ||
(m_stat & M_CMD_ABORT_EN)) {
@@ -269,12 +299,6 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
goto irqret;
}
if (dma) {
dev_dbg(gi2c->dev, "i2c dma tx:0x%x, dma rx:0x%x\n", dm_tx_st,
dm_rx_st);
goto irqret;
}
if (((m_stat & M_RX_FIFO_WATERMARK_EN) ||
(m_stat & M_RX_FIFO_LAST_EN)) && (cur->flags & I2C_M_RD)) {
u32 rxcnt = rx_st & RX_FIFO_WC_MSK;
@@ -283,6 +307,9 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
u32 temp;
int p;
if (gi2c->i2c_ssr.is_ssr_down)
break;
temp = readl_relaxed(gi2c->base + SE_GENI_RX_FIFOn);
for (i = gi2c->cur_rd, p = 0; (i < cur->len && p < 4);
i++, p++)
@@ -303,6 +330,10 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
for (i = gi2c->cur_wr, p = 0; (i < cur->len && p < 4);
i++, p++)
temp |= (((u32)(cur->buf[i]) << (p * 8)));
if (gi2c->i2c_ssr.is_ssr_down)
break;
writel_relaxed(temp, gi2c->base + SE_GENI_TX_FIFOn);
gi2c->cur_wr = i;
dev_dbg(gi2c->dev, "FIFO i:%d,wrote 0x%x\n", i, temp);
@@ -315,6 +346,14 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
}
}
irqret:
if (gi2c->i2c_ssr.is_ssr_down) {
gi2c->cmd_done = false;
complete(&gi2c->xfer);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"%s: SSR down\n", __func__);
return IRQ_HANDLED;
}
if (m_stat)
writel_relaxed(m_stat, gi2c->base + SE_GENI_M_IRQ_CLEAR);
@@ -452,6 +491,7 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
goto geni_i2c_gsi_xfer_out;
}
}
if (!gi2c->rx_c) {
gi2c->rx_c = dma_request_slave_channel(gi2c->dev, "rx");
if (!gi2c->rx_c) {
@@ -530,6 +570,8 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
sizeof(gi2c->go_t));
if (msgs[i].flags & I2C_M_RD) {
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"msg[%d].len:%d R\n", i, gi2c->cur->len);
sg_init_table(&gi2c->rx_sg, 1);
ret = geni_se_iommu_map_buf(rx_dev, &gi2c->rx_ph,
msgs[i].buf, msgs[i].len,
@@ -540,6 +582,11 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
ret);
goto geni_i2c_gsi_xfer_out;
} else if (gi2c->dbg_buf_ptr) {
gi2c->dbg_buf_ptr[i].virt_buf =
(void *)msgs[i].buf;
gi2c->dbg_buf_ptr[i].map_buf =
(void *)&gi2c->rx_ph;
}
gi2c->rx_t.dword[0] =
MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->rx_ph);
@@ -570,6 +617,8 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
rx_cookie = dmaengine_submit(gi2c->rx_desc);
dma_async_issue_pending(gi2c->rx_c);
} else {
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"msg[%d].len:%d W\n", i, gi2c->cur->len);
ret = geni_se_iommu_map_buf(tx_dev, &gi2c->tx_ph,
msgs[i].buf, msgs[i].len,
DMA_TO_DEVICE);
@@ -579,7 +628,13 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
ret);
goto geni_i2c_gsi_xfer_out;
} else if (gi2c->dbg_buf_ptr) {
gi2c->dbg_buf_ptr[i].virt_buf =
(void *)msgs[i].buf;
gi2c->dbg_buf_ptr[i].map_buf =
(void *)&gi2c->tx_ph;
}
gi2c->tx_t.dword[0] =
MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(gi2c->tx_ph);
gi2c->tx_t.dword[1] =
@@ -615,8 +670,11 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (!timeout) {
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"GSI Txn timed out: %u len: %d\n",
gi2c->xfer_timeout, gi2c->cur->len);
"I2C gsi xfer timeout:%u flags:%d addr:0x%x\n",
gi2c->xfer_timeout, gi2c->cur->flags,
gi2c->cur->addr);
geni_se_dump_dbg_regs(&gi2c->i2c_rsc, gi2c->base,
gi2c->ipcl);
gi2c->err = -ETIMEDOUT;
}
geni_i2c_err_prep_sg:
@@ -650,8 +708,21 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
int i, ret = 0, timeout = 0;
gi2c->err = 0;
gi2c->cur = &msgs[0];
reinit_completion(&gi2c->xfer);
mutex_lock(&gi2c->i2c_ssr.ssr_lock);
if (gi2c->i2c_ssr.is_ssr_down) {
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"%s: SSR Down\n", __func__);
mutex_unlock(&gi2c->i2c_ssr.ssr_lock);
return -EINVAL;
}
/* Client to respect system suspend */
if (!pm_runtime_enabled(gi2c->dev)) {
GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
"%s: System suspended\n", __func__);
return -EACCES;
}
ret = pm_runtime_get_sync(gi2c->dev);
if (ret < 0) {
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
@@ -659,16 +730,26 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
pm_runtime_put_noidle(gi2c->dev);
/* Set device in suspended since resume failed */
pm_runtime_set_suspended(gi2c->dev);
mutex_unlock(&gi2c->i2c_ssr.ssr_lock);
return ret;
}
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"n:%d addr:0x%x\n", num, msgs[0].addr);
gi2c->dbg_num = num;
kfree(gi2c->dbg_buf_ptr);
gi2c->dbg_buf_ptr =
kcalloc(num, sizeof(struct dbg_buf_ctxt), GFP_KERNEL);
if (!gi2c->dbg_buf_ptr)
GENI_SE_ERR(gi2c->ipcl, false, gi2c->dev,
"Buf logging pointer not available\n");
if (gi2c->se_mode == GSI_ONLY) {
ret = geni_i2c_gsi_xfer(adap, msgs, num);
goto geni_i2c_txn_ret;
}
qcom_geni_i2c_conf(gi2c, 0);
dev_dbg(gi2c->dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
num, msgs[0].len, msgs[0].flags);
for (i = 0; i < num; i++) {
int stretch = (i < (num - 1));
u32 m_param = 0;
@@ -691,9 +772,8 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
break;
}
if (msgs[i].flags & I2C_M_RD) {
dev_dbg(gi2c->dev,
"READ,n:%d,i:%d len:%d, stretch:%d\n",
num, i, msgs[i].len, stretch);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"msgs[%d].len:%d R\n", i, msgs[i].len);
geni_write_reg(msgs[i].len,
gi2c->base, SE_I2C_RX_TRANS_LEN);
m_cmd = I2C_READ;
@@ -706,12 +786,16 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
mode = FIFO_MODE;
ret = geni_se_select_mode(gi2c->base,
mode);
} else if (gi2c->dbg_buf_ptr) {
gi2c->dbg_buf_ptr[i].virt_buf =
(void *)msgs[i].buf;
gi2c->dbg_buf_ptr[i].map_buf =
(void *)&rx_dma;
}
}
} else {
dev_dbg(gi2c->dev,
"WRITE:n:%d,i:%d len:%d, stretch:%d, m_param:0x%x\n",
num, i, msgs[i].len, stretch, m_param);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"msgs[%d].len:%d W\n", i, msgs[i].len);
geni_write_reg(msgs[i].len, gi2c->base,
SE_I2C_TX_TRANS_LEN);
m_cmd = I2C_WRITE;
@@ -724,6 +808,11 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
mode = FIFO_MODE;
ret = geni_se_select_mode(gi2c->base,
mode);
} else if (gi2c->dbg_buf_ptr) {
gi2c->dbg_buf_ptr[i].virt_buf =
(void *)msgs[i].buf;
gi2c->dbg_buf_ptr[i].map_buf =
(void *)&tx_dma;
}
}
if (mode == FIFO_MODE) /* Get FIFO IRQ */
@@ -732,18 +821,40 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
}
/* Ensure FIFO writes go through before waiting for the Done event */
mb();
mutex_unlock(&gi2c->i2c_ssr.ssr_lock);
timeout = wait_for_completion_timeout(&gi2c->xfer,
gi2c->xfer_timeout);
if (!timeout)
mutex_lock(&gi2c->i2c_ssr.ssr_lock);
if (gi2c->i2c_ssr.is_ssr_down) {
ret = -EINVAL;
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"%s: SSR Down\n", __func__);
goto geni_i2c_txn_ret;
}
if (!timeout) {
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"I2C xfer timeout: %d\n", gi2c->xfer_timeout);
geni_i2c_err(gi2c, GENI_TIMEOUT);
}
if (gi2c->err) {
reinit_completion(&gi2c->xfer);
gi2c->cur = NULL;
geni_cancel_m_cmd(gi2c->base);
mutex_unlock(&gi2c->i2c_ssr.ssr_lock);
timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
if (!timeout)
mutex_lock(&gi2c->i2c_ssr.ssr_lock);
if (gi2c->i2c_ssr.is_ssr_down) {
ret = -EINVAL;
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"%s: SSR Down\n", __func__);
goto geni_i2c_txn_ret;
}
if (!timeout) {
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"Abort\n");
geni_abort_m_cmd(gi2c->base);
}
}
gi2c->cur_wr = 0;
@@ -764,9 +875,11 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
geni_se_tx_dma_unprep(gi2c->wrapper_dev, tx_dma,
msgs[i].len);
}
ret = gi2c->err;
if (gi2c->err) {
dev_err(gi2c->dev, "i2c error :%d\n", gi2c->err);
GENI_SE_ERR(gi2c->ipcl, true, gi2c->dev,
"i2c error :%d\n", gi2c->err);
break;
}
}
@@ -776,9 +889,13 @@ geni_i2c_txn_ret:
pm_runtime_mark_last_busy(gi2c->dev);
pm_runtime_put_autosuspend(gi2c->dev);
gi2c->cur_wr = 0;
gi2c->cur_rd = 0;
gi2c->cur = NULL;
gi2c->err = 0;
dev_dbg(gi2c->dev, "i2c txn ret:%d\n", ret);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"i2c txn ret:%d\n", ret);
mutex_unlock(&gi2c->i2c_ssr.ssr_lock);
return ret;
}
@@ -806,6 +923,10 @@ static int geni_i2c_probe(struct platform_device *pdev)
if (!gi2c)
return -ENOMEM;
if (arr_idx++ < MAX_SE)
/* Debug purpose */
gi2c_dev_dbg[arr_idx] = gi2c;
gi2c->dev = &pdev->dev;
snprintf(boot_marker, sizeof(boot_marker),
"M - DRIVER GENI_I2C Init");
@@ -845,14 +966,12 @@ static int geni_i2c_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
return ret;
}
gi2c->i2c_rsc.m_ahb_clk = devm_clk_get(&pdev->dev, "m-ahb");
if (IS_ERR(gi2c->i2c_rsc.m_ahb_clk)) {
ret = PTR_ERR(gi2c->i2c_rsc.m_ahb_clk);
dev_err(&pdev->dev, "Err getting M AHB clk %d\n", ret);
return ret;
}
gi2c->i2c_rsc.s_ahb_clk = devm_clk_get(&pdev->dev, "s-ahb");
if (IS_ERR(gi2c->i2c_rsc.s_ahb_clk)) {
ret = PTR_ERR(gi2c->i2c_rsc.s_ahb_clk);
@@ -937,6 +1056,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
gi2c->irq, ret);
return ret;
}
disable_irq(gi2c->irq);
i2c_set_adapdata(&gi2c->adap, gi2c);
gi2c->adap.dev.parent = gi2c->dev;
@@ -948,7 +1068,14 @@ static int geni_i2c_probe(struct platform_device *pdev)
pm_runtime_set_autosuspend_delay(gi2c->dev, I2C_AUTO_SUSPEND_DELAY);
pm_runtime_use_autosuspend(gi2c->dev);
pm_runtime_enable(gi2c->dev);
i2c_add_adapter(&gi2c->adap);
gi2c->i2c_rsc.rsc_ssr.force_suspend = ssr_i2c_force_suspend;
gi2c->i2c_rsc.rsc_ssr.force_resume = ssr_i2c_force_resume;
mutex_init(&gi2c->i2c_ssr.ssr_lock);
ret = i2c_add_adapter(&gi2c->adap);
if (ret) {
dev_err(gi2c->dev, "Add adapter failed\n");
return ret;
}
snprintf(boot_marker, sizeof(boot_marker),
"M - DRIVER GENI_I2C_%d Ready", gi2c->adap.nr);
@@ -970,6 +1097,9 @@ static int geni_i2c_remove(struct platform_device *pdev)
/*
 * geni_i2c_resume_noirq() - system resume (noirq phase) callback
 * @device: I2C controller device being resumed.
 *
 * Only logs the event; actual hardware bring-up is left to the
 * runtime-PM resume path.
 */
static int geni_i2c_resume_noirq(struct device *device)
{
struct geni_i2c_dev *gi2c = dev_get_drvdata(device);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n", __func__);
return 0;
}
@@ -987,15 +1117,21 @@ static int geni_i2c_hib_resume_noirq(struct device *device)
static int geni_i2c_runtime_suspend(struct device *dev)
{
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
int ret = 0;
if (gi2c->se_mode == FIFO_SE_DMA) {
disable_irq(gi2c->irq);
se_geni_resources_off(&gi2c->i2c_rsc);
ret = se_geni_resources_off(&gi2c->i2c_rsc);
} else {
/* GPIOs are already in sleep state, so only turn the clocks off */
se_geni_clks_off(&gi2c->i2c_rsc);
ret = se_geni_clks_off(&gi2c->i2c_rsc);
}
return 0;
if (ret)
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"%s failed ret:%d\n", __func__, ret);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n", __func__);
return ret;
}
static int geni_i2c_runtime_resume(struct device *dev)
@@ -1034,23 +1170,27 @@ static int geni_i2c_runtime_resume(struct device *dev)
gi2c->se_mode = GSI_ONLY;
geni_se_select_mode(gi2c->base, GSI_DMA);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"i2c in GSI ONLY mode\n");
"i2c GSI mode\n");
} else {
int gi2c_tx_depth = get_tx_fifo_depth(gi2c->base);
gi2c->se_mode = FIFO_SE_DMA;
gi2c->tx_wm = gi2c_tx_depth - 1;
geni_se_init(gi2c->base, gi2c->tx_wm, gi2c_tx_depth);
se_config_packing(gi2c->base, 8, 4, true);
qcom_geni_i2c_conf(gi2c, 0);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"i2c fifo/se-dma mode. fifo depth:%d\n",
gi2c_tx_depth);
}
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "i2c-%d: %s\n",
gi2c->adap.nr, dev_name(gi2c->dev));
}
if (gi2c->se_mode == FIFO_SE_DMA)
enable_irq(gi2c->irq);
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s\n", __func__);
return 0;
}
@@ -1067,6 +1207,8 @@ static int geni_i2c_suspend_noirq(struct device *device)
return -EBUSY;
}
if (!pm_runtime_status_suspended(device)) {
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev,
"%s\n", __func__);
geni_i2c_runtime_suspend(device);
pm_runtime_disable(device);
pm_runtime_set_suspended(device);
@@ -1102,6 +1244,39 @@ static const struct dev_pm_ops geni_i2c_pm_ops = {
.thaw = geni_i2c_hib_resume_noirq,
};
/*
 * ssr_i2c_force_suspend() - force the controller into suspend on SSR
 * @dev: I2C controller device whose subsystem is going down.
 *
 * Installed as i2c_rsc.rsc_ssr.force_suspend in probe. Marks the
 * controller as down so geni_i2c_xfer()/geni_i2c_irq() bail out, then
 * forces a runtime suspend if the device is currently active.
 */
static void ssr_i2c_force_suspend(struct device *dev)
{
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
int ret = 0;
/* Hold ssr_lock so no transfer can race with the down transition */
mutex_lock(&gi2c->i2c_ssr.ssr_lock);
gi2c->i2c_ssr.is_ssr_down = true;
if (!pm_runtime_status_suspended(gi2c->dev)) {
ret = geni_i2c_runtime_suspend(gi2c->dev);
if (ret) {
dev_err(gi2c->dev, "%s failed ret:%d\n", __func__, ret);
} else {
/* Resync the runtime-PM state with the forced suspend */
pm_runtime_disable(gi2c->dev);
pm_runtime_set_suspended(gi2c->dev);
pm_runtime_enable(gi2c->dev);
}
}
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s done\n", __func__);
mutex_unlock(&gi2c->i2c_ssr.ssr_lock);
}
/*
 * ssr_i2c_force_resume() - mark the controller usable again after SSR
 * @dev: I2C controller device whose subsystem came back up.
 *
 * Clears the SSR-down flag and resets se_mode to UNINITIALIZED so the
 * serial engine is fully re-initialized on the next runtime resume.
 */
static void ssr_i2c_force_resume(struct device *dev)
{
struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
mutex_lock(&gi2c->i2c_ssr.ssr_lock);
gi2c->i2c_ssr.is_ssr_down = false;
gi2c->se_mode = UNINITIALIZED;
GENI_SE_DBG(gi2c->ipcl, false, gi2c->dev, "%s done\n", __func__);
mutex_unlock(&gi2c->i2c_ssr.ssr_lock);
}
static const struct of_device_id geni_i2c_dt_match[] = {
{ .compatible = "qcom,i2c-geni" },
{}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -159,7 +159,7 @@ static int geni_se_iommu_map_and_attach(struct geni_se_device *geni_se_dev);
*/
unsigned int geni_read_reg_nolog(void __iomem *base, int offset)
{
return readl_relaxed(base + offset);
return readl_relaxed_no_log(base + offset);
}
EXPORT_SYMBOL(geni_read_reg_nolog);
@@ -171,7 +171,7 @@ EXPORT_SYMBOL(geni_read_reg_nolog);
*/
void geni_write_reg_nolog(unsigned int value, void __iomem *base, int offset)
{
return writel_relaxed(value, (base + offset));
return writel_relaxed_no_log(value, (base + offset));
}
EXPORT_SYMBOL(geni_write_reg_nolog);
@@ -458,11 +458,7 @@ static int geni_se_ssr_notify_block(struct notifier_block *n,
"SSR notification before power down\n");
break;
case SUBSYS_AFTER_POWERUP:
if (dev->ssr.probe_completed)
geni_se_ssc_qup_up(dev);
else
dev->ssr.probe_completed = true;
geni_se_ssc_qup_up(dev);
GENI_SE_DBG(dev->log_ctx, false, NULL,
"SSR notification after power up\n");
break;
@@ -1233,7 +1229,7 @@ int geni_se_resources_init(struct se_geni_rsc *rsc,
INIT_LIST_HEAD(&rsc->ab_list);
INIT_LIST_HEAD(&rsc->ib_list);
if (geni_se_dev->ssr.subsys_name && rsc->rsc_ssr.ssr_enable) {
if (geni_se_dev->ssr.subsys_name) {
INIT_LIST_HEAD(&rsc->rsc_ssr.active_list);
list_add(&rsc->rsc_ssr.active_list,
&geni_se_dev->ssr.active_list_head);
@@ -1722,16 +1718,6 @@ int geni_se_iommu_free_buf(struct device *wrapper_dev, dma_addr_t *iova,
}
EXPORT_SYMBOL(geni_se_iommu_free_buf);
/**
 * geni_get_iommu_dev() - Get the IOMMU context-bank device of a wrapper
 * @wrapper_dev:	Pointer to the QUPv3 wrapper core device.
 *
 * Return: the wrapper's context-bank device (cb_dev) as stored in its
 * driver data.
 */
struct device *geni_get_iommu_dev(struct device *wrapper_dev)
{
struct geni_se_device *geni_se_dev;
geni_se_dev = dev_get_drvdata(wrapper_dev);
return geni_se_dev->cb_dev;
}
EXPORT_SYMBOL(geni_get_iommu_dev);
/**
* geni_se_dump_dbg_regs() - Print relevant registers that capture most
* accurately the state of an SE.
@@ -2013,7 +1999,7 @@ static int geni_se_probe(struct platform_device *pdev)
geni_se_dev->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
dev_name(geni_se_dev->dev), 0);
if (!geni_se_dev->log_ctx)
dev_dbg(dev, "%s Failed to allocate log context\n", __func__);
dev_err(dev, "%s Failed to allocate log context\n", __func__);
dev_set_drvdata(dev, geni_se_dev);
ret = of_platform_populate(dev->of_node, geni_se_dt_match, NULL, dev);
@@ -2045,19 +2031,18 @@ static int geni_se_probe(struct platform_device *pdev)
}
INIT_LIST_HEAD(&geni_se_dev->ssr.active_list_head);
geni_se_dev->ssr.probe_completed = false;
ret = geni_se_ssc_qup_ssr_reg(geni_se_dev);
if (ret) {
dev_err(dev, "Unable to register SSR notification\n");
return ret;
}
sysfs_create_file(&geni_se_dev->dev->kobj,
ret = sysfs_create_file(&geni_se_dev->dev->kobj,
&dev_attr_ssc_qup_state.attr);
if (ret)
dev_err(dev, "Unable to create sysfs file\n");
}
device_enable_async_suspend(&pdev->dev);
GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
"%s: Probe successful\n", __func__);
return 0;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/ipc_logging.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -103,6 +104,10 @@
#define MAX_TX_SG (3)
#define NUM_SPI_XFER (8)
#define SPI_ERROR_BITS (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | \
M_RX_FIFO_RD_ERR_EN | M_RX_FIFO_WR_ERR_EN | \
M_TX_FIFO_RD_ERR_EN | M_TX_FIFO_WR_ERR_EN)
struct gsi_desc_cb {
struct spi_master *spi;
struct spi_transfer *xfer;
@@ -173,8 +178,8 @@ struct spi_geni_master {
};
static void spi_slv_setup(struct spi_geni_master *mas);
static int ssr_spi_force_suspend(struct device *dev);
static int ssr_spi_force_resume(struct device *dev);
static void ssr_spi_force_suspend(struct device *dev);
static void ssr_spi_force_resume(struct device *dev);
static ssize_t show_slave_state(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -278,6 +283,7 @@ static int get_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas,
static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
int bits_per_word)
{
struct spi_master *spi = get_spi_master(mas->dev);
int pack_words = 1;
bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
u32 word_len = geni_read_reg(mas->base, SE_SPI_WORD_LEN);
@@ -289,10 +295,18 @@ static void spi_setup_word_len(struct spi_geni_master *mas, u32 mode,
*/
if (!(mas->tx_fifo_width % bits_per_word))
pack_words = mas->tx_fifo_width / bits_per_word;
word_len &= ~WORD_LEN_MSK;
word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
se_config_packing(mas->base, bits_per_word, pack_words, msb_first);
geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
/*
* Don't configure the SPI word length in SPI slave mode; it is an
* optional property there. H/W takes care of the slave word length
* when the SPI_SLAVE_EN bit is set.
*/
if (!spi->slave) {
word_len &= ~WORD_LEN_MSK;
word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
geni_write_reg(word_len, mas->base, SE_SPI_WORD_LEN);
}
se_get_packing_config(bits_per_word, pack_words, msb_first,
&cfg0, &cfg1);
GENI_SE_DBG(mas->ipc, false, mas->dev,
@@ -328,9 +342,15 @@ static int setup_fifo_params(struct spi_device *spi_slv,
if (mode & SPI_CPOL)
cpol |= CPOL;
if (!spi->slave) {
if (mode & SPI_CPHA)
cpha |= CPHA;
if (mode & SPI_CPHA)
cpha |= CPHA;
/* SPI slave supports only mode 1; log the unsupported mode and exit */
if (spi->slave && !(cpol == 0 && cpha == 1)) {
GENI_SE_DBG(mas->ipc, false, mas->dev,
"%s: Unsupported SPI Slave mode cpol %d cpha %d\n",
__func__, cpol, cpha);
return -EINVAL;
}
if (spi_slv->mode & SPI_CS_HIGH)
@@ -1042,10 +1062,11 @@ static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
if (count < 0)
GENI_SE_ERR(mas->ipc, false, NULL,
"suspend usage count mismatch:%d", count);
} else {
} else if (!pm_runtime_suspended(mas->dev)) {
pm_runtime_mark_last_busy(mas->dev);
pm_runtime_put_autosuspend(mas->dev);
}
return 0;
}
@@ -1184,16 +1205,22 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
mb();
}
static void handle_fifo_timeout(struct spi_geni_master *mas,
static void handle_fifo_timeout(struct spi_master *spi,
struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
unsigned long timeout;
geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
reinit_completion(&mas->xfer_done);
geni_cancel_m_cmd(mas->base);
if (mas->cur_xfer_mode == FIFO_MODE)
geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
if (spi->slave)
goto dma_unprep;
reinit_completion(&mas->xfer_done);
geni_cancel_m_cmd(mas->base);
/* Ensure cmd cancel is written */
mb();
timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
@@ -1208,6 +1235,7 @@ static void handle_fifo_timeout(struct spi_geni_master *mas,
dev_err(mas->dev,
"Failed to cancel/abort m_cmd\n");
}
dma_unprep:
if (mas->cur_xfer_mode == SE_DMA) {
if (xfer->tx_buf) {
reinit_completion(&mas->xfer_done);
@@ -1234,6 +1262,8 @@ static void handle_fifo_timeout(struct spi_geni_master *mas,
xfer->rx_dma, xfer->len);
}
}
if (spi->slave && !mas->dis_autosuspend)
pm_runtime_put_sync_suspend(mas->dev);
}
@@ -1250,18 +1280,18 @@ static int spi_geni_transfer_one(struct spi_master *spi,
return -EINVAL;
}
mutex_lock(&mas->spi_ssr.ssr_lock);
if (mas->spi_ssr.is_ssr_down || !mas->spi_ssr.xfer_prepared) {
mutex_unlock(&mas->spi_ssr.ssr_lock);
return -EINVAL;
}
/* Check for zero length transfer */
if (xfer->len < 1) {
dev_err(mas->dev, "Zero length transfer\n");
return -EINVAL;
}
mutex_lock(&mas->spi_ssr.ssr_lock);
if (mas->spi_ssr.is_ssr_down || !mas->spi_ssr.xfer_prepared) {
mutex_unlock(&mas->spi_ssr.ssr_lock);
return -EINVAL;
}
if (mas->cur_xfer_mode != GSI_DMA) {
reinit_completion(&mas->xfer_done);
setup_fifo_xfer(xfer, mas, slv->mode, spi);
@@ -1348,10 +1378,7 @@ err_gsi_geni_transfer_one:
mutex_unlock(&mas->spi_ssr.ssr_lock);
return ret;
err_fifo_geni_transfer_one:
if (!spi->slave)
handle_fifo_timeout(mas, xfer);
if (spi->slave)
geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
handle_fifo_timeout(spi, xfer);
err_ssr_transfer_one:
mutex_unlock(&mas->spi_ssr.ssr_lock);
return ret;
@@ -1485,6 +1512,14 @@ static irqreturn_t geni_spi_irq(int irq, void *data)
}
m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
if (SPI_ERROR_BITS & m_irq) {
dev_err_ratelimited(mas->dev, "%s: Error m_irq status:0x%x\n",
__func__, m_irq);
GENI_SE_ERR(mas->ipc, false, mas->dev,
"%s: Error m_irq status:0x%x\n", __func__, m_irq);
goto exit_geni_spi_irq;
}
if (mas->cur_xfer_mode == FIFO_MODE) {
if ((m_irq & M_RX_FIFO_WATERMARK_EN) ||
(m_irq & M_RX_FIFO_LAST_EN))
@@ -1593,8 +1628,6 @@ static int spi_geni_probe(struct platform_device *pdev)
}
geni_mas->wrapper_dev = &wrapper_pdev->dev;
geni_mas->spi_rsc.wrapper_dev = &wrapper_pdev->dev;
rsc->rsc_ssr.ssr_enable = of_property_read_bool(pdev->dev.of_node,
"ssr-enable");
ret = geni_se_resources_init(rsc, SPI_CORE2X_VOTE,
(DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
if (ret) {
@@ -1704,6 +1737,7 @@ static int spi_geni_probe(struct platform_device *pdev)
ret = geni_mas->irq;
goto spi_geni_probe_unmap;
}
irq_set_status_flags(geni_mas->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, geni_mas->irq, geni_spi_irq,
IRQF_TRIGGER_HIGH, "spi_geni", geni_mas);
if (ret) {
@@ -1749,7 +1783,7 @@ static int spi_geni_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to register SPI master\n");
goto spi_geni_probe_unmap;
}
sysfs_create_file(&(geni_mas->dev->kobj),
ret = sysfs_create_file(&(geni_mas->dev->kobj),
&dev_attr_spi_slave_state.attr);
snprintf(boot_marker, sizeof(boot_marker),
"M - DRIVER GENI_SPI_%d Ready", spi->bus_num);
@@ -1783,6 +1817,7 @@ static int spi_geni_runtime_suspend(struct device *dev)
struct spi_master *spi = get_spi_master(dev);
struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
disable_irq(geni_mas->irq);
if (geni_mas->shared_se) {
ret = se_geni_clks_off(&geni_mas->spi_rsc);
if (ret)
@@ -1814,6 +1849,7 @@ static int spi_geni_runtime_resume(struct device *dev)
} else {
ret = se_geni_resources_on(&geni_mas->spi_rsc);
}
enable_irq(geni_mas->irq);
return ret;
}
@@ -1870,7 +1906,7 @@ static int spi_geni_suspend(struct device *dev)
}
#endif
static int ssr_spi_force_suspend(struct device *dev)
static void ssr_spi_force_suspend(struct device *dev)
{
struct spi_master *spi = get_spi_master(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
@@ -1878,7 +1914,6 @@ static int ssr_spi_force_suspend(struct device *dev)
mutex_lock(&mas->spi_ssr.ssr_lock);
mas->spi_ssr.xfer_prepared = false;
disable_irq(mas->irq);
mas->spi_ssr.is_ssr_down = true;
complete(&mas->xfer_done);
@@ -1895,22 +1930,17 @@ static int ssr_spi_force_suspend(struct device *dev)
GENI_SE_DBG(mas->ipc, false, mas->dev, "force suspend done\n");
mutex_unlock(&mas->spi_ssr.ssr_lock);
return ret;
}
static int ssr_spi_force_resume(struct device *dev)
static void ssr_spi_force_resume(struct device *dev)
{
struct spi_master *spi = get_spi_master(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
mutex_lock(&mas->spi_ssr.ssr_lock);
mas->spi_ssr.is_ssr_down = false;
enable_irq(mas->irq);
GENI_SE_DBG(mas->ipc, false, mas->dev, "force resume done\n");
mutex_unlock(&mas->spi_ssr.ssr_lock);
return 0;
}
static const struct dev_pm_ops spi_geni_pm_ops = {

View File

@@ -256,6 +256,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.line = rc;
port.port.irq = irq_of_parse_and_map(np, 0);
port.port.irqflags = IRQF_SHARED;
port.port.iotype = UPIO_MEM;
port.port.type = PORT_16550A;
port.port.uartclk = clk;

View File

@@ -181,7 +181,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
struct hlist_head *h;
struct hlist_node *n;
struct irq_info *i;
int ret;
int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
mutex_lock(&hash_mutex);
@@ -216,8 +216,9 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
INIT_LIST_HEAD(&up->list);
i->head = &up->list;
spin_unlock_irq(&i->lock);
irq_flags |= up->port.irqflags;
ret = request_irq(up->port.irq, serial8250_interrupt,
up->port.irqflags, up->port.name, i);
irq_flags, up->port.name, i);
if (ret < 0)
serial_do_unlink(i, up);
}

View File

@@ -27,14 +27,6 @@
#include "8250.h"
#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052
#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d
#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c
#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8
#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2
#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db
#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea
#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
#define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
@@ -570,22 +562,6 @@ static int __maybe_unused exar_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
static const struct exar8250_board acces_com_2x = {
.num_ports = 2,
.setup = pci_xr17c154_setup,
};
static const struct exar8250_board acces_com_4x = {
.num_ports = 4,
.setup = pci_xr17c154_setup,
};
static const struct exar8250_board acces_com_8x = {
.num_ports = 8,
.setup = pci_xr17c154_setup,
};
static const struct exar8250_board pbn_fastcom335_2 = {
.num_ports = 2,
.setup = pci_fastcom335_setup,
@@ -656,15 +632,6 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
}
static const struct pci_device_id exar_pci_tbl[] = {
EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x),
EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x),
EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x),
EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x),
EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x),
EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x),
EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x),
CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect),
CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect),
CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect),

View File

@@ -2258,10 +2258,6 @@ int serial8250_do_startup(struct uart_port *port)
}
}
/* Check if we need to have shared IRQs */
if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
up->port.irqflags |= IRQF_SHARED;
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
/*

View File

@@ -2605,7 +2605,6 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
uap->port.fifosize = uap->fifosize;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = index;
spin_lock_init(&uap->port.lock);
amba_ports[index] = uap;

View File

@@ -289,10 +289,6 @@ static void ar933x_uart_set_termios(struct uart_port *port,
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
/* enable RX and TX ready override */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
/* reenable the UART */
ar933x_uart_rmw(up, AR933X_UART_CS_REG,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
@@ -425,10 +421,6 @@ static int ar933x_uart_startup(struct uart_port *port)
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
/* enable RX and TX ready override */
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
/* Enable RX interrupts */
up->ier = AR933X_UART_INT_RX_VALID;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);

View File

@@ -498,8 +498,7 @@ static void atmel_stop_tx(struct uart_port *port)
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
if (atmel_uart_is_half_duplex(port))
if (!atomic_read(&atmel_port->tasklet_shutdown))
atmel_start_rx(port);
atmel_start_rx(port);
}

View File

@@ -80,7 +80,7 @@
#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
#define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
#define UCR1_RXDMAEN (1<<8) /* Recv ready DMA enable */
#define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */
#define UCR1_IREN (1<<7) /* Infrared interface enable */
#define UCR1_TXMPTYEN (1<<6) /* Transimitter empty interrupt enable */
#define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */
@@ -352,30 +352,6 @@ static void imx_port_rts_auto(struct imx_port *sport, unsigned long *ucr2)
*ucr2 |= UCR2_CTSC;
}
/*
* interrupts disabled on entry
*/
static void imx_start_rx(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned int ucr1, ucr2;
ucr1 = readl(port->membase + UCR1);
ucr2 = readl(port->membase + UCR2);
ucr2 |= UCR2_RXEN;
if (sport->dma_is_enabled) {
ucr1 |= UCR1_RXDMAEN | UCR1_ATDMAEN;
} else {
ucr1 |= UCR1_RRDYEN;
}
/* Write UCR2 first as it includes RXEN */
writel(ucr2, port->membase + UCR2);
writel(ucr1, port->membase + UCR1);
}
/*
* interrupts disabled on entry
*/
@@ -402,10 +378,9 @@ static void imx_stop_tx(struct uart_port *port)
imx_port_rts_active(sport, &temp);
else
imx_port_rts_inactive(sport, &temp);
temp |= UCR2_RXEN;
writel(temp, port->membase + UCR2);
imx_start_rx(port);
temp = readl(port->membase + UCR4);
temp &= ~UCR4_TCEN;
writel(temp, port->membase + UCR4);
@@ -418,7 +393,7 @@ static void imx_stop_tx(struct uart_port *port)
static void imx_stop_rx(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long ucr1, ucr2;
unsigned long temp;
if (sport->dma_is_enabled && sport->dma_is_rxing) {
if (sport->port.suspended) {
@@ -429,18 +404,12 @@ static void imx_stop_rx(struct uart_port *port)
}
}
ucr1 = readl(sport->port.membase + UCR1);
ucr2 = readl(sport->port.membase + UCR2);
temp = readl(sport->port.membase + UCR2);
writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
if (sport->dma_is_enabled) {
ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN);
} else {
ucr1 &= ~UCR1_RRDYEN;
}
writel(ucr1, port->membase + UCR1);
ucr2 &= ~UCR2_RXEN;
writel(ucr2, port->membase + UCR2);
/* disable the `Receiver Ready Interrrupt` */
temp = readl(sport->port.membase + UCR1);
writel(temp & ~UCR1_RRDYEN, sport->port.membase + UCR1);
}
/*
@@ -538,11 +507,6 @@ static void dma_tx_callback(void *data)
if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port))
imx_dma_tx(sport);
else if (sport->port.rs485.flags & SER_RS485_ENABLED) {
temp = readl(sport->port.membase + UCR4);
temp |= UCR4_TCEN;
writel(temp, sport->port.membase + UCR4);
}
spin_unlock_irqrestore(&sport->port.lock, flags);
}
@@ -560,13 +524,9 @@ static void imx_dma_tx(struct imx_port *sport)
if (sport->dma_is_txing)
return;
temp = readl(sport->port.membase + UCR4);
temp &= ~UCR4_TCEN;
writel(temp, sport->port.membase + UCR4);
sport->tx_bytes = uart_circ_chars_pending(xmit);
if (xmit->tail < xmit->head || xmit->head == 0) {
if (xmit->tail < xmit->head) {
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
} else {
@@ -621,20 +581,14 @@ static void imx_start_tx(struct uart_port *port)
imx_port_rts_active(sport, &temp);
else
imx_port_rts_inactive(sport, &temp);
if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
temp &= ~UCR2_RXEN;
writel(temp, port->membase + UCR2);
if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
imx_stop_rx(port);
/*
* Enable transmitter and shifter empty irq only if DMA is off.
* In the DMA case this is done in the tx-callback.
*/
if (!sport->dma_is_enabled) {
temp = readl(port->membase + UCR4);
temp |= UCR4_TCEN;
writel(temp, port->membase + UCR4);
}
/* enable transmitter and shifter empty irq */
temp = readl(port->membase + UCR4);
temp |= UCR4_TCEN;
writel(temp, port->membase + UCR4);
}
if (!sport->dma_is_enabled) {
@@ -857,42 +811,14 @@ static void imx_mctrl_check(struct imx_port *sport)
static irqreturn_t imx_int(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
unsigned int usr1, usr2, ucr1, ucr2, ucr3, ucr4;
unsigned int sts;
unsigned int sts2;
irqreturn_t ret = IRQ_NONE;
usr1 = readl(sport->port.membase + USR1);
usr2 = readl(sport->port.membase + USR2);
ucr1 = readl(sport->port.membase + UCR1);
ucr2 = readl(sport->port.membase + UCR2);
ucr3 = readl(sport->port.membase + UCR3);
ucr4 = readl(sport->port.membase + UCR4);
sts = readl(sport->port.membase + USR1);
sts2 = readl(sport->port.membase + USR2);
/*
* Even if a condition is true that can trigger an irq only handle it if
* the respective irq source is enabled. This prevents some undesired
* actions, for example if a character that sits in the RX FIFO and that
* should be fetched via DMA is tried to be fetched using PIO. Or the
* receiver is currently off and so reading from URXD0 results in an
* exception. So just mask the (raw) status bits for disabled irqs.
*/
if ((ucr1 & UCR1_RRDYEN) == 0)
usr1 &= ~USR1_RRDY;
if ((ucr2 & UCR2_ATEN) == 0)
usr1 &= ~USR1_AGTIM;
if ((ucr1 & UCR1_TXMPTYEN) == 0)
usr1 &= ~USR1_TRDY;
if ((ucr4 & UCR4_TCEN) == 0)
usr2 &= ~USR2_TXDC;
if ((ucr3 & UCR3_DTRDEN) == 0)
usr1 &= ~USR1_DTRD;
if ((ucr1 & UCR1_RTSDEN) == 0)
usr1 &= ~USR1_RTSD;
if ((ucr3 & UCR3_AWAKEN) == 0)
usr1 &= ~USR1_AWAKE;
if ((ucr4 & UCR4_OREN) == 0)
usr2 &= ~USR2_ORE;
if (usr1 & (USR1_RRDY | USR1_AGTIM)) {
if (sts & (USR1_RRDY | USR1_AGTIM)) {
if (sport->dma_is_enabled)
imx_dma_rxint(sport);
else
@@ -900,15 +826,18 @@ static irqreturn_t imx_int(int irq, void *dev_id)
ret = IRQ_HANDLED;
}
if ((usr1 & USR1_TRDY) || (usr2 & USR2_TXDC)) {
if ((sts & USR1_TRDY &&
readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) ||
(sts2 & USR2_TXDC &&
readl(sport->port.membase + UCR4) & UCR4_TCEN)) {
imx_txint(irq, dev_id);
ret = IRQ_HANDLED;
}
if (usr1 & USR1_DTRD) {
if (sts & USR1_DTRD) {
unsigned long flags;
if (usr1 & USR1_DTRD)
if (sts & USR1_DTRD)
writel(USR1_DTRD, sport->port.membase + USR1);
spin_lock_irqsave(&sport->port.lock, flags);
@@ -918,17 +847,17 @@ static irqreturn_t imx_int(int irq, void *dev_id)
ret = IRQ_HANDLED;
}
if (usr1 & USR1_RTSD) {
if (sts & USR1_RTSD) {
imx_rtsint(irq, dev_id);
ret = IRQ_HANDLED;
}
if (usr1 & USR1_AWAKE) {
if (sts & USR1_AWAKE) {
writel(USR1_AWAKE, sport->port.membase + USR1);
ret = IRQ_HANDLED;
}
if (usr2 & USR2_ORE) {
if (sts2 & USR2_ORE) {
sport->port.icount.overrun++;
writel(USR2_ORE, sport->port.membase + USR2);
ret = IRQ_HANDLED;
@@ -1277,7 +1206,7 @@ static void imx_enable_dma(struct imx_port *sport)
/* set UCR1 */
temp = readl(sport->port.membase + UCR1);
temp |= UCR1_RXDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN;
writel(temp, sport->port.membase + UCR1);
temp = readl(sport->port.membase + UCR2);
@@ -1295,7 +1224,7 @@ static void imx_disable_dma(struct imx_port *sport)
/* clear UCR1 */
temp = readl(sport->port.membase + UCR1);
temp &= ~(UCR1_RXDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
writel(temp, sport->port.membase + UCR1);
/* clear UCR2 */
@@ -1360,9 +1289,11 @@ static int imx_startup(struct uart_port *port)
writel(USR1_RTSD | USR1_DTRD, sport->port.membase + USR1);
writel(USR2_ORE, sport->port.membase + USR2);
if (sport->dma_is_inited && !sport->dma_is_enabled)
imx_enable_dma(sport);
temp = readl(sport->port.membase + UCR1);
temp &= ~UCR1_RRDYEN;
temp |= UCR1_UARTEN;
temp |= UCR1_RRDYEN | UCR1_UARTEN;
if (sport->have_rtscts)
temp |= UCR1_RTSDEN;
@@ -1401,13 +1332,14 @@ static int imx_startup(struct uart_port *port)
*/
imx_enable_ms(&sport->port);
if (sport->dma_is_inited) {
imx_enable_dma(sport);
/*
* Start RX DMA immediately instead of waiting for RX FIFO interrupts.
* In our iMX53 the average delay for the first reception dropped from
* approximately 35000 microseconds to 1000 microseconds.
*/
if (sport->dma_is_enabled) {
imx_disable_rx_int(sport);
start_rx_dma(sport);
} else {
temp = readl(sport->port.membase + UCR1);
temp |= UCR1_RRDYEN;
writel(temp, sport->port.membase + UCR1);
}
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1454,8 +1386,7 @@ static void imx_shutdown(struct uart_port *port)
spin_lock_irqsave(&sport->port.lock, flags);
temp = readl(sport->port.membase + UCR1);
temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN |
UCR1_RXDMAEN | UCR1_ATDMAEN);
temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
writel(temp, sport->port.membase + UCR1);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1728,7 +1659,7 @@ static int imx_poll_init(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
unsigned long flags;
unsigned long ucr1, ucr2;
unsigned long temp;
int retval;
retval = clk_prepare_enable(sport->clk_ipg);
@@ -1742,29 +1673,16 @@ static int imx_poll_init(struct uart_port *port)
spin_lock_irqsave(&sport->port.lock, flags);
/*
* Be careful about the order of enabling bits here. First enable the
* receiver (UARTEN + RXEN) and only then the corresponding irqs.
* This prevents that a character that already sits in the RX fifo is
* triggering an irq but the try to fetch it from there results in an
* exception because UARTEN or RXEN is still off.
*/
ucr1 = readl(port->membase + UCR1);
ucr2 = readl(port->membase + UCR2);
temp = readl(sport->port.membase + UCR1);
if (is_imx1_uart(sport))
ucr1 |= IMX1_UCR1_UARTCLKEN;
temp |= IMX1_UCR1_UARTCLKEN;
temp |= UCR1_UARTEN | UCR1_RRDYEN;
temp &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN);
writel(temp, sport->port.membase + UCR1);
ucr1 |= UCR1_UARTEN;
ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN | UCR1_RRDYEN);
ucr2 |= UCR2_RXEN;
writel(ucr1, sport->port.membase + UCR1);
writel(ucr2, sport->port.membase + UCR2);
/* now enable irqs */
writel(ucr1 | UCR1_RRDYEN, sport->port.membase + UCR1);
temp = readl(sport->port.membase + UCR2);
temp |= UCR2_RXEN;
writel(temp, sport->port.membase + UCR2);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1824,8 +1742,11 @@ static int imx_rs485_config(struct uart_port *port,
/* Make sure Rx is enabled in case Tx is active with Rx disabled */
if (!(rs485conf->flags & SER_RS485_ENABLED) ||
rs485conf->flags & SER_RS485_RX_DURING_TX)
imx_start_rx(port);
rs485conf->flags & SER_RS485_RX_DURING_TX) {
temp = readl(sport->port.membase + UCR2);
temp |= UCR2_RXEN;
writel(temp, sport->port.membase + UCR2);
}
port->rs485 = *rs485conf;

File diff suppressed because it is too large Load Diff

View File

@@ -391,10 +391,14 @@ no_rx:
static inline void msm_wait_for_xmitr(struct uart_port *port)
{
unsigned int timeout = 500000;
while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
break;
udelay(1);
if (!timeout--)
break;
}
msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
}
@@ -868,6 +872,7 @@ static void msm_handle_tx(struct uart_port *port)
struct circ_buf *xmit = &msm_port->uart.state->xmit;
struct msm_dma *dma = &msm_port->tx_dma;
unsigned int pio_count, dma_count, dma_min;
char buf[4] = { 0 };
void __iomem *tf;
int err = 0;
@@ -877,10 +882,12 @@ static void msm_handle_tx(struct uart_port *port)
else
tf = port->membase + UART_TF;
buf[0] = port->x_char;
if (msm_port->is_uartdm)
msm_reset_dm_count(port, 1);
iowrite8_rep(tf, &port->x_char, 1);
iowrite32_rep(tf, buf, 1);
port->icount.tx++;
port->x_char = 0;
return;
@@ -981,6 +988,7 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
static void msm_reset(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
unsigned int mr;
/* reset everything */
msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
@@ -988,7 +996,10 @@ static void msm_reset(struct uart_port *port)
msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
mr = msm_read(port, UART_MR1);
mr &= ~UART_MR1_RX_RDY_CTL;
msm_write(port, mr, UART_MR1);
/* Disable DM modes */
if (msm_port->is_uartdm)
@@ -1591,6 +1602,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
int num_newlines = 0;
bool replaced = false;
void __iomem *tf;
int locked = 1;
if (is_uartdm)
tf = port->membase + UARTDM_TF;
@@ -1603,7 +1615,13 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;
spin_lock(&port->lock);
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
locked = spin_trylock(&port->lock);
else
spin_lock(&port->lock);
if (is_uartdm)
msm_reset_dm_count(port, count);
@@ -1639,7 +1657,9 @@ static void __msm_console_write(struct uart_port *port, const char *s,
iowrite32_rep(tf, buf, 1);
i += num_chars;
}
spin_unlock(&port->lock);
if (locked)
spin_unlock(&port->lock);
}
#ifdef CONFIG_SERIAL_RX_CONSOLE_ONLY

View File

@@ -61,7 +61,6 @@
#include <linux/ipc_logging.h>
#include <asm/irq.h>
#include <linux/kthread.h>
#include <uapi/linux/sched.h>
#include <linux/msm-sps.h>
#include <linux/platform_data/msm_serial_hs.h>
@@ -89,13 +88,26 @@ enum {
DBG_LEV = 4U,
};
#define MSM_HS_DBG(x...) ((void)0)
#define MSM_HS_DBG(x...) do { \
if (msm_uport->ipc_debug_mask >= DBG_LEV) { \
if (msm_uport->ipc_msm_hs_log_ctxt) \
ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
} \
} while (0)
#define MSM_HS_INFO(x...) ((void)0)
#define MSM_HS_INFO(x...) do { \
if (msm_uport->ipc_debug_mask >= INFO_LEV) {\
if (msm_uport->ipc_msm_hs_log_ctxt) \
ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
} \
} while (0)
/* warnings and errors show up on console always */
#define MSM_HS_WARN(x...) do { \
pr_warn(x); \
if (msm_uport->ipc_msm_hs_log_ctxt && \
msm_uport->ipc_debug_mask >= WARN_LEV) \
ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
} while (0)
/* ERROR condition in the driver sets the hs_serial_debug_mask
@@ -104,9 +116,17 @@ enum {
*/
#define MSM_HS_ERR(x...) do { \
pr_err(x); \
if (msm_uport->ipc_msm_hs_log_ctxt && \
msm_uport->ipc_debug_mask >= ERR_LEV) { \
ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
msm_uport->ipc_debug_mask = FATAL_LEV; \
} \
} while (0)
#define LOG_USR_MSG(ctx, x...) ((void)0)
#define LOG_USR_MSG(ctx, x...) do { \
if (ctx) \
ipc_log_string(ctx, x); \
} while (0)
/*
* There are 3 different kind of UART Core available on MSM.
@@ -2739,7 +2759,6 @@ static int uartdm_init_port(struct uart_port *uport)
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
struct msm_hs_tx *tx = &msm_uport->tx;
struct msm_hs_rx *rx = &msm_uport->rx;
struct sched_param param = { .sched_priority = 1 };
init_waitqueue_head(&rx->wait);
init_waitqueue_head(&tx->wait);
@@ -2754,18 +2773,15 @@ static int uartdm_init_port(struct uart_port *uport)
MSM_HS_ERR("%s(): error creating task\n", __func__);
goto exit_lh_init;
}
sched_setscheduler(rx->task, SCHED_FIFO, &param);
kthread_init_work(&rx->kwork, msm_serial_hs_rx_work);
kthread_init_worker(&tx->kworker);
tx->task = kthread_run(kthread_worker_fn,
&tx->kworker, "msm_serial_hs_%d_tx_work", uport->line);
if (IS_ERR(tx->task)) {
if (IS_ERR(rx->task)) {
MSM_HS_ERR("%s(): error creating task\n", __func__);
goto exit_lh_init;
}
sched_setscheduler(tx->task, SCHED_FIFO, &param);
kthread_init_work(&tx->kwork, msm_serial_hs_tx_work);
@@ -3285,7 +3301,6 @@ static void msm_serial_hs_rt_init(struct uart_port *uport)
msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
mutex_unlock(&msm_uport->mtx);
pm_runtime_enable(uport->dev);
tty_port_set_policy(&uport->state->port, SCHED_FIFO, 1);
}
static int msm_hs_runtime_suspend(struct device *dev)

View File

@@ -581,7 +581,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
port->membase = devm_ioremap_resource(&pdev->dev, reg);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
return -PTR_ERR(port->membase);
data = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart_data),
GFP_KERNEL);

View File

@@ -1524,12 +1524,10 @@ static int __init sc16is7xx_init(void)
#endif
return ret;
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
err_spi:
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
i2c_del_driver(&sc16is7xx_i2c_uart_driver);
#endif
#endif
err_i2c:
uart_unregister_driver(&sc16is7xx_uart);
return ret;

View File

@@ -841,16 +841,9 @@ static void sci_receive_chars(struct uart_port *port)
tty_insert_flip_char(tport, c, TTY_NORMAL);
} else {
for (i = 0; i < count; i++) {
char c;
char c = serial_port_in(port, SCxRDR);
if (port->type == PORT_SCIF ||
port->type == PORT_HSCIF) {
status = serial_port_in(port, SCxSR);
c = serial_port_in(port, SCxRDR);
} else {
c = serial_port_in(port, SCxRDR);
status = serial_port_in(port, SCxSR);
}
status = serial_port_in(port, SCxSR);
if (uart_handle_sysrq_char(port, c)) {
count--; i--;
continue;

View File

@@ -31,7 +31,6 @@
#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#define CDNS_UART_TTY_NAME "ttyPS"
#define CDNS_UART_NAME "xuartps"
@@ -40,7 +39,6 @@
#define CDNS_UART_NR_PORTS 2
#define CDNS_UART_FIFO_SIZE 64 /* FIFO size */
#define CDNS_UART_REGISTER_SPACE 0x1000
#define TX_TIMEOUT 500000
/* Rx Trigger level */
static int rx_trigger_level = 56;
@@ -687,21 +685,18 @@ static void cdns_uart_set_termios(struct uart_port *port,
unsigned int cval = 0;
unsigned int baud, minbaud, maxbaud;
unsigned long flags;
unsigned int ctrl_reg, mode_reg, val;
int err;
unsigned int ctrl_reg, mode_reg;
spin_lock_irqsave(&port->lock, flags);
/* Wait for the transmit FIFO to empty before making changes */
if (!(readl(port->membase + CDNS_UART_CR) &
CDNS_UART_CR_TX_DIS)) {
err = readl_poll_timeout(port->membase + CDNS_UART_SR,
val, (val & CDNS_UART_SR_TXEMPTY),
1000, TX_TIMEOUT);
if (err) {
dev_err(port->dev, "timed out waiting for tx empty");
return;
while (!(readl(port->membase + CDNS_UART_SR) &
CDNS_UART_SR_TXEMPTY)) {
cpu_relax();
}
}
spin_lock_irqsave(&port->lock, flags);
/* Disable the TX and RX to set baud rate */
ctrl_reg = readl(port->membase + CDNS_UART_CR);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -50,14 +50,12 @@ struct ssc_qup_nb {
/**
* struct ssc_qup_ssr GENI Serial Engine SSC qup SSR Structure.
* @probe_completed To ignore up notification during probe.
* @is_ssr_down To check SE status.
* @subsys_name Subsystem name for ssr registration.
* @active_list_head List Head of all client in SSC QUPv3.
*/
struct ssc_qup_ssr {
struct ssc_qup_nb ssc_qup_nb;
bool probe_completed;
bool is_ssr_down;
const char *subsys_name;
struct list_head active_list_head;
@@ -68,13 +66,11 @@ struct ssc_qup_ssr {
* @active_list List of SSC qup SE clients.
* @force_suspend Function pointer for Subsystem shutdown case.
* @force_resume Function pointer for Subsystem restart case.
* @ssr_enable To check SSC Qup SSR enable status.
*/
struct se_rsc_ssr {
struct list_head active_list;
int (*force_suspend)(struct device *ctrl_dev);
int (*force_resume)(struct device *ctrl_dev);
bool ssr_enable;
void (*force_suspend)(struct device *ctrl_dev);
void (*force_resume)(struct device *ctrl_dev);
};
/**
@@ -181,8 +177,8 @@ struct se_geni_rsc {
/* GENI_OUTPUT_CTRL fields */
#define DEFAULT_IO_OUTPUT_CTRL_MSK (GENMASK(6, 0))
#define GENI_IO_MUX_0_EN BIT(1)
#define GENI_IO_MUX_1_EN BIT(2)
#define GENI_IO_MUX_0_EN BIT(0)
#define GENI_IO_MUX_1_EN BIT(1)
/* GENI_CFG_REG80 fields */
#define IO1_SEL_TX BIT(2)
@@ -386,6 +382,7 @@ struct se_geni_rsc {
#define TX_EOT (BIT(1))
#define TX_SBE (BIT(2))
#define TX_RESET_DONE (BIT(3))
#define TX_GENI_CANCEL_IRQ (BIT(14))
/* SE_DMA_RX_IRQ_STAT Register fields */
#define RX_DMA_DONE (BIT(0))
@@ -394,9 +391,20 @@ struct se_geni_rsc {
#define RX_RESET_DONE (BIT(3))
#define RX_FLUSH_DONE (BIT(4))
#define RX_GENI_GP_IRQ (GENMASK(10, 5))
#define RX_GENI_CANCEL_IRQ (BIT(11))
/*
* QUPs which have HW version <=1.2 11th bit of
* DMA_RX_IRQ_STAT register denotes RX_GENI_CANCEL_IRQ bit.
*/
#define RX_GENI_CANCEL_IRQ(n) (((n.hw_major_ver <= 1) &&\
(n.hw_minor_ver <= 2)) ? BIT(11) : BIT(14))
#define RX_GENI_GP_IRQ_EXT (GENMASK(13, 12))
/* DMA DEBUG Register fields */
#define DMA_TX_ACTIVE (BIT(0))
#define DMA_RX_ACTIVE (BIT(1))
#define DMA_TX_STATE (GENMASK(7, 4))
#define DMA_RX_STATE (GENMASK(11, 8))
#define DEFAULT_BUS_WIDTH (4)
#define DEFAULT_SE_CLK (19200000)
@@ -740,16 +748,6 @@ int geni_se_tx_dma_prep(struct device *wrapper_dev, void __iomem *base,
*/
void geni_se_rx_dma_start(void __iomem *base, int rx_len, dma_addr_t *rx_dma);
/**
* geni_get_iommu_dev() - Returns IOMMU device attached to QUP wrapper.
* @wrapper_dev Pointer to QUP wrapper device.
*
* This functions returns IOMMU device attached to QUP wrapper node.
*
* Return Pointer to IOMMU dev.
*/
struct device *geni_get_iommu_dev(struct device *wrapper_dev);
/**
* geni_se_rx_dma_prep() - Prepare the Serial Engine for RX DMA transfer
* @wrapper_dev: QUPv3 Wrapper Device to which the TX buffer is mapped.
@@ -1070,9 +1068,5 @@ static void geni_se_rx_dma_start(void __iomem *base, int rx_len,
{
}
struct device *geni_get_iommu_dev(struct device *wrapper_dev)
{
}
#endif
#endif