From b2c72692ffdd6e39da5f2cad75ab88db925745ca Mon Sep 17 00:00:00 2001 From: Avaneesh Kumar Dwivedi Date: Mon, 24 Apr 2017 21:51:42 +0530 Subject: [PATCH 001/786] soc: qcom: Clear the memory before freeing it up It is a case of write after free, this is causing page allocation failure due to corruption. This is due to freeing up of segments allocated for venus subsystem, when venus fw loading fail midway. Change-Id: I0019a05b1d1336dcf361264607597430e5f1625a Signed-off-by: Avaneesh Kumar Dwivedi --- drivers/soc/qcom/peripheral-loader.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c index 1f28712dc1ea..ea6b40b95635 100644 --- a/drivers/soc/qcom/peripheral-loader.c +++ b/drivers/soc/qcom/peripheral-loader.c @@ -925,13 +925,13 @@ int pil_boot(struct pil_desc *desc) priv->region_start), VMID_HLOS); } + if (desc->clear_fw_region && priv->region_start) + pil_clear_segment(desc); dma_free_attrs(desc->dev, priv->region_size, priv->region, priv->region_start, desc->attrs); priv->region = NULL; } - if (desc->clear_fw_region && priv->region_start) - pil_clear_segment(desc); pil_release_mmap(desc); } return ret; -- GitLab From 0fd10f0eb2e81e3c34bcbe9d3ab2e77eafa49b10 Mon Sep 17 00:00:00 2001 From: David Collins Date: Mon, 15 May 2017 16:19:55 -0700 Subject: [PATCH 002/786] ARM: dts: msm: reduce VDD_APC0 CPR voltage adjustments for SDM845 Reduce the VDD_APC0 power cluster and L3 cache per-fuse-corner CPR open-loop and closed-loop voltage adjustments based upon characterization results. Change-Id: Ib215b453d3aff52a90192211af87a3fc6f5ce4b3 Signed-off-by: David Collins --- .../arm64/boot/dts/qcom/sdm845-regulator.dtsi | 112 +++++++++++++++++- 1 file changed, 108 insertions(+), 4 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi index 19b8744401f0..5d756717ea5d 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi @@ -191,10 +191,62 @@ 1950 2632>; qcom,cpr-open-loop-voltage-fuse-adjustment = - <100000 100000 100000 100000>; + /* Speed bin 0 */ + <100000 100000 100000 100000>, + < 0 0 0 100000>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + /* Speed bin 1 */ + <100000 100000 100000 100000>, + < 0 0 0 100000>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + /* Speed bin 2 */ + <100000 100000 100000 100000>, + < 0 0 0 100000>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>; qcom,cpr-closed-loop-voltage-fuse-adjustment = - <100000 100000 100000 100000>; + /* Speed bin 0 */ + <100000 100000 100000 100000>, + < 0 0 0 100000>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + /* Speed bin 1 */ + <100000 100000 100000 100000>, + < 0 0 0 100000>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + /* Speed bin 2 */ + <100000 100000 100000 100000>, + < 0 0 0 100000>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>, + < 0 0 0 0>; qcom,allow-voltage-interpolation; qcom,allow-quotient-interpolation; @@ -322,10 +374,62 @@ 2501 2095>; qcom,cpr-open-loop-voltage-fuse-adjustment = - <100000 100000 100000 100000>; + /* Speed bin 0 */ + <100000 100000 100000 100000>, + < 0 24000 4000 100000>, + < 0 24000 4000 0>, + < 0 24000 4000 0>, + < 0 24000 4000 0>, + < 0 24000 4000 0>, + < 0 24000 4000 0>, + < 
0 24000 4000 0>, + /* Speed bin 1 */ + <100000 100000 100000 100000>, + < 0 24000 4000 100000>, + < 0 24000 4000 20000>, + < 0 24000 4000 20000>, + < 0 24000 4000 20000>, + < 0 24000 4000 20000>, + < 0 24000 4000 20000>, + < 0 24000 4000 20000>, + /* Speed bin 2 */ + <100000 100000 100000 100000>, + < 0 24000 4000 100000>, + < 0 24000 4000 40000>, + < 0 24000 4000 40000>, + < 0 24000 4000 40000>, + < 0 24000 4000 40000>, + < 0 24000 4000 40000>, + < 0 24000 4000 40000>; qcom,cpr-closed-loop-voltage-fuse-adjustment = - <100000 100000 100000 100000>; + /* Speed bin 0 */ + <100000 100000 100000 100000>, + < 0 29000 6000 100000>, + < 0 29000 6000 0>, + < 0 29000 6000 0>, + < 0 29000 6000 0>, + < 0 29000 6000 0>, + < 0 29000 6000 0>, + < 0 29000 6000 0>, + /* Speed bin 1 */ + <100000 100000 100000 100000>, + < 0 29000 6000 100000>, + < 0 29000 6000 20000>, + < 0 29000 6000 20000>, + < 0 29000 6000 20000>, + < 0 29000 6000 20000>, + < 0 29000 6000 20000>, + < 0 29000 6000 20000>, + /* Speed bin 2 */ + <100000 100000 100000 100000>, + < 0 29000 6000 100000>, + < 0 29000 6000 40000>, + < 0 29000 6000 40000>, + < 0 29000 6000 40000>, + < 0 29000 6000 40000>, + < 0 29000 6000 40000>, + < 0 29000 6000 40000>; qcom,allow-voltage-interpolation; qcom,allow-quotient-interpolation; -- GitLab From 8d5cc6ed87ef500ea69ddc38f350a5e1052fd1fe Mon Sep 17 00:00:00 2001 From: Rajesh Kemisetti Date: Tue, 6 Jun 2017 16:44:17 +0530 Subject: [PATCH 003/786] msm: kgsl: Add support for A615 GPU Add new GPU ID and required initial settings to support Graphics functionality on SDM670. Change-Id: I275a8bdb4ed7e5310b27765b74c5d6a930711a2d Signed-off-by: Rajesh Kemisetti --- drivers/gpu/msm/adreno-gpulist.h | 17 +++++++++++++++++ drivers/gpu/msm/adreno.h | 2 ++ drivers/gpu/msm/adreno_a6xx.c | 3 ++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 9a44f3410772..b48a96c65442 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -339,4 +339,21 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .gpmu_tsens = 0x000C000D, .max_power = 5448, }, + { + .gpurev = ADRENO_REV_A615, + .core = 6, + .major = 1, + .minor = 5, + .patchid = ANY_ID, + .features = ADRENO_64BIT | ADRENO_RPMH, + .sqefw_name = "a630_sqe.fw", + .zap_name = "a615_zap", + .gpudev = &adreno_a6xx_gpudev, + .gmem_size = SZ_512K, + .num_protected_regs = 0x20, + .busy_mask = 0xFFFFFFFE, + .gpmufw_name = "a630_gmu.bin", + .gpmu_major = 0x0, + .gpmu_minor = 0x005, + }, }; diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 26c55058dce3..86a2c9fb0f75 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -195,6 +195,7 @@ enum adreno_gpurev { ADRENO_REV_A512 = 512, ADRENO_REV_A530 = 530, ADRENO_REV_A540 = 540, + ADRENO_REV_A615 = 615, ADRENO_REV_A630 = 630, }; @@ -1140,6 +1141,7 @@ static inline int adreno_is_a6xx(struct adreno_device *adreno_dev) ADRENO_GPUREV(adreno_dev) < 700; } +ADRENO_TARGET(a615, ADRENO_REV_A615) ADRENO_TARGET(a630, ADRENO_REV_A630) static inline int adreno_is_a630v1(struct adreno_device *adreno_dev) diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 314b2d8f576b..ced17de1d8c9 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -175,7 +175,8 @@ static const struct { const struct kgsl_hwcg_reg *regs; unsigned int count; } a6xx_hwcg_registers[] = { - {adreno_is_a630, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)} + {adreno_is_a630, 
a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)}, + {adreno_is_a615, a630_hwcg_regs, ARRAY_SIZE(a630_hwcg_regs)}, }; static struct a6xx_protected_regs { -- GitLab From 3815c79d459a0769c9eda5baef6dc82491943463 Mon Sep 17 00:00:00 2001 From: Rupesh Tatiya Date: Fri, 17 Feb 2017 12:58:01 +0530 Subject: [PATCH 004/786] Bluetooth: Enhance logging in btfm slim & audio codec drivers Add dai name & slim port numbers for better logs for debugging. Remove function name from pr_debug as it can be enabled by dynamic debugging. Change-Id: If9c300e1fe22680e98dd29aadfd2bf3b8c2b5624 Signed-off-by: Rupesh Tatiya --- drivers/bluetooth/btfm_slim.c | 2 +- drivers/bluetooth/btfm_slim.h | 2 +- drivers/bluetooth/btfm_slim_codec.c | 16 ++++++++-------- drivers/bluetooth/btfm_slim_wcn3990.c | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c index dc9bb0b906be..f50bf6fce832 100644 --- a/drivers/bluetooth/btfm_slim.c +++ b/drivers/bluetooth/btfm_slim.c @@ -127,7 +127,7 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch, if (!btfmslim || !ch) return -EINVAL; - BTFMSLIM_DBG("port:%d", ch->port); + BTFMSLIM_DBG("port: %d ch: %d", ch->port, ch->ch); /* Define the channel with below parameters */ prop.prot = SLIM_AUTO_ISO; diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h index 00d46a5671d9..161be782826d 100644 --- a/drivers/bluetooth/btfm_slim.h +++ b/drivers/bluetooth/btfm_slim.h @@ -13,7 +13,7 @@ #define BTFM_SLIM_H #include -#define BTFMSLIM_DBG(fmt, arg...) pr_debug("%s: " fmt "\n", __func__, ## arg) +#define BTFMSLIM_DBG(fmt, arg...) pr_debug(fmt "\n", ## arg) #define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg) #define BTFMSLIM_ERR(fmt, arg...) 
pr_err("%s: " fmt "\n", __func__, ## arg) diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c index 86760cd55a76..73a789cd43af 100644 --- a/drivers/bluetooth/btfm_slim_codec.c +++ b/drivers/bluetooth/btfm_slim_codec.c @@ -54,8 +54,8 @@ static int btfm_slim_dai_startup(struct snd_pcm_substream *substream, int ret; struct btfmslim *btfmslim = dai->dev->platform_data; - BTFMSLIM_DBG("substream = %s stream = %d", - substream->name, substream->stream); + BTFMSLIM_DBG("substream = %s stream = %d dai name = %s", + substream->name, substream->stream, dai->name); ret = btfm_slim_hw_init(btfmslim); return ret; } @@ -65,8 +65,8 @@ static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream, { struct btfmslim *btfmslim = dai->dev->platform_data; - BTFMSLIM_DBG("substream = %s stream = %d", - substream->name, substream->stream); + BTFMSLIM_DBG("substream = %s stream = %d dai name = %s", + substream->name, substream->stream, dai->name); btfm_slim_hw_deinit(btfmslim); } @@ -74,7 +74,7 @@ static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { - BTFMSLIM_DBG("dai_name = %s DAI-ID %x rate %d num_ch %d", + BTFMSLIM_DBG("dai name = %s DAI-ID %x rate %d num_ch %d", dai->name, dai->id, params_rate(params), params_channels(params)); @@ -89,7 +89,7 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream, struct btfmslim_ch *ch; uint8_t rxport, grp = false, nchan = 1; - BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name, + BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name, dai->id, dai->rate); switch (dai->id) { @@ -137,7 +137,7 @@ int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream, struct btfmslim_ch *ch; uint8_t rxport, grp = false, nchan = 1; - BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name, + BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name, dai->id, dai->rate); switch (dai->id) { @@ -387,7 +387,7 @@ static struct snd_soc_dai_driver btfmslim_dai[] = { static struct snd_soc_codec_driver btfmslim_codec = { .probe = btfm_slim_codec_probe, .remove = btfm_slim_codec_remove, - .read = btfm_slim_codec_read, + .read = btfm_slim_codec_read, .write = btfm_slim_codec_write, }; diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c index c2d5b7b7bde9..72e28da4bd3b 100644 --- a/drivers/bluetooth/btfm_slim_wcn3990.c +++ b/drivers/bluetooth/btfm_slim_wcn3990.c @@ -76,7 +76,7 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num, uint8_t reg_val = 0; uint16_t reg; - BTFMSLIM_DBG("enable(%d)", enable); + BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable); if (rxport) { /* Port enable */ reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10); -- GitLab From d021963ec9755ce28498ebfb3da91c2b8b3e0ff4 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Thu, 25 May 2017 20:17:43 -0600 Subject: [PATCH 005/786] sched: Remove debug WARN_ONCE messages If flow control functionality is not enabled in traffic controller userspace modules, WARN_ONCE messages are triggered. Note that qdisc will still be setup even if these debug messages are logged. Remove these messages to reduce log spam. 
CRs-Fixed: 2053221 Change-Id: I62a2fcb30d19579180b3df16f33953546f94511a Signed-off-by: Subash Abhinov Kasiviswanathan --- net/sched/sch_api.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 744cfe6c50aa..c2225cc5240d 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1206,12 +1206,7 @@ tc_qdisc_flow_control(struct net_device *dev, u32 tcm_handle, int enable_flow) qdisc_len = q->q.qlen; if (q->ops->change(q, &req.attr)) pr_err("%s(): qdisc change failed", __func__); - } else { - WARN_ONCE(1, "%s(): called on queue which does %s", - __func__, "not support change() operation"); } - } else { - WARN_ONCE(1, "%s(): called on bad queue", __func__); } return qdisc_len; } -- GitLab From 15d64c46e041dc8e08bc56825a7c8a458001e324 Mon Sep 17 00:00:00 2001 From: Ping Li Date: Mon, 5 Jun 2017 11:52:18 -0700 Subject: [PATCH 006/786] drm/msm/sde: correct AD4 bypass register setting Enable SW reset control capability for Apical Iridix core always. This ensures that when ares and apical_core_sw_reset are ORed together, the in/out conversion blocks reset first to block propagation of any potential hazards out of the apical wrapper; also clocks to the apical core are stopped preventing internal hazards. Change-Id: I57af405f34d7daf6675c46422afbda0fb541e9ae Signed-off-by: Ping Li --- drivers/gpu/drm/msm/sde/sde_hw_ad4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c index b02cc06fe74a..35fc2b559c37 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ad4.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ad4.c @@ -202,7 +202,7 @@ static int ad4_mode_setup(struct sde_hw_dspp *dspp, enum ad4_modes mode) } else { info[dspp->idx].state = ad4_state_run; SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->ad.base + blk_offset, - 0); + 0x100); } return 0; -- GitLab From ce38f15ddfe6115cb315bcfadc06470a2c35d422 Mon Sep 17 00:00:00 2001 From: Sudarshan Rajagopalan Date: Wed, 17 May 2017 18:15:15 -0700 Subject: [PATCH 007/786] msm: ion: remove unused ion CP flags Remove certain ION content protection flags which are not used, to free up some bits to accommodate adding more vmids in the future. Change-Id: I728532293afdc8a00d5d24cad44261823d77c4fc Signed-off-by: Sudarshan Rajagopalan --- drivers/staging/android/uapi/msm_ion.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h index db4fc6360523..cc77674bbcc3 100644 --- a/drivers/staging/android/uapi/msm_ion.h +++ b/drivers/staging/android/uapi/msm_ion.h @@ -84,7 +84,6 @@ enum cp_mem_usage { #define ION_FLAG_CP_NON_PIXEL ION_BIT(20) #define ION_FLAG_CP_CAMERA ION_BIT(21) #define ION_FLAG_CP_HLOS ION_BIT(22) -#define ION_FLAG_CP_HLOS_FREE ION_BIT(23) #define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25) #define ION_FLAG_CP_APP ION_BIT(26) #define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27) @@ -96,13 +95,6 @@ enum cp_mem_usage { */ #define ION_FLAG_SECURE ION_BIT(ION_HEAP_ID_RESERVED) -/** - * Flag for clients to force contiguous memort allocation - * - * Use of this flag is carefully monitored! - */ -#define ION_FLAG_FORCE_CONTIGUOUS ION_BIT(30) - /* * Used in conjunction with heap which pool memory to force an allocation * to come from the page allocator directly instead of from the pool allocation @@ -113,7 +105,6 @@ enum cp_mem_usage { * Deprecated! 
Please use the corresponding ION_FLAG_* */ #define ION_SECURE ION_FLAG_SECURE -#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS /** * Macro should be used with ion_heap_ids defined above. -- GitLab From c3e15fc14569b9d66a1dce675449621345a3841f Mon Sep 17 00:00:00 2001 From: Sudarshan Rajagopalan Date: Wed, 17 May 2017 18:34:42 -0700 Subject: [PATCH 008/786] ARM: dts: msm: convert SPSS heap into secure CMA heap type for sdm845 ION content protection is applicable only for secure CMA heap types. Hence, convert SPSS heap into secure CMA type for its usecase. Change-Id: I3af5db08ea10e9e8046d1ea5a4c99c7602f54802 Signed-off-by: Sudarshan Rajagopalan --- arch/arm64/boot/dts/qcom/sdm845-ion.dtsi | 8 ++++---- arch/arm64/boot/dts/qcom/sdm845.dtsi | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi index 25798199b649..829dfcc12ccf 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-ion.dtsi @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -33,10 +33,10 @@ qcom,ion-heap-type = "DMA"; }; - qcom,ion-heap@13 { /* SPSS HEAP */ + qcom,ion-heap@13 { /* SECURE SPSS HEAP */ reg = <13>; - memory-region = <&sp_mem>; - qcom,ion-heap-type = "DMA"; + memory-region = <&secure_sp_mem>; + qcom,ion-heap-type = "HYP_CMA"; }; qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */ diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index ad451ce6392a..0860518ac8bf 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -551,7 +551,7 @@ size = <0 0x1400000>; }; - sp_mem: sp_region { /* SPSS-HLOS ION shared mem */ + secure_sp_mem: secure_sp_region { /* SPSS-HLOS ION shared mem */ compatible = "shared-dma-pool"; alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */ reusable; -- GitLab From c294886423eba366a94aec9401b0755a1900ed51 Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Wed, 26 Apr 2017 14:30:31 -0700 Subject: [PATCH 009/786] ASoC: wsa881x: Assign device number in reset Assign the logical device number in the swr_reset function so that the dev_num of the speaker device is assigned to the correct one after any reset event. CRs-fixed: 2039206 Change-Id: Ief3c65c3b36c93e7dcf775413e527e92d9ec7b0c Signed-off-by: Karthikeyan Mani --- sound/soc/codecs/wsa881x.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c index 062bae234311..ba8a3ef9d829 100644 --- a/sound/soc/codecs/wsa881x.c +++ b/sound/soc/codecs/wsa881x.c @@ -1337,6 +1337,7 @@ static int wsa881x_swr_reset(struct swr_device *pdev) /* Retry after 1 msec delay */ usleep_range(1000, 1100); } + pdev->dev_num = devnum; regcache_mark_dirty(wsa881x->regmap); regcache_sync(wsa881x->regmap); return 0; -- GitLab From b33e26e941030774463fe235054ddabb976a878b Mon Sep 17 00:00:00 2001 From: Brahmaji K Date: Thu, 1 Jun 2017 17:20:10 +0530 Subject: [PATCH 010/786] qseecom: Fix accessing userspace memory in kernel space Use put_user API to write the data to userspace from kernel space to avoid accessing userspace memory directly in kernel space. 
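In short, the change replaces a direct store through a user-supplied pointer with put_user(), which performs the access checks and fails cleanly on a bad pointer. A minimal sketch of the pattern, with variable names following the qseecom code below:

    /* before: direct dereference of a userspace pointer, no fault handling
     *     *(uint32_t *)req.resp_buf = resp.result;
     * after: put_user() validates the destination and returns nonzero
     * (-EFAULT) on failure, which the driver maps to -EINVAL */
    if (put_user(resp.result, (uint32_t __user *)req.resp_buf)) {
            ret = -EINVAL;
            goto exit;
    }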
Change-Id: I649fe2597e80ccad50cf16b355e220734810e94c Signed-off-by: Brahmaji K --- drivers/misc/qseecom.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c index c1857c7630f8..f68d880745d3 100644 --- a/drivers/misc/qseecom.c +++ b/drivers/misc/qseecom.c @@ -2912,7 +2912,11 @@ static int qseecom_send_service_cmd(struct qseecom_dev_handle *data, } if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) { pr_warn("RPMB key status is 0x%x\n", resp.result); - *(uint32_t *)req.resp_buf = resp.result; + if (put_user(resp.result, + (uint32_t __user *)req.resp_buf)) { + ret = -EINVAL; + goto exit; + } ret = 0; } break; @@ -6507,11 +6511,16 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data, void *cmd_buf = NULL; size_t cmd_len; struct sglist_info *table = data->sglistinfo_ptr; + void *req_ptr = NULL; + void *resp_ptr = NULL; ret = __qseecom_qteec_validate_msg(data, req); if (ret) return ret; + req_ptr = req->req_ptr; + resp_ptr = req->resp_ptr; + /* find app_id & img_name from list */ spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, @@ -6529,6 +6538,11 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data, return -ENOENT; } + req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req->req_ptr); + req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req->resp_ptr); + if ((cmd_id == QSEOS_TEE_OPEN_SESSION) || (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) { ret = __qseecom_update_qteec_req_buf( @@ -6540,10 +6554,10 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data, if (qseecom.qsee_version < QSEE_VERSION_40) { ireq.app_id = data->client.app_id; ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, - (uintptr_t)req->req_ptr); + (uintptr_t)req_ptr); ireq.req_len = req->req_len; ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, - (uintptr_t)req->resp_ptr); + (uintptr_t)resp_ptr); ireq.resp_len = req->resp_len; ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table); ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE; @@ -6554,10 +6568,10 @@ static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data, } else { ireq_64bit.app_id = data->client.app_id; ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, - (uintptr_t)req->req_ptr); + (uintptr_t)req_ptr); ireq_64bit.req_len = req->req_len; ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, - (uintptr_t)req->resp_ptr); + (uintptr_t)resp_ptr); ireq_64bit.resp_len = req->resp_len; if ((data->client.app_arch == ELFCLASS32) && ((ireq_64bit.req_ptr >= -- GitLab From 1d1adbb1eb7375227c7fa2a528dd730550b0de7a Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Thu, 18 May 2017 17:44:50 -0600 Subject: [PATCH 011/786] drivers: lmh-dcvsh: Add debug support LMH DCVSh hardware can provide debug information. Provide a generic debugfs interface in LMH DCVSh driver to support querying the hardware for different information using a different command formats. 
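Concretely, the patch exposes four nodes under the lmh_monitor debugfs directory (data, config, data_types and config_types, typically under /sys/kernel/debug) and routes them to SCM calls into the LMH hardware. The read side follows the usual debugfs plus seq_file pattern; a stripped-down sketch of that pattern (names shortened, not the exact driver code):

    static int lmh_show(struct seq_file *s, void *unused)
    {
            /* dump the debug words fetched from the hardware */
            seq_puts(s, "0x0 0x1 ...\n");
            return 0;
    }

    static int lmh_open(struct inode *inode, struct file *file)
    {
            return single_open(file, lmh_show, inode->i_private);
    }

    static const struct file_operations lmh_fops = {
            .open    = lmh_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* registered via debugfs_create_dir()/debugfs_create_file() */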
Change-Id: I10053a224e79f549ec68f9c39d652e7668b34ffc Signed-off-by: Ram Chandrasekar --- drivers/thermal/qcom/Makefile | 2 +- drivers/thermal/qcom/lmh_dbg.c | 567 ++++++++++++++++++++++++++++ drivers/thermal/qcom/lmh_dbg.h | 20 + drivers/thermal/qcom/msm_lmh_dcvs.c | 2 + 4 files changed, 590 insertions(+), 1 deletion(-) create mode 100644 drivers/thermal/qcom/lmh_dbg.c create mode 100644 drivers/thermal/qcom/lmh_dbg.h diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile index 885938041140..2ba487d13a07 100644 --- a/drivers/thermal/qcom/Makefile +++ b/drivers/thermal/qcom/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o tsens-8996.o obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o -obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o +obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o lmh_dbg.o obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o diff --git a/drivers/thermal/qcom/lmh_dbg.c b/drivers/thermal/qcom/lmh_dbg.c new file mode 100644 index 000000000000..74ffeda3d89e --- /dev/null +++ b/drivers/thermal/qcom/lmh_dbg.c @@ -0,0 +1,567 @@ +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "lmh_dbg.h" + +#define LMH_MON_NAME "lmh_monitor" +#define LMH_DBGFS_READ "data" +#define LMH_DBGFS_CONFIG_READ "config" +#define LMH_DBGFS_READ_TYPES "data_types" +#define LMH_DBGFS_CONFIG_TYPES "config_types" +#define LMH_SCM_PAYLOAD_SIZE 10 +#define LMH_READ_LINE_LENGTH 10 +#define LMH_DEBUG_READ_TYPE 0x0 +#define LMH_DEBUG_CONFIG_TYPE 0x1 +#define LMH_DEBUG_SET 0x08 +#define LMH_DEBUG_READ_BUF_SIZE 0x09 +#define LMH_DEBUG_READ 0x0A +#define LMH_DEBUG_GET_TYPE 0x0B + +struct lmh_driver_data { + struct device *dev; + uint32_t *read_type; + uint32_t *config_type; + uint32_t read_type_count; + uint32_t config_type_count; + struct dentry *debugfs_parent; + struct dentry *debug_read; + struct dentry *debug_config; + struct dentry *debug_read_type; + struct dentry *debug_config_type; +}; + +enum lmh_read_type { + LMH_READ_TYPE = 0, + LMH_CONFIG_TYPE, +}; + +static struct lmh_driver_data *lmh_data; + +static int lmh_debug_read(uint32_t **buf) +{ + int ret = 0, size = 0, tz_ret = 0; + static uint32_t curr_size; + struct scm_desc desc_arg; + static uint32_t *payload; + + desc_arg.arginfo = SCM_ARGS(0); + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, + LMH_DEBUG_READ_BUF_SIZE), &desc_arg); + size = desc_arg.ret[0]; + if (ret) { + pr_err("Error in SCM v%d get debug buffer size call. err:%d\n", + (is_scm_armv8()) ? 
8 : 7, ret); + goto get_dbg_exit; + } + if (!size) { + pr_err("No Debug data to read.\n"); + ret = -ENODEV; + goto get_dbg_exit; + } + size = SCM_BUFFER_SIZE(uint32_t) * size * LMH_READ_LINE_LENGTH; + if (curr_size != size) { + if (payload) + devm_kfree(lmh_data->dev, payload); + payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size), + GFP_KERNEL); + if (!payload) { + ret = -ENOMEM; + goto get_dbg_exit; + } + curr_size = size; + } + + /* &payload may be a physical address > 4 GB */ + desc_arg.args[0] = SCM_BUFFER_PHYS(payload); + desc_arg.args[1] = curr_size; + desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL); + dmac_flush_range(payload, (void *)payload + curr_size); + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LMH_DEBUG_READ), + &desc_arg); + dmac_inv_range(payload, (void *)payload + curr_size); + tz_ret = desc_arg.ret[0]; + /* Have memory barrier before we access the TZ data */ + mb(); + if (ret) { + pr_err("Error in get debug read. err:%d\n", ret); + goto get_dbg_exit; + } + if (tz_ret) { + pr_err("TZ API returned error. err:%d\n", tz_ret); + ret = tz_ret; + goto get_dbg_exit; + } + +get_dbg_exit: + if (ret && payload) { + devm_kfree(lmh_data->dev, payload); + payload = NULL; + curr_size = 0; + } + *buf = payload; + + return (ret < 0) ? ret : curr_size; +} + +static int lmh_debug_config_write(uint32_t cmd_id, uint32_t *buf, int size) +{ + int ret = 0, size_bytes = 0; + struct scm_desc desc_arg; + uint32_t *payload = NULL; + + size_bytes = (size - 3) * sizeof(uint32_t); + payload = devm_kzalloc(lmh_data->dev, PAGE_ALIGN(size_bytes), + GFP_KERNEL); + if (!payload) { + ret = -ENOMEM; + goto set_cfg_exit; + } + memcpy(payload, &buf[3], size_bytes); + + /* &payload may be a physical address > 4 GB */ + desc_arg.args[0] = SCM_BUFFER_PHYS(payload); + desc_arg.args[1] = size_bytes; + desc_arg.args[2] = buf[0]; + desc_arg.args[3] = buf[1]; + desc_arg.args[4] = buf[2]; + desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL, SCM_VAL, + SCM_VAL); + dmac_flush_range(payload, (void *)payload + size_bytes); + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, cmd_id), &desc_arg); + /* Have memory barrier before we access the TZ data */ + mb(); + if (ret) { + pr_err("Error in config debug read. err:%d\n", ret); + goto set_cfg_exit; + } + +set_cfg_exit: + return ret; +} + +static int lmh_parse_and_extract(const char __user *user_buf, size_t count, + enum lmh_read_type type) +{ + char *local_buf = NULL, *token = NULL, *curr_ptr = NULL, *token1 = NULL; + char *next_line = NULL; + int ret = 0, data_ct = 0, i = 0, size = 0; + uint32_t *config_buf = NULL; + + /* Allocate two extra space to add ';' character and NULL terminate */ + local_buf = kzalloc(count + 2, GFP_KERNEL); + if (!local_buf) { + ret = -ENOMEM; + goto dfs_cfg_write_exit; + } + if (copy_from_user(local_buf, user_buf, count)) { + pr_err("user buf error\n"); + ret = -EFAULT; + goto dfs_cfg_write_exit; + } + size = count + (strnchr(local_buf, count, '\n') ? 
1 : 2); + local_buf[size - 2] = ';'; + local_buf[size - 1] = '\0'; + curr_ptr = next_line = local_buf; + while ((token1 = strnchr(next_line, local_buf + size - next_line, ';')) + != NULL) { + data_ct = 0; + *token1 = '\0'; + curr_ptr = next_line; + next_line = token1 + 1; + for (token = (char *)curr_ptr; token && + ((token = strnchr(token, next_line - token, ' ')) + != NULL); token++) + data_ct++; + if (data_ct < 2) { + pr_err("Invalid format string:[%s]\n", curr_ptr); + ret = -EINVAL; + goto dfs_cfg_write_exit; + } + config_buf = kzalloc((++data_ct) * sizeof(uint32_t), + GFP_KERNEL); + if (!config_buf) { + ret = -ENOMEM; + goto dfs_cfg_write_exit; + } + pr_debug("Input:%s data_ct:%d\n", curr_ptr, data_ct); + for (i = 0, token = (char *)curr_ptr; token && (i < data_ct); + i++) { + token = strnchr(token, next_line - token, ' '); + if (token) + *token = '\0'; + ret = kstrtouint(curr_ptr, 0, &config_buf[i]); + if (ret < 0) { + pr_err("Data[%s] scan error. err:%d\n", + curr_ptr, ret); + kfree(config_buf); + goto dfs_cfg_write_exit; + } + if (token) + curr_ptr = ++token; + } + switch (type) { + case LMH_READ_TYPE: + case LMH_CONFIG_TYPE: + ret = lmh_debug_config_write(LMH_DEBUG_SET, + config_buf, data_ct); + break; + default: + ret = -EINVAL; + break; + } + kfree(config_buf); + if (ret) { + pr_err("Config error. type:%d err:%d\n", type, ret); + goto dfs_cfg_write_exit; + } + } + +dfs_cfg_write_exit: + kfree(local_buf); + return ret; +} + +static ssize_t lmh_dbgfs_config_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + lmh_parse_and_extract(user_buf, count, LMH_CONFIG_TYPE); + return count; +} + +static int lmh_dbgfs_data_read(struct seq_file *seq_fp, void *data) +{ + static uint32_t *read_buf; + static int read_buf_size; + int idx = 0, ret = 0; + + if (!read_buf_size) { + ret = lmh_debug_read(&read_buf); + if (ret <= 0) + goto dfs_read_exit; + if (!read_buf || ret < sizeof(uint32_t)) { + ret = -EINVAL; + goto dfs_read_exit; + } + read_buf_size = ret; + ret = 0; + } + + do { + seq_printf(seq_fp, "0x%x ", read_buf[idx]); + if (seq_has_overflowed(seq_fp)) { + pr_err("Seq overflow. idx:%d\n", idx); + goto dfs_read_exit; + } + idx++; + if ((idx % LMH_READ_LINE_LENGTH) == 0) { + seq_puts(seq_fp, "\n"); + if (seq_has_overflowed(seq_fp)) { + pr_err("Seq overflow. idx:%d\n", idx); + goto dfs_read_exit; + } + } + } while (idx < (read_buf_size / sizeof(uint32_t))); + read_buf_size = 0; + read_buf = NULL; + +dfs_read_exit: + return ret; +} + +static int lmh_get_recurssive_data(struct scm_desc *desc_arg, uint32_t cmd_idx, + uint32_t *payload, uint32_t *size, uint32_t *dest_buf) +{ + int idx = 0, ret = 0; + uint32_t next = 0; + + do { + desc_arg->args[cmd_idx] = next; + dmac_flush_range(payload, (void *)payload + + sizeof(*payload) * LMH_SCM_PAYLOAD_SIZE); + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, LMH_DEBUG_GET_TYPE), + desc_arg); + dmac_inv_range(payload, (void *)payload + + sizeof(*payload) * LMH_SCM_PAYLOAD_SIZE); + *size = desc_arg->ret[0]; + /* Have barrier before reading from TZ data */ + mb(); + if (ret) { + pr_err("Error in SCM get type. 
cmd:%x err:%d\n", + LMH_DEBUG_GET_TYPE, ret); + return ret; + } + if (!*size) { + pr_err("No LMH device supported.\n"); + return -ENODEV; + } + if (!dest_buf) + dest_buf = devm_kcalloc(lmh_data->dev, *size, + sizeof(*dest_buf), GFP_KERNEL); + if (!dest_buf) + return -ENOMEM; + + for (idx = next; + idx < min((next + LMH_SCM_PAYLOAD_SIZE), *size); + idx++) + dest_buf[idx] = payload[idx - next]; + next += LMH_SCM_PAYLOAD_SIZE; + } while (next < *size); + + return ret; +} + +static ssize_t lmh_dbgfs_data_write(struct file *file, + const char __user *user_buf, size_t count, loff_t *ppos) +{ + lmh_parse_and_extract(user_buf, count, LMH_READ_TYPE); + return count; +} + +static int lmh_dbgfs_data_open(struct inode *inode, struct file *file) +{ + return single_open(file, lmh_dbgfs_data_read, inode->i_private); +} + +static int lmh_debug_get_types(bool is_read, uint32_t **buf) +{ + int ret = 0; + uint32_t size = 0; + struct scm_desc desc_arg; + uint32_t *payload = NULL, *dest_buf = NULL; + + if (is_read && lmh_data->read_type) { + *buf = lmh_data->read_type; + return lmh_data->read_type_count; + } else if (!is_read && lmh_data->config_type) { + *buf = lmh_data->config_type; + return lmh_data->config_type_count; + } + payload = devm_kzalloc(lmh_data->dev, + PAGE_ALIGN(LMH_SCM_PAYLOAD_SIZE * + sizeof(*payload)), GFP_KERNEL); + if (!payload) + return -ENOMEM; + /* &payload may be a physical address > 4 GB */ + desc_arg.args[0] = SCM_BUFFER_PHYS(payload); + desc_arg.args[1] = + SCM_BUFFER_SIZE(uint32_t) * LMH_SCM_PAYLOAD_SIZE; + desc_arg.args[2] = (is_read) ? + LMH_DEBUG_READ_TYPE : LMH_DEBUG_CONFIG_TYPE; + desc_arg.arginfo = SCM_ARGS(4, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL); + ret = lmh_get_recurssive_data(&desc_arg, 3, payload, &size, dest_buf); + if (ret) + goto get_type_exit; + pr_debug("Total %s types:%d\n", (is_read) ? "read" : "config", size); + if (is_read) { + lmh_data->read_type = *buf = dest_buf; + lmh_data->read_type_count = size; + } else { + lmh_data->config_type = *buf = dest_buf; + lmh_data->config_type_count = size; + } + +get_type_exit: + if (ret) { + if (lmh_data->read_type_count) { + devm_kfree(lmh_data->dev, lmh_data->read_type); + lmh_data->read_type_count = 0; + } + if (lmh_data->config_type_count) { + devm_kfree(lmh_data->dev, lmh_data->config_type); + lmh_data->config_type_count = 0; + } + } + if (payload) + devm_kfree(lmh_data->dev, payload); + + return (ret) ? ret : size; +} + +static int lmh_get_types(struct seq_file *seq_fp, enum lmh_read_type type) +{ + int ret = 0, idx = 0, size = 0; + uint32_t *type_list = NULL; + + switch (type) { + case LMH_READ_TYPE: + ret = lmh_debug_get_types(true, &type_list); + break; + case LMH_CONFIG_TYPE: + ret = lmh_debug_get_types(false, &type_list); + break; + default: + return -EINVAL; + } + if (ret <= 0 || !type_list) { + pr_err("No device information. 
err:%d\n", ret); + return -ENODEV; + } + size = ret; + for (idx = 0; idx < size; idx++) + seq_printf(seq_fp, "0x%x ", type_list[idx]); + seq_puts(seq_fp, "\n"); + + return 0; +} + +static int lmh_dbgfs_read_type(struct seq_file *seq_fp, void *data) +{ + return lmh_get_types(seq_fp, LMH_READ_TYPE); +} + +static int lmh_dbgfs_read_type_open(struct inode *inode, struct file *file) +{ + return single_open(file, lmh_dbgfs_read_type, inode->i_private); +} + +static int lmh_dbgfs_config_type(struct seq_file *seq_fp, void *data) +{ + return lmh_get_types(seq_fp, LMH_CONFIG_TYPE); +} + +static int lmh_dbgfs_config_type_open(struct inode *inode, struct file *file) +{ + return single_open(file, lmh_dbgfs_config_type, inode->i_private); +} + +static const struct file_operations lmh_dbgfs_config_fops = { + .write = lmh_dbgfs_config_write, +}; +static const struct file_operations lmh_dbgfs_read_fops = { + .open = lmh_dbgfs_data_open, + .read = seq_read, + .write = lmh_dbgfs_data_write, + .llseek = seq_lseek, + .release = single_release, +}; +static const struct file_operations lmh_dbgfs_read_type_fops = { + .open = lmh_dbgfs_read_type_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +static const struct file_operations lmh_dbgfs_config_type_fops = { + .open = lmh_dbgfs_config_type_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int lmh_check_tz_debug_cmds(void) +{ + if (!scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_SET) + || !scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_READ_BUF_SIZE) + || !scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_READ) + || !scm_is_call_available(SCM_SVC_LMH, LMH_DEBUG_GET_TYPE)) { + pr_debug("LMH debug scm not available\n"); + return -ENODEV; + } + + return 0; +} + +static int lmh_debug_init(void) +{ + int ret = 0; + + if (lmh_check_tz_debug_cmds()) { + pr_debug("Debug commands not available.\n"); + return -ENODEV; + } + + lmh_data->debugfs_parent = debugfs_create_dir(LMH_MON_NAME, NULL); + if (IS_ERR(lmh_data->debugfs_parent)) { + ret = PTR_ERR(lmh_data->debugfs_parent); + pr_debug("Error creating debugfs dir:%s. 
err:%d\n", + LMH_MON_NAME, ret); + return ret; + } + lmh_data->debug_read = debugfs_create_file(LMH_DBGFS_READ, 0600, + lmh_data->debugfs_parent, NULL, + &lmh_dbgfs_read_fops); + if (IS_ERR(lmh_data->debug_read)) { + pr_err("Error creating" LMH_DBGFS_READ "entry.\n"); + ret = PTR_ERR(lmh_data->debug_read); + goto dbg_reg_exit; + } + lmh_data->debug_config = debugfs_create_file(LMH_DBGFS_CONFIG_READ, + 0200, lmh_data->debugfs_parent, NULL, + &lmh_dbgfs_config_fops); + if (IS_ERR(lmh_data->debug_config)) { + pr_err("Error creating" LMH_DBGFS_CONFIG_READ "entry\n"); + ret = PTR_ERR(lmh_data->debug_config); + goto dbg_reg_exit; + } + lmh_data->debug_read_type = debugfs_create_file(LMH_DBGFS_READ_TYPES, + 0400, lmh_data->debugfs_parent, NULL, + &lmh_dbgfs_read_type_fops); + if (IS_ERR(lmh_data->debug_read_type)) { + pr_err("Error creating" LMH_DBGFS_READ_TYPES "entry\n"); + ret = PTR_ERR(lmh_data->debug_read_type); + goto dbg_reg_exit; + } + lmh_data->debug_read_type = debugfs_create_file( + LMH_DBGFS_CONFIG_TYPES, + 0400, lmh_data->debugfs_parent, NULL, + &lmh_dbgfs_config_type_fops); + if (IS_ERR(lmh_data->debug_config_type)) { + pr_err("Error creating" LMH_DBGFS_CONFIG_TYPES "entry\n"); + ret = PTR_ERR(lmh_data->debug_config_type); + goto dbg_reg_exit; + } + +dbg_reg_exit: + if (ret) + /*Clean up all the dbg nodes*/ + debugfs_remove_recursive(lmh_data->debugfs_parent); + + return ret; +} + +int lmh_debug_register(struct platform_device *pdev) +{ + int ret = 0; + + if (lmh_data) { + pr_debug("Reinitializing lmh hardware driver\n"); + return -EEXIST; + } + lmh_data = devm_kzalloc(&pdev->dev, sizeof(*lmh_data), GFP_KERNEL); + if (!lmh_data) + return -ENOMEM; + lmh_data->dev = &pdev->dev; + + ret = lmh_debug_init(); + if (ret) { + pr_debug("LMH debug init failed. err:%d\n", ret); + goto probe_exit; + } + + return ret; + +probe_exit: + lmh_data = NULL; + return ret; +} +EXPORT_SYMBOL(lmh_debug_register); diff --git a/drivers/thermal/qcom/lmh_dbg.h b/drivers/thermal/qcom/lmh_dbg.h new file mode 100644 index 000000000000..6ceb83202fa0 --- /dev/null +++ b/drivers/thermal/qcom/lmh_dbg.h @@ -0,0 +1,20 @@ +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __QTI_LMH_H__ +#define __QTI_LMH_H__ + +#include + +int lmh_debug_register(struct platform_device *pdev); + +#endif /* __QTI_LMH_H__ */ diff --git a/drivers/thermal/qcom/msm_lmh_dcvs.c b/drivers/thermal/qcom/msm_lmh_dcvs.c index 65dc2df36077..4284b6c786e1 100644 --- a/drivers/thermal/qcom/msm_lmh_dcvs.c +++ b/drivers/thermal/qcom/msm_lmh_dcvs.c @@ -33,6 +33,7 @@ #include #include "../thermal_core.h" +#include "lmh_dbg.h" #define CREATE_TRACE_POINTS #include @@ -590,6 +591,7 @@ static int limits_dcvs_probe(struct platform_device *pdev) INIT_LIST_HEAD(&hw->list); list_add(&hw->list, &lmh_dcvs_hw_list); mutex_unlock(&lmh_dcvs_list_access); + lmh_debug_register(pdev); ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "lmh-dcvs/cdev:online", limits_cpu_online, NULL); -- GitLab From b127389487d173ba082fcd59804970ae182e24fe Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Thu, 1 Jun 2017 13:12:07 -0600 Subject: [PATCH 012/786] msm: kgsl: Put back the process refcount In case we fail to create the context timeline, put back the process refcount. Change-Id: Ic7241f0e28d6c53822d21fbe59ac16ca5ebd9ba5 Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/kgsl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 6a39792834e9..84c065faf140 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -580,8 +580,10 @@ int kgsl_context_init(struct kgsl_device_private *dev_priv, context->tid = task_pid_nr(current); ret = kgsl_sync_timeline_create(context); - if (ret) + if (ret) { + kgsl_process_private_put(dev_priv->process_priv); goto out; + } snprintf(name, sizeof(name), "context-%d", id); kgsl_add_event_group(&context->events, context, name, -- GitLab From ac1200c30b21f368f75a6f5762329349cf100c78 Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Tue, 25 Apr 2017 13:48:46 -0700 Subject: [PATCH 013/786] dwc3: resize txfifo of IN/INT endpoint before enabling it USB IN/INT endpoint stalls when performing TX FIFO resize functionality when IN/INT endpoint is already active i.e. usb endpoint is enabled and usb request is pending with it. Fix this issue by making sure that TX FIFO resize is performed before enabling endpoint which shall happen after set_alt(1) and before any function queues request with its allocated USB endpoint. 
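For reference, the per-endpoint sizing done in dwc3_gadget_resize_tx_fifos() works out as below; a worked example with assumed values of max_packet = 1024 bytes, mdwidth = 8 bytes (64-bit bus) and mult = 3 for a GSI or bursting bulk endpoint:

    /* formula from the patch; numbers are illustrative only */
    tmp       = (max_packet + mdwidth) * mult + mdwidth; /* (1024 + 8) * 3 + 8 = 3104 */
    fifo_size = DIV_ROUND_UP(tmp, mdwidth);              /* 3104 / 8 = 388 entries   */

The resulting depth is accumulated into dwc->last_fifo_depth and checked against dwc->tx_fifo_size, so the sum of all resized IN endpoints can never exceed the available FIFO RAM.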
CRs-Fixed: 2039310 Change-Id: I13a590f87ab8492f7c95a15b2da9f00c9c63c4f9 Signed-off-by: Mayank Rana --- drivers/usb/dwc3/core.h | 8 +- drivers/usb/dwc3/dwc3-msm.c | 7 +- drivers/usb/dwc3/ep0.c | 30 +++++--- drivers/usb/dwc3/gadget.c | 141 ++++++++++++++++-------------------- 4 files changed, 92 insertions(+), 94 deletions(-) diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 009193ca306c..b0421528a9e7 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -590,6 +590,7 @@ struct dwc3_ep_events { * @dbg_ep_events: different events counter for endpoint * @dbg_ep_events_diff: differential events counter for endpoint * @dbg_ep_events_ts: timestamp for previous event counters + * @fifo_depth: allocated TXFIFO depth */ struct dwc3_ep { struct usb_ep endpoint; @@ -644,6 +645,7 @@ struct dwc3_ep { struct dwc3_ep_events dbg_ep_events; struct dwc3_ep_events dbg_ep_events_diff; struct timespec dbg_ep_events_ts; + int fifo_depth; }; enum dwc3_phy { @@ -905,7 +907,6 @@ struct dwc3_scratchpad_array { * @pending_events: true when we have pending IRQs to be handled * @needs_fifo_resize: not all users might want fifo resizing, flag it * @pullups_connected: true when Run/Stop bit is set - * @resize_fifos: tells us it's ok to reconfigure our TxFIFO sizes. * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround * @start_config_issued: true when StartConfig command has been issued * @three_stage_setup: set if we perform a three phase setup @@ -947,6 +948,7 @@ struct dwc3_scratchpad_array { * @vbus_draw: current to be drawn from USB * @index: dwc3 instance's number * @dwc_ipc_log_ctxt: dwc3 ipc log context + * @last_fifo_depth: total TXFIFO depth of all enabled USB IN/INT endpoints * @imod_interval: set the interrupt moderation interval in 250ns * increments or 0 to disable. 
*/ @@ -1082,7 +1084,6 @@ struct dwc3 { unsigned pending_events:1; unsigned needs_fifo_resize:1; unsigned pullups_connected:1; - unsigned resize_fifos:1; unsigned setup_packet_pending:1; unsigned three_stage_setup:1; unsigned usb3_lpm_capable:1; @@ -1142,6 +1143,7 @@ struct dwc3 { wait_queue_head_t wait_linkstate; unsigned int index; void *dwc_ipc_log_ctxt; + int last_fifo_depth; struct dwc3_gadget_events dbg_gadget_events; }; @@ -1297,7 +1299,7 @@ struct dwc3_gadget_ep_cmd_params { /* prototypes */ void dwc3_set_mode(struct dwc3 *dwc, u32 mode); u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type); -int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc); +int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep); /* check whether we are on the DWC_usb3 core */ static inline bool dwc3_is_usb3(struct dwc3 *dwc) diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index 228d8afb8a3a..d92df583dfd9 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -1127,7 +1127,8 @@ static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request) struct dwc3_gadget_ep_cmd_params params; const struct usb_endpoint_descriptor *desc = ep->desc; const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc; - u32 reg; + u32 reg; + int ret; memset(¶ms, 0x00, sizeof(params)); @@ -1175,6 +1176,10 @@ static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request) /* Set XferRsc Index for GSI EP */ if (!(dep->flags & DWC3_EP_ENABLED)) { + ret = dwc3_gadget_resize_tx_fifos(dwc, dep); + if (ret) + return; + memset(¶ms, 0x00, sizeof(params)); params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); dwc3_send_gadget_ep_cmd(dep, diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index b062d58e70a7..ec9ffc10fa71 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -588,8 +588,9 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) { enum usb_device_state state = dwc->gadget.state; u32 cfg; - int ret; + int ret, num; u32 reg; + struct dwc3_ep *dep; cfg = le16_to_cpu(ctrl->wValue); @@ -598,6 +599,24 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) return -EINVAL; case USB_STATE_ADDRESS: + /* Read ep0IN related TXFIFO size */ + dwc->last_fifo_depth = (dwc3_readl(dwc->regs, + DWC3_GTXFIFOSIZ(0)) & 0xFFFF); + /* Clear existing allocated TXFIFO for all IN eps except ep0 */ + for (num = 0; num < dwc->num_in_eps; num++) { + dep = dwc->eps[(num << 1) | 1]; + if (num) { + dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), 0); + dep->fifo_depth = 0; + } else { + dep->fifo_depth = dwc->last_fifo_depth; + } + + dev_dbg(dwc->dev, "%s(): %s dep->fifo_depth:%x\n", + __func__, dep->name, dep->fifo_depth); + dbg_event(0xFF, "fifo_reset", dep->number); + } + ret = dwc3_ep0_delegate_req(dwc, ctrl); /* if the cfg matches and the cfg is non zero */ if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) { @@ -619,9 +638,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg |= (DWC3_DCTL_ACCEPTU1ENA | DWC3_DCTL_ACCEPTU2ENA); dwc3_writel(dwc->regs, DWC3_DCTL, reg); - - dwc->resize_fifos = true; - dwc3_trace(trace_dwc3_ep0, "resize FIFOs flag SET"); } break; @@ -1080,12 +1096,6 @@ static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep) { int ret; - if (dwc->resize_fifos) { - dwc3_trace(trace_dwc3_ep0, "Resizing FIFOs"); - dwc3_gadget_resize_tx_fifos(dwc); - dwc->resize_fifos = 0; - } - ret = 
dwc3_ep0_start_control_status(dep); if (WARN_ON_ONCE(ret)) dbg_event(dep->number, "ECTRLSTATUS", ret); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index df0427c724b6..cf53d06da652 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -195,88 +195,64 @@ static void dwc3_ep_inc_deq(struct dwc3_ep *dep) * * Unfortunately, due to many variables that's not always the case. */ -int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc) -{ - int last_fifo_depth = 0; - int ram1_depth; - int fifo_size; - int mdwidth; - int num; - int num_eps; - int max_packet = 1024; - struct usb_composite_dev *cdev = get_gadget_data(&dwc->gadget); - - if (!(cdev && cdev->config) || !dwc->needs_fifo_resize) +int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep) +{ + int fifo_size, mdwidth, max_packet = 1024; + int tmp, mult = 1; + + if (!dwc->needs_fifo_resize) return 0; - num_eps = dwc->num_in_eps; - ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7); - mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); + /* resize IN endpoints excepts ep0 */ + if (!usb_endpoint_dir_in(dep->endpoint.desc) || + dep->endpoint.ep_num == 0) + return 0; + /* Don't resize already resized IN endpoint */ + if (dep->fifo_depth) { + dev_dbg(dwc->dev, "%s fifo_depth:%d is already set\n", + dep->endpoint.name, dep->fifo_depth); + return 0; + } + + mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); /* MDWIDTH is represented in bits, we need it in bytes */ mdwidth >>= 3; - last_fifo_depth = (dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0)) & 0xFFFF); - dev_dbg(dwc->dev, "%s: num eps:%d max_packet:%d last_fifo_depth:%04x\n", - __func__, num_eps, max_packet, last_fifo_depth); - - /* Don't resize ep0IN TxFIFO, start with ep1IN only. */ - for (num = 1; num < num_eps; num++) { - /* bit0 indicates direction; 1 means IN ep */ - struct dwc3_ep *dep = dwc->eps[(num << 1) | 1]; - int mult = 1; - int tmp; - - tmp = max_packet + mdwidth; - /* - * Interfaces like MBIM or ECM is having multiple data - * interfaces. SET_CONFIG() happens before set_alt with - * data interface 1 which results into calling this API - * before GSI endpoint enabled. This results no txfifo - * resize with GSI endpoint causing low throughput. Hence - * use mult as 3 for GSI IN endpoint always irrespective - * USB speed. - */ - if (dep->endpoint.ep_type == EP_TYPE_GSI || - dep->endpoint.endless) - mult = 3; - - if (!(dep->flags & DWC3_EP_ENABLED)) { - dev_dbg(dwc->dev, "ep%dIn not enabled", num); - goto resize_fifo; - } - - if (((dep->endpoint.maxburst > 1) && - usb_endpoint_xfer_bulk(dep->endpoint.desc)) - || usb_endpoint_xfer_isoc(dep->endpoint.desc)) - mult = 3; - -resize_fifo: - tmp *= mult; - tmp += mdwidth; - - fifo_size = DIV_ROUND_UP(tmp, mdwidth); - - fifo_size |= (last_fifo_depth << 16); - - dev_dbg(dwc->dev, "%s: Fifo Addr %04x Size %d", - dep->name, last_fifo_depth, fifo_size & 0xffff); - - last_fifo_depth += (fifo_size & 0xffff); - if (dwc->tx_fifo_size && - (last_fifo_depth >= dwc->tx_fifo_size)) { - /* - * Fifo size allocated exceeded available RAM size. - * Hence return error. 
- */ - dev_err(dwc->dev, "Fifosize(%d) > available RAM(%d)\n", - last_fifo_depth, dwc->tx_fifo_size); - return -ENOMEM; - } - - dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size); + if (dep->endpoint.ep_type == EP_TYPE_GSI || dep->endpoint.endless) + mult = 3; + + if (((dep->endpoint.maxburst > 1) && + usb_endpoint_xfer_bulk(dep->endpoint.desc)) + || usb_endpoint_xfer_isoc(dep->endpoint.desc)) + mult = 3; + + tmp = ((max_packet + mdwidth) * mult) + mdwidth; + fifo_size = DIV_ROUND_UP(tmp, mdwidth); + dep->fifo_depth = fifo_size; + fifo_size |= (dwc->last_fifo_depth << 16); + dwc->last_fifo_depth += (fifo_size & 0xffff); + + dev_dbg(dwc->dev, "%s ep_num:%d last_fifo_depth:%04x fifo_depth:%d\n", + dep->endpoint.name, dep->endpoint.ep_num, dwc->last_fifo_depth, + dep->fifo_depth); + + dbg_event(0xFF, "resize_fifo", dep->number); + dbg_event(0xFF, "fifo_depth", dep->fifo_depth); + /* Check fifo size allocation doesn't exceed available RAM size. */ + if (dwc->tx_fifo_size && + ((dwc->last_fifo_depth * mdwidth) >= dwc->tx_fifo_size)) { + dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n", + (dwc->last_fifo_depth * mdwidth), dwc->tx_fifo_size, + dep->endpoint.name, fifo_size); + dwc->last_fifo_depth -= (fifo_size & 0xffff); + dep->fifo_depth = 0; + WARN_ON(1); + return -ENOMEM; } + dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->endpoint.ep_num), + fifo_size); return 0; } @@ -691,6 +667,17 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name); if (!(dep->flags & DWC3_EP_ENABLED)) { + dep->endpoint.desc = desc; + dep->comp_desc = comp_desc; + dep->type = usb_endpoint_type(desc); + ret = dwc3_gadget_resize_tx_fifos(dwc, dep); + if (ret) { + dep->endpoint.desc = NULL; + dep->comp_desc = NULL; + dep->type = 0; + return ret; + } + ret = dwc3_gadget_start_config(dwc, dep); if (ret) { dev_err(dwc->dev, "start_config() failed for %s\n", @@ -710,9 +697,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, struct dwc3_trb *trb_st_hw; struct dwc3_trb *trb_link; - dep->endpoint.desc = desc; - dep->comp_desc = comp_desc; - dep->type = usb_endpoint_type(desc); dep->flags |= DWC3_EP_ENABLED; reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); @@ -2986,9 +2970,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) dwc3_stop_active_transfers(dwc); dwc3_clear_stall_all_ep(dwc); - /* bus reset issued due to missing status stage of a control transfer */ - dwc->resize_fifos = 0; - /* Reset device address to zero */ reg = dwc3_readl(dwc->regs, DWC3_DCFG); reg &= ~(DWC3_DCFG_DEVADDR_MASK); -- GitLab From 5cd59a081a28e6c8523fcdc9e740ebde47c61572 Mon Sep 17 00:00:00 2001 From: Dhaval Patel Date: Tue, 13 Jun 2017 16:29:40 -0700 Subject: [PATCH 014/786] drm/msm: support multi-display bw vote and top config Display RSC will be used as primary resource state coordinator to vote for multi-display bandwidth. It should allow any client to change the vote. The sde top configuration is for DSI displays and should not be touched by DP or HDMI interfaces. This patch fix the support multi-display from sde side. 
Change-Id: I93c774a872e7369857c0a6b97f6d221b6fd25b40 Signed-off-by: Dhaval Patel --- drivers/gpu/drm/msm/sde/sde_encoder.c | 11 ++++++----- drivers/gpu/drm/msm/sde_rsc.c | 17 ----------------- 2 files changed, 6 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 5ccd38561130..022c03b73ed4 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -354,6 +354,7 @@ void sde_encoder_helper_split_config( struct split_pipe_cfg cfg = { 0 }; struct sde_hw_mdp *hw_mdptop; enum sde_rm_topology_name topology; + struct msm_display_info *disp_info; if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) { SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0); @@ -362,6 +363,10 @@ void sde_encoder_helper_split_config( sde_enc = to_sde_encoder_virt(phys_enc->parent); hw_mdptop = phys_enc->hw_mdptop; + disp_info = &sde_enc->disp_info; + + if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI) + return; /** * disable split modes since encoder will be operating in as the only @@ -986,15 +991,11 @@ static int sde_encoder_update_rsc_client( struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *drm_enc) { struct sde_encoder_virt *sde_enc; - struct msm_display_info *disp_info; if (!drm_enc) return NULL; - sde_enc = to_sde_encoder_virt(drm_enc); - disp_info = &sde_enc->disp_info; - - return disp_info->is_primary ? sde_enc->rsc_client : NULL; + return sde_enc->rsc_client; } static void _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc, diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c index ac7996833622..91f858a430ba 100644 --- a/drivers/gpu/drm/msm/sde_rsc.c +++ b/drivers/gpu/drm/msm/sde_rsc.c @@ -667,8 +667,6 @@ int sde_rsc_client_vote(struct sde_rsc_client *caller_client, { int rc = 0; struct sde_rsc_priv *rsc; - bool amc_mode = false; - enum rpmh_state state; if (!caller_client) { pr_err("invalid client for ab/ib vote\n"); @@ -682,11 +680,6 @@ int sde_rsc_client_vote(struct sde_rsc_client *caller_client, if (!rsc) return -EINVAL; - if (caller_client != rsc->primary_client) { - pr_err("only primary client can use sde rsc:: curr client name:%s\n", - caller_client->name); - return -EINVAL; - } pr_debug("client:%s ab:%llu ib:%llu\n", caller_client->name, ab_vote, ib_vote); @@ -695,16 +688,6 @@ int sde_rsc_client_vote(struct sde_rsc_client *caller_client, if (rc) goto clk_enable_fail; - if (rsc->hw_ops.is_amc_mode) - amc_mode = rsc->hw_ops.is_amc_mode(rsc); - - if (rsc->current_state == SDE_RSC_CMD_STATE) - state = RPMH_WAKE_ONLY_STATE; - else if (amc_mode) - state = RPMH_ACTIVE_ONLY_STATE; - else - state = RPMH_AWAKE_STATE; - if (rsc->hw_ops.tcs_wait) { rc = rsc->hw_ops.tcs_wait(rsc); if (rc) { -- GitLab From 9f484acbb3c3c9305b077e7315ea35be66a24598 Mon Sep 17 00:00:00 2001 From: Vara Reddy Date: Wed, 14 Jun 2017 15:20:43 -0700 Subject: [PATCH 015/786] ARM: dts: msm: make DSC command mode as default panel for SDM845 Setting truly 2.5k DSC with single port configuration using DSI1 control path as the default panel for CDP and MTP boards that has 2.5K display panels. 
Change-Id: I1b5dd5ea20fa3be63c92bb798de6048865705a8a Signed-off-by: Vara Reddy --- arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts | 2 +- arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts | 2 +- arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi | 2 +- arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts index 4b7a68078767..e9b71b9a0f01 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts @@ -22,7 +22,7 @@ qcom,board-id = <1 1>; }; -&dsi_dual_nt35597_truly_cmd_display { +&dsi_nt35597_truly_dsc_cmd_display { /delete-property/ qcom,dsi-display-active; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts index fcf6ad1c7600..73df0711bd02 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts @@ -22,7 +22,7 @@ qcom,board-id = <8 1>; }; -&dsi_dual_nt35597_truly_cmd_display { +&dsi_nt35597_truly_dsc_cmd_display { /delete-property/ qcom,dsi-display-active; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi index 5e370d64ee48..a1e0e4fe2ede 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi @@ -298,7 +298,7 @@ aliases { qcom,platform-reset-gpio = <&tlmm 6 0>; }; -&dsi_dual_nt35597_truly_cmd_display { +&dsi_nt35597_truly_dsc_cmd_display { qcom,dsi-display-active; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi index b5c471fe5d61..85bec5706643 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi @@ -150,7 +150,7 @@ qcom,platform-reset-gpio = <&tlmm 6 0>; }; -&dsi_dual_nt35597_truly_cmd_display { +&dsi_nt35597_truly_dsc_cmd_display { qcom,dsi-display-active; }; -- GitLab From 09e0e25d15dd2aa9aef87a6aba7526be67875abb Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Mon, 20 Mar 2017 14:51:29 -0700 Subject: [PATCH 016/786] msm: ipa: support aggregated ipa stats query add support on wan-driver to query modem or wlan-fw to get the total data usage for all tethered clients. 
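The new rmnet_ipa_query_tethering_stats_all() helper picks the backhaul (wlan or modem) from the upstream interface name, runs the existing per-backhaul query, and folds the IPv4 and IPv6 counters into single totals for user space. Roughly, with field names as in the patch and error handling omitted:

    struct wan_ioctl_query_tether_stats stats = { 0 };

    /* query the selected backhaul, e.g. the wifi path */
    rmnet_ipa_query_tethering_stats_wifi(&stats, data->reset_stats);

    /* report aggregate per-direction byte counts */
    data->tx_bytes = stats.ipv4_tx_bytes + stats.ipv6_tx_bytes;
    data->rx_bytes = stats.ipv4_rx_bytes + stats.ipv6_rx_bytes;

The patch also adds IPA_QUOTA_REACH and IPA_SSR_BEFORE_SHUTDOWN / IPA_SSR_AFTER_POWERUP messages so user space is notified of quota-reached and subsystem-restart events.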
Change-Id: I56f40f1c0f6b2ec4279e78b3aeb81c687d08bf2e Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 3 + .../platform/msm/ipa/ipa_v2/ipa_qmi_service.h | 3 + drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 2 +- drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 97 +++++++++++++++++- .../msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c | 30 ++++++ drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 3 + .../platform/msm/ipa/ipa_v3/ipa_qmi_service.h | 3 + drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 2 +- drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 99 ++++++++++++++++++- .../msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c | 29 ++++++ include/uapi/linux/msm_ipa.h | 14 ++- include/uapi/linux/rmnet_ipa_fd_ioctl.h | 17 +++- 12 files changed, 288 insertions(+), 14 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c index f935bab96e56..e8710a65e813 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c @@ -80,6 +80,9 @@ const char *ipa_event_name[] = { __stringify(ECM_DISCONNECT), __stringify(IPA_TETHERING_STATS_UPDATE_STATS), __stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS), + __stringify(IPA_QUOTA_REACH), + __stringify(IPA_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_SSR_AFTER_POWERUP), }; const char *ipa_hdr_l2_type_name[] = { diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h index 67dd031316ed..4c504f1991b3 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h @@ -147,6 +147,9 @@ int rmnet_ipa_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, bool reset); +int rmnet_ipa_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data); + int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data); int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c index bec426494acf..b833b1be2b1c 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c @@ -878,7 +878,7 @@ int ipa2_get_ep_mapping(enum ipa_client_type client) void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink) { - if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) { + if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) { IPAERR("Bad client number! client =%d\n", client); } else if (index >= IPA_MAX_NUM_PIPES || index < 0) { IPAERR("Bad pipe index! 
index =%d\n", index); diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index bcd602cdb0e1..29766fb84dc8 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -2338,6 +2338,29 @@ static struct platform_driver rmnet_ipa_driver = { .remove = ipa_wwan_remove, }; +/** + * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification + * + * This function sends the SSR notification before modem shutdown and + * after_powerup from SSR framework, to user-space module + */ +static void rmnet_ipa_send_ssr_notification(bool ssr_done) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + if (ssr_done) + msg_meta.msg_type = IPA_SSR_AFTER_POWERUP; + else + msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + static int ssr_notifier_cb(struct notifier_block *this, unsigned long code, void *data) @@ -2345,6 +2368,8 @@ static int ssr_notifier_cb(struct notifier_block *this, if (ipa_rmnet_ctx.ipa_rmnet_ssr) { if (code == SUBSYS_BEFORE_SHUTDOWN) { pr_info("IPA received MPSS BEFORE_SHUTDOWN\n"); + /* send SSR before-shutdown notification to IPACM */ + rmnet_ipa_send_ssr_notification(false); atomic_set(&is_ssr, 1); ipa_q6_pre_shutdown_cleanup(); if (ipa_netdevs[0]) @@ -2519,6 +2544,26 @@ static void rmnet_ipa_get_network_stats_and_update(void) } } +/** + * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from + * IPA Modem + * This function sends the quota_reach indication from the IPA Modem driver + * via QMI, to user-space module + */ +static void rmnet_ipa_send_quota_reach_ind(void) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_QUOTA_REACH; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + /** * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler * @data - IOCTL data @@ -2808,10 +2853,6 @@ int rmnet_ipa_query_tethering_stats_modem( kfree(req); kfree(resp); return rc; - } else if (reset) { - kfree(req); - kfree(resp); - return 0; } if (resp->dl_dst_pipe_stats_list_valid) { @@ -2947,6 +2988,49 @@ int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, return rc; } +int rmnet_ipa_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data) +{ + struct wan_ioctl_query_tether_stats tether_stats; + enum ipa_upstream_type upstream_type; + int rc = 0; + + memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa_query_tethering_stats_wifi( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } else { + IPAWANDBG_LOW(" query modem-backhaul stats\n"); + tether_stats.ipa_client = data->ipa_client; + rc = rmnet_ipa_query_tethering_stats_modem( + &tether_stats, 
data->reset_stats); + if (rc) { + IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } + return rc; +} + int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) { enum ipa_upstream_type upstream_type; @@ -3048,6 +3132,8 @@ void ipa_broadcast_quota_reach_ind(u32 mux_id, IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n", alert_msg, iface_name_l, iface_name_m); kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp); + + rmnet_ipa_send_quota_reach_ind(); } /** @@ -3072,6 +3158,9 @@ void ipa_q6_handshake_complete(bool ssr_bootup) */ ipa2_proxy_clk_unvote(); + /* send SSR power-up notification to IPACM */ + rmnet_ipa_send_ssr_notification(true); + /* * It is required to recover the network stats after * SSR recovery diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c index 436cf21bb6ba..793529d71593 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c @@ -47,6 +47,10 @@ #define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \ WAN_IOCTL_QUERY_DL_FILTER_STATS, \ compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ + compat_uptr_t) + #endif static unsigned int dev_num = 1; @@ -242,6 +246,32 @@ static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; + case WAN_IOC_QUERY_TETHER_STATS_ALL: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS_ALL :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats_all); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa_query_tethering_stats_all( + (struct wan_ioctl_query_tether_stats_all *)param)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((u8 *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case WAN_IOC_RESET_TETHER_STATS: IPAWANDBG("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n", DRIVER_NAME); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index 1ee8ec8b8b3b..fc53bd6a4eb6 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -61,6 +61,9 @@ const char *ipa3_event_name[] = { __stringify(ECM_DISCONNECT), __stringify(IPA_TETHERING_STATS_UPDATE_STATS), __stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS), + __stringify(IPA_QUOTA_REACH), + __stringify(IPA_SSR_BEFORE_SHUTDOWN), + __stringify(IPA_SSR_AFTER_POWERUP), }; const char *ipa3_hdr_l2_type_name[] = { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h index 6cd82f84bf13..d5d850309696 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h @@ -190,6 +190,9 @@ int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, bool reset); +int rmnet_ipa3_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data); + int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data); 
int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 079481d5141a..845edc2d16a1 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -2200,7 +2200,7 @@ u8 ipa3_get_qmb_master_sel(enum ipa_client_type client) void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink) { - if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) { + if (client > IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) { IPAERR("Bad client number! client =%d\n", client); } else if (index >= IPA3_MAX_NUM_PIPES || index < 0) { IPAERR("Bad pipe index! index =%d\n", index); diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index f408f23536cd..fcaabe3fb48f 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -2442,6 +2442,29 @@ static struct platform_driver rmnet_ipa_driver = { .remove = ipa3_wwan_remove, }; +/** + * rmnet_ipa_send_ssr_notification(bool ssr_done) - send SSR notification + * + * This function sends the SSR notification before modem shutdown and + * after_powerup from SSR framework, to user-space module + */ +static void rmnet_ipa_send_ssr_notification(bool ssr_done) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + if (ssr_done) + msg_meta.msg_type = IPA_SSR_AFTER_POWERUP; + else + msg_meta.msg_type = IPA_SSR_BEFORE_SHUTDOWN; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + static int ipa3_ssr_notifier_cb(struct notifier_block *this, unsigned long code, void *data) @@ -2452,6 +2475,8 @@ static int ipa3_ssr_notifier_cb(struct notifier_block *this, switch (code) { case SUBSYS_BEFORE_SHUTDOWN: IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n"); + /* send SSR before-shutdown notification to IPACM */ + rmnet_ipa_send_ssr_notification(false); atomic_set(&rmnet_ipa3_ctx->is_ssr, 1); ipa3_q6_pre_shutdown_cleanup(); if (IPA_NETDEV()) @@ -2627,6 +2652,26 @@ static void rmnet_ipa_get_network_stats_and_update(void) } } +/** + * rmnet_ipa_send_quota_reach_ind() - send quota_reach notification from + * IPA Modem + * This function sends the quota_reach indication from the IPA Modem driver + * via QMI, to user-space module + */ +static void rmnet_ipa_send_quota_reach_ind(void) +{ + struct ipa_msg_meta msg_meta; + int rc; + + memset(&msg_meta, 0, sizeof(struct ipa_msg_meta)); + msg_meta.msg_type = IPA_QUOTA_REACH; + rc = ipa_send_msg(&msg_meta, NULL, NULL); + if (rc) { + IPAWANERR("ipa_send_msg failed: %d\n", rc); + return; + } +} + /** * rmnet_ipa3_poll_tethering_stats() - Tethering stats polling IOCTL handler * @data - IOCTL data @@ -2908,7 +2953,7 @@ static int rmnet_ipa3_query_tethering_stats_modem( IPAWANERR("reset the pipe stats\n"); } else { /* print tethered-client enum */ - IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client); + IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client); } rc = ipa3_qmi_get_data_stats(req, resp); @@ -2917,10 +2962,6 @@ static int rmnet_ipa3_query_tethering_stats_modem( kfree(req); kfree(resp); return rc; - } else if (reset) { - kfree(req); - kfree(resp); - return 0; } if (resp->dl_dst_pipe_stats_list_valid) { @@ -3058,6 +3099,49 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data, return rc; } +int 
rmnet_ipa3_query_tethering_stats_all( + struct wan_ioctl_query_tether_stats_all *data) +{ + struct wan_ioctl_query_tether_stats tether_stats; + enum ipa_upstream_type upstream_type; + int rc = 0; + + memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats)); + /* get IPA backhaul type */ + upstream_type = find_upstream_type(data->upstreamIface); + + if (upstream_type == IPA_UPSTEAM_MAX) { + IPAWANERR(" Wrong upstreamIface name %s\n", + data->upstreamIface); + } else if (upstream_type == IPA_UPSTEAM_WLAN) { + IPAWANDBG_LOW(" query wifi-backhaul stats\n"); + rc = rmnet_ipa3_query_tethering_stats_wifi( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR("wlan WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } else { + IPAWANDBG_LOW(" query modem-backhaul stats\n"); + tether_stats.ipa_client = data->ipa_client; + rc = rmnet_ipa3_query_tethering_stats_modem( + &tether_stats, data->reset_stats); + if (rc) { + IPAWANERR("modem WAN_IOC_QUERY_TETHER_STATS failed\n"); + return rc; + } + data->tx_bytes = tether_stats.ipv4_tx_bytes + + tether_stats.ipv6_tx_bytes; + data->rx_bytes = tether_stats.ipv4_rx_bytes + + tether_stats.ipv6_rx_bytes; + } + return rc; +} + int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data) { enum ipa_upstream_type upstream_type; @@ -3155,6 +3239,8 @@ void ipa3_broadcast_quota_reach_ind(u32 mux_id, alert_msg, iface_name_l, iface_name_m); kobject_uevent_env(&(IPA_NETDEV()->dev.kobj), KOBJ_CHANGE, envp); + + rmnet_ipa_send_quota_reach_ind(); } /** @@ -3179,6 +3265,9 @@ void ipa3_q6_handshake_complete(bool ssr_bootup) */ ipa3_proxy_clk_unvote(); + /* send SSR power-up notification to IPACM */ + rmnet_ipa_send_ssr_notification(true); + /* * It is required to recover the network stats after * SSR recovery diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c index 3ef17f6ae7ef..c7a61868ebe0 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c @@ -47,6 +47,9 @@ #define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \ WAN_IOCTL_QUERY_DL_FILTER_STATS, \ compat_uptr_t) +#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ + compat_uptr_t) #endif static unsigned int dev_num = 1; @@ -265,6 +268,32 @@ static long ipa3_wan_ioctl(struct file *filp, } break; + case WAN_IOC_QUERY_TETHER_STATS_ALL: + IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS_ALL :>>>\n"); + pyld_sz = sizeof(struct wan_ioctl_query_tether_stats_all); + param = kzalloc(pyld_sz, GFP_KERNEL); + if (!param) { + retval = -ENOMEM; + break; + } + if (copy_from_user(param, (u8 __user *)arg, pyld_sz)) { + retval = -EFAULT; + break; + } + + if (rmnet_ipa3_query_tethering_stats_all( + (struct wan_ioctl_query_tether_stats_all *)param)) { + IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n"); + retval = -EFAULT; + break; + } + + if (copy_to_user((void __user *)arg, param, pyld_sz)) { + retval = -EFAULT; + break; + } + break; + case WAN_IOC_RESET_TETHER_STATS: IPAWANDBG_LOW("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n", DRIVER_NAME); diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 57c2ca429588..d31b6aea3ec9 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -431,10 +431,20 @@ enum 
ipa_tethering_stats_event { IPA_TETHERING_STATS_UPDATE_STATS = IPA_ECM_EVENT_MAX, IPA_TETHERING_STATS_UPDATE_NETWORK_STATS, IPA_TETHERING_STATS_EVENT_MAX, - IPA_EVENT_MAX_NUM = IPA_TETHERING_STATS_EVENT_MAX }; -#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM) +enum ipa_quota_event { + IPA_QUOTA_REACH = IPA_TETHERING_STATS_EVENT_MAX, + IPA_QUOTA_EVENT_MAX, +}; + +enum ipa_ssr_event { + IPA_SSR_BEFORE_SHUTDOWN = IPA_QUOTA_EVENT_MAX, + IPA_SSR_AFTER_POWERUP, + IPA_SSR_EVENT_MAX +}; + +#define IPA_EVENT_MAX_NUM ((int)IPA_SSR_EVENT_MAX) /** * enum ipa_rm_resource_name - IPA RM clients identification names diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h index 228bfe8274c6..f04ac495a5c0 100644 --- a/include/uapi/linux/rmnet_ipa_fd_ioctl.h +++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -32,6 +32,7 @@ #define WAN_IOCTL_RESET_TETHER_STATS 7 #define WAN_IOCTL_QUERY_DL_FILTER_STATS 8 #define WAN_IOCTL_ADD_FLT_RULE_EX 9 +#define WAN_IOCTL_QUERY_TETHER_STATS_ALL 10 /* User space may not have this defined. */ #ifndef IFNAMSIZ @@ -99,6 +100,16 @@ struct wan_ioctl_query_tether_stats { uint64_t ipv6_rx_bytes; }; +struct wan_ioctl_query_tether_stats_all { + /* Name of the upstream interface */ + char upstreamIface[IFNAMSIZ]; + /* enum of tether interface */ + enum ipacm_client_enum ipa_client; + uint8_t reset_stats; + uint64_t tx_bytes; + uint64_t rx_bytes; +}; + struct wan_ioctl_reset_tether_stats { /* Name of the upstream interface, not support now */ char upstreamIface[IFNAMSIZ]; @@ -155,4 +166,8 @@ struct wan_ioctl_query_dl_filter_stats { WAN_IOCTL_ADD_FLT_RULE_EX, \ struct ipa_install_fltr_rule_req_ex_msg_v01 *) +#define WAN_IOC_QUERY_TETHER_STATS_ALL _IOWR(WAN_IOC_MAGIC, \ + WAN_IOCTL_QUERY_TETHER_STATS_ALL, \ + struct wan_ioctl_query_tether_stats_all *) + #endif /* _RMNET_IPA_FD_IOCTL_H */ -- GitLab From 30684866e2696ef141abc16dd413faf09ec361cd Mon Sep 17 00:00:00 2001 From: Puja Gupta Date: Thu, 8 Jun 2017 16:17:00 -0700 Subject: [PATCH 017/786] ARM: dts: msm: Enable avb 2.0 verity for vendor partition for SDM845 Update fsmgr_flags for vendor partition to have verity enabled. Also add vbmeta entry for avb2.0. 
Change-Id: I51d6aa939cb28f4b06de38abdc8af4a9bf0d44e4 Signed-off-by: Puja Gupta --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 6284361fa3de..c56447840313 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -462,6 +462,11 @@ firmware: firmware { android { compatible = "android,firmware"; + vbmeta { + compatible = "android,vbmeta"; + parts = "vbmeta,boot,system,vendor,dtbo"; + }; + fstab { compatible = "android,fstab"; vendor { @@ -469,7 +474,7 @@ dev = "/dev/block/platform/soc/1d84000.ufshc/by-name/vendor"; type = "ext4"; mnt_flags = "ro,barrier=1,discard"; - fsmgr_flags = "wait,slotselect"; + fsmgr_flags = "wait,slotselect,avb"; }; }; }; -- GitLab From 2b3da4a6cae657b4b5595f63f4352430fd061929 Mon Sep 17 00:00:00 2001 From: Ganesh Mahendran Date: Thu, 25 May 2017 15:20:29 +0800 Subject: [PATCH 018/786] ANDROID: uid_sys_stats: check previous uid_entry before call find_or_register_uid Theads in a process are stored in list struct task_struct->thread_group, so it will be visited continiously in below loop: do_each_thread(temp, task) { ... } while_each_thread(temp, task); I add some log in the loop, we can see below information: [ 65.033561] uid 1000, uid_entry ffffffc0f2761600 [ 65.033567] uid 1000, uid_entry ffffffc0f2761600 [ 65.033574] uid 1000, uid_entry ffffffc0f2761600 [ 65.033581] uid 1000, uid_entry ffffffc0f2761600 [ 65.033588] uid 1000, uid_entry ffffffc0f2761600 [ 65.033595] uid 1000, uid_entry ffffffc0f2761600 [ 65.033602] uid 1000, uid_entry ffffffc0f2761600 [ 65.033609] uid 1000, uid_entry ffffffc0f2761600 [ 65.033615] uid 1000, uid_entry ffffffc0f2761600 [ 65.033622] uid 1000, uid_entry ffffffc0f2761600 [ 65.033629] uid 1000, uid_entry ffffffc0f2761600 [ 65.033637] uid 1000, uid_entry ffffffc0f2761600 [ 65.033644] uid 1000, uid_entry ffffffc0f2761600 [ 65.033651] uid 1000, uid_entry ffffffc0f2761600 [ 65.033658] uid 1000, uid_entry ffffffc0f2761600 [ 65.033665] uid 1000, uid_entry ffffffc0f2761600 [ 65.033672] uid 1000, uid_entry ffffffc0f2761600 [ 65.033680] uid 1000, uid_entry ffffffc0f2761600 [ 65.033687] uid 1000, uid_entry ffffffc0f2761600 [ 65.033694] uid 1000, uid_entry ffffffc0f2761600 [ 65.033701] uid 1000, uid_entry ffffffc0f2761600 [ 65.033708] uid 1000, uid_entry ffffffc0f2761600 [ 65.033715] uid 1000, uid_entry ffffffc0f2761600 [ 65.033722] uid 1000, uid_entry ffffffc0f2761600 [ 65.033729] uid 1000, uid_entry ffffffc0f2761600 [ 65.033736] uid 1000, uid_entry ffffffc0f2761600 [ 65.033743] uid 1000, uid_entry ffffffc0f2761600 [ 65.033750] uid 1000, uid_entry ffffffc0f2761600 [ 65.033757] uid 1000, uid_entry ffffffc0f2761600 [ 65.033763] uid 1000, uid_entry ffffffc0f2761600 [ 65.033770] uid 1000, uid_entry ffffffc0f2761600 [ 65.033777] uid 1000, uid_entry ffffffc0f2761600 [ 65.033784] uid 1000, uid_entry ffffffc0f2761600 [ 65.033791] uid 1000, uid_entry ffffffc0f2761600 [ 65.033798] uid 1000, uid_entry ffffffc0f2761600 So we can check the previous uid_entry before calling find_or_register_uid to save time. 
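A condensed sketch of the pattern applied in the hunks below (illustrative fragment, not standalone code): because threads of one process are adjacent in the do_each_thread() walk, the uid_entry found for the previous task can usually be reused instead of searching the hash for every thread.

	struct uid_entry *uid_entry = NULL;

	do_each_thread(temp, task) {
		uid_t uid = from_kuid_munged(user_ns, task_uid(task));

		if (!uid_entry || uid_entry->uid != uid)
			uid_entry = find_or_register_uid(uid);
		if (!uid_entry)
			continue;	/* lookup/registration failed */
		/* accumulate this task's cputime or I/O stats here */
	} while_each_thread(temp, task);
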
Change-Id: I05ec1a1405a80c0a620cb4b4b2f6483dbfde7829 Signed-off-by: Ganesh Mahendran --- drivers/misc/uid_sys_stats.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c index 871040e17b6b..8bf4c57dba5c 100644 --- a/drivers/misc/uid_sys_stats.c +++ b/drivers/misc/uid_sys_stats.c @@ -95,7 +95,7 @@ static struct uid_entry *find_or_register_uid(uid_t uid) static int uid_cputime_show(struct seq_file *m, void *v) { - struct uid_entry *uid_entry; + struct uid_entry *uid_entry = NULL; struct task_struct *task, *temp; struct user_namespace *user_ns = current_user_ns(); cputime_t utime; @@ -113,7 +113,8 @@ static int uid_cputime_show(struct seq_file *m, void *v) read_lock(&tasklist_lock); do_each_thread(temp, task) { uid = from_kuid_munged(user_ns, task_uid(task)); - uid_entry = find_or_register_uid(uid); + if (!uid_entry || uid_entry->uid != uid) + uid_entry = find_or_register_uid(uid); if (!uid_entry) { read_unlock(&tasklist_lock); rt_mutex_unlock(&uid_lock); @@ -252,7 +253,7 @@ static void compute_uid_io_bucket_stats(struct io_stats *io_bucket, static void update_io_stats_all_locked(void) { - struct uid_entry *uid_entry; + struct uid_entry *uid_entry = NULL; struct task_struct *task, *temp; struct user_namespace *user_ns = current_user_ns(); unsigned long bkt; @@ -265,7 +266,8 @@ static void update_io_stats_all_locked(void) rcu_read_lock(); do_each_thread(temp, task) { uid = from_kuid_munged(user_ns, task_uid(task)); - uid_entry = find_or_register_uid(uid); + if (!uid_entry || uid_entry->uid != uid) + uid_entry = find_or_register_uid(uid); if (!uid_entry) continue; add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR); -- GitLab From d27d963c1933586924664c635f5fe73decc32521 Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Wed, 14 Jun 2017 09:57:26 -0400 Subject: [PATCH 019/786] drm/msm/sde: get previous inline fbo during atomic check The intermediate buffer for inline rotation can't be allocated until the plane's "prepare" callback, but attempt to acquire a previously allocated intermediate buffer during the atomic check instead of waiting until the prepare callback. This holds an extra reference count on the buffer so that it persists past the end of the atomic check phase. CRs-Fixed: 2062021 Change-Id: I28c4a50f3272b41cebcdef05b0aaeabf5dc494b7 Signed-off-by: Clarence Ip --- drivers/gpu/drm/msm/sde/sde_plane.c | 77 +++++++++++++++++++---------- 1 file changed, 52 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index b295cd09cd45..3eb45188ff7d 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -1623,7 +1623,12 @@ static void sde_plane_rot_calc_cfg(struct drm_plane *plane, attached_pstate = to_sde_plane_state(attached_state); attached_rstate = &attached_pstate->rot; - if (attached_rstate->rot_hw != rstate->rot_hw) + if (attached_state->fb != state->fb) + continue; + + if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) != + sde_plane_get_property(attached_pstate, + PLANE_PROP_ROTATION)) continue; found++; @@ -1891,6 +1896,46 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane, return ret; } +/** + * _sde_plane_rot_get_fb - attempt to get previously allocated fb/fbo + * If an fb/fbo was already created, either from a previous frame or + * from another plane in the current commit cycle, attempt to reuse + * it for this commit cycle as well. 
+ * @plane: Pointer to drm plane + * @cstate: Pointer to crtc state + * @rstate: Pointer to rotator plane state + */ +static void _sde_plane_rot_get_fb(struct drm_plane *plane, + struct drm_crtc_state *cstate, + struct sde_plane_rot_state *rstate) +{ + struct sde_kms_fbo *fbo; + struct drm_framebuffer *fb; + + if (!plane || !cstate || !rstate) + return; + + fbo = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FBO, + (u64) &rstate->rot_hw->base); + fb = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FB, + (u64) &rstate->rot_hw->base); + if (fb && fbo) { + SDE_DEBUG("plane%d.%d get fb/fbo\n", plane->base.id, + rstate->sequence_id); + } else if (fbo) { + sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO, + (u64) &rstate->rot_hw->base); + fbo = NULL; + } else if (fb) { + sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB, + (u64) &rstate->rot_hw->base); + fb = NULL; + } + + rstate->out_fbo = fbo; + rstate->out_fb = fb; +} + /** * sde_plane_rot_prepare_fb - prepare framebuffer of the new state * for rotator (pre-sspp) stage @@ -1927,30 +1972,8 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, sde_plane_rot_calc_cfg(plane, new_state); /* check if stream buffer is already attached to rotator */ - if (sde_plane_enabled(new_state)) { - struct sde_kms_fbo *fbo; - struct drm_framebuffer *fb; - - fbo = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FBO, - (u64) &new_rstate->rot_hw->base); - fb = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FB, - (u64) &new_rstate->rot_hw->base); - if (fb && fbo) { - SDE_DEBUG("plane%d.%d get fb/fbo\n", plane->base.id, - new_rstate->sequence_id); - } else if (fbo) { - sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO, - (u64) &new_rstate->rot_hw->base); - fbo = NULL; - } else if (fb) { - sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB, - (u64) &new_rstate->rot_hw->base); - fb = NULL; - } - - new_rstate->out_fbo = fbo; - new_rstate->out_fb = fb; - } + if (sde_plane_enabled(new_state) && !new_rstate->out_fb) + _sde_plane_rot_get_fb(plane, cstate, new_rstate); /* release buffer if output format configuration changes */ if (new_rstate->out_fb && @@ -2199,6 +2222,10 @@ static int sde_plane_rot_atomic_check(struct drm_plane *plane, sde_plane_rot_calc_cfg(plane, state); + /* attempt to reuse stream buffer if already available */ + if (sde_plane_enabled(state)) + _sde_plane_rot_get_fb(plane, cstate, rstate); + ret = sde_plane_rot_submit_command(plane, state, SDE_HW_ROT_CMD_VALIDATE); -- GitLab From 96854c2db1a49350e3a6c2d0137bd9bda8ead8ca Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Mon, 12 Jun 2017 14:32:26 -0400 Subject: [PATCH 020/786] msm: sde: simulate timestamp on skipped inline interrupt If an interrupt for regdma completion is missed for any reason, the ISR needs to indicate that more than one rotation request has been completed. This patch updates the software timestamp simulation for inline rotations to select a timestamp matching the current incoming interrupt, instead of simply selecting the timestamp of the oldest request. 
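For reference, a condensed view of the selection logic added below (illustrative fragment): each queued sbuf context owns one of two REGDMA interrupt bits, selected by the low bit of its timestamp (REGDMA_INT_0_MASK/REGDMA_INT_1_MASK in the hwio header), so the handler walks the pending list and only advances the software timestamp for contexts whose bit is actually set in the interrupt status, rather than always taking the oldest entry.

	u32 pending = isr;

	list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
		u32 mask = (tmp->timestamp & 0x1) ?
				REGDMA_INT_1_MASK : REGDMA_INT_0_MASK;

		if (pending & mask) {
			pending &= ~mask;
			sde_hw_rotator_update_swts(rot, tmp, tmp->timestamp);
		}
	}
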
CRs-Fixed: 2062038 Change-Id: I4c651f277f4ae0eeb4d26a80feb5912c5ed56e17 Signed-off-by: Clarence Ip --- .../platform/msm/sde/rotator/sde_rotator_r3.c | 28 +++++++++++++------ .../msm/sde/rotator/sde_rotator_r3_hwio.h | 4 +++ 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c index b582934c88af..aa6c5223d2f3 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c @@ -2677,9 +2677,9 @@ static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr) static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr) { struct sde_hw_rotator *rot = ptr; - struct sde_hw_rotator_context *ctx; + struct sde_hw_rotator_context *ctx, *tmp; irqreturn_t ret = IRQ_NONE; - u32 isr; + u32 isr, isr_tmp; u32 ts; u32 q_id; @@ -2716,18 +2716,28 @@ static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr) * Timestamp packet is not available in sbuf mode. * Simulate timestamp update in the handler instead. */ - if (!list_empty(&rot->sbuf_ctx[q_id])) { - ctx = list_first_entry_or_null(&rot->sbuf_ctx[q_id], - struct sde_hw_rotator_context, list); - if (ctx) { + if (list_empty(&rot->sbuf_ctx[q_id])) + goto skip_sbuf; + + ctx = NULL; + isr_tmp = isr; + list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) { + u32 mask; + + mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK : + REGDMA_INT_0_MASK; + if (isr_tmp & mask) { + isr_tmp &= ~mask; + ctx = tmp; ts = ctx->timestamp; sde_hw_rotator_update_swts(rot, ctx, ts); SDEROT_DBG("update swts:0x%X\n", ts); - } else { - SDEROT_ERR("invalid swts ctx\n"); } + SDEROT_EVTLOG(isr, tmp->timestamp); } - + if (ctx == NULL) + SDEROT_ERR("invalid swts ctx\n"); +skip_sbuf: ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK]; /* diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h index d2b81d596a27..2afd032dc6b5 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h @@ -297,4 +297,8 @@ #define REGDMA_TIMESTAMP_REG ROT_SSPP_TPG_PATTERN_GEN_INIT_VAL #define REGDMA_RESET_STATUS_REG ROT_SSPP_TPG_RGB_MAPPING +#define REGDMA_INT_0_MASK 0x101 +#define REGDMA_INT_1_MASK 0x202 +#define REGDMA_INT_2_MASK 0x404 + #endif /*_SDE_ROTATOR_R3_HWIO_H */ -- GitLab From 273eea68c1106ce60647582465eca51187b2e392 Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Thu, 15 Jun 2017 18:35:11 -0700 Subject: [PATCH 021/786] trace: ipc_logging: Fix potential dead in ipc_log_write Fix potential deadlock by unlocking spinlock when returning from an error case during ipc_log_write. 
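For context, the lock nesting involved (reconstructed from the unlock calls in the hunk below; illustrative fragment, not standalone code): ipc_log_write() runs with the context-list read lock held and then takes the per-log spinlock, so an early return on the WARN_ON() must drop both locks in reverse order or the log context is left locked.

	read_lock_irqsave(&context_list_lock_lha1, flags);
	spin_lock(&ilctxt->context_lock_lhb1);
	/* ... */
	if (WARN_ON(ilctxt->write_page == NULL)) {
		spin_unlock(&ilctxt->context_lock_lhb1);
		read_unlock_irqrestore(&context_list_lock_lha1, flags);
		return;
	}
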
CRs-Fixed: 2059308 Change-Id: If253470d720af1b42f9ebcfb98d69d4c65ab73bc Signed-off-by: Chris Lew --- kernel/trace/ipc_logging.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c index 6d310ab4d2c4..31e6a8e49c43 100644 --- a/kernel/trace/ipc_logging.c +++ b/kernel/trace/ipc_logging.c @@ -314,8 +314,11 @@ void ipc_log_write(void *ctxt, struct encode_context *ectxt) ilctxt->write_page->hdr.end_time = t_now; ilctxt->write_page = get_next_page(ilctxt, ilctxt->write_page); - if (WARN_ON(ilctxt->write_page == NULL)) + if (WARN_ON(ilctxt->write_page == NULL)) { + spin_unlock(&ilctxt->context_lock_lhb1); + read_unlock_irqrestore(&context_list_lock_lha1, flags); return; + } ilctxt->write_page->hdr.write_offset = 0; ilctxt->write_page->hdr.start_time = t_now; memcpy((ilctxt->write_page->data + -- GitLab From b263703672192f0ad80c1708ecb35977e032be75 Mon Sep 17 00:00:00 2001 From: Sayali Lokhande Date: Mon, 24 Apr 2017 13:40:50 +0530 Subject: [PATCH 022/786] mmc: core: Use mmc_reset instead of power_restore On 4.4 kernel, 'commit 364549ddc29d ("mmc: core: Remove redundant ->power_restore() callback for MMC")' removed power_restore callback for MMC since mmc_reset is implemented. Hence use reset instead of power_restore in mmc_cmdq_hw_reset. Also modify the caller function mmc_cmdq_hw_reset to properly use the mmc_reset. Change-Id: Ia06d579401b6a083b164dff7a253d1eb3caef1a3 Signed-off-by: Sayali Lokhande Signed-off-by: Vijay Viswanath --- drivers/mmc/core/core.c | 8 +++----- drivers/mmc/core/mmc.c | 25 ++++++++++++++++++++++++- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 1848cdf42fbf..cfd3b3cf2bb5 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -4152,12 +4152,10 @@ static void mmc_hw_reset_for_init(struct mmc_host *host) */ int mmc_cmdq_hw_reset(struct mmc_host *host) { - if (!host->bus_ops->power_restore) - return -EOPNOTSUPP; + if (!host->bus_ops->reset) + return -EOPNOTSUPP; - mmc_power_cycle(host, host->ocr_avail); - mmc_select_voltage(host, host->card->ocr); - return host->bus_ops->power_restore(host); + return host->bus_ops->reset(host); } EXPORT_SYMBOL(mmc_cmdq_hw_reset); diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index a36bcbbf4388..b3febabf9e79 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2960,6 +2960,7 @@ EXPORT_SYMBOL(mmc_can_reset); static int mmc_reset(struct mmc_host *host) { struct mmc_card *card = host->card; + int ret; /* * In the case of recovery, we can't expect flushing the cache to work @@ -2980,7 +2981,29 @@ static int mmc_reset(struct mmc_host *host) /* Do a brute force power cycle */ mmc_power_cycle(host, card->ocr); } - return mmc_init_card(host, card->ocr, card); + + /* Suspend clk scaling to avoid switching frequencies intermittently */ + + ret = mmc_suspend_clk_scaling(host); + if (ret) { + pr_err("%s: %s: fail to suspend clock scaling (%d)\n", + mmc_hostname(host), __func__, ret); + return ret; + } + + ret = mmc_init_card(host, host->card->ocr, host->card); + if (ret) { + pr_err("%s: %s: mmc_init_card failed (%d)\n", + mmc_hostname(host), __func__, ret); + return ret; + } + + ret = mmc_resume_clk_scaling(host); + if (ret) + pr_err("%s: %s: fail to resume clock scaling (%d)\n", + mmc_hostname(host), __func__, ret); + + return ret; } static const struct mmc_bus_ops mmc_ops = { -- GitLab From 7f5e93a4db582b89a8766327e396ed8d8fc57a6c Mon Sep 17 00:00:00 2001 From: 
Vijay Viswanath Date: Tue, 16 May 2017 10:34:21 +0530 Subject: [PATCH 023/786] mmc: block: Disable clock scaling during shutdown During shutdown of sdhc drivers, power off notification (PON) is sent to eMMC device. But it doesn't prevent the clock scaling framework from kicking in and sending commands to card after PON is sent. So exit clock scaling framework of sdhc before PON is sent. Change-Id: Ibfe097732042458965db529f235a7fd28cfabaab Signed-off-by: Vijay Viswanath --- drivers/mmc/card/block.c | 4 ---- drivers/mmc/core/mmc.c | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 9ac6568dac62..c6f34964d839 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -4664,10 +4664,6 @@ static int _mmc_blk_suspend(struct mmc_card *card, bool wait) static void mmc_blk_shutdown(struct mmc_card *card) { _mmc_blk_suspend(card, 1); - - /* send power off notification */ - if (mmc_card_mmc(card)) - mmc_send_pon(card); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index a36bcbbf4388..68d3d20fad4a 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2983,6 +2983,22 @@ static int mmc_reset(struct mmc_host *host) return mmc_init_card(host, card->ocr, card); } +static int mmc_shutdown(struct mmc_host *host) +{ + struct mmc_card *card = host->card; + + /* + * Exit clock scaling so that it doesn't kick in after + * power off notification is sent + */ + if (host->caps2 & MMC_CAP2_CLK_SCALE) + mmc_exit_clk_scaling(card->host); + /* send power off notification */ + if (mmc_card_mmc(card)) + mmc_send_pon(card); + return 0; +} + static const struct mmc_bus_ops mmc_ops = { .remove = mmc_remove, .detect = mmc_detect, @@ -2993,6 +3009,7 @@ static const struct mmc_bus_ops mmc_ops = { .alive = mmc_alive, .change_bus_speed = mmc_change_bus_speed, .reset = mmc_reset, + .shutdown = mmc_shutdown, }; /* -- GitLab From edb6da11b95c34269d21e99e3b738da1114bdb5f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 6 Jun 2017 18:38:04 +0200 Subject: [PATCH 024/786] FROMLIST: bpf: cgroup skb progs cannot access ld_abs/ind Commit fb9a307d11d6 ("bpf: Allow CGROUP_SKB eBPF program to access sk_buff") enabled programs of BPF_PROG_TYPE_CGROUP_SKB type to use ld_abs/ind instructions. However, at this point, we cannot use them, since offsets relative to SKF_LL_OFF will end up pointing skb_mac_header(skb) out of bounds since in the egress path it is not yet set at that point in time, but only after __dev_queue_xmit() did a general reset on the mac header. bpf_internal_load_pointer_neg_helper() will then end up reading data from a wrong offset. BPF_PROG_TYPE_CGROUP_SKB programs can use bpf_skb_load_bytes() already to access packet data, which is also more flexible than the insns carried over from cBPF. Fixes: fb9a307d11d6 ("bpf: Allow CGROUP_SKB eBPF program to access sk_buff") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: Chenbo Feng Signed-off-by: David S. 
Miller (url: http://patchwork.ozlabs.org/patch/771946/) Signed-off-by: Chenbo Feng Bug: 30950746 Change-Id: Ia32ac79d8c0d18f811ec101897284a8b60cb042a --- kernel/bpf/verifier.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fe158bd01dc6..44c17f47d94c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2164,7 +2164,6 @@ static bool may_access_skb(enum bpf_prog_type type) case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: - case BPF_PROG_TYPE_CGROUP_SKB: return true; default: return false; -- GitLab From 2c4f58af89edfb798e2212618a5c4337010dd431 Mon Sep 17 00:00:00 2001 From: Chenbo Feng Date: Fri, 9 Jun 2017 12:06:07 -0700 Subject: [PATCH 025/786] FROMLIST: ipv6: Initial skb->dev and skb->protocol in ip6_output Move the initialization of skb->dev and skb->protocol from ip6_finish_output2 to ip6_output. This can make the skb->dev and skb->protocol information avalaible to the CGROUP eBPF filter. Signed-off-by: Chenbo Feng Acked-by: Eric Dumazet Signed-off-by: David S. Miller (url: http://patchwork.ozlabs.org/patch/774124/) Bug: 30950746 Change-Id: Iac2304f7ba8cd769ee01a062cde2deb50562c3ad --- net/ipv6/ip6_output.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index d472a5fd4da4..1b2ed0ea5eb1 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -67,9 +67,6 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff * struct in6_addr *nexthop; int ret; - skb->protocol = htons(ETH_P_IPV6); - skb->dev = dev; - if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); @@ -153,6 +150,9 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) struct net_device *dev = skb_dst(skb)->dev; struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); + skb->protocol = htons(ETH_P_IPV6); + skb->dev = dev; + if (unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb(skb); -- GitLab From e22ff4d119bc7708d51af23cb78b41b8c6310515 Mon Sep 17 00:00:00 2001 From: Chenbo Feng Date: Fri, 9 Jun 2017 12:17:37 -0700 Subject: [PATCH 026/786] FROMLIST: bpf: Remove duplicate tcp_filter hook in ipv6 There are two tcp_filter hooks in tcp_ipv6 ingress path currently. One is at tcp_v6_rcv and another is in tcp_v6_do_rcv. It seems the tcp_filter() call inside tcp_v6_do_rcv is redundent and some packet will be filtered twice in this situation. This will cause trouble when using eBPF filters to account traffic data. Signed-off-by: Chenbo Feng Acked-by: Eric Dumazet Signed-off-by: David S. Miller (url: http://patchwork.ozlabs.org/patch/774126/) Bug: 30950746 Change-Id: Id4fe8cd5b7bac11a4d4141e203dd4b9fa59f3d6c --- net/ipv6/tcp_ipv6.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 368c23a44607..f54c7d3469ad 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1237,9 +1237,6 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); - if (tcp_filter(sk, skb)) - goto discard; - /* * socket locking is here for SMP purposes as backlog rcv * is currently called with bh processing disabled. 
-- GitLab From d937a80c5f34fb2e932c36cf27c82f5b0b00d05f Mon Sep 17 00:00:00 2001 From: Chenbo Feng Date: Sat, 10 Jun 2017 12:35:38 -0700 Subject: [PATCH 027/786] FROMLIST: Remove the redundant skb->dev initialization in ip6_fragment After moves the skb->dev and skb->protocol initialization into ip6_output, setting the skb->dev inside ip6_fragment is unnecessary. Fixes: 97a7a37a7b7b("ipv6: Initial skb->dev and skb->protocol in ip6_output") Signed-off-by: Chenbo Feng Signed-off-by: David S. Miller (url: http://patchwork.ozlabs.org/patch/774260/) Bug: 30950746 Change-Id: I6ab42ecca2e2ab57f2c5988edf19d584de35e007 --- net/ipv6/ip6_output.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 1b2ed0ea5eb1..7cec18062244 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -867,7 +867,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, if (skb->sk && dst_allfrag(skb_dst(skb))) sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK); - skb->dev = skb_dst(skb)->dev; icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); err = -EMSGSIZE; -- GitLab From f3a511912a7b56768101244a632c95d2841714d3 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 8 Dec 2016 19:55:22 +0800 Subject: [PATCH 028/786] usb: gadget: f_fs: Fix possibe deadlock When system try to close /dev/usb-ffs/adb/ep0 on one core, at the same time another core try to attach new UDC, which will cause deadlock as below scenario. Thus we should release ffs lock before issuing unregister_gadget_item(). [ 52.642225] c1 ====================================================== [ 52.642228] c1 [ INFO: possible circular locking dependency detected ] [ 52.642236] c1 4.4.6+ #1 Tainted: G W O [ 52.642241] c1 ------------------------------------------------------- [ 52.642245] c1 usb ffs open/2808 is trying to acquire lock: [ 52.642270] c0 (udc_lock){+.+.+.}, at: [] usb_gadget_unregister_driver+0x3c/0xc8 [ 52.642272] c1 but task is already holding lock: [ 52.642283] c0 (ffs_lock){+.+.+.}, at: [] ffs_data_clear+0x30/0x140 [ 52.642285] c1 which lock already depends on the new lock. 
[ 52.642287] c1 the existing dependency chain (in reverse order) is: [ 52.642295] c0 -> #1 (ffs_lock){+.+.+.}: [ 52.642307] c0 [] __lock_acquire+0x20f0/0x2238 [ 52.642314] c0 [] lock_acquire+0xe4/0x298 [ 52.642322] c0 [] mutex_lock_nested+0x7c/0x3cc [ 52.642328] c0 [] ffs_func_bind+0x504/0x6e8 [ 52.642334] c0 [] usb_add_function+0x84/0x184 [ 52.642340] c0 [] configfs_composite_bind+0x264/0x39c [ 52.642346] c0 [] udc_bind_to_driver+0x58/0x11c [ 52.642352] c0 [] usb_udc_attach_driver+0x90/0xc8 [ 52.642358] c0 [] gadget_dev_desc_UDC_store+0xd4/0x128 [ 52.642369] c0 [] configfs_write_file+0xd0/0x13c [ 52.642376] c0 [] vfs_write+0xb8/0x214 [ 52.642381] c0 [] SyS_write+0x54/0xb0 [ 52.642388] c0 [] el0_svc_naked+0x24/0x28 [ 52.642395] c0 -> #0 (udc_lock){+.+.+.}: [ 52.642401] c0 [] print_circular_bug+0x84/0x2e4 [ 52.642407] c0 [] __lock_acquire+0x2138/0x2238 [ 52.642412] c0 [] lock_acquire+0xe4/0x298 [ 52.642420] c0 [] mutex_lock_nested+0x7c/0x3cc [ 52.642427] c0 [] usb_gadget_unregister_driver+0x3c/0xc8 [ 52.642432] c0 [] unregister_gadget_item+0x28/0x44 [ 52.642439] c0 [] ffs_data_clear+0x138/0x140 [ 52.642444] c0 [] ffs_data_reset+0x20/0x6c [ 52.642450] c0 [] ffs_data_closed+0xac/0x12c [ 52.642454] c0 [] ffs_ep0_release+0x20/0x2c [ 52.642460] c0 [] __fput+0xb0/0x1f4 [ 52.642466] c0 [] ____fput+0x20/0x2c [ 52.642473] c0 [] task_work_run+0xb4/0xe8 [ 52.642482] c0 [] do_exit+0x360/0xb9c [ 52.642487] c0 [] do_group_exit+0x4c/0xb0 [ 52.642494] c0 [] get_signal+0x380/0x89c [ 52.642501] c0 [] do_signal+0x154/0x518 [ 52.642507] c0 [] do_notify_resume+0x70/0x78 [ 52.642512] c0 [] work_pending+0x1c/0x20 [ 52.642514] c1 other info that might help us debug this: [ 52.642517] c1 Possible unsafe locking scenario: [ 52.642518] c1 CPU0 CPU1 [ 52.642520] c1 ---- ---- [ 52.642525] c0 lock(ffs_lock); [ 52.642529] c0 lock(udc_lock); [ 52.642533] c0 lock(ffs_lock); [ 52.642537] c0 lock(udc_lock); [ 52.642539] c1 *** DEADLOCK *** [ 52.642543] c1 1 lock held by usb ffs open/2808: [ 52.642555] c0 #0: (ffs_lock){+.+.+.}, at: [] ffs_data_clear+0x30/0x140 [ 52.642557] c1 stack backtrace: [ 52.642563] c1 CPU: 1 PID: 2808 Comm: usb ffs open Tainted: G [ 52.642565] c1 Hardware name: Spreadtrum SP9860g Board (DT) [ 52.642568] c1 Call trace: [ 52.642573] c1 [] dump_backtrace+0x0/0x170 [ 52.642577] c1 [] show_stack+0x20/0x28 [ 52.642583] c1 [] dump_stack+0xa8/0xe0 [ 52.642587] c1 [] print_circular_bug+0x1fc/0x2e4 [ 52.642591] c1 [] __lock_acquire+0x2138/0x2238 [ 52.642595] c1 [] lock_acquire+0xe4/0x298 [ 52.642599] c1 [] mutex_lock_nested+0x7c/0x3cc [ 52.642604] c1 [] usb_gadget_unregister_driver+0x3c/0xc8 [ 52.642608] c1 [] unregister_gadget_item+0x28/0x44 [ 52.642613] c1 [] ffs_data_clear+0x138/0x140 [ 52.642618] c1 [] ffs_data_reset+0x20/0x6c [ 52.642621] c1 [] ffs_data_closed+0xac/0x12c [ 52.642625] c1 [] ffs_ep0_release+0x20/0x2c [ 52.642629] c1 [] __fput+0xb0/0x1f4 [ 52.642633] c1 [] ____fput+0x20/0x2c [ 52.642636] c1 [] task_work_run+0xb4/0xe8 [ 52.642640] c1 [] do_exit+0x360/0xb9c [ 52.642644] c1 [] do_group_exit+0x4c/0xb0 [ 52.642647] c1 [] get_signal+0x380/0x89c [ 52.642651] c1 [] do_signal+0x154/0x518 [ 52.642656] c1 [] do_notify_resume+0x70/0x78 [ 52.642659] c1 [] work_pending+0x1c/0x20 Bug: 62572621 Change-Id: I78adea011c3a87dfeb5c15750b7737975fda1742 Acked-by: Michal Nazarewicz Signed-off-by: Baolin Wang Signed-off-by: Felipe Balbi (cherry picked from commit b3ce3ce02d146841af012d08506b4071db8ffde3) Signed-off-by: Jerry Zhang --- drivers/usb/gadget/function/f_fs.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 
deletions(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 89081b834615..8a788d69e4d5 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3688,6 +3688,7 @@ static void ffs_closed(struct ffs_data *ffs) { struct ffs_dev *ffs_obj; struct f_fs_opts *opts; + struct config_item *ci; ENTER(); ffs_dev_lock(); @@ -3711,8 +3712,11 @@ static void ffs_closed(struct ffs_data *ffs) || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) goto done; - unregister_gadget_item(ffs_obj->opts-> - func_inst.group.cg_item.ci_parent->ci_parent); + ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; + ffs_dev_unlock(); + + unregister_gadget_item(ci); + return; done: ffs_dev_unlock(); } -- GitLab From 25afc1d161415e9ceebb51e8432ba8afd578d69c Mon Sep 17 00:00:00 2001 From: Shihuan Liu Date: Tue, 13 Jun 2017 17:57:13 -0700 Subject: [PATCH 029/786] msm: ipa: optimize state machine in ipa uc offload Optimize state machine of ipa_uc_offload.c to avoid the race condition where client driver is not allowed to de-register interface properties after registering properties. Change-Id: I5569d29c97fd11cbf81b4388589841d162977f6b Acked-by: Shihuan Liu Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c index ae06d54b8212..2dd82c12ed81 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c @@ -58,7 +58,6 @@ enum ipa_uc_offload_state { IPA_UC_OFFLOAD_STATE_INVALID, IPA_UC_OFFLOAD_STATE_INITIALIZED, IPA_UC_OFFLOAD_STATE_UP, - IPA_UC_OFFLOAD_STATE_DOWN, }; struct ipa_uc_offload_ctx { @@ -413,8 +412,7 @@ int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp, return -EINVAL; } - if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED && - offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) { + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) { IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state); return -EPERM; } @@ -471,7 +469,7 @@ static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx) int ipa_ep_idx_ul, ipa_ep_idx_dl; int ret = 0; - ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN; + ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED; ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD); if (ret) { @@ -597,7 +595,7 @@ int ipa_uc_offload_cleanup(u32 clnt_hdl) return -EINVAL; } - if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) { + if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) { IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state); return -EINVAL; } -- GitLab From c44d51bd7f720365f8a6034e51589b81fb7d9cbf Mon Sep 17 00:00:00 2001 From: Tatenda Chipeperekwa Date: Fri, 16 Jun 2017 17:12:00 -0700 Subject: [PATCH 030/786] drm: msm: dp: remove crypto clock parsing and rate setting Remove crypto clock parsing and rate setting logic from the mainlink clock enable path as the crypto clock is only required when HDCP is enabled. 
CRs-Fixed: 2020938 Change-Id: If379e158a7da099a34d34701e6bce12a1ff1a291 Signed-off-by: Tatenda Chipeperekwa --- drivers/gpu/drm/msm/dp/dp_ctrl.c | 3 --- drivers/gpu/drm/msm/dp/dp_parser.c | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 954a2fa66451..706398db2aa8 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -21,7 +21,6 @@ #include "dp_ctrl.h" #define DP_KHZ_TO_HZ 1000 -#define DP_CRYPTO_CLK_RATE_KHZ 180000 #define DP_CTRL_INTR_READY_FOR_VIDEO BIT(0) #define DP_CTRL_INTR_IDLE_PATTERN_SENT BIT(3) @@ -1034,8 +1033,6 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl) dp_ctrl_set_clock_rate(ctrl, "ctrl_link_clk", drm_dp_bw_code_to_link_rate(ctrl->link->link_rate)); - dp_ctrl_set_clock_rate(ctrl, "ctrl_crypto_clk", DP_CRYPTO_CLK_RATE_KHZ); - dp_ctrl_set_clock_rate(ctrl, "ctrl_pixel_clk", ctrl->pixel_rate); ret = ctrl->power->clk_enable(ctrl->power, DP_CTRL_PM, true); diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c index 722c4362070f..e81bbb397a81 100644 --- a/drivers/gpu/drm/msm/dp/dp_parser.c +++ b/drivers/gpu/drm/msm/dp/dp_parser.c @@ -494,8 +494,7 @@ static int dp_parser_clock(struct dp_parser *parser) ctrl_clk_index++; if (!strcmp(clk_name, "ctrl_link_clk") || - !strcmp(clk_name, "ctrl_pixel_clk") || - !strcmp(clk_name, "ctrl_crypto_clk")) + !strcmp(clk_name, "ctrl_pixel_clk")) clk->type = DSS_CLK_PCLK; else clk->type = DSS_CLK_AHB; -- GitLab From 3e451564b55bfc05a6fc7595c1ac1aac9796d264 Mon Sep 17 00:00:00 2001 From: Tatenda Chipeperekwa Date: Fri, 16 Jun 2017 16:49:37 -0700 Subject: [PATCH 031/786] ARM: dts: msm: make DisplayPort crypto clock standalone on SDM845 Make the DisplayPort crypto clock a standalone clock by removing it from the controller clock group. This change is required on SDM845 because the crypto clock enable/disable is triggered by the link clock, and the only requirement is for the clock's rate to be set. 
CRs-Fixed: 2020938 Change-Id: I7e91a8ad8c5c7a2cdfa2ae0e9521b1bbc168f67e Signed-off-by: Tatenda Chipeperekwa --- arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi index 726a63f02be8..43dd1e3366b3 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi @@ -399,15 +399,15 @@ <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>, <&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>, <&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>, - <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>, <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK>, + <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>, <&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>, <&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>; clock-names = "core_aux_clk", "core_usb_ref_clk_src", "core_usb_ref_clk", "core_usb_cfg_ahb_clk", "core_usb_pipe_clk", "ctrl_link_clk", - "ctrl_link_iface_clk", "ctrl_crypto_clk", - "ctrl_pixel_clk", "pixel_clk_rcg", "pixel_parent"; + "ctrl_link_iface_clk", "ctrl_pixel_clk", + "crypto_clk", "pixel_clk_rcg", "pixel_parent"; qcom,dp-usbpd-detection = <&pmi8998_pdphy>; -- GitLab From b372d35a522617967911b72e209d35a0c4161fe6 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 23 May 2017 14:18:17 -0500 Subject: [PATCH 032/786] PCI/PM: Add needs_resume flag to avoid suspend complete optimization commit 4d071c3238987325b9e50e33051a40d1cce311cc upstream. Some drivers - like i915 - may not support the system suspend direct complete optimization due to differences in their runtime and system suspend sequence. Add a flag that when set resumes the device before calling the driver's system suspend handlers which effectively disables the optimization. Needed by a future patch fixing suspend/resume on i915. Suggested by Rafael. Signed-off-by: Imre Deak Signed-off-by: Bjorn Helgaas Acked-by: Rafael J. Wysocki Cc: stable@vger.kernel.org (rebased on v4.8, added kernel version to commit message stable tag) Signed-off-by: Imre Deak Signed-off-by: Greg Kroah-Hartman --- drivers/pci/pci.c | 3 ++- include/linux/pci.h | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 579c4946dc6e..e7d4048e81f2 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -2142,7 +2142,8 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev) if (!pm_runtime_suspended(dev) || pci_target_state(pci_dev) != pci_dev->current_state - || platform_pci_need_resume(pci_dev)) + || platform_pci_need_resume(pci_dev) + || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) return false; /* diff --git a/include/linux/pci.h b/include/linux/pci.h index a38772a85588..1b711796d989 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -178,6 +178,11 @@ enum pci_dev_flags { PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), /* Get VPD from function 0 VPD */ PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), + /* + * Resume before calling the driver's system suspend hooks, disabling + * the direct_complete optimization. 
+ */ + PCI_DEV_FLAGS_NEEDS_RESUME = (__force pci_dev_flags_t) (1 << 11), }; enum pci_irq_reroute_variant { -- GitLab From 7a7b2d5f65a7a12c4f5d43fb10913ca908102833 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 2 May 2017 15:04:09 +0300 Subject: [PATCH 033/786] drm/i915: Prevent the system suspend complete optimization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 6ab92afc95c9bd6877cb42e7b24f65be887a5440 upstream. Since commit bac2a909a096c9110525c18cbb8ce73c660d5f71 Author: Rafael J. Wysocki Date: Wed Jan 21 02:17:42 2015 +0100 PCI / PM: Avoid resuming PCI devices during system suspend PCI devices will default to allowing the system suspend complete optimization where devices are not woken up during system suspend if they were already runtime suspended. This however breaks the i915/HDA drivers for two reasons: - The i915 driver has system suspend specific steps that it needs to run, that bring the device to a different state than its runtime suspended state. - The HDA driver's suspend handler requires power that it will request from the i915 driver's power domain handler. This in turn requires the i915 driver to runtime resume itself, but this won't be possible if the suspend complete optimization is in effect: in this case the i915 runtime PM is disabled and trying to get an RPM reference returns -EACCESS. Solve this by requiring the PCI/PM core to resume the device during system suspend which in effect disables the suspend complete optimization. Regardless of the above commit the optimization stayed disabled for DRM devices until commit d14d2a8453d650bea32a1c5271af1458cd283a0f Author: Lukas Wunner Date: Wed Jun 8 12:49:29 2016 +0200 drm: Remove dev_pm_ops from drm_class so this patch is in practice a fix for this commit. Another reason for the bug staying hidden for so long is that the optimization for a device is disabled if it's disabled for any of its children devices. i915 may have a backlight device as its child which doesn't support runtime PM and so doesn't allow the optimization either. So if this backlight device got registered the bug stayed hidden. Credits to Marta, Tomi and David who enabled pstore logging, that caught one instance of this issue across a suspend/ resume-to-ram and Ville who rememberd that the optimization was enabled for some devices at one point. The first WARN triggered by the problem: [ 6250.746445] WARNING: CPU: 2 PID: 17384 at drivers/gpu/drm/i915/intel_runtime_pm.c:2846 intel_runtime_pm_get+0x6b/0xd0 [i915] [ 6250.746448] pm_runtime_get_sync() failed: -13 [ 6250.746451] Modules linked in: snd_hda_intel i915 vgem snd_hda_codec_hdmi x86_pkg_temp_thermal intel_powerclamp coretemp crct10dif_pclmul crc32_pclmul snd_hda_codec_realtek snd_hda_codec_generic ghash_clmulni_intel e1000e snd_hda_codec snd_hwdep snd_hda_core ptp mei_me pps_core snd_pcm lpc_ich mei prime_ numbers i2c_hid i2c_designware_platform i2c_designware_core [last unloaded: i915] [ 6250.746512] CPU: 2 PID: 17384 Comm: kworker/u8:0 Tainted: G U W 4.11.0-rc5-CI-CI_DRM_334+ #1 [ 6250.746515] Hardware name: /NUC5i5RYB, BIOS RYBDWi35.86A.0362.2017.0118.0940 01/18/2017 [ 6250.746521] Workqueue: events_unbound async_run_entry_fn [ 6250.746525] Call Trace: [ 6250.746530] dump_stack+0x67/0x92 [ 6250.746536] __warn+0xc6/0xe0 [ 6250.746542] ? pci_restore_standard_config+0x40/0x40 [ 6250.746546] warn_slowpath_fmt+0x46/0x50 [ 6250.746553] ? 
__pm_runtime_resume+0x56/0x80 [ 6250.746584] intel_runtime_pm_get+0x6b/0xd0 [i915] [ 6250.746610] intel_display_power_get+0x1b/0x40 [i915] [ 6250.746646] i915_audio_component_get_power+0x15/0x20 [i915] [ 6250.746654] snd_hdac_display_power+0xc8/0x110 [snd_hda_core] [ 6250.746661] azx_runtime_resume+0x218/0x280 [snd_hda_intel] [ 6250.746667] pci_pm_runtime_resume+0x76/0xa0 [ 6250.746672] __rpm_callback+0xb4/0x1f0 [ 6250.746677] ? pci_restore_standard_config+0x40/0x40 [ 6250.746682] rpm_callback+0x1f/0x80 [ 6250.746686] ? pci_restore_standard_config+0x40/0x40 [ 6250.746690] rpm_resume+0x4ba/0x740 [ 6250.746698] __pm_runtime_resume+0x49/0x80 [ 6250.746703] pci_pm_suspend+0x57/0x140 [ 6250.746709] dpm_run_callback+0x6f/0x330 [ 6250.746713] ? pci_pm_freeze+0xe0/0xe0 [ 6250.746718] __device_suspend+0xf9/0x370 [ 6250.746724] ? dpm_watchdog_set+0x60/0x60 [ 6250.746730] async_suspend+0x1a/0x90 [ 6250.746735] async_run_entry_fn+0x34/0x160 [ 6250.746741] process_one_work+0x1f2/0x6d0 [ 6250.746749] worker_thread+0x49/0x4a0 [ 6250.746755] kthread+0x107/0x140 [ 6250.746759] ? process_one_work+0x6d0/0x6d0 [ 6250.746763] ? kthread_create_on_node+0x40/0x40 [ 6250.746768] ret_from_fork+0x2e/0x40 [ 6250.746778] ---[ end trace 102a62fd2160f5e6 ]--- v2: - Use the new pci_dev->needs_resume flag, to avoid any overhead during the ->pm_prepare hook. (Rafael) v3: - Update commit message to reference the actual regressing commit. (Lukas) v4: - Rebase on v4 of patch 1/2. Fixes: d14d2a8453d6 ("drm: Remove dev_pm_ops from drm_class") References: https://bugs.freedesktop.org/show_bug.cgi?id=100378 References: https://bugs.freedesktop.org/show_bug.cgi?id=100770 Cc: Rafael J. Wysocki Cc: Marta Lofstedt Cc: David Weinehall Cc: Tomi Sarvela Cc: Ville Syrjälä Cc: Mika Kuoppala Cc: Chris Wilson Cc: Takashi Iwai Cc: Bjorn Helgaas Cc: Lukas Wunner Cc: linux-pci@vger.kernel.org Signed-off-by: Imre Deak Reviewed-by: Chris Wilson Reported-and-tested-by: Marta Lofstedt Link: http://patchwork.freedesktop.org/patch/msgid/1493726649-32094-2-git-send-email-imre.deak@intel.com (cherry picked from commit adfdf85d795f4d4f487b61ee0b169d64c6e19081) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_drv.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ca6efb69ef66..7513e7678263 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1199,6 +1199,15 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_priv; pci_set_drvdata(pdev, &dev_priv->drm); + /* + * Disable the system suspend direct complete optimization, which can + * leave the device suspended skipping the driver's suspend handlers + * if the device was already runtime suspended. This is needed due to + * the difference in our runtime and system suspend sequence and + * becaue the HDA driver may require us to enable the audio power + * domain during system suspend. + */ + pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; ret = i915_driver_init_early(dev_priv, ent); if (ret < 0) -- GitLab From 220b67a16fde7187fae7efcc6809d658e4c49525 Mon Sep 17 00:00:00 2001 From: Richard Date: Sun, 21 May 2017 12:27:00 -0700 Subject: [PATCH 034/786] partitions/msdos: FreeBSD UFS2 file systems are not recognized commit 223220356d5ebc05ead9a8d697abb0c0a906fc81 upstream. 
The code in block/partitions/msdos.c recognizes FreeBSD, OpenBSD and NetBSD partitions and does a reasonable job picking out OpenBSD and NetBSD UFS subpartitions. But for FreeBSD the subpartitions are always "bad". Kernel: Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- block/partitions/msdos.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c index 93e7c1b32edd..5610cd537da7 100644 --- a/block/partitions/msdos.c +++ b/block/partitions/msdos.c @@ -300,6 +300,8 @@ static void parse_bsd(struct parsed_partitions *state, continue; bsd_start = le32_to_cpu(p->p_offset); bsd_size = le32_to_cpu(p->p_size); + if (memcmp(flavour, "bsd\0", 4) == 0) + bsd_start += offset; if (offset == bsd_start && size == bsd_size) /* full parent partition, we have it already */ continue; -- GitLab From d864e675c78983b287848a3df64c1d9dfed263da Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Wed, 1 Mar 2017 15:33:26 +0100 Subject: [PATCH 035/786] netfilter: nf_conntrack_sip: fix wrong memory initialisation commit da2f27e9e615d1c799c9582b15262458da61fddc upstream. In commit 82de0be6862cd ("netfilter: Add helper array register/unregister functions"), struct nf_conntrack_helper sip[MAX_PORTS][4] was changed to sip[MAX_PORTS * 4], so the memory init should have been changed to memset(&sip[4 * i], 0, 4 * sizeof(sip[i])); But as the sip[] table is allocated in the BSS, it is already set to 0 Fixes: 82de0be6862cd ("netfilter: Add helper array register/unregister functions") Signed-off-by: Christophe Leroy Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nf_conntrack_sip.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index c3fc14e021ec..3a8dc39a9116 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -1630,8 +1630,6 @@ static int __init nf_conntrack_sip_init(void) ports[ports_c++] = SIP_PORT; for (i = 0; i < ports_c; i++) { - memset(&sip[i], 0, sizeof(sip[i])); - nf_ct_helper_init(&sip[4 * i], AF_INET, IPPROTO_UDP, "sip", SIP_PORT, ports[i], i, sip_exp_policy, SIP_EXPECT_MAX, -- GitLab From b5a1aa812a161dce89d296e599a003de17cccefa Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Tue, 23 May 2017 21:53:26 -0400 Subject: [PATCH 036/786] ibmvnic: Fix endian errors in error reporting output [ Upstream commit 75224c93fa985f4a6fb983f53208f5c5aa555fbf ] Error reports received from firmware were not being converted from big endian values, leading to bogus error codes reported on little endian systems. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmvnic.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 0fbf686f5e7c..207cc562f803 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2197,12 +2197,12 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq, if (!found) { dev_err(dev, "Couldn't find error id %x\n", - crq->request_error_rsp.error_id); + be32_to_cpu(crq->request_error_rsp.error_id)); return; } dev_err(dev, "Detailed info for error id %x:", - crq->request_error_rsp.error_id); + be32_to_cpu(crq->request_error_rsp.error_id)); for (i = 0; i < error_buff->len; i++) { pr_cont("%02x", (int)error_buff->buff[i]); @@ -2281,8 +2281,8 @@ static void handle_error_indication(union ibmvnic_crq *crq, dev_err(dev, "Firmware reports %serror id %x, cause %d\n", crq->error_indication. flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "", - crq->error_indication.error_id, - crq->error_indication.error_cause); + be32_to_cpu(crq->error_indication.error_id), + be16_to_cpu(crq->error_indication.error_cause)); error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC); if (!error_buff) -- GitLab From 65e72723add09f7e260863b1f68f24d62c3cf96a Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Tue, 23 May 2017 21:53:27 -0400 Subject: [PATCH 037/786] ibmvnic: Fix endian error when requesting device capabilities [ Upstream commit 28f4d16570dcf440e54a4d72666d5be452f27d0e ] When a vNIC client driver requests a faulty device setting, the server returns an acceptable value for the client to request. This 64 bit value was incorrectly being swapped as a 32 bit value, resulting in loss of data. This patch corrects that by using the 64 bit swap function. Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmvnic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 207cc562f803..d1cf37dc3aa2 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2400,10 +2400,10 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq, case PARTIALSUCCESS: dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n", *req_value, - (long int)be32_to_cpu(crq->request_capability_rsp. + (long int)be64_to_cpu(crq->request_capability_rsp. number), name); release_sub_crqs_no_irqs(adapter); - *req_value = be32_to_cpu(crq->request_capability_rsp.number); + *req_value = be64_to_cpu(crq->request_capability_rsp.number); init_sub_crqs(adapter, 1); return; default: -- GitLab From bff3001afae781cc24dc816bc3e0f9c6f6a02296 Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Tue, 23 May 2017 21:53:28 -0400 Subject: [PATCH 038/786] net: xilinx_emaclite: fix freezes due to unordered I/O [ Upstream commit acf138f1b00bdd1b7cd9894562ed0c2a1670888e ] The xilinx_emaclite uses __raw_writel and __raw_readl for register accesses. Those functions do not imply any kind of memory barriers and they may be reordered. The driver does not seem to take that into account, though, and the driver does not satisfy the ordering requirements of the hardware. For clear examples, see xemaclite_mdio_write() and xemaclite_mdio_read() which try to set MDIO address before initiating the transaction. 
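As an illustrative sketch (not part of this patch), the fragment below shows the MDIO sequence the commit message refers to, assuming the driver's XEL_MDIOADDR_OFFSET and XEL_MDIOCTRL_OFFSET constants are in scope. With the raw accessors nothing guarantees that the address-register write reaches the device before the control write that starts the transaction; the ioread32()/iowrite32() family (and the *be variants on big-endian kernels) provides the ordering guarantees the driver was originally written against.

#include <linux/io.h>

/* Hypothetical helper for illustration only -- not taken from the driver. */
static void mdio_setup_and_start(void __iomem *base, u32 mdio_addr, u32 ctrl)
{
        /*
         * Before the patch (no barriers implied, may be reordered):
         *   __raw_writel(mdio_addr, base + XEL_MDIOADDR_OFFSET);
         *   __raw_writel(ctrl, base + XEL_MDIOCTRL_OFFSET);
         *
         * After the patch the driver uses ordered accessors instead:
         */
        iowrite32(mdio_addr, base + XEL_MDIOADDR_OFFSET);
        iowrite32(ctrl, base + XEL_MDIOCTRL_OFFSET);
}

The patch hides the endianness choice behind xemaclite_readl()/xemaclite_writel(), which map to ioread32be()/iowrite32be() on big-endian kernels and ioread32()/iowrite32() otherwise, as the hunks below show.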
I'm seeing system freezes with the driver with GCC 5.4 and current Linux kernels on Zynq-7000 SoC immediately when trying to use the interface. In commit 123c1407af87 ("net: emaclite: Do not use microblaze and ppc IO functions") the driver was switched from non-generic in_be32/out_be32 (memory barriers, big endian) to __raw_readl/__raw_writel (no memory barriers, native endian), so apparently the device follows system endianness and the driver was originally written with the assumption of memory barriers. Rather than try to hunt for each case of missing barrier, just switch the driver to use iowrite32/ioread32/iowrite32be/ioread32be depending on endianness instead. Tested on little-endian Zynq-7000 ARM SoC FPGA. Signed-off-by: Anssi Hannula Fixes: 123c1407af87 ("net: emaclite: Do not use microblaze and ppc IO functions") Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 116 ++++++++++-------- 1 file changed, 62 insertions(+), 54 deletions(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 93dc10b10c09..dfdb78cc13ad 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -100,6 +100,14 @@ /* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */ #define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT) +#ifdef __BIG_ENDIAN +#define xemaclite_readl ioread32be +#define xemaclite_writel iowrite32be +#else +#define xemaclite_readl ioread32 +#define xemaclite_writel iowrite32 +#endif + /** * struct net_local - Our private per device data * @ndev: instance of the network device @@ -156,15 +164,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata) u32 reg_data; /* Enable the Tx interrupts for the first Buffer */ - reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); - __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK, - drvdata->base_addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); + xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK, + drvdata->base_addr + XEL_TSR_OFFSET); /* Enable the Rx interrupts for the first buffer */ - __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); + xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET); /* Enable the Global Interrupt Enable */ - __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); + xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); } /** @@ -179,17 +187,17 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata) u32 reg_data; /* Disable the Global Interrupt Enable */ - __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); + xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET); /* Disable the Tx interrupts for the first buffer */ - reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET); - __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), - drvdata->base_addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET); + xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK), + drvdata->base_addr + XEL_TSR_OFFSET); /* Disable the Rx interrupts for the first buffer */ - reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET); - __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), - drvdata->base_addr + XEL_RSR_OFFSET); + reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET); + 
xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK), + drvdata->base_addr + XEL_RSR_OFFSET); } /** @@ -321,7 +329,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, byte_count = ETH_FRAME_LEN; /* Check if the expected buffer is available */ - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) == 0) { @@ -334,7 +342,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, addr = (void __iomem __force *)((u32 __force)addr ^ XEL_BUFFER_OFFSET); - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK)) != 0) @@ -345,16 +353,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, /* Write the frame to the buffer */ xemaclite_aligned_write(data, (u32 __force *) addr, byte_count); - __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK), - addr + XEL_TPLR_OFFSET); + xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK), + addr + XEL_TPLR_OFFSET); /* Update the Tx Status Register to indicate that there is a * frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which * is used by the interrupt handler to check whether a frame * has been transmitted */ - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK); - __raw_writel(reg_data, addr + XEL_TSR_OFFSET); + xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET); return 0; } @@ -379,7 +387,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use); /* Verify which buffer has valid data */ - reg_data = __raw_readl(addr + XEL_RSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) { if (drvdata->rx_ping_pong != 0) @@ -396,14 +404,14 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) return 0; /* No data was available */ /* Verify that buffer has valid data */ - reg_data = __raw_readl(addr + XEL_RSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); if ((reg_data & XEL_RSR_RECV_DONE_MASK) != XEL_RSR_RECV_DONE_MASK) return 0; /* No data was available */ } /* Get the protocol type of the ethernet frame that arrived */ - proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET + + proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); @@ -412,7 +420,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { if (proto_type == ETH_P_IP) { - length = ((ntohl(__raw_readl(addr + + length = ((ntohl(xemaclite_readl(addr + XEL_HEADER_IP_LENGTH_OFFSET + XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & @@ -434,9 +442,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) data, length); /* Acknowledge the frame */ - reg_data = __raw_readl(addr + XEL_RSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET); reg_data &= ~XEL_RSR_RECV_DONE_MASK; - __raw_writel(reg_data, addr + XEL_RSR_OFFSET); + xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET); return length; } @@ -463,14 +471,14 @@ static void xemaclite_update_address(struct net_local *drvdata, xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN); - __raw_writel(ETH_ALEN, addr + 
XEL_TPLR_OFFSET); + xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET); /* Update the MAC address in the EmacLite */ - reg_data = __raw_readl(addr + XEL_TSR_OFFSET); - __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); + reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET); + xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET); /* Wait for EmacLite to finish with the MAC address update */ - while ((__raw_readl(addr + XEL_TSR_OFFSET) & + while ((xemaclite_readl(addr + XEL_TSR_OFFSET) & XEL_TSR_PROG_MAC_ADDR) != 0) ; } @@ -640,32 +648,32 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id) u32 tx_status; /* Check if there is Rx Data available */ - if ((__raw_readl(base_addr + XEL_RSR_OFFSET) & + if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK) || - (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) + (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET) & XEL_RSR_RECV_DONE_MASK)) xemaclite_rx_handler(dev); /* Check if the Transmission for the first buffer is completed */ - tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET); + tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; - __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET); + xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET); tx_complete = true; } /* Check if the Transmission for the second buffer is completed */ - tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) && (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) { tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK; - __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + - XEL_TSR_OFFSET); + xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET + + XEL_TSR_OFFSET); tx_complete = true; } @@ -698,7 +706,7 @@ static int xemaclite_mdio_wait(struct net_local *lp) /* wait for the MDIO interface to not be busy or timeout after some time. */ - while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & + while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) & XEL_MDIOCTRL_MDIOSTS_MASK) { if (time_before_eq(end, jiffies)) { WARN_ON(1); @@ -734,17 +742,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg) * MDIO Address register. Set the Status bit in the MDIO Control * register to start a MDIO read transaction. */ - ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); - __raw_writel(XEL_MDIOADDR_OP_MASK | - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), - lp->base_addr + XEL_MDIOADDR_OFFSET); - __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, - lp->base_addr + XEL_MDIOCTRL_OFFSET); + ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + xemaclite_writel(XEL_MDIOADDR_OP_MASK | + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); if (xemaclite_mdio_wait(lp)) return -ETIMEDOUT; - rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET); + rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET); dev_dbg(&lp->ndev->dev, "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n", @@ -781,13 +789,13 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg, * Data register. 
Finally, set the Status bit in the MDIO Control * register to start a MDIO write transaction. */ - ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); - __raw_writel(~XEL_MDIOADDR_OP_MASK & - ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), - lp->base_addr + XEL_MDIOADDR_OFFSET); - __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); - __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, - lp->base_addr + XEL_MDIOCTRL_OFFSET); + ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET); + xemaclite_writel(~XEL_MDIOADDR_OP_MASK & + ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg), + lp->base_addr + XEL_MDIOADDR_OFFSET); + xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET); + xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); return 0; } @@ -834,8 +842,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev) /* Enable the MDIO bus by asserting the enable bit in MDIO Control * register. */ - __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK, - lp->base_addr + XEL_MDIOCTRL_OFFSET); + xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK, + lp->base_addr + XEL_MDIOCTRL_OFFSET); bus = mdiobus_alloc(); if (!bus) { @@ -1140,8 +1148,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev) } /* Clear the Tx CSR's in case this is a restart */ - __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); - __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); + xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET); + xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET); /* Set the MAC address in the EmacLite device */ xemaclite_update_address(lp, ndev->dev_addr); -- GitLab From a2901d01a6c7aac1b1e0603bc7d8fce448760195 Mon Sep 17 00:00:00 2001 From: Anssi Hannula Date: Tue, 23 May 2017 21:53:29 -0400 Subject: [PATCH 039/786] net: xilinx_emaclite: fix receive buffer overflow [ Upstream commit cd224553641848dd17800fe559e4ff5d208553e8 ] xilinx_emaclite looks at the received data to try to determine the Ethernet packet length but does not properly clamp it if proto_type == ETH_P_IP or 1500 < proto_type <= 1518, causing a buffer overflow and a panic via skb_panic() as the length exceeds the allocated skb size. Fix those cases. Also add an additional unconditional check with WARN_ON() at the end. Signed-off-by: Anssi Hannula Fixes: bb81b2ddfa19 ("net: add Xilinx emac lite device driver") Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index dfdb78cc13ad..aa02a03a6d8d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -377,7 +377,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data, * * Return: Total number of bytes received */ -static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) +static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen) { void __iomem *addr; u16 length, proto_type; @@ -417,7 +417,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) /* Check if received ethernet frame is a raw ethernet frame * or an IP packet or an ARP packet */ - if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + if (proto_type > ETH_DATA_LEN) { if (proto_type == ETH_P_IP) { length = ((ntohl(xemaclite_readl(addr + @@ -425,6 +425,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) & XEL_RPLR_LENGTH_MASK); + length = min_t(u16, length, ETH_DATA_LEN); length += ETH_HLEN + ETH_FCS_LEN; } else if (proto_type == ETH_P_ARP) @@ -437,6 +438,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data) /* Use the length in the frame, plus the header and trailer */ length = proto_type + ETH_HLEN + ETH_FCS_LEN; + if (WARN_ON(length > maxlen)) + length = maxlen; + /* Read from the EmacLite device */ xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET), data, length); @@ -611,7 +615,7 @@ static void xemaclite_rx_handler(struct net_device *dev) skb_reserve(skb, 2); - len = xemaclite_recv_data(lp, (u8 *) skb->data); + len = xemaclite_recv_data(lp, (u8 *) skb->data, len); if (!len) { dev->stats.rx_errors++; -- GitLab From 116589a5a7e0f3dc91a50b9d39365306b2198680 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 May 2017 21:53:32 -0400 Subject: [PATCH 040/786] tcp: tcp_probe: use spin_lock_bh() [ Upstream commit e70ac171658679ecf6bea4bbd9e9325cd6079d2b ] tcp_rcv_established() can now run in process context. We need to disable BH while acquiring tcp probe spinlock, or risk a deadlock. Fixes: 5413d1babe8f ("net: do not block BH while processing socket backlog") Signed-off-by: Eric Dumazet Reported-by: Ricardo Nabinger Sanchez Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_probe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index f6c50af24a64..3d063eb37848 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, (fwmark > 0 && skb->mark == fwmark)) && (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { - spin_lock(&tcp_probe.lock); + spin_lock_bh(&tcp_probe.lock); /* If log fills, just silently drop */ if (tcp_probe_avail() > 1) { struct tcp_log *p = tcp_probe.log + tcp_probe.head; @@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); } tcp_probe.lastcwnd = tp->snd_cwnd; - spin_unlock(&tcp_probe.lock); + spin_unlock_bh(&tcp_probe.lock); wake_up(&tcp_probe.wait); } -- GitLab From 12ec2560d467812424ff8eb6f7fccc83f64aae3d Mon Sep 17 00:00:00 2001 From: "Jonathan T. Leighton" Date: Tue, 23 May 2017 21:53:33 -0400 Subject: [PATCH 041/786] ipv6: Handle IPv4-mapped src to in6addr_any dst. [ Upstream commit 052d2369d1b479cdbbe020fdd6d057d3c342db74 ] This patch adds a check on the type of the source address for the case where the destination address is in6addr_any. If the source is an IPv4-mapped IPv6 source address, the destination is changed to ::ffff:127.0.0.1, and otherwise the destination is changed to ::1. This is done in three locations to handle UDP calls to either connect() or sendmsg() and TCP calls to connect(). Note that udpv6_sendmsg() delays handling an in6addr_any destination until very late, so the patch only needs to handle the case where the source is an IPv4-mapped IPv6 address. Signed-off-by: Jonathan T. Leighton Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/datagram.c | 14 +++++++++----- net/ipv6/tcp_ipv6.c | 11 ++++++++--- net/ipv6/udp.c | 4 ++++ 3 files changed, 21 insertions(+), 8 deletions(-) diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 442ec1f39ed1..38062f403ceb 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -166,18 +166,22 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, if (np->sndflow) fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; - addr_type = ipv6_addr_type(&usin->sin6_addr); - - if (addr_type == IPV6_ADDR_ANY) { + if (ipv6_addr_any(&usin->sin6_addr)) { /* * connect to self */ - usin->sin6_addr.s6_addr[15] = 0x01; + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), + &usin->sin6_addr); + else + usin->sin6_addr = in6addr_loopback; } + addr_type = ipv6_addr_type(&usin->sin6_addr); + daddr = &usin->sin6_addr; - if (addr_type == IPV6_ADDR_MAPPED) { + if (addr_type & IPV6_ADDR_MAPPED) { struct sockaddr_in sin; if (__ipv6_only_sock(sk)) { diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index aef9b28067f4..7ac2365aa6fb 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -148,8 +148,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, * connect() to INADDR_ANY means loopback (BSD'ism). 
*/ - if (ipv6_addr_any(&usin->sin6_addr)) - usin->sin6_addr.s6_addr[15] = 0x1; + if (ipv6_addr_any(&usin->sin6_addr)) { + if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), + &usin->sin6_addr); + else + usin->sin6_addr = in6addr_loopback; + } addr_type = ipv6_addr_type(&usin->sin6_addr); @@ -188,7 +193,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, * TCP over IPv4 */ - if (addr_type == IPV6_ADDR_MAPPED) { + if (addr_type & IPV6_ADDR_MAPPED) { u32 exthdrlen = icsk->icsk_ext_hdr_len; struct sockaddr_in sin; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 40a289f78d77..2497f62fa4c2 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1049,6 +1049,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; daddr = &sin6->sin6_addr; + if (ipv6_addr_any(daddr) && + ipv6_addr_v4mapped(&np->saddr)) + ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK), + daddr); break; case AF_INET: goto do_udp_sendmsg; -- GitLab From cb351da6f2badf45d87cf0d21826d81a77f5e433 Mon Sep 17 00:00:00 2001 From: "Jonathan T. Leighton" Date: Tue, 23 May 2017 21:53:34 -0400 Subject: [PATCH 042/786] ipv6: Inhibit IPv4-mapped src address on the wire. [ Upstream commit ec5e3b0a1d41fbda0cc33a45bc9e54e91d9d12c7 ] This patch adds a check for the problematic case of an IPv4-mapped IPv6 source address and a destination address that is neither an IPv4-mapped IPv6 address nor in6addr_any, and returns an appropriate error. The check in done before returning from looking up the route. Signed-off-by: Jonathan T. Leighton Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_output.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 1ac3cea49171..3ab32ac57ccd 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1019,6 +1019,9 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, } } #endif + if (ipv6_addr_v4mapped(&fl6->saddr) && + !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) + return -EAFNOSUPPORT; return 0; -- GitLab From 44bc7cae603167feb86b7f8f0684df62ffaac7f4 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 23 May 2017 21:53:35 -0400 Subject: [PATCH 043/786] tipc: Fix tipc_sk_reinit race conditions [ Upstream commit 9dbbfb0ab6680c6a85609041011484e6658e7d3c ] There are two problems with the function tipc_sk_reinit. Firstly it's doing a manual walk over an rhashtable. This is broken as an rhashtable can be resized and if you manually walk over it during a resize then you may miss entries. Secondly it's missing memory barriers as previously the code used spinlocks which provide the barriers implicitly. This patch fixes both problems. Fixes: 07f6c4bc048a ("tipc: convert tipc reference table to...") Signed-off-by: Herbert Xu Acked-by: Ying Xue Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/tipc/net.c | 4 ++++ net/tipc/socket.c | 30 +++++++++++++++++++----------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/net/tipc/net.c b/net/tipc/net.c index 28bf4feeb81c..ab8a2d5d1e32 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -110,6 +110,10 @@ int tipc_net_start(struct net *net, u32 addr) char addr_string[16]; tn->own_addr = addr; + + /* Ensure that the new address is visible before we reinit. 
*/ + smp_mb(); + tipc_named_reinit(net); tipc_sk_reinit(net); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 41f013888f07..25bc5c30d7fb 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -335,8 +335,6 @@ static int tipc_sk_create(struct net *net, struct socket *sock, INIT_LIST_HEAD(&tsk->publications); msg = &tsk->phdr; tn = net_generic(sock_net(sk), tipc_net_id); - tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, - NAMED_H_SIZE, 0); /* Finish initializing socket data structures */ sock->ops = ops; @@ -346,6 +344,13 @@ static int tipc_sk_create(struct net *net, struct socket *sock, pr_warn("Socket create failed; port number exhausted\n"); return -EINVAL; } + + /* Ensure tsk is visible before we read own_addr. */ + smp_mb(); + + tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, + NAMED_H_SIZE, 0); + msg_set_origport(msg, tsk->portid); setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk); sk->sk_backlog_rcv = tipc_backlog_rcv; @@ -2264,24 +2269,27 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, void tipc_sk_reinit(struct net *net) { struct tipc_net *tn = net_generic(net, tipc_net_id); - const struct bucket_table *tbl; - struct rhash_head *pos; + struct rhashtable_iter iter; struct tipc_sock *tsk; struct tipc_msg *msg; - int i; - rcu_read_lock(); - tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht); - for (i = 0; i < tbl->size; i++) { - rht_for_each_entry_rcu(tsk, pos, tbl, i, node) { + rhashtable_walk_enter(&tn->sk_rht, &iter); + + do { + tsk = ERR_PTR(rhashtable_walk_start(&iter)); + if (tsk) + continue; + + while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) { spin_lock_bh(&tsk->sk.sk_lock.slock); msg = &tsk->phdr; msg_set_prevnode(msg, tn->own_addr); msg_set_orignode(msg, tn->own_addr); spin_unlock_bh(&tsk->sk.sk_lock.slock); } - } - rcu_read_unlock(); + + rhashtable_walk_stop(&iter); + } while (tsk == ERR_PTR(-EAGAIN)); } static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid) -- GitLab From 2557969fb4f703f1364a58fabde53e6567b46120 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Tue, 23 May 2017 21:53:36 -0400 Subject: [PATCH 044/786] gfs2: Use rhashtable walk interface in glock_hash_walk [ Upstream commit 6a25478077d987edc5e2f880590a2bc5fcab4441 ] The function glock_hash_walk walks the rhashtable by hand. This is broken because if it catches the hash table in the middle of a rehash, then it will miss entries. This patch replaces the manual walk by using the rhashtable walk interface. Fixes: 88ffbf3e037e ("GFS2: Use resizable hash table for glocks") Signed-off-by: Herbert Xu Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/gfs2/glock.c | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 133f322573b5..6528724ad6e5 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1425,26 +1425,32 @@ static struct shrinker glock_shrinker = { * @sdp: the filesystem * @bucket: the bucket * + * Note that the function can be called multiple times on the same + * object. So the user must ensure that the function can cope with + * that. 
*/ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) { struct gfs2_glock *gl; - struct rhash_head *pos; - const struct bucket_table *tbl; - int i; + struct rhashtable_iter iter; - rcu_read_lock(); - tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table); - for (i = 0; i < tbl->size; i++) { - rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) { + rhashtable_walk_enter(&gl_hash_table, &iter); + + do { + gl = ERR_PTR(rhashtable_walk_start(&iter)); + if (gl) + continue; + + while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) if ((gl->gl_name.ln_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref)) examiner(gl); - } - } - rcu_read_unlock(); - cond_resched(); + + rhashtable_walk_stop(&iter); + } while (cond_resched(), gl == ERR_PTR(-EAGAIN)); + + rhashtable_walk_exit(&iter); } /** -- GitLab From 74e24d1ea1a4fc8e1d4faa43263229aa45513cd3 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 23 May 2017 21:53:37 -0400 Subject: [PATCH 045/786] NET: Fix /proc/net/arp for AX.25 [ Upstream commit 4872e57c812dd312bf8193b5933fa60585cda42f ] When sending ARP requests over AX.25 links the hwaddress in the neighbour cache are not getting initialized. For such an incomplete arp entry ax2asc2 will generate an empty string resulting in /proc/net/arp output like the following: $ cat /proc/net/arp IP address HW type Flags HW address Mask Device 192.168.122.1 0x1 0x2 52:54:00:00:5d:5f * ens3 172.20.1.99 0x3 0x0 * bpq0 The missing field will confuse the procfs parsing of arp(8) resulting in incorrect output for the device such as the following: $ arp Address HWtype HWaddress Flags Mask Iface gateway ether 52:54:00:00:5d:5f C ens3 172.20.1.99 (incomplete) ens3 This changes the content of /proc/net/arp to: $ cat /proc/net/arp IP address HW type Flags HW address Mask Device 172.20.1.99 0x3 0x0 * * bpq0 192.168.122.1 0x1 0x2 52:54:00:00:5d:5f * ens3 To do so it change ax2asc to put the string "*" in buf for a NULL address argument. Finally the HW address field is left aligned in a 17 character field (the length of an ethernet HW address in the usual hex notation) for readability. Signed-off-by: Ralf Baechle Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv4/arp.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 89a8cac4726a..51b27ae09fbd 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1263,7 +1263,7 @@ void __init arp_init(void) /* * ax25 -> ASCII conversion */ -static char *ax2asc2(ax25_address *a, char *buf) +static void ax2asc2(ax25_address *a, char *buf) { char c, *s; int n; @@ -1285,10 +1285,10 @@ static char *ax2asc2(ax25_address *a, char *buf) *s++ = n + '0'; *s++ = '\0'; - if (*buf == '\0' || *buf == '-') - return "*"; - - return buf; + if (*buf == '\0' || *buf == '-') { + buf[0] = '*'; + buf[1] = '\0'; + } } #endif /* CONFIG_AX25 */ @@ -1322,7 +1322,7 @@ static void arp_format_neigh_entry(struct seq_file *seq, } #endif sprintf(tbuf, "%pI4", n->primary_key); - seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n", + seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n", tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name); read_unlock(&n->lock); } -- GitLab From 4544ba3173b7e4f15c8c921997847390e5652268 Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Tue, 23 May 2017 21:53:38 -0400 Subject: [PATCH 046/786] ibmvnic: Call napi_disable instead of napi_enable in failure path [ Upstream commit e722af6391949e8851310441bb0cec157d25611d ] The failure path in ibmvnic_open() mistakenly makes a second call to napi_enable instead of calling napi_disable. This can result in a BUG_ON for any queues that were enabled in the previous call to napi_enable. Signed-off-by: Nathan Fontenot Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmvnic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index d1cf37dc3aa2..38314244f000 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -505,7 +505,7 @@ static int ibmvnic_open(struct net_device *netdev) adapter->rx_pool = NULL; rx_pool_arr_alloc_failed: for (i = 0; i < adapter->req_rx_queues; i++) - napi_enable(&adapter->napi[i]); + napi_disable(&adapter->napi[i]); alloc_napi_failed: return -ENOMEM; } -- GitLab From ae0b63eb344efec8aab9759ec3fcc94c799227e1 Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Tue, 23 May 2017 21:53:39 -0400 Subject: [PATCH 047/786] ibmvnic: Initialize completion variables before starting work [ Upstream commit db5d0b597bc27bbddf40f2f8359a73be4eb77104 ] Initialize condition variables prior to invoking any work that can mark them complete. This resolves a race in the ibmvnic driver where the driver faults trying to complete an uninitialized condition variable. Signed-off-by: Nathan Fontenot Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmvnic.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 38314244f000..9f2184be55dc 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -189,9 +189,10 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, } ltb->map_id = adapter->map_id; adapter->map_id++; + + init_completion(&adapter->fw_done); send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id); - init_completion(&adapter->fw_done); wait_for_completion(&adapter->fw_done); return 0; } @@ -1133,10 +1134,10 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token); crq.request_statistics.len = cpu_to_be32(sizeof(struct ibmvnic_statistics)); - ibmvnic_send_crq(adapter, &crq); /* Wait for data to be written */ init_completion(&adapter->stats_done); + ibmvnic_send_crq(adapter, &crq); wait_for_completion(&adapter->stats_done); for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) @@ -2809,9 +2810,9 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len, crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator; crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok); crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size; - ibmvnic_send_crq(adapter, &crq); init_completion(&adapter->fw_done); + ibmvnic_send_crq(adapter, &crq); wait_for_completion(&adapter->fw_done); if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size)) @@ -3591,9 +3592,9 @@ static int ibmvnic_dump_show(struct seq_file *seq, void *v) memset(&crq, 0, sizeof(crq)); crq.request_dump_size.first = IBMVNIC_CRQ_CMD; crq.request_dump_size.cmd = REQUEST_DUMP_SIZE; - ibmvnic_send_crq(adapter, &crq); init_completion(&adapter->fw_done); + ibmvnic_send_crq(adapter, &crq); wait_for_completion(&adapter->fw_done); seq_write(seq, adapter->dump_data, adapter->dump_data_size); @@ -3639,8 +3640,8 @@ static void handle_crq_init_rsp(struct work_struct *work) } } - send_version_xchg(adapter); reinit_completion(&adapter->init_done); + send_version_xchg(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Passive init timeout\n"); goto task_failed; @@ -3650,9 +3651,9 @@ static void handle_crq_init_rsp(struct work_struct *work) if (adapter->renegotiate) { adapter->renegotiate = false; release_sub_crqs_no_irqs(adapter); - send_cap_queries(adapter); reinit_completion(&adapter->init_done); + send_cap_queries(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { dev_err(dev, "Passive init timeout\n"); @@ -3780,9 +3781,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) adapter->debugfs_dump = ent; } } - ibmvnic_send_crq_init(adapter); init_completion(&adapter->init_done); + ibmvnic_send_crq_init(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) return 0; @@ -3790,9 +3791,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) if (adapter->renegotiate) { adapter->renegotiate = false; release_sub_crqs_no_irqs(adapter); - send_cap_queries(adapter); reinit_completion(&adapter->init_done); + send_cap_queries(adapter); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) return 0; -- GitLab From 008798746e6e1d68a24674690e355b5619711fc4 Mon Sep 17 00:00:00 2001 From: Ralf 
Baechle Date: Tue, 23 May 2017 21:53:40 -0400 Subject: [PATCH 048/786] NET: mkiss: Fix panic [ Upstream commit 7ba1b689038726d34e3244c1ac9e2e18c2ea4787 ] If a USB-to-serial adapter is unplugged, the driver re-initializes, with dev->hard_header_len and dev->addr_len set to zero, instead of the correct values. If then a packet is sent through the half-dead interface, the kernel will panic due to running out of headroom in the skb when pushing for the AX.25 headers resulting in this panic: [] (skb_panic) from [] (skb_push+0x4c/0x50) [] (skb_push) from [] (ax25_hard_header+0x34/0xf4 [ax25]) [] (ax25_hard_header [ax25]) from [] (ax_header+0x38/0x40 [mkiss]) [] (ax_header [mkiss]) from [] (neigh_compat_output+0x8c/0xd8) [] (neigh_compat_output) from [] (ip_finish_output+0x2a0/0x914) [] (ip_finish_output) from [] (ip_output+0xd8/0xf0) [] (ip_output) from [] (ip_local_out_sk+0x44/0x48) This patch makes mkiss behave like the 6pack driver. 6pack does not panic. In 6pack.c sp_setup() (same function name here) the values for dev->hard_header_len and dev->addr_len are set to the same values as in my mkiss patch. [ralf@linux-mips.org: Massages original submission to conform to the usual standards for patch submissions.] Signed-off-by: Thomas Osterried Signed-off-by: Ralf Baechle Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/hamradio/mkiss.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 1dfe2304daa7..e0a6b1a0ca88 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -648,8 +648,8 @@ static void ax_setup(struct net_device *dev) { /* Finish setting up the DEVICE info. */ dev->mtu = AX_MTU; - dev->hard_header_len = 0; - dev->addr_len = 0; + dev->hard_header_len = AX25_MAX_HEADER_LEN; + dev->addr_len = AX25_ADDR_LEN; dev->type = ARPHRD_AX25; dev->tx_queue_len = 10; dev->header_ops = &ax25_header_ops; -- GitLab From bed8b8627a3339f30c62534581deb8a37c7b8b8a Mon Sep 17 00:00:00 2001 From: Kejian Yan Date: Tue, 23 May 2017 21:53:41 -0400 Subject: [PATCH 049/786] net: hns: Fix the device being used for dma mapping during TX [ Upstream commit b85ea006b6bebb692628f11882af41c3e12e1e09 ] This patch fixes the device being used to DMA map skb->data. Erroneous device assignment causes the crash when SMMU is enabled. This happens during TX since buffer gets DMA mapped with device correspondign to net_device and gets unmapped using the device related to DSAF. Signed-off-by: Kejian Yan Reviewed-by: Yisen Zhuang Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index dff7b60345d8..c06845b7b666 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -304,8 +304,8 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, struct hns_nic_ring_data *ring_data) { struct hns_nic_priv *priv = netdev_priv(ndev); - struct device *dev = priv->dev; struct hnae_ring *ring = ring_data->ring; + struct device *dev = ring_to_dev(ring); struct netdev_queue *dev_queue; struct skb_frag_struct *frag; int buf_num; -- GitLab From 9217eeefee159b9dec3bd111022e471e66eb48ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Br=C3=BCns?= Date: Tue, 23 May 2017 21:53:42 -0400 Subject: [PATCH 050/786] sierra_net: Skip validating irrelevant fields for IDLE LSIs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 764895d3039e903dac3a70f219949efe43d036a0 ] When the context is deactivated, the link_type is set to 0xff, which triggers a warning message, and results in a wrong link status, as the LSI is ignored. Signed-off-by: Stefan Brüns Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/sierra_net.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index a251588762ec..d997d24798f0 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -365,6 +365,13 @@ static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) return -1; } + /* Validate the session state */ + if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { + netdev_err(dev->net, "Session idle, 0x%02x\n", + lsi->session_state); + return 0; + } + /* Validate the protocol - only support UMTS for now */ if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", @@ -386,13 +393,6 @@ static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) return 0; } - /* Validate the session state */ - if (lsi->session_state == SIERRA_NET_SESSION_IDLE) { - netdev_err(dev->net, "Session idle, 0x%02x\n", - lsi->session_state); - return 0; - } - /* Set link_sense true */ return 1; } -- GitLab From 7a6fcf38fabb404dd6fa589fa257c104c217c662 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20Br=C3=BCns?= Date: Tue, 23 May 2017 21:53:43 -0400 Subject: [PATCH 051/786] sierra_net: Add support for IPv6 and Dual-Stack Link Sense Indications MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 5a70348e1187c5bf1cbd0ec51843f36befed1c2d ] If a context is configured as dualstack ("IPv4v6"), the modem indicates the context activation with a slightly different indication message. The dual-stack indication omits the link_type (IPv4/v6) and adds additional address fields. IPv6 LSIs are identical to IPv4 LSIs, but have a different link type. Signed-off-by: Stefan Brüns Reviewed-by: Bjørn Mork Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/sierra_net.c | 101 +++++++++++++++++++++++------------ 1 file changed, 66 insertions(+), 35 deletions(-) diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index d997d24798f0..0b5a84c9022c 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -73,8 +73,6 @@ static atomic_t iface_counter = ATOMIC_INIT(0); /* Private data structure */ struct sierra_net_data { - u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */ - u16 link_up; /* air link up or down */ u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */ @@ -122,6 +120,7 @@ struct param { /* LSI Protocol types */ #define SIERRA_NET_PROTOCOL_UMTS 0x01 +#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04 /* LSI Coverage */ #define SIERRA_NET_COVERAGE_NONE 0x00 #define SIERRA_NET_COVERAGE_NOPACKET 0x01 @@ -129,7 +128,8 @@ struct param { /* LSI Session */ #define SIERRA_NET_SESSION_IDLE 0x00 /* LSI Link types */ -#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00 +#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00 +#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02 struct lsi_umts { u8 protocol; @@ -137,9 +137,14 @@ struct lsi_umts { __be16 length; /* eventually use a union for the rest - assume umts for now */ u8 coverage; - u8 unused2[41]; + u8 network_len; /* network name len */ + u8 network[40]; /* network name (UCS2, bigendian) */ u8 session_state; u8 unused3[33]; +} __packed; + +struct lsi_umts_single { + struct lsi_umts lsi; u8 link_type; u8 pdp_addr_len; /* NW-supplied PDP address len */ u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */ @@ -158,10 +163,31 @@ struct lsi_umts { u8 reserved[8]; } __packed; +struct lsi_umts_dual { + struct lsi_umts lsi; + u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */ + u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */ + u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */ + u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */ + u8 unused4[23]; + u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */ + u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */ + u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */ + u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/ + u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */ + u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */ + u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */ + u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/ + u8 unused5[68]; +} __packed; + #define SIERRA_NET_LSI_COMMON_LEN 4 -#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts)) +#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single)) #define SIERRA_NET_LSI_UMTS_STATUS_LEN \ (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN) +#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual)) +#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \ + (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN) /* Forward definitions */ static void sierra_sync_timer(unsigned long syncdata); @@ -191,10 +217,11 @@ static inline void sierra_net_set_private(struct usbnet *dev, dev->data[0] = (unsigned long)priv; } -/* is packet IPv4 */ +/* is packet IPv4/IPv6 */ static inline int is_ip(struct sk_buff *skb) { - return skb->protocol == cpu_to_be16(ETH_P_IP); + return skb->protocol == cpu_to_be16(ETH_P_IP) || + skb->protocol == cpu_to_be16(ETH_P_IPV6); } /* @@ -350,18 +377,11 @@ static inline int sierra_net_is_valid_addrlen(u8 len) static 
int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) { struct lsi_umts *lsi = (struct lsi_umts *)data; + u32 expected_length; - if (datalen < sizeof(struct lsi_umts)) { - netdev_err(dev->net, "%s: Data length %d, exp %Zu\n", - __func__, datalen, - sizeof(struct lsi_umts)); - return -1; - } - - if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) { - netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", - __func__, be16_to_cpu(lsi->length), - (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN); + if (datalen < sizeof(struct lsi_umts_single)) { + netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n", + __func__, datalen, sizeof(struct lsi_umts_single)); return -1; } @@ -373,22 +393,34 @@ static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen) } /* Validate the protocol - only support UMTS for now */ - if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) { + if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) { + struct lsi_umts_single *single = (struct lsi_umts_single *)lsi; + + /* Validate the link type */ + if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 && + single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) { + netdev_err(dev->net, "Link type unsupported: 0x%02x\n", + single->link_type); + return -1; + } + expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN; + } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) { + expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN; + } else { netdev_err(dev->net, "Protocol unsupported, 0x%02x\n", - lsi->protocol); + lsi->protocol); return -1; } - /* Validate the link type */ - if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) { - netdev_err(dev->net, "Link type unsupported: 0x%02x\n", - lsi->link_type); + if (be16_to_cpu(lsi->length) != expected_length) { + netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n", + __func__, be16_to_cpu(lsi->length), expected_length); return -1; } /* Validate the coverage */ - if (lsi->coverage == SIERRA_NET_COVERAGE_NONE - || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { + if (lsi->coverage == SIERRA_NET_COVERAGE_NONE || + lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) { netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage); return 0; } @@ -662,7 +694,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) u8 numendpoints; u16 fwattr = 0; int status; - struct ethhdr *eth; struct sierra_net_data *priv; static const u8 sync_tmplate[sizeof(priv->sync_msg)] = { 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00}; @@ -700,11 +731,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter); dev->net->dev_addr[ETH_ALEN-1] = ifacenum; - /* we will have to manufacture ethernet headers, prepare template */ - eth = (struct ethhdr *)priv->ethr_hdr_tmpl; - memcpy(ð->h_dest, dev->net->dev_addr, ETH_ALEN); - eth->h_proto = cpu_to_be16(ETH_P_IP); - /* prepare shutdown message template */ memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg)); /* set context index initially to 0 - prepares tx hdr template */ @@ -833,9 +859,14 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skb_pull(skb, hh.hdrlen); - /* We are going to accept this packet, prepare it */ - memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl, - ETH_HLEN); + /* We are going to accept this packet, prepare it. + * In case protocol is IPv6, keep it, otherwise force IPv4. 
+ */ + skb_reset_mac_header(skb); + if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6)) + eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP); + eth_zero_addr(eth_hdr(skb)->h_source); + memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN); /* Last packet in batch handled by usbnet */ if (hh.payload_len.word == skb->len) -- GitLab From c1a4306f24a092f55ee9bbdff0c0a8741e9c833c Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Date: Tue, 23 May 2017 21:53:44 -0400 Subject: [PATCH 052/786] i2c: piix4: Request the SMBUS semaphore inside the mutex [ Upstream commit bbb27fc33d44e7b8d96369810654df4ee1837566 ] SMBSLVCNT must be protected with the piix4_mutex_sb800 in order to avoid multiple buses accessing to the semaphore at the same time. Fixes: 701dc207bf55 ("i2c: piix4: Avoid race conditions with IMC") Reported-by: Jean Delvare Signed-off-by: Ricardo Ribalda Delgado Signed-off-by: Jean Delvare Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-piix4.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index e34d82e79b98..1250e5bc46b7 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -592,6 +592,8 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, u8 port; int retval; + mutex_lock(&piix4_mutex_sb800); + /* Request the SMBUS semaphore, avoid conflicts with the IMC */ smbslvcnt = inb_p(SMBSLVCNT); do { @@ -605,10 +607,10 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, usleep_range(1000, 2000); } while (--retries); /* SMBus is still owned by the IMC, we give up */ - if (!retries) + if (!retries) { + mutex_unlock(&piix4_mutex_sb800); return -EBUSY; - - mutex_lock(&piix4_mutex_sb800); + } outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); @@ -623,11 +625,11 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1); - mutex_unlock(&piix4_mutex_sb800); - /* Release the semaphore */ outb_p(smbslvcnt | 0x20, SMBSLVCNT); + mutex_unlock(&piix4_mutex_sb800); + return retval; } -- GitLab From 47a5aabc34247d75c7bfa87f4e3f884f055dded6 Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Date: Tue, 23 May 2017 21:53:45 -0400 Subject: [PATCH 053/786] i2c: piix4: Fix request_region size [ Upstream commit f43128c75202f29ee71aa83e6c320a911137c189 ] Since '701dc207bf55 ("i2c: piix4: Avoid race conditions with IMC")' we are using the SMBSLVCNT register at offset 0x8. We need to request it. 
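To spell out the off-by-one (an editorial sketch, not part of the patch, using a hypothetical helper name): request_region(base, len, name) reserves I/O ports base through base + len - 1, so the old SMBIOSIZE of 8 covered offsets 0x0-0x7 only and the SMBSLVCNT access at offset 0x8 fell outside the reservation; a length of 9 covers it.

#include <linux/ioport.h>

/* Hypothetical helper for illustration only. */
static int piix4_request_io_sketch(unsigned short piix4_smba, const char *name)
{
        /* The reservation must reach SMBSLVCNT at offset 0x8, so the
         * requested length must be at least 0x8 + 1 = 9, the new SMBIOSIZE.
         */
        if (!request_region(piix4_smba, 0x8 + 1, name))
                return -EBUSY;

        return 0;
}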
Fixes: 701dc207bf55 ("i2c: piix4: Avoid race conditions with IMC") Signed-off-by: Ricardo Ribalda Delgado Signed-off-by: Jean Delvare Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-piix4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 1250e5bc46b7..c21ca7bf2efe 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -58,7 +58,7 @@ #define SMBSLVDAT (0xC + piix4_smba) /* count for request_region */ -#define SMBIOSIZE 8 +#define SMBIOSIZE 9 /* PCI Address Constants */ #define SMBBA 0x090 -- GitLab From c419fe260b549c543119ea7e2db4916b9aac26f3 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 23 May 2017 21:53:46 -0400 Subject: [PATCH 054/786] powerpc/powernv: Properly set "host-ipi" on IPIs [ Upstream commit f83e6862047e1e371bdc5d512dd6cabe8a3965b8 ] Otherwise KVM will fail to pass them through to the host Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/sysdev/xics/icp-opal.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index 32c46b424dd0..b53f80f0b4d8 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c @@ -130,14 +130,16 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data) { int hw_cpu = get_hard_smp_processor_id(cpu); + kvmppc_set_host_ipi(cpu, 1); opal_int_set_mfrr(hw_cpu, IPI_PRIORITY); } static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id) { - int hw_cpu = hard_smp_processor_id(); + int cpu = smp_processor_id(); - opal_int_set_mfrr(hw_cpu, 0xff); + kvmppc_set_host_ipi(cpu, 0); + opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff); return smp_ipi_demux(); } -- GitLab From baaa84b43608a922e6be101982388e50aa0a4422 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Tue, 23 May 2017 21:53:47 -0400 Subject: [PATCH 055/786] kernel/ucount.c: mark user_header with kmemleak_ignore() [ Upstream commit ed5bd7dc88edf4a4a9c67130742b1b59aa017a5f ] The user_header gets caught by kmemleak with the following splat as missing a free: unreferenced object 0xffff99667a733d80 (size 96): comm "swapper/0", pid 1, jiffies 4294892317 (age 62191.468s) hex dump (first 32 bytes): a0 b6 92 b4 ff ff ff ff 00 00 00 00 01 00 00 00 ................ 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: kmemleak_alloc+0x4a/0xa0 __kmalloc+0x144/0x260 __register_sysctl_table+0x54/0x5e0 register_sysctl+0x1b/0x20 user_namespace_sysctl_init+0x17/0x34 do_one_initcall+0x52/0x1a0 kernel_init_freeable+0x173/0x200 kernel_init+0xe/0x100 ret_from_fork+0x2c/0x40 The BUG_ON()s are intended to crash so no need to clean up after ourselves on error there. This is also a kernel/ subsys_init() we don't need a respective exit call here as this is never modular, so just white list it. Link: http://lkml.kernel.org/r/20170203211404.31458-1-mcgrof@kernel.org Signed-off-by: Luis R. Rodriguez Cc: Eric W. 
Biederman Cc: Kees Cook Cc: Nikolay Borisov Cc: Serge Hallyn Cc: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/ucount.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/ucount.c b/kernel/ucount.c index f4ac18509ecf..c761cdba2a2d 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -231,11 +231,10 @@ static __init int user_namespace_sysctl_init(void) * properly. */ user_header = register_sysctl("user", empty); + kmemleak_ignore(user_header); BUG_ON(!user_header); BUG_ON(!setup_userns_sysctls(&init_user_ns)); #endif return 0; } subsys_initcall(user_namespace_sysctl_init); - - -- GitLab From d7b2b380c08d7e9b6caf1584656b98c47eb665f2 Mon Sep 17 00:00:00 2001 From: Thanneeru Srinivasulu Date: Tue, 23 May 2017 21:53:48 -0400 Subject: [PATCH 056/786] net: thunderx: Fix PHY autoneg for SGMII QLM mode [ Upstream commit 075ad765ef7541b2860de8408c165a92b78aefa3 ] This patch fixes the case where there is no phydev attached to a LMAC in DT due to non-existance of a PHY driver or due to usage of non-stanadard PHY which doesn't support autoneg. Changes dependeds on firmware to send correct info w.r.t PHY and autoneg capability. This patch also covers a case where a 10G/40G interface is used as a 1G with convertors with Cortina PHY in between. Signed-off-by: Thanneeru Srinivasulu Signed-off-by: Sunil Goutham Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- .../net/ethernet/cavium/thunder/thunder_bgx.c | 108 ++++++++++++++++-- .../net/ethernet/cavium/thunder/thunder_bgx.h | 5 + 2 files changed, 101 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 050e21fbb147..679679a4ccb2 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -31,6 +31,7 @@ struct lmac { u8 lmac_type; u8 lane_to_sds; bool use_training; + bool autoneg; bool link_up; int lmacid; /* ID within BGX */ int lmacid_bd; /* ID on board */ @@ -418,7 +419,17 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) /* power down, reset autoneg, autoneg enable */ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL); cfg &= ~PCS_MRX_CTL_PWR_DN; - cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN); + cfg |= PCS_MRX_CTL_RST_AN; + if (lmac->phydev) { + cfg |= PCS_MRX_CTL_AN_EN; + } else { + /* In scenarios where PHY driver is not present or it's a + * non-standard PHY, FW sets AN_EN to inform Linux driver + * to do auto-neg and link polling or not. 
+ */ + if (cfg & PCS_MRX_CTL_AN_EN) + lmac->autoneg = true; + } bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg); if (lmac->lmac_type == BGX_MODE_QSGMII) { @@ -429,7 +440,7 @@ static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac) return 0; } - if (lmac->lmac_type == BGX_MODE_SGMII) { + if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) { if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS, PCS_MRX_STATUS_AN_CPT, false)) { dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n"); @@ -623,12 +634,71 @@ static int bgx_xaui_check_link(struct lmac *lmac) return -1; } +static void bgx_poll_for_sgmii_link(struct lmac *lmac) +{ + u64 pcs_link, an_result; + u8 speed; + + pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid, + BGX_GMP_PCS_MRX_STATUS); + + /*Link state bit is sticky, read it again*/ + if (!(pcs_link & PCS_MRX_STATUS_LINK)) + pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid, + BGX_GMP_PCS_MRX_STATUS); + + if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS, + PCS_MRX_STATUS_AN_CPT, false)) { + lmac->link_up = false; + lmac->last_speed = SPEED_UNKNOWN; + lmac->last_duplex = DUPLEX_UNKNOWN; + goto next_poll; + } + + lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false; + an_result = bgx_reg_read(lmac->bgx, lmac->lmacid, + BGX_GMP_PCS_ANX_AN_RESULTS); + + speed = (an_result >> 3) & 0x3; + lmac->last_duplex = (an_result >> 1) & 0x1; + switch (speed) { + case 0: + lmac->last_speed = 10; + break; + case 1: + lmac->last_speed = 100; + break; + case 2: + lmac->last_speed = 1000; + break; + default: + lmac->link_up = false; + lmac->last_speed = SPEED_UNKNOWN; + lmac->last_duplex = DUPLEX_UNKNOWN; + break; + } + +next_poll: + + if (lmac->last_link != lmac->link_up) { + if (lmac->link_up) + bgx_sgmii_change_link_state(lmac); + lmac->last_link = lmac->link_up; + } + + queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3); +} + static void bgx_poll_for_link(struct work_struct *work) { struct lmac *lmac; u64 spu_link, smu_link; lmac = container_of(work, struct lmac, dwork.work); + if (lmac->is_sgmii) { + bgx_poll_for_sgmii_link(lmac); + return; + } /* Receive link is latching low. 
Force it high and verify it */ bgx_reg_modify(lmac->bgx, lmac->lmacid, @@ -720,9 +790,21 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) (lmac->lmac_type != BGX_MODE_XLAUI) && (lmac->lmac_type != BGX_MODE_40G_KR) && (lmac->lmac_type != BGX_MODE_10G_KR)) { - if (!lmac->phydev) - return -ENODEV; - + if (!lmac->phydev) { + if (lmac->autoneg) { + bgx_reg_write(bgx, lmacid, + BGX_GMP_PCS_LINKX_TIMER, + PCS_LINKX_TIMER_COUNT); + goto poll; + } else { + /* Default to below link speed and duplex */ + lmac->link_up = true; + lmac->last_speed = 1000; + lmac->last_duplex = 1; + bgx_sgmii_change_link_state(lmac); + return 0; + } + } lmac->phydev->dev_flags = 0; if (phy_connect_direct(&lmac->netdev, lmac->phydev, @@ -731,15 +813,17 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) return -ENODEV; phy_start_aneg(lmac->phydev); - } else { - lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | - WQ_MEM_RECLAIM, 1); - if (!lmac->check_link) - return -ENOMEM; - INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); - queue_delayed_work(lmac->check_link, &lmac->dwork, 0); + return 0; } +poll: + lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND | + WQ_MEM_RECLAIM, 1); + if (!lmac->check_link) + return -ENOMEM; + INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link); + queue_delayed_work(lmac->check_link, &lmac->dwork, 0); + return 0; } diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 01cc7c859131..1143e9575e53 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -144,10 +144,15 @@ #define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14) #define PCS_MRX_CTL_RESET BIT_ULL(15) #define BGX_GMP_PCS_MRX_STATUS 0x30008 +#define PCS_MRX_STATUS_LINK BIT_ULL(2) #define PCS_MRX_STATUS_AN_CPT BIT_ULL(5) +#define BGX_GMP_PCS_ANX_ADV 0x30010 #define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020 +#define BGX_GMP_PCS_LINKX_TIMER 0x30040 +#define PCS_LINKX_TIMER_COUNT 0x1E84 #define BGX_GMP_PCS_SGM_AN_ADV 0x30068 #define BGX_GMP_PCS_MISCX_CTL 0x30078 +#define PCS_MISC_CTL_MODE BIT_ULL(8) #define PCS_MISC_CTL_DISP_EN BIT_ULL(13) #define PCS_MISC_CTL_GMX_ENO BIT_ULL(11) #define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full -- GitLab From d385ed7ad30737aa649034ded83afdbef65217d1 Mon Sep 17 00:00:00 2001 From: Marcus Huewe Date: Tue, 23 May 2017 21:53:49 -0400 Subject: [PATCH 057/786] ipv6: addrconf: fix generation of new temporary addresses [ Upstream commit a11a7f71cac209c7c9cca66eb506e1ebb033a3b3 ] Under some circumstances it is possible that no new temporary addresses will be generated. For instance, addrconf_prefix_rcv_add_addr() indirectly calls ipv6_create_tempaddr(), which creates a tentative temporary address and starts dad. Next, addrconf_prefix_rcv_add_addr() indirectly calls addrconf_verify_rtnl(). Now, assume that the previously created temporary address has the least preferred lifetime among all existing addresses and is still tentative (that is, dad is still running). Hence, the next run of addrconf_verify_rtnl() is performed when the preferred lifetime of the temporary address ends. If dad succeeds before the next run, the temporary address becomes deprecated during the next run, but no new temporary address is generated. In order to fix this, schedule the next addrconf_verify_rtnl() run slightly before the temporary address becomes deprecated, if dad succeeded. Signed-off-by: Marcus Huewe Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/addrconf.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f088a1d9a618..9ed836aa1903 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4004,6 +4004,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) if (bump_id) rt_genid_bump_ipv6(dev_net(dev)); + + /* Make sure that a new temporary address will be created + * before this temporary address becomes deprecated. + */ + if (ifp->flags & IFA_F_TEMPORARY) + addrconf_verify_rtnl(); } static void addrconf_dad_run(struct inet6_dev *idev) -- GitLab From d536202202eef1102a90ece32d91c20dffdbeede Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 23 May 2017 21:53:51 -0400 Subject: [PATCH 058/786] vfio/spapr_tce: Set window when adding additional groups to container [ Upstream commit 930a42ded3fede7ca3acafc9153f4f2d0f56a92c ] If a container already has a group attached, attaching a new group should just program already created IOMMU tables to the hardware via the iommu_table_group_ops::set_window() callback. However commit 6f01cc692a16 ("vfio/spapr: Add a helper to create default DMA window") did not just simplify the code but also removed the set_window() calls in the case of attaching groups to a container which already has tables so it broke VFIO PCI hotplug. This reverts set_window() bits in tce_iommu_take_ownership_ddw(). Fixes: 6f01cc692a16 ("vfio/spapr: Add a helper to create default DMA window") Signed-off-by: Alexey Kardashevskiy Reviewed-by: David Gibson Signed-off-by: Alex Williamson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/vfio/vfio_iommu_spapr_tce.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index c8823578a1b2..79ddcb05d126 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -1246,6 +1246,8 @@ static void tce_iommu_release_ownership_ddw(struct tce_container *container, static long tce_iommu_take_ownership_ddw(struct tce_container *container, struct iommu_table_group *table_group) { + long i, ret = 0; + if (!table_group->ops->create_table || !table_group->ops->set_window || !table_group->ops->release_ownership) { WARN_ON_ONCE(1); @@ -1254,7 +1256,27 @@ static long tce_iommu_take_ownership_ddw(struct tce_container *container, table_group->ops->take_ownership(table_group); + /* Set all windows to the new group */ + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) { + struct iommu_table *tbl = container->tables[i]; + + if (!tbl) + continue; + + ret = table_group->ops->set_window(table_group, i, tbl); + if (ret) + goto release_exit; + } + return 0; + +release_exit: + for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) + table_group->ops->unset_window(table_group, i); + + table_group->ops->release_ownership(table_group); + + return ret; } static int tce_iommu_attach_group(void *iommu_data, -- GitLab From 1cadd394bbf94976bcffeab54e94f1d083e2d390 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Tue, 23 May 2017 21:53:52 -0400 Subject: [PATCH 059/786] ipv6: Fix IPv6 packet loss in scenarios involving roaming + snooping switches MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a088d1d73a4bcfd7bc482f8d08375b9b665dc3e5 ] When for instance a mobile Linux device roams from one access point to another with 
both APs sharing the same broadcast domain and a multicast snooping switch in between: 1) (c) <~~~> (AP1) <--[SSW]--> (AP2) 2) (AP1) <--[SSW]--> (AP2) <~~~> (c) Then currently IPv6 multicast packets will get lost for (c) until an MLD Querier sends its next query message. The packet loss occurs because upon roaming the Linux host so far stayed silent regarding MLD and the snooping switch will therefore be unaware of the multicast topology change for a while. This patch fixes this by always resending MLD reports when an interface change happens, for instance from NO-CARRIER to CARRIER state. Signed-off-by: Linus Lüssing Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/addrconf.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 9ed836aa1903..045738319e8b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3387,9 +3387,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, } if (idev) { - if (idev->if_flags & IF_READY) - /* device is already configured. */ + if (idev->if_flags & IF_READY) { + /* device is already configured - + * but resend MLD reports, we might + * have roamed and need to update + * multicast snooping switches + */ + ipv6_mc_up(idev); break; + } idev->if_flags |= IF_READY; } -- GitLab From 40f6d71c0a0900c2b76cab421ee4ccfc6f425a35 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 23 May 2017 21:53:53 -0400 Subject: [PATCH 060/786] ARM: defconfigs: make NF_CT_PROTO_SCTP and NF_CT_PROTO_UDPLITE built-in [ Upstream commit 5aff1d245e8cc1ab5c4517d916edaed9e3f7f973 ] The symbols can no longer be used as loadable modules, leading to a harmless Kconfig warning: arch/arm/configs/imote2_defconfig:60:warning: symbol value 'm' invalid for NF_CT_PROTO_UDPLITE arch/arm/configs/imote2_defconfig:59:warning: symbol value 'm' invalid for NF_CT_PROTO_SCTP arch/arm/configs/ezx_defconfig:68:warning: symbol value 'm' invalid for NF_CT_PROTO_UDPLITE arch/arm/configs/ezx_defconfig:67:warning: symbol value 'm' invalid for NF_CT_PROTO_SCTP Let's make them built-in. Signed-off-by: Arnd Bergmann Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/configs/ezx_defconfig | 4 ++-- arch/arm/configs/imote2_defconfig | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig index ea316c4b890e..d3f1768840e2 100644 --- a/arch/arm/configs/ezx_defconfig +++ b/arch/arm/configs/ezx_defconfig @@ -64,8 +64,8 @@ CONFIG_NETFILTER=y CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_SCTP=m -CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig index 18e59feaa307..7f479cdb3479 100644 --- a/arch/arm/configs/imote2_defconfig +++ b/arch/arm/configs/imote2_defconfig @@ -56,8 +56,8 @@ CONFIG_NETFILTER=y CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_SCTP=m -CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m CONFIG_NF_CONNTRACK_H323=m -- GitLab From 36d9659cde0dcb48c711d3dd460467d76c0b84eb Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Tue, 23 May 2017 21:53:54 -0400 Subject: [PATCH 061/786] PM / runtime: Avoid false-positive warnings from might_sleep_if() [ Upstream commit a9306a63631493afc75893a4ac405d4e1cbae6aa ] The might_sleep_if() assertions in __pm_runtime_idle(), __pm_runtime_suspend() and __pm_runtime_resume() may generate false-positive warnings in some situations. For example, that happens if a nested pm_runtime_get_sync()/pm_runtime_put() pair is executed with disabled interrupts within an outer pm_runtime_get_sync()/pm_runtime_put() section for the same device. [Generally, pm_runtime_get_sync() may sleep, so it should not be called with disabled interrupts, but in this particular case the previous pm_runtime_get_sync() guarantees that the device will not be suspended, so the inner pm_runtime_get_sync() will return immediately after incrementing the device's usage counter.] That started to happen in the i915 driver in 4.10-rc, leading to the following splat: BUG: sleeping function called from invalid context at drivers/base/power/runtime.c:1032 in_atomic(): 1, irqs_disabled(): 0, pid: 1500, name: Xorg 1 lock held by Xorg/1500: #0: (&dev->struct_mutex){+.+.+.}, at: [] i915_mutex_lock_interruptible+0x43/0x140 [i915] CPU: 0 PID: 1500 Comm: Xorg Not tainted Call Trace: dump_stack+0x85/0xc2 ___might_sleep+0x196/0x260 __might_sleep+0x53/0xb0 __pm_runtime_resume+0x7a/0x90 intel_runtime_pm_get+0x25/0x90 [i915] aliasing_gtt_bind_vma+0xaa/0xf0 [i915] i915_vma_bind+0xaf/0x1e0 [i915] i915_gem_execbuffer_relocate_entry+0x513/0x6f0 [i915] i915_gem_execbuffer_relocate_vma.isra.34+0x188/0x250 [i915] ? trace_hardirqs_on+0xd/0x10 ? i915_gem_execbuffer_reserve_vma.isra.31+0x152/0x1f0 [i915] ? i915_gem_execbuffer_reserve.isra.32+0x372/0x3a0 [i915] i915_gem_do_execbuffer.isra.38+0xa70/0x1a40 [i915] ? __might_fault+0x4e/0xb0 i915_gem_execbuffer2+0xc5/0x260 [i915] ? __might_fault+0x4e/0xb0 drm_ioctl+0x206/0x450 [drm] ? i915_gem_execbuffer+0x340/0x340 [i915] ? __fget+0x5/0x200 do_vfs_ioctl+0x91/0x6f0 ? __fget+0x111/0x200 ? __fget+0x5/0x200 SyS_ioctl+0x79/0x90 entry_SYSCALL_64_fastpath+0x23/0xc6 even though the code triggering it is correct. Unfortunately, the might_sleep_if() assertions in question are too coarse-grained to cover such cases correctly, so make them a bit less sensitive in order to avoid the false-positives. Reported-and-tested-by: Sedat Dilek Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/base/power/runtime.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 23f3b95a1158..147d2e3678aa 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -889,13 +889,13 @@ int __pm_runtime_idle(struct device *dev, int rpmflags) unsigned long flags; int retval; - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); - if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; } + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_idle(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); @@ -921,13 +921,13 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags) unsigned long flags; int retval; - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); - if (rpmflags & RPM_GET_PUT) { if (!atomic_dec_and_test(&dev->power.usage_count)) return 0; } + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + spin_lock_irqsave(&dev->power.lock, flags); retval = rpm_suspend(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); @@ -952,7 +952,8 @@ int __pm_runtime_resume(struct device *dev, int rpmflags) unsigned long flags; int retval; - might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); + might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && + dev->power.runtime_status != RPM_ACTIVE); if (rpmflags & RPM_GET_PUT) atomic_inc(&dev->power.usage_count); -- GitLab From 3ec4141c68de07ec94e5781bf4a17f48e644fc3a Mon Sep 17 00:00:00 2001 From: David Lin Date: Tue, 23 May 2017 21:53:55 -0400 Subject: [PATCH 062/786] jump label: pass kbuild_cflags when checking for asm goto support [ Upstream commit 35f860f9ba6aac56cc38e8b18916d833a83f1157 ] Some versions of ARM GCC compiler such as Android toolchain throws in a '-fpic' flag by default. This causes the gcc-goto check script to fail although some config would have '-fno-pic' flag in the KBUILD_CFLAGS. This patch passes the KBUILD_CFLAGS to the check script so that the script does not rely on the default config from different compilers. Link: http://lkml.kernel.org/r/20170120234329.78868-1-dtwlin@google.com Signed-off-by: David Lin Acked-by: Steven Rostedt Cc: Michal Marek Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3d8781997968..30f72a08c6da 100644 --- a/Makefile +++ b/Makefile @@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) KBUILD_ARFLAGS := $(call ar-option,D) # check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y) +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO endif -- GitLab From 8936b74fd339be61f85c2ec74e311093d855bb07 Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Tue, 23 May 2017 21:53:56 -0400 Subject: [PATCH 063/786] shmem: fix sleeping from atomic context [ Upstream commit 253fd0f02040a19c6fe80e4171659fa3482a422d ] Syzkaller fuzzer managed to trigger this: BUG: sleeping function called from invalid context at mm/shmem.c:852 in_atomic(): 1, irqs_disabled(): 0, pid: 529, name: khugepaged 3 locks held by khugepaged/529: #0: (shrinker_rwsem){++++..}, at: [] shrink_slab.part.59+0x121/0xd30 mm/vmscan.c:451 #1: (&type->s_umount_key#29){++++..}, at: [] trylock_super+0x20/0x100 fs/super.c:392 #2: (&(&sbinfo->shrinklist_lock)->rlock){+.+.-.}, at: [] spin_lock include/linux/spinlock.h:302 [inline] #2: (&(&sbinfo->shrinklist_lock)->rlock){+.+.-.}, at: [] shmem_unused_huge_shrink+0x28e/0x1490 mm/shmem.c:427 CPU: 2 PID: 529 Comm: khugepaged Not tainted 4.10.0-rc5+ #201 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: shmem_undo_range+0xb20/0x2710 mm/shmem.c:852 shmem_truncate_range+0x27/0xa0 mm/shmem.c:939 shmem_evict_inode+0x35f/0xca0 mm/shmem.c:1030 evict+0x46e/0x980 fs/inode.c:553 iput_final fs/inode.c:1515 [inline] iput+0x589/0xb20 fs/inode.c:1542 shmem_unused_huge_shrink+0xbad/0x1490 mm/shmem.c:446 shmem_unused_huge_scan+0x10c/0x170 mm/shmem.c:512 super_cache_scan+0x376/0x450 fs/super.c:106 do_shrink_slab mm/vmscan.c:378 [inline] shrink_slab.part.59+0x543/0xd30 mm/vmscan.c:481 shrink_slab mm/vmscan.c:2592 [inline] shrink_node+0x2c7/0x870 mm/vmscan.c:2592 shrink_zones mm/vmscan.c:2734 [inline] do_try_to_free_pages+0x369/0xc80 mm/vmscan.c:2776 try_to_free_pages+0x3c6/0x900 mm/vmscan.c:2982 __perform_reclaim mm/page_alloc.c:3301 [inline] __alloc_pages_direct_reclaim mm/page_alloc.c:3322 [inline] __alloc_pages_slowpath+0xa24/0x1c30 mm/page_alloc.c:3683 __alloc_pages_nodemask+0x544/0xae0 mm/page_alloc.c:3848 __alloc_pages include/linux/gfp.h:426 [inline] __alloc_pages_node include/linux/gfp.h:439 [inline] khugepaged_alloc_page+0xc2/0x1b0 mm/khugepaged.c:750 collapse_huge_page+0x182/0x1fe0 mm/khugepaged.c:955 khugepaged_scan_pmd+0xfdf/0x12a0 mm/khugepaged.c:1208 khugepaged_scan_mm_slot mm/khugepaged.c:1727 [inline] khugepaged_do_scan mm/khugepaged.c:1808 [inline] khugepaged+0xe9b/0x1590 mm/khugepaged.c:1853 kthread+0x326/0x3f0 kernel/kthread.c:227 ret_from_fork+0x31/0x40 arch/x86/entry/entry_64.S:430 The iput() from atomic context was a bad idea: if after igrab() somebody else calls iput() and we left with the last inode reference, our iput() would lead to inode eviction and therefore sleeping. This patch should fix the situation. Link: http://lkml.kernel.org/r/20170131093141.GA15899@node.shutemov.name Signed-off-by: Kirill A. 
Shutemov Reported-by: Dmitry Vyukov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- mm/shmem.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 9d32e1cb9f38..d99cfb6eb03a 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -412,6 +412,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, struct shrink_control *sc, unsigned long nr_to_split) { LIST_HEAD(list), *pos, *next; + LIST_HEAD(to_remove); struct inode *inode; struct shmem_inode_info *info; struct page *page; @@ -438,9 +439,8 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, /* Check if there's anything to gain */ if (round_up(inode->i_size, PAGE_SIZE) == round_up(inode->i_size, HPAGE_PMD_SIZE)) { - list_del_init(&info->shrinklist); + list_move(&info->shrinklist, &to_remove); removed++; - iput(inode); goto next; } @@ -451,6 +451,13 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, } spin_unlock(&sbinfo->shrinklist_lock); + list_for_each_safe(pos, next, &to_remove) { + info = list_entry(pos, struct shmem_inode_info, shrinklist); + inode = &info->vfs_inode; + list_del_init(&info->shrinklist); + iput(inode); + } + list_for_each_safe(pos, next, &list) { int ret; -- GitLab From 5331baaeb7e6a7448c5e2d1e48e91260cc99b710 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 23 May 2017 21:53:57 -0400 Subject: [PATCH 064/786] kasan: respect /proc/sys/kernel/traceoff_on_warning [ Upstream commit 4f40c6e5627ea73b4e7c615c59631f38cc880885 ] After much waiting I finally reproduced a KASAN issue, only to find my trace-buffer empty of useful information because it got spooled out :/ Make kasan_report honour the /proc/sys/kernel/traceoff_on_warning interface. Link: http://lkml.kernel.org/r/20170125164106.3514-1-aryabinin@virtuozzo.com Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Andrey Ryabinin Acked-by: Alexander Potapenko Cc: Dmitry Vyukov Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- mm/kasan/report.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 073325aedc68..8ca412aebcf1 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -13,6 +13,7 @@ * */ +#include #include #include #include @@ -298,6 +299,8 @@ void kasan_report(unsigned long addr, size_t size, if (likely(!kasan_report_enabled())) return; + disable_trace_on_warning(); + info.access_addr = (void *)addr; info.access_size = size; info.is_write = is_write; -- GitLab From 8de6ea44af5c55f61848ca3dab6f0b6642ef4c33 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 2 Feb 2017 18:05:26 +0000 Subject: [PATCH 065/786] log2: make order_base_2() behave correctly on const input value zero commit 29905b52fad0854351f57bab867647e4982285bf upstream. The function order_base_2() is defined (according to the comment block) as returning zero on input zero, but subsequently passes the input into roundup_pow_of_two(), which is explicitly undefined for input zero. This has gone unnoticed until now, but optimization passes in GCC 7 may produce constant folded function instances where a constant value of zero is passed into order_base_2(), resulting in link errors against the deliberately undefined '____ilog2_NaN'. So update order_base_2() to adhere to its own documented interface. 
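After the change the macro's intent can be stated as: order_base_2(n) is 0 for n equal to 0 or 1, and ilog2(n - 1) + 1 otherwise, which keeps the constant-zero case away from roundup_pow_of_two(). A small userspace sketch of that behaviour, assuming GCC/Clang builtins as a stand-in for the kernel's ilog2() (not the kernel macro itself):

#include <assert.h>

/* ilog2() stand-in for a non-zero unsigned long. */
static int ilog2_sketch(unsigned long n)
{
	return (int)(sizeof(unsigned long) * 8 - 1) - __builtin_clzl(n);
}

/* order_base_2(n): smallest k with 2^k >= n, defined as 0 for n <= 1. */
static int order_base_2_sketch(unsigned long n)
{
	return n > 1 ? ilog2_sketch(n - 1) + 1 : 0;
}

int main(void)
{
	assert(order_base_2_sketch(0) == 0);	/* previously undefined via roundup_pow_of_two(0) */
	assert(order_base_2_sketch(1) == 0);
	assert(order_base_2_sketch(4) == 2);
	assert(order_base_2_sketch(5) == 3);
	return 0;
}
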
[ See http://marc.info/?l=linux-kernel&m=147672952517795&w=2 and follow-up discussion for more background. The gcc "optimization pass" is really just broken, but now the GCC trunk problem seems to have escaped out of just specially built daily images, so we need to work around it in mainline. - Linus ] Signed-off-by: Ard Biesheuvel Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/linux/log2.h | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/include/linux/log2.h b/include/linux/log2.h index f38fae23bdac..c373295f359f 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -194,6 +194,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n) * ... and so on. */ -#define order_base_2(n) ilog2(roundup_pow_of_two(n)) +static inline __attribute_const__ +int __order_base_2(unsigned long n) +{ + return n > 1 ? ilog2(n - 1) + 1 : 0; +} +#define order_base_2(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0 || (n) == 1) ? 0 : \ + ilog2((n) - 1) + 1) : \ + __order_base_2(n) \ +) #endif /* _LINUX_LOG2_H */ -- GitLab From 2fba4f5b70f304fd05f257bc0cbf0a236e144ac5 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Tue, 23 May 2017 21:53:59 -0400 Subject: [PATCH 066/786] ethtool: do not vzalloc(0) on registers dump [ Upstream commit 3808d34838184fd29088d6b3a364ba2f1c018fb6 ] If ->get_regs_len() callback return 0, we allocate 0 bytes of memory, what print ugly warning in dmesg, which can be found further below. This happen on mac80211 devices where ieee80211_get_regs_len() just return 0 and driver only fills ethtool_regs structure and actually do not provide any dump. However I assume this can happen on other drivers i.e. when for some devices driver provide regs dump and for others do not. Hence preventing to to print warning in ethtool code seems to be reasonable. ethtool: vmalloc: allocation failure: 0 bytes, mode:0x24080c2(GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO) Call Trace: [] dump_stack+0x63/0x8c [] warn_alloc+0x13f/0x170 [] __vmalloc_node_range+0x1e6/0x2c0 [] vzalloc+0x54/0x60 [] dev_ethtool+0xb4c/0x1b30 [] dev_ioctl+0x181/0x520 [] sock_do_ioctl+0x42/0x50 Mem-Info: active_anon:435809 inactive_anon:173951 isolated_anon:0 active_file:835822 inactive_file:196932 isolated_file:0 unevictable:0 dirty:8 writeback:0 unstable:0 slab_reclaimable:157732 slab_unreclaimable:10022 mapped:83042 shmem:306356 pagetables:9507 bounce:0 free:130041 free_pcp:1080 free_cma:0 Node 0 active_anon:1743236kB inactive_anon:695804kB active_file:3343288kB inactive_file:787728kB unevictable:0kB isolated(anon):0kB isolated(file):0kB mapped:332168kB dirty:32kB writeback:0kB shmem:0kB shmem_thp: 0kB shmem_pmdmapped: 0kB anon_thp: 1225424kB writeback_tmp:0kB unstable:0kB pages_scanned:0 all_unreclaimable? 
no Node 0 DMA free:15900kB min:136kB low:168kB high:200kB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB writepending:0kB present:15984kB managed:15900kB mlocked:0kB slab_reclaimable:0kB slab_unreclaimable:0kB kernel_stack:0kB pagetables:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB lowmem_reserve[]: 0 3187 7643 7643 Node 0 DMA32 free:419732kB min:28124kB low:35152kB high:42180kB active_anon:541180kB inactive_anon:248988kB active_file:1466388kB inactive_file:389632kB unevictable:0kB writepending:0kB present:3370280kB managed:3290932kB mlocked:0kB slab_reclaimable:217184kB slab_unreclaimable:4180kB kernel_stack:160kB pagetables:984kB bounce:0kB free_pcp:2236kB local_pcp:660kB free_cma:0kB lowmem_reserve[]: 0 0 4456 4456 Signed-off-by: Stanislaw Gruszka Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/core/ethtool.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 047a1752ece1..072c1f4998c9 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1394,9 +1394,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) if (regs.len > reglen) regs.len = reglen; - regbuf = vzalloc(reglen); - if (reglen && !regbuf) - return -ENOMEM; + regbuf = NULL; + if (reglen) { + regbuf = vzalloc(reglen); + if (!regbuf) + return -ENOMEM; + } ops->get_regs(dev, ®s, regbuf); -- GitLab From ea14fabd43a5e52739d55fda2a95d7218fd700ee Mon Sep 17 00:00:00 2001 From: Mao Wenan Date: Tue, 23 May 2017 21:54:00 -0400 Subject: [PATCH 067/786] net: phy: Fix lack of reference count on PHY driver [ Upstream commit cafe8df8b9bc9aa3dffa827c1a6757c6cd36f657 ] There is currently no reference count being held on the PHY driver, which makes it possible to remove the PHY driver module while the PHY state machine is running and polling the PHY. This could cause crashes similar to this one to show up: [ 43.361162] BUG: unable to handle kernel NULL pointer dereference at 0000000000000140 [ 43.361162] IP: phy_state_machine+0x32/0x490 [ 43.361162] PGD 59dc067 [ 43.361162] PUD 0 [ 43.361162] [ 43.361162] Oops: 0000 [#1] SMP [ 43.361162] Modules linked in: dsa_loop [last unloaded: broadcom] [ 43.361162] CPU: 0 PID: 1299 Comm: kworker/0:3 Not tainted 4.10.0-rc5+ #415 [ 43.361162] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Ubuntu-1.8.2-1ubuntu2 04/01/2014 [ 43.361162] Workqueue: events_power_efficient phy_state_machine [ 43.361162] task: ffff880006782b80 task.stack: ffffc90000184000 [ 43.361162] RIP: 0010:phy_state_machine+0x32/0x490 [ 43.361162] RSP: 0018:ffffc90000187e18 EFLAGS: 00000246 [ 43.361162] RAX: 0000000000000000 RBX: ffff8800059e53c0 RCX: ffff880006a15c60 [ 43.361162] RDX: ffff880006782b80 RSI: 0000000000000000 RDI: ffff8800059e5428 [ 43.361162] RBP: ffffc90000187e48 R08: ffff880006a15c40 R09: 0000000000000000 [ 43.361162] R10: 0000000000000000 R11: 0000000000000000 R12: ffff8800059e5428 [ 43.361162] R13: ffff8800059e5000 R14: 0000000000000000 R15: ffff880006a15c40 [ 43.361162] FS: 0000000000000000(0000) GS:ffff880006a00000(0000) knlGS:0000000000000000 [ 43.361162] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 43.361162] CR2: 0000000000000140 CR3: 0000000005979000 CR4: 00000000000006f0 [ 43.361162] Call Trace: [ 43.361162] process_one_work+0x1b4/0x3e0 [ 43.361162] worker_thread+0x43/0x4d0 [ 43.361162] ? __schedule+0x17f/0x4e0 [ 43.361162] kthread+0xf7/0x130 [ 43.361162] ? process_one_work+0x3e0/0x3e0 [ 43.361162] ? 
kthread_create_on_node+0x40/0x40 [ 43.361162] ret_from_fork+0x29/0x40 [ 43.361162] Code: 56 41 55 41 54 4c 8d 67 68 53 4c 8d af 40 fc ff ff 48 89 fb 4c 89 e7 48 83 ec 08 e8 c9 9d 27 00 48 8b 83 60 ff ff ff 44 8b 73 98 <48> 8b 90 40 01 00 00 44 89 f0 48 85 d2 74 08 4c 89 ef ff d2 8b Keep references on the PHY driver module right before we are going to utilize it in phy_attach_direct(), and conversely when we don't use it anymore in phy_detach(). Signed-off-by: Mao Wenan [florian: rebase, rework commit message] Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/phy_device.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c4ceb082e970..67571f9627e5 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -872,6 +872,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, return -EIO; } + if (!try_module_get(d->driver->owner)) { + dev_err(&dev->dev, "failed to get the device driver module\n"); + return -EIO; + } + get_device(d); /* Assume that if there is no driver, that it doesn't @@ -927,6 +932,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, error: put_device(d); + module_put(d->driver->owner); if (ndev_owner != bus->owner) module_put(bus->owner); return err; @@ -1007,6 +1013,7 @@ void phy_detach(struct phy_device *phydev) bus = phydev->mdio.bus; put_device(&phydev->mdio.dev); + module_put(phydev->mdio.dev.driver->owner); if (ndev_owner != bus->owner) module_put(bus->owner); } -- GitLab From 3a6ebd3f963c9d2fda47714562606a495fee0b2c Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 8 Feb 2017 19:05:26 -0800 Subject: [PATCH 068/786] net: phy: Fix PHY module checks and NULL deref in phy_attach_direct() [ Upstream commit 6d9f66ac7fec2a6ccd649e5909806dfe36f1fc25 ] The Generic PHY drivers gets assigned after we checked that the current PHY driver is NULL, so we need to check a few things before we can safely dereference d->driver. This would be causing a NULL deference to occur when a system binds to the Generic PHY driver. Update phy_attach_direct() to do the following: - grab the driver module reference after we have assigned the Generic PHY drivers accordingly, and remember we came from the generic PHY path - update the error path to clean up the module reference in case the Generic PHY probe function fails - split the error path involving phy_detacht() to avoid double free/put since phy_detach() does all the clean up - finally, have phy_detach() drop the module reference count before we call device_release_driver() for the Generic PHY driver case Fixes: cafe8df8b9bc ("net: phy: Fix lack of reference count on PHY driver") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/phy_device.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 67571f9627e5..14d57d0d1c04 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -860,6 +860,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, struct module *ndev_owner = dev->dev.parent->driver->owner; struct mii_bus *bus = phydev->mdio.bus; struct device *d = &phydev->mdio.dev; + bool using_genphy = false; int err; /* For Ethernet device drivers that register their own MDIO bus, we @@ -872,11 +873,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, return -EIO; } - if (!try_module_get(d->driver->owner)) { - dev_err(&dev->dev, "failed to get the device driver module\n"); - return -EIO; - } - get_device(d); /* Assume that if there is no driver, that it doesn't @@ -890,12 +886,22 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, d->driver = &genphy_driver[GENPHY_DRV_1G].mdiodrv.driver; + using_genphy = true; + } + + if (!try_module_get(d->driver->owner)) { + dev_err(&dev->dev, "failed to get the device driver module\n"); + err = -EIO; + goto error_put_device; + } + + if (using_genphy) { err = d->driver->probe(d); if (err >= 0) err = device_bind_driver(d); if (err) - goto error; + goto error_module_put; } if (phydev->attached_dev) { @@ -931,8 +937,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, return err; error: - put_device(d); + /* phy_detach() does all of the cleanup below */ + phy_detach(phydev); + return err; + +error_module_put: module_put(d->driver->owner); +error_put_device: + put_device(d); if (ndev_owner != bus->owner) module_put(bus->owner); return err; @@ -993,6 +1005,8 @@ void phy_detach(struct phy_device *phydev) phydev->attached_dev = NULL; phy_suspend(phydev); + module_put(phydev->mdio.dev.driver->owner); + /* If the device had no specific driver before (i.e. - it * was using the generic driver), we unbind the device * from the generic driver so that there's a chance a @@ -1013,7 +1027,6 @@ void phy_detach(struct phy_device *phydev) bus = phydev->mdio.bus; put_device(&phydev->mdio.dev); - module_put(phydev->mdio.dev.driver->owner); if (ndev_owner != bus->owner) module_put(bus->owner); } -- GitLab From f4d2d05ffb8b3c4286feeaade7f27a3b18c59d18 Mon Sep 17 00:00:00 2001 From: Dimitris Michailidis Date: Tue, 23 May 2017 21:54:02 -0400 Subject: [PATCH 069/786] net: fix ndo_features_check/ndo_fix_features comment ordering [ Upstream commit 1a2a14444d32b89b28116daea86f63ced1716668 ] Commit cdba756f5803a2 ("net: move ndo_features_check() close to ndo_start_xmit()") inadvertently moved the doc comment for .ndo_fix_features instead of .ndo_features_check. Fix the comment ordering. Fixes: cdba756f5803a2 ("net: move ndo_features_check() close to ndo_start_xmit()") Signed-off-by: Dimitris Michailidis Acked-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/linux/netdevice.h | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index bb9b102c15cd..780e7171f548 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -865,11 +865,15 @@ struct netdev_xdp { * of useless work if you return NETDEV_TX_BUSY. 
* Required; cannot be NULL. * - * netdev_features_t (*ndo_fix_features)(struct net_device *dev, - * netdev_features_t features); - * Adjusts the requested feature flags according to device-specific - * constraints, and returns the resulting flags. Must not modify - * the device state. + * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, + * struct net_device *dev + * netdev_features_t features); + * Called by core transmit path to determine if device is capable of + * performing offload operations on a given packet. This is to give + * the device an opportunity to implement any restrictions that cannot + * be otherwise expressed by feature flags. The check is called with + * the set of features that the stack has calculated and it returns + * those the driver believes to be appropriate. * * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, * void *accel_priv, select_queue_fallback_t fallback); @@ -1027,6 +1031,12 @@ struct netdev_xdp { * Called to release previously enslaved netdev. * * Feature/offload setting functions. + * netdev_features_t (*ndo_fix_features)(struct net_device *dev, + * netdev_features_t features); + * Adjusts the requested feature flags according to device-specific + * constraints, and returns the resulting flags. Must not modify + * the device state. + * * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); * Called to update device configuration to new features. Passed * feature set might be less than what was returned by ndo_fix_features()). @@ -1099,15 +1109,6 @@ struct netdev_xdp { * Callback to use for xmit over the accelerated station. This * is used in place of ndo_start_xmit on accelerated net * devices. - * netdev_features_t (*ndo_features_check)(struct sk_buff *skb, - * struct net_device *dev - * netdev_features_t features); - * Called by core transmit path to determine if device is capable of - * performing offload operations on a given packet. This is to give - * the device an opportunity to implement any restrictions that cannot - * be otherwise expressed by feature flags. The check is called with - * the set of features that the stack has calculated and it returns - * those the driver believes to be appropriate. * int (*ndo_set_tx_maxrate)(struct net_device *dev, * int queue_index, u32 maxrate); * Called when a user wants to set a max-rate limitation of specific -- GitLab From 11696dcea28222967af5ed8105695ec1751fe061 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 23 May 2017 21:54:04 -0400 Subject: [PATCH 070/786] fscache: Fix dead object requeue [ Upstream commit e26bfebdfc0d212d366de9990a096665d5c0209a ] Under some circumstances, an fscache object can become queued such that it fscache_object_work_func() can be called once the object is in the OBJECT_DEAD state. This results in the kernel oopsing when it tries to invoke the handler for the state (which is hard coded to 0x2). The way this comes about is something like the following: (1) The object dispatcher is processing a work state for an object. This is done in workqueue context. (2) An out-of-band event comes in that isn't masked, causing the object to be queued, say EV_KILL. (3) The object dispatcher finishes processing the current work state on that object and then sees there's another event to process, so, without returning to the workqueue core, it processes that event too. It then follows the chain of events that initiates until we reach OBJECT_DEAD without going through a wait state (such as WAIT_FOR_CLEARANCE). 
At this point, object->events may be 0, object->event_mask will be 0 and oob_event_mask will be 0. (4) The object dispatcher returns to the workqueue processor, and in due course, this sees that the object's work item is still queued and invokes it again. (5) The current state is a work state (OBJECT_DEAD), so the dispatcher jumps to it - resulting in an OOPS. When I'm seeing this, the work state in (1) appears to have been either LOOK_UP_OBJECT or CREATE_OBJECT (object->oob_table is fscache_osm_lookup_oob). The window for (2) is very small: (A) object->event_mask is cleared whilst the event dispatch process is underway - though there's no memory barrier to force this to the top of the function. The window, therefore is from the time the object was selected by the workqueue processor and made requeueable to the time the mask was cleared. (B) fscache_raise_event() will only queue the object if it manages to set the event bit and the corresponding event_mask bit was set. The enqueuement is then deferred slightly whilst we get a ref on the object and get the per-CPU variable for workqueue congestion. This slight deferral slightly increases the probability by allowing extra time for the workqueue to make the item requeueable. Handle this by giving the dead state a processor function and checking the for the dead state address rather than seeing if the processor function is address 0x2. The dead state processor function can then set a flag to indicate that it's occurred and give a warning if it occurs more than once per object. If this race occurs, an oops similar to the following is seen (note the RIP value): BUG: unable to handle kernel NULL pointer dereference at 0000000000000002 IP: [<0000000000000002>] 0x1 PGD 0 Oops: 0010 [#1] SMP Modules linked in: ... CPU: 17 PID: 16077 Comm: kworker/u48:9 Not tainted 3.10.0-327.18.2.el7.x86_64 #1 Hardware name: HP ProLiant DL380 Gen9/ProLiant DL380 Gen9, BIOS P89 12/27/2015 Workqueue: fscache_object fscache_object_work_func [fscache] task: ffff880302b63980 ti: ffff880717544000 task.ti: ffff880717544000 RIP: 0010:[<0000000000000002>] [<0000000000000002>] 0x1 RSP: 0018:ffff880717547df8 EFLAGS: 00010202 RAX: ffffffffa0368640 RBX: ffff880edf7a4480 RCX: dead000000200200 RDX: 0000000000000002 RSI: 00000000ffffffff RDI: ffff880edf7a4480 RBP: ffff880717547e18 R08: 0000000000000000 R09: dfc40a25cb3a4510 R10: dfc40a25cb3a4510 R11: 0000000000000400 R12: 0000000000000000 R13: ffff880edf7a4510 R14: ffff8817f6153400 R15: 0000000000000600 FS: 0000000000000000(0000) GS:ffff88181f420000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000002 CR3: 000000000194a000 CR4: 00000000001407e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Stack: ffffffffa0363695 ffff880edf7a4510 ffff88093f16f900 ffff8817faa4ec00 ffff880717547e60 ffffffff8109d5db 00000000faa4ec18 0000000000000000 ffff8817faa4ec18 ffff88093f16f930 ffff880302b63980 ffff88093f16f900 Call Trace: [] ? fscache_object_work_func+0xa5/0x200 [fscache] [] process_one_work+0x17b/0x470 [] worker_thread+0x21c/0x400 [] ? rescuer_thread+0x400/0x400 [] kthread+0xcf/0xe0 [] ? kthread_create_on_node+0x140/0x140 [] ret_from_fork+0x58/0x90 [] ? 
kthread_create_on_node+0x140/0x140 Signed-off-by: David Howells Acked-by: Jeremy McNicoll Tested-by: Frank Sorenson Tested-by: Benjamin Coddington Reviewed-by: Benjamin Coddington Signed-off-by: Al Viro Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/fscache/object.c | 26 ++++++++++++++++++++++++-- include/linux/fscache-cache.h | 1 + 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 9e792e30f4db..f3a024fcff81 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -30,6 +30,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object static const struct fscache_state *fscache_object_available(struct fscache_object *, int); static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int); static const struct fscache_state *fscache_update_object(struct fscache_object *, int); +static const struct fscache_state *fscache_object_dead(struct fscache_object *, int); #define __STATE_NAME(n) fscache_osm_##n #define STATE(n) (&__STATE_NAME(n)) @@ -91,7 +92,7 @@ static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure); static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object); static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents); static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object); -static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL); +static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead); static WAIT_STATE(WAIT_FOR_INIT, "?INI", TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD)); @@ -229,6 +230,10 @@ static void fscache_object_sm_dispatcher(struct fscache_object *object) event = -1; if (new_state == NO_TRANSIT) { _debug("{OBJ%x} %s notrans", object->debug_id, state->name); + if (unlikely(state == STATE(OBJECT_DEAD))) { + _leave(" [dead]"); + return; + } fscache_enqueue_object(object); event_mask = object->oob_event_mask; goto unmask_events; @@ -239,7 +244,7 @@ static void fscache_object_sm_dispatcher(struct fscache_object *object) object->state = state = new_state; if (state->work) { - if (unlikely(state->work == ((void *)2UL))) { + if (unlikely(state == STATE(OBJECT_DEAD))) { _leave(" [dead]"); return; } @@ -1077,3 +1082,20 @@ void fscache_object_mark_killed(struct fscache_object *object, } } EXPORT_SYMBOL(fscache_object_mark_killed); + +/* + * The object is dead. We can get here if an object gets queued by an event + * that would lead to its death (such as EV_KILL) when the dispatcher is + * already running (and so can be requeued) but hasn't yet cleared the event + * mask. 
+ */ +static const struct fscache_state *fscache_object_dead(struct fscache_object *object, + int event) +{ + if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD, + &object->flags)) + return NO_TRANSIT; + + WARN(true, "FS-Cache object redispatched after death"); + return NO_TRANSIT; +} diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 13ba552e6c09..4c467ef50159 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -360,6 +360,7 @@ struct fscache_object { #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ +#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */ struct list_head cache_link; /* link in cache->object_list */ struct hlist_node cookie_link; /* link in cookie->backing_objects */ -- GitLab From 34f1a4626badd6d88f07f222378ba304330be717 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 23 May 2017 21:54:05 -0400 Subject: [PATCH 071/786] fscache: Clear outstanding writes when disabling a cookie [ Upstream commit 6bdded59c8933940ac7e5b416448276ac89d1144 ] fscache_disable_cookie() needs to clear the outstanding writes on the cookie it's disabling because they cannot be completed after. Without this, fscache_nfs_open_file() gets stuck because it disables the cookie when the file is opened for writing but can't uncache the pages till afterwards - otherwise there's a race between the open routine and anyone who already has it open R/O and is still reading from it. Looking in /proc/pid/stack of the offending process shows: [] __fscache_wait_on_page_write+0x82/0x9b [fscache] [] __fscache_uncache_all_inode_pages+0x91/0xe1 [fscache] [] nfs_fscache_open_file+0x59/0x9e [nfs] [] nfs4_file_open+0x17f/0x1b8 [nfsv4] [] do_dentry_open+0x16d/0x2b7 [] vfs_open+0x5c/0x65 [] path_openat+0x785/0x8fb [] do_filp_open+0x48/0x9e [] do_sys_open+0x13b/0x1cb [] SyS_open+0x19/0x1b [] do_syscall_64+0x80/0x17a [] return_from_SYSCALL_64+0x0/0x7a [] 0xffffffffffffffff Reported-by: Jianhong Yin Signed-off-by: David Howells Acked-by: Jeff Layton Acked-by: Steve Dickson Signed-off-by: Al Viro Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/fscache/cookie.c | 5 +++++ fs/fscache/object.c | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 4304072161aa..40d61077bead 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c @@ -542,6 +542,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) { if (invalidate) set_bit(FSCACHE_OBJECT_RETIRED, &object->flags); + clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL); } } else { @@ -560,6 +561,10 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate) wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, TASK_UNINTERRUPTIBLE); + /* Make sure any pending writes are cancelled. 
*/ + if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) + fscache_invalidate_writes(cookie); + /* Reset the cookie state if it wasn't relinquished */ if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) { atomic_inc(&cookie->n_active); diff --git a/fs/fscache/object.c b/fs/fscache/object.c index f3a024fcff81..7a182c87f378 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -650,6 +650,12 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob fscache_mark_object_dead(object); object->oob_event_mask = 0; + if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) { + /* Reject any new read/write ops and abort any that are pending. */ + clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); + fscache_cancel_all_ops(object); + } + if (list_empty(&object->dependents) && object->n_ops == 0 && object->n_children == 0) -- GitLab From 0542f979124662c219e0c8c66f6ad1e784bf20dc Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 23 May 2017 21:54:06 -0400 Subject: [PATCH 072/786] FS-Cache: Initialise stores_lock in netfs cookie [ Upstream commit 62deb8187d116581c88c69a2dd9b5c16588545d4 ] Initialise the stores_lock in fscache netfs cookies. Technically, it shouldn't be necessary, since the netfs cookie is an index and stores no data, but initialising it anyway adds insignificant overhead. Signed-off-by: David Howells Reviewed-by: Jeff Layton Acked-by: Steve Dickson Signed-off-by: Al Viro Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/fscache/netfs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c index 9b28649df3a1..a8aa00be4444 100644 --- a/fs/fscache/netfs.c +++ b/fs/fscache/netfs.c @@ -48,6 +48,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs) cookie->flags = 1 << FSCACHE_COOKIE_ENABLED; spin_lock_init(&cookie->lock); + spin_lock_init(&cookie->stores_lock); INIT_HLIST_HEAD(&cookie->backing_objects); /* check the netfs type is not already present */ -- GitLab From 16f733d8db525e0c3519888071b2e644b12717f7 Mon Sep 17 00:00:00 2001 From: Dimitris Michailidis Date: Tue, 23 May 2017 21:54:07 -0400 Subject: [PATCH 073/786] ipv6: fix flow labels when the traffic class is non-0 [ Upstream commit 90427ef5d2a4b9a24079889bf16afdcdaebc4240 ] ip6_make_flowlabel() determines the flow label for IPv6 packets. It's supposed to be passed a flow label, which it returns as is if non-0 and in some other cases, otherwise it calculates a new value. The problem is callers often pass a flowi6.flowlabel, which may also contain traffic class bits. If the traffic class is non-0 ip6_make_flowlabel() mistakes the non-0 it gets as a flow label and returns the whole thing. Thus it can return a 'flow label' longer than 20b and the low 20b of that is typically 0 resulting in packets with 0 label. Moreover, different packets of a flow may be labeled differently. For a TCP flow with ECN non-payload and payload packets get different labels as exemplified by this pair of consecutive packets: (pure ACK) Internet Protocol Version 6, Src: 2002:af5:11a3::, Dst: 2002:af5:11a2:: 0110 .... = Version: 6 .... 0000 0000 .... .... .... .... .... = Traffic Class: 0x00 (DSCP: CS0, ECN: Not-ECT) .... 0000 00.. .... .... .... .... .... = Differentiated Services Codepoint: Default (0) .... .... ..00 .... .... .... .... .... = Explicit Congestion Notification: Not ECN-Capable Transport (0) .... .... .... 
0001 1100 1110 0100 1001 = Flow Label: 0x1ce49 Payload Length: 32 Next Header: TCP (6) (payload) Internet Protocol Version 6, Src: 2002:af5:11a3::, Dst: 2002:af5:11a2:: 0110 .... = Version: 6 .... 0000 0010 .... .... .... .... .... = Traffic Class: 0x02 (DSCP: CS0, ECN: ECT(0)) .... 0000 00.. .... .... .... .... .... = Differentiated Services Codepoint: Default (0) .... .... ..10 .... .... .... .... .... = Explicit Congestion Notification: ECN-Capable Transport codepoint '10' (2) .... .... .... 0000 0000 0000 0000 0000 = Flow Label: 0x00000 Payload Length: 688 Next Header: TCP (6) This patch allows ip6_make_flowlabel() to be passed more than just a flow label and has it extract the part it really wants. This was simpler than modifying the callers. With this patch packets like the above become Internet Protocol Version 6, Src: 2002:af5:11a3::, Dst: 2002:af5:11a2:: 0110 .... = Version: 6 .... 0000 0000 .... .... .... .... .... = Traffic Class: 0x00 (DSCP: CS0, ECN: Not-ECT) .... 0000 00.. .... .... .... .... .... = Differentiated Services Codepoint: Default (0) .... .... ..00 .... .... .... .... .... = Explicit Congestion Notification: Not ECN-Capable Transport (0) .... .... .... 1010 1111 1010 0101 1110 = Flow Label: 0xafa5e Payload Length: 32 Next Header: TCP (6) Internet Protocol Version 6, Src: 2002:af5:11a3::, Dst: 2002:af5:11a2:: 0110 .... = Version: 6 .... 0000 0010 .... .... .... .... .... = Traffic Class: 0x02 (DSCP: CS0, ECN: ECT(0)) .... 0000 00.. .... .... .... .... .... = Differentiated Services Codepoint: Default (0) .... .... ..10 .... .... .... .... .... = Explicit Congestion Notification: ECN-Capable Transport codepoint '10' (2) .... .... .... 1010 1111 1010 0101 1110 = Flow Label: 0xafa5e Payload Length: 688 Next Header: TCP (6) Signed-off-by: Dimitris Michailidis Acked-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/net/ipv6.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 91afb4aadaa6..615ce0abba9c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -776,6 +776,11 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, { u32 hash; + /* @flowlabel may include more than a flow label, eg, the traffic class. + * Here we want only the flow label value. 
+ */ + flowlabel &= IPV6_FLOWLABEL_MASK; + if (flowlabel || net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF || (!autolabel && -- GitLab From cf336eea809d10ea9df4e7096d7d916c3d07eeaf Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 23 May 2017 21:54:08 -0400 Subject: [PATCH 074/786] drm/nouveau: prevent userspace from deleting client object [ Upstream commit c966b6279f610a24ac1d42dcbe30e10fa61220b2 ] Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_usif.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c index 08f9c6fa0f7f..1fba38622744 100644 --- a/drivers/gpu/drm/nouveau/nouveau_usif.c +++ b/drivers/gpu/drm/nouveau/nouveau_usif.c @@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc) if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) { /* block access to objects not created via this interface */ owner = argv->v0.owner; - if (argv->v0.object == 0ULL) + if (argv->v0.object == 0ULL && + argv->v0.type != NVIF_IOCTL_V0_DEL) argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */ else argv->v0.owner = NVDRM_OBJECT_USIF; -- GitLab From c33f1bd7f28999950918912b25d2ed9514669edf Mon Sep 17 00:00:00 2001 From: Ben Skeggs Date: Tue, 23 May 2017 21:54:09 -0400 Subject: [PATCH 075/786] drm/nouveau/fence/g84-: protect against concurrent access to semaphore buffers [ Upstream commit 96692b097ba76d0c637ae8af47b29c73da33c9d0 ] Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_fence.h | 1 + drivers/gpu/drm/nouveau/nv84_fence.c | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 64c4ce7115ad..75e1f09484ff 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -100,6 +100,7 @@ struct nv84_fence_priv { struct nouveau_bo *bo; struct nouveau_bo *bo_gart; u32 *suspend; + struct mutex mutex; }; u64 nv84_fence_crtc(struct nouveau_channel *, int); diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index 18bde9d8e6d6..90a5dd6311c6 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -121,8 +121,10 @@ nv84_fence_context_del(struct nouveau_channel *chan) } nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence); + mutex_lock(&priv->mutex); nouveau_bo_vma_del(priv->bo, &fctx->vma_gart); nouveau_bo_vma_del(priv->bo, &fctx->vma); + mutex_unlock(&priv->mutex); nouveau_fence_context_del(&fctx->base); chan->fence = NULL; nouveau_fence_context_free(&fctx->base); @@ -148,11 +150,13 @@ nv84_fence_context_new(struct nouveau_channel *chan) fctx->base.sync32 = nv84_fence_sync32; fctx->base.sequence = nv84_fence_read(chan); + mutex_lock(&priv->mutex); ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma); if (ret == 0) { ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm, &fctx->vma_gart); } + mutex_unlock(&priv->mutex); /* map display semaphore buffers into channel's vm */ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { @@ -232,6 +236,8 @@ nv84_fence_create(struct nouveau_drm *drm) priv->base.context_base = fence_context_alloc(priv->base.contexts); priv->base.uevent = true; + mutex_init(&priv->mutex); + /* Use VRAM if there is any ; otherwise fallback to system memory */ domain = 
drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM : /* -- GitLab From 581e4003bf126ef479734a25f7f57c365d7c790a Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 30 Jan 2017 15:11:45 +0200 Subject: [PATCH 076/786] net/mlx4_core: Avoid command timeouts during VF driver device shutdown [ Upstream commit d585df1c5ccf995fcee910705ad7a9cdd11d4152 ] Some Hypervisors detach VFs from VMs by instantly causing an FLR event to be generated for a VF. In the mlx4 case, this will cause that VF's comm channel to be disabled before the VM has an opportunity to invoke the VF device's "shutdown" method. The result is that the VF driver on the VM will experience a command timeout during the shutdown process when the Hypervisor does not deliver a command-completion event to the VM. To avoid FW command timeouts on the VM when the driver's shutdown method is invoked, we detect the absence of the VF's comm channel at the very start of the shutdown process. If the comm-channel has already been disabled, we cause all FW commands during the device shutdown process to immediately return success (and thus avoid all command timeouts). Signed-off-by: Jack Morgenstein Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx4/catas.c | 2 +- drivers/net/ethernet/mellanox/mlx4/intf.c | 12 ++++++++++++ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1 + 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index c7e939945259..53daa6ca5d83 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev) return -ETIMEDOUT; } -static int mlx4_comm_internal_err(u32 slave_read) +int mlx4_comm_internal_err(u32 slave_read) { return (u32)COMM_CHAN_EVENT_INTERNAL_ERR == (slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 
1 : 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 0e8b7c44931f..8258d08acd8c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev) return; mlx4_stop_catas_poll(dev); + if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION && + mlx4_is_slave(dev)) { + /* In mlx4_remove_one on a VF */ + u32 slave_read = + swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read)); + + if (mlx4_comm_internal_err(slave_read)) { + mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n", + __func__); + mlx4_enter_error_state(dev->persist); + } + } mutex_lock(&intf_mutex); list_for_each_entry(intf, &intf_list, list) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 88ee7d8a5923..086920b615af 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type); void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type); void mlx4_enter_error_state(struct mlx4_dev_persistent *persist); +int mlx4_comm_internal_err(u32 slave_read); int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port, enum mlx4_port_type *type); -- GitLab From 3564d41e5c84e5c71ec1172958cbafc19575a550 Mon Sep 17 00:00:00 2001 From: Arseny Solokha Date: Sun, 29 Jan 2017 19:52:20 +0700 Subject: [PATCH 077/786] gianfar: synchronize DMA API usage by free_skb_rx_queue w/ gfar_new_page [ Upstream commit 4af0e5bb95ee3ba5ea4bd7dbb94e1648a5279cc9 ] In spite of switching to paged allocation of Rx buffers, the driver still called dma_unmap_single() in the Rx queues tear-down path. The DMA region unmapping code in free_skb_rx_queue() basically predates the introduction of paged allocation to the driver. While being refactored, it apparently hasn't reflected the change in the DMA API usage by its counterpart gfar_new_page(). 
As a result, setting an interface to the DOWN state now yields the following: # ip link set eth2 down fsl-gianfar ffe24000.ethernet: DMA-API: device driver frees DMA memory with wrong function [device address=0x000000001ecd0000] [size=40] ------------[ cut here ]------------ WARNING: CPU: 1 PID: 189 at lib/dma-debug.c:1123 check_unmap+0x8e0/0xa28 CPU: 1 PID: 189 Comm: ip Tainted: G O 4.9.5 #1 task: dee73400 task.stack: dede2000 NIP: c02101e8 LR: c02101e8 CTR: c0260d74 REGS: dede3bb0 TRAP: 0700 Tainted: G O (4.9.5) MSR: 00021000 CR: 28002222 XER: 00000000 GPR00: c02101e8 dede3c60 dee73400 000000b6 dfbd033c dfbd36c4 1f622000 dede2000 GPR08: 00000007 c05b1634 1f622000 00000000 22002484 100a9904 00000000 00000000 GPR16: 00000000 db4c849c 00000002 db4c8480 00000001 df142240 db4c84bc 00000000 GPR24: c0706148 c0700000 00029000 c07552e8 c07323b4 dede3cb8 c07605e0 db535540 NIP [c02101e8] check_unmap+0x8e0/0xa28 LR [c02101e8] check_unmap+0x8e0/0xa28 Call Trace: [dede3c60] [c02101e8] check_unmap+0x8e0/0xa28 (unreliable) [dede3cb0] [c02103b8] debug_dma_unmap_page+0x88/0x9c [dede3d30] [c02dffbc] free_skb_resources+0x2c4/0x404 [dede3d80] [c02e39b4] gfar_close+0x24/0xc8 [dede3da0] [c0361550] __dev_close_many+0xa0/0xf8 [dede3dd0] [c03616f0] __dev_close+0x2c/0x4c [dede3df0] [c036b1b8] __dev_change_flags+0xa0/0x174 [dede3e10] [c036b2ac] dev_change_flags+0x20/0x60 [dede3e30] [c03e130c] devinet_ioctl+0x540/0x824 [dede3e90] [c0347dcc] sock_ioctl+0x134/0x298 [dede3eb0] [c0111814] do_vfs_ioctl+0xac/0x854 [dede3f20] [c0111ffc] SyS_ioctl+0x40/0x74 [dede3f40] [c000f290] ret_from_syscall+0x0/0x3c --- interrupt: c01 at 0xff45da0 LR = 0xff45cd0 Instruction dump: 811d001c 7c66482e 813d0020 9061000c 807f000c 5463103a 7cc6182e 3c60c052 386309ac 90c10008 4cc63182 4826b845 <0fe00000> 4bfffa60 3c80c052 388402c4 ---[ end trace 695ae6d7ac1d0c47 ]--- Mapped at: [] gfar_alloc_rx_buffs+0x178/0x248 [] startup_gfar+0x368/0x570 [] __dev_open+0xdc/0x150 [] __dev_change_flags+0xa0/0x174 [] dev_change_flags+0x20/0x60 Even though the issue was discovered in 4.9 kernel, the code in question is identical in the current net and net-next trees. Fixes: 75354148ce69 ("gianfar: Add paged allocation and Rx S/G") Signed-off-by: Arseny Solokha Acked-by: Claudiu Manoil Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/freescale/gianfar.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 9061c2f82b9c..d391beebe591 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2007,8 +2007,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue) if (!rxb->page) continue; - dma_unmap_single(rx_queue->dev, rxb->dma, - PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(rx_queue->dev, rxb->dma, + PAGE_SIZE, DMA_FROM_DEVICE); __free_page(rxb->page); rxb->page = NULL; -- GitLab From 62614714e30a963ff325d7b5547b24d650153817 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 26 Jan 2017 19:24:07 +0200 Subject: [PATCH 078/786] pinctrl: baytrail: Rectify debounce support (part 2) [ Upstream commit 827e1579e1d5cb66e340e7be1944b825b542bbdf ] The commit 04ff5a095d66 ("pinctrl: baytrail: Rectify debounce support") almost fixes the logic of debuonce but missed couple of things, i.e. typo in mask when disabling debounce and lack of enabling it back. This patch addresses above issues. 
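For illustration only, a minimal user-space sketch of the mask typo described above: "conf &= BYT_DEBOUNCE_EN" keeps only the enable bit instead of clearing it, whereas the corrected "conf &= ~BYT_DEBOUNCE_EN" clears the bit and preserves the rest of the register. The bit position used below is an assumed placeholder; the real definition lives in pinctrl-baytrail.c.

/* Standalone demo of the wrong vs. corrected mask (any C compiler). */
#include <stdio.h>
#include <stdint.h>

#define BYT_DEBOUNCE_EN (1u << 0)   /* assumed placeholder bit for the demo */

int main(void)
{
	uint32_t conf  = 0xffffffffu;              /* pretend all config bits are set */
	uint32_t wrong = conf & BYT_DEBOUNCE_EN;   /* old code: keeps only the enable bit */
	uint32_t fixed = conf & ~BYT_DEBOUNCE_EN;  /* new code: clears the enable bit */

	printf("wrong mask: 0x%08x (debounce left enabled, other bits lost)\n", wrong);
	printf("fixed mask: 0x%08x (debounce disabled, other bits kept)\n", fixed);
	return 0;
}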
Reported-by: Jean Delvare Fixes: 04ff5a095d66 ("pinctrl: baytrail: Rectify debounce support") Signed-off-by: Andy Shevchenko Reviewed-by: Jean Delvare Acked-by: Mika Westerberg Signed-off-by: Linus Walleij Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/intel/pinctrl-baytrail.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 583ae3f38fc0..5419de8e20b1 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1250,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, debounce = readl(db_reg); debounce &= ~BYT_DEBOUNCE_PULSE_MASK; + if (arg) + conf |= BYT_DEBOUNCE_EN; + else + conf &= ~BYT_DEBOUNCE_EN; + switch (arg) { - case 0: - conf &= BYT_DEBOUNCE_EN; - break; case 375: debounce |= BYT_DEBOUNCE_PULSE_375US; break; @@ -1276,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev, debounce |= BYT_DEBOUNCE_PULSE_24MS; break; default: - ret = -EINVAL; + if (arg) + ret = -EINVAL; + break; } if (!ret) -- GitLab From d934fe02ba176b5e88d8b67d603cd94fc930f3ce Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Tue, 10 Jan 2017 09:44:54 -0200 Subject: [PATCH 079/786] cec: fix wrong last_la determination [ Upstream commit f9f96fc10c09ca16e336854c08bc1563eed97985 ] Due to an incorrect condition the last_la used for the initial attempt at claiming a logical address could be wrong. The last_la wasn't converted to a mask when ANDing with type2mask, so that test was broken. Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/media/cec/cec-adap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c index 611e07b78bfe..057c9b5ab1e5 100644 --- a/drivers/staging/media/cec/cec-adap.c +++ b/drivers/staging/media/cec/cec-adap.c @@ -1017,7 +1017,7 @@ static int cec_config_thread_func(void *arg) las->log_addr[i] = CEC_LOG_ADDR_INVALID; if (last_la == CEC_LOG_ADDR_INVALID || last_la == CEC_LOG_ADDR_UNREGISTERED || - !(last_la & type2mask[type])) + !((1 << last_la) & type2mask[type])) last_la = la_list[0]; err = cec_config_log_addr(adap, i, last_la); -- GitLab From 82b6693bd4153dc437255dabaa4f9d0d0f8c9da2 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 18 Dec 2016 14:35:45 +0100 Subject: [PATCH 080/786] drm: prevent double-(un)registration for connectors [ Upstream commit e73ab00e9a0f1731f34d0620a9c55f5c30c4ad4e ] If we're unlucky then the registration from a hotplugged connector might race with the final registration step on driver load. And since MST topology discover is asynchronous that's even somewhat likely. v2: Also update the kerneldoc for @registered! v3: Review from Chris: - Improve kerneldoc for late_register/early_unregister callbacks. - Use mutex_destroy. 
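A rough user-space model of the serialisation this patch introduces, assuming only that the registered flag is consulted and updated under the new per-connector mutex; it is not the driver code itself, and the names merely echo the DRM ones.

/* Toy pthread model: register/unregister check and flip "registered" only
 * under the mutex, so concurrent callers cannot both act on a stale value.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct connector {
	pthread_mutex_t mutex;
	bool registered;
};

static void connector_register(struct connector *c)
{
	pthread_mutex_lock(&c->mutex);
	if (!c->registered) {
		c->registered = true;		/* sysfs/debugfs setup would happen here */
		printf("registered\n");
	}
	pthread_mutex_unlock(&c->mutex);
}

static void connector_unregister(struct connector *c)
{
	pthread_mutex_lock(&c->mutex);
	if (c->registered) {
		c->registered = false;		/* teardown would happen here */
		printf("unregistered\n");
	}
	pthread_mutex_unlock(&c->mutex);
}

static void *hotplug_thread(void *arg)
{
	connector_register(arg);		/* e.g. asynchronous MST discovery */
	return NULL;
}

int main(void)
{
	struct connector c = { .mutex = PTHREAD_MUTEX_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, hotplug_thread, &c);	/* hotplug path */
	connector_register(&c);				/* driver-load path */
	pthread_join(t, NULL);
	connector_unregister(&c);
	return 0;
}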
Reviewed-by: Chris Wilson Cc: Chris Wilson Reviewed-by: Sean Paul Reported-by: Chris Wilson Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/20161218133545.2106-1-daniel.vetter@ffwll.ch Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_connector.c | 20 +++++++++++++++----- include/drm/drm_connector.h | 16 +++++++++++++++- 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 2db7fb510b6c..a103f1fcbdbf 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev, INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); + mutex_init(&connector->mutex); connector->edid_blob_ptr = NULL; connector->status = connector_status_unknown; @@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector) connector->funcs->atomic_destroy_state(connector, connector->state); + mutex_destroy(&connector->mutex); + memset(connector, 0, sizeof(*connector)); } EXPORT_SYMBOL(drm_connector_cleanup); @@ -374,14 +377,15 @@ EXPORT_SYMBOL(drm_connector_cleanup); */ int drm_connector_register(struct drm_connector *connector) { - int ret; + int ret = 0; + mutex_lock(&connector->mutex); if (connector->registered) - return 0; + goto unlock; ret = drm_sysfs_connector_add(connector); if (ret) - return ret; + goto unlock; ret = drm_debugfs_connector_add(connector); if (ret) { @@ -397,12 +401,14 @@ int drm_connector_register(struct drm_connector *connector) drm_mode_object_register(connector->dev, &connector->base); connector->registered = true; - return 0; + goto unlock; err_debugfs: drm_debugfs_connector_remove(connector); err_sysfs: drm_sysfs_connector_remove(connector); +unlock: + mutex_unlock(&connector->mutex); return ret; } EXPORT_SYMBOL(drm_connector_register); @@ -415,8 +421,11 @@ EXPORT_SYMBOL(drm_connector_register); */ void drm_connector_unregister(struct drm_connector *connector) { - if (!connector->registered) + mutex_lock(&connector->mutex); + if (!connector->registered) { + mutex_unlock(&connector->mutex); return; + } if (connector->funcs->early_unregister) connector->funcs->early_unregister(connector); @@ -425,6 +434,7 @@ void drm_connector_unregister(struct drm_connector *connector) drm_debugfs_connector_remove(connector); connector->registered = false; + mutex_unlock(&connector->mutex); } EXPORT_SYMBOL(drm_connector_unregister); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index ac9d7d8e0e43..d8bb8d151825 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -345,6 +345,8 @@ struct drm_connector_funcs { * core drm connector interfaces. Everything added from this callback * should be unregistered in the early_unregister callback. * + * This is called while holding drm_connector->mutex. + * * Returns: * * 0 on success, or a negative error code on failure. @@ -359,6 +361,8 @@ struct drm_connector_funcs { * late_register(). It is called from drm_connector_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torndown. + * + * This is called while holding drm_connector->mutex. */ void (*early_unregister)(struct drm_connector *connector); @@ -511,7 +515,6 @@ struct drm_cmdline_mode { * @interlace_allowed: can this connector handle interlaced modes? * @doublescan_allowed: can this connector handle doublescan? 
* @stereo_allowed: can this connector handle stereo modes? - * @registered: is this connector exposed (registered) with userspace? * @modes: modes available on this connector (from fill_modes() + user) * @status: one of the drm_connector_status enums (connected, not, or unknown) * @probed_modes: list of modes derived directly from the display @@ -559,6 +562,13 @@ struct drm_connector { char *name; + /** + * @mutex: Lock for general connector state, but currently only protects + * @registered. Most of the connector state is still protected by the + * mutex in &drm_mode_config. + */ + struct mutex mutex; + /** * @index: Compacted connector index, which matches the position inside * the mode_config.list for drivers not supporting hot-add/removing. Can @@ -572,6 +582,10 @@ struct drm_connector { bool interlace_allowed; bool doublescan_allowed; bool stereo_allowed; + /** + * @registered: Is this connector exposed (registered) with userspace? + * Protected by @mutex. + */ bool registered; struct list_head modes; /* list of modes on this connector */ -- GitLab From 326fdffd7078257706e4126c256e144bd3859eff Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 12 Jan 2017 17:15:56 +0100 Subject: [PATCH 081/786] drm: Don't race connector registration [ Upstream commit e6e7b48b295afa5a5ab440de0a94d9ad8b3ce2d0 ] I was under the misconception that the sysfs dev stuff can be fully set up, and then registered all in one step with device_add. That's true for properties and property groups, but not for parents and child devices. Those must be fully registered before you can register a child. Add a bit of tracking to make sure that asynchronous mst connector hotplugging gets this right. For consistency we rely upon the implicit barriers of the connector->mutex, which is taken anyway, to ensure that at least either the connector or device registration call will work out. Mildly tested since I can't reliably reproduce this on my mst box here. 
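Assuming only what the message above states (a child must not be registered before its parent device), here is a simplified single-threaded model of the guard; the function and field names merely echo the DRM ones and this is not the actual implementation.

/* Simplified model: early connector registration is deferred until the
 * parent device has been registered, then performed from the device path.
 */
#include <stdbool.h>
#include <stdio.h>

struct device    { bool registered; };
struct connector { struct device *dev; bool registered; };

static int connector_register(struct connector *c)
{
	if (!c->dev->registered)	/* parent not added yet: defer, not an error */
		return 0;
	if (!c->registered) {
		c->registered = true;
		printf("connector exposed to userspace\n");
	}
	return 0;
}

static void device_register_all(struct device *d, struct connector *c)
{
	d->registered = true;		/* parent goes first ...                 */
	connector_register(c);		/* ... then children are (re)registered  */
}

int main(void)
{
	struct device dev = { .registered = false };
	struct connector con = { .dev = &dev, .registered = false };

	connector_register(&con);	 /* early hotplug during load: deferred       */
	device_register_all(&dev, &con); /* driver load completes: now registered     */
	return 0;
}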
Reported-by: Dave Hansen Cc: Dave Hansen Acked-by: Chris Wilson Cc: Chris Wilson Signed-off-by: Daniel Vetter Link: http://patchwork.freedesktop.org/patch/msgid/1484237756-2720-1-git-send-email-daniel.vetter@ffwll.ch Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_connector.c | 3 +++ drivers/gpu/drm/drm_drv.c | 4 ++++ include/drm/drmP.h | 1 + 3 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index a103f1fcbdbf..0e934a9ac63c 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -379,6 +379,9 @@ int drm_connector_register(struct drm_connector *connector) { int ret = 0; + if (!connector->dev->registered) + return 0; + mutex_lock(&connector->mutex); if (connector->registered) goto unlock; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 0f2fa9044668..362b8cd68a24 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -710,6 +710,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) if (ret) goto err_minors; + dev->registered = true; + if (dev->driver->load) { ret = dev->driver->load(dev, flags); if (ret) @@ -749,6 +751,8 @@ void drm_dev_unregister(struct drm_device *dev) drm_lastclose(dev); + dev->registered = false; + if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_modeset_unregister_all(dev); diff --git a/include/drm/drmP.h b/include/drm/drmP.h index e9fb2e802feb..0c4f9c67c221 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -776,6 +776,7 @@ struct drm_device { struct drm_minor *control; /**< Control node */ struct drm_minor *primary; /**< Primary node */ struct drm_minor *render; /**< Render node */ + bool registered; /* currently active master for this device. Protected by master_mutex */ struct drm_master *master; -- GitLab From a15bbf44ab2fbde580b00005b3c1922c4904d6ab Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 23 Jan 2017 15:15:32 +0800 Subject: [PATCH 082/786] pinctrl: berlin-bg4ct: fix the value for "sd1a" of pin SCRD0_CRD_PRES [ Upstream commit e82d02580af45663fad6d3596e4344c606e81e10 ] This should be a typo. Signed-off-by: Jisheng Zhang Signed-off-by: Linus Walleij Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/berlin/berlin-bg4ct.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c index 09172043d589..c617ec49e9ed 100644 --- a/drivers/pinctrl/berlin/berlin-bg4ct.c +++ b/drivers/pinctrl/berlin/berlin-bg4ct.c @@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = { BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15, BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */ BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */ - BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */ + BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */ BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18, BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */ BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */ -- GitLab From e6549f36278cf03bfc8ecf7daa1ae77055398c13 Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sat, 28 Jan 2017 01:07:30 +0300 Subject: [PATCH 083/786] net: adaptec: starfire: add checks for dma mapping errors [ Upstream commit d1156b489fa734d1af763d6a07b1637c01bb0aed ] init_ring(), refill_rx_ring() and start_tx() don't check if mapping dma memory succeed. The patch adds the checks and failure handling. 
Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/adaptec/starfire.c | 45 +++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 8af2c88d5b33..45bb0fe50917 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -1153,6 +1153,12 @@ static void init_ring(struct net_device *dev) if (skb == NULL) break; np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(np->pci_dev, + np->rx_info[i].mapping)) { + dev_kfree_skb(skb); + np->rx_info[i].skb = NULL; + break; + } /* Grrr, we cannot offset to correctly align the IP header. */ np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); } @@ -1183,8 +1189,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); unsigned int entry; + unsigned int prev_tx; u32 status; - int i; + int i, j; /* * be cautious here, wrapping the queue has weird semantics @@ -1202,6 +1209,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) } #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ + prev_tx = np->cur_tx; entry = np->cur_tx % TX_RING_SIZE; for (i = 0; i < skb_num_frags(skb); i++) { int wrap_ring = 0; @@ -1235,6 +1243,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) skb_frag_size(this_frag), PCI_DMA_TODEVICE); } + if (pci_dma_mapping_error(np->pci_dev, + np->tx_info[entry].mapping)) { + dev->stats.tx_dropped++; + goto err_out; + } np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); np->tx_ring[entry].status = cpu_to_le32(status); @@ -1269,8 +1282,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); return NETDEV_TX_OK; -} +err_out: + entry = prev_tx % TX_RING_SIZE; + np->tx_info[entry].skb = NULL; + if (i > 0) { + pci_unmap_single(np->pci_dev, + np->tx_info[entry].mapping, + skb_first_frag_len(skb), + PCI_DMA_TODEVICE); + np->tx_info[entry].mapping = 0; + entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE; + for (j = 1; j < i; j++) { + pci_unmap_single(np->pci_dev, + np->tx_info[entry].mapping, + skb_frag_size( + &skb_shinfo(skb)->frags[j-1]), + PCI_DMA_TODEVICE); + entry++; + } + } + dev_kfree_skb_any(skb); + np->cur_tx = prev_tx; + return NETDEV_TX_OK; +} /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ @@ -1570,6 +1605,12 @@ static void refill_rx_ring(struct net_device *dev) break; /* Better luck next round. 
*/ np->rx_info[entry].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(np->pci_dev, + np->rx_info[entry].mapping)) { + dev_kfree_skb(skb); + np->rx_info[entry].skb = NULL; + break; + } np->rx_ring[entry].rxaddr = cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); } -- GitLab From 2bcbe747629e2b1ec7f162878d8969613a1c65c6 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 29 Jan 2017 13:50:06 -0800 Subject: [PATCH 084/786] drm/i915: Check for NULL i915_vma in intel_unpin_fb_obj() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 39cb2c9a316e77f6dfba96c543e55b6672d5a37e ] I've seen this trigger twice now, where the i915_gem_object_to_ggtt() call in intel_unpin_fb_obj() returns NULL, resulting in an oops immediately afterwards as the (inlined) call to i915_vma_unpin_fence() tries to dereference it. It seems to be some race condition where the object is going away at shutdown time, since both times happened when shutting down the X server. The call chains were different: - VT ioctl(KDSETMODE, KD_TEXT): intel_cleanup_plane_fb+0x5b/0xa0 [i915] drm_atomic_helper_cleanup_planes+0x6f/0x90 [drm_kms_helper] intel_atomic_commit_tail+0x749/0xfe0 [i915] intel_atomic_commit+0x3cb/0x4f0 [i915] drm_atomic_commit+0x4b/0x50 [drm] restore_fbdev_mode+0x14c/0x2a0 [drm_kms_helper] drm_fb_helper_restore_fbdev_mode_unlocked+0x34/0x80 [drm_kms_helper] drm_fb_helper_set_par+0x2d/0x60 [drm_kms_helper] intel_fbdev_set_par+0x18/0x70 [i915] fb_set_var+0x236/0x460 fbcon_blank+0x30f/0x350 do_unblank_screen+0xd2/0x1a0 vt_ioctl+0x507/0x12a0 tty_ioctl+0x355/0xc30 do_vfs_ioctl+0xa3/0x5e0 SyS_ioctl+0x79/0x90 entry_SYSCALL_64_fastpath+0x13/0x94 - i915 unpin_work workqueue: intel_unpin_work_fn+0x58/0x140 [i915] process_one_work+0x1f1/0x480 worker_thread+0x48/0x4d0 kthread+0x101/0x140 and this patch purely papers over the issue by adding a NULL pointer check and a WARN_ON_ONCE() to avoid the oops that would then generally make the machine unresponsive. Other callers of i915_gem_object_to_ggtt() seem to also check for the returned pointer being NULL and warn about it, so this clearly has happened before in other places. [ Reported it originally to the i915 developers on Jan 8, applying the ugly workaround on my own now after triggering the problem for the second time with no feedback. This is likely to be the same bug reported as https://bugs.freedesktop.org/show_bug.cgi?id=98829 https://bugs.freedesktop.org/show_bug.cgi?id=99134 which has a patch for the underlying problem, but it hasn't gotten to me, so I'm applying the workaround. 
] Cc: Daniel Vetter Cc: Jani Nikula Cc: Ville Syrjälä Cc: Chris Wilson Cc: Maarten Lankhorst Cc: Tvrtko Ursulin Cc: Imre Deak Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_display.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5dc6082639db..3673ab3aa991 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2253,6 +2253,9 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) intel_fill_fb_ggtt_view(&view, fb, rotation); vma = i915_gem_object_to_ggtt(obj, &view); + if (WARN_ON_ONCE(!vma)) + return; + i915_vma_unpin_fence(vma); i915_gem_object_unpin_from_display_plane(vma); } -- GitLab From b445ecbdff838e534077a62133f1e9b6354b3840 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 11 Jan 2017 19:39:42 +0200 Subject: [PATCH 085/786] net/mlx5: E-Switch, Err when retrieving steering name-space fails [ Upstream commit 5403dc703ff277f8a2a12a83ac820750485f13b3 ] Make sure to return error when we failed retrieving the FDB steering name space. Also, while around, correctly print the error when mode change revert fails in the warning message. Signed-off-by: Or Gerlitz Reported-by: Leon Romanovsky Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d239f5d0ea36..d3f8840f10fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -414,6 +414,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); if (!root_ns) { esw_warn(dev, "Failed to get FDB flow namespace\n"); + err = -EOPNOTSUPP; goto ns_err; } @@ -639,7 +640,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw) esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); if (err1) - esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); + esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1); } return err; } -- GitLab From 4038524f7fef3beb83d61c54b428a4fd3b2d4388 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 12 Jan 2017 13:04:01 +0200 Subject: [PATCH 086/786] net/mlx5: Return EOPNOTSUPP when failing to get steering name-space [ Upstream commit eff596da48784316ccb83bef82bc1213b512d5e0 ] When we fail to retrieve a hardware steering name-space, the returned error code should say that this operation is not supported. Align the various places in the driver where this call is made to this convention. Also, make sure to warn when we fail to retrieve a SW (ANCHOR) name-space. 
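As a rough illustration of the convention being adopted (not mlx5 code; the helper and flag below are hypothetical stand-ins), a caller can treat -EOPNOTSUPP as "steering not available on this device" and any other error as a real failure:

/* Hypothetical stand-in for a table-creation path whose namespace lookup fails. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int create_offloads_table(int have_namespace)
{
	if (!have_namespace)
		return -EOPNOTSUPP;	/* previously -ENOMEM/-EINVAL/-EIO in these paths */
	return 0;
}

int main(void)
{
	int err = create_offloads_table(0);

	if (err == -EOPNOTSUPP)
		printf("offloads not supported here: %s\n", strerror(-err));
	else if (err)
		printf("hard failure: %s\n", strerror(-err));
	return 0;
}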
Signed-off-by: Or Gerlitz Reviewed-by: Matan Barak Signed-off-by: Saeed Mahameed Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 36fbc6b21a33..8cd7227fbdfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -1081,7 +1081,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) MLX5_FLOW_NAMESPACE_KERNEL); if (!priv->fs.ns) - return -EINVAL; + return -EOPNOTSUPP; err = mlx5e_arfs_create_tables(priv); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c7011ef4e351..a8966e6dbe1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -352,7 +352,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); if (!root_ns) { esw_warn(dev, "Failed to get FDB flow namespace\n"); - return -ENOMEM; + return -EOPNOTSUPP; } flow_group_in = mlx5_vzalloc(inlen); @@ -961,7 +961,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); if (!root_ns) { esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); - return -EIO; + return -EOPNOTSUPP; } flow_group_in = mlx5_vzalloc(inlen); @@ -1078,7 +1078,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); if (!root_ns) { esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); - return -EIO; + return -EOPNOTSUPP; } flow_group_in = mlx5_vzalloc(inlen); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d3f8840f10fe..b08b9e2c6a76 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -521,7 +521,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw) ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); if (!ns) { esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); - return -ENOMEM; + return -EOPNOTSUPP; } ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 7e20e4bc4cc7..4de3c28b0547 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1678,7 +1678,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering) struct mlx5_flow_table *ft; ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); - if (!ns) + if (WARN_ON(!ns)) return -EINVAL; ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL); if (IS_ERR(ft)) { -- GitLab From 2f970b437e86a13c13b958d5fefdfc256fa7266a Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Tue, 3 Jan 2017 22:55:50 +0100 Subject: [PATCH 087/786] parisc, parport_gsc: Fixes 
for printk continuation lines [ Upstream commit 83b5d1e3d3013dbf90645a5d07179d018c8243fa ] Signed-off-by: Helge Deller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/parport/parport_gsc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index 6e3a60c78873..50f3bb0dd1f1 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c @@ -293,7 +293,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, p->irq = PARPORT_IRQ_NONE; } if (p->irq != PARPORT_IRQ_NONE) { - printk(", irq %d", p->irq); + pr_cont(", irq %d", p->irq); if (p->dma == PARPORT_DMA_AUTO) { p->dma = PARPORT_DMA_NONE; @@ -303,8 +303,8 @@ struct parport *parport_gsc_probe_port(unsigned long base, is mandatory (see above) */ p->dma = PARPORT_DMA_NONE; - printk(" ["); -#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}} + pr_cont(" ["); +#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}} { int f = 0; printmode(PCSPP); @@ -315,7 +315,7 @@ struct parport *parport_gsc_probe_port(unsigned long base, // printmode(DMA); } #undef printmode - printk("]\n"); + pr_cont("]\n"); if (p->irq != PARPORT_IRQ_NONE) { if (request_irq (p->irq, parport_irq_handler, -- GitLab From 7dddbfcd96e265bc6e28d456d48e9fab4c57b232 Mon Sep 17 00:00:00 2001 From: Sean Nyekjaer Date: Fri, 27 Jan 2017 08:46:23 +0100 Subject: [PATCH 088/786] net: phy: micrel: add support for KSZ8795 [ Upstream commit 9d162ed69f51cbd9ee5a0c7e82aba7acc96362ff ] This is adds support for the PHYs in the KSZ8795 5port managed switch. It will allow to detect the link between the switch and the soc and uses the same read_status functions as the KSZ8873MLL switch. Signed-off-by: Sean Nyekjaer Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/micrel.c | 14 ++++++++++++++ include/linux/micrel_phy.h | 2 ++ 2 files changed, 16 insertions(+) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index ea92d524d5a8..fab56c9350cf 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -1014,6 +1014,20 @@ static struct phy_driver ksphy_driver[] = { .get_stats = kszphy_get_stats, .suspend = genphy_suspend, .resume = genphy_resume, +}, { + .phy_id = PHY_ID_KSZ8795, + .phy_id_mask = MICREL_PHY_ID_MASK, + .name = "Micrel KSZ8795", + .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = kszphy_config_init, + .config_aneg = ksz8873mll_config_aneg, + .read_status = ksz8873mll_read_status, + .get_sset_count = kszphy_get_sset_count, + .get_strings = kszphy_get_strings, + .get_stats = kszphy_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, } }; module_phy_driver(ksphy_driver); diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h index 257173e0095e..f541da68d1e7 100644 --- a/include/linux/micrel_phy.h +++ b/include/linux/micrel_phy.h @@ -35,6 +35,8 @@ #define PHY_ID_KSZ886X 0x00221430 #define PHY_ID_KSZ8863 0x00221435 +#define PHY_ID_KSZ8795 0x00221550 + /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 #define MICREL_PHY_FXEN 0x00000002 -- GitLab From eb846414674ce1accb0e64fff88287f541422312 Mon Sep 17 00:00:00 2001 From: Andreas Schultz Date: Fri, 27 Jan 2017 10:40:56 +0100 Subject: [PATCH 089/786] gtp: add genl family modules alias [ Upstream commit ab729823ec16aef384f09fd2cffe0b3d3f6e6cba ] Auto-load the module when userspace asks for the gtp netlink family. Signed-off-by: Andreas Schultz Acked-by: Harald Welte Acked-by: Pablo Neira Ayuso Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/gtp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 97e0cbca0a08..cebde074d196 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1372,3 +1372,4 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Harald Welte "); MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic"); MODULE_ALIAS_RTNL_LINK("gtp"); +MODULE_ALIAS_GENL_FAMILY("gtp"); -- GitLab From ef66745a1bd231a2c53639de27e4d6bcd00be4cc Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 9 Nov 2016 18:17:44 +0100 Subject: [PATCH 090/786] drm/nouveau: Intercept ACPI_VIDEO_NOTIFY_PROBE [ Upstream commit 3a6536c51d5db3adf58dcd466a3aee6233b58544 ] Various notebooks with nvidia GPUs generate an ACPI_VIDEO_NOTIFY_PROBE acpi-video event when an external device gets plugged in (and again on modesets on that connector), the default behavior in the acpi-video driver for this is to send a KEY_SWITCHVIDEOMODE evdev event, which causes e.g. gnome-settings-daemon to ask us to rescan the connectors (good), but also causes g-s-d to switch to mirror mode on a newly plugged monitor rather then using the monitor to extend the desktop (bad) as KEY_SWITCHVIDEOMODE is supposed to switch between extend the desktop vs mirror mode. More troublesome are the repeated ACPI_VIDEO_NOTIFY_PROBE events on changing the mode on the connector, which cause g-s-d to switch between mirror/extend mode, which causes a new ACPI_VIDEO_NOTIFY_PROBE event and we end up with an endless loop. 
This commit fixes this by adding an acpi notifier block handler to nouveau_display.c to intercept ACPI_VIDEO_NOTIFY_PROBE and: 1) Wake-up runtime suspended GPUs and call drm_helper_hpd_irq_event() on them, this is necessary in some cases for the GPU to detect connector hotplug events while runtime suspended 2) Return NOTIFY_BAD to stop acpi-video from emitting a bogus KEY_SWITCHVIDEOMODE key-press event There already is another acpi notifier block handler registered in drivers/gpu/drm/nouveau/nvkm/engine/device/acpi.c, but that is not suitable since that one gets unregistered on runtime suspend, and we also want to intercept ACPI_VIDEO_NOTIFY_PROBE when runtime suspended. Signed-off-by: Hans de Goede Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_display.c | 59 +++++++++++++++++++++++ drivers/gpu/drm/nouveau/nouveau_drv.h | 6 +++ 2 files changed, 65 insertions(+) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index afbf557b23d4..a0be029886d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -24,6 +24,7 @@ * */ +#include #include #include @@ -358,6 +359,55 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = { } \ } while(0) +#ifdef CONFIG_ACPI + +/* + * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch + * to the acpi subsys to move it there from drivers/acpi/acpi_video.c . + * This should be dropped once that is merged. + */ +#ifndef ACPI_VIDEO_NOTIFY_PROBE +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 +#endif + +static void +nouveau_display_acpi_work(struct work_struct *work) +{ + struct nouveau_drm *drm = container_of(work, typeof(*drm), acpi_work); + + pm_runtime_get_sync(drm->dev->dev); + + drm_helper_hpd_irq_event(drm->dev); + + pm_runtime_mark_last_busy(drm->dev->dev); + pm_runtime_put_sync(drm->dev->dev); +} + +static int +nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb); + struct acpi_bus_event *info = data; + + if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) { + if (info->type == ACPI_VIDEO_NOTIFY_PROBE) { + /* + * This may be the only indication we receive of a + * connector hotplug on a runtime suspended GPU, + * schedule acpi_work to check. 
+ */ + schedule_work(&drm->acpi_work); + + /* acpi-video should not generate keypresses for this */ + return NOTIFY_BAD; + } + } + + return NOTIFY_DONE; +} +#endif + int nouveau_display_init(struct drm_device *dev) { @@ -537,6 +587,12 @@ nouveau_display_create(struct drm_device *dev) } nouveau_backlight_init(dev); +#ifdef CONFIG_ACPI + INIT_WORK(&drm->acpi_work, nouveau_display_acpi_work); + drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy; + register_acpi_notifier(&drm->acpi_nb); +#endif + return 0; vblank_err: @@ -552,6 +608,9 @@ nouveau_display_destroy(struct drm_device *dev) { struct nouveau_display *disp = nouveau_display(dev); +#ifdef CONFIG_ACPI + unregister_acpi_notifier(&nouveau_drm(dev)->acpi_nb); +#endif nouveau_backlight_exit(dev); nouveau_display_vblank_fini(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 822a0212cd48..71d45324bd78 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -37,6 +37,8 @@ * - implemented limited ABI16/NVIF interop */ +#include + #include #include #include @@ -161,6 +163,10 @@ struct nouveau_drm { struct nvbios vbios; struct nouveau_display *display; struct backlight_device *backlight; +#ifdef CONFIG_ACPI + struct notifier_block acpi_nb; + struct work_struct acpi_work; +#endif /* power management */ struct nouveau_hwmon *hwmon; -- GitLab From d2beb1a9dd824bcf442e025cbf7fd1e3b4cf8db2 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 21 Nov 2016 17:50:54 +0100 Subject: [PATCH 091/786] drm/nouveau: Rename acpi_work to hpd_work [ Upstream commit 81280d0e24e76c35f40f997af26c779bcb10b04d ] We need to call drm_helper_hpd_irq_event() on resume to properly detect monitor connection / disconnection on some laptops. For runtime-resume (which gets called on resume from normal suspend too) we must call drm_helper_hpd_irq_event() from a workqueue to avoid a deadlock. Rename acpi_work to hpd_work, and move it out of the #ifdef CONFIG_ACPI blocks to make it suitable for generic work. Signed-off-by: Hans de Goede Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_display.c | 32 +++++++++++------------ drivers/gpu/drm/nouveau/nouveau_drv.h | 2 +- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index a0be029886d0..3cd2b8a7e530 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -359,21 +359,10 @@ static struct nouveau_drm_prop_enum_list dither_depth[] = { } \ } while(0) -#ifdef CONFIG_ACPI - -/* - * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch - * to the acpi subsys to move it there from drivers/acpi/acpi_video.c . - * This should be dropped once that is merged. 
- */ -#ifndef ACPI_VIDEO_NOTIFY_PROBE -#define ACPI_VIDEO_NOTIFY_PROBE 0x81 -#endif - static void -nouveau_display_acpi_work(struct work_struct *work) +nouveau_display_hpd_work(struct work_struct *work) { - struct nouveau_drm *drm = container_of(work, typeof(*drm), acpi_work); + struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work); pm_runtime_get_sync(drm->dev->dev); @@ -383,6 +372,17 @@ nouveau_display_acpi_work(struct work_struct *work) pm_runtime_put_sync(drm->dev->dev); } +#ifdef CONFIG_ACPI + +/* + * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch + * to the acpi subsys to move it there from drivers/acpi/acpi_video.c . + * This should be dropped once that is merged. + */ +#ifndef ACPI_VIDEO_NOTIFY_PROBE +#define ACPI_VIDEO_NOTIFY_PROBE 0x81 +#endif + static int nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, void *data) @@ -395,9 +395,9 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val, /* * This may be the only indication we receive of a * connector hotplug on a runtime suspended GPU, - * schedule acpi_work to check. + * schedule hpd_work to check. */ - schedule_work(&drm->acpi_work); + schedule_work(&drm->hpd_work); /* acpi-video should not generate keypresses for this */ return NOTIFY_BAD; @@ -587,8 +587,8 @@ nouveau_display_create(struct drm_device *dev) } nouveau_backlight_init(dev); + INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work); #ifdef CONFIG_ACPI - INIT_WORK(&drm->acpi_work, nouveau_display_acpi_work); drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy; register_acpi_notifier(&drm->acpi_nb); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 71d45324bd78..0c17ca1f5757 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -163,9 +163,9 @@ struct nouveau_drm { struct nvbios vbios; struct nouveau_display *display; struct backlight_device *backlight; + struct work_struct hpd_work; #ifdef CONFIG_ACPI struct notifier_block acpi_nb; - struct work_struct acpi_work; #endif /* power management */ -- GitLab From c7a29cf6c34a7873eb8e92b9a60b1a863ea72f6d Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 11 Jan 2017 21:25:24 -0500 Subject: [PATCH 092/786] drm/nouveau: Handle fbcon suspend/resume in seperate worker [ Upstream commit 15266ae38fe09dae07bd8812cb7a7717b1e1d992 ] Resuming from RPM can happen while already holding dev->mode_config.mutex. This means we can't actually handle fbcon in any RPM resume workers, since restoring fbcon requires grabbing dev->mode_config.mutex again. So move the fbcon suspend/resume code into it's own worker, and rely on that instead to avoid deadlocking. This fixes more deadlocks for runtime suspending the GPU on the ThinkPad W541. 
Reproduction recipe: - Get a machine with both optimus and a nvidia card with connectors attached to it - Wait for the nvidia GPU to suspend - Attempt to manually reprobe any of the connectors on the nvidia GPU using sysfs - *deadlock* [airlied: use READ_ONCE to address Hans's comment] Signed-off-by: Lyude Cc: Hans de Goede Cc: Kilian Singer Cc: Lukas Wunner Cc: David Airlie Signed-off-by: Dave Airlie Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_drv.h | 2 ++ drivers/gpu/drm/nouveau/nouveau_fbcon.c | 43 +++++++++++++++++++------ 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 0c17ca1f5757..1e7f1e326b3c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -164,6 +164,8 @@ struct nouveau_drm { struct nouveau_display *display; struct backlight_device *backlight; struct work_struct hpd_work; + struct work_struct fbcon_work; + int fbcon_new_state; #ifdef CONFIG_ACPI struct notifier_block acpi_nb; #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 9f5692726c16..2b79e27dd89c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -491,19 +491,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { .fb_probe = nouveau_fbcon_create, }; +static void +nouveau_fbcon_set_suspend_work(struct work_struct *work) +{ + struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work); + int state = READ_ONCE(drm->fbcon_new_state); + + if (state == FBINFO_STATE_RUNNING) + pm_runtime_get_sync(drm->dev->dev); + + console_lock(); + if (state == FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_restore(drm->dev); + drm_fb_helper_set_suspend(&drm->fbcon->helper, state); + if (state != FBINFO_STATE_RUNNING) + nouveau_fbcon_accel_save_disable(drm->dev); + console_unlock(); + + if (state == FBINFO_STATE_RUNNING) { + pm_runtime_mark_last_busy(drm->dev->dev); + pm_runtime_put_sync(drm->dev->dev); + } +} + void nouveau_fbcon_set_suspend(struct drm_device *dev, int state) { struct nouveau_drm *drm = nouveau_drm(dev); - if (drm->fbcon) { - console_lock(); - if (state == FBINFO_STATE_RUNNING) - nouveau_fbcon_accel_restore(dev); - drm_fb_helper_set_suspend(&drm->fbcon->helper, state); - if (state != FBINFO_STATE_RUNNING) - nouveau_fbcon_accel_save_disable(dev); - console_unlock(); - } + + if (!drm->fbcon) + return; + + drm->fbcon_new_state = state; + /* Since runtime resume can happen as a result of a sysfs operation, + * it's possible we already have the console locked. So handle fbcon + * init/deinit from a seperate work thread + */ + schedule_work(&drm->fbcon_work); } int @@ -524,6 +548,7 @@ nouveau_fbcon_init(struct drm_device *dev) fbcon->dev = dev; drm->fbcon = fbcon; + INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work); drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs); -- GitLab From c94e2edacea7f3f3567e14d5cb8bee58ffc536d9 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 11 Jan 2017 21:25:23 -0500 Subject: [PATCH 093/786] drm/nouveau: Don't enabling polling twice on runtime resume [ Upstream commit cae9ff036eea577856d5b12860b4c79c5e71db4a ] As it turns out, on cards that actually have CRTCs on them we're already calling drm_kms_helper_poll_enable(drm_dev) from nouveau_display_resume() before we call it in nouveau_pmops_runtime_resume(). 
This leads us to accidentally trying to enable polling twice, which results in a potential deadlock between the RPM locks and drm_dev->mode_config.mutex if we end up trying to enable polling the second time while output_poll_execute is running and holding the mode_config lock. As such, make sure we only enable polling in nouveau_pmops_runtime_resume() if we need to. This fixes hangs observed on the ThinkPad W541 Signed-off-by: Lyude Cc: Hans de Goede Cc: Kilian Singer Cc: Lukas Wunner Cc: David Airlie Signed-off-by: Dave Airlie Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_display.c | 3 ++- drivers/gpu/drm/nouveau/nouveau_drm.c | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 3cd2b8a7e530..73c8c57b04c9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -421,7 +421,8 @@ nouveau_display_init(struct drm_device *dev) return ret; /* enable polling for external displays */ - drm_kms_helper_poll_enable(dev); + if (!dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(dev); /* enable hotplug interrupts */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 3100fd88a015..8b5068ca3329 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -761,7 +761,10 @@ nouveau_pmops_runtime_resume(struct device *dev) pci_set_master(pdev); ret = nouveau_do_resume(drm_dev, true); - drm_kms_helper_poll_enable(drm_dev); + + if (!drm_dev->mode_config.poll_enabled) + drm_kms_helper_poll_enable(drm_dev); + /* do magic */ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); -- GitLab From bfa4d2e461dab77476f4570bf9ac0213b6cbe484 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Mon, 15 May 2017 12:04:31 +0300 Subject: [PATCH 094/786] drm/nouveau: Fix drm poll_helper handling [ Upstream commit 9a2eba337cacefc95b97c2726e3efdd435b3460e ] Commit cae9ff036eea effectively disabled the drm poll_helper by checking the wrong flag to see if the driver should enable the poll or not: mode_config.poll_enabled is only set to true by poll_init and it is not indicating if the poll is enabled or not. nouveau_display_create() will initialize the poll and going to disable it right away. After poll_init() the mode_config.poll_enabled will be true, but the poll itself is disabled. To avoid the race caused by calling the poll_enable() from different paths, this patch will enable the poll from one place, in the nouveau_display_hpd_work(). In case the pm_runtime is disabled we will enable the poll in nouveau_drm_load() once. 
Fixes: cae9ff036eea ("drm/nouveau: Don't enabling polling twice on runtime resume") Signed-off-by: Peter Ujfalusi Reviewed-by: Lyude Signed-off-by: Ben Skeggs Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_display.c | 6 ++---- drivers/gpu/drm/nouveau/nouveau_drm.c | 6 +++--- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 73c8c57b04c9..2c2b86d68129 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -367,6 +367,8 @@ nouveau_display_hpd_work(struct work_struct *work) pm_runtime_get_sync(drm->dev->dev); drm_helper_hpd_irq_event(drm->dev); + /* enable polling for external displays */ + drm_kms_helper_poll_enable(drm->dev); pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_put_sync(drm->dev->dev); @@ -420,10 +422,6 @@ nouveau_display_init(struct drm_device *dev) if (ret) return ret; - /* enable polling for external displays */ - if (!dev->mode_config.poll_enabled) - drm_kms_helper_poll_enable(dev); - /* enable hotplug interrupts */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct nouveau_connector *conn = nouveau_connector(connector); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 8b5068ca3329..42829a942e33 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -483,6 +483,9 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) pm_runtime_allow(dev->dev); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put(dev->dev); + } else { + /* enable polling for external displays */ + drm_kms_helper_poll_enable(dev); } return 0; @@ -762,9 +765,6 @@ nouveau_pmops_runtime_resume(struct device *dev) ret = nouveau_do_resume(drm_dev, true); - if (!drm_dev->mode_config.poll_enabled) - drm_kms_helper_poll_enable(drm_dev); - /* do magic */ nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25)); vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON); -- GitLab From 47c362f147aaee88c7da3fdce6e99212376bbe44 Mon Sep 17 00:00:00 2001 From: "Y.C. Chen" Date: Thu, 26 Jan 2017 09:45:40 +0800 Subject: [PATCH 095/786] drm/ast: Fixed system hanged if disable P2A [ Upstream commit 6c971c09f38704513c426ba6515f22fb3d6c87d5 ] The original ast driver will access some BMC configuration through P2A bridge that can be disabled since AST2300 and after. It will cause system hanged if P2A bridge is disabled. Here is the update to fix it. Signed-off-by: Y.C. 
Chen Signed-off-by: Dave Airlie Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/ast/ast_drv.h | 1 + drivers/gpu/drm/ast/ast_main.c | 157 +++++++++++++++++---------------- drivers/gpu/drm/ast/ast_post.c | 18 ++-- 3 files changed, 97 insertions(+), 79 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 908011d2c8f5..7abda94fc2cf 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -113,6 +113,7 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; + bool DisableP2A; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f75c6421db62..533e762d036d 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } else *need_post = false; + /* Check P2A Access */ + ast->DisableP2A = true; + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) + ast->DisableP2A = false; + /* Check if we support wide screen */ switch (ast->chip) { case AST1180: @@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ - ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ - ast->support_wide_screen = true; + if (ast->DisableP2A == false) { + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + data = ast_read32(ast, 0x1207c); + data &= 0x300; + if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ + ast->support_wide_screen = true; + } } break; } @@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev) uint32_t data, data2; uint32_t denum, num, div, ref_pll; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - - ast_write32(ast, 0x10000, 0xfc600309); - - do { - if (pci_channel_offline(dev->pdev)) - return -EIO; - } while (ast_read32(ast, 0x10000) != 0x01); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) + if (ast->DisableP2A) + { ast->dram_bus_width = 16; + ast->dram_type = AST_DRAM_1Gx16; + ast->mclk = 396; + } else - ast->dram_bus_width = 32; + { + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + data = ast_read32(ast, 0x10004); + + if (data & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; + + if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (data & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; + break; + default: + case 1: + ast->dram_type = AST_DRAM_1Gx16; + break; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (data & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (data & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; + break; + } + } - if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { - case 
0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; + data = ast_read32(ast, 0x10120); + data2 = ast_read32(ast, 0x10170); + if (data2 & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = data & 0x1f; + num = (data & 0x3fe0) >> 5; + data = (data & 0xc000) >> 14; + switch (data) { case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (data & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; + div = 0x4; break; - case 8: - if (data & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; + case 2: + case 1: + div = 0x2; break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; + default: + div = 0x1; break; } + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); } - - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 0x10170); - if (data2 & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; - - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { - case 3: - div = 0x4; - break; - case 2: - case 1: - div = 0x2; - break; - default: - div = 0x1; - break; - } - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 30672a3df8a9..270e8fb2803f 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -375,12 +375,20 @@ void ast_post_gpu(struct drm_device *dev) ast_enable_mmio(dev); ast_set_def_ext_reg(dev); - if (ast->chip == AST2300 || ast->chip == AST2400) - ast_init_dram_2300(dev); - else - ast_init_dram_reg(dev); + if (ast->DisableP2A == false) + { + if (ast->chip == AST2300 || ast->chip == AST2400) + ast_init_dram_2300(dev); + else + ast_init_dram_reg(dev); - ast_init_3rdtx(dev); + ast_init_3rdtx(dev); + } + else + { + if (ast->tx_chip_type != AST_TX_NONE) + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ + } } /* AST 2300 DRAM settings */ -- GitLab From d1626825968346dc975640cff8f823a5fc92a6c6 Mon Sep 17 00:00:00 2001 From: Kazuya Mizuguchi Date: Thu, 26 Jan 2017 14:29:27 +0100 Subject: [PATCH 096/786] ravb: unmap descriptors when freeing rings [ Upstream commit a47b70ea86bdeb3091341f5ae3ef580f1a1ad822 ] "swiotlb buffer is full" errors occur after repeated initialisation of a device - f.e. suspend/resume or ip link set up/down. This is because memory mapped using dma_map_single() in ravb_ring_format() and ravb_start_xmit() is not released. Resolve this problem by unmapping descriptors when freeing rings. Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper") Signed-off-by: Kazuya Mizuguchi [simon: reworked] Signed-off-by: Simon Horman Acked-by: Sergei Shtylyov Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/renesas/ravb_main.c | 112 +++++++++++++---------- 1 file changed, 64 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 862f18ed6022..510ff62584d6 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = { .get_mdio_data = ravb_get_mdio_data, }; +/* Free TX skb function for AVB-IP */ +static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct net_device_stats *stats = &priv->stats[q]; + struct ravb_tx_desc *desc; + int free_num = 0; + int entry; + u32 size; + + for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { + bool txed; + + entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * + NUM_TX_DESC); + desc = &priv->tx_ring[q][entry]; + txed = desc->die_dt == DT_FEMPTY; + if (free_txed_only && !txed) + break; + /* Descriptor type must be checked before all other reads */ + dma_rmb(); + size = le16_to_cpu(desc->ds_tagl) & TX_DS; + /* Free the original skb. */ + if (priv->tx_skb[q][entry / NUM_TX_DESC]) { + dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), + size, DMA_TO_DEVICE); + /* Last packet descriptor? */ + if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { + entry /= NUM_TX_DESC; + dev_kfree_skb_any(priv->tx_skb[q][entry]); + priv->tx_skb[q][entry] = NULL; + if (txed) + stats->tx_packets++; + } + free_num++; + } + if (txed) + stats->tx_bytes += size; + desc->die_dt = DT_EEMPTY; + } + return free_num; +} + /* Free skb's and DMA buffers for Ethernet AVB */ static void ravb_ring_free(struct net_device *ndev, int q) { @@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q) kfree(priv->rx_skb[q]); priv->rx_skb[q] = NULL; - /* Free TX skb ringbuffer */ - if (priv->tx_skb[q]) { - for (i = 0; i < priv->num_tx_ring[q]; i++) - dev_kfree_skb(priv->tx_skb[q][i]); - } - kfree(priv->tx_skb[q]); - priv->tx_skb[q] = NULL; - /* Free aligned TX buffers */ kfree(priv->tx_align[q]); priv->tx_align[q] = NULL; if (priv->rx_ring[q]) { + for (i = 0; i < priv->num_rx_ring[q]; i++) { + struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; + + if (!dma_mapping_error(ndev->dev.parent, + le32_to_cpu(desc->dptr))) + dma_unmap_single(ndev->dev.parent, + le32_to_cpu(desc->dptr), + PKT_BUF_SZ, + DMA_FROM_DEVICE); + } ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], @@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q) } if (priv->tx_ring[q]) { + ravb_tx_free(ndev, q, false); + ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] * NUM_TX_DESC + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], priv->tx_desc_dma[q]); priv->tx_ring[q] = NULL; } + + /* Free TX skb ringbuffer. + * SKBs are freed by ravb_tx_free() call above. 
+ */ + kfree(priv->tx_skb[q]); + priv->tx_skb[q] = NULL; } /* Format skb and descriptor buffer for Ethernet AVB */ @@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev) return 0; } -/* Free TX skb function for AVB-IP */ -static int ravb_tx_free(struct net_device *ndev, int q) -{ - struct ravb_private *priv = netdev_priv(ndev); - struct net_device_stats *stats = &priv->stats[q]; - struct ravb_tx_desc *desc; - int free_num = 0; - int entry; - u32 size; - - for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { - entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * - NUM_TX_DESC); - desc = &priv->tx_ring[q][entry]; - if (desc->die_dt != DT_FEMPTY) - break; - /* Descriptor type must be checked before all other reads */ - dma_rmb(); - size = le16_to_cpu(desc->ds_tagl) & TX_DS; - /* Free the original skb. */ - if (priv->tx_skb[q][entry / NUM_TX_DESC]) { - dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - size, DMA_TO_DEVICE); - /* Last packet descriptor? */ - if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) { - entry /= NUM_TX_DESC; - dev_kfree_skb_any(priv->tx_skb[q][entry]); - priv->tx_skb[q][entry] = NULL; - stats->tx_packets++; - } - free_num++; - } - stats->tx_bytes += size; - desc->die_dt = DT_EEMPTY; - } - return free_num; -} - static void ravb_get_tx_tstamp(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); @@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) spin_lock_irqsave(&priv->lock, flags); /* Clear TX interrupt */ ravb_write(ndev, ~mask, TIS); - ravb_tx_free(ndev, q); + ravb_tx_free(ndev, q, true); netif_wake_subqueue(ndev, q); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -1571,7 +1586,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) priv->cur_tx[q] += NUM_TX_DESC; if (priv->cur_tx[q] - priv->dirty_tx[q] > - (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q)) + (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && + !ravb_tx_free(ndev, q, true)) netif_stop_subqueue(ndev, q); exit: -- GitLab From 5a0d41409b4dbfb50243ddee7a8b62ba838c5295 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Thu, 26 Jan 2017 15:14:52 -0500 Subject: [PATCH 097/786] nfs: Fix "Don't increment lock sequence ID after NFS4ERR_MOVED" [ Upstream commit 406dab8450ec76eca88a1af2fc15d18a2b36ca49 ] Lock sequence IDs are bumped in decode_lock by calling nfs_increment_seqid(). nfs_increment_sequid() does not use the seqid_mutating_err() function fixed in commit 059aa7348241 ("Don't increment lock sequence ID after NFS4ERR_MOVED"). 
Fixes: 059aa7348241 ("Don't increment lock sequence ID after ...") Signed-off-by: Chuck Lever Tested-by: Xuan Qi Cc: stable@vger.kernel.org # v3.7+ Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/nfs/nfs4state.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 0959c9661662..92671914067f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1079,6 +1079,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) case -NFS4ERR_BADXDR: case -NFS4ERR_RESOURCE: case -NFS4ERR_NOFILEHANDLE: + case -NFS4ERR_MOVED: /* Non-seqid mutating errors */ return; }; -- GitLab From 6149abe7f4042e5720544d4e5675d7a45a827ce7 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 19 Jan 2017 09:55:08 -0600 Subject: [PATCH 098/786] nvmet-rdma: Fix missing dma sync to nvme data structures [ Upstream commit 748ff8408f8e208f279ba221e5c12612fbb4dddb ] This patch performs dma sync operations on nvme_command and nvme_completion. nvme_command is synced (a) on receiving of the recv queue completion for cpu access. (b) before posting recv wqe back to rdma adapter for device access. nvme_completion is synced (a) on receiving of the recv queue completion of associated nvme_command for cpu access. (b) before posting send wqe to rdma adapter for device access. This patch is generated for git://git.infradead.org/nvme-fabrics.git Branch: nvmf-4.10 Signed-off-by: Parav Pandit Reviewed-by: Max Gurtovoy Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/target/rdma.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 005ef5d17a19..ca8ddc3fb19e 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, { struct ib_recv_wr *bad_wr; + ib_dma_sync_single_for_device(ndev->device, + cmd->sge[0].addr, cmd->sge[0].length, + DMA_FROM_DEVICE); + if (ndev->srq) return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); @@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req) first_wr = &rsp->send_wr; nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); + + ib_dma_sync_single_for_device(rsp->queue->dev->device, + rsp->send_sge.addr, rsp->send_sge.length, + DMA_TO_DEVICE); + if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { pr_err("sending cmd response failed\n"); nvmet_rdma_release_rsp(rsp); @@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, cmd->n_rdma = 0; cmd->req.port = queue->port; + + ib_dma_sync_single_for_cpu(queue->dev->device, + cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, + DMA_FROM_DEVICE); + ib_dma_sync_single_for_cpu(queue->dev->device, + cmd->send_sge.addr, cmd->send_sge.length, + DMA_TO_DEVICE); + if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, &queue->nvme_sq, &nvmet_rdma_ops)) return; -- GitLab From 993ba7ffcf6e0aa5e4b3ed193a99fc8f4e073325 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Thu, 26 Jan 2017 09:38:31 +0800 Subject: [PATCH 099/786] r8152: avoid start_xmit to call napi_schedule during autosuspend [ Upstream commit 26afec39306926654e9cd320f19bbf3685bb0997 ] Adjust the setting of the flag of SELECTIVE_SUSPEND to prevent start_xmit() from calling napi_schedule() directly during runtime suspend. 
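The ordering the patch establishes can be sketched with plain C11 atomics (made-up names, not the r8152 driver): the suspend path publishes the flag before it starts checking whether suspend can proceed, and takes it back on every early-exit path, so the transmit side never queues new NAPI work while suspend is in progress.

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  static atomic_bool selective_suspend;

  /* transmit side: analogue of start_xmit() deciding whether to napi_schedule() */
  static bool xmit_may_schedule(void)
  {
          return !atomic_load(&selective_suspend);
  }

  /* suspend side: set the flag first, clear it again on every bail-out path */
  static int runtime_suspend(bool rx_fifo_empty)
  {
          atomic_store(&selective_suspend, true);

          if (!rx_fifo_empty) {                            /* device still busy */
                  atomic_store(&selective_suspend, false); /* undo before returning */
                  return -16;                              /* -EBUSY */
          }
          /* ... quiesce rx, disable napi, stop interrupts ... */
          return 0;
  }

  int main(void)
  {
          int rc = runtime_suspend(false);                 /* rx busy: aborted */
          printf("busy: rc=%d, xmit allowed=%d\n", rc, xmit_may_schedule());

          rc = runtime_suspend(true);                      /* idle: suspend proceeds */
          printf("idle: rc=%d, xmit allowed=%d\n", rc, xmit_may_schedule());
          return 0;
  }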
After calling napi_disable() or clearing the flag of WORK_ENABLE, scheduling the napi is useless. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/r8152.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 90b426c5ffce..92c53d64fdc2 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3583,10 +3583,15 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp) struct net_device *netdev = tp->netdev; int ret = 0; + set_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); + if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) { u32 rcr = 0; if (delay_autosuspend(tp)) { + clear_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); ret = -EBUSY; goto out1; } @@ -3603,6 +3608,8 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp) if (!(ocp_data & RXFIFO_EMPTY)) { rxdy_gated_en(tp, false); ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr); + clear_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); ret = -EBUSY; goto out1; } @@ -3622,8 +3629,6 @@ static int rtl8152_rumtime_suspend(struct r8152 *tp) } } - set_bit(SELECTIVE_SUSPEND, &tp->flags); - out1: return ret; } @@ -3679,12 +3684,13 @@ static int rtl8152_resume(struct usb_interface *intf) if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { tp->rtl_ops.autosuspend_en(tp, false); - clear_bit(SELECTIVE_SUSPEND, &tp->flags); napi_disable(&tp->napi); set_bit(WORK_ENABLE, &tp->flags); if (netif_carrier_ok(tp->netdev)) rtl_start_rx(tp); napi_enable(&tp->napi); + clear_bit(SELECTIVE_SUSPEND, &tp->flags); + smp_mb__after_atomic(); } else { tp->rtl_ops.up(tp); netif_carrier_off(tp->netdev); -- GitLab From 4242f0bce47e744d28b3c98c34f5c792fefb62ef Mon Sep 17 00:00:00 2001 From: hayeswang Date: Thu, 26 Jan 2017 09:38:34 +0800 Subject: [PATCH 100/786] r8152: check rx after napi is enabled [ Upstream commit 7489bdadb7d17d3c81e39b85688500f700beb790 ] Schedule the napi after napi_enable() for rx, if it is necessary. If the rx is completed when napi is disabled, the sheduling of napi would be lost. Then, no one handles the rx packet until next napi is scheduled. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/r8152.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 92c53d64fdc2..32f1a4c46e71 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -32,7 +32,7 @@ #define NETNEXT_VERSION "08" /* Information for net */ -#define NET_VERSION "7" +#define NET_VERSION "8" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers " @@ -3552,6 +3552,9 @@ static int rtl8152_post_reset(struct usb_interface *intf) napi_enable(&tp->napi); + if (!list_empty(&tp->rx_done)) + napi_schedule(&tp->napi); + return 0; } @@ -3691,6 +3694,8 @@ static int rtl8152_resume(struct usb_interface *intf) napi_enable(&tp->napi); clear_bit(SELECTIVE_SUSPEND, &tp->flags); smp_mb__after_atomic(); + if (!list_empty(&tp->rx_done)) + napi_schedule(&tp->napi); } else { tp->rtl_ops.up(tp); netif_carrier_off(tp->netdev); -- GitLab From 9507910ca46d5b549198a6bf1787e3db5b3a651b Mon Sep 17 00:00:00 2001 From: hayeswang Date: Thu, 26 Jan 2017 09:38:33 +0800 Subject: [PATCH 101/786] r8152: re-schedule napi for tx [ Upstream commit 248b213ad908b88db15941202ef7cb7eb137c1a0 ] Re-schedule napi after napi_complete() for tx, if it is necessay. In r8152_poll(), if the tx is completed after tx_bottom() and before napi_complete(), the scheduling of napi would be lost. Then, no one handles the next tx until the next napi_schedule() is called. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/r8152.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 32f1a4c46e71..8b8343b3fc39 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget) napi_complete(napi); if (!list_empty(&tp->rx_done)) napi_schedule(napi); + else if (!skb_queue_empty(&tp->tx_queue) && + !list_empty(&tp->tx_free)) + napi_schedule(napi); } return work_done; -- GitLab From c1a7106b3b0149c263998d801619f3023af03234 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Fri, 20 Jan 2017 14:33:55 +0800 Subject: [PATCH 102/786] r8152: fix rtl8152_post_reset function [ Upstream commit 2c561b2b728ca4013e76d6439bde2c137503745e ] The rtl8152_post_reset() should sumbit rx urb and interrupt transfer, otherwise the rx wouldn't work and the linking change couldn't be detected. Signed-off-by: Hayes Wang Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/r8152.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 8b8343b3fc39..039607dcfa8d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3548,12 +3548,14 @@ static int rtl8152_post_reset(struct usb_interface *intf) if (netif_carrier_ok(netdev)) { mutex_lock(&tp->control); tp->rtl_ops.enable(tp); + rtl_start_rx(tp); rtl8152_set_rx_mode(netdev); mutex_unlock(&tp->control); netif_wake_queue(netdev); } napi_enable(&tp->napi); + usb_submit_urb(tp->intr_urb, GFP_KERNEL); if (!list_empty(&tp->rx_done)) napi_schedule(&tp->napi); -- GitLab From e9ace99c4bb097a44b20b3e6d356e4dd9b5f0c02 Mon Sep 17 00:00:00 2001 From: hayeswang Date: Thu, 26 Jan 2017 09:38:32 +0800 Subject: [PATCH 103/786] r8152: avoid start_xmit to schedule napi when napi is disabled [ Upstream commit de9bf29dd6e4a8a874cb92f8901aed50a9d0b1d3 ] Stop the tx when the napi is disabled to prevent napi_schedule() is called. Signed-off-by: Hayes Wang Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/r8152.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 039607dcfa8d..afb953a258cd 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3158,10 +3158,13 @@ static void set_carrier(struct r8152 *tp) if (!netif_carrier_ok(netdev)) { tp->rtl_ops.enable(tp); set_bit(RTL8152_SET_RX_MODE, &tp->flags); + netif_stop_queue(netdev); napi_disable(&tp->napi); netif_carrier_on(netdev); rtl_start_rx(tp); napi_enable(&tp->napi); + netif_wake_queue(netdev); + netif_info(tp, link, netdev, "carrier on\n"); } } else { if (netif_carrier_ok(netdev)) { @@ -3169,6 +3172,7 @@ static void set_carrier(struct r8152 *tp) napi_disable(&tp->napi); tp->rtl_ops.disable(tp); napi_enable(&tp->napi); + netif_info(tp, link, netdev, "carrier off\n"); } } } @@ -3518,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf) if (!netif_running(netdev)) return 0; + netif_stop_queue(netdev); napi_disable(&tp->napi); clear_bit(WORK_ENABLE, &tp->flags); usb_kill_urb(tp->intr_urb); cancel_delayed_work_sync(&tp->schedule); if (netif_carrier_ok(netdev)) { - netif_stop_queue(netdev); mutex_lock(&tp->control); tp->rtl_ops.disable(tp); mutex_unlock(&tp->control); @@ -3551,10 +3555,10 @@ static int rtl8152_post_reset(struct usb_interface *intf) rtl_start_rx(tp); rtl8152_set_rx_mode(netdev); mutex_unlock(&tp->control); - netif_wake_queue(netdev); } napi_enable(&tp->napi); + netif_wake_queue(netdev); usb_submit_urb(tp->intr_urb, GFP_KERNEL); if (!list_empty(&tp->rx_done)) -- GitLab From cb7188295d18dcfbca1586d176f88113e2eae7f5 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 25 Jan 2017 09:20:55 +0100 Subject: [PATCH 104/786] net-next: ethernet: mediatek: change the compatible string [ Upstream commit 8b901f6bbcf12a20e43105d161bedde093431e61 ] When the binding was defined, I was not aware that mt2701 was an earlier version of the SoC. For sake of consistency, the ethernet driver should use mt2701 inside the compat string as this is the earliest SoC with the ethernet core. The ethernet driver is currently of no real use until we finish and upstream the DSA driver. There are no users of this binding yet. It should be safe to fix this now before it is too late and we need to provide backward compatibility for the mt7623-eth compat string. Reported-by: Sean Wang Signed-off-by: John Crispin Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 86a89cbd3ec9..4832223f1500 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2518,7 +2518,7 @@ static int mtk_remove(struct platform_device *pdev) } const struct of_device_id of_mtk_match[] = { - { .compatible = "mediatek,mt7623-eth" }, + { .compatible = "mediatek,mt2701-eth" }, {}, }; MODULE_DEVICE_TABLE(of, of_mtk_match); -- GitLab From 710ea9b028de2c5cd0565407d31b633972074182 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 25 Jan 2017 02:55:07 -0500 Subject: [PATCH 105/786] bnxt_en: Fix bnxt_reset() in the slow path task. 
[ Upstream commit a551ee94ea723b4af9b827c7460f108bc13425ee ] In bnxt_sp_task(), we set a bit BNXT_STATE_IN_SP_TASK so that bnxt_close() will synchronize and wait for bnxt_sp_task() to finish. Some functions in bnxt_sp_task() require us to clear BNXT_STATE_IN_SP_TASK and then acquire rtnl_lock() to prevent race conditions. There are some bugs related to this logic. This patch refactors the code to have common bnxt_rtnl_lock_sp() and bnxt_rtnl_unlock_sp() to handle the RTNL and the clearing/setting of the bit. Multiple functions will need the same logic. We also need to move bnxt_reset() to the end of bnxt_sp_task(). Functions that clear BNXT_STATE_IN_SP_TASK must be the last functions to be called in bnxt_sp_task(). The common scheme will handle the condition properly. Signed-off-by: Michael Chan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 38 +++++++++++++++-------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 48ee4110ef6e..b37108e077c3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6080,23 +6080,32 @@ static void bnxt_timer(unsigned long data) mod_timer(&bp->timer, jiffies + bp->current_interval); } -/* Only called from bnxt_sp_task() */ -static void bnxt_reset(struct bnxt *bp, bool silent) +static void bnxt_rtnl_lock_sp(struct bnxt *bp) { - /* bnxt_reset_task() calls bnxt_close_nic() which waits - * for BNXT_STATE_IN_SP_TASK to clear. - * If there is a parallel dev_close(), bnxt_close() may be holding + /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK + * set. If the device is being closed, bnxt_close() may be holding * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). */ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); rtnl_lock(); - if (test_bit(BNXT_STATE_OPEN, &bp->state)) - bnxt_reset_task(bp, silent); +} + +static void bnxt_rtnl_unlock_sp(struct bnxt *bp) +{ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); rtnl_unlock(); } +/* Only called from bnxt_sp_task() */ +static void bnxt_reset(struct bnxt *bp, bool silent) +{ + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + bnxt_reset_task(bp, silent); + bnxt_rtnl_unlock_sp(bp); +} + static void bnxt_cfg_ntp_filters(struct bnxt *); static void bnxt_sp_task(struct work_struct *work) @@ -6142,18 +6151,21 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); } - if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) - bnxt_reset(bp, false); - - if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) - bnxt_reset(bp, true); - if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) bnxt_get_port_module_status(bp); if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) bnxt_hwrm_port_qstats(bp); + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They + * must be the last functions to be called before exiting. 
+ */ + if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) + bnxt_reset(bp, false); + + if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) + bnxt_reset(bp, true); + smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } -- GitLab From e89ffe41e9a67901034accd1d19dbeefb390fafb Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 16 Nov 2016 21:13:08 -0500 Subject: [PATCH 106/786] bnxt_en: Enhance autoneg support. [ Upstream commit 286ef9d64ea7435a1e323d12b44a309e15cbff0e ] On some dual port NICs, the speed setting on one port can affect the available speed on the other port. Add logic to detect these changes and adjust the advertised speed settings when necessary. Signed-off-by: Michael Chan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 23 +++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 24 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b37108e077c3..b30d447f8833 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1499,6 +1499,7 @@ static int bnxt_async_event_process(struct bnxt *bp, netdev_warn(bp->dev, "Link speed %d no longer supported\n", speed); } + set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event); /* fall thru */ } case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: @@ -5110,6 +5111,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) struct hwrm_port_phy_qcfg_input req = {0}; struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; u8 link_up = link_info->link_up; + u16 diff; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); @@ -5197,6 +5199,23 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->link_up = 0; } mutex_unlock(&bp->hwrm_cmd_lock); + + diff = link_info->support_auto_speeds ^ link_info->advertising; + if ((link_info->support_auto_speeds | diff) != + link_info->support_auto_speeds) { + /* An advertised speed is no longer supported, so we need to + * update the advertisement settings. See bnxt_reset() for + * comments about the rtnl_lock() sequence below. 
+ */ + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_lock(); + link_info->advertising = link_info->support_auto_speeds; + if (test_bit(BNXT_STATE_OPEN, &bp->state) && + (link_info->autoneg & BNXT_AUTONEG_SPEED)) + bnxt_hwrm_set_link_setting(bp, true, false); + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_unlock(); + } return 0; } @@ -6126,6 +6145,10 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { + if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, + &bp->sp_event)) + bnxt_hwrm_phy_qcaps(bp); + rc = bnxt_update_link(bp, true); if (rc) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 51b164a0e844..666bc0608ed7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1089,6 +1089,7 @@ struct bnxt { #define BNXT_RESET_TASK_SILENT_SP_EVENT 11 #define BNXT_GENEVE_ADD_PORT_SP_EVENT 12 #define BNXT_GENEVE_DEL_PORT_SP_EVENT 13 +#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV -- GitLab From 66deb409251c6e88de9cdffc5a952670fe544410 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 25 Jan 2017 02:55:08 -0500 Subject: [PATCH 107/786] bnxt_en: Fix RTNL lock usage on bnxt_update_link(). [ Upstream commit 0eaa24b971ae251ae9d3be23f77662a655532063 ] bnxt_update_link() is called from multiple code paths. Most callers, such as open, ethtool, already hold RTNL. Only the caller bnxt_sp_task() does not. So it is a bug to take RTNL inside bnxt_update_link(). Fix it by removing the RTNL inside bnxt_update_link(). The function now expects the caller to always hold RTNL. In bnxt_sp_task(), call bnxt_rtnl_lock_sp() before calling bnxt_update_link(). We also need to move the call to the end of bnxt_sp_task() since it will be clearing the BNXT_STATE_IN_SP_TASK bit. Signed-off-by: Michael Chan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 37 +++++++++++------------ 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b30d447f8833..9f42850a10cf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5204,17 +5204,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) if ((link_info->support_auto_speeds | diff) != link_info->support_auto_speeds) { /* An advertised speed is no longer supported, so we need to - * update the advertisement settings. See bnxt_reset() for - * comments about the rtnl_lock() sequence below. + * update the advertisement settings. Caller holds RTNL + * so we can modify link settings. 
*/ - clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_lock(); link_info->advertising = link_info->support_auto_speeds; - if (test_bit(BNXT_STATE_OPEN, &bp->state) && - (link_info->autoneg & BNXT_AUTONEG_SPEED)) + if (link_info->autoneg & BNXT_AUTONEG_SPEED) bnxt_hwrm_set_link_setting(bp, true, false); - set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_unlock(); } return 0; } @@ -6130,7 +6125,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *); static void bnxt_sp_task(struct work_struct *work) { struct bnxt *bp = container_of(work, struct bnxt, sp_task); - int rc; set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); smp_mb__after_atomic(); @@ -6144,16 +6138,6 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event)) bnxt_cfg_ntp_filters(bp); - if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { - if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, - &bp->sp_event)) - bnxt_hwrm_phy_qcaps(bp); - - rc = bnxt_update_link(bp, true); - if (rc) - netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", - rc); - } if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_hwrm_exec_fwd_req(bp); if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { @@ -6183,6 +6167,21 @@ static void bnxt_sp_task(struct work_struct *work) /* These functions below will clear BNXT_STATE_IN_SP_TASK. They * must be the last functions to be called before exiting. */ + if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { + int rc = 0; + + if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, + &bp->sp_event)) + bnxt_hwrm_phy_qcaps(bp); + + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + rc = bnxt_update_link(bp, true); + bnxt_rtnl_unlock_sp(bp); + if (rc) + netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", + rc); + } if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) bnxt_reset(bp, false); -- GitLab From 12a583ddf02a976080f36f75139a9167a9975e68 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 25 Jan 2017 02:55:09 -0500 Subject: [PATCH 108/786] bnxt_en: Fix RTNL lock usage on bnxt_get_port_module_status(). [ Upstream commit 90c694bb71819fb5bd3501ac397307d7e41ddeca ] bnxt_get_port_module_status() calls bnxt_update_link() which expects RTNL to be held. In bnxt_sp_task() that does not hold RTNL, we need to call it with a prior call to bnxt_rtnl_lock_sp() and the call needs to be moved to the end of bnxt_sp_task(). Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9f42850a10cf..5cc0f8cfec87 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6158,9 +6158,6 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); } - if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) - bnxt_get_port_module_status(bp); - if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) bnxt_hwrm_port_qstats(bp); @@ -6182,6 +6179,12 @@ static void bnxt_sp_task(struct work_struct *work) netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", rc); } + if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { + bnxt_rtnl_lock_sp(bp); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + bnxt_get_port_module_status(bp); + bnxt_rtnl_unlock_sp(bp); + } if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) bnxt_reset(bp, false); -- GitLab From bf812fe92683cd52581824dc2c6767cbf04a72fb Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 24 Jan 2017 14:05:16 +0800 Subject: [PATCH 109/786] sctp: sctp gso should set feature with NETIF_F_SG when calling skb_segment [ Upstream commit 5207f3996338e1db71363fe381c81aaf1e54e4e3 ] Now sctp gso puts segments into skb's frag_list, then processes these segments in skb_segment. But skb_segment handles them only when gs is enabled, as it's in the same branch with skb's frags. Although almost all the NICs support sg other than some old ones, but since commit 1e16aa3ddf86 ("net: gso: use feature flag argument in all protocol gso handlers"), features &= skb->dev->hw_enc_features, and xfrm_output_gso call skb_segment with features = 0, which means sctp gso would call skb_segment with sg = 0, and skb_segment would not work as expected. This patch is to fix it by setting features param with NETIF_F_SG when calling skb_segment so that it can go the right branch to process the skb's frag_list. Signed-off-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sctp/offload.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 7e869d0cca69..4f5a2b580aa5 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c @@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb, goto out; } - segs = skb_segment(skb, features | NETIF_F_HW_CSUM); + segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG); if (IS_ERR(segs)) goto out; -- GitLab From fb72eca1333c374e6eefff7f6e99917c44c4f231 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 24 Jan 2017 14:01:53 +0800 Subject: [PATCH 110/786] sctp: sctp_addr_id2transport should verify the addr before looking up assoc [ Upstream commit 6f29a130613191d3c6335169febe002cba00edf5 ] sctp_addr_id2transport is a function for sockopt to look up assoc by address. As the address is from userspace, it can be a v4-mapped v6 address. But in sctp protocol stack, it always handles a v4-mapped v6 address as a v4 address. So it's necessary to convert it to a v4 address before looking up assoc by address. 
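For readers unfamiliar with v4-mapped addresses, the standalone snippet below (standard socket API, example address invented; not the SCTP code) shows what the conversion amounts to: ::ffff:a.b.c.d carries the IPv4 address in its last four bytes, and treating it as anything other than a v4 address will not match an association keyed by the v4 form.

  #include <arpa/inet.h>
  #include <netinet/in.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          struct in6_addr a6;

          inet_pton(AF_INET6, "::ffff:192.0.2.7", &a6);

          if (IN6_IS_ADDR_V4MAPPED(&a6)) {
                  struct in_addr a4;
                  char buf[INET_ADDRSTRLEN];

                  memcpy(&a4, &a6.s6_addr[12], sizeof(a4));   /* embedded IPv4 */
                  printf("v4-mapped -> %s\n",
                         inet_ntop(AF_INET, &a4, buf, sizeof(buf)));
          }
          return 0;
  }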
This patch is to fix it by calling sctp_verify_addr in which it can do this conversion before calling sctp_endpoint_lookup_assoc, just like what sctp_sendmsg and __sctp_connect do for the address from users. Signed-off-by: Xin Long Acked-by: Neil Horman Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 14346dccc4fe..e1719c695174 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, sctp_assoc_t id) { struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; - struct sctp_transport *transport; + struct sctp_af *af = sctp_get_af_specific(addr->ss_family); union sctp_addr *laddr = (union sctp_addr *)addr; + struct sctp_transport *transport; + + if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) + return NULL; addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, laddr, -- GitLab From 5e4cafca06bfc6477b237161b7ddea7d14228803 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 24 Jan 2017 09:18:58 -0600 Subject: [PATCH 111/786] usb: musb: Fix external abort on non-linefetch for musb_irq_work() [ Upstream commit 3ba7b7795b7e8889af1377904c55c7fae9e0c775 ] While testing musb host mode cable plugging on a BeagleBone, I came across this error: Unhandled fault: external abort on non-linefetch (0x1008) at 0xd1dcfc60 ... [] (musb_default_readb [musb_hdrc]) from [] (musb_irq_work+0x1c/0x180 [musb_hdrc]) [] (musb_irq_work [musb_hdrc]) from [] (process_one_work+0x2b4/0x808) [] (process_one_work) from [] (worker_thread+0x3c/0x550) [] (worker_thread) from [] (kthread+0x104/0x148) [] (kthread) from [] (ret_from_fork+0x14/0x24) Signed-off-by: Tony Lindgren Signed-off-by: Bin Liu Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_core.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 358feca54945..261ed2ca28f9 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1909,6 +1909,14 @@ static void musb_pm_runtime_check_session(struct musb *musb) static void musb_irq_work(struct work_struct *data) { struct musb *musb = container_of(data, struct musb, irq_work.work); + int error; + + error = pm_runtime_get_sync(musb->controller); + if (error < 0) { + dev_err(musb->controller, "Could not enable: %i\n", error); + + return; + } musb_pm_runtime_check_session(musb); @@ -1916,6 +1924,9 @@ static void musb_irq_work(struct work_struct *data) musb->xceiv_old_state = musb->xceiv->otg->state; sysfs_notify(&musb->controller->kobj, NULL, "mode"); } + + pm_runtime_mark_last_busy(musb->controller); + pm_runtime_put_autosuspend(musb->controller); } static void musb_recover_from_babble(struct musb *musb) -- GitLab From 093d494c6d1db5d8bda26b64212edac62cb7d49a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 24 Jan 2017 15:18:49 -0800 Subject: [PATCH 112/786] mn10300: fix build error of missing fpu_save() [ Upstream commit 3705ccfdd1e8b539225ce20e3925a945cc788d67 ] When CONFIG_FPU is not enabled on arch/mn10300, causes a build error with a call to fpu_save(): kernel/built-in.o: In function `.L410': core.c:(.sched.text+0x28a): undefined reference to `fpu_save' Fix this by including in so that an empty static inline fpu_save() is defined. 
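The header being pulled in relies on a common kernel idiom, sketched stand-alone below (not the actual mn10300 header): when the option is off, the declaration collapses to an empty static inline, so call sites still compile and the call optimizes away. With CONFIG_FPU defined, the sketch assumes a real fpu_save() is linked in from elsewhere.

  #include <stdio.h>

  struct fpu_state { int dummy; };

  #ifdef CONFIG_FPU
  extern void fpu_save(struct fpu_state *st);        /* real implementation elsewhere */
  #else
  static inline void fpu_save(struct fpu_state *st)  /* no-op stub */
  {
          (void)st;
  }
  #endif

  int main(void)
  {
          struct fpu_state st;

          fpu_save(&st);      /* with CONFIG_FPU unset this compiles to nothing */
          puts("ok");
          return 0;
  }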
Link: http://lkml.kernel.org/r/dc421c4f-4842-4429-1b99-92865c2f24b6@infradead.org Signed-off-by: Randy Dunlap Reported-by: kbuild test robot Reviewed-by: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/mn10300/include/asm/switch_to.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h index 393d311735c8..67e333aa7629 100644 --- a/arch/mn10300/include/asm/switch_to.h +++ b/arch/mn10300/include/asm/switch_to.h @@ -16,7 +16,7 @@ struct task_struct; struct thread_struct; -#if !defined(CONFIG_LAZY_SAVE_FPU) +#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU) struct fpu_state_struct; extern asmlinkage void fpu_save(struct fpu_state_struct *); #define switch_fpu(prev, next) \ -- GitLab From 013bbbc3e9025411e1327e825e736a501ba044f3 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Tue, 24 Jan 2017 15:18:46 -0800 Subject: [PATCH 113/786] romfs: use different way to generate fsid for BLOCK or MTD [ Upstream commit f598f82e204ec0b17797caaf1b0311c52d43fb9a ] Commit 8a59f5d25265 ("fs/romfs: return f_fsid for statfs(2)") generates a 64bit id from sb->s_bdev->bd_dev. This is only correct when romfs is defined with CONFIG_ROMFS_ON_BLOCK. If romfs is only defined with CONFIG_ROMFS_ON_MTD, sb->s_bdev is NULL, referencing sb->s_bdev->bd_dev will triger an oops. Richard Weinberger points out that when CONFIG_ROMFS_BACKED_BY_BOTH=y, both CONFIG_ROMFS_ON_BLOCK and CONFIG_ROMFS_ON_MTD are defined. Therefore when calling huge_encode_dev() to generate a 64bit id, I use the follow order to choose parameter, - CONFIG_ROMFS_ON_BLOCK defined use sb->s_bdev->bd_dev - CONFIG_ROMFS_ON_BLOCK undefined and CONFIG_ROMFS_ON_MTD defined use sb->s_dev when, - both CONFIG_ROMFS_ON_BLOCK and CONFIG_ROMFS_ON_MTD undefined leave id as 0 When CONFIG_ROMFS_ON_MTD is defined and sb->s_mtd is not NULL, sb->s_dev is set to a device ID generated by MTD_BLOCK_MAJOR and mtd index, otherwise sb->s_dev is 0. This is a try-best effort to generate a uniq file system ID, if all the above conditions are not meet, f_fsid of this romfs instance will be 0. Generally only one romfs can be built on single MTD block device, this method is enough to identify multiple romfs instances in a computer. 
Link: http://lkml.kernel.org/r/1482928596-115155-1-git-send-email-colyli@suse.de Signed-off-by: Coly Li Reported-by: Nong Li Tested-by: Nong Li Cc: Richard Weinberger Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/romfs/super.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/fs/romfs/super.c b/fs/romfs/super.c index d0f8a38dfafa..0186fe6d39f3 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -74,6 +74,7 @@ #include #include #include +#include #include "internal.h" static struct kmem_cache *romfs_inode_cachep; @@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode) static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; - u64 id = huge_encode_dev(sb->s_bdev->bd_dev); + u64 id = 0; + + /* When calling huge_encode_dev(), + * use sb->s_bdev->bd_dev when, + * - CONFIG_ROMFS_ON_BLOCK defined + * use sb->s_dev when, + * - CONFIG_ROMFS_ON_BLOCK undefined and + * - CONFIG_ROMFS_ON_MTD defined + * leave id as 0 when, + * - CONFIG_ROMFS_ON_BLOCK undefined and + * - CONFIG_ROMFS_ON_MTD undefined + */ + if (sb->s_bdev) + id = huge_encode_dev(sb->s_bdev->bd_dev); + else if (sb->s_dev) + id = huge_encode_dev(sb->s_dev); buf->f_type = ROMFS_MAGIC; buf->f_namelen = ROMFS_MAXFN; @@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_flags |= MS_RDONLY | MS_NOATIME; sb->s_op = &romfs_super_ops; +#ifdef CONFIG_ROMFS_ON_MTD + /* Use same dev ID from the underlying mtdblock device */ + if (sb->s_mtd) + sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index); +#endif /* read the image superblock and check it */ rsb = kmalloc(512, GFP_KERNEL); if (!rsb) -- GitLab From 918684681dc207da5de3344b17814a2b50d677f9 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Tue, 24 Jan 2017 15:18:21 -0800 Subject: [PATCH 114/786] frv: add atomic64_add_unless() [ Upstream commit 545d58f677b21401f6de1ac12c25cc109f903ace ] The build of frv allmodconfig was failing with the error: lib/atomic64_test.c:209:9: error: implicit declaration of function 'atomic64_add_unless' All the atomic64 operations were defined in frv, but atomic64_add_unless() was not done. Implement atomic64_add_unless() as done in other arches. 
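The function being added is the standard compare-and-swap retry loop; a userspace C11 rendering of the same logic (illustrative only, not the frv code) looks like this:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* add i to *v unless *v == u; return true if the add was performed */
  static bool add_unless(_Atomic long long *v, long long i, long long u)
  {
          long long c = atomic_load(v);

          while (c != u) {
                  /* on failure, c is refreshed with the current value */
                  if (atomic_compare_exchange_weak(v, &c, c + i))
                          return true;
          }
          return false;
  }

  int main(void)
  {
          _Atomic long long v = 5;
          bool done;

          done = add_unless(&v, 1, 0);
          printf("added=%d value=%lld\n", done, atomic_load(&v)); /* added=1 value=6 */

          done = add_unless(&v, 1, 6);
          printf("added=%d value=%lld\n", done, atomic_load(&v)); /* added=0 value=6 */
          return 0;
  }

atomic64_cmpxchg() plays the role of atomic_compare_exchange_weak() here: on failure it hands back the fresh value, so the loop re-evaluates the "unless" condition before retrying.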
Link: http://lkml.kernel.org/r/1484781236-6698-1-git-send-email-sudipm.mukherjee@gmail.com Signed-off-by: Sudip Mukherjee Cc: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/frv/include/asm/atomic.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 1c2a5e264fc7..994ed3d5ca08 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -161,6 +161,22 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) return c; } +static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) +{ + long long c, old; + + c = atomic64_read(v); + for (;;) { + if (unlikely(c == u)) + break; + old = atomic64_cmpxchg(v, c, c + i); + if (likely(old == c)) + break; + c = old; + } + return c != u; +} + #define ATOMIC_OP(op) \ static inline int atomic_fetch_##op(int i, atomic_t *v) \ { \ -- GitLab From e23b1c05a50ffc7cabe12b9bc4fdfa55e32d9614 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Tue, 24 Jan 2017 15:18:43 -0800 Subject: [PATCH 115/786] frv: add missing atomic64 operations [ Upstream commit 4180c4c170a5a33b9987b314d248a9d572d89ab0 ] Some more atomic64 operations were missing and as a result frv allmodconfig was failing. Add the missing operations. Link: http://lkml.kernel.org/r/1485193844-12850-1-git-send-email-sudip.mukherjee@codethink.co.uk Signed-off-by: Sudip Mukherjee Cc: David Howells Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/frv/include/asm/atomic.h | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h index 994ed3d5ca08..e93c9494503a 100644 --- a/arch/frv/include/asm/atomic.h +++ b/arch/frv/include/asm/atomic.h @@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v) #define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0) #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) #define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0) - +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new)) #define atomic_xchg(v, new) (xchg(&(v)->counter, new)) @@ -177,6 +177,23 @@ static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u) return c != u; } +static inline long long atomic64_dec_if_positive(atomic64_t *v) +{ + long long c, old, dec; + + c = atomic64_read(v); + for (;;) { + dec = c - 1; + if (unlikely(dec < 0)) + break; + old = atomic64_cmpxchg((v), c, dec); + if (likely(old == c)) + break; + c = old; + } + return dec; +} + #define ATOMIC_OP(op) \ static inline int atomic_fetch_##op(int i, atomic_t *v) \ { \ -- GitLab From 9618fba264999372c641b5cb3db777c6a216caa5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 24 Jan 2017 15:18:07 -0800 Subject: [PATCH 116/786] proc: add a schedule point in proc_pid_readdir() [ Upstream commit 3ba4bceef23206349d4130ddf140819b365de7c8 ] We have seen proc_pid_readdir() invocations holding cpu for more than 50 ms. Add a cond_resched() to be gentle with other tasks. 
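The change itself is one line; the pattern, shown here as a rough userspace analogue (sched_yield() standing in for cond_resched(), loop bounds invented), is simply a voluntary scheduling point inside a long iteration:

  #include <sched.h>
  #include <stdio.h>

  int main(void)
  {
          for (long i = 0; i < 50 * 1000 * 1000; i++) {
                  /* ... per-entry work, e.g. formatting one /proc entry ... */
                  if ((i & 0xfff) == 0)
                          sched_yield();  /* let other runnable tasks in */
          }
          puts("done");
          return 0;
  }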
[akpm@linux-foundation.org: coding style fix] Link: http://lkml.kernel.org/r/1484238380.15816.42.camel@edumazet-glaptop3.roam.corp.google.com Signed-off-by: Eric Dumazet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/proc/base.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/proc/base.c b/fs/proc/base.c index ca651ac00660..e67fec3c9856 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -3181,6 +3181,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) iter.tgid += 1, iter = next_tgid(ns, iter)) { char name[PROC_NUMBUF]; int len; + + cond_resched(); if (!has_pid_permissions(ns, iter.task, 2)) continue; -- GitLab From dbd9eee1aaaf2cb40b2bc7b794d4a6f7afc7870a Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Tue, 24 Jan 2017 15:17:59 -0800 Subject: [PATCH 117/786] userfaultfd: fix SIGBUS resulting from false rwsem wakeups [ Upstream commit 15a77c6fe494f4b1757d30cd137fe66ab06a38c3 ] With >=32 CPUs the userfaultfd selftest triggered a graceful but unexpected SIGBUS because VM_FAULT_RETRY was returned by handle_userfault() despite the UFFDIO_COPY wasn't completed. This seems caused by rwsem waking the thread blocked in handle_userfault() and we can't run up_read() before the wait_event sequence is complete. Keeping the wait_even sequence identical to the first one, would require running userfaultfd_must_wait() again to know if the loop should be repeated, and it would also require retaking the rwsem and revalidating the whole vma status. It seems simpler to wait the targeted wakeup so that if false wakeups materialize we still wait for our specific wakeup event, unless of course there are signals or the uffd was released. Debug code collecting the stack trace of the wakeup showed this: $ ./userfaultfd 100 99999 nr_pages: 25600, nr_pages_per_cpu: 800 bounces: 99998, mode: racing ver poll, userfaults: 32 35 90 232 30 138 69 82 34 30 139 40 40 31 20 19 43 13 15 28 27 38 21 43 56 22 1 17 31 8 4 2 bounces: 99997, mode: rnd ver poll, Bus error (core dumped) save_stack_trace+0x2b/0x50 try_to_wake_up+0x2a6/0x580 wake_up_q+0x32/0x70 rwsem_wake+0xe0/0x120 call_rwsem_wake+0x1b/0x30 up_write+0x3b/0x40 vm_mmap_pgoff+0x9c/0xc0 SyS_mmap_pgoff+0x1a9/0x240 SyS_mmap+0x22/0x30 entry_SYSCALL_64_fastpath+0x1f/0xbd 0xffffffffffffffff FAULT_FLAG_ALLOW_RETRY missing 70 CPU: 24 PID: 1054 Comm: userfaultfd Tainted: G W 4.8.0+ #30 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.9.3-0-ge2fc41e-prebuilt.qemu-project.org 04/01/2014 Call Trace: dump_stack+0xb8/0x112 handle_userfault+0x572/0x650 handle_mm_fault+0x12cb/0x1520 __do_page_fault+0x175/0x500 trace_do_page_fault+0x61/0x270 do_async_page_fault+0x19/0x90 async_page_fault+0x25/0x30 This always happens when the main userfault selftest thread is running clone() while glibc runs either mprotect or mmap (both taking mmap_sem down_write()) to allocate the thread stack of the background threads, while locking/userfault threads already run at full throttle and are susceptible to false wakeups that may cause handle_userfault() to return before than expected (which results in graceful SIGBUS at the next attempt). This was reproduced only with >=32 CPUs because the loop to start the thread where clone() is too quick with fewer CPUs, while with 32 CPUs there's already significant activity on ~32 locking and userfault threads when the last background threads are started with clone(). 
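The same rule applies to any sleep/wakeup pair: the sleeper must re-check its own predicate after every wakeup. The pthreads program below is a userspace analogue (flag names invented to mirror uwq.waken and ctx->released; not the uffd code) of waiting only for the targeted event. Build with: cc -pthread.

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <unistd.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
  static bool woken;      /* analogue of uwq.waken     */
  static bool released;   /* analogue of ctx->released */

  static void *waiter(void *arg)
  {
          (void)arg;
          pthread_mutex_lock(&lock);
          while (!woken && !released)           /* re-check after every wakeup  */
                  pthread_cond_wait(&cond, &lock); /* may return without a signal */
          pthread_mutex_unlock(&lock);
          puts("waiter: targeted event observed");
          return NULL;
  }

  int main(void)
  {
          pthread_t t;

          pthread_create(&t, NULL, waiter, NULL);
          sleep(1);

          pthread_mutex_lock(&lock);
          woken = true;                         /* publish the flag first ... */
          pthread_mutex_unlock(&lock);
          pthread_cond_signal(&cond);           /* ... then wake the waiter   */

          pthread_join(t, NULL);
          return 0;
  }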
This >=32 CPUs SMP race condition is likely reproducible only with the selftest because of the much heavier userfault load it generates if compared to real apps. We'll have to allow "one more" VM_FAULT_RETRY for the WP support and a patch floating around that provides it also hidden this problem but in reality only is successfully at hiding the problem. False wakeups could still happen again the second time handle_userfault() is invoked, even if it's a so rare race condition that getting false wakeups twice in a row is impossible to reproduce. This full fix is needed for correctness, the only alternative would be to allow VM_FAULT_RETRY to be returned infinitely. With this fix the WP support can stick to a strict "one more" VM_FAULT_RETRY logic (no need of returning it infinite times to avoid the SIGBUS). Link: http://lkml.kernel.org/r/20170111005535.13832-2-aarcange@redhat.com Signed-off-by: Andrea Arcangeli Reported-by: Shubham Kumar Sharma Tested-by: Mike Kravetz Acked-by: Hillf Danton Cc: Michael Rapoport Cc: "Dr. David Alan Gilbert" Cc: Pavel Emelyanov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/userfaultfd.c | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 85959d8324df..b86054cc41db 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -63,6 +63,7 @@ struct userfaultfd_wait_queue { struct uffd_msg msg; wait_queue_t wq; struct userfaultfd_ctx *ctx; + bool waken; }; struct userfaultfd_wake_range { @@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, if (len && (start > uwq->msg.arg.pagefault.address || start + len <= uwq->msg.arg.pagefault.address)) goto out; + WRITE_ONCE(uwq->waken, true); + /* + * The implicit smp_mb__before_spinlock in try_to_wake_up() + * renders uwq->waken visible to other CPUs before the task is + * waken. + */ ret = wake_up_state(wq->private, mode); if (ret) /* @@ -264,6 +271,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) struct userfaultfd_wait_queue uwq; int ret; bool must_wait, return_to_userland; + long blocking_state; BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); @@ -333,10 +341,13 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) uwq.wq.private = current; uwq.msg = userfault_msg(fe->address, fe->flags, reason); uwq.ctx = ctx; + uwq.waken = false; return_to_userland = (fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) == (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE); + blocking_state = return_to_userland ? TASK_INTERRUPTIBLE : + TASK_KILLABLE; spin_lock(&ctx->fault_pending_wqh.lock); /* @@ -349,8 +360,7 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) * following the spin_unlock to happen before the list_add in * __add_wait_queue. */ - set_current_state(return_to_userland ? TASK_INTERRUPTIBLE : - TASK_KILLABLE); + set_current_state(blocking_state); spin_unlock(&ctx->fault_pending_wqh.lock); must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason); @@ -362,6 +372,29 @@ int handle_userfault(struct fault_env *fe, unsigned long reason) wake_up_poll(&ctx->fd_wqh, POLLIN); schedule(); ret |= VM_FAULT_MAJOR; + + /* + * False wakeups can orginate even from rwsem before + * up_read() however userfaults will wait either for a + * targeted wakeup on the specific uwq waitqueue from + * wake_userfault() or for signals or for uffd + * release. 
+ */ + while (!READ_ONCE(uwq.waken)) { + /* + * This needs the full smp_store_mb() + * guarantee as the state write must be + * visible to other CPUs before reading + * uwq.waken from other CPUs. + */ + set_current_state(blocking_state); + if (READ_ONCE(uwq.waken) || + READ_ONCE(ctx->released) || + (return_to_userland ? signal_pending(current) : + fatal_signal_pending(current))) + break; + schedule(); + } } __set_current_state(TASK_RUNNING); -- GitLab From b969a240448bfd8e6b0fb180a405e5cc881bf503 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Wed, 14 Dec 2016 15:06:24 -0800 Subject: [PATCH 118/786] kernel/watchdog.c: move hardlockup detector to separate file [ Upstream commit 73ce0511c43686095efd2f65ef564aab952e07bc ] Separate hardlockup code from watchdog.c and move it to watchdog_hld.c. It is mostly straight forward. Remove everything inside CONFIG_HARDLOCKUP_DETECTORS. This code will go to file watchdog_hld.c. Also update the makefile accordigly. Link: http://lkml.kernel.org/r/1478034826-43888-3-git-send-email-babu.moger@oracle.com Signed-off-by: Babu Moger Acked-by: Don Zickus Cc: Ingo Molnar Cc: Jiri Kosina Cc: Andi Kleen Cc: Yaowei Bai Cc: Aaron Tomlin Cc: Ulrich Obergfell Cc: Tejun Heo Cc: Hidehiro Kawai Cc: Josh Hunt Cc: "David S. Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/Makefile | 1 + kernel/watchdog.c | 241 ++---------------------------------------- kernel/watchdog_hld.c | 227 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 239 insertions(+), 230 deletions(-) create mode 100644 kernel/watchdog_hld.c diff --git a/kernel/Makefile b/kernel/Makefile index eb26e12c6c2a..314e7d62f5f0 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -84,6 +84,7 @@ obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KGDB) += debug/ obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o +obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog_hld.o obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 6d1020c03d41..94aed27d4ffd 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -24,7 +24,6 @@ #include #include -#include #include /* @@ -100,50 +99,9 @@ static DEFINE_PER_CPU(bool, soft_watchdog_warn); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt); static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved); -#ifdef CONFIG_HARDLOCKUP_DETECTOR -static DEFINE_PER_CPU(bool, hard_watchdog_warn); -static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); -static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); -#endif static unsigned long soft_lockup_nmi_warn; -/* boot commands */ -/* - * Should we panic when a soft-lockup or hard-lockup occurs: - */ -#ifdef CONFIG_HARDLOCKUP_DETECTOR -unsigned int __read_mostly hardlockup_panic = - CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; -static unsigned long hardlockup_allcpu_dumped; -/* - * We may not want to enable hard lockup detection by default in all cases, - * for example when running the kernel as a guest on a hypervisor. In these - * cases this function can be called to disable hard lockup detection. 
This - * function should only be executed once by the boot processor before the - * kernel command line parameters are parsed, because otherwise it is not - * possible to override this in hardlockup_panic_setup(). - */ -void hardlockup_detector_disable(void) -{ - watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; -} - -static int __init hardlockup_panic_setup(char *str) -{ - if (!strncmp(str, "panic", 5)) - hardlockup_panic = 1; - else if (!strncmp(str, "nopanic", 7)) - hardlockup_panic = 0; - else if (!strncmp(str, "0", 1)) - watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; - else if (!strncmp(str, "1", 1)) - watchdog_enabled |= NMI_WATCHDOG_ENABLED; - return 1; -} -__setup("nmi_watchdog=", hardlockup_panic_setup); -#endif - unsigned int __read_mostly softlockup_panic = CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; @@ -264,30 +222,12 @@ void touch_all_softlockup_watchdogs(void) wq_watchdog_touch(-1); } -#ifdef CONFIG_HARDLOCKUP_DETECTOR -void touch_nmi_watchdog(void) -{ - /* - * Using __raw here because some code paths have - * preemption enabled. If preemption is enabled - * then interrupts should be enabled too, in which - * case we shouldn't have to worry about the watchdog - * going off. - */ - raw_cpu_write(watchdog_nmi_touch, true); - touch_softlockup_watchdog(); -} -EXPORT_SYMBOL(touch_nmi_watchdog); - -#endif - void touch_softlockup_watchdog_sync(void) { __this_cpu_write(softlockup_touch_sync, true); __this_cpu_write(watchdog_touch_ts, 0); } -#ifdef CONFIG_HARDLOCKUP_DETECTOR /* watchdog detector functions */ static bool is_hardlockup(void) { @@ -299,7 +239,6 @@ static bool is_hardlockup(void) __this_cpu_write(hrtimer_interrupts_saved, hrint); return false; } -#endif static int is_softlockup(unsigned long touch_ts) { @@ -313,77 +252,22 @@ static int is_softlockup(unsigned long touch_ts) return 0; } -#ifdef CONFIG_HARDLOCKUP_DETECTOR - -static struct perf_event_attr wd_hw_attr = { - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_CPU_CYCLES, - .size = sizeof(struct perf_event_attr), - .pinned = 1, - .disabled = 1, -}; - -/* Callback function for perf event subsystem */ -static void watchdog_overflow_callback(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) -{ - /* Ensure the watchdog never gets throttled */ - event->hw.interrupts = 0; - - if (__this_cpu_read(watchdog_nmi_touch) == true) { - __this_cpu_write(watchdog_nmi_touch, false); - return; - } - - /* check for a hardlockup - * This is done by making sure our timer interrupt - * is incrementing. The timer interrupt should have - * fired multiple times before we overflow'd. 
If it hasn't - * then this is a good indication the cpu is stuck - */ - if (is_hardlockup()) { - int this_cpu = smp_processor_id(); - - /* only print hardlockups once */ - if (__this_cpu_read(hard_watchdog_warn) == true) - return; - - pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); - print_modules(); - print_irqtrace_events(current); - if (regs) - show_regs(regs); - else - dump_stack(); - - /* - * Perform all-CPU dump only once to avoid multiple hardlockups - * generating interleaving traces - */ - if (sysctl_hardlockup_all_cpu_backtrace && - !test_and_set_bit(0, &hardlockup_allcpu_dumped)) - trigger_allbutself_cpu_backtrace(); - - if (hardlockup_panic) - nmi_panic(regs, "Hard LOCKUP"); - - __this_cpu_write(hard_watchdog_warn, true); - return; - } - - __this_cpu_write(hard_watchdog_warn, false); - return; -} -#endif /* CONFIG_HARDLOCKUP_DETECTOR */ - static void watchdog_interrupt_count(void) { __this_cpu_inc(hrtimer_interrupts); } -static int watchdog_nmi_enable(unsigned int cpu); -static void watchdog_nmi_disable(unsigned int cpu); +/* + * These two functions are mostly architecture specific + * defining them as weak here. + */ +int __weak watchdog_nmi_enable(unsigned int cpu) +{ + return 0; +} +void __weak watchdog_nmi_disable(unsigned int cpu) +{ +} static int watchdog_enable_all_cpus(void); static void watchdog_disable_all_cpus(void); @@ -576,109 +460,6 @@ static void watchdog(unsigned int cpu) watchdog_nmi_disable(cpu); } -#ifdef CONFIG_HARDLOCKUP_DETECTOR -/* - * People like the simple clean cpu node info on boot. - * Reduce the watchdog noise by only printing messages - * that are different from what cpu0 displayed. - */ -static unsigned long cpu0_err; - -static int watchdog_nmi_enable(unsigned int cpu) -{ - struct perf_event_attr *wd_attr; - struct perf_event *event = per_cpu(watchdog_ev, cpu); - - /* nothing to do if the hard lockup detector is disabled */ - if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) - goto out; - - /* is it already setup and enabled? */ - if (event && event->state > PERF_EVENT_STATE_OFF) - goto out; - - /* it is setup but not enabled */ - if (event != NULL) - goto out_enable; - - wd_attr = &wd_hw_attr; - wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); - - /* Try to register using hardware perf events */ - event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); - - /* save cpu0 error for future comparision */ - if (cpu == 0 && IS_ERR(event)) - cpu0_err = PTR_ERR(event); - - if (!IS_ERR(event)) { - /* only print for cpu0 or different than cpu0 */ - if (cpu == 0 || cpu0_err) - pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); - goto out_save; - } - - /* - * Disable the hard lockup detector if _any_ CPU fails to set up - * set up the hardware perf event. The watchdog() function checks - * the NMI_WATCHDOG_ENABLED bit periodically. - * - * The barriers are for syncing up watchdog_enabled across all the - * cpus, as clear_bit() does not use barriers. 
- */ - smp_mb__before_atomic(); - clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled); - smp_mb__after_atomic(); - - /* skip displaying the same error again */ - if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) - return PTR_ERR(event); - - /* vary the KERN level based on the returned errno */ - if (PTR_ERR(event) == -EOPNOTSUPP) - pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu); - else if (PTR_ERR(event) == -ENOENT) - pr_warn("disabled (cpu%i): hardware events not enabled\n", - cpu); - else - pr_err("disabled (cpu%i): unable to create perf event: %ld\n", - cpu, PTR_ERR(event)); - - pr_info("Shutting down hard lockup detector on all cpus\n"); - - return PTR_ERR(event); - - /* success path */ -out_save: - per_cpu(watchdog_ev, cpu) = event; -out_enable: - perf_event_enable(per_cpu(watchdog_ev, cpu)); -out: - return 0; -} - -static void watchdog_nmi_disable(unsigned int cpu) -{ - struct perf_event *event = per_cpu(watchdog_ev, cpu); - - if (event) { - perf_event_disable(event); - per_cpu(watchdog_ev, cpu) = NULL; - - /* should be in cleanup, but blocks oprofile */ - perf_event_release_kernel(event); - } - if (cpu == 0) { - /* watchdog_nmi_enable() expects this to be zero initially. */ - cpu0_err = 0; - } -} - -#else -static int watchdog_nmi_enable(unsigned int cpu) { return 0; } -static void watchdog_nmi_disable(unsigned int cpu) { return; } -#endif /* CONFIG_HARDLOCKUP_DETECTOR */ - static struct smp_hotplug_thread watchdog_threads = { .store = &softlockup_watchdog, .thread_should_run = watchdog_should_run, diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c new file mode 100644 index 000000000000..84016c8aee6b --- /dev/null +++ b/kernel/watchdog_hld.c @@ -0,0 +1,227 @@ +/* + * Detect hard lockups on a system + * + * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. + * + * Note: Most of this code is borrowed heavily from the original softlockup + * detector, so thanks to Ingo for the initial implementation. + * Some chunks also taken from the old x86-specific nmi watchdog code, thanks + * to those contributors as well. + */ + +#define pr_fmt(fmt) "NMI watchdog: " fmt + +#include +#include +#include +#include + +static DEFINE_PER_CPU(bool, hard_watchdog_warn); +static DEFINE_PER_CPU(bool, watchdog_nmi_touch); +static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); + +/* boot commands */ +/* + * Should we panic when a soft-lockup or hard-lockup occurs: + */ +unsigned int __read_mostly hardlockup_panic = + CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; +static unsigned long hardlockup_allcpu_dumped; +/* + * We may not want to enable hard lockup detection by default in all cases, + * for example when running the kernel as a guest on a hypervisor. In these + * cases this function can be called to disable hard lockup detection. This + * function should only be executed once by the boot processor before the + * kernel command line parameters are parsed, because otherwise it is not + * possible to override this in hardlockup_panic_setup(). 
+ */ +void hardlockup_detector_disable(void) +{ + watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; +} + +static int __init hardlockup_panic_setup(char *str) +{ + if (!strncmp(str, "panic", 5)) + hardlockup_panic = 1; + else if (!strncmp(str, "nopanic", 7)) + hardlockup_panic = 0; + else if (!strncmp(str, "0", 1)) + watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; + else if (!strncmp(str, "1", 1)) + watchdog_enabled |= NMI_WATCHDOG_ENABLED; + return 1; +} +__setup("nmi_watchdog=", hardlockup_panic_setup); + +void touch_nmi_watchdog(void) +{ + /* + * Using __raw here because some code paths have + * preemption enabled. If preemption is enabled + * then interrupts should be enabled too, in which + * case we shouldn't have to worry about the watchdog + * going off. + */ + raw_cpu_write(watchdog_nmi_touch, true); + touch_softlockup_watchdog(); +} +EXPORT_SYMBOL(touch_nmi_watchdog); + +static struct perf_event_attr wd_hw_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .size = sizeof(struct perf_event_attr), + .pinned = 1, + .disabled = 1, +}; + +/* Callback function for perf event subsystem */ +static void watchdog_overflow_callback(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + /* Ensure the watchdog never gets throttled */ + event->hw.interrupts = 0; + + if (__this_cpu_read(watchdog_nmi_touch) == true) { + __this_cpu_write(watchdog_nmi_touch, false); + return; + } + + /* check for a hardlockup + * This is done by making sure our timer interrupt + * is incrementing. The timer interrupt should have + * fired multiple times before we overflow'd. If it hasn't + * then this is a good indication the cpu is stuck + */ + if (is_hardlockup()) { + int this_cpu = smp_processor_id(); + + /* only print hardlockups once */ + if (__this_cpu_read(hard_watchdog_warn) == true) + return; + + pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); + print_modules(); + print_irqtrace_events(current); + if (regs) + show_regs(regs); + else + dump_stack(); + + /* + * Perform all-CPU dump only once to avoid multiple hardlockups + * generating interleaving traces + */ + if (sysctl_hardlockup_all_cpu_backtrace && + !test_and_set_bit(0, &hardlockup_allcpu_dumped)) + trigger_allbutself_cpu_backtrace(); + + if (hardlockup_panic) + nmi_panic(regs, "Hard LOCKUP"); + + __this_cpu_write(hard_watchdog_warn, true); + return; + } + + __this_cpu_write(hard_watchdog_warn, false); + return; +} + +/* + * People like the simple clean cpu node info on boot. + * Reduce the watchdog noise by only printing messages + * that are different from what cpu0 displayed. + */ +static unsigned long cpu0_err; + +int watchdog_nmi_enable(unsigned int cpu) +{ + struct perf_event_attr *wd_attr; + struct perf_event *event = per_cpu(watchdog_ev, cpu); + + /* nothing to do if the hard lockup detector is disabled */ + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) + goto out; + + /* is it already setup and enabled? 
*/ + if (event && event->state > PERF_EVENT_STATE_OFF) + goto out; + + /* it is setup but not enabled */ + if (event != NULL) + goto out_enable; + + wd_attr = &wd_hw_attr; + wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); + + /* Try to register using hardware perf events */ + event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); + + /* save cpu0 error for future comparision */ + if (cpu == 0 && IS_ERR(event)) + cpu0_err = PTR_ERR(event); + + if (!IS_ERR(event)) { + /* only print for cpu0 or different than cpu0 */ + if (cpu == 0 || cpu0_err) + pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); + goto out_save; + } + + /* + * Disable the hard lockup detector if _any_ CPU fails to set up + * set up the hardware perf event. The watchdog() function checks + * the NMI_WATCHDOG_ENABLED bit periodically. + * + * The barriers are for syncing up watchdog_enabled across all the + * cpus, as clear_bit() does not use barriers. + */ + smp_mb__before_atomic(); + clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled); + smp_mb__after_atomic(); + + /* skip displaying the same error again */ + if (cpu > 0 && (PTR_ERR(event) == cpu0_err)) + return PTR_ERR(event); + + /* vary the KERN level based on the returned errno */ + if (PTR_ERR(event) == -EOPNOTSUPP) + pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu); + else if (PTR_ERR(event) == -ENOENT) + pr_warn("disabled (cpu%i): hardware events not enabled\n", + cpu); + else + pr_err("disabled (cpu%i): unable to create perf event: %ld\n", + cpu, PTR_ERR(event)); + + pr_info("Shutting down hard lockup detector on all cpus\n"); + + return PTR_ERR(event); + + /* success path */ +out_save: + per_cpu(watchdog_ev, cpu) = event; +out_enable: + perf_event_enable(per_cpu(watchdog_ev, cpu)); +out: + return 0; +} + +void watchdog_nmi_disable(unsigned int cpu) +{ + struct perf_event *event = per_cpu(watchdog_ev, cpu); + + if (event) { + perf_event_disable(event); + per_cpu(watchdog_ev, cpu) = NULL; + + /* should be in cleanup, but blocks oprofile */ + perf_event_release_kernel(event); + } + if (cpu == 0) { + /* watchdog_nmi_enable() expects this to be zero initially. */ + cpu0_err = 0; + } +} -- GitLab From 0ce66ee6aec12f38ab6992233e92b9960b55e0c6 Mon Sep 17 00:00:00 2001 From: Babu Moger Date: Wed, 14 Dec 2016 15:06:21 -0800 Subject: [PATCH 119/786] kernel/watchdog.c: move shared definitions to nmi.h [ Upstream commit 249e52e35580fcfe5dad53a7dcd7c1252788749c ] Patch series "Clean up watchdog handlers", v2. This is an attempt to cleanup watchdog handlers. Right now, kernel/watchdog.c implements both softlockup and hardlockup detectors. Softlockup code is generic. Hardlockup code is arch specific. Some architectures don't use hardlockup detectors. They use their own watchdog detectors. To make both these combination work, we have numerous #ifdefs in kernel/watchdog.c. We are trying here to make these handlers independent of each other. Also provide an interface for architectures to implement their own handlers. watchdog_nmi_enable and watchdog_nmi_disable will be defined as weak such that architectures can override its definitions. Thanks to Don Zickus for his suggestions. 
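The override mechanism is the linker's handling of weak symbols: the generic watchdog code supplies no-op __weak defaults, and an architecture that drives its own NMI watchdog defines the same functions without __weak, so its strong definitions win at link time. Roughly as below; the arch-side body is a hypothetical placeholder:

    /* kernel/watchdog.c - generic fallbacks, used when no arch override */
    int __weak watchdog_nmi_enable(unsigned int cpu)
    {
            return 0;
    }

    void __weak watchdog_nmi_disable(unsigned int cpu)
    {
    }

    /* arch/<foo>/kernel/nmi.c - strong definition replaces the weak one */
    int watchdog_nmi_enable(unsigned int cpu)
    {
            /* arch-specific setup of the per-cpu NMI watchdog */
            return 0;
    }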
Here are our previous discussions http://www.spinics.net/lists/sparclinux/msg16543.html http://www.spinics.net/lists/sparclinux/msg16441.html This patch (of 3): Move shared macros and definitions to nmi.h so that watchdog.c, new file watchdog_hld.c or any other architecture specific handler can use those definitions. Link: http://lkml.kernel.org/r/1478034826-43888-2-git-send-email-babu.moger@oracle.com Signed-off-by: Babu Moger Acked-by: Don Zickus Cc: Ingo Molnar Cc: Jiri Kosina Cc: Andi Kleen Cc: Yaowei Bai Cc: Aaron Tomlin Cc: Ulrich Obergfell Cc: Tejun Heo Cc: Hidehiro Kawai Cc: Josh Hunt Cc: "David S. Miller" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/linux/nmi.h | 24 ++++++++++++++++++++++++ kernel/watchdog.c | 28 ++++------------------------ 2 files changed, 28 insertions(+), 24 deletions(-) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a78c35cff1ae..aacca824a6ae 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -7,6 +7,23 @@ #include #include +/* + * The run state of the lockup detectors is controlled by the content of the + * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - + * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. + * + * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' + * are variables that are only used as an 'interface' between the parameters + * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The + * 'watchdog_thresh' variable is handled differently because its value is not + * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' + * is equal zero. + */ +#define NMI_WATCHDOG_ENABLED_BIT 0 +#define SOFT_WATCHDOG_ENABLED_BIT 1 +#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) +#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) + /** * touch_nmi_watchdog - restart NMI watchdog timeout. * @@ -91,9 +108,16 @@ extern int nmi_watchdog_enabled; extern int soft_watchdog_enabled; extern int watchdog_user_enabled; extern int watchdog_thresh; +extern unsigned long watchdog_enabled; extern unsigned long *watchdog_cpumask_bits; +#ifdef CONFIG_SMP extern int sysctl_softlockup_all_cpu_backtrace; extern int sysctl_hardlockup_all_cpu_backtrace; +#else +#define sysctl_softlockup_all_cpu_backtrace 0 +#define sysctl_hardlockup_all_cpu_backtrace 0 +#endif +extern bool is_hardlockup(void); struct ctl_table; extern int proc_watchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 94aed27d4ffd..d4b0fa01cae3 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -26,29 +26,12 @@ #include #include -/* - * The run state of the lockup detectors is controlled by the content of the - * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - - * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. - * - * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' - * are variables that are only used as an 'interface' between the parameters - * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The - * 'watchdog_thresh' variable is handled differently because its value is not - * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' - * is equal zero. 
- */ -#define NMI_WATCHDOG_ENABLED_BIT 0 -#define SOFT_WATCHDOG_ENABLED_BIT 1 -#define NMI_WATCHDOG_ENABLED (1 << NMI_WATCHDOG_ENABLED_BIT) -#define SOFT_WATCHDOG_ENABLED (1 << SOFT_WATCHDOG_ENABLED_BIT) - static DEFINE_MUTEX(watchdog_proc_mutex); -#ifdef CONFIG_HARDLOCKUP_DETECTOR -static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; +#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) +unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED; #else -static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; +unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; #endif int __read_mostly nmi_watchdog_enabled; int __read_mostly soft_watchdog_enabled; @@ -58,9 +41,6 @@ int __read_mostly watchdog_thresh = 10; #ifdef CONFIG_SMP int __read_mostly sysctl_softlockup_all_cpu_backtrace; int __read_mostly sysctl_hardlockup_all_cpu_backtrace; -#else -#define sysctl_softlockup_all_cpu_backtrace 0 -#define sysctl_hardlockup_all_cpu_backtrace 0 #endif static struct cpumask watchdog_cpumask __read_mostly; unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); @@ -229,7 +209,7 @@ void touch_softlockup_watchdog_sync(void) } /* watchdog detector functions */ -static bool is_hardlockup(void) +bool is_hardlockup(void) { unsigned long hrint = __this_cpu_read(hrtimer_interrupts); -- GitLab From b13b3b706a9dc03dd1a1c31f8268cd5193c1858c Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Tue, 24 Jan 2017 15:17:53 -0800 Subject: [PATCH 120/786] kernel/watchdog: prevent false hardlockup on overloaded system [ Upstream commit b94f51183b0617e7b9b4fb4137d4cf1cab7547c2 ] On an overloaded system, it is possible that a change in the watchdog threshold can be delayed long enough to trigger a false positive. This can easily be achieved by having a cpu spinning indefinitely on a task, while another cpu updates watchdog threshold. What happens is while trying to park the watchdog threads, the hrtimers on the other cpus trigger and reprogram themselves with the new slower watchdog threshold. Meanwhile, the nmi watchdog is still programmed with the old faster threshold. Because the one cpu is blocked, it prevents the thread parking on the other cpus from completing, which is needed to shutdown the nmi watchdog and reprogram it correctly. As a result, a false positive from the nmi watchdog is reported. Fix this by setting a park_in_progress flag to block all lockups until the parking is complete. Fix provided by Ulrich Obergfell. 
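Condensed, the fix is a single global flag consulted by both detector paths; this mirrors the hunks below:

    atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);

    static int watchdog_park_threads(void)
    {
            int cpu, ret = 0;

            atomic_set(&watchdog_park_in_progress, 1);
            for_each_watchdog_cpu(cpu) {
                    ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                    if (ret)
                            break;
            }
            atomic_set(&watchdog_park_in_progress, 0);

            return ret;
    }

    /* and at the top of both watchdog_timer_fn() and the perf NMI
     * callback:
     */
    if (atomic_read(&watchdog_park_in_progress) != 0)
            return;    /* lockup checks are meaningless while parking */

Note that watchdog_timer_fn() actually returns HRTIMER_NORESTART rather than plain void, as the full diff shows.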
[akpm@linux-foundation.org: s/park_in_progress/watchdog_park_in_progress/] Link: http://lkml.kernel.org/r/1481041033-192236-1-git-send-email-dzickus@redhat.com Signed-off-by: Don Zickus Reviewed-by: Aaron Tomlin Cc: Ulrich Obergfell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/linux/nmi.h | 1 + kernel/watchdog.c | 9 +++++++++ kernel/watchdog_hld.c | 3 +++ 3 files changed, 13 insertions(+) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index aacca824a6ae..0a3fadc32693 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -110,6 +110,7 @@ extern int watchdog_user_enabled; extern int watchdog_thresh; extern unsigned long watchdog_enabled; extern unsigned long *watchdog_cpumask_bits; +extern atomic_t watchdog_park_in_progress; #ifdef CONFIG_SMP extern int sysctl_softlockup_all_cpu_backtrace; extern int sysctl_hardlockup_all_cpu_backtrace; diff --git a/kernel/watchdog.c b/kernel/watchdog.c index d4b0fa01cae3..63177be0159e 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); #define for_each_watchdog_cpu(cpu) \ for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) +atomic_t watchdog_park_in_progress = ATOMIC_INIT(0); + /* * The 'watchdog_running' variable is set to 1 when the watchdog threads * are registered/started and is set to 0 when the watchdog threads are @@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) int duration; int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; + if (atomic_read(&watchdog_park_in_progress) != 0) + return HRTIMER_NORESTART; + /* kick the hardlockup detector */ watchdog_interrupt_count(); @@ -467,12 +472,16 @@ static int watchdog_park_threads(void) { int cpu, ret = 0; + atomic_set(&watchdog_park_in_progress, 1); + for_each_watchdog_cpu(cpu) { ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); if (ret) break; } + atomic_set(&watchdog_park_in_progress, 0); + return ret; } diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 84016c8aee6b..12b8dd640786 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event, /* Ensure the watchdog never gets throttled */ event->hw.interrupts = 0; + if (atomic_read(&watchdog_park_in_progress) != 0) + return; + if (__this_cpu_read(watchdog_nmi_touch) == true) { __this_cpu_write(watchdog_nmi_touch, false); return; -- GitLab From ae36f6a65af6f4eaca01cc5b68d8ecb266dbcc17 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 19 Jan 2017 10:43:53 +0000 Subject: [PATCH 121/786] vhost/vsock: handle vhost_vq_init_access() error [ Upstream commit 0516ffd88fa0d006ee80389ce14a9ca5ae45e845 ] Propagate the error when vhost_vq_init_access() fails and set vq->private_data to NULL. Signed-off-by: Stefan Hajnoczi Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/vhost/vsock.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index a504e2e003da..e3fad302b4fb 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -368,6 +368,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work) static int vhost_vsock_start(struct vhost_vsock *vsock) { + struct vhost_virtqueue *vq; size_t i; int ret; @@ -378,19 +379,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) goto err; for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { - struct vhost_virtqueue *vq = &vsock->vqs[i]; + vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); if (!vhost_vq_access_ok(vq)) { ret = -EFAULT; - mutex_unlock(&vq->mutex); goto err_vq; } if (!vq->private_data) { vq->private_data = vsock; - vhost_vq_init_access(vq); + ret = vhost_vq_init_access(vq); + if (ret) + goto err_vq; } mutex_unlock(&vq->mutex); @@ -400,8 +402,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock) return 0; err_vq: + vq->private_data = NULL; + mutex_unlock(&vq->mutex); + for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) { - struct vhost_virtqueue *vq = &vsock->vqs[i]; + vq = &vsock->vqs[i]; mutex_lock(&vq->mutex); vq->private_data = NULL; -- GitLab From 8cb6045ec40d90d8074dd072c2ec9d0fc7b91956 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 12 Jan 2017 14:30:29 -0800 Subject: [PATCH 122/786] ARC: smp-boot: Decouple Non masters waiting API from jump to entry point [ Upstream commit bf02454a741b58682a82c314a9a46bed930ed2f7 ] For run-on-reset SMP configs, non master cores call a routine which waits until Master gives it a "go" signal (currently using a shared mem flag). The same routine then jumps off the well known entry point of all non Master cores i.e. @first_lines_of_secondary This patch moves out the last part into one single place in early boot code. This is better in terms of absraction (the wait API only waits) and returns, leaving out the "jump off to" part. In actual implementation this requires some restructuring of the early boot code as well as Master now jumps to BSS setup explicitly, vs. falling thru into it before. 
Technically this patch doesn't cause any functional change, it just moves the ugly #ifdef'ry from assembly code to "C" Signed-off-by: Vineet Gupta Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arc/kernel/head.S | 14 +++++++------- arch/arc/kernel/smp.c | 6 ++++-- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 689dd867fdff..8b90d25a15cc 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -71,14 +71,14 @@ ENTRY(stext) GET_CPU_ID r5 cmp r5, 0 mov.nz r0, r5 -#ifdef CONFIG_ARC_SMP_HALT_ON_RESET - ; Non-Master can proceed as system would be booted sufficiently - jnz first_lines_of_secondary -#else + bz .Lmaster_proceed + ; Non-Masters wait for Master to boot enough and bring them up - jnz arc_platform_smp_wait_to_boot -#endif - ; Master falls thru + ; when they resume, tail-call to entry point + mov blink, @first_lines_of_secondary + j arc_platform_smp_wait_to_boot + +.Lmaster_proceed: #endif ; Clear BSS before updating any globals diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 88674d972c9d..44a0d21ed342 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -98,14 +98,16 @@ static void arc_default_smp_cpu_kick(int cpu, unsigned long pc) void arc_platform_smp_wait_to_boot(int cpu) { + /* for halt-on-reset, we've waited already */ + if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET)) + return; + while (wake_flag != cpu) ; wake_flag = 0; - __asm__ __volatile__("j @first_lines_of_secondary \n"); } - const char *arc_platform_smp_cpuinfo(void) { return plat_smp_ops.info ? : ""; -- GitLab From 2552e2c11575c72114a2f8902a7488c810c41691 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Tue, 21 Jun 2016 14:24:33 +0530 Subject: [PATCH 123/786] ARCv2: smp-boot: wake_flag polling by non-Masters needs to be uncached [ Upstream commit 78f824d4312a8944f5340c6b161bba3bf2c81096 ] This is needed on HS38 cores, for setting up IO-Coherency aperture properly The polling could perturb the caches and coherecy fabric which could be wrong in the small window when Master is setting up IOC aperture etc in arc_cache_init() We do it only for ARCv2 based builds to not affect EZChip ARCompact based platform. 
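Reduced to the essentials, the non-Master polling loop goes through uncached accessors on ARCv2 while ARCompact keeps plain accesses; this is a condensed view of the diff that follows:

    #ifdef CONFIG_ISA_ARCOMPACT
    #define __boot_read(f)          (f)
    #define __boot_write(f, v)      ((f) = (v))
    #else
    /* ARCv2: bypass the caches while the Master configures the IOC */
    #define __boot_read(f)          arc_read_uncached_32(&(f))
    #define __boot_write(f, v)      arc_write_uncached_32(&(f), (v))
    #endif

    /* non-Master cores spin here until the Master writes their cpu id */
    while (__boot_read(wake_flag) != cpu)
            ;
    __boot_write(wake_flag, 0);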
Signed-off-by: Vineet Gupta Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arc/kernel/smp.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 44a0d21ed342..2afbafadb6ab 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c @@ -90,10 +90,23 @@ void __init smp_cpus_done(unsigned int max_cpus) */ static volatile int wake_flag; +#ifdef CONFIG_ISA_ARCOMPACT + +#define __boot_read(f) f +#define __boot_write(f, v) f = v + +#else + +#define __boot_read(f) arc_read_uncached_32(&f) +#define __boot_write(f, v) arc_write_uncached_32(&f, v) + +#endif + static void arc_default_smp_cpu_kick(int cpu, unsigned long pc) { BUG_ON(cpu == 0); - wake_flag = cpu; + + __boot_write(wake_flag, cpu); } void arc_platform_smp_wait_to_boot(int cpu) @@ -102,10 +115,10 @@ void arc_platform_smp_wait_to_boot(int cpu) if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET)) return; - while (wake_flag != cpu) + while (__boot_read(wake_flag) != cpu) ; - wake_flag = 0; + __boot_write(wake_flag, 0); } const char *arc_platform_smp_cpuinfo(void) -- GitLab From c7a552e771cccacf4da117b93088e3cf25b1e038 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Tue, 24 Jan 2017 13:00:47 +0100 Subject: [PATCH 124/786] tipc: ignore requests when the connection state is not CONNECTED [ Upstream commit 4c887aa65d38633885010277f3482400681be719 ] In tipc_conn_sendmsg(), we first queue the request to the outqueue followed by the connection state check. If the connection is not connected, we should not queue this message. In this commit, we reject the messages if the connection state is not CF_CONNECTED. Acked-by: Ying Xue Acked-by: Jon Maloy Tested-by: John Thompson Signed-off-by: Parthasarathy Bhuvaragan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/tipc/server.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/net/tipc/server.c b/net/tipc/server.c index 215849ce453d..b4b742c89a26 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -458,6 +458,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, if (!con) return -EINVAL; + if (!test_bit(CF_CONNECTED, &con->flags)) { + conn_put(con); + return 0; + } + e = tipc_alloc_entry(data, len); if (!e) { conn_put(con); @@ -471,12 +476,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, list_add_tail(&e->list, &con->outqueue); spin_unlock_bh(&con->outqueue_lock); - if (test_bit(CF_CONNECTED, &con->flags)) { - if (!queue_work(s->send_wq, &con->swork)) - conn_put(con); - } else { + if (!queue_work(s->send_wq, &con->swork)) conn_put(con); - } return 0; } @@ -500,7 +501,7 @@ static void tipc_send_to_sock(struct tipc_conn *con) int ret; spin_lock_bh(&con->outqueue_lock); - while (1) { + while (test_bit(CF_CONNECTED, &con->flags)) { e = list_entry(con->outqueue.next, struct outqueue_entry, list); if ((struct list_head *) e == &con->outqueue) -- GitLab From 9f8df4f86a3fa3ca546955f696d7b602fc1eb3d6 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Tue, 24 Jan 2017 13:00:45 +0100 Subject: [PATCH 125/786] tipc: fix connection refcount error [ Upstream commit fc0adfc8fd18b61b6f7a3f28b429e134d6f3a008 ] Until now, the generic server framework maintains the connection id's per subscriber in server's conn_idr. At tipc_close_conn, we remove the connection id from the server list, but the connection is valid until we call the refcount cleanup. 
Hence we have a window where the server allocates the same connection to an new subscriber leading to inconsistent reference count. We have another refcount warning we grab the refcount in tipc_conn_lookup() for connections with flag with CF_CONNECTED not set. This usually occurs at shutdown when the we stop the topology server and withdraw TIPC_CFG_SRV publication thereby triggering a withdraw message to subscribers. In this commit, we: 1. remove the connection from the server list at recount cleanup. 2. grab the refcount for a connection only if CF_CONNECTED is set. Tested-by: John Thompson Acked-by: Ying Xue Acked-by: Jon Maloy Signed-off-by: Parthasarathy Bhuvaragan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/tipc/server.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/net/tipc/server.c b/net/tipc/server.c index b4b742c89a26..f89c0c2e8c16 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -91,7 +91,8 @@ static void tipc_sock_release(struct tipc_conn *con); static void tipc_conn_kref_release(struct kref *kref) { struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); - struct sockaddr_tipc *saddr = con->server->saddr; + struct tipc_server *s = con->server; + struct sockaddr_tipc *saddr = s->saddr; struct socket *sock = con->sock; struct sock *sk; @@ -106,6 +107,11 @@ static void tipc_conn_kref_release(struct kref *kref) tipc_sock_release(con); sock_release(sock); con->sock = NULL; + + spin_lock_bh(&s->idr_lock); + idr_remove(&s->conn_idr, con->conid); + s->idr_in_use--; + spin_unlock_bh(&s->idr_lock); } tipc_clean_outqueues(con); @@ -128,8 +134,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid) spin_lock_bh(&s->idr_lock); con = idr_find(&s->conn_idr, conid); - if (con) + if (con && test_bit(CF_CONNECTED, &con->flags)) conn_get(con); + else + con = NULL; spin_unlock_bh(&s->idr_lock); return con; } @@ -198,15 +206,8 @@ static void tipc_sock_release(struct tipc_conn *con) static void tipc_close_conn(struct tipc_conn *con) { - struct tipc_server *s = con->server; - if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { - spin_lock_bh(&s->idr_lock); - idr_remove(&s->conn_idr, con->conid); - s->idr_in_use--; - spin_unlock_bh(&s->idr_lock); - /* We shouldn't flush pending works as we may be in the * thread. In fact the races with pending rx/tx work structs * are harmless for us here as we have already deleted this -- GitLab From 1d6e36d730ed6a328f793da1ac907c8d80ca2eb0 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Tue, 24 Jan 2017 13:00:44 +0100 Subject: [PATCH 126/786] tipc: add subscription refcount to avoid invalid delete [ Upstream commit d094c4d5f5c7e1b225e94227ca3f007be3adc4e8 ] Until now, the subscribers keep track of the subscriptions using reference count at subscriber level. At subscription cancel or subscriber delete, we delete the subscription only if the timer was pending for the subscription. This approach is incorrect as: 1. del_timer() is not SMP safe, if on CPU0 the check for pending timer returns true but CPU1 might schedule the timer callback thereby deleting the subscription. Thus when CPU0 is scheduled, it deletes an invalid subscription. 2. We export tipc_subscrp_report_overlap(), which accesses the subscription pointer multiple times. Meanwhile the subscription timer can expire thereby freeing the subscription and we might continue to access the subscription pointer leading to memory violations. 
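The remedy introduced below is per-subscription reference counting. The underlying kref idiom, namely taking a reference before touching an object that another context may release and doing the teardown only in the release callback, looks like this in miniature; struct and function names are illustrative, not the TIPC code:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct sub {
            struct kref kref;
            /* ... subscription payload ... */
    };

    static void sub_release(struct kref *kref)
    {
            struct sub *s = container_of(kref, struct sub, kref);

            /* unlink from lists, stop timers, then free; this runs
             * exactly once, when the last reference is dropped
             */
            kfree(s);
    }

    static inline void sub_get(struct sub *s)
    {
            kref_get(&s->kref);
    }

    static inline void sub_put(struct sub *s)
    {
            kref_put(&s->kref, sub_release);
    }

With this, the timer callback and the cancel path can both call sub_put() safely; whichever drops the last reference performs the delete.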
In this commit, we introduce subscription refcount to avoid deleting an invalid subscription. Reported-and-Tested-by: John Thompson Acked-by: Ying Xue Acked-by: Jon Maloy Signed-off-by: Parthasarathy Bhuvaragan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/tipc/subscr.c | 124 ++++++++++++++++++++++++++-------------------- net/tipc/subscr.h | 1 + 2 files changed, 71 insertions(+), 54 deletions(-) diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 0dd02244e21d..9d94e65d0894 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -54,6 +54,8 @@ struct tipc_subscriber { static void tipc_subscrp_delete(struct tipc_subscription *sub); static void tipc_subscrb_put(struct tipc_subscriber *subscriber); +static void tipc_subscrp_put(struct tipc_subscription *subscription); +static void tipc_subscrp_get(struct tipc_subscription *subscription); /** * htohl - convert value to endianness used by destination @@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, { struct tipc_name_seq seq; + tipc_subscrp_get(sub); tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq); if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper)) return; @@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref, node); + tipc_subscrp_put(sub); } static void tipc_subscrp_timeout(unsigned long data) { struct tipc_subscription *sub = (struct tipc_subscription *)data; - struct tipc_subscriber *subscriber = sub->subscriber; /* Notify subscriber of timeout */ tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, TIPC_SUBSCR_TIMEOUT, 0, 0); - spin_lock_bh(&subscriber->lock); - tipc_subscrp_delete(sub); - spin_unlock_bh(&subscriber->lock); - - tipc_subscrb_put(subscriber); + tipc_subscrp_put(sub); } static void tipc_subscrb_kref_release(struct kref *kref) { - struct tipc_subscriber *subcriber = container_of(kref, - struct tipc_subscriber, kref); - - kfree(subcriber); + kfree(container_of(kref,struct tipc_subscriber, kref)); } static void tipc_subscrb_put(struct tipc_subscriber *subscriber) @@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber) kref_get(&subscriber->kref); } +static void tipc_subscrp_kref_release(struct kref *kref) +{ + struct tipc_subscription *sub = container_of(kref, + struct tipc_subscription, + kref); + struct tipc_net *tn = net_generic(sub->net, tipc_net_id); + struct tipc_subscriber *subscriber = sub->subscriber; + + spin_lock_bh(&subscriber->lock); + tipc_nametbl_unsubscribe(sub); + list_del(&sub->subscrp_list); + atomic_dec(&tn->subscription_count); + spin_unlock_bh(&subscriber->lock); + kfree(sub); + tipc_subscrb_put(subscriber); +} + +static void tipc_subscrp_put(struct tipc_subscription *subscription) +{ + kref_put(&subscription->kref, tipc_subscrp_kref_release); +} + +static void tipc_subscrp_get(struct tipc_subscription *subscription) +{ + kref_get(&subscription->kref); +} + +/* tipc_subscrb_subscrp_delete - delete a specific subscription or all + * subscriptions for a given subscriber. 
+ */ +static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber, + struct tipc_subscr *s) +{ + struct list_head *subscription_list = &subscriber->subscrp_list; + struct tipc_subscription *sub, *temp; + + spin_lock_bh(&subscriber->lock); + list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) { + if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) + continue; + + tipc_subscrp_get(sub); + spin_unlock_bh(&subscriber->lock); + tipc_subscrp_delete(sub); + tipc_subscrp_put(sub); + spin_lock_bh(&subscriber->lock); + + if (s) + break; + } + spin_unlock_bh(&subscriber->lock); +} + static struct tipc_subscriber *tipc_subscrb_create(int conid) { struct tipc_subscriber *subscriber; @@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid) pr_warn("Subscriber rejected, no memory\n"); return NULL; } - kref_init(&subscriber->kref); INIT_LIST_HEAD(&subscriber->subscrp_list); + kref_init(&subscriber->kref); subscriber->conid = conid; spin_lock_init(&subscriber->lock); @@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid) static void tipc_subscrb_delete(struct tipc_subscriber *subscriber) { - struct tipc_subscription *sub, *temp; - u32 timeout; - - spin_lock_bh(&subscriber->lock); - /* Destroy any existing subscriptions for subscriber */ - list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list, - subscrp_list) { - timeout = htohl(sub->evt.s.timeout, sub->swap); - if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) { - tipc_subscrp_delete(sub); - tipc_subscrb_put(subscriber); - } - } - spin_unlock_bh(&subscriber->lock); - + tipc_subscrb_subscrp_delete(subscriber, NULL); tipc_subscrb_put(subscriber); } static void tipc_subscrp_delete(struct tipc_subscription *sub) { - struct tipc_net *tn = net_generic(sub->net, tipc_net_id); + u32 timeout = htohl(sub->evt.s.timeout, sub->swap); - tipc_nametbl_unsubscribe(sub); - list_del(&sub->subscrp_list); - kfree(sub); - atomic_dec(&tn->subscription_count); + if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) + tipc_subscrp_put(sub); } static void tipc_subscrp_cancel(struct tipc_subscr *s, struct tipc_subscriber *subscriber) { - struct tipc_subscription *sub, *temp; - u32 timeout; - - spin_lock_bh(&subscriber->lock); - /* Find first matching subscription, exit if not found */ - list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list, - subscrp_list) { - if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { - timeout = htohl(sub->evt.s.timeout, sub->swap); - if ((timeout == TIPC_WAIT_FOREVER) || - del_timer(&sub->timer)) { - tipc_subscrp_delete(sub); - tipc_subscrb_put(subscriber); - } - break; - } - } - spin_unlock_bh(&subscriber->lock); + tipc_subscrb_subscrp_delete(subscriber, s); } static struct tipc_subscription *tipc_subscrp_create(struct net *net, @@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net, sub->swap = swap; memcpy(&sub->evt.s, s, sizeof(*s)); atomic_inc(&tn->subscription_count); + kref_init(&sub->kref); return sub; } @@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s, spin_lock_bh(&subscriber->lock); list_add(&sub->subscrp_list, &subscriber->subscrp_list); - tipc_subscrb_get(subscriber); sub->subscriber = subscriber; tipc_nametbl_subscribe(sub); + tipc_subscrb_get(subscriber); spin_unlock_bh(&subscriber->lock); + setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub); timeout = htohl(sub->evt.s.timeout, swap); - if (timeout == 
TIPC_WAIT_FOREVER) - return; - setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub); - mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout)); + if (timeout != TIPC_WAIT_FOREVER) + mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout)); } /* Handle one termination request for the subscriber */ diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index be60103082c9..ffdc214c117a 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h @@ -57,6 +57,7 @@ struct tipc_subscriber; * @evt: template for events generated by subscription */ struct tipc_subscription { + struct kref kref; struct tipc_subscriber *subscriber; struct net *net; struct timer_list timer; -- GitLab From f68a45776a62fce560e4e28f89c5009895066ae1 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Tue, 24 Jan 2017 13:00:43 +0100 Subject: [PATCH 127/786] tipc: fix nametbl_lock soft lockup at node/link events [ Upstream commit 93f955aad4bacee5acebad141d1a03cd51f27b4e ] We trigger a soft lockup as we grab nametbl_lock twice if the node has a pending node up/down or link up/down event while: - we process an incoming named message in tipc_named_rcv() and perform an tipc_update_nametbl(). - we have pending backlog items in the name distributor queue during a nametable update using tipc_nametbl_publish() or tipc_nametbl_withdraw(). The following are the call chain associated: tipc_named_rcv() Grabs nametbl_lock tipc_update_nametbl() (publish/withdraw) tipc_node_subscribe()/unsubscribe() tipc_node_write_unlock() << lockup occurs if an outstanding node/link event exits, as we grabs nametbl_lock again >> tipc_nametbl_withdraw() Grab nametbl_lock tipc_named_process_backlog() tipc_update_nametbl() << rest as above >> The function tipc_node_write_unlock(), in addition to releasing the lock processes the outstanding node/link up/down events. To do this, we need to grab the nametbl_lock again leading to the lockup. In this commit we fix the soft lockup by introducing a fast variant of node_unlock(), where we just release the lock. We adapt the node_subscribe()/node_unsubscribe() to use the fast variants. Reported-and-Tested-by: John Thompson Acked-by: Ying Xue Acked-by: Jon Maloy Signed-off-by: Parthasarathy Bhuvaragan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/tipc/node.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/net/tipc/node.c b/net/tipc/node.c index 9d2f4c2b08ab..27753325e06e 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n) write_lock_bh(&n->lock); } +static void tipc_node_write_unlock_fast(struct tipc_node *n) +{ + write_unlock_bh(&n->lock); +} + static void tipc_node_write_unlock(struct tipc_node *n) { struct net *net = n->net; @@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr) } tipc_node_write_lock(n); list_add_tail(subscr, &n->publ_list); - tipc_node_write_unlock(n); + tipc_node_write_unlock_fast(n); tipc_node_put(n); } @@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr) } tipc_node_write_lock(n); list_del_init(subscr); - tipc_node_write_unlock(n); + tipc_node_write_unlock_fast(n); tipc_node_put(n); } -- GitLab From fefdd79403e89b0c673965343b92e2e01e2713a8 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 24 Jan 2017 00:51:32 +0100 Subject: [PATCH 128/786] netfilter: nf_tables: fix set->nelems counting with no NLM_F_EXCL [ Upstream commit 35d0ac9070ef619e3bf44324375878a1c540387b ] If the element exists and no NLM_F_EXCL is specified, do not bump set->nelems, otherwise we leak one set element slot. This problem amplifies if the set is full since the abort path always decrements the counter for the -ENFILE case too, giving one spare extra slot. Fix this by moving set->nelems update to nft_add_set_elem() after successful element insertion. Moreover, remove the element if the set is full so there is no need to rely on the abort path to undo things anymore. Fixes: c016c7e45ddf ("netfilter: nf_tables: honor NLM_F_EXCL flag in set element insertion") Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nf_tables_api.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index e5194f6f906c..778fcdb83225 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3637,10 +3637,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, goto err5; } + if (set->size && + !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) { + err = -ENFILE; + goto err6; + } + nft_trans_elem(trans) = elem; list_add_tail(&trans->list, &ctx->net->nft.commit_list); return 0; +err6: + set->ops->remove(set, &elem); err5: kfree(trans); err4: @@ -3687,15 +3695,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, return -EBUSY; nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { - if (set->size && - !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) - return -ENFILE; - err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags); - if (err < 0) { - atomic_dec(&set->nelems); + if (err < 0) break; - } } return err; } -- GitLab From c47538f610160c8f9e3bd3e6816a624d28220b0c Mon Sep 17 00:00:00 2001 From: Liping Zhang Date: Sun, 22 Jan 2017 22:10:32 +0800 Subject: [PATCH 129/786] netfilter: nft_log: restrict the log prefix length to 127 [ Upstream commit 5ce6b04ce96896e8a79e6f60740ced911eaac7a4 ] First, log prefix will be truncated to NF_LOG_PREFIXLEN-1, i.e. 127, at nf_log_packet(), so the extra part is useless. 
Second, after adding a log rule with a very very long prefix, we will fail to dump the nft rules after this _special_ one, but acctually, they do exist. For example: # name_65000=$(printf "%0.sQ" {1..65000}) # nft add rule filter output log prefix "$name_65000" # nft add rule filter output counter # nft add rule filter output counter # nft list chain filter output table ip filter { chain output { type filter hook output priority 0; policy accept; } } So now, restrict the log prefix length to NF_LOG_PREFIXLEN-1. Fixes: 96518518cc41 ("netfilter: add nftables") Signed-off-by: Liping Zhang Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/netfilter/nf_log.h | 2 ++ net/netfilter/nf_log.c | 1 - net/netfilter/nft_log.c | 3 ++- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h index 8be21e02387d..d0b5fa91ff54 100644 --- a/include/uapi/linux/netfilter/nf_log.h +++ b/include/uapi/linux/netfilter/nf_log.h @@ -9,4 +9,6 @@ #define NF_LOG_MACDECODE 0x20 /* Decode MAC header */ #define NF_LOG_MASK 0x2f +#define NF_LOG_PREFIXLEN 128 + #endif /* _NETFILTER_NF_LOG_H */ diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 3dca90dc24ad..ffb9e8ada899 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -13,7 +13,6 @@ /* Internal logging interface, which relies on the real LOG target modules */ -#define NF_LOG_PREFIXLEN 128 #define NFLOGGER_NAME_LEN 64 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly; diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c index 1b01404bb33f..c7704e9123ef 100644 --- a/net/netfilter/nft_log.c +++ b/net/netfilter/nft_log.c @@ -38,7 +38,8 @@ static void nft_log_eval(const struct nft_expr *expr, static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = { [NFTA_LOG_GROUP] = { .type = NLA_U16 }, - [NFTA_LOG_PREFIX] = { .type = NLA_STRING }, + [NFTA_LOG_PREFIX] = { .type = NLA_STRING, + .len = NF_LOG_PREFIXLEN - 1 }, [NFTA_LOG_SNAPLEN] = { .type = NLA_U32 }, [NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 }, [NFTA_LOG_LEVEL] = { .type = NLA_U32 }, -- GitLab From a4fa249385b6db208032a5445cea05891001b7b4 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:51:43 +0200 Subject: [PATCH 130/786] RDMA/qedr: Dispatch port active event from qedr_add [ Upstream commit f449c7a2d822c2d81b5bcb2c50eec80796766726 ] Relying on qede to trigger qedr on startup is problematic. When probing both if qedr loads slowly then qede can assume qedr is missing and not trigger it. This patch adds a triggering from qedr and protects against a race via an atomic bit. 
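The race protection is a one-shot state bit: whichever path runs first (driver probe or the QEDE_UP notification from qede) dispatches the PORT_ACTIVE event, and the loser of the race finds the bit already set. Stripped down, matching the helpers in the diff below:

    static void qedr_open(struct qedr_dev *dev)
    {
            if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
                    qedr_ib_dispatch_event(dev, QEDR_PORT,
                                           IB_EVENT_PORT_ACTIVE);
    }

    static void qedr_close(struct qedr_dev *dev)
    {
            if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
                    qedr_ib_dispatch_event(dev, QEDR_PORT,
                                           IB_EVENT_PORT_ERR);
    }

test_and_set_bit()/test_and_clear_bit() are atomic, so the active/error events are delivered at most once per state transition even if both callers race.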
Signed-off-by: Ram Amrani Signed-off-by: Ariel Elior Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/qedr/main.c | 20 ++++++++++++++------ drivers/infiniband/hw/qedr/qedr.h | 5 +++++ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 7b74d09a8217..58e92bce6825 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -792,6 +792,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev, if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) goto sysfs_err; + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); + DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); return dev; @@ -824,11 +827,10 @@ static void qedr_remove(struct qedr_dev *dev) ib_dealloc_device(&dev->ibdev); } -static int qedr_close(struct qedr_dev *dev) +static void qedr_close(struct qedr_dev *dev) { - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); - - return 0; + if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR); } static void qedr_shutdown(struct qedr_dev *dev) @@ -837,6 +839,12 @@ static void qedr_shutdown(struct qedr_dev *dev) qedr_remove(dev); } +static void qedr_open(struct qedr_dev *dev) +{ + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); +} + static void qedr_mac_address_change(struct qedr_dev *dev) { union ib_gid *sgid = &dev->sgid_tbl[0]; @@ -863,7 +871,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev) ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); - qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE); if (rc) DP_ERR(dev, "Error updating mac filter\n"); @@ -877,7 +885,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) { switch (event) { case QEDE_UP: - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); + qedr_open(dev); break; case QEDE_DOWN: qedr_close(dev); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index 620badd7d4fb..f669d0bb697e 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -113,6 +113,8 @@ struct qedr_device_attr { struct qed_rdma_events events; }; +#define QEDR_ENET_STATE_BIT (0) + struct qedr_dev { struct ib_device ibdev; struct qed_dev *cdev; @@ -153,6 +155,8 @@ struct qedr_dev { struct qedr_cq *gsi_sqcq; struct qedr_cq *gsi_rqcq; struct qedr_qp *gsi_qp; + + unsigned long enet_state; }; #define QEDR_MAX_SQ_PBL (0x8000) @@ -188,6 +192,7 @@ struct qedr_dev { #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) #define QEDR_MAX_PORT (1) +#define QEDR_PORT (1) #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) -- GitLab From bbf61096e42c18e51b8e72a6dd4ceb276610c48c Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:51:42 +0200 Subject: [PATCH 131/786] RDMA/qedr: Fix and simplify memory leak in PD alloc [ Upstream commit 9c1e0228ab35e52d30abf4b5629c28350833fbcb ] Free the PD if no internal resources were available. Move userspace code under the relevant 'if'. 
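As a rough, self-contained sketch of the resulting unwind ordering (all names here are hypothetical, not qedr code): resources are released in the reverse order they were acquired, and the user-space copy only happens when a user context exists.

    #include <linux/slab.h>
    #include <linux/err.h>

    struct example_obj { int id; };

    static int example_alloc_hw(int *id) { *id = 1; return 0; }   /* stub */
    static void example_free_hw(int id) { }                        /* stub */
    static int example_copy_to_user(int id) { return 0; }          /* stub */

    static struct example_obj *example_alloc(bool has_user_ctx)
    {
    	struct example_obj *obj;
    	int rc;

    	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    	if (!obj)
    		return ERR_PTR(-ENOMEM);

    	rc = example_alloc_hw(&obj->id);
    	if (rc)
    		goto err_free;			/* nothing else to undo yet */

    	if (has_user_ctx) {
    		rc = example_copy_to_user(obj->id);
    		if (rc) {
    			example_free_hw(obj->id);	/* undo in reverse order */
    			goto err_free;
    		}
    	}
    	return obj;

    err_free:
    	kfree(obj);
    	return ERR_PTR(rc);
    }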
Signed-off-by: Ram Amrani Signed-off-by: Ariel Elior Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/qedr/verbs.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index a61514296767..b78e37ed5352 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) { struct qedr_dev *dev = get_qedr_dev(ibdev); - struct qedr_ucontext *uctx = NULL; - struct qedr_alloc_pd_uresp uresp; struct qedr_pd *pd; u16 pd_id; int rc; @@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, if (!pd) return ERR_PTR(-ENOMEM); - dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); + rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); + if (rc) + goto err; - uresp.pd_id = pd_id; pd->pd_id = pd_id; if (udata && context) { + struct qedr_alloc_pd_uresp uresp; + + uresp.pd_id = pd_id; + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); - if (rc) + if (rc) { DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); - uctx = get_qedr_ucontext(context); - uctx->pd = pd; - pd->uctx = uctx; + dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); + goto err; + } + + pd->uctx = get_qedr_ucontext(context); + pd->uctx->pd = pd; } return &pd->ibpd; + +err: + kfree(pd); + return ERR_PTR(rc); } int qedr_dealloc_pd(struct ib_pd *ibpd) -- GitLab From 13a87589af5f9d85493917048f4243691d35f89c Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:50:38 +0200 Subject: [PATCH 132/786] RDMA/qedr: Don't reset QP when queues aren't flushed [ Upstream commit 933e6dcaa0f65eb2f624ad760274020874a1f35e ] Fail QP state transition from error to reset if SQ/RQ are not empty and still in the process of flushing out the queued work entries. Signed-off-by: Ram Amrani Signed-off-by: Michal Kalderon Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/qedr/verbs.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index b78e37ed5352..4e3e157009b6 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1729,6 +1729,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev, /* ERR->XXX */ switch (new_state) { case QED_ROCE_QP_STATE_RESET: + if ((qp->rq.prod != qp->rq.cons) || + (qp->sq.prod != qp->sq.cons)) { + DP_NOTICE(dev, + "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n", + qp->rq.prod, qp->rq.cons, qp->sq.prod, + qp->sq.cons); + status = -EINVAL; + } break; default: status = -EINVAL; -- GitLab From c5ea7aa57e2420725a283d2df893a2bbe54c5cfc Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:50:37 +0200 Subject: [PATCH 133/786] RDMA/qedr: Don't spam dmesg if QP is in error state [ Upstream commit c78c31496111f497b4a03f955c100091185da8b6 ] It is normal to flush CQEs if the QP is in error state. Hence there's no use in printing a message per CQE to dmesg. 
Signed-off-by: Ram Amrani Signed-off-by: Michal Kalderon Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/qedr/verbs.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 4e3e157009b6..960e4bd8ffe9 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -3238,9 +3238,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev, IB_WC_SUCCESS, 0); break; case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: - DP_ERR(dev, - "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", - cq->icid, qp->icid); + if (qp->state != QED_ROCE_QP_STATE_ERR) + DP_ERR(dev, + "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", + cq->icid, qp->icid); cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, IB_WC_WR_FLUSH_ERR, 0); break; -- GitLab From 57211e84dda0583b85326c07909c442ba950e0f0 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Tue, 24 Jan 2017 13:50:35 +0200 Subject: [PATCH 134/786] RDMA/qedr: Return max inline data in QP query result [ Upstream commit 59e8970b3798e4cbe575ed9cf4d53098760a2a86 ] Return the maximum supported amount of inline data, not the qp's current configured inline data size, when filling out the results of a query qp call. Signed-off-by: Ram Amrani Signed-off-by: Michal Kalderon Signed-off-by: Doug Ledford Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 960e4bd8ffe9..4ba019e3dc56 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2032,7 +2032,7 @@ int qedr_query_qp(struct ib_qp *ibqp, qp_attr->cap.max_recv_wr = qp->rq.max_wr; qp_attr->cap.max_send_sge = qp->sq.max_sges; qp_attr->cap.max_recv_sge = qp->rq.max_sges; - qp_attr->cap.max_inline_data = qp->max_inline_data; + qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; qp_init_attr->cap = qp_attr->cap; memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], ¶ms.dgid.bytes[0], -- GitLab From a2f68276822cd3a09ed9ae23e5c5e1cd8259af65 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 5 Jun 2017 02:43:51 -0700 Subject: [PATCH 135/786] xtensa: don't use linux IRQ #0 commit e5c86679d5e864947a52fb31e45a425dea3e7fa9 upstream. Linux IRQ #0 is reserved for error reporting and may not be used. Increase NR_IRQS for one additional slot and increase irq_domain_add_legacy parameter first_irq value to 1, so that linux IRQ #0 is not associated with hardware IRQ #0 in legacy IRQ domains. Introduce macro XTENSA_PIC_LINUX_IRQ for static translation of xtensa PIC hardware IRQ # to linux IRQ #. Use this macro in XTFPGA platform data definitions. This fixes inability to use hardware IRQ #0 in configurations that don't use device tree and allows for non-identity mapping between linux IRQ # and hardware IRQ #. 
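As an illustrative sketch (assumed usage, not taken verbatim from the hunks below), static platform resources then pass hardware IRQ numbers through the new macro instead of using them directly, so the off-by-one shift is applied in exactly one place:

    #include <linux/ioport.h>

    #define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)	/* definition added by this patch */

    static struct resource example_irq_res = {
    	.start = XTENSA_PIC_LINUX_IRQ(1),	/* hardware IRQ 1 -> linux IRQ 2 */
    	.end   = XTENSA_PIC_LINUX_IRQ(1),
    	.flags = IORESOURCE_IRQ,
    };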
Signed-off-by: Max Filippov Signed-off-by: Greg Kroah-Hartman --- arch/xtensa/include/asm/irq.h | 3 ++- arch/xtensa/kernel/irq.c | 5 ----- .../platforms/xtfpga/include/platform/hardware.h | 6 ++++-- arch/xtensa/platforms/xtfpga/setup.c | 10 +++++----- drivers/irqchip/irq-xtensa-mx.c | 2 +- drivers/irqchip/irq-xtensa-pic.c | 2 +- 6 files changed, 13 insertions(+), 15 deletions(-) diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h index f71f88ea7646..19707db966f1 100644 --- a/arch/xtensa/include/asm/irq.h +++ b/arch/xtensa/include/asm/irq.h @@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { } # define PLATFORM_NR_IRQS 0 #endif #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS -#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) +#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1) +#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1) #if VARIANT_NR_IRQS == 0 static inline void variant_init_irq(void) { } diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index 4ac3d23161cf..441694464b1e 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs) { int irq = irq_find_mapping(NULL, hwirq); - if (hwirq >= NR_IRQS) { - printk(KERN_EMERG "%s: cannot handle IRQ %d\n", - __func__, hwirq); - } - #ifdef CONFIG_DEBUG_STACKOVERFLOW /* Debugging check for stack overflow: is there less than 1KB free? */ { diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h index dbeea2b440a1..1fda7e20dfcb 100644 --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h @@ -24,16 +24,18 @@ /* Interrupt configuration. */ -#define PLATFORM_NR_IRQS 10 +#define PLATFORM_NR_IRQS 0 /* Default assignment of LX60 devices to external interrupts. */ #ifdef CONFIG_XTENSA_MX #define DUART16552_INTNUM XCHAL_EXTINT3_NUM #define OETH_IRQ XCHAL_EXTINT4_NUM +#define C67X00_IRQ XCHAL_EXTINT8_NUM #else #define DUART16552_INTNUM XCHAL_EXTINT0_NUM #define OETH_IRQ XCHAL_EXTINT1_NUM +#define C67X00_IRQ XCHAL_EXTINT5_NUM #endif /* @@ -63,5 +65,5 @@ #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) #define C67X00_SIZE 0x10 -#define C67X00_IRQ 5 + #endif /* __XTENSA_XTAVNET_HARDWARE_H */ diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 779be723eb2b..42285f35d313 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c @@ -175,8 +175,8 @@ static struct resource ethoc_res[] = { .flags = IORESOURCE_MEM, }, [2] = { /* IRQ number */ - .start = OETH_IRQ, - .end = OETH_IRQ, + .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), + .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ), .flags = IORESOURCE_IRQ, }, }; @@ -213,8 +213,8 @@ static struct resource c67x00_res[] = { .flags = IORESOURCE_MEM, }, [1] = { /* IRQ number */ - .start = C67X00_IRQ, - .end = C67X00_IRQ, + .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), + .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ), .flags = IORESOURCE_IRQ, }, }; @@ -247,7 +247,7 @@ static struct resource serial_resource = { static struct plat_serial8250_port serial_platform_data[] = { [0] = { .mapbase = DUART16552_PADDR, - .irq = DUART16552_INTNUM, + .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM), .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, .iotype = XCHAL_HAVE_BE ? 
UPIO_MEM32BE : UPIO_MEM32, diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index bb3ac5fe5846..72a391e01011 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c @@ -142,7 +142,7 @@ static struct irq_chip xtensa_mx_irq_chip = { int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_mx_irq_domain_ops, &xtensa_mx_irq_chip); irq_set_default_host(root_domain); diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index 472ae1770964..f728755fa292 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c @@ -89,7 +89,7 @@ static struct irq_chip xtensa_irq_chip = { int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent) { struct irq_domain *root_domain = - irq_domain_add_legacy(NULL, NR_IRQS, 0, 0, + irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0, &xtensa_irq_domain_ops, &xtensa_irq_chip); irq_set_default_host(root_domain); return 0; -- GitLab From 8e1a4006ff9218057f2e47d3bf5415bdcdbcd3b6 Mon Sep 17 00:00:00 2001 From: Christian Borntraeger Date: Mon, 15 May 2017 14:11:03 +0200 Subject: [PATCH 136/786] s390/kvm: do not rely on the ILC on kvm host protection faults commit c0e7bb38c07cbd8269549ee0a0566021a3c729de upstream. For most cases a protection exception in the host (e.g. copy on write or dirty tracking) on the sie instruction will indicate an instruction length of 4. Turns out that there are some corner cases (e.g. runtime instrumentation) where this is not necessarily true and the ILC is unpredictable. Let's replace our 4 byte rewind_pad with 3 byte nops to prepare for all possible ILCs. Signed-off-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/entry.S | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index c43816886839..3bc2825173ef 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -240,12 +240,17 @@ ENTRY(sie64a) lctlg %c1,%c1,__LC_USER_ASCE # load primary asce .Lsie_done: # some program checks are suppressing. C code (e.g. do_protection_exception) -# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other -# instructions between sie64a and .Lsie_done should not cause program -# interrupts. So lets use a nop (47 00 00 00) as a landing pad. +# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There +# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. +# Other instructions between sie64a and .Lsie_done should not cause program +# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie -.Lrewind_pad: - nop 0 +.Lrewind_pad6: + nopr 7 +.Lrewind_pad4: + nopr 7 +.Lrewind_pad2: + nopr 7 .globl sie_exit sie_exit: lg %r14,__SF_EMPTY+8(%r15) # load guest register save area @@ -258,7 +263,9 @@ sie_exit: stg %r14,__SF_EMPTY+16(%r15) # set exit reason code j sie_exit - EX_TABLE(.Lrewind_pad,.Lsie_fault) + EX_TABLE(.Lrewind_pad6,.Lsie_fault) + EX_TABLE(.Lrewind_pad4,.Lsie_fault) + EX_TABLE(.Lrewind_pad2,.Lsie_fault) EX_TABLE(sie_exit,.Lsie_fault) EXPORT_SYMBOL(sie64a) EXPORT_SYMBOL(sie_exit) -- GitLab From 4f59a7a895c4658af84a8f7e88438973940e2a54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Thu, 15 Dec 2016 19:47:34 +0200 Subject: [PATCH 137/786] drm/i915: Workaround VLV/CHV DSI scanline counter hardware fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 8f4d38099b3098eae75f7755e1801931f8141350 upstream. The scanline counter is bonkers on VLV/CHV DSI. The scanline counter increment is not lined up with the start of vblank like it is on every other platform and output type. This causes problems for both the vblank timestamping and atomic update vblank evasion. On my FFRD8 machine at least, the scanline counter increment happens about 1/3 of a scanline ahead of the start of vblank (which is where all register latching happens still). That means we can't trust the scanline counter to tell us whether we're in vblank or not while we're on that particular line. In order to keep vblank timestamping in working condition when called from the vblank irq, we'll leave scanline_offset at one, which means that the entire line containing the start of vblank is considered to be inside the vblank. For the vblank evasion we'll need to consider that entire line to be bad, since we can't tell whether the registers already got latched or not. And we can't actually use the start of vblank interrupt to get us past that line as the interrupt would fire too soon, and then we'd up waiting for the next start of vblank instead. One way around that would using the frame start interrupt instead since that wouldn't fire until the next scanline, but that would require some bigger changes in the interrupt code. So for simplicity we'll just poll until we get past the bad line. v2: Adjust the comments a bit Cc: Jonas Aaberg Tested-by: Jonas Aaberg Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=99086 Signed-off-by: Ville Syrjälä Link: http://patchwork.freedesktop.org/patch/msgid/20161215174734.28779-1-ville.syrjala@linux.intel.com Tested-by: Mika Kahola Reviewed-by: Mika Kahola (cherry picked from commit ec1b4ee2834e66884e5b0d3d465f347ff212e372) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/intel_display.c | 9 +++++++++ drivers/gpu/drm/i915/intel_sprite.c | 21 +++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3673ab3aa991..f8efd20e4a90 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13767,6 +13767,15 @@ static void update_scanline_offset(struct intel_crtc *crtc) * type. For DP ports it behaves like most other platforms, but on HDMI * there's an extra 1 line difference. So we need to add two instead of * one to the value. + * + * On VLV/CHV DSI the scanline counter would appear to increment + * approx. 1/3 of a scanline before start of vblank. 
Unfortunately + * that means we can't tell whether we're in vblank or not while + * we're on that particular line. We must still set scanline_offset + * to 1 so that the vblank timestamps come out correct when we query + * the scanline counter from within the vblank interrupt handler. + * However if queried just before the start of vblank we'll get an + * answer that's slightly in the future. */ if (IS_GEN2(dev)) { const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index dbed12c484c9..64f4e2e18594 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -81,10 +81,13 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, */ void intel_pipe_update_start(struct intel_crtc *crtc) { + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; long timeout = msecs_to_jiffies_timeout(1); int scanline, min, max, vblank_start; wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); + bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI); DEFINE_WAIT(wait); vblank_start = adjusted_mode->crtc_vblank_start; @@ -136,6 +139,24 @@ void intel_pipe_update_start(struct intel_crtc *crtc) drm_crtc_vblank_put(&crtc->base); + /* + * On VLV/CHV DSI the scanline counter would appear to + * increment approx. 1/3 of a scanline before start of vblank. + * The registers still get latched at start of vblank however. + * This means we must not write any registers on the first + * line of vblank (since not the whole line is actually in + * vblank). And unfortunately we can't use the interrupt to + * wait here since it will fire too soon. We could use the + * frame start interrupt instead since it will fire after the + * critical scanline, but that would require more changes + * in the interrupt code. So for now we'll just do the nasty + * thing and poll for the bad scanline to pass us by. + * + * FIXME figure out if BXT+ DSI suffers from this as well + */ + while (need_vlv_dsi_wa && scanline == vblank_start) + scanline = intel_get_crtc_scanline(crtc); + crtc->debug.scanline_start = scanline; crtc->debug.start_vbl_time = ktime_get(); crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc); -- GitLab From 933b9b11f7ca7153137ed8afccd4d32a5b73499f Mon Sep 17 00:00:00 2001 From: Maarten Lankhorst Date: Wed, 31 May 2017 17:42:36 +0200 Subject: [PATCH 138/786] drm/i915: Always recompute watermarks when distrust_bios_wm is set, v2. commit 4e3aed844547f63614363a386de126e6304e55fb upstream. On some systems there can be a race condition in which no crtc state is added to the first atomic commit. This results in all crtc's having a null DDB allocation, causing a FIFO underrun on any update until the first modeset. Changes since v1: - Do not take the connection_mutex, this is already done below. 
Reported-by: Maarten Lankhorst Inspired-by: Mahesh Kumar Signed-off-by: Maarten Lankhorst Fixes: 98d39494d375 ("drm/i915/gen9: Compute DDB allocation at atomic check time (v4)") Cc: Mahesh Kumar Cc: Matt Roper Link: http://patchwork.freedesktop.org/patch/msgid/20170531154236.27180-1-maarten.lankhorst@linux.intel.com Reviewed-by: Mahesh Kumar Reviewed-by: Matt Roper Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 367d73d2806085bb507ab44c1f532640917fd5ca) Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/intel_pm.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2c6d59d4b6d3..49de4760cc16 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4114,10 +4114,18 @@ skl_compute_wm(struct drm_atomic_state *state) struct drm_crtc_state *cstate; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct skl_wm_values *results = &intel_state->wm_results; + struct drm_device *dev = state->dev; struct skl_pipe_wm *pipe_wm; bool changed = false; int ret, i; + /* + * When we distrust bios wm we always need to recompute to set the + * expected DDB allocations for each CRTC. + */ + if (to_i915(dev)->wm.distrust_bios_wm) + changed = true; + /* * If this transaction isn't actually touching any CRTC's, don't * bother with watermark calculation. Note that if we pass this @@ -4128,6 +4136,7 @@ skl_compute_wm(struct drm_atomic_state *state) */ for_each_crtc_in_state(state, crtc, cstate, i) changed = true; + if (!changed) return 0; -- GitLab From bdc9a03fd9ce07caa7319e463db8cce0d76a4856 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 25 Nov 2016 14:03:55 +0300 Subject: [PATCH 139/786] sparc64: make string buffers large enough commit b5c3206190f1fddd100b3060eb15f0d775ffeab8 upstream. My static checker complains that if "lvl" is ULONG_MAX (this is 64 bit) then some of the strings will overflow. I don't know if that's possible but it seems simple enough to make the buffers slightly larger. Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller Cc: Waldemar Brodkorb Signed-off-by: Greg Kroah-Hartman --- arch/sparc/kernel/traps_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 4094a51b1970..496fa926e1e0 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -85,7 +85,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p) void bad_trap(struct pt_regs *regs, long lvl) { - char buffer[32]; + char buffer[36]; siginfo_t info; if (notify_die(DIE_TRAP, "bad trap", regs, @@ -116,7 +116,7 @@ void bad_trap(struct pt_regs *regs, long lvl) void bad_trap_tl1(struct pt_regs *regs, long lvl) { - char buffer[32]; + char buffer[36]; if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, 0, lvl, SIGTRAP) == NOTIFY_STOP) -- GitLab From 050639ef5810e8ad17fb6a426eff3c63e616350c Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 17 Jun 2017 06:43:47 +0200 Subject: [PATCH 140/786] Linux 4.9.33 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 30f72a08c6da..8470d81d5cc2 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 9 -SUBLEVEL = 32 +SUBLEVEL = 33 EXTRAVERSION = NAME = Roaring Lionus -- GitLab From eb486cc9a5d76dc13c8f49663a5edb24e642502b Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Mon, 19 Jun 2017 15:28:50 +0530 Subject: [PATCH 141/786] sched: EAS: skip energy_diff() for placement boosted tasks We always want to migrate the placement boosted task to the best CPU. Skip energy_diff() in such scenario. Change-Id: I2afb9f877d19422744e3351d6580c2ce7188e2fa Signed-off-by: Pavankumar Kondeti --- include/trace/events/sched.h | 5 +++++ kernel/sched/fair.c | 12 ++++++++++++ 2 files changed, 17 insertions(+) diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 1ea6e0d472a3..bf8f149192f0 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -881,6 +881,11 @@ DEFINE_EVENT(sched_task_util, sched_task_util_colocated, TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle) ); +DEFINE_EVENT(sched_task_util, sched_task_util_boosted, + TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle), + TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle) +); + DEFINE_EVENT(sched_task_util, sched_task_util_overutilzed, TP_PROTO(struct task_struct *p, int task_cpu, unsigned long task_util, int nominated_cpu, int target_cpu, int ediff, bool need_idle), TP_ARGS(p, task_cpu, task_util, nominated_cpu, target_cpu, ediff, need_idle) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a0b62d47480f..2d854e238616 100755 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7045,6 +7045,18 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync) return target_cpu; } + /* + * We always want to migrate the task to the best CPU when + * placement boost is active. 
+ */ + if (placement_boost) { + trace_sched_task_util_boosted(p, task_cpu(p), + task_util(p), + target_cpu, + target_cpu, 0, need_idle); + return target_cpu; + } + #ifdef CONFIG_SCHED_WALT if (walt_disabled || !sysctl_sched_use_walt_cpu_util) task_util_boosted = 0; -- GitLab From c3dae85cea20b050034b8a590df3d345b765d881 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Mon, 19 Jun 2017 15:30:11 +0530 Subject: [PATCH 142/786] sched: EAS: fix idle_get_state_idx() Consider a CPU is busy - When a CPU has more than 1 runnable tasks but CPU is still in idle state and about to exit or exiting idle state. - When the CPU exits idle but idle_set_state_idx() is not yet called. Change-Id: I4830caf2828947be614a187c26e01e9b2950f1c1 Signed-off-by: Pavankumar Kondeti --- kernel/sched/fair.c | 3 +-- kernel/sched/rt.c | 3 --- kernel/sched/sched.h | 4 ++++ 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 2d854e238616..33fda3b4f02e 100755 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6905,8 +6905,7 @@ static int energy_aware_wake_cpu(struct task_struct *p, int target, int sync) if (new_util > capacity_orig_of(i)) continue; - cpu_idle_idx = cpu_rq(i)->nr_running ? -1 : - idle_get_state_idx(cpu_rq(i)); + cpu_idle_idx = idle_get_state_idx(cpu_rq(i)); if (!need_idle && (!wake_on_sibling || diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index ec903196dc5e..65b34b4e947e 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1970,9 +1970,6 @@ static int find_lowest_rq(struct task_struct *task) if (sysctl_sched_cstate_aware) cpu_idle_idx = - (cpu == smp_processor_id() || - cpu_rq(cpu)->nr_running) ? - -1 : idle_get_state_idx(cpu_rq(cpu)); if (add_capacity_margin(new_util_cum) < diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 28d660b3be98..69c31f5e48d3 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1491,6 +1491,10 @@ static inline void idle_set_state_idx(struct rq *rq, int idle_state_idx) static inline int idle_get_state_idx(struct rq *rq) { WARN_ON(!rcu_read_lock_held()); + + if (rq->nr_running || cpu_of(rq) == raw_smp_processor_id()) + return -1; + return rq->idle_state_idx; } #else -- GitLab From dc7257223f0d18e9a99a7e523fd7c50fc9e0f7fc Mon Sep 17 00:00:00 2001 From: Steve Muckle Date: Tue, 13 Jun 2017 09:23:25 -0700 Subject: [PATCH 143/786] ANDROID: android-base.cfg: split out arm64-specific configs These config options are specific to arm64 so should not be universally required. 
Bug: 62523096 Change-Id: Ic5f35db71d73919f2958120f45dd717f5d05f4c5 Signed-off-by: Steve Muckle --- kernel/configs/android-base-arm64.cfg | 5 +++++ kernel/configs/android-base.config | 4 ---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 kernel/configs/android-base-arm64.cfg diff --git a/kernel/configs/android-base-arm64.cfg b/kernel/configs/android-base-arm64.cfg new file mode 100644 index 000000000000..43f23d6b5391 --- /dev/null +++ b/kernel/configs/android-base-arm64.cfg @@ -0,0 +1,5 @@ +# KEEP ALPHABETICALLY SORTED +CONFIG_ARMV8_DEPRECATED=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_SWP_EMULATION=y diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config index fb6017e1a869..301e1a6c33b1 100644 --- a/kernel/configs/android-base.config +++ b/kernel/configs/android-base.config @@ -12,7 +12,6 @@ CONFIG_ANDROID=y CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder CONFIG_ANDROID_BINDER_IPC=y CONFIG_ANDROID_LOW_MEMORY_KILLER=y -CONFIG_ARMV8_DEPRECATED=y CONFIG_ASHMEM=y CONFIG_AUDIT=y CONFIG_BLK_DEV_INITRD=y @@ -22,7 +21,6 @@ CONFIG_CGROUP_DEBUG=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_SCHED=y CONFIG_CGROUP_BPF=y -CONFIG_CP15_BARRIER_EMULATION=y CONFIG_DEFAULT_SECURITY_SELINUX=y CONFIG_EMBEDDED=y CONFIG_FB=y @@ -156,9 +154,7 @@ CONFIG_SECURITY=y CONFIG_SECURITY_NETWORK=y CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y CONFIG_SECURITY_SELINUX=y -CONFIG_SETEND_EMULATION=y CONFIG_STAGING=y -CONFIG_SWP_EMULATION=y CONFIG_SYNC=y CONFIG_TUN=y CONFIG_UID_SYS_STATS=y -- GitLab From 9151720ceb61f68bb0df556d8a2afa66857ca0e2 Mon Sep 17 00:00:00 2001 From: Narendra Muppalla Date: Fri, 16 Jun 2017 10:33:33 -0700 Subject: [PATCH 144/786] ARM: dts: msm: add topology node support dsi video and cmd mode sim panels This change adds display-topology node support for single/dual dsi video mode and cmd mode sim panels for sdm845. 
Change-Id: I21fee64d3403e4249c6421d43c0c7f9eca2e97af Signed-off-by: Narendra Muppalla --- arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi index 726a63f02be8..c0a64fcb8821 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi @@ -545,22 +545,34 @@ qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00]; qcom,mdss-dsi-t-clk-post = <0x0d>; qcom,mdss-dsi-t-clk-pre = <0x2d>; + qcom,display-topology = <1 0 1>, + <2 0 1>; + qcom,default-topology-index = <0>; }; &dsi_dual_sim_vid { qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00]; qcom,mdss-dsi-t-clk-post = <0x0d>; qcom,mdss-dsi-t-clk-pre = <0x2d>; + qcom,display-topology = <2 0 2>, + <1 0 2>; + qcom,default-topology-index = <0>; }; &dsi_sim_cmd { qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00]; qcom,mdss-dsi-t-clk-post = <0x0d>; qcom,mdss-dsi-t-clk-pre = <0x2d>; + qcom,display-topology = <1 0 1>, + <2 0 1>; + qcom,default-topology-index = <0>; }; &dsi_dual_sim_cmd { qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07 07 05 03 04 00]; qcom,mdss-dsi-t-clk-post = <0x0d>; qcom,mdss-dsi-t-clk-pre = <0x2d>; + qcom,display-topology = <2 0 2>, + <1 0 2>; + qcom,default-topology-index = <0>; }; -- GitLab From d68ab111c13afa8b8a51c7a826f4dda83890045e Mon Sep 17 00:00:00 2001 From: Michael Adisumarta Date: Wed, 14 Jun 2017 11:40:06 -0700 Subject: [PATCH 145/786] msm: ipa4: IPA_CLKON_CFG register configuration Add the IPA_CLKON_CFG register and initialize CGC_OPEN_GLOBAL_2X_CLK & CGC_OPEN_GLOBAL bits to 1. Remove disable prefetch from IPA_HW_v4_0 and on. Change-Id: I5797580bfb5bcd5c1095b1fc91cc7deec473c6c5 CRs-Fixed: 2060817 Signed-off-by: Michael Adisumarta --- drivers/platform/msm/ipa/ipa_v3/ipa.c | 3 ++- drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 4 ++++ drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c | 4 ++++ drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h | 1 + 4 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 85bfe959c0ef..1c3995d46056 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -3998,7 +3998,8 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p, * IPAv3.5 and above requires to disable prefetch for USB in order * to allow MBIM to work, currently MBIM is not needed in MHI mode. 
*/ - if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5) && + if ((ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5 + && ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) && (!ipa3_ctx->ipa_config_is_mhi)) ipa3_disable_prefetch(IPA_CLIENT_USB_CONS); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index dbe6cd60c6dd..3813c82c245a 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -43,6 +43,7 @@ #define IPA_BCR_REG_VAL_v3_0 (0x00000001) #define IPA_BCR_REG_VAL_v3_5 (0x0000003B) #define IPA_BCR_REG_VAL_v4_0 (0x00000039) +#define IPA_CLKON_CFG_v4_0 (0x30000000) #define IPA_AGGR_GRAN_MIN (1) #define IPA_AGGR_GRAN_MAX (32) #define IPA_EOT_COAL_GRAN_MIN (1) @@ -2055,6 +2056,9 @@ int ipa3_init_hw(void) ipahal_write_reg(IPA_BCR, val); + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + ipahal_write_reg(IPA_CLKON_CFG, IPA_CLKON_CFG_v4_0); + ipa3_cfg_qsb(); return 0; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c index 3019e4d2c819..af717cde487f 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c @@ -85,6 +85,7 @@ static const char *ipareg_name_to_str[IPA_REG_MAX] = { __stringify(IPA_IDLE_INDICATION_CFG), __stringify(IPA_DPS_SEQUENCER_FIRST), __stringify(IPA_HPS_SEQUENCER_FIRST), + __stringify(IPA_CLKON_CFG), }; static void ipareg_construct_dummy(enum ipahal_reg_name reg, @@ -1490,6 +1491,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = { [IPA_HW_v4_0][IPA_ENDP_STATUS_n] = { ipareg_construct_endp_status_n_v4_0, ipareg_parse_dummy, 0x00000840, 0x70}, + [IPA_HW_v4_0][IPA_CLKON_CFG] = { + ipareg_construct_dummy, ipareg_parse_dummy, + 0x00000044, 0}, }; /* diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h index c9293b8fd937..79e2b9ceeaea 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h @@ -88,6 +88,7 @@ enum ipahal_reg_name { IPA_IDLE_INDICATION_CFG, IPA_DPS_SEQUENCER_FIRST, IPA_HPS_SEQUENCER_FIRST, + IPA_CLKON_CFG, IPA_REG_MAX, }; -- GitLab From d850b2243a3621b725e0e9c5c379aecd3c3c6975 Mon Sep 17 00:00:00 2001 From: Dhaval Patel Date: Fri, 16 Jun 2017 17:37:37 -0700 Subject: [PATCH 146/786] drm/msm/sde: avoid virtual plane debug dump register The master plane dumps the virtual plane configuration also. Avoid doing same dump from virtual plane to reduce the dump log size. 
Change-Id: Ifb8cf1fb0aafa0812c653a5ffb5f01166d8caa16 Signed-off-by: Dhaval Patel --- drivers/gpu/drm/msm/sde/sde_hw_sspp.c | 9 +++++---- drivers/gpu/drm/msm/sde/sde_hw_sspp.h | 5 +++-- drivers/gpu/drm/msm/sde/sde_plane.c | 3 ++- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c index bc1b1e79d95f..9fd59926b146 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c @@ -1223,8 +1223,8 @@ static struct sde_hw_blk_ops sde_hw_ops = { }; struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx, - void __iomem *addr, - struct sde_mdss_cfg *catalog) + void __iomem *addr, struct sde_mdss_cfg *catalog, + bool is_virtual_pipe) { struct sde_hw_pipe *hw_pipe; struct sde_sspp_cfg *cfg; @@ -1256,12 +1256,13 @@ struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx, goto blk_init_error; } - sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, + if (!is_virtual_pipe) + sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, hw_pipe->hw.blk_off, hw_pipe->hw.blk_off + hw_pipe->hw.length, hw_pipe->hw.xin_id); - if (cfg->sblk->scaler_blk.len) + if (cfg->sblk->scaler_blk.len && !is_virtual_pipe) sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->sblk->scaler_blk.name, hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base, diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h index e4be0551c4b5..8d14715b3eb5 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h @@ -649,10 +649,11 @@ static inline struct sde_hw_pipe *to_sde_hw_pipe(struct sde_hw_blk *hw) * @idx: Pipe index for which driver object is required * @addr: Mapped register io address of MDP * @catalog : Pointer to mdss catalog data + * @is_virtual_pipe: is this pipe virtual pipe */ struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx, - void __iomem *addr, - struct sde_mdss_cfg *catalog); + void __iomem *addr, struct sde_mdss_cfg *catalog, + bool is_virtual_pipe); /** * sde_hw_sspp_destroy(): Destroys SSPP driver context diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 6b8a9b95cb61..373f626c52aa 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -4460,7 +4460,8 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, } /* initialize underlying h/w driver */ - psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog); + psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog, + master_plane_id != 0); if (IS_ERR(psde->pipe_hw)) { SDE_ERROR("[%u]SSPP init failed\n", pipe); ret = PTR_ERR(psde->pipe_hw); -- GitLab From 4a70c6b1290476a87f6959a2982ca5219fcccd71 Mon Sep 17 00:00:00 2001 From: Tony Truong Date: Mon, 5 Jun 2017 18:27:32 -0700 Subject: [PATCH 147/786] msm: pcie: switch dma layer to map QGIC MSI address Clients create their mappings using APIs from dma layer. PCIe host bus driver should also use similar APIs instead of iommu driver when mapping QGIC address for clients. 
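A minimal sketch of the DMA-API pattern being adopted here (the helper name and error handling around it are hypothetical; the real change is in the diff below):

    #include <linux/dma-mapping.h>

    /* Map a physical/MMIO doorbell region through the DMA layer and validate it. */
    static int example_map_doorbell(struct device *dev, phys_addr_t db_phys,
    				dma_addr_t *iova)
    {
    	*iova = dma_map_resource(dev, db_phys, PAGE_SIZE,
    				 DMA_BIDIRECTIONAL, 0);
    	if (dma_mapping_error(dev, *iova))
    		return -EIO;	/* no usable IOVA was produced */

    	return 0;
    }

    /* Teardown would use dma_unmap_resource(dev, *iova, PAGE_SIZE, DMA_BIDIRECTIONAL, 0); */

Using dma_map_resource()/dma_mapping_error() keeps the mapping consistent with how client drivers create theirs, instead of calling iommu_map() on the domain directly.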
Change-Id: I85cf931e79709306e0643e1448bc3663ba8dbdf3 Signed-off-by: Tony Truong --- drivers/pci/host/pci-msm.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 771a1f9a4715..e5acefc444eb 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -4926,9 +4926,8 @@ static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev, { struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev); struct iommu_domain_geometry geometry; - int ret, fastmap_en = 0, bypass_en = 0; - dma_addr_t iova; - phys_addr_t gicm_db_offset; + int fastmap_en = 0, bypass_en = 0; + dma_addr_t iova, addr; msg->address_hi = 0; msg->address_lo = dev->msi_gicm_addr; @@ -4970,18 +4969,15 @@ static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev, iova = rounddown(pcie_base_addr, PAGE_SIZE); } - ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE), - PAGE_SIZE, IOMMU_READ | IOMMU_WRITE); - if (ret < 0) { - PCIE_ERR(dev, - "PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n", - dev->rc_idx, ret); - return -ENOMEM; + addr = dma_map_resource(&pdev->dev, dev->msi_gicm_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL, 0); + if (dma_mapping_error(&pdev->dev, addr)) { + PCIE_ERR(dev, "PCIe: RC%d: failed to map QGIC address", + dev->rc_idx); + return -EIO; } - gicm_db_offset = dev->msi_gicm_addr - - rounddown(dev->msi_gicm_addr, PAGE_SIZE); - msg->address_lo = iova + gicm_db_offset; + msg->address_lo = iova + addr; return 0; } -- GitLab From a243084622ab964a2fd52d311753bf6a3315d725 Mon Sep 17 00:00:00 2001 From: Dhaval Patel Date: Thu, 15 Jun 2017 14:32:36 -0700 Subject: [PATCH 148/786] drm/msm: register sde rsc, dsi and phy to dbg dump Add sde rsc, dsi and phy to debug dump facility. It also adds "dump all" facility to avoid keeping all strings in dump API call. 
Change-Id: I596e4bcf7a2959f8d79dc945a99556d7d6d0139b Signed-off-by: Dhaval Patel --- drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c | 11 +++- drivers/gpu/drm/msm/dsi-staging/dsi_defs.h | 1 + drivers/gpu/drm/msm/dsi-staging/dsi_phy.c | 7 +++ drivers/gpu/drm/msm/msm_drv.c | 41 ++++++++++---- drivers/gpu/drm/msm/msm_drv.h | 1 + drivers/gpu/drm/msm/sde/sde_kms.c | 61 +++++---------------- drivers/gpu/drm/msm/sde_dbg.c | 62 +++++++++++++++------- drivers/gpu/drm/msm/sde_rsc.c | 7 +++ 8 files changed, 114 insertions(+), 77 deletions(-) diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index da7a7c0a4f58..4788f3b46f11 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -31,6 +31,8 @@ #include "dsi_pwr.h" #include "dsi_catalog.h" +#include "sde_dbg.h" + #define DSI_CTRL_DEFAULT_LABEL "MDSS DSI CTRL" #define DSI_CTRL_TX_TO_MS 200 @@ -199,6 +201,7 @@ static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl, { int rc = 0; struct dentry *dir, *state_file, *reg_dump; + char dbg_name[DSI_DEBUG_NAME_LEN]; dir = debugfs_create_dir(dsi_ctrl->name, parent); if (IS_ERR_OR_NULL(dir)) { @@ -233,6 +236,11 @@ static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl, } dsi_ctrl->debugfs_root = dir; + + snprintf(dbg_name, DSI_DEBUG_NAME_LEN, "dsi%d_ctrl", + dsi_ctrl->cell_index); + sde_dbg_reg_register_base(dbg_name, dsi_ctrl->hw.base, + msm_iomap_size(dsi_ctrl->pdev, "dsi_ctrl")); error_remove_dir: debugfs_remove(dir); error: @@ -1296,8 +1304,7 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev) dsi_ctrl->pdev = pdev; platform_set_drvdata(pdev, dsi_ctrl); - - pr_debug("Probe successful for %s\n", dsi_ctrl->name); + pr_info("Probe successful for %s\n", dsi_ctrl->name); return 0; diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h index cf3631558806..77da9b4ad382 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h @@ -36,6 +36,7 @@ value;\ }) +#define DSI_DEBUG_NAME_LEN 32 /** * enum dsi_pixel_format - DSI pixel formats * @DSI_PIXEL_FORMAT_RGB565: diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c index a1a0e572d589..a91dba82184e 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c @@ -29,6 +29,8 @@ #include "dsi_pwr.h" #include "dsi_catalog.h" +#include "sde_dbg.h" + #define DSI_PHY_DEFAULT_LABEL "MDSS PHY CTRL" struct dsi_phy_list_item { @@ -547,6 +549,11 @@ void dsi_phy_put(struct msm_dsi_phy *dsi_phy) */ int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy) { + char dbg_name[DSI_DEBUG_NAME_LEN]; + + snprintf(dbg_name, DSI_DEBUG_NAME_LEN, "dsi%d_phy", dsi_phy->index); + sde_dbg_reg_register_base(dbg_name, dsi_phy->hw.base, + msm_iomap_size(dsi_phy->pdev, "dsi_phy")); return 0; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 747d9a624243..decdbe15ddf3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -229,6 +229,24 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, return ptr; } +unsigned long msm_iomap_size(struct platform_device *pdev, const char *name) +{ + struct resource *res; + + if (name) + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + else + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + dev_err(&pdev->dev, "failed to get memory resource: %s\n", + name); + 
return 0; + } + + return resource_size(res); +} + void msm_iounmap(struct platform_device *pdev, void __iomem *addr) { devm_iounmap(&pdev->dev, addr); @@ -382,10 +400,11 @@ static int msm_drm_uninit(struct device *dev) priv->vram.paddr, attrs); } + component_unbind_all(dev, ddev); + sde_dbg_destroy(); debugfs_remove_recursive(priv->debug_root); - component_unbind_all(dev, ddev); sde_power_client_destroy(&priv->phandle, priv->pclient); sde_power_resource_deinit(pdev, &priv->phandle); @@ -579,6 +598,15 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) goto power_client_fail; } + dbg_power_ctrl.handle = &priv->phandle; + dbg_power_ctrl.client = priv->pclient; + dbg_power_ctrl.enable_fn = msm_power_enable_wrapper; + ret = sde_dbg_init(&pdev->dev, &dbg_power_ctrl); + if (ret) { + dev_err(dev, "failed to init sde dbg: %d\n", ret); + goto dbg_init_fail; + } + /* Bind all our sub-components: */ ret = msm_component_bind_all(dev, ddev); if (ret) @@ -588,15 +616,6 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) if (ret) goto fail; - dbg_power_ctrl.handle = &priv->phandle; - dbg_power_ctrl.client = priv->pclient; - dbg_power_ctrl.enable_fn = msm_power_enable_wrapper; - ret = sde_dbg_init(&pdev->dev, &dbg_power_ctrl); - if (ret) { - dev_err(dev, "failed to init sde dbg: %d\n", ret); - goto fail; - } - switch (get_mdp_ver(pdev)) { case KMS_MDP4: kms = mdp4_kms_init(ddev); @@ -722,6 +741,8 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) msm_drm_uninit(dev); return ret; bind_fail: + sde_dbg_destroy(); +dbg_init_fail: sde_power_client_destroy(&priv->phandle, priv->pclient); power_client_fail: sde_power_resource_deinit(pdev, &priv->phandle); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 77dde559aa71..eddd1809b783 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -771,6 +771,7 @@ static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {} void __iomem *msm_ioremap(struct platform_device *pdev, const char *name, const char *dbgname); +unsigned long msm_iomap_size(struct platform_device *pdev, const char *name); void msm_iounmap(struct platform_device *dev, void __iomem *addr); void msm_writel(u32 data, void __iomem *addr); u32 msm_readl(const void __iomem *addr); diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index bda89cff1703..26125d8d9e5d 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1462,43 +1462,6 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) return ret; } -static void __iomem *_sde_kms_ioremap(struct platform_device *pdev, - const char *name, unsigned long *out_size) -{ - struct resource *res; - unsigned long size; - void __iomem *ptr; - - if (out_size) - *out_size = 0; - - if (name) - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); - else - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - if (!res) { - /* availability depends on platform */ - SDE_DEBUG("failed to get memory resource: %s\n", name); - return ERR_PTR(-EINVAL); - } - - size = resource_size(res); - - ptr = devm_ioremap_nocache(&pdev->dev, res->start, size); - if (!ptr) { - SDE_ERROR("failed to ioremap: %s\n", name); - return ERR_PTR(-ENOMEM); - } - - SDE_DEBUG("IO:region %s %p %08lx\n", name, ptr, size); - - if (out_size) - *out_size = size; - - return ptr; -} - static void sde_kms_handle_power_event(u32 event_type, void *usr) { struct sde_kms *sde_kms = usr; @@ -1535,8 +1498,7 @@ 
static int sde_kms_hw_init(struct msm_kms *kms) goto end; } - sde_kms->mmio = _sde_kms_ioremap(dev->platformdev, "mdp_phys", - &sde_kms->mmio_len); + sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "mdp_phys"); if (IS_ERR(sde_kms->mmio)) { rc = PTR_ERR(sde_kms->mmio); SDE_ERROR("mdp register memory map failed: %d\n", rc); @@ -1544,32 +1506,36 @@ static int sde_kms_hw_init(struct msm_kms *kms) goto error; } DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio); + sde_kms->mmio_len = msm_iomap_size(dev->platformdev, "mdp_phys"); rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio, sde_kms->mmio_len); if (rc) SDE_ERROR("dbg base register kms failed: %d\n", rc); - sde_kms->vbif[VBIF_RT] = _sde_kms_ioremap(dev->platformdev, "vbif_phys", - &sde_kms->vbif_len[VBIF_RT]); + sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev, "vbif_phys", + "vbif_phys"); if (IS_ERR(sde_kms->vbif[VBIF_RT])) { rc = PTR_ERR(sde_kms->vbif[VBIF_RT]); SDE_ERROR("vbif register memory map failed: %d\n", rc); sde_kms->vbif[VBIF_RT] = NULL; goto error; } - + sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(dev->platformdev, + "vbif_phys"); rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT], sde_kms->vbif_len[VBIF_RT]); if (rc) SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc); - sde_kms->vbif[VBIF_NRT] = _sde_kms_ioremap(dev->platformdev, - "vbif_nrt_phys", &sde_kms->vbif_len[VBIF_NRT]); + sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev, "vbif_nrt_phys", + "vbif_nrt_phys"); if (IS_ERR(sde_kms->vbif[VBIF_NRT])) { sde_kms->vbif[VBIF_NRT] = NULL; SDE_DEBUG("VBIF NRT is not defined"); } else { + sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(dev->platformdev, + "vbif_nrt_phys"); rc = sde_dbg_reg_register_base("vbif_nrt", sde_kms->vbif[VBIF_NRT], sde_kms->vbif_len[VBIF_NRT]); @@ -1578,19 +1544,20 @@ static int sde_kms_hw_init(struct msm_kms *kms) rc); } - sde_kms->reg_dma = _sde_kms_ioremap(dev->platformdev, "regdma_phys", - &sde_kms->reg_dma_len); + sde_kms->reg_dma = msm_ioremap(dev->platformdev, "regdma_phys", + "regdma_phys"); if (IS_ERR(sde_kms->reg_dma)) { sde_kms->reg_dma = NULL; SDE_DEBUG("REG_DMA is not defined"); } else { + sde_kms->reg_dma_len = msm_iomap_size(dev->platformdev, + "regdma_phys"); rc = sde_dbg_reg_register_base("vbif_nrt", sde_kms->reg_dma, sde_kms->reg_dma_len); if (rc) SDE_ERROR("dbg base register reg_dma failed: %d\n", rc); - } sde_kms->core_client = sde_power_client_create(&priv->phandle, "core"); diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c index a420ffb92e73..bcd3eaa187ae 100644 --- a/drivers/gpu/drm/msm/sde_dbg.c +++ b/drivers/gpu/drm/msm/sde_dbg.c @@ -160,6 +160,7 @@ struct sde_dbg_vbif_debug_bus { * @enable_reg_dump: whether to dump registers into memory, kernel log, or both * @dbgbus_sde: debug bus structure for the sde * @dbgbus_vbif_rt: debug bus structure for the realtime vbif + * @dump_all: dump all entries in register dump */ static struct sde_dbg_base { struct sde_dbg_evtlog *evtlog; @@ -176,6 +177,7 @@ static struct sde_dbg_base { struct sde_dbg_sde_debug_bus dbgbus_sde; struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt; + bool dump_all; } sde_dbg_base; /* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */ @@ -2427,18 +2429,22 @@ static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus) */ static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[], u32 len, bool do_panic, const char *name, bool dump_dbgbus_sde, - bool dump_dbgbus_vbif_rt) + bool dump_dbgbus_vbif_rt, bool 
dump_all) { int i; - for (i = 0; i < len; i++) { - if (blk_arr[i] != NULL) - _sde_dump_reg_by_ranges(blk_arr[i], - sde_dbg_base.enable_reg_dump); - } - sde_evtlog_dump_all(sde_dbg_base.evtlog); + if (dump_all || !blk_arr || !len) { + _sde_dump_reg_all(); + } else { + for (i = 0; i < len; i++) { + if (blk_arr[i] != NULL) + _sde_dump_reg_by_ranges(blk_arr[i], + sde_dbg_base.enable_reg_dump); + } + } + if (dump_dbgbus_sde) _sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde); @@ -2459,7 +2465,8 @@ static void _sde_dump_work(struct work_struct *work) ARRAY_SIZE(sde_dbg_base.req_dump_blks), sde_dbg_base.work_panic, "evtlog_workitem", sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work, - sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work); + sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work, + sde_dbg_base.dump_all); } void sde_dbg_dump(bool queue_work, const char *name, ...) @@ -2468,6 +2475,7 @@ void sde_dbg_dump(bool queue_work, const char *name, ...) bool do_panic = false; bool dump_dbgbus_sde = false; bool dump_dbgbus_vbif_rt = false; + bool dump_all = false; va_list args; char *blk_name = NULL; struct sde_dbg_reg_base *blk_base = NULL; @@ -2485,6 +2493,7 @@ void sde_dbg_dump(bool queue_work, const char *name, ...) memset(sde_dbg_base.req_dump_blks, 0, sizeof(sde_dbg_base.req_dump_blks)); + sde_dbg_base.dump_all = false; va_start(args, name); i = 0; @@ -2507,6 +2516,9 @@ void sde_dbg_dump(bool queue_work, const char *name, ...) } } + if (!strcmp(blk_name, "all")) + dump_all = true; + if (!strcmp(blk_name, "dbg_bus")) dump_dbgbus_sde = true; @@ -2528,7 +2540,7 @@ void sde_dbg_dump(bool queue_work, const char *name, ...) schedule_work(&sde_dbg_base.dump_work); } else { _sde_dump_array(blk_arr, blk_len, do_panic, name, - dump_dbgbus_sde, dump_dbgbus_vbif_rt); + dump_dbgbus_sde, dump_dbgbus_vbif_rt, dump_all); } } @@ -2577,15 +2589,8 @@ static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff, static ssize_t sde_evtlog_dump_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { - _sde_dump_reg_all(); - - sde_evtlog_dump_all(sde_dbg_base.evtlog); - - _sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde); - _sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt); - - if (sde_dbg_base.panic_on_err) - panic("sde"); + _sde_dump_array(NULL, 0, sde_dbg_base.panic_on_err, "dump_debugfs", + true, true, true); return count; } @@ -3030,6 +3035,26 @@ int sde_dbg_init(struct device *dev, struct sde_dbg_power_ctrl *power_ctrl) return 0; } +static void sde_dbg_reg_base_destroy(void) +{ + struct sde_dbg_reg_range *range_node, *range_tmp; + struct sde_dbg_reg_base *blk_base, *blk_tmp; + struct sde_dbg_base *dbg_base = &sde_dbg_base; + + if (!dbg_base) + return; + + list_for_each_entry_safe(blk_base, blk_tmp, &dbg_base->reg_base_list, + reg_base_head) { + list_for_each_entry_safe(range_node, range_tmp, + &blk_base->sub_range_list, head) { + list_del(&range_node->head); + kfree(range_node); + } + list_del(&blk_base->reg_base_head); + kfree(blk_base); + } +} /** * sde_dbg_destroy - destroy sde debug facilities */ @@ -3039,6 +3064,7 @@ void sde_dbg_destroy(void) sde_dbg_base_evtlog = NULL; sde_evtlog_destroy(sde_dbg_base.evtlog); sde_dbg_base.evtlog = NULL; + sde_dbg_reg_base_destroy(); } int sde_dbg_reg_register_base(const char *name, void __iomem *base, diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c index caa8cdf3cd34..f26605ad1800 100644 --- a/drivers/gpu/drm/msm/sde_rsc.c +++ b/drivers/gpu/drm/msm/sde_rsc.c @@ -30,6 +30,9 @@ 
#include "sde_rsc_priv.h" #include "sde_dbg.h" +#define SDE_RSC_DRV_DBG_NAME "sde_rsc_drv" +#define SDE_RSC_WRAPPER_DBG_NAME "sde_rsc_wrapper" + /* worst case time to execute the one tcs vote(sleep/wake) - ~1ms */ #define SINGLE_TCS_EXECUTION_TIME 1064000 @@ -1063,6 +1066,10 @@ static int sde_rsc_bind(struct device *dev, rsc->master_drm = drm; mutex_unlock(&rsc->client_lock); + sde_dbg_reg_register_base(SDE_RSC_DRV_DBG_NAME, rsc->drv_io.base, + rsc->drv_io.len); + sde_dbg_reg_register_base(SDE_RSC_WRAPPER_DBG_NAME, + rsc->wrapper_io.base, rsc->wrapper_io.len); return 0; } -- GitLab From 96b5b27b2e0f1ffd6ba85db8f8102d15ae8703ea Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Wed, 14 Jun 2017 10:29:48 -0400 Subject: [PATCH 149/786] drm/msm: add ability to flag properties as dirty This patch adds a new msm_property function to allow clients to explicitly flag a property as being dirty. This is helpful if certain properties need to be conditionally reevaluated even if their values haven't changed. CRs-Fixed: 2062083 Change-Id: Ic510c618587508f8602bfb03ad57a1047309e4f2 Signed-off-by: Clarence Ip --- drivers/gpu/drm/msm/msm_prop.c | 12 ++++++++++++ drivers/gpu/drm/msm/msm_prop.h | 10 +++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c index 663781ff6bdb..f2996dd04bea 100644 --- a/drivers/gpu/drm/msm/msm_prop.c +++ b/drivers/gpu/drm/msm/msm_prop.c @@ -371,6 +371,18 @@ int msm_property_index(struct msm_property_info *info, return rc; } +int msm_property_set_dirty(struct msm_property_info *info, int property_idx) +{ + if (!info) { + DRM_ERROR("invalid property info\n"); + return -EINVAL; + } + mutex_lock(&info->property_lock); + _msm_property_set_dirty_no_lock(info, property_idx); + mutex_unlock(&info->property_lock); + return 0; +} + int msm_property_atomic_set(struct msm_property_info *info, uint64_t *property_values, struct drm_property_blob **property_blobs, diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h index dbe28bdf5638..e54c796e1dcd 100644 --- a/drivers/gpu/drm/msm/msm_prop.h +++ b/drivers/gpu/drm/msm/msm_prop.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -265,6 +265,14 @@ int msm_property_install_get_status(struct msm_property_info *info); int msm_property_index(struct msm_property_info *info, struct drm_property *property); +/** + * msm_property_set_dirty - forcibly flag a property as dirty + * @info: Pointer to property info container struct + * @property_idx: Property index + * Returns: Zero on success + */ +int msm_property_set_dirty(struct msm_property_info *info, int property_idx); + /** * msm_property_atomic_set - helper function for atomic property set callback * @info: Pointer to property info container struct -- GitLab From 3b1e2c6a52db9b0e601bb8bccd92fb30f73f9df0 Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Wed, 14 Jun 2017 11:20:44 -0400 Subject: [PATCH 150/786] drm/msm/sde: don't dirty scaler properties if null Avoid always flagging scaler configuration properties if the user space keeps setting it to a null value. 
These properties are meant to support incoming user pointers rather than traditional drm blob properties, so avoid recalculating the plane programming if they are set to a null value more than once. CRs-Fixed: 2062083 Change-Id: I88b7a3065670e7fa8827c91dbe856b428127de1b Signed-off-by: Clarence Ip --- drivers/gpu/drm/msm/sde/sde_plane.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 3eb45188ff7d..0bb3aeb91a6d 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -2861,7 +2861,7 @@ static int _sde_plane_validate_scaler_v2(struct sde_plane *psde, "req %d/%d, fetch %d/%d, src %dx%d\n", hor_req_pixels, vert_req_pixels, hor_fetch_pixels, vert_fetch_pixels, - src_w, src_h); + img_w, img_h); return -EINVAL; } @@ -3566,7 +3566,7 @@ static void _sde_plane_install_properties(struct drm_plane *plane, } if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) { - msm_property_install_volatile_range( + msm_property_install_range( &psde->property_info, "scaler_v2", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2); msm_property_install_blob(&psde->property_info, @@ -3578,7 +3578,7 @@ static void _sde_plane_install_properties(struct drm_plane *plane, "lut_sep", 0, PLANE_PROP_SCALER_LUT_SEP); } else if (psde->features & SDE_SSPP_SCALER) { - msm_property_install_volatile_range( + msm_property_install_range( &psde->property_info, "scaler_v1", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V1); } @@ -3783,6 +3783,9 @@ static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr) return; } + /* force property to be dirty, even if the pointer didn't change */ + msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V1); + /* populate from user space */ pe = &(psde->pixel_ext); memset(pe, 0, sizeof(struct sde_hw_pixel_ext)); @@ -3846,6 +3849,9 @@ static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde, return; } + /* force property to be dirty, even if the pointer didn't change */ + msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V2); + /* populate from user space */ pe = &(psde->pixel_ext); memset(pe, 0, sizeof(struct sde_hw_pixel_ext)); -- GitLab From ecd870bef47a09d5d4cc9f36972c4e78e5b28526 Mon Sep 17 00:00:00 2001 From: Alan Kwong Date: Fri, 16 Jun 2017 15:31:28 -0400 Subject: [PATCH 151/786] drm/msm: increase minimum bandwidth to match clock plan Current minimum bandwidth of 2MBps translates to 19.2MHz AXI clock, which is below the minimum in clock plan. Increase minimum bandwidth to 1.6GBps so AXI clock will stay at the minimum of 60MHz as in the clock plan. 
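For a rough sense of the arithmetic behind the new quota (a sketch only: the 32-byte port width below is an assumption for illustration, the real width comes from the target's bus topology, and the bus driver rounds the derived rate up to the nearest supported frequency):

/* illustrative conversion of a bandwidth vote into an AXI clock request */
unsigned long long ib_vote_bps = 1600000000ULL; /* new 1.6 GBps minimum quota */
unsigned int port_width_bytes = 32;             /* assumed AXI port width, target specific */
unsigned long long axi_hz = ib_vote_bps / port_width_bytes; /* 50 MHz here, rounded up to the 60 MHz floor */
/* the old 2 MBps vote mapped to the lowest available rung (19.2 MHz),
 * which is below the 60 MHz minimum in the clock plan */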
CRs-Fixed: 2037879 Change-Id: Ic84db29a54970644ee2c74da35636e2da1e348e3 Signed-off-by: Alan Kwong --- drivers/gpu/drm/msm/sde_power_handle.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h index c526b712693c..0db10b0cdc96 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.h +++ b/drivers/gpu/drm/msm/sde_power_handle.h @@ -16,9 +16,9 @@ #define MAX_CLIENT_NAME_LEN 128 -#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 2000000 +#define SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 1600000000 #define SDE_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0 -#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 2000000 +#define SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000 #define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0 #include -- GitLab From 0230a10554045518e1669ef2dd6c327bd5e9de32 Mon Sep 17 00:00:00 2001 From: Alan Kwong Date: Tue, 16 May 2017 11:36:44 -0700 Subject: [PATCH 152/786] drm/msm: add split voting support to sde power handle Split voting allows separate votes to be applied for each section of bus paths. This allows better optimization on bus voting. Add new data bus identifiers to describe new bus voting topology. Rename CRTC bandwidth control properties to dram_ab and dram_ib to better reflect the underlying hardware being controlled. CRs-Fixed: 2037879 Change-Id: I43224d3a9cd75334bc8d1031c3eb9b8286d1a376 Signed-off-by: Alan Kwong --- .../bindings/display/msm/sde-rsc.txt | 24 ++ .../devicetree/bindings/display/msm/sde.txt | 22 ++ drivers/gpu/drm/msm/msm_drv.h | 6 +- drivers/gpu/drm/msm/sde/sde_core_perf.c | 263 +++++++++++------- drivers/gpu/drm/msm/sde/sde_core_perf.h | 4 +- drivers/gpu/drm/msm/sde/sde_crtc.c | 39 ++- drivers/gpu/drm/msm/sde/sde_crtc.h | 4 +- drivers/gpu/drm/msm/sde/sde_trace.h | 32 ++- drivers/gpu/drm/msm/sde_power_handle.c | 83 ++++-- drivers/gpu/drm/msm/sde_power_handle.h | 27 +- drivers/gpu/drm/msm/sde_rsc.c | 6 +- include/linux/sde_rsc.h | 5 +- 12 files changed, 363 insertions(+), 152 deletions(-) diff --git a/Documentation/devicetree/bindings/display/msm/sde-rsc.txt b/Documentation/devicetree/bindings/display/msm/sde-rsc.txt index 7e54fdd91a1c..55d18cf6fe32 100644 --- a/Documentation/devicetree/bindings/display/msm/sde-rsc.txt +++ b/Documentation/devicetree/bindings/display/msm/sde-rsc.txt @@ -29,6 +29,10 @@ Optional properties: Bus Scaling Subnodes: - qcom,sde-data-bus: Property to provide Bus scaling for data bus access for sde blocks. +- qcom,sde-llcc-bus: Property to provide Bus scaling for data bus access for + mnoc to llcc. +- qcom,sde-ebi-bus: Property to provide Bus scaling for data bus access for + llcc to ebi. Bus Scaling Data: - qcom,msm-bus,name: String property describing client name. 
@@ -69,4 +73,24 @@ Example: <22 512 0 6400000>, <23 512 0 6400000>, <22 512 0 6400000>, <23 512 0 6400000>; }; + qcom,sde-llcc-bus { + qcom,msm-bus,name = "sde_rsc_llcc"; + qcom,msm-bus,active-only; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <20001 20513 0 0>, + <20001 20513 0 6400000>, + <20001 20513 0 6400000>; + }; + qcom,sde-ebi-bus { + qcom,msm-bus,name = "sde_rsc_ebi"; + qcom,msm-bus,active-only; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <20000 20512 0 0>, + <20000 20512 0 6400000>, + <20000 20512 0 6400000>; + }; }; diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt index 47fc4653f8d8..863a1696b8c2 100644 --- a/Documentation/devicetree/bindings/display/msm/sde.txt +++ b/Documentation/devicetree/bindings/display/msm/sde.txt @@ -341,6 +341,10 @@ Bus Scaling Subnodes: mdss blocks. - qcom,sde-data-bus: Property to provide Bus scaling for data bus access for mdss blocks. +- qcom,sde-llcc-bus: Property to provide Bus scaling for data bus access for + mnoc to llcc. +- qcom,sde-ebi-bus: Property to provide Bus scaling for data bus access for + llcc to ebi. - qcom,sde-inline-rotator: A 2 cell property, with format of (rotator phandle, instance id), of inline rotator device. @@ -638,6 +642,24 @@ Example: <22 512 0 6400000>, <23 512 0 6400000>, <25 512 0 6400000>; }; + qcom,sde-llcc-bus { + qcom,msm-bus,name = "mdss_sde_llcc"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <132 770 0 0>, + <132 770 0 6400000>, + <132 770 0 6400000>; + }; + qcom,sde-ebi-bus { + qcom,msm-bus,name = "mdss_sde_ebi"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <129 512 0 0>, + <129 512 0 6400000>, + <129 512 0 6400000>; + }; qcom,sde-reg-bus { /* Reg Bus Scale Settings */ diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 77dde559aa71..ddd68921ef3b 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -136,8 +136,10 @@ enum msm_mdp_crtc_property { CRTC_PROP_CORE_CLK, CRTC_PROP_CORE_AB, CRTC_PROP_CORE_IB, - CRTC_PROP_MEM_AB, - CRTC_PROP_MEM_IB, + CRTC_PROP_LLCC_AB, + CRTC_PROP_LLCC_IB, + CRTC_PROP_DRAM_AB, + CRTC_PROP_DRAM_IB, CRTC_PROP_ROT_PREFILL_BW, CRTC_PROP_ROT_CLK, CRTC_PROP_ROI_V1, diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c index b1f8b0f28e39..71dfc126ec0e 100644 --- a/drivers/gpu/drm/msm/sde/sde_core_perf.c +++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c @@ -110,6 +110,7 @@ static void _sde_core_perf_calc_crtc(struct sde_kms *kms, struct sde_core_perf_params *perf) { struct sde_crtc_state *sde_cstate; + int i; if (!kms || !kms->catalog || !crtc || !state || !perf) { SDE_ERROR("invalid parameters\n"); @@ -119,29 +120,64 @@ static void _sde_core_perf_calc_crtc(struct sde_kms *kms, sde_cstate = to_sde_crtc_state(state); memset(perf, 0, sizeof(struct sde_core_perf_params)); - perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB); - perf->max_per_pipe_ib = + perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC] = + sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB); + perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC] = + sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB); + + if (sde_cstate->bw_split_vote) { + perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] = + sde_crtc_get_property(sde_cstate, CRTC_PROP_LLCC_AB); + 
perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] = + sde_crtc_get_property(sde_cstate, CRTC_PROP_LLCC_IB); + perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] = + sde_crtc_get_property(sde_cstate, CRTC_PROP_DRAM_AB); + perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] = + sde_crtc_get_property(sde_cstate, CRTC_PROP_DRAM_IB); + } else { + perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC] = + sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB); + perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC] = + sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB); + perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI] = + sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB); + perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI] = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB); + } + perf->core_clk_rate = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK); if (!sde_cstate->bw_control) { - perf->bw_ctl = kms->catalog->perf.max_bw_high * 1000ULL; - perf->max_per_pipe_ib = perf->bw_ctl; + for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + perf->bw_ctl[i] = kms->catalog->perf.max_bw_high * + 1000ULL; + perf->max_per_pipe_ib[i] = perf->bw_ctl[i]; + } perf->core_clk_rate = kms->perf.max_core_clk_rate; } else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_MINIMUM) { - perf->bw_ctl = 0; - perf->max_per_pipe_ib = 0; + for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + perf->bw_ctl[i] = 0; + perf->max_per_pipe_ib[i] = 0; + } perf->core_clk_rate = 0; } else if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) { - perf->bw_ctl = kms->perf.fix_core_ab_vote; - perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote; + for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + perf->bw_ctl[i] = kms->perf.fix_core_ab_vote; + perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote; + } perf->core_clk_rate = kms->perf.fix_core_clk_rate; } - SDE_DEBUG("crtc=%d clk_rate=%llu ib=%llu ab=%llu\n", + SDE_DEBUG( + "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n", crtc->base.id, perf->core_clk_rate, - perf->max_per_pipe_ib, perf->bw_ctl); + perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MNOC], + perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC], + perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_LLCC], + perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC], + perf->max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_EBI], + perf->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI]); } int sde_core_perf_crtc_check(struct drm_crtc *crtc, @@ -154,6 +190,7 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc, struct sde_crtc_state *sde_cstate; struct drm_crtc *tmp_crtc; struct sde_kms *kms; + int i; if (!crtc || !state) { SDE_ERROR("invalid crtc\n"); @@ -175,39 +212,46 @@ int sde_core_perf_crtc_check(struct drm_crtc *crtc, /* obtain new values */ _sde_core_perf_calc_crtc(kms, crtc, state, &sde_cstate->new_perf); - bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl; - curr_client_type = sde_crtc_get_client_type(crtc); + for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC; + i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl[i]; + curr_client_type = sde_crtc_get_client_type(crtc); - drm_for_each_crtc(tmp_crtc, crtc->dev) { - if (_sde_core_perf_crtc_is_power_on(tmp_crtc) && - (sde_crtc_get_client_type(tmp_crtc) == curr_client_type) && - (tmp_crtc != crtc)) { - struct sde_crtc_state *tmp_cstate = + drm_for_each_crtc(tmp_crtc, crtc->dev) { + if (_sde_core_perf_crtc_is_power_on(tmp_crtc) && + (sde_crtc_get_client_type(tmp_crtc) == + curr_client_type) && + (tmp_crtc != crtc)) { + struct sde_crtc_state *tmp_cstate = 
to_sde_crtc_state(tmp_crtc->state); - bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl; + bw_sum_of_intfs += + tmp_cstate->new_perf.bw_ctl[i]; + } } - } - - /* convert bandwidth to kb */ - bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000); - SDE_DEBUG("calculated bandwidth=%uk\n", bw); - - is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO; - threshold = (is_video_mode || - _sde_core_video_mode_intf_connected(crtc)) ? - kms->catalog->perf.max_bw_low : kms->catalog->perf.max_bw_high; - SDE_DEBUG("final threshold bw limit = %d\n", threshold); - - if (!sde_cstate->bw_control) { - SDE_DEBUG("bypass bandwidth check\n"); - } else if (!threshold) { - SDE_ERROR("no bandwidth limits specified\n"); - return -E2BIG; - } else if (bw > threshold) { - SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, threshold); - return -E2BIG; + /* convert bandwidth to kb */ + bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000); + SDE_DEBUG("calculated bandwidth=%uk\n", bw); + + is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO; + threshold = (is_video_mode || + _sde_core_video_mode_intf_connected(crtc)) ? + kms->catalog->perf.max_bw_low : + kms->catalog->perf.max_bw_high; + + SDE_DEBUG("final threshold bw limit = %d\n", threshold); + + if (!sde_cstate->bw_control) { + SDE_DEBUG("bypass bandwidth check\n"); + } else if (!threshold) { + SDE_ERROR("no bandwidth limits specified\n"); + return -E2BIG; + } else if (bw > threshold) { + SDE_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw, + threshold); + return -E2BIG; + } } return 0; @@ -240,10 +284,10 @@ static inline enum sde_crtc_client_type _get_sde_client_type( } static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms, - struct drm_crtc *crtc) + struct drm_crtc *crtc, u32 bus_id) { u64 bw_sum_of_intfs = 0, bus_ab_quota, bus_ib_quota; - struct sde_core_perf_params perf = {0}; + struct sde_core_perf_params perf = { { 0 } }; enum sde_crtc_client_type client_vote, curr_client_type = sde_crtc_get_client_type(crtc); struct drm_crtc *tmp_crtc; @@ -256,19 +300,20 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms, &kms->perf)) { sde_cstate = to_sde_crtc_state(tmp_crtc->state); - perf.max_per_pipe_ib = max(perf.max_per_pipe_ib, - sde_cstate->new_perf.max_per_pipe_ib); + perf.max_per_pipe_ib[bus_id] = + max(perf.max_per_pipe_ib[bus_id], + sde_cstate->new_perf.max_per_pipe_ib[bus_id]); - bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl; + bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl[bus_id]; - SDE_DEBUG("crtc=%d bw=%llu\n", - tmp_crtc->base.id, - sde_cstate->new_perf.bw_ctl); + SDE_DEBUG("crtc=%d bus_id=%d bw=%llu\n", + tmp_crtc->base.id, bus_id, + sde_cstate->new_perf.bw_ctl[bus_id]); } } bus_ab_quota = max(bw_sum_of_intfs, kms->perf.perf_tune.min_bus_vote); - bus_ib_quota = perf.max_per_pipe_ib; + bus_ib_quota = perf.max_per_pipe_ib[bus_id]; if (kms->perf.perf_tune.mode == SDE_PERF_MODE_FIXED) { bus_ab_quota = kms->perf.fix_core_ab_vote; @@ -280,25 +325,25 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms, case NRT_CLIENT: sde_power_data_bus_set_quota(&priv->phandle, kms->core_client, SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT, - bus_ab_quota, bus_ib_quota); - SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "nrt", - bus_ab_quota, bus_ib_quota); + bus_id, bus_ab_quota, bus_ib_quota); + SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "nrt", + bus_id, bus_ab_quota, bus_ib_quota); break; case RT_CLIENT: sde_power_data_bus_set_quota(&priv->phandle, kms->core_client, SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, - bus_ab_quota, bus_ib_quota); - 
SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "rt", - bus_ab_quota, bus_ib_quota); + bus_id, bus_ab_quota, bus_ib_quota); + SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "rt", + bus_id, bus_ab_quota, bus_ib_quota); break; case RT_RSC_CLIENT: sde_cstate = to_sde_crtc_state(crtc->state); - sde_rsc_client_vote(sde_cstate->rsc_client, bus_ab_quota, - bus_ib_quota); - SDE_DEBUG("client:%s ab=%llu ib=%llu\n", "rt_rsc", - bus_ab_quota, bus_ib_quota); + sde_rsc_client_vote(sde_cstate->rsc_client, + bus_id, bus_ab_quota, bus_ib_quota); + SDE_DEBUG("client:%s bus_id=%d ab=%llu ib=%llu\n", "rt_rsc", + bus_id, bus_ab_quota, bus_ib_quota); break; default: @@ -311,10 +356,12 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms, case DISP_RSC_MODE: sde_power_data_bus_set_quota(&priv->phandle, kms->core_client, - SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT, 0, 0); + SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT, + bus_id, 0, 0); sde_power_data_bus_set_quota(&priv->phandle, kms->core_client, - SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, 0, 0); + SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, + bus_id, 0, 0); kms->perf.bw_vote_mode_updated = false; break; @@ -322,7 +369,7 @@ static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms, sde_cstate = to_sde_crtc_state(crtc->state); if (sde_cstate->rsc_client) { sde_rsc_client_vote(sde_cstate->rsc_client, - 0, 0); + bus_id, 0, 0); kms->perf.bw_vote_mode_updated = false; } break; @@ -347,6 +394,7 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc) struct sde_crtc *sde_crtc; struct sde_crtc_state *sde_cstate; struct sde_kms *kms; + int i; if (!crtc) { SDE_ERROR("invalid crtc\n"); @@ -382,9 +430,11 @@ void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc) /* Release the bandwidth */ if (kms->perf.enable_bw_release) { trace_sde_cmd_release_bw(crtc->base.id); - sde_crtc->cur_perf.bw_ctl = 0; SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id); - _sde_core_perf_crtc_update_bus(kms, crtc); + for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + sde_crtc->cur_perf.bw_ctl[i] = 0; + _sde_core_perf_crtc_update_bus(kms, crtc, i); + } } } @@ -419,7 +469,7 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc, u64 clk_rate = 0; struct sde_crtc *sde_crtc; struct sde_crtc_state *sde_cstate; - int ret; + int ret, i; struct msm_drm_private *priv; struct sde_kms *kms; @@ -449,38 +499,52 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc, new = &sde_cstate->new_perf; if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) { - /* - * cases for bus bandwidth update. - * 1. new bandwidth vote - "ab or ib vote" is higher - * than current vote for update request. - * 2. new bandwidth vote - "ab or ib vote" is lower - * than current vote at end of commit or stop. - */ - if ((params_changed && ((new->bw_ctl > old->bw_ctl) || - (new->max_per_pipe_ib > old->max_per_pipe_ib))) || - (!params_changed && ((new->bw_ctl < old->bw_ctl) || - (new->max_per_pipe_ib < old->max_per_pipe_ib)))) { - SDE_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n", - crtc->base.id, params_changed, new->bw_ctl, - old->bw_ctl); - old->bw_ctl = new->bw_ctl; - old->max_per_pipe_ib = new->max_per_pipe_ib; - update_bus = 1; - } + for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + /* + * cases for bus bandwidth update. + * 1. new bandwidth vote - "ab or ib vote" is higher + * than current vote for update request. + * 2. new bandwidth vote - "ab or ib vote" is lower + * than current vote at end of commit or stop. 
+ */ + if ((params_changed && ((new->bw_ctl[i] > + old->bw_ctl[i]) || + (new->max_per_pipe_ib[i] > + old->max_per_pipe_ib[i]))) || + (!params_changed && ((new->bw_ctl[i] < + old->bw_ctl[i]) || + (new->max_per_pipe_ib[i] < + old->max_per_pipe_ib[i])))) { + SDE_DEBUG( + "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n", + crtc->base.id, params_changed, + new->bw_ctl[i], old->bw_ctl[i]); + old->bw_ctl[i] = new->bw_ctl[i]; + old->max_per_pipe_ib[i] = + new->max_per_pipe_ib[i]; + update_bus |= BIT(i); + } - /* display rsc override during solver mode */ - if (kms->perf.bw_vote_mode == DISP_RSC_MODE && + /* display rsc override during solver mode */ + if (kms->perf.bw_vote_mode == DISP_RSC_MODE && get_sde_rsc_current_state(SDE_RSC_INDEX) == - SDE_RSC_CMD_STATE) { - /* update new bandwdith in all cases */ - if (params_changed && ((new->bw_ctl != old->bw_ctl) || - (new->max_per_pipe_ib != old->max_per_pipe_ib))) { - old->bw_ctl = new->bw_ctl; - old->max_per_pipe_ib = new->max_per_pipe_ib; - update_bus = 1; - /* reduce bw vote is not required in solver mode */ - } else if (!params_changed) { - update_bus = 0; + SDE_RSC_CMD_STATE) { + /* update new bandwidth in all cases */ + if (params_changed && ((new->bw_ctl[i] != + old->bw_ctl[i]) || + (new->max_per_pipe_ib[i] != + old->max_per_pipe_ib[i]))) { + old->bw_ctl[i] = new->bw_ctl[i]; + old->max_per_pipe_ib[i] = + new->max_per_pipe_ib[i]; + update_bus |= BIT(i); + /* + * reduce bw vote is not required in solver + * mode + */ + } else if (!params_changed) { + update_bus &= ~BIT(i); + } } } @@ -495,15 +559,20 @@ void sde_core_perf_crtc_update(struct drm_crtc *crtc, SDE_DEBUG("crtc=%d disable\n", crtc->base.id); memset(old, 0, sizeof(*old)); memset(new, 0, sizeof(*new)); - update_bus = 1; + update_bus = ~0; update_clk = 1; } - trace_sde_perf_crtc_update(crtc->base.id, new->bw_ctl, + trace_sde_perf_crtc_update(crtc->base.id, + new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MNOC], + new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_LLCC], + new->bw_ctl[SDE_POWER_HANDLE_DBUS_ID_EBI], new->core_clk_rate, stop_req, update_bus, update_clk); - if (update_bus) - _sde_core_perf_crtc_update_bus(kms, crtc); + for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + if (update_bus & BIT(i)) + _sde_core_perf_crtc_update_bus(kms, crtc, i); + } /* * Update the clock after bandwidth vote to ensure diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h index 4a1bdad0c71b..589415c35fc8 100644 --- a/drivers/gpu/drm/msm/sde/sde_core_perf.h +++ b/drivers/gpu/drm/msm/sde/sde_core_perf.h @@ -30,8 +30,8 @@ * @core_clk_rate: core clock rate request */ struct sde_core_perf_params { - u64 max_per_pipe_ib; - u64 bw_ctl; + u64 max_per_pipe_ib[SDE_POWER_HANDLE_DBUS_ID_MAX]; + u64 bw_ctl[SDE_POWER_HANDLE_DBUS_ID_MAX]; u64 core_clk_rate; }; diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index e70829023d5e..e0ec4a7f4ad7 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -2482,6 +2482,7 @@ static void sde_crtc_disable(struct drm_crtc *crtc) /* disable clk & bw control until clk & bw properties are set */ cstate->bw_control = false; + cstate->bw_split_vote = false; spin_lock_irqsave(&sde_crtc->spin_lock, flags); list_for_each_entry(node, &sde_crtc->user_event_list, list) { @@ -2986,13 +2987,21 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, catalog->perf.max_bw_high * 1000ULL, CRTC_PROP_CORE_IB); msm_property_install_range(&sde_crtc->property_info, - "mem_ab", 0x0, 0, U64_MAX, + "llcc_ab", 
0x0, 0, U64_MAX, catalog->perf.max_bw_high * 1000ULL, - CRTC_PROP_MEM_AB); + CRTC_PROP_LLCC_AB); msm_property_install_range(&sde_crtc->property_info, - "mem_ib", 0x0, 0, U64_MAX, + "llcc_ib", 0x0, 0, U64_MAX, catalog->perf.max_bw_high * 1000ULL, - CRTC_PROP_MEM_IB); + CRTC_PROP_LLCC_IB); + msm_property_install_range(&sde_crtc->property_info, + "dram_ab", 0x0, 0, U64_MAX, + catalog->perf.max_bw_high * 1000ULL, + CRTC_PROP_DRAM_AB); + msm_property_install_range(&sde_crtc->property_info, + "dram_ib", 0x0, 0, U64_MAX, + catalog->perf.max_bw_high * 1000ULL, + CRTC_PROP_DRAM_IB); msm_property_install_range(&sde_crtc->property_info, "rot_prefill_bw", 0, 0, U64_MAX, catalog->perf.max_bw_high * 1000ULL, @@ -3120,10 +3129,15 @@ static int sde_crtc_atomic_set_property(struct drm_crtc *crtc, case CRTC_PROP_CORE_CLK: case CRTC_PROP_CORE_AB: case CRTC_PROP_CORE_IB: - case CRTC_PROP_MEM_AB: - case CRTC_PROP_MEM_IB: cstate->bw_control = true; break; + case CRTC_PROP_LLCC_AB: + case CRTC_PROP_LLCC_IB: + case CRTC_PROP_DRAM_AB: + case CRTC_PROP_DRAM_IB: + cstate->bw_control = true; + cstate->bw_split_vote = true; + break; default: /* nothing to do */ break; @@ -3475,15 +3489,22 @@ static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v) struct sde_crtc *sde_crtc = to_sde_crtc(crtc); struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state); struct sde_crtc_res *res; + int i; seq_printf(s, "num_connectors: %d\n", cstate->num_connectors); seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc)); seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc)); - seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl); seq_printf(s, "core_clk_rate: %llu\n", sde_crtc->cur_perf.core_clk_rate); - seq_printf(s, "max_per_pipe_ib: %llu\n", - sde_crtc->cur_perf.max_per_pipe_ib); + for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC; + i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + seq_printf(s, "bw_ctl[%s]: %llu\n", + sde_power_handle_get_dbus_name(i), + sde_crtc->cur_perf.bw_ctl[i]); + seq_printf(s, "max_per_pipe_ib[%s]: %llu\n", + sde_power_handle_get_dbus_name(i), + sde_crtc->cur_perf.max_per_pipe_ib[i]); + } seq_printf(s, "rp.%d: ", cstate->rp.sequence_id); list_for_each_entry(res, &cstate->rp.res_list, list) diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 38311c1094f2..045c1a7c0a6f 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -260,7 +260,8 @@ struct sde_crtc_respool { * @intf_mode : Interface mode of the primary connector * @rsc_client : sde rsc client when mode is valid * @is_ppsplit : Whether current topology requires PPSplit special handling - * @bw_control : true if bw/clk controlled by bw/clk properties + * @bw_control : true if bw/clk controlled by core bw/clk properties + * @bw_split_vote : true if bw controlled by llcc/dram bw properties * @crtc_roi : Current CRTC ROI. Possibly sub-rectangle of mode. * Origin top left of CRTC. * @lm_bounds : LM boundaries based on current mode full resolution, no ROI. 
@@ -287,6 +288,7 @@ struct sde_crtc_state { struct sde_rsc_client *rsc_client; bool rsc_update; bool bw_control; + bool bw_split_vote; bool is_ppsplit; struct sde_rect crtc_roi; diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h index 6962befdb5dd..e233fc7d9d3d 100644 --- a/drivers/gpu/drm/msm/sde/sde_trace.h +++ b/drivers/gpu/drm/msm/sde/sde_trace.h @@ -193,13 +193,16 @@ TRACE_EVENT(sde_evtlog, ) TRACE_EVENT(sde_perf_crtc_update, - TP_PROTO(u32 crtc, u64 bw_ctl, u32 core_clk_rate, - bool stop_req, u32 update_bus, u32 update_clk), - TP_ARGS(crtc, bw_ctl, core_clk_rate, + TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc, + u64 bw_ctl_ebi, u32 core_clk_rate, + bool stop_req, u32 update_bus, u32 update_clk), + TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate, stop_req, update_bus, update_clk), TP_STRUCT__entry( __field(u32, crtc) - __field(u64, bw_ctl) + __field(u64, bw_ctl_mnoc) + __field(u64, bw_ctl_llcc) + __field(u64, bw_ctl_ebi) __field(u32, core_clk_rate) __field(bool, stop_req) __field(u32, update_bus) @@ -207,19 +210,24 @@ TRACE_EVENT(sde_perf_crtc_update, ), TP_fast_assign( __entry->crtc = crtc; - __entry->bw_ctl = bw_ctl; + __entry->bw_ctl_mnoc = bw_ctl_mnoc; + __entry->bw_ctl_llcc = bw_ctl_llcc; + __entry->bw_ctl_ebi = bw_ctl_ebi; __entry->core_clk_rate = core_clk_rate; __entry->stop_req = stop_req; __entry->update_bus = update_bus; __entry->update_clk = update_clk; ), - TP_printk("crtc=%d bw=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d", - __entry->crtc, - __entry->bw_ctl, - __entry->core_clk_rate, - __entry->stop_req, - __entry->update_bus, - __entry->update_clk) + TP_printk( + "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d", + __entry->crtc, + __entry->bw_ctl_mnoc, + __entry->bw_ctl_llcc, + __entry->bw_ctl_ebi, + __entry->core_clk_rate, + __entry->stop_req, + __entry->update_bus, + __entry->update_clk) ); #define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0) diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c index 452a3be7f0e8..242cd6498366 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.c +++ b/drivers/gpu/drm/msm/sde_power_handle.c @@ -30,6 +30,20 @@ #include "sde_power_handle.h" #include "sde_trace.h" +static const char *data_bus_name[SDE_POWER_HANDLE_DBUS_ID_MAX] = { + [SDE_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,sde-data-bus", + [SDE_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,sde-llcc-bus", + [SDE_POWER_HANDLE_DBUS_ID_EBI] = "qcom,sde-ebi-bus", +}; + +const char *sde_power_handle_get_dbus_name(u32 bus_id) +{ + if (bus_id < SDE_POWER_HANDLE_DBUS_ID_MAX) + return data_bus_name[bus_id]; + + return NULL; +} + static void sde_power_event_trigger_locked(struct sde_power_handle *phandle, u32 event_type) { @@ -415,7 +429,9 @@ static int _sde_power_data_bus_set_quota( vect->ab = ab_quota[i]; vect->ib = ib_quota[i]; - pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n", + pr_debug( + "%s uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n", + bw_table->name, new_uc_idx, (i < rt_axi_port_cnt) ? 
"rt" : "nrt" , i, vect->ab, vect->ib); } @@ -433,7 +449,8 @@ static int _sde_power_data_bus_set_quota( int sde_power_data_bus_set_quota(struct sde_power_handle *phandle, struct sde_power_client *pclient, - int bus_client, u64 ab_quota, u64 ib_quota) + int bus_client, u32 bus_id, + u64 ab_quota, u64 ib_quota) { int rc = 0; int i; @@ -442,7 +459,8 @@ int sde_power_data_bus_set_quota(struct sde_power_handle *phandle, struct sde_power_client *client; if (!phandle || !pclient || - bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX) { + bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX || + bus_id >= SDE_POWER_HANDLE_DBUS_ID_MAX) { pr_err("invalid parameters\n"); return -EINVAL; } @@ -465,7 +483,9 @@ int sde_power_data_bus_set_quota(struct sde_power_handle *phandle, } } - rc = _sde_power_data_bus_set_quota(&phandle->data_bus_handle, + if (phandle->data_bus_handle[bus_id].data_bus_hdl) + rc = _sde_power_data_bus_set_quota( + &phandle->data_bus_handle[bus_id], total_ab_rt, total_ab_nrt, total_ib_rt, total_ib_nrt); @@ -484,7 +504,7 @@ static void sde_power_data_bus_unregister( } static int sde_power_data_bus_parse(struct platform_device *pdev, - struct sde_power_data_bus_handle *pdbus) + struct sde_power_data_bus_handle *pdbus, const char *name) { struct device_node *node; int rc = 0; @@ -507,7 +527,7 @@ static int sde_power_data_bus_parse(struct platform_device *pdev, rc = 0; } - node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-data-bus"); + node = of_get_child_by_name(pdev->dev.of_node, name); if (node) { rc = of_property_read_u32(node, "qcom,msm-bus,num-paths", &paths); @@ -533,7 +553,8 @@ static int sde_power_data_bus_parse(struct platform_device *pdev, rc = -EINVAL; goto end; } - pr_debug("register data_bus_hdl=%x\n", pdbus->data_bus_hdl); + pr_debug("register %s data_bus_hdl=%x\n", name, + pdbus->data_bus_hdl); } end: @@ -621,7 +642,8 @@ static void sde_power_data_bus_unregister( int sde_power_data_bus_set_quota(struct sde_power_handle *phandle, struct sde_power_client *pclient, - int bus_client, u64 ab_quota, u64 ib_quota) + int bus_client, u32 bus_id, + u64 ab_quota, u64 ib_quota) { return 0; } @@ -651,7 +673,7 @@ static int sde_power_data_bus_update(struct sde_power_data_bus_handle *pdbus, int sde_power_resource_init(struct platform_device *pdev, struct sde_power_handle *phandle) { - int rc = 0; + int rc = 0, i; struct dss_module_power *mp; if (!phandle || !pdev) { @@ -699,10 +721,16 @@ int sde_power_resource_init(struct platform_device *pdev, goto bus_err; } - rc = sde_power_data_bus_parse(pdev, &phandle->data_bus_handle); - if (rc) { - pr_err("register data bus parse failed rc=%d\n", rc); - goto data_bus_err; + for (i = SDE_POWER_HANDLE_DBUS_ID_MNOC; + i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + rc = sde_power_data_bus_parse(pdev, + &phandle->data_bus_handle[i], + data_bus_name[i]); + if (rc) { + pr_err("register data bus parse failed id=%d rc=%d\n", + i, rc); + goto data_bus_err; + } } INIT_LIST_HEAD(&phandle->power_client_clist); @@ -716,6 +744,8 @@ int sde_power_resource_init(struct platform_device *pdev, return rc; data_bus_err: + for (i--; i >= 0; i--) + sde_power_data_bus_unregister(&phandle->data_bus_handle[i]); sde_power_reg_bus_unregister(phandle->reg_bus_hdl); bus_err: msm_dss_put_clk(mp->clk_config, mp->num_clk); @@ -739,6 +769,7 @@ void sde_power_resource_deinit(struct platform_device *pdev, struct dss_module_power *mp; struct sde_power_client *curr_client, *next_client; struct sde_power_event *curr_event, *next_event; + int i; if (!phandle || !pdev) { pr_err("invalid 
input param\n"); @@ -766,7 +797,8 @@ void sde_power_resource_deinit(struct platform_device *pdev, } mutex_unlock(&phandle->phandle_lock); - sde_power_data_bus_unregister(&phandle->data_bus_handle); + for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) + sde_power_data_bus_unregister(&phandle->data_bus_handle[i]); sde_power_reg_bus_unregister(phandle->reg_bus_hdl); @@ -790,7 +822,7 @@ void sde_power_resource_deinit(struct platform_device *pdev, int sde_power_resource_enable(struct sde_power_handle *phandle, struct sde_power_client *pclient, bool enable) { - int rc = 0; + int rc = 0, i; bool changed = false; u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx; struct sde_power_client *client; @@ -837,13 +869,15 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, sde_power_event_trigger_locked(phandle, SDE_POWER_EVENT_PRE_ENABLE); - rc = sde_power_data_bus_update(&phandle->data_bus_handle, - enable); - if (rc) { - pr_err("failed to set data bus vote rc=%d\n", rc); - goto data_bus_hdl_err; + for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) { + rc = sde_power_data_bus_update( + &phandle->data_bus_handle[i], enable); + if (rc) { + pr_err("failed to set data bus vote id=%d rc=%d\n", + i, rc); + goto data_bus_hdl_err; + } } - /* * - When the target is RSCC enabled, regulator should * be enabled by the s/w only for the first time during @@ -897,7 +931,9 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, if (!phandle->rsc_client) msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable); - sde_power_data_bus_update(&phandle->data_bus_handle, enable); + for (i = 0 ; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) + sde_power_data_bus_update(&phandle->data_bus_handle[i], + enable); sde_power_event_trigger_locked(phandle, SDE_POWER_EVENT_POST_DISABLE); @@ -915,7 +951,8 @@ int sde_power_resource_enable(struct sde_power_handle *phandle, if (!phandle->rsc_client) msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0); vreg_err: - sde_power_data_bus_update(&phandle->data_bus_handle, 0); + for (i = 0 ; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++) + sde_power_data_bus_update(&phandle->data_bus_handle[i], 0); data_bus_hdl_err: phandle->current_usecase_ndx = prev_usecase_ndx; mutex_unlock(&phandle->phandle_lock); diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h index 0db10b0cdc96..78c325d3d4da 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.h +++ b/drivers/gpu/drm/msm/sde_power_handle.h @@ -59,6 +59,19 @@ enum sde_power_handle_data_bus_client { SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX }; +/** + * enum SDE_POWER_HANDLE_DBUS_ID - data bus identifier + * @SDE_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus + * @SDE_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus + * @SDE_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus + */ +enum SDE_POWER_HANDLE_DBUS_ID { + SDE_POWER_HANDLE_DBUS_ID_MNOC, + SDE_POWER_HANDLE_DBUS_ID_LLCC, + SDE_POWER_HANDLE_DBUS_ID_EBI, + SDE_POWER_HANDLE_DBUS_ID_MAX, +}; + /** * struct sde_power_client: stores the power client for sde driver * @name: name of the client @@ -152,7 +165,8 @@ struct sde_power_handle { struct device *dev; u32 current_usecase_ndx; u32 reg_bus_hdl; - struct sde_power_data_bus_handle data_bus_handle; + struct sde_power_data_bus_handle data_bus_handle + [SDE_POWER_HANDLE_DBUS_ID_MAX]; struct list_head event_list; struct sde_rsc_client *rsc_client; bool rsc_client_init; @@ -254,6 +268,7 @@ struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle, * @phandle: power handle containing the resources * @client: 
client information to set quota * @bus_client: real-time or non-real-time bus client + * @bus_id: identifier of data bus, see SDE_POWER_HANDLE_DBUS_ID * @ab_quota: arbitrated bus bandwidth * @ib_quota: instantaneous bus bandwidth * @@ -261,7 +276,8 @@ struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle, */ int sde_power_data_bus_set_quota(struct sde_power_handle *phandle, struct sde_power_client *pclient, - int bus_client, u64 ab_quota, u64 ib_quota); + int bus_client, u32 bus_id, + u64 ab_quota, u64 ib_quota); /** * sde_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable @@ -298,4 +314,11 @@ struct sde_power_event *sde_power_handle_register_event( void sde_power_handle_unregister_event(struct sde_power_handle *phandle, struct sde_power_event *event); +/** + * sde_power_handle_get_dbus_name - get name of given data bus identifier + * @bus_id: data bus identifier + * Return: Pointer to name string if success; NULL otherwise + */ +const char *sde_power_handle_get_dbus_name(u32 bus_id); + #endif /* _SDE_POWER_HANDLE_H_ */ diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c index caa8cdf3cd34..8447916999d2 100644 --- a/drivers/gpu/drm/msm/sde_rsc.c +++ b/drivers/gpu/drm/msm/sde_rsc.c @@ -657,13 +657,14 @@ EXPORT_SYMBOL(sde_rsc_client_state_update); * sde_rsc_client_vote() - ab/ib vote from rsc client * * @client: Client pointer provided by sde_rsc_client_create(). + * @bus_id: data bus for which to be voted * @ab: aggregated bandwidth vote from client. * @ib: instant bandwidth vote from client. * * Return: error code. */ int sde_rsc_client_vote(struct sde_rsc_client *caller_client, - u64 ab_vote, u64 ib_vote) + u32 bus_id, u64 ab_vote, u64 ib_vote) { int rc = 0; struct sde_rsc_priv *rsc; @@ -717,7 +718,8 @@ int sde_rsc_client_vote(struct sde_rsc_client *caller_client, rpmh_invalidate(rsc->disp_rsc); sde_power_data_bus_set_quota(&rsc->phandle, rsc->pclient, - SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, ab_vote, ib_vote); + SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT, + bus_id, ab_vote, ib_vote); rpmh_flush(rsc->disp_rsc); if (rsc->hw_ops.tcs_use_ok) diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h index f921909103d7..1450caa6ff4d 100644 --- a/include/linux/sde_rsc.h +++ b/include/linux/sde_rsc.h @@ -179,13 +179,14 @@ int sde_rsc_client_state_update(struct sde_rsc_client *client, * sde_rsc_client_vote() - ab/ib vote from rsc client * * @client: Client pointer provided by sde_rsc_client_create(). + * @bus_id: data bus identifier * @ab: aggregated bandwidth vote from client. * @ib: instant bandwidth vote from client. * * Return: error code. */ int sde_rsc_client_vote(struct sde_rsc_client *caller_client, - u64 ab_vote, u64 ib_vote); + u32 bus_id, u64 ab_vote, u64 ib_vote); /** * sde_rsc_register_event - register a callback function for an event @@ -243,7 +244,7 @@ static inline int sde_rsc_client_state_update(struct sde_rsc_client *client, } static inline int sde_rsc_client_vote(struct sde_rsc_client *caller_client, - u64 ab_vote, u64 ib_vote) + u32 bus_id, u64 ab_vote, u64 ib_vote) { return 0; } -- GitLab From 77c171780bd7b82991b2a65bb93e8b3bd14c59a6 Mon Sep 17 00:00:00 2001 From: Deepak Katragadda Date: Mon, 19 Jun 2017 10:50:55 -0700 Subject: [PATCH 153/786] clk: qcom: Remove certain BCR reset references on SDM845 Due a hardware requirement, BCR clients need to update their code to set and clear the force-memory bits of any affected SREG_PSCBCs that might be under the BCRs control. 
In order to enforce that, remove the references to BCRs with SREG_PSCBCs in the clock driver. They will be added on a need-by basis after assessing the clients needs and after making sure that they follow the required work-around. CRs-Fixed: 2063631 Change-Id: Ia1de63db7a436b07b8ebe6b62ffeea8b9a72d069 Signed-off-by: Deepak Katragadda --- drivers/clk/qcom/camcc-sdm845.c | 11 ---- drivers/clk/qcom/dispcc-sdm845.c | 2 - drivers/clk/qcom/gcc-sdm845.c | 1 - drivers/clk/qcom/videocc-sdm845.c | 9 --- include/dt-bindings/clock/qcom,camcc-sdm845.h | 31 ++++------- .../dt-bindings/clock/qcom,dispcc-sdm845.h | 5 +- include/dt-bindings/clock/qcom,gcc-sdm845.h | 55 +++++++++---------- .../dt-bindings/clock/qcom,videocc-sdm845.h | 5 -- 8 files changed, 38 insertions(+), 81 deletions(-) diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c index 9ccef918ade3..86e148d870e9 100644 --- a/drivers/clk/qcom/camcc-sdm845.c +++ b/drivers/clk/qcom/camcc-sdm845.c @@ -1928,22 +1928,11 @@ static struct clk_regmap *cam_cc_sdm845_clocks[] = { }; static const struct qcom_reset_map cam_cc_sdm845_resets[] = { - [TITAN_CAM_CC_BPS_BCR] = { 0x6000 }, - [TITAN_CAM_CC_CAMNOC_BCR] = { 0xb120 }, [TITAN_CAM_CC_CCI_BCR] = { 0xb0d4 }, [TITAN_CAM_CC_CPAS_BCR] = { 0xb118 }, [TITAN_CAM_CC_CSI0PHY_BCR] = { 0x5000 }, [TITAN_CAM_CC_CSI1PHY_BCR] = { 0x5024 }, [TITAN_CAM_CC_CSI2PHY_BCR] = { 0x5048 }, - [TITAN_CAM_CC_FD_BCR] = { 0xb0ac }, - [TITAN_CAM_CC_ICP_BCR] = { 0xb074 }, - [TITAN_CAM_CC_IFE_0_BCR] = { 0x9000 }, - [TITAN_CAM_CC_IFE_1_BCR] = { 0xa000 }, - [TITAN_CAM_CC_IFE_LITE_BCR] = { 0xb000 }, - [TITAN_CAM_CC_IPE_0_BCR] = { 0x7000 }, - [TITAN_CAM_CC_IPE_1_BCR] = { 0x8000 }, - [TITAN_CAM_CC_JPEG_BCR] = { 0xb048 }, - [TITAN_CAM_CC_LRME_BCR] = { 0xb0f4 }, [TITAN_CAM_CC_MCLK0_BCR] = { 0x4000 }, [TITAN_CAM_CC_MCLK1_BCR] = { 0x4020 }, [TITAN_CAM_CC_MCLK2_BCR] = { 0x4040 }, diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c index 6acab9f95233..d6ecf122f8dd 100644 --- a/drivers/clk/qcom/dispcc-sdm845.c +++ b/drivers/clk/qcom/dispcc-sdm845.c @@ -992,8 +992,6 @@ static struct clk_regmap *disp_cc_sdm845_clocks[] = { }; static const struct qcom_reset_map disp_cc_sdm845_resets[] = { - [DISP_CC_MDSS_CORE_BCR] = { 0x2000 }, - [DISP_CC_MDSS_GCC_CLOCKS_BCR] = { 0x4000 }, [DISP_CC_MDSS_RSCC_BCR] = { 0x5000 }, }; diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index 13de253b542a..34e6e3936770 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -3500,7 +3500,6 @@ static struct clk_regmap *gcc_sdm845_clocks[] = { }; static const struct qcom_reset_map gcc_sdm845_resets[] = { - [GCC_GPU_BCR] = { 0x71000 }, [GCC_MMSS_BCR] = { 0xb000 }, [GCC_PCIE_0_BCR] = { 0x6b000 }, [GCC_PCIE_1_BCR] = { 0x8d000 }, diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c index 14a9cff77286..362ea0b4fee7 100644 --- a/drivers/clk/qcom/videocc-sdm845.c +++ b/drivers/clk/qcom/videocc-sdm845.c @@ -311,13 +311,6 @@ static struct clk_regmap *video_cc_sdm845_clocks[] = { [VIDEO_PLL0] = &video_pll0.clkr, }; -static const struct qcom_reset_map video_cc_sdm845_resets[] = { - [VIDEO_CC_INTERFACE_BCR] = { 0x8f0 }, - [VIDEO_CC_VCODEC0_BCR] = { 0x870 }, - [VIDEO_CC_VCODEC1_BCR] = { 0x8b0 }, - [VIDEO_CC_VENUS_BCR] = { 0x810 }, -}; - static const struct regmap_config video_cc_sdm845_regmap_config = { .reg_bits = 32, .reg_stride = 4, @@ -330,8 +323,6 @@ static const struct qcom_cc_desc video_cc_sdm845_desc = { .config = &video_cc_sdm845_regmap_config, 
.clks = video_cc_sdm845_clocks, .num_clks = ARRAY_SIZE(video_cc_sdm845_clocks), - .resets = video_cc_sdm845_resets, - .num_resets = ARRAY_SIZE(video_cc_sdm845_resets), }; static const struct of_device_id video_cc_sdm845_match_table[] = { diff --git a/include/dt-bindings/clock/qcom,camcc-sdm845.h b/include/dt-bindings/clock/qcom,camcc-sdm845.h index e16b69aa1daa..7218261ea28e 100644 --- a/include/dt-bindings/clock/qcom,camcc-sdm845.h +++ b/include/dt-bindings/clock/qcom,camcc-sdm845.h @@ -102,26 +102,15 @@ #define CAM_CC_SOC_AHB_CLK 85 #define CAM_CC_SYS_TMR_CLK 86 -#define TITAN_CAM_CC_BPS_BCR 0 -#define TITAN_CAM_CC_CAMNOC_BCR 1 -#define TITAN_CAM_CC_CCI_BCR 2 -#define TITAN_CAM_CC_CPAS_BCR 3 -#define TITAN_CAM_CC_CSI0PHY_BCR 4 -#define TITAN_CAM_CC_CSI1PHY_BCR 5 -#define TITAN_CAM_CC_CSI2PHY_BCR 6 -#define TITAN_CAM_CC_FD_BCR 7 -#define TITAN_CAM_CC_ICP_BCR 8 -#define TITAN_CAM_CC_IFE_0_BCR 9 -#define TITAN_CAM_CC_IFE_1_BCR 10 -#define TITAN_CAM_CC_IFE_LITE_BCR 11 -#define TITAN_CAM_CC_IPE_0_BCR 12 -#define TITAN_CAM_CC_IPE_1_BCR 13 -#define TITAN_CAM_CC_JPEG_BCR 14 -#define TITAN_CAM_CC_LRME_BCR 15 -#define TITAN_CAM_CC_MCLK0_BCR 16 -#define TITAN_CAM_CC_MCLK1_BCR 17 -#define TITAN_CAM_CC_MCLK2_BCR 18 -#define TITAN_CAM_CC_MCLK3_BCR 19 -#define TITAN_CAM_CC_TITAN_TOP_BCR 20 +#define TITAN_CAM_CC_CCI_BCR 0 +#define TITAN_CAM_CC_CPAS_BCR 1 +#define TITAN_CAM_CC_CSI0PHY_BCR 2 +#define TITAN_CAM_CC_CSI1PHY_BCR 3 +#define TITAN_CAM_CC_CSI2PHY_BCR 4 +#define TITAN_CAM_CC_MCLK0_BCR 5 +#define TITAN_CAM_CC_MCLK1_BCR 6 +#define TITAN_CAM_CC_MCLK2_BCR 7 +#define TITAN_CAM_CC_MCLK3_BCR 8 +#define TITAN_CAM_CC_TITAN_TOP_BCR 9 #endif diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h index 91ea07771864..42bb59faae06 100644 --- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h +++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h @@ -56,9 +56,6 @@ #define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 39 #define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 40 -#define DISP_CC_MDSS_CORE_BCR 0 -#define DISP_CC_MDSS_GCC_CLOCKS_BCR 1 -#define DISP_CC_MDSS_RSCC_BCR 2 -#define DISP_CC_MDSS_SPDM_BCR 3 +#define DISP_CC_MDSS_RSCC_BCR 0 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h index f6f4bc3b5e97..678a88524279 100644 --- a/include/dt-bindings/clock/qcom,gcc-sdm845.h +++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h @@ -204,34 +204,33 @@ /* GCC reset clocks */ -#define GCC_GPU_BCR 0 -#define GCC_MMSS_BCR 1 -#define GCC_PCIE_0_BCR 2 -#define GCC_PCIE_1_BCR 3 -#define GCC_PCIE_PHY_BCR 4 -#define GCC_PDM_BCR 5 -#define GCC_PRNG_BCR 6 -#define GCC_QUPV3_WRAPPER_0_BCR 7 -#define GCC_QUPV3_WRAPPER_1_BCR 8 -#define GCC_QUSB2PHY_PRIM_BCR 9 -#define GCC_QUSB2PHY_SEC_BCR 10 -#define GCC_SDCC2_BCR 11 -#define GCC_SDCC4_BCR 12 -#define GCC_TSIF_BCR 13 -#define GCC_UFS_CARD_BCR 14 -#define GCC_UFS_PHY_BCR 15 -#define GCC_USB30_PRIM_BCR 16 -#define GCC_USB30_SEC_BCR 17 -#define GCC_USB3_PHY_PRIM_BCR 18 -#define GCC_USB3PHY_PHY_PRIM_BCR 19 -#define GCC_USB3_DP_PHY_PRIM_BCR 20 -#define GCC_USB3_PHY_SEC_BCR 21 -#define GCC_USB3PHY_PHY_SEC_BCR 22 -#define GCC_USB3_DP_PHY_SEC_BCR 23 -#define GCC_USB_PHY_CFG_AHB2PHY_BCR 24 -#define GCC_PCIE_0_PHY_BCR 25 -#define GCC_PCIE_1_PHY_BCR 26 -#define GCC_SDCC1_BCR 27 +#define GCC_MMSS_BCR 0 +#define GCC_PCIE_0_BCR 1 +#define GCC_PCIE_1_BCR 2 +#define GCC_PCIE_PHY_BCR 3 +#define GCC_PDM_BCR 4 +#define GCC_PRNG_BCR 5 +#define GCC_QUPV3_WRAPPER_0_BCR 6 +#define GCC_QUPV3_WRAPPER_1_BCR 7 +#define 
GCC_QUSB2PHY_PRIM_BCR 8 +#define GCC_QUSB2PHY_SEC_BCR 9 +#define GCC_SDCC2_BCR 10 +#define GCC_SDCC4_BCR 11 +#define GCC_TSIF_BCR 12 +#define GCC_UFS_CARD_BCR 13 +#define GCC_UFS_PHY_BCR 14 +#define GCC_USB30_PRIM_BCR 15 +#define GCC_USB30_SEC_BCR 16 +#define GCC_USB3_PHY_PRIM_BCR 17 +#define GCC_USB3PHY_PHY_PRIM_BCR 18 +#define GCC_USB3_DP_PHY_PRIM_BCR 19 +#define GCC_USB3_PHY_SEC_BCR 20 +#define GCC_USB3PHY_PHY_SEC_BCR 21 +#define GCC_USB3_DP_PHY_SEC_BCR 22 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 23 +#define GCC_PCIE_0_PHY_BCR 24 +#define GCC_PCIE_1_PHY_BCR 25 +#define GCC_SDCC1_BCR 26 /* Dummy clocks for rate measurement */ #define MEASURE_ONLY_SNOC_CLK 0 diff --git a/include/dt-bindings/clock/qcom,videocc-sdm845.h b/include/dt-bindings/clock/qcom,videocc-sdm845.h index b362852d0a18..21b5092babcc 100644 --- a/include/dt-bindings/clock/qcom,videocc-sdm845.h +++ b/include/dt-bindings/clock/qcom,videocc-sdm845.h @@ -28,9 +28,4 @@ #define VIDEO_CC_VENUS_CTL_CORE_CLK 11 #define VIDEO_PLL0 12 -#define VIDEO_CC_INTERFACE_BCR 0 -#define VIDEO_CC_VCODEC0_BCR 1 -#define VIDEO_CC_VCODEC1_BCR 2 -#define VIDEO_CC_VENUS_BCR 3 - #endif -- GitLab From 29db071fc4e289f8625a18b48d9c0a84a397da15 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Thu, 18 May 2017 13:26:48 +0800 Subject: [PATCH 154/786] mmc: host: create a specific workqueue for clk gate mmc host clock gate work was scheduled on system_wq, which is created without flag WQ_MEM_RECLAIM. Because mmcqd thread was created with flag PF_MEMALLOC set, it would break forward-progress guarantee leading to a deadlock when clock gate work is cancelled. Fix it by creating a specific work queue for clock gate work. Change-Id: I4be5d629c712d8c48049f1e2df2c14ac6024b837 Signed-off-by: Can Guo --- drivers/mmc/core/host.c | 50 +++++++++++++++++++++++++++++++++++++++- include/linux/mmc/host.h | 1 + 2 files changed, 50 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index eb730fd604fc..e3c0f4f4c128 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -224,7 +224,7 @@ void mmc_host_clk_release(struct mmc_host *host) host->clk_requests--; if (mmc_host_may_gate_card(host->card) && !host->clk_requests) - schedule_delayed_work(&host->clk_gate_work, + queue_delayed_work(host->clk_gate_wq, &host->clk_gate_work, msecs_to_jiffies(host->clkgate_delay)); spin_unlock_irqrestore(&host->clk_lock, flags); } @@ -283,6 +283,8 @@ static inline void mmc_host_clk_exit(struct mmc_host *host) mmc_host_clk_gate_delayed(host); if (host->clk_gated) mmc_host_clk_hold(host); + if (host->clk_gate_wq) + destroy_workqueue(host->clk_gate_wq); /* There should be only one user now */ WARN_ON(host->clk_requests > 1); } @@ -298,6 +300,42 @@ static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) pr_err("%s: Failed to create clkgate_delay sysfs entry\n", mmc_hostname(host)); } + +static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host) +{ + char *wq = NULL; + int wq_nl; + bool ret = true; + + wq_nl = sizeof("mmc_clk_gate/") + sizeof(mmc_hostname(host)) + 1; + + wq = kzalloc(wq_nl, GFP_KERNEL); + if (!wq) { + ret = false; + goto out; + } + + snprintf(wq, wq_nl, "mmc_clk_gate/%s", mmc_hostname(host)); + + /* + * Create a work queue with flag WQ_MEM_RECLAIM set for + * mmc clock gate work. Because mmc thread is created with + * flag PF_MEMALLOC set, kernel will check for work queue + * flag WQ_MEM_RECLAIM when flush the work queue. If work + * queue flag WQ_MEM_RECLAIM is not set, kernel warning + * will be triggered. 
+ */ + host->clk_gate_wq = create_workqueue(wq); + if (!host->clk_gate_wq) { + ret = false; + dev_err(host->parent, + "failed to create clock gate work queue\n"); + } + + kfree(wq); +out: + return ret; +} #else static inline void mmc_host_clk_init(struct mmc_host *host) @@ -316,6 +354,11 @@ bool mmc_host_may_gate_card(struct mmc_card *card) { return false; } + +static inline bool mmc_host_clk_gate_wq_init(struct mmc_host *host) +{ + return true; +} #endif void mmc_retune_enable(struct mmc_host *host) @@ -642,6 +685,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) return NULL; } + + if (!mmc_host_clk_gate_wq_init(host)) { + kfree(host); + return NULL; + } + mmc_host_clk_init(host); spin_lock_init(&host->lock); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0e6a54c3853e..3c58c2092f93 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -478,6 +478,7 @@ struct mmc_host { int clk_requests; /* internal reference counter */ unsigned int clk_delay; /* number of MCI clk hold cycles */ bool clk_gated; /* clock gated */ + struct workqueue_struct *clk_gate_wq; /* clock gate work queue */ struct delayed_work clk_gate_work; /* delayed clock gate */ unsigned int clk_old; /* old clock value cache */ spinlock_t clk_lock; /* lock for clk fields */ -- GitLab From bd019ce3b36004fce87dea0aabca9bf83b7e0689 Mon Sep 17 00:00:00 2001 From: Channagoud Kadabi Date: Mon, 19 Jun 2017 17:14:52 -0700 Subject: [PATCH 155/786] kernel: cpu: Handle hotplug failure for state CPUHP_AP_IDLE_DEAD Once the teardown hotplug handler has run, the cpu is dead and enters the CPUHP_AP_IDLE_DEAD state. Any callbacks that fail in the state machine with state < CPUHP_AP_IDLE must be treated as fatal. Otherwise timers may not be migrated away from the dead cpu, leading to issues such as workqueue lockups and sched_clock wrapping to zero, because sched_clock_pll, which is in the hrtimer base of the cpu being hotplugged, does not get migrated. 
CRs-Fixed: 2056485 Change-Id: Ia86dfc4561f96f7774db1e8529ee5a553c584426 Signed-off-by: Clarence Ip --- .../platform/msm/sde/rotator/sde_rotator_r3.c | 37 ++++++++++++++++--- 1 file changed, 32 insertions(+), 5 deletions(-) diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c index aa6c5223d2f3..743d2f73fb42 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c @@ -57,6 +57,9 @@ #define DEFAULT_MAXLINEWIDTH 4096 +/* stride alignment requirement for avoiding partial writes */ +#define PARTIAL_WRITE_ALIGNMENT 0x1F + /* Macro for constructing the REGDMA command */ #define SDE_REGDMA_WRITE(p, off, data) \ do { \ @@ -869,6 +872,8 @@ static void sde_hw_rotator_setup_timestamp_packet( SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001); SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001); SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0); + SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, + (ctx->rot->highest_bank & 0x3) << 8); SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0); SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1); SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts); @@ -1270,7 +1275,7 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx, u32 *wrptr; u32 pack = 0; u32 dst_format = 0; - u32 partial_write = 0; + u32 no_partial_writes = 0; int i; wrptr = sde_hw_rotator_get_regdma_segment(ctx); @@ -1355,12 +1360,34 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx, (cfg->h_downscale_factor << 16)); /* partial write check */ - if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map) && - !sde_mdp_is_ubwc_format(fmt)) - partial_write = BIT(10); + if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) { + no_partial_writes = BIT(10); + + /* + * For simplicity, don't disable partial writes if + * the ROI does not span the entire width of the + * output image, and require the total stride to + * also be properly aligned. + * + * This avoids having to determine the memory access + * alignment of the actual horizontal ROI on a per + * color format basis. + */ + if (sde_mdp_is_ubwc_format(fmt)) { + no_partial_writes = 0x0; + } else if (cfg->dst_rect->x || + cfg->dst_rect->w != cfg->img_width) { + no_partial_writes = 0x0; + } else { + for (i = 0; i < SDE_ROT_MAX_PLANES; i++) + if (cfg->dst_plane.ystride[i] & + PARTIAL_WRITE_ALIGNMENT) + no_partial_writes = 0x0; + } + } /* write config setup for bank configuration */ - SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, partial_write | + SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes | (ctx->rot->highest_bank & 0x3) << 8); if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map)) -- GitLab From 0bdd09ad31a1068589c94444105fa64581b0d794 Mon Sep 17 00:00:00 2001 From: Greg Hackmann Date: Tue, 16 May 2017 16:48:49 -0700 Subject: [PATCH 157/786] ANDROID: sdcardfs: remove dead function open_flags_to_access_mode() smatch warns about the suspicious formatting in the last line of open_flags_to_access_mode(). It turns out the only caller was deleted over a year ago by "ANDROID: sdcardfs: Bring up to date with Android M permissions:", so we can "fix" the function's formatting by deleting it. 
Change-Id: Id85946f3eb01722eef35b1815f405a6fda3aa4ff Signed-off-by: Greg Hackmann --- fs/sdcardfs/packagelist.c | 13 ------------- fs/sdcardfs/sdcardfs.h | 1 - 2 files changed, 14 deletions(-) diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c index 00a0f656acc7..6da0c2186d39 100644 --- a/fs/sdcardfs/packagelist.c +++ b/fs/sdcardfs/packagelist.c @@ -174,19 +174,6 @@ int check_caller_access_to_name(struct inode *parent_node, const struct qstr *na return 1; } -/* This function is used when file opening. The open flags must be - * checked before calling check_caller_access_to_name() - */ -int open_flags_to_access_mode(int open_flags) -{ - if ((open_flags & O_ACCMODE) == O_RDONLY) - return 0; /* R_OK */ - if ((open_flags & O_ACCMODE) == O_WRONLY) - return 1; /* W_OK */ - /* Probably O_RDRW, but treat as default to be safe */ - return 1; /* R_OK | W_OK */ -} - static struct hashtable_entry *alloc_hashtable_entry(const struct qstr *key, appid_t value) { diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h index 3687b22a2e6b..4e0ce49a906d 100644 --- a/fs/sdcardfs/sdcardfs.h +++ b/fs/sdcardfs/sdcardfs.h @@ -499,7 +499,6 @@ extern appid_t get_appid(const char *app_name); extern appid_t get_ext_gid(const char *app_name); extern appid_t is_excluded(const char *app_name, userid_t userid); extern int check_caller_access_to_name(struct inode *parent_node, const struct qstr *name); -extern int open_flags_to_access_mode(int open_flags); extern int packagelist_init(void); extern void packagelist_exit(void); -- GitLab From d8d6c8ab80e60649405484ad590aab865b0159b4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Thu, 3 Nov 2016 15:50:09 +0100 Subject: [PATCH 158/786] drivers base/topology: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
No functional change Change-Id: I020e5d20d699bf5597927449545e90454481a5ec Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Cc: Greg Kroah-Hartman Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20161103145021.28528-14-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner Git-commit: 38643a0e691ec947d311eb2db011b289cf95014e Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Signed-off-by: Channagoud Kadabi --- drivers/base/topology.c | 42 +++++--------------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 6 insertions(+), 37 deletions(-) diff --git a/drivers/base/topology.c b/drivers/base/topology.c index df3c97cb4c99..d6ec1c546f5b 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -118,51 +118,19 @@ static int topology_add_dev(unsigned int cpu) return sysfs_create_group(&dev->kobj, &topology_attr_group); } -static void topology_remove_dev(unsigned int cpu) +static int topology_remove_dev(unsigned int cpu) { struct device *dev = get_cpu_device(cpu); sysfs_remove_group(&dev->kobj, &topology_attr_group); -} - -static int topology_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - int rc = 0; - - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - rc = topology_add_dev(cpu); - break; - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - case CPU_DEAD: - case CPU_DEAD_FROZEN: - topology_remove_dev(cpu); - break; - } - return notifier_from_errno(rc); + return 0; } static int topology_sysfs_init(void) { - int cpu; - int rc = 0; - - cpu_notifier_register_begin(); - - for_each_online_cpu(cpu) { - rc = topology_add_dev(cpu); - if (rc) - goto out; - } - __hotcpu_notifier(topology_cpu_callback, 0); - -out: - cpu_notifier_register_done(); - return rc; + return cpuhp_setup_state(CPUHP_TOPOLOGY_PREPARE, + "base/topology:prepare", topology_add_dev, + topology_remove_dev); } device_initcall(topology_sysfs_init); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 7f395e3f10c3..9f93d1865241 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -50,6 +50,7 @@ enum cpuhp_state { CPUHP_ARM_SHMOBILE_SCU_PREPARE, CPUHP_SH_SH3X_PREPARE, CPUHP_BLK_MQ_PREPARE, + CPUHP_TOPOLOGY_PREPARE, CPUHP_TIMERS_DEAD, CPUHP_NOTF_ERR_INJ_PREPARE, CPUHP_MIPS_SOC_PREPARE, -- GitLab From 5ec3ec14057cc831fd5407b39c15768ca715c8bd Mon Sep 17 00:00:00 2001 From: Channagoud Kadabi Date: Tue, 20 Jun 2017 15:01:40 -0700 Subject: [PATCH 159/786] kernel: cpu: send CPU_UP_CANCELED notification If any of the callbacks during cpu up fail, undo_cpu_up calls the teardown callbacks to clean things up but does not send CPU_UP_CANCELED. As some drivers still use the notification mechanism for cpu hotplug, we need to send the CPU_UP_CANCELED notification so drivers can clean up things they did on cpu up.
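To illustrate why the missing notification matters, consider a driver that still uses the legacy cpu notifier interface: whatever it sets up on CPU_UP_PREPARE can only be released if it also sees CPU_UP_CANCELED when the bring-up fails part-way, which is the event the one-line change below makes cpuhp_up_callbacks() deliver. A hypothetical driver-side notifier (names and resources invented for illustration):

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int foo_cpu_callback(struct notifier_block *nfb,
                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                pr_debug("foo: preparing cpu%u\n", cpu);
                /* allocate per-CPU state for this CPU */
                break;
        case CPU_UP_CANCELED:   /* bring-up failed: undo UP_PREPARE work */
        case CPU_DEAD:
                pr_debug("foo: releasing cpu%u\n", cpu);
                /* free per-CPU state for this CPU */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block foo_cpu_nb = {
        .notifier_call = foo_cpu_callback,
};
/* registered at init time with register_cpu_notifier(&foo_cpu_nb) */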
Change-Id: Ia7fb79ff4d7919dc6dacd046c1ce250498371a63 Signed-off-by: Channagoud Kadabi --- kernel/cpu.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/cpu.c b/kernel/cpu.c index 8ac83e55283a..98eb4fbad8fa 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -494,6 +494,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, if (ret) { st->target = prev_state; undo_cpu_up(cpu, st); + cpu_notify(CPU_UP_CANCELED, cpu); break; } } -- GitLab From 64d136b81336b65bdcb0e09b0577994df45a2d65 Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Tue, 1 Nov 2016 21:01:34 -0700 Subject: [PATCH 160/786] usb: gsi: Update TRB RING initialization with USB GSI OUT endpoint GSI firmware can't differentiate if TRB ring is full or empty with USB GSI OUT endpoint. This change adds suggested sequence to initialize TRBs with USB GSI OUT endpoint related TRB ring as below: 1. Add LINK TRB as start of TRB ring and clear HWO bit. This LINK TRB should point to first regular TRB. 2. Clear HWO bit with all regular TRBs. 3. Set HWO bit with last LINK TRB. 4. Ring doorbell to GSI channel related to USB GSI OUT endpoint using last LINK TRB address Due to additional LINK TRB with USB GSI OUT endpoint's TRB ring, it is required to decrease number of OUT TRB value as 14. Use 14 as number of OUT TRB with ECM interface instead of 31. 31 number of OUT TRB was used as workaround which is fixed by this change. CRs-Fixed: 1107563 Change-Id: Ia1471f42dc8ece849e2298a9668c9702be085016 Signed-off-by: Mayank Rana --- drivers/usb/dwc3/dwc3-msm.c | 64 ++++++++++++++++++----------- drivers/usb/gadget/function/f_gsi.c | 16 ++++++-- drivers/usb/gadget/function/f_gsi.h | 3 +- include/linux/usb/gadget.h | 2 +- 4 files changed, 55 insertions(+), 30 deletions(-) diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index dcb41a9b2fe9..7155ec9479d1 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -868,8 +868,8 @@ static void gsi_get_channel_info(struct usb_ep *ep, * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1 * LINK TRB. */ - ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10; - last_trb_index = request->num_bufs + 1; + ch_info->xfer_ring_len = (request->num_bufs + 2) * 0x10; + last_trb_index = request->num_bufs + 2; } /* Store last 16 bits of LINK TRB address as per GSI hw requirement */ @@ -941,13 +941,13 @@ static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr) } /* -* Rings Doorbell for IN GSI Channel +* Rings Doorbell for GSI Channel * * @usb_ep - pointer to usb_ep instance. * @request - pointer to GSI request. This is used to pass in the * address of the GSI doorbell obtained from IPA driver */ -static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request) +static void gsi_ring_db(struct usb_ep *ep, struct usb_gsi_request *request) { void __iomem *gsi_dbl_address_lsb; void __iomem *gsi_dbl_address_msb; @@ -955,10 +955,11 @@ static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request) u64 dbl_addr = *((u64 *)request->buf_base_addr); u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF); u32 dbl_hi_addr = (dbl_addr >> 32); - u32 num_trbs = (request->num_bufs * 2 + 2); struct dwc3_ep *dep = to_dwc3_ep(ep); struct dwc3 *dwc = dep->dwc; struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent); + int num_trbs = (dep->direction) ? 
(2 * (request->num_bufs) + 2) + : (request->num_bufs + 2); gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev, dbl_lo_addr, sizeof(u32)); @@ -971,8 +972,8 @@ static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request) dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n"); offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]); - dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n", - &offset, gsi_dbl_address_lsb, dbl_lo_addr); + dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x) for ep:%s\n", + &offset, gsi_dbl_address_lsb, dbl_lo_addr, ep->name); writel_relaxed(offset, gsi_dbl_address_lsb); writel_relaxed(0, gsi_dbl_address_msb); @@ -1042,7 +1043,7 @@ static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req) struct dwc3 *dwc = dep->dwc; struct dwc3_trb *trb; int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2) - : (req->num_bufs + 1); + : (req->num_bufs + 2); dep->trb_dma_pool = dma_pool_create(ep->name, dwc->sysdev, num_trbs * sizeof(struct dwc3_trb), @@ -1103,26 +1104,43 @@ static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req) trb = &dep->trb_pool[i]; memset(trb, 0, sizeof(*trb)); - trb->bpl = lower_32_bits(buffer_addr); - trb->bph = 0; - trb->size = req->buf_len; - trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC - | DWC3_TRB_CTRL_CSP - | DWC3_TRB_CTRL_ISP_IMI; - buffer_addr += req->buf_len; - - /* Set up the Link TRB at the end */ - if (i == (num_trbs - 1)) { + /* Setup LINK TRB to start with TRB ring */ + if (i == 0) { trb->bpl = dwc3_trb_dma_offset(dep, - &dep->trb_pool[0]); + &dep->trb_pool[1]); + trb->ctrl = DWC3_TRBCTL_LINK_TRB; + } else if (i == (num_trbs - 1)) { + /* Set up the Link TRB at the end */ + trb->bpl = dwc3_trb_dma_offset(dep, + &dep->trb_pool[0]); trb->bph = (1 << 23) | (1 << 21) | (ep->ep_intr_num << 16); - trb->size = 0; trb->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO; + } else { + trb->bpl = lower_32_bits(buffer_addr); + trb->size = req->buf_len; + buffer_addr += req->buf_len; + trb->ctrl = DWC3_TRBCTL_NORMAL + | DWC3_TRB_CTRL_IOC + | DWC3_TRB_CTRL_CSP + | DWC3_TRB_CTRL_ISP_IMI; } } } + + pr_debug("%s: Initialized TRB Ring for %s\n", __func__, dep->name); + trb = &dep->trb_pool[0]; + if (trb) { + for (i = 0; i < num_trbs; i++) { + pr_debug("TRB(%d): ADDRESS:%lx bpl:%x bph:%x size:%x ctrl:%x\n", + i, (unsigned long)dwc3_trb_dma_offset(dep, + &dep->trb_pool[i]), trb->bpl, trb->bph, + trb->size, trb->ctrl); + trb++; + } + } + return 0; } @@ -1363,10 +1381,10 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep, ch_info = (struct gsi_channel_info *)op_data; gsi_get_channel_info(ep, ch_info); break; - case GSI_EP_OP_RING_IN_DB: + case GSI_EP_OP_RING_DB: request = (struct usb_gsi_request *)op_data; - dev_dbg(mdwc->dev, "RING IN EP DB\n"); - gsi_ring_in_db(ep, request); + dbg_print(0xFF, "RING_DB", 0, ep->name); + gsi_ring_db(ep, request); break; case GSI_EP_OP_UPDATEXFER: request = (struct usb_gsi_request *)op_data; diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index 308a49c44793..d11c30b0bb69 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -472,6 +472,7 @@ static void ipa_data_path_enable(struct gsi_data_port *d_port) usb_gsi_ep_op(d_port->in_ep, (void *)&block_db, GSI_EP_OP_SET_CLR_BLOCK_DBL); + /* GSI channel DBL address for USB IN endpoint */ dbl_register_addr = gsi->d_port.in_db_reg_phs_addr_msb; dbl_register_addr = dbl_register_addr << 32; dbl_register_addr = @@ -481,11 +482,18 @@ 
static void ipa_data_path_enable(struct gsi_data_port *d_port) req.buf_base_addr = &dbl_register_addr; req.num_bufs = gsi->d_port.in_request.num_bufs; - usb_gsi_ep_op(gsi->d_port.in_ep, &req, GSI_EP_OP_RING_IN_DB); + usb_gsi_ep_op(gsi->d_port.in_ep, &req, GSI_EP_OP_RING_DB); if (gsi->d_port.out_ep) { - usb_gsi_ep_op(gsi->d_port.out_ep, &gsi->d_port.out_request, - GSI_EP_OP_UPDATEXFER); + /* GSI channel DBL address for USB OUT endpoint */ + dbl_register_addr = gsi->d_port.out_db_reg_phs_addr_msb; + dbl_register_addr = dbl_register_addr << 32; + dbl_register_addr = dbl_register_addr | + gsi->d_port.out_db_reg_phs_addr_lsb; + /* use temp request to pass 64 bit dbl reg addr and num_bufs */ + req.buf_base_addr = &dbl_register_addr; + req.num_bufs = gsi->d_port.out_request.num_bufs; + usb_gsi_ep_op(gsi->d_port.out_ep, &req, GSI_EP_OP_RING_DB); } } @@ -2618,7 +2626,7 @@ static int gsi_bind(struct usb_configuration *c, struct usb_function *f) info.in_req_num_buf = num_in_bufs; gsi->d_port.out_aggr_size = GSI_ECM_AGGR_SIZE; info.out_req_buf_len = GSI_OUT_ECM_BUF_LEN; - info.out_req_num_buf = GSI_ECM_NUM_OUT_BUFFERS; + info.out_req_num_buf = num_out_bufs; info.notify_buf_len = GSI_CTRL_NOTIFY_BUFF_LEN; /* export host's Ethernet address in CDC format */ diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h index 43aae8f6f2cc..0fe36659000a 100644 --- a/drivers/usb/gadget/function/f_gsi.h +++ b/drivers/usb/gadget/function/f_gsi.h @@ -37,8 +37,7 @@ #define GSI_NUM_IN_BUFFERS 15 #define GSI_IN_BUFF_SIZE 2048 -#define GSI_NUM_OUT_BUFFERS 15 -#define GSI_ECM_NUM_OUT_BUFFERS 31 +#define GSI_NUM_OUT_BUFFERS 14 #define GSI_OUT_AGGR_SIZE 24576 #define GSI_IN_RNDIS_AGGR_SIZE 9216 diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index fd09a1b4fcb8..ddd8f4d28235 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -58,7 +58,7 @@ enum gsi_ep_op { GSI_EP_OP_STORE_DBL_INFO, GSI_EP_OP_ENABLE_GSI, GSI_EP_OP_UPDATEXFER, - GSI_EP_OP_RING_IN_DB, + GSI_EP_OP_RING_DB, GSI_EP_OP_ENDXFER, GSI_EP_OP_GET_CH_INFO, GSI_EP_OP_GET_XFER_IDX, -- GitLab From dab40b4db04dad3d2212e2cd9bd56e8a79d43083 Mon Sep 17 00:00:00 2001 From: Sriharsha Allenki Date: Mon, 29 May 2017 14:52:40 +0530 Subject: [PATCH 161/786] usb: gsi: Don't disable endpoints as part of flow control Currently GSI endpoints are disabled when host enables flow control for RNDIS function. When these endpoints are enabled again as part of flow control disable, no transactions are observed on these endpoints. Fix this by not disabling the endpoints as part of flow control enable but just stop the active transfers on these endpoints. 
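Condensed, the new flow-control-enable handling keeps the endpoints configured and only quiesces them: block further doorbell rings, clear the IPA net-ready trigger, and end the in-flight transfers. A rough sketch of that sequence, condensed from the diff below (the helper name is invented; locking and error handling omitted):

/* Sketch of the EVT_HOST_NRDY handling introduced below: endpoints are
 * left enabled so traffic can resume once flow control is lifted. */
static void gsi_quiesce_on_host_nrdy(struct gsi_data_port *d_port)
{
        bool block_db = true;

        /* stop USB from ringing the GSI doorbell */
        usb_gsi_ep_op(d_port->in_ep, (void *)&block_db,
                      GSI_EP_OP_SET_CLR_BLOCK_DBL);
        /* clear net_ready_trigger so the data path re-arms later */
        gsi_rndis_ipa_reset_trigger(d_port);
        /* terminate active transfers on both endpoints */
        usb_gsi_ep_op(d_port->in_ep, NULL, GSI_EP_OP_ENDXFER);
        usb_gsi_ep_op(d_port->out_ep, NULL, GSI_EP_OP_ENDXFER);
}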
CRs-Fixed: 2050839 Change-Id: I391a7048188b2a63e2df993fcebf7a6e78eaef14 Signed-off-by: Sriharsha Allenki Signed-off-by: Mayank Rana --- drivers/usb/gadget/function/f_gsi.c | 50 +++++++++++++---------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c index 308a49c44793..6ff604f15f5a 100644 --- a/drivers/usb/gadget/function/f_gsi.c +++ b/drivers/usb/gadget/function/f_gsi.c @@ -40,6 +40,7 @@ MODULE_PARM_DESC(qti_packet_debug, "Print QTI Packet's Raw Data"); static struct workqueue_struct *ipa_usb_wq; +static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port); static void ipa_disconnect_handler(struct gsi_data_port *d_port); static int gsi_ctrl_send_notification(struct f_gsi *gsi); static int gsi_alloc_trb_buffer(struct f_gsi *gsi); @@ -503,14 +504,11 @@ static void ipa_disconnect_handler(struct gsi_data_port *d_port) */ usb_gsi_ep_op(d_port->in_ep, (void *)&block_db, GSI_EP_OP_SET_CLR_BLOCK_DBL); - gsi->in_ep_desc_backup = gsi->d_port.in_ep->desc; usb_gsi_ep_op(gsi->d_port.in_ep, NULL, GSI_EP_OP_DISABLE); } - if (gsi->d_port.out_ep) { - gsi->out_ep_desc_backup = gsi->d_port.out_ep->desc; + if (gsi->d_port.out_ep) usb_gsi_ep_op(gsi->d_port.out_ep, NULL, GSI_EP_OP_DISABLE); - } gsi->d_port.net_ready_trigger = false; } @@ -616,6 +614,7 @@ static void ipa_work_handler(struct work_struct *w) struct device *dev; struct device *gad_dev; struct f_gsi *gsi = d_port_to_gsi(d_port); + bool block_db; event = read_event(d_port); @@ -676,28 +675,6 @@ static void ipa_work_handler(struct work_struct *w) break; } - /* - * Update desc and reconfigure USB GSI OUT and IN - * endpoint for RNDIS Adaptor enable case. - */ - if (d_port->out_ep && !d_port->out_ep->desc && - gsi->out_ep_desc_backup) { - d_port->out_ep->desc = gsi->out_ep_desc_backup; - d_port->out_ep->ep_intr_num = 1; - log_event_dbg("%s: OUT ep_op_config", __func__); - usb_gsi_ep_op(d_port->out_ep, - &d_port->out_request, GSI_EP_OP_CONFIG); - } - - if (d_port->in_ep && !d_port->in_ep->desc && - gsi->in_ep_desc_backup) { - d_port->in_ep->desc = gsi->in_ep_desc_backup; - d_port->in_ep->ep_intr_num = 2; - log_event_dbg("%s: IN ep_op_config", __func__); - usb_gsi_ep_op(d_port->in_ep, - &d_port->in_request, GSI_EP_OP_CONFIG); - } - ipa_connect_channels(d_port); ipa_data_path_enable(d_port); d_port->sm_state = STATE_CONNECTED; @@ -759,7 +736,15 @@ static void ipa_work_handler(struct work_struct *w) if (event == EVT_HOST_NRDY) { log_event_dbg("%s: ST_CON_HOST_NRDY\n", __func__); - ipa_disconnect_handler(d_port); + block_db = true; + /* stop USB ringing doorbell to GSI(OUT_EP) */ + usb_gsi_ep_op(d_port->in_ep, (void *)&block_db, + GSI_EP_OP_SET_CLR_BLOCK_DBL); + gsi_rndis_ipa_reset_trigger(d_port); + usb_gsi_ep_op(d_port->in_ep, NULL, + GSI_EP_OP_ENDXFER); + usb_gsi_ep_op(d_port->out_ep, NULL, + GSI_EP_OP_ENDXFER); } ipa_disconnect_work_handler(d_port); @@ -1385,6 +1370,17 @@ static void gsi_rndis_open(struct f_gsi *gsi) rndis_signal_connect(gsi->params); } +static void gsi_rndis_ipa_reset_trigger(struct gsi_data_port *d_port) +{ + unsigned long flags; + struct f_gsi *gsi = d_port_to_gsi(d_port); + + log_event_dbg("%s: setting net_ready_trigger\n", __func__); + spin_lock_irqsave(&d_port->lock, flags); + d_port->net_ready_trigger = false; + spin_unlock_irqrestore(&d_port->lock, flags); +} + void gsi_rndis_flow_ctrl_enable(bool enable, struct rndis_params *param) { struct f_gsi *gsi = param->v; -- GitLab From 7ee990974867303e19d3d594c92c9452acdf0b80 Mon 
Sep 17 00:00:00 2001 From: Veera Sundaram Sankaran Date: Tue, 13 Jun 2017 11:19:36 -0700 Subject: [PATCH 162/786] drm/msm/sde: move crtc frame event handling to event thread Currently crtc frame events are handled by display thread, which might cause issues during suspend usecases. During suspend, the encoder disable waits for pingpong done and sends the frame done event to crtc which in turn reduces the frame_pending count. But this might not execute until the display thread is done with suspend. To avoid such cases and also to free up the display thread, move these events to the event thread. Add completion logic to synchronizes the frame_done event. Change-Id: I2166d051e0cf68f7a7434df42263c43f588c094d Signed-off-by: Veera Sundaram Sankaran --- drivers/gpu/drm/msm/sde/sde_crtc.c | 81 +++++++++++++++++++-------- drivers/gpu/drm/msm/sde/sde_crtc.h | 2 + drivers/gpu/drm/msm/sde/sde_encoder.c | 17 +----- drivers/gpu/drm/msm/sde/sde_encoder.h | 1 - drivers/gpu/drm/msm/sde/sde_kms.h | 4 ++ 5 files changed, 66 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index e70829023d5e..a67799e5229b 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -1517,7 +1517,6 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) struct sde_crtc_state *cstate; struct sde_kms *sde_kms; unsigned long flags; - bool disable_inprogress = false; if (!work) { SDE_ERROR("invalid work handle\n"); @@ -1543,9 +1542,6 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event, ktime_to_ns(fevent->ts)); - disable_inprogress = fevent->event & - SDE_ENCODER_FRAME_EVENT_DURING_DISABLE; - fevent->event &= ~SDE_ENCODER_FRAME_EVENT_DURING_DISABLE; if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE || (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR) || @@ -1566,15 +1562,17 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) ktime_to_ns(fevent->ts)); SDE_EVT32(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_CASE2); - if (!disable_inprogress) - sde_core_perf_crtc_release_bw(crtc); + sde_core_perf_crtc_release_bw(crtc); } else { SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_CASE3); } - if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE && - !disable_inprogress) + if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE || + (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)) + complete_all(&sde_crtc->frame_done_comp); + + if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE) sde_core_perf_crtc_update(crtc, 0, false); } else { SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id, @@ -1629,11 +1627,7 @@ static void sde_crtc_frame_event_cb(void *data, u32 event) fevent->event = event; fevent->crtc = crtc; fevent->ts = ktime_get(); - if (event & SDE_ENCODER_FRAME_EVENT_DURING_DISABLE) - sde_crtc_frame_event_work(&fevent->work); - else - kthread_queue_work(&priv->disp_thread[pipe_id].worker, - &fevent->work); + kthread_queue_work(&sde_crtc->event_worker, &fevent->work); } void sde_crtc_complete_commit(struct drm_crtc *crtc, @@ -2085,6 +2079,36 @@ static void sde_crtc_destroy_state(struct drm_crtc *crtc, cstate->property_values, cstate->property_blobs); } +static int _sde_crtc_wait_for_frame_done(struct drm_crtc *crtc) +{ + struct sde_crtc *sde_crtc; + int ret, rc = 0; + + if (!crtc) { + SDE_ERROR("invalid argument\n"); + return -EINVAL; + } + sde_crtc = to_sde_crtc(crtc); + + if (!atomic_read(&sde_crtc->frame_pending)) { + 
SDE_DEBUG("no frames pending\n"); + return 0; + } + + SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_ENTRY); + ret = wait_for_completion_timeout(&sde_crtc->frame_done_comp, + msecs_to_jiffies(SDE_FRAME_DONE_TIMEOUT)); + if (!ret) { + SDE_ERROR("frame done completion wait timed out, ret:%d\n", + ret); + SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FATAL); + rc = -ETIMEDOUT; + } + SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT); + + return rc; +} + void sde_crtc_commit_kickoff(struct drm_crtc *crtc) { struct drm_encoder *encoder; @@ -2129,19 +2153,21 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc) sde_encoder_prepare_for_kickoff(encoder, ¶ms); } - if (atomic_read(&sde_crtc->frame_pending) > 2) { - /* framework allows only 1 outstanding + current */ - SDE_ERROR("crtc%d invalid frame pending\n", - crtc->base.id); - SDE_EVT32(DRMID(crtc), 0); + /* wait for frame_event_done completion */ + if (_sde_crtc_wait_for_frame_done(crtc)) { + SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n", + crtc->base.id, + atomic_read(&sde_crtc->frame_pending)); goto end; - } else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) { + } + + if (atomic_inc_return(&sde_crtc->frame_pending) == 1) { /* acquire bandwidth and other resources */ SDE_DEBUG("crtc%d first commit\n", crtc->base.id); - SDE_EVT32(DRMID(crtc), 1); + SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE1); } else { SDE_DEBUG("crtc%d commit\n", crtc->base.id); - SDE_EVT32(DRMID(crtc), 2); + SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_CASE2); } sde_crtc->play_count++; @@ -2151,6 +2177,9 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc) sde_encoder_kickoff(encoder); } + + reinit_completion(&sde_crtc->frame_done_comp); + end: SDE_ATRACE_END("crtc_commit"); return; @@ -2444,6 +2473,12 @@ static void sde_crtc_disable(struct drm_crtc *crtc) mutex_lock(&sde_crtc->crtc_lock); SDE_EVT32(DRMID(crtc)); + /* wait for frame_event_done completion */ + if (_sde_crtc_wait_for_frame_done(crtc)) + SDE_ERROR("crtc%d wait for frame done failed;frame_pending%d\n", + crtc->base.id, + atomic_read(&sde_crtc->frame_pending)); + if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) { SDE_ERROR("crtc%d invalid vblank refcount\n", crtc->base.id); @@ -2455,8 +2490,6 @@ static void sde_crtc_disable(struct drm_crtc *crtc) } if (atomic_read(&sde_crtc->frame_pending)) { - /* release bandwidth and other resources */ - SDE_ERROR("crtc%d invalid frame pending\n", crtc->base.id); SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending), SDE_EVTLOG_FUNC_CASE2); sde_core_perf_crtc_release_bw(crtc); @@ -3714,6 +3747,8 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane) spin_lock_init(&sde_crtc->spin_lock); atomic_set(&sde_crtc->frame_pending, 0); + init_completion(&sde_crtc->frame_done_comp); + INIT_LIST_HEAD(&sde_crtc->frame_event_list); INIT_LIST_HEAD(&sde_crtc->user_event_list); for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) { diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 38311c1094f2..41121ee79e4e 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -135,6 +135,7 @@ struct sde_crtc_event { * @frame_events : static allocation of in-flight frame events * @frame_event_list : available frame event list * @spin_lock : spin lock for frame event, transaction status, etc... 
+ * @frame_done_comp : for frame_event_done synchronization * @event_thread : Pointer to event handler thread * @event_worker : Event worker queue * @event_cache : Local cache of event worker structures @@ -186,6 +187,7 @@ struct sde_crtc { struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE]; struct list_head frame_event_list; spinlock_t spin_lock; + struct completion frame_done_comp; /* for handling internal event thread */ struct task_struct *event_thread; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index e1caeaf1eb4a..b3c7c7f41fe0 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -56,9 +56,6 @@ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \ ##__VA_ARGS__) -/* timeout in frames waiting for frame done */ -#define SDE_ENCODER_FRAME_DONE_TIMEOUT 60 - /* * Two to anticipate panels that can do cmd/vid dynamic switching * plan is to create all possible physical encoder types, and switch between @@ -173,7 +170,6 @@ enum sde_enc_rc_states { * @rsc_cfg: rsc configuration * @cur_conn_roi: current connector roi * @prv_conn_roi: previous connector roi to optimize if unchanged - * @disable_inprogress: sde encoder disable is in progress. */ struct sde_encoder_virt { struct drm_encoder base; @@ -217,7 +213,6 @@ struct sde_encoder_virt { struct sde_encoder_rsc_config rsc_cfg; struct sde_rect cur_conn_roi; struct sde_rect prv_conn_roi; - bool disable_inprogress; }; #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base) @@ -1643,7 +1638,6 @@ static void sde_encoder_virt_enable(struct drm_encoder *drm_enc) SDE_EVT32(DRMID(drm_enc)); sde_enc->cur_master = NULL; - sde_enc->disable_inprogress = false; for (i = 0; i < sde_enc->num_phys_encs; i++) { struct sde_encoder_phys *phys = sde_enc->phys_encs[i]; @@ -1702,7 +1696,6 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc) priv = drm_enc->dev->dev_private; sde_kms = to_sde_kms(priv->kms); - sde_enc->disable_inprogress = true; SDE_EVT32(DRMID(drm_enc)); @@ -1868,9 +1861,6 @@ static void sde_encoder_frame_done_callback( sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_FRAME_DONE); - if (sde_enc->disable_inprogress) - event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE; - if (sde_enc->crtc_frame_event_cb) sde_enc->crtc_frame_event_cb( sde_enc->crtc_frame_event_cb_data, event); @@ -2332,7 +2322,7 @@ void sde_encoder_kickoff(struct drm_encoder *drm_enc) SDE_DEBUG_ENC(sde_enc, "\n"); atomic_set(&sde_enc->frame_done_timeout, - SDE_ENCODER_FRAME_DONE_TIMEOUT * 1000 / + SDE_FRAME_DONE_TIMEOUT * 1000 / drm_enc->crtc->state->adjusted_mode.vrefresh); mod_timer(&sde_enc->frame_done_timer, jiffies + ((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000)); @@ -2912,10 +2902,7 @@ static void sde_encoder_frame_done_timeout(unsigned long data) SDE_ERROR_ENC(sde_enc, "frame done timeout\n"); - event = SDE_ENCODER_FRAME_EVENT_ERROR; - if (sde_enc->disable_inprogress) - event |= SDE_ENCODER_FRAME_EVENT_DURING_DISABLE; - + event = SDE_ENCODER_FRAME_EVENT_ERROR; SDE_EVT32(DRMID(drm_enc), event); sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data, event); } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h index 0b14a58e1a66..bb77f44f5c6c 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder.h @@ -27,7 +27,6 @@ #define SDE_ENCODER_FRAME_EVENT_DONE BIT(0) #define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1) #define 
SDE_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2) -#define SDE_ENCODER_FRAME_EVENT_DURING_DISABLE BIT(3) /** * Encoder functions and data types diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index 058f19bca467..5894fe2af53b 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -88,6 +88,10 @@ #define SDE_NAME_SIZE 12 + +/* timeout in frames waiting for frame done */ +#define SDE_FRAME_DONE_TIMEOUT 60 + /* * struct sde_irq_callback - IRQ callback handlers * @list: list to callback -- GitLab From a979047cc1ef7e86dffdf6342295903d15015aaf Mon Sep 17 00:00:00 2001 From: Kyle Yan Date: Mon, 19 Jun 2017 15:01:20 -0700 Subject: [PATCH 163/786] Revert "sched: Remove synchronize rcu/sched calls from _cpu_down" This reverts commit c9e8afb80a36b40e1091d6d64c3e8bd6134f0a12. Removing the synchronization of rcu/sched calls from _cpu_down introduces a race where tasks may get queued on an inactive CPU and unthrottling cfs_rqs. Change-Id: I3a24314a403fc7352f506cda0016963621c79f6a Signed-off-by: Kyle Yan --- kernel/sched/core.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 30a1b34125a7..de1b3b7eedb4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8052,6 +8052,20 @@ int sched_cpu_deactivate(unsigned int cpu) int ret; set_cpu_active(cpu, false); + /* + * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU + * users of this state to go away such that all new such users will + * observe it. + * + * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might + * not imply sync_sched(), so wait for both. + * + * Do sync before park smpboot threads to take care the rcu boost case. + */ + if (IS_ENABLED(CONFIG_PREEMPT)) + synchronize_rcu_mult(call_rcu, call_rcu_sched); + else + synchronize_rcu(); if (!sched_smp_initialized) return 0; -- GitLab From 24466ac854df834094c06aec21cba2df8297a2ea Mon Sep 17 00:00:00 2001 From: David Collins Date: Tue, 20 Jun 2017 17:45:14 -0700 Subject: [PATCH 164/786] ARM: dts: msm: reduce VDD_APC0/1 CPR floor voltage to 568 mV for sdm845 Reduce the absolute CPR floor voltage to 568 mV for most corners of the VDD_APC0 power cluster and L3 as well as VDD_APC1 performance cluster cprh-regulator devices. This ensures intermediate corners do not have their closed-loop voltage artificially limited by the former floor voltages. This in turn leads to a reduction in power consumption on some devices. 
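The reasoning behind the floor reduction: the CPR controller clamps each corner's closed-loop target between that corner's floor and ceiling, so a floor left at, say, 632000 uV silently discards any closed-loop reduction below it, while a 568000 uV floor lets the full reduction take effect. A toy illustration of the clamping (not the actual cprh-regulator code; the helper and values are made up):

/* Toy example: a 590000 uV closed-loop target is clamped up to 632000 uV
 * by the old floor but passes through unchanged with a 568000 uV floor. */
static int cpr_clamp_corner_uv(int target_uv, int floor_uv, int ceiling_uv)
{
        if (target_uv < floor_uv)
                target_uv = floor_uv;
        if (target_uv > ceiling_uv)
                target_uv = ceiling_uv;
        return target_uv;
}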
Change-Id: Ie816c403b7752b7a56572e612168304d394f2c4b Signed-off-by: David Collins --- .../arm64/boot/dts/qcom/sdm845-regulator.dtsi | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi index 7befe3bdecbd..2e50846362af 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi @@ -131,19 +131,19 @@ qcom,cpr-voltage-floor = /* Speed bin 0 */ <568000 568000 568000 568000 568000 - 568000 568000 568000 568000 584000 - 584000 584000 632000 632000 632000 - 632000 672000 996000 996000>, + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000 568000 + 568000 568000 996000 996000>, /* Speed bin 1 */ <568000 568000 568000 568000 568000 - 568000 568000 568000 568000 584000 - 584000 584000 632000 632000 632000 - 632000 672000 712000 712000>, + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000>, /* Speed bin 2 */ <568000 568000 568000 568000 568000 - 568000 568000 568000 568000 584000 - 584000 584000 632000 632000 632000 - 632000 672000 712000 712000>; + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000>; qcom,cpr-floor-to-ceiling-max-range = <32000 32000 32000 32000 32000 @@ -265,16 +265,16 @@ qcom,cpr-voltage-floor = /* Speed bin 0 */ <568000 568000 568000 568000 568000 - 584000 584000 632000 672000 996000 + 568000 568000 568000 568000 996000 996000>, /* Speed bin 1 */ <568000 568000 568000 568000 568000 - 584000 584000 632000 672000 712000 - 712000>, + 568000 568000 568000 568000 568000 + 568000>, /* Speed bin 2 */ <568000 568000 568000 568000 568000 - 584000 584000 632000 672000 712000 - 712000 712000 712000>; + 568000 568000 568000 568000 568000 + 568000 568000 568000>; qcom,cpr-floor-to-ceiling-max-range = /* Speed bin 0 */ @@ -453,22 +453,22 @@ /* Speed bin 0 */ <568000 568000 568000 568000 568000 568000 568000 568000 568000 568000 - 584000 584000 632000 632000 632000 - 632000 632000 672000 712000 712000 - 772000 772000>, + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000 568000 + 568000 568000>, /* Speed bin 1 */ <568000 568000 568000 568000 568000 568000 568000 568000 568000 568000 - 584000 584000 632000 632000 632000 - 632000 632000 672000 712000 712000 - 772000 772000 772000 772000>, + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000>, /* Speed bin 2 */ <568000 568000 568000 568000 568000 568000 568000 568000 568000 568000 - 584000 584000 632000 632000 632000 - 632000 632000 672000 712000 712000 - 772000 772000 772000 772000 - 772000>; + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000 568000 + 568000 568000 568000 568000 + 568000>; qcom,cpr-floor-to-ceiling-max-range = /* Speed bin 0 */ -- GitLab From 2c748e627b9922461f32e50a66530fc857754c6a Mon Sep 17 00:00:00 2001 From: Veera Sundaram Sankaran Date: Tue, 13 Jun 2017 17:01:48 -0700 Subject: [PATCH 165/786] drm/msm/sde: move release fence to crtc frame done event for cmd mode Currently, crtc output_fence is released during commit_complete. Release the fence early in frame_done crtc event for cmd mode panels to increase performance, as the buffers are no longer required after PPDONE. 
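The decision boils down to: if every encoder driving the CRTC runs in command mode, the output buffers are no longer referenced once PPDONE fires, so the release fence can be signalled from the frame-done event instead of waiting for commit-complete. A simplified sketch of that check, condensed from the diff below (the helper name is invented; locking and event bookkeeping omitted):

/* True only when every encoder attached to this CRTC is a command-mode
 * encoder, i.e. its buffers are free as soon as PPDONE is seen. */
static bool crtc_can_signal_at_frame_done(struct drm_crtc *crtc)
{
        struct drm_encoder *encoder;
        bool all_cmd_mode = true;

        drm_for_each_encoder(encoder, crtc->dev) {
                if (encoder->crtc != crtc)
                        continue;
                all_cmd_mode &= sde_encoder_is_cmd_mode(encoder);
        }

        return all_cmd_mode;
}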
Change-Id: Ie711adeee20d10530508bd11e2fa04a599e1f9c1 Signed-off-by: Veera Sundaram Sankaran --- drivers/gpu/drm/msm/sde/sde_crtc.c | 36 ++++++++++++++++++++++++--- drivers/gpu/drm/msm/sde/sde_encoder.c | 16 ++++++++++++ drivers/gpu/drm/msm/sde/sde_encoder.h | 7 ++++++ 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index a67799e5229b..bcdd01b0683f 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -1516,6 +1516,7 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; struct sde_kms *sde_kms; + struct drm_encoder *encoder; unsigned long flags; if (!work) { @@ -1569,8 +1570,26 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) } if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE || - (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)) + (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)) { + bool signal_fence = true; + + drm_for_each_encoder(encoder, crtc->dev) { + if (encoder->crtc != crtc) + continue; + + signal_fence &= + sde_encoder_is_cmd_mode(encoder); + } + + /* signal release fence only for cmd mode panels here */ + if (signal_fence) { + sde_fence_signal(&sde_crtc->output_fence, 0); + SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event, + SDE_EVTLOG_FUNC_CASE4); + } + complete_all(&sde_crtc->frame_done_comp); + } if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE) sde_core_perf_crtc_update(crtc, 0, false); @@ -1635,7 +1654,9 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc, { struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; + struct drm_encoder *encoder; int i; + bool signal_fence = true; if (!crtc || !crtc->state) { SDE_ERROR("invalid crtc\n"); @@ -1646,9 +1667,18 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc, cstate = to_sde_crtc_state(crtc->state); SDE_EVT32_VERBOSE(DRMID(crtc)); - /* signal output fence(s) at end of commit */ - sde_fence_signal(&sde_crtc->output_fence, 0); + drm_for_each_encoder(encoder, crtc->dev) { + if (encoder->crtc != crtc) + continue; + + signal_fence &= !sde_encoder_is_cmd_mode(encoder); + } + + /* signal release fence for non-cmd mode panels */ + if (signal_fence) + sde_fence_signal(&sde_crtc->output_fence, 0); + /* signal retire fence */ for (i = 0; i < cstate->num_connectors; ++i) sde_connector_complete_commit(cstate->connectors[i]); } diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index b3c7c7f41fe0..0b4dd825b39b 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -2214,6 +2214,22 @@ static void _sde_encoder_update_master(struct drm_encoder *drm_enc, } } +bool sde_encoder_is_cmd_mode(struct drm_encoder *drm_enc) +{ + struct sde_encoder_virt *sde_enc; + struct msm_display_info *disp_info; + + if (!drm_enc) { + SDE_ERROR("invalid encoder\n"); + return false; + } + + sde_enc = to_sde_encoder_virt(drm_enc); + disp_info = &sde_enc->disp_info; + + return (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE); +} + void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) { struct sde_encoder_virt *sde_enc; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h index bb77f44f5c6c..9c2d3e9f068a 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder.h @@ -172,6 +172,13 @@ bool sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc); */ bool 
sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc); +/** + * sde_encoder_is_cmd_mode - check if it is cmd mode + * @drm_enc: Pointer to drm encoder object + * @Return: true if it is cmd mode + */ +bool sde_encoder_is_cmd_mode(struct drm_encoder *drm_enc); + /** * sde_encoder_init - initialize virtual encoder object * @dev: Pointer to drm device structure -- GitLab From 10ea2bdd445a00771e1cb77c76adb3c6c97454db Mon Sep 17 00:00:00 2001 From: Veera Sundaram Sankaran Date: Wed, 14 Jun 2017 14:10:57 -0700 Subject: [PATCH 166/786] drm/msm/sde: move vblank to event thread Currently vblank is queued as part of display thread and this would cause vblank to be serviced very late if the display thread is busy with commit. Move the crtc event thread to msm level so that, it can be used for crtc frame/custom events and vblank. Change-Id: Ifd1f6bc6f80209e7e5a9f945da7a5d075f029a61 Signed-off-by: Veera Sundaram Sankaran --- drivers/gpu/drm/msm/msm_drv.c | 51 +++++++++++++++++++++++++----- drivers/gpu/drm/msm/msm_drv.h | 7 ++-- drivers/gpu/drm/msm/sde/sde_crtc.c | 36 +++++++-------------- drivers/gpu/drm/msm/sde/sde_crtc.h | 2 -- 4 files changed, 59 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 747d9a624243..d4a270e9a014 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -304,7 +304,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv, list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list); spin_unlock_irqrestore(&vbl_ctrl->lock, flags); - kthread_queue_work(&priv->disp_thread[crtc_id].worker, &vbl_ctrl->work); + kthread_queue_work(&priv->event_thread[crtc_id].worker, + &vbl_ctrl->work); return 0; } @@ -330,13 +331,19 @@ static int msm_drm_uninit(struct device *dev) kfree(vbl_ev); } - /* clean up display commit worker threads */ + /* clean up display commit/event worker threads */ for (i = 0; i < priv->num_crtcs; i++) { if (priv->disp_thread[i].thread) { kthread_flush_worker(&priv->disp_thread[i].worker); kthread_stop(priv->disp_thread[i].thread); priv->disp_thread[i].thread = NULL; } + + if (priv->event_thread[i].thread) { + kthread_flush_worker(&priv->event_thread[i].worker); + kthread_stop(priv->event_thread[i].thread); + priv->event_thread[i].thread = NULL; + } } msm_gem_shrinker_cleanup(ddev); @@ -637,22 +644,50 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv) ddev->mode_config.funcs = &mode_config_funcs; for (i = 0; i < priv->num_crtcs; i++) { + + /* initialize display thread */ priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id; kthread_init_worker(&priv->disp_thread[i].worker); priv->disp_thread[i].dev = ddev; priv->disp_thread[i].thread = kthread_run(kthread_worker_fn, &priv->disp_thread[i].worker, - "crtc_commit:%d", - priv->disp_thread[i].crtc_id); + "crtc_commit:%d", priv->disp_thread[i].crtc_id); if (IS_ERR(priv->disp_thread[i].thread)) { - dev_err(dev, "failed to create kthread\n"); + dev_err(dev, "failed to create crtc_commit kthread\n"); priv->disp_thread[i].thread = NULL; + } + + /* initialize event thread */ + priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; + kthread_init_worker(&priv->event_thread[i].worker); + priv->event_thread[i].dev = ddev; + priv->event_thread[i].thread = + kthread_run(kthread_worker_fn, + &priv->event_thread[i].worker, + "crtc_event:%d", priv->event_thread[i].crtc_id); + + if (IS_ERR(priv->event_thread[i].thread)) { + dev_err(dev, "failed to create crtc_event kthread\n"); + priv->event_thread[i].thread = NULL; + } + + if 
((!priv->disp_thread[i].thread) || + !priv->event_thread[i].thread) { /* clean up previously created threads if any */ - for (i -= 1; i >= 0; i--) { - kthread_stop(priv->disp_thread[i].thread); - priv->disp_thread[i].thread = NULL; + for ( ; i >= 0; i--) { + if (priv->disp_thread[i].thread) { + kthread_stop( + priv->disp_thread[i].thread); + priv->disp_thread[i].thread = NULL; + } + + if (priv->event_thread[i].thread) { + kthread_stop( + priv->event_thread[i].thread); + priv->event_thread[i].thread = NULL; + } } goto fail; } diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 77dde559aa71..c697710e0f85 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -471,8 +471,8 @@ struct msm_drm_event { u8 data[]; }; -/* Commit thread specific structure */ -struct msm_drm_commit { +/* Commit/Event thread specific structure */ +struct msm_drm_thread { struct drm_device *dev; struct task_struct *thread; unsigned int crtc_id; @@ -536,7 +536,8 @@ struct msm_drm_private { unsigned int num_crtcs; struct drm_crtc *crtcs[MAX_CRTCS]; - struct msm_drm_commit disp_thread[MAX_CRTCS]; + struct msm_drm_thread disp_thread[MAX_CRTCS]; + struct msm_drm_thread event_thread[MAX_CRTCS]; unsigned int num_encoders; struct drm_encoder *encoders[MAX_ENCODERS]; diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index bcdd01b0683f..4caa61d4904c 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -494,12 +494,6 @@ static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc) { if (!sde_crtc) return; - - if (sde_crtc->event_thread) { - kthread_flush_worker(&sde_crtc->event_worker); - kthread_stop(sde_crtc->event_thread); - sde_crtc->event_thread = NULL; - } } static void sde_crtc_destroy(struct drm_crtc *crtc) @@ -1616,7 +1610,7 @@ static void sde_crtc_frame_event_cb(void *data, u32 event) struct msm_drm_private *priv; struct sde_crtc_frame_event *fevent; unsigned long flags; - int pipe_id; + u32 crtc_id; if (!crtc || !crtc->dev || !crtc->dev->dev_private) { SDE_ERROR("invalid parameters\n"); @@ -1624,7 +1618,7 @@ static void sde_crtc_frame_event_cb(void *data, u32 event) } sde_crtc = to_sde_crtc(crtc); priv = crtc->dev->dev_private; - pipe_id = drm_crtc_index(crtc); + crtc_id = drm_crtc_index(crtc); SDE_DEBUG("crtc%d\n", crtc->base.id); SDE_EVT32_VERBOSE(DRMID(crtc), event); @@ -1646,7 +1640,7 @@ static void sde_crtc_frame_event_cb(void *data, u32 event) fevent->event = event; fevent->crtc = crtc; fevent->ts = ktime_get(); - kthread_queue_work(&sde_crtc->event_worker, &fevent->work); + kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work); } void sde_crtc_complete_commit(struct drm_crtc *crtc, @@ -3687,14 +3681,18 @@ int sde_crtc_event_queue(struct drm_crtc *crtc, { unsigned long irq_flags; struct sde_crtc *sde_crtc; + struct msm_drm_private *priv; struct sde_crtc_event *event = NULL; + u32 crtc_id; - if (!crtc || !func) + if (!crtc || !crtc->dev || !crtc->dev->dev_private || !func) { + SDE_ERROR("invalid parameters\n"); return -EINVAL; + } sde_crtc = to_sde_crtc(crtc); + priv = crtc->dev->dev_private; + crtc_id = drm_crtc_index(crtc); - if (!sde_crtc->event_thread) - return -EINVAL; /* * Obtain an event struct from the private cache. 
This event * queue may be called from ISR contexts, so use a private @@ -3718,7 +3716,8 @@ int sde_crtc_event_queue(struct drm_crtc *crtc, /* queue new event request */ kthread_init_work(&event->kt_work, _sde_crtc_event_cb); - kthread_queue_work(&sde_crtc->event_worker, &event->kt_work); + kthread_queue_work(&priv->event_thread[crtc_id].worker, + &event->kt_work); return 0; } @@ -3739,17 +3738,6 @@ static int _sde_crtc_init_events(struct sde_crtc *sde_crtc) list_add_tail(&sde_crtc->event_cache[i].list, &sde_crtc->event_free_list); - kthread_init_worker(&sde_crtc->event_worker); - sde_crtc->event_thread = kthread_run(kthread_worker_fn, - &sde_crtc->event_worker, "crtc_event:%d", - sde_crtc->base.base.id); - - if (IS_ERR_OR_NULL(sde_crtc->event_thread)) { - SDE_ERROR("failed to create event thread\n"); - rc = PTR_ERR(sde_crtc->event_thread); - sde_crtc->event_thread = NULL; - } - return rc; } diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 41121ee79e4e..dcef05b730d2 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -190,8 +190,6 @@ struct sde_crtc { struct completion frame_done_comp; /* for handling internal event thread */ - struct task_struct *event_thread; - struct kthread_worker event_worker; struct sde_crtc_event event_cache[SDE_CRTC_MAX_EVENT_COUNT]; struct list_head event_free_list; spinlock_t event_lock; -- GitLab From 85829ac72c47558b901aa992e1a9ac181646ad74 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Tue, 20 Jun 2017 17:35:20 +0800 Subject: [PATCH 167/786] Revert "mmc: core: extend SDR104 workaround for other paths" Commit 8cd402a56c2d ("mmc: core: extend SDR104 workaround for other paths") would cause SD card remove detect malfunction. This change reverts it. Change-Id: I2a5ecebc8c355ff2b12c72596cb96d0f4f4b819e Signed-off-by: Can Guo --- drivers/mmc/card/block.c | 5 +--- drivers/mmc/core/core.c | 58 +++++++--------------------------------- include/linux/mmc/core.h | 1 - 3 files changed, 11 insertions(+), 53 deletions(-) diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 9ac6568dac62..d8e959953e2d 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1718,8 +1718,6 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, /* We couldn't get a response from the card. Give up. 
*/ if (err) { - if (card->err_in_sdr104) - return ERR_RETRY; /* Check if the card is removed */ if (mmc_detect_card_removed(card->host)) return ERR_NOMEDIUM; @@ -2210,8 +2208,7 @@ static int mmc_blk_err_check(struct mmc_card *card, brq->data.error == -ETIMEDOUT || brq->cmd.error == -EILSEQ || brq->cmd.error == -EIO || - brq->cmd.error == -ETIMEDOUT || - brq->sbc.error)) + brq->cmd.error == -ETIMEDOUT)) card->err_in_sdr104 = true; /* diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 8b1b0a0850df..e18be6b1ef2f 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -456,22 +456,6 @@ int mmc_clk_update_freq(struct mmc_host *host, } EXPORT_SYMBOL(mmc_clk_update_freq); -void mmc_recovery_fallback_lower_speed(struct mmc_host *host) -{ - if (!host->card) - return; - - if (host->sdr104_wa && mmc_card_sd(host->card) && - (host->ios.timing == MMC_TIMING_UHS_SDR104) && - !host->card->sdr104_blocked) { - pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n", - mmc_hostname(host), __func__); - mmc_host_clear_sdr104(host); - mmc_hw_reset(host); - host->card->sdr104_blocked = true; - } -} - static int mmc_devfreq_set_target(struct device *dev, unsigned long *freq, u32 devfreq_flags) { @@ -523,9 +507,6 @@ static int mmc_devfreq_set_target(struct device *dev, if (abort) goto out; - if (mmc_card_sd(host->card) && host->card->sdr104_blocked) - goto rel_host; - /* * In case we were able to claim host there is no need to * defer the frequency change. It will be done now @@ -534,18 +515,15 @@ static int mmc_devfreq_set_target(struct device *dev, mmc_host_clk_hold(host); err = mmc_clk_update_freq(host, *freq, clk_scaling->state); - if (err && err != -EAGAIN) { + if (err && err != -EAGAIN) pr_err("%s: clock scale to %lu failed with error %d\n", mmc_hostname(host), *freq, err); - mmc_recovery_fallback_lower_speed(host); - } else { + else pr_debug("%s: clock change to %lu finished successfully (%s)\n", mmc_hostname(host), *freq, current->comm); - } mmc_host_clk_release(host); -rel_host: mmc_release_host(host); out: return err; @@ -566,9 +544,6 @@ void mmc_deferred_scaling(struct mmc_host *host) if (!host->clk_scaling.enable) return; - if (mmc_card_sd(host->card) && host->card->sdr104_blocked) - return; - spin_lock_bh(&host->clk_scaling.lock); if (host->clk_scaling.clk_scaling_in_progress || @@ -589,15 +564,13 @@ void mmc_deferred_scaling(struct mmc_host *host) err = mmc_clk_update_freq(host, target_freq, host->clk_scaling.state); - if (err && err != -EAGAIN) { + if (err && err != -EAGAIN) pr_err("%s: failed on deferred scale clocks (%d)\n", mmc_hostname(host), err); - mmc_recovery_fallback_lower_speed(host); - } else { + else pr_debug("%s: clocks were successfully scaled to %lu (%s)\n", mmc_hostname(host), target_freq, current->comm); - } host->clk_scaling.clk_scaling_in_progress = false; atomic_dec(&host->clk_scaling.devfreq_abort); } @@ -1567,13 +1540,8 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq) } } if (!cmd->error || !cmd->retries || - mmc_card_removed(host->card)) { - if (cmd->error && !cmd->retries && - cmd->opcode != MMC_SEND_STATUS && - cmd->opcode != MMC_SEND_TUNING_BLOCK) - mmc_recovery_fallback_lower_speed(host); + mmc_card_removed(host->card)) break; - } mmc_retune_recheck(host); @@ -4228,18 +4196,12 @@ int _mmc_detect_card_removed(struct mmc_host *host) } if (ret) { - if (host->ops->get_cd && host->ops->get_cd(host)) { - mmc_recovery_fallback_lower_speed(host); - ret = 0; - } else { - mmc_card_set_removed(host->card); - 
if (host->card->sdr104_blocked) { - mmc_host_set_sdr104(host); - host->card->sdr104_blocked = false; - } - pr_debug("%s: card remove detected\n", - mmc_hostname(host)); + mmc_card_set_removed(host->card); + if (host->card->sdr104_blocked) { + mmc_host_set_sdr104(host); + host->card->sdr104_blocked = false; } + pr_debug("%s: card remove detected\n", mmc_hostname(host)); } return ret; diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 227b1e2befcf..959414b0b1e4 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -232,7 +232,6 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed); extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed, bool is_cmdq_dcmd); -extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host); /** * mmc_claim_host - exclusively claim a host -- GitLab From 988dec1511a4d0eb13c47086a76e71c9ec5e4e70 Mon Sep 17 00:00:00 2001 From: Xiaoyu Ye Date: Fri, 16 Jun 2017 16:38:51 -0700 Subject: [PATCH 168/786] ASoC: sdm845: Add mixer controls for MI2S bit format MI2S ports supports multiple bit formats for Rx and Tx paths. Add new mixer controls for supporting bit format for Rx and Tx paths of all 4 MI2S ports in SDM845 target. The bit format for the MI2S ports will be used to setup Backend DAI configuration through fixup function. Change-Id: Ia94f8a6c1d27782fd30b00f7df6918ede47dec58 Signed-off-by: Xiaoyu Ye --- sound/soc/msm/sdm845.c | 155 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 155 insertions(+) diff --git a/sound/soc/msm/sdm845.c b/sound/soc/msm/sdm845.c index e6997604ef04..d3c4e05c65c8 100644 --- a/sound/soc/msm/sdm845.c +++ b/sound/soc/msm/sdm845.c @@ -493,6 +493,8 @@ static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_chs, mi2s_ch_text); static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_chs, mi2s_ch_text); static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_chs, mi2s_ch_text); static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_chs, mi2s_ch_text); +static SOC_ENUM_SINGLE_EXT_DECL(mi2s_rx_format, bit_format_text); +static SOC_ENUM_SINGLE_EXT_DECL(mi2s_tx_format, bit_format_text); static SOC_ENUM_SINGLE_EXT_DECL(hifi_function, hifi_text); static struct platform_device *spdev; @@ -2250,6 +2252,54 @@ static int mi2s_get_sample_rate(int value) return sample_rate; } +static int mi2s_get_format(int value) +{ + int format; + + switch (value) { + case 0: + format = SNDRV_PCM_FORMAT_S16_LE; + break; + case 1: + format = SNDRV_PCM_FORMAT_S24_LE; + break; + case 2: + format = SNDRV_PCM_FORMAT_S24_3LE; + break; + case 3: + format = SNDRV_PCM_FORMAT_S32_LE; + break; + default: + format = SNDRV_PCM_FORMAT_S16_LE; + break; + } + return format; +} + +static int mi2s_get_format_value(int format) +{ + int value; + + switch (format) { + case SNDRV_PCM_FORMAT_S16_LE: + value = 0; + break; + case SNDRV_PCM_FORMAT_S24_LE: + value = 1; + break; + case SNDRV_PCM_FORMAT_S24_3LE: + value = 2; + break; + case SNDRV_PCM_FORMAT_S32_LE: + value = 3; + break; + default: + value = 0; + break; + } + return value; +} + static int mi2s_rx_sample_rate_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -2382,6 +2432,78 @@ static int msm_mi2s_tx_ch_put(struct snd_kcontrol *kcontrol, return 1; } +static int msm_mi2s_rx_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + int idx = mi2s_get_port_idx(kcontrol); + + if (idx < 0) + return idx; + + ucontrol->value.enumerated.item[0] = + mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format); + + pr_debug("%s: 
idx[%d]_rx_format = %d, item = %d\n", __func__, + idx, mi2s_rx_cfg[idx].bit_format, + ucontrol->value.enumerated.item[0]); + + return 0; +} + +static int msm_mi2s_rx_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + int idx = mi2s_get_port_idx(kcontrol); + + if (idx < 0) + return idx; + + mi2s_rx_cfg[idx].bit_format = + mi2s_get_format(ucontrol->value.enumerated.item[0]); + + pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__, + idx, mi2s_rx_cfg[idx].bit_format, + ucontrol->value.enumerated.item[0]); + + return 0; +} + +static int msm_mi2s_tx_format_get(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + int idx = mi2s_get_port_idx(kcontrol); + + if (idx < 0) + return idx; + + ucontrol->value.enumerated.item[0] = + mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format); + + pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__, + idx, mi2s_tx_cfg[idx].bit_format, + ucontrol->value.enumerated.item[0]); + + return 0; +} + +static int msm_mi2s_tx_format_put(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + int idx = mi2s_get_port_idx(kcontrol); + + if (idx < 0) + return idx; + + mi2s_tx_cfg[idx].bit_format = + mi2s_get_format(ucontrol->value.enumerated.item[0]); + + pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__, + idx, mi2s_tx_cfg[idx].bit_format, + ucontrol->value.enumerated.item[0]); + + return 0; +} + static int msm_hifi_ctrl(struct snd_soc_codec *codec) { struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec); @@ -2627,6 +2749,22 @@ static const struct snd_kcontrol_new msm_snd_controls[] = { msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put), SOC_ENUM_EXT("QUAT_MI2S_TX Channels", quat_mi2s_tx_chs, msm_mi2s_tx_ch_get, msm_mi2s_tx_ch_put), + SOC_ENUM_EXT("PRIM_MI2S_RX Format", mi2s_rx_format, + msm_mi2s_rx_format_get, msm_mi2s_rx_format_put), + SOC_ENUM_EXT("PRIM_MI2S_TX Format", mi2s_tx_format, + msm_mi2s_tx_format_get, msm_mi2s_tx_format_put), + SOC_ENUM_EXT("SEC_MI2S_RX Format", mi2s_rx_format, + msm_mi2s_rx_format_get, msm_mi2s_rx_format_put), + SOC_ENUM_EXT("SEC_MI2S_TX Format", mi2s_tx_format, + msm_mi2s_tx_format_get, msm_mi2s_tx_format_put), + SOC_ENUM_EXT("TERT_MI2S_RX Format", mi2s_rx_format, + msm_mi2s_rx_format_get, msm_mi2s_rx_format_put), + SOC_ENUM_EXT("TERT_MI2S_TX Format", mi2s_tx_format, + msm_mi2s_tx_format_get, msm_mi2s_tx_format_put), + SOC_ENUM_EXT("QUAT_MI2S_RX Format", mi2s_rx_format, + msm_mi2s_rx_format_get, msm_mi2s_rx_format_put), + SOC_ENUM_EXT("QUAT_MI2S_TX Format", mi2s_tx_format, + msm_mi2s_tx_format_get, msm_mi2s_tx_format_put), SOC_ENUM_EXT("HiFi Function", hifi_function, msm_hifi_get, msm_hifi_put), }; @@ -3052,48 +3190,64 @@ static int msm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, break; case MSM_BACKEND_DAI_PRI_MI2S_RX: + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + mi2s_rx_cfg[PRIM_MI2S].bit_format); rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate; channels->min = channels->max = mi2s_rx_cfg[PRIM_MI2S].channels; break; case MSM_BACKEND_DAI_PRI_MI2S_TX: + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + mi2s_tx_cfg[PRIM_MI2S].bit_format); rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate; channels->min = channels->max = mi2s_tx_cfg[PRIM_MI2S].channels; break; case MSM_BACKEND_DAI_SECONDARY_MI2S_RX: + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + mi2s_rx_cfg[SEC_MI2S].bit_format); rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate; channels->min = channels->max = 
mi2s_rx_cfg[SEC_MI2S].channels; break; case MSM_BACKEND_DAI_SECONDARY_MI2S_TX: + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + mi2s_tx_cfg[SEC_MI2S].bit_format); rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate; channels->min = channels->max = mi2s_tx_cfg[SEC_MI2S].channels; break; case MSM_BACKEND_DAI_TERTIARY_MI2S_RX: + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + mi2s_rx_cfg[TERT_MI2S].bit_format); rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate; channels->min = channels->max = mi2s_rx_cfg[TERT_MI2S].channels; break; case MSM_BACKEND_DAI_TERTIARY_MI2S_TX: + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + mi2s_tx_cfg[TERT_MI2S].bit_format); rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate; channels->min = channels->max = mi2s_tx_cfg[TERT_MI2S].channels; break; case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX: + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + mi2s_rx_cfg[QUAT_MI2S].bit_format); rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate; channels->min = channels->max = mi2s_rx_cfg[QUAT_MI2S].channels; break; case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX: + param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, + mi2s_tx_cfg[QUAT_MI2S].bit_format); rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate; channels->min = channels->max = mi2s_tx_cfg[QUAT_MI2S].channels; @@ -3855,6 +4009,7 @@ static u32 get_mi2s_bits_per_sample(u32 bit_format) u32 bit_per_sample; switch (bit_format) { + case SNDRV_PCM_FORMAT_S32_LE: case SNDRV_PCM_FORMAT_S24_3LE: case SNDRV_PCM_FORMAT_S24_LE: bit_per_sample = 32; -- GitLab From d0d638418e5dabec717ad07d6131fa3f81f91849 Mon Sep 17 00:00:00 2001 From: Maheshwar Ajja Date: Mon, 19 Jun 2017 17:50:42 -0700 Subject: [PATCH 169/786] [media] videobuf2-v4l2.c: send data_offset field to vb2 framework Clients may use different data offset values for each v4l2 buffer. So send data_offset value along with other parameters to vb2 framework for drivers to use the client set data_offset value. CRs-Fixed: 2064048 Change-Id: I5469deec44c11bde5700b2271d923d50ae525d62 Signed-off-by: Maheshwar Ajja --- drivers/media/v4l2-core/videobuf2-v4l2.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c index 52ef8833f6b6..a29ddca1fc8b 100644 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c @@ -330,6 +330,8 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, b->m.planes[plane].m.userptr; planes[plane].length = b->m.planes[plane].length; + planes[plane].data_offset = + b->m.planes[plane].data_offset; } } if (b->memory == VB2_MEMORY_DMABUF) { @@ -338,6 +340,8 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, b->m.planes[plane].m.fd; planes[plane].length = b->m.planes[plane].length; + planes[plane].data_offset = + b->m.planes[plane].data_offset; } } -- GitLab From c6407c023f8648fb42693793fb55272261fdb7ae Mon Sep 17 00:00:00 2001 From: Maheshwar Ajja Date: Fri, 9 Jun 2017 18:53:20 -0700 Subject: [PATCH 170/786] msm: vidc: Simplify buffer map and unmap sequence Video driver will map the buffer twice in ETB/FTB. First map will call iommu_map and following maps will just increment the refcount. First unmap will happen in EBD/FBD and second unmap will happen if READONLY flag is not present in EBD/FBD. If READONLY flag is not present in EBD/FBD then video hardware will send RBR event (Release Buffer Reference) to driver where second unmap will happen. 
Though video driver calls iommu_unmap for second unmap the buffer is not actually unmapped in iommu driver as video buffers are mapped using late unmap feature. The buffer will get unmapped in iommu driver when it is freed. Change-Id: Ic0043ef97146e3b1081f2fc0fc3da715396be1a0 Signed-off-by: Maheshwar Ajja --- .../platform/msm/vidc/hfi_packetization.c | 5 - .../platform/msm/vidc/hfi_response_handler.c | 11 +- drivers/media/platform/msm/vidc/msm_smem.c | 571 ++++-- drivers/media/platform/msm/vidc/msm_vidc.c | 858 ++------ .../media/platform/msm/vidc/msm_vidc_clocks.c | 28 +- .../media/platform/msm/vidc/msm_vidc_clocks.h | 2 +- .../media/platform/msm/vidc/msm_vidc_common.c | 1802 ++++++++++------- .../media/platform/msm/vidc/msm_vidc_common.h | 52 +- .../media/platform/msm/vidc/msm_vidc_debug.c | 22 +- .../platform/msm/vidc/msm_vidc_internal.h | 65 +- drivers/media/platform/msm/vidc/venus_hfi.c | 94 +- drivers/media/platform/msm/vidc/venus_hfi.h | 4 +- .../media/platform/msm/vidc/vidc_hfi_api.h | 26 +- include/media/msm_vidc.h | 12 +- 14 files changed, 1728 insertions(+), 1824 deletions(-) diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c index 8d54e206160a..40c306d50359 100644 --- a/drivers/media/platform/msm/vidc/hfi_packetization.c +++ b/drivers/media/platform/msm/vidc/hfi_packetization.c @@ -10,9 +10,6 @@ * GNU General Public License for more details. * */ -#include -#include -#include #include "hfi_packetization.h" #include "msm_vidc_debug.h" @@ -868,8 +865,6 @@ int create_pkt_cmd_session_ftb(struct hfi_cmd_session_fill_buffer_packet *pkt, output_frame->device_addr, output_frame->timestamp, output_frame->alloc_len, output_frame->filled_len, output_frame->offset); - dprintk(VIDC_DBG, "### Q OUTPUT BUFFER ###: %d, %d, %d\n", - pkt->alloc_len, pkt->filled_len, pkt->offset); return rc; } diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c index 08cb055a0438..4fb65a2cabc1 100644 --- a/drivers/media/platform/msm/vidc/hfi_response_handler.c +++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c @@ -1329,10 +1329,8 @@ static int hfi_process_session_etb_done(u32 device_id, pkt->ubwc_cr_stats.complexity_number; data_done.input_done.offset = pkt->offset; data_done.input_done.filled_len = pkt->filled_len; - data_done.input_done.packet_buffer = - (ion_phys_addr_t)pkt->packet_buffer; - data_done.input_done.extra_data_buffer = - (ion_phys_addr_t)pkt->extra_data_buffer; + data_done.input_done.packet_buffer = pkt->packet_buffer; + data_done.input_done.extra_data_buffer = pkt->extra_data_buffer; data_done.input_done.status = hfi_map_err_status(pkt->error_type); hfi_picture_type = (struct hfi_picture_type *)&pkt->rgData[0]; @@ -1413,10 +1411,9 @@ static int hfi_process_session_ftb_done( data_done.output_done.alloc_len1 = pkt->alloc_len; data_done.output_done.filled_len1 = pkt->filled_len; data_done.output_done.picture_type = pkt->picture_type; - data_done.output_done.packet_buffer1 = - (ion_phys_addr_t)pkt->packet_buffer; + data_done.output_done.packet_buffer1 = pkt->packet_buffer; data_done.output_done.extra_data_buffer = - (ion_phys_addr_t)pkt->extra_data_buffer; + pkt->extra_data_buffer; data_done.output_done.buffer_type = HAL_BUFFER_OUTPUT; } else /* if (is_decoder) */ { struct hfi_msg_session_fbd_uncompressed_plane0_packet *pkt = diff --git a/drivers/media/platform/msm/vidc/msm_smem.c b/drivers/media/platform/msm/vidc/msm_smem.c index 
b1166226c213..9b23376b8ea6 100644 --- a/drivers/media/platform/msm/vidc/msm_smem.c +++ b/drivers/media/platform/msm/vidc/msm_smem.c @@ -30,7 +30,7 @@ struct smem_client { enum session_type session_type; }; -static int get_device_address(struct smem_client *smem_client, +static int msm_ion_get_device_address(struct smem_client *smem_client, struct ion_handle *hndl, unsigned long align, ion_phys_addr_t *iova, unsigned long *buffer_size, unsigned long flags, enum hal_buffer buffer_type, @@ -122,12 +122,6 @@ static int get_device_address(struct smem_client *smem_client, goto mem_map_sg_failed; } if (table->sgl) { - dprintk(VIDC_DBG, - "%s: CB : %s, DMA buf: %pK, device: %pK, attach: %pK, table: %pK, table sgl: %pK, rc: %d, dma_address: %pa\n", - __func__, cb->name, buf, cb->dev, attach, - table, table->sgl, rc, - &table->sgl->dma_address); - *iova = table->sgl->dma_address; *buffer_size = table->sgl->dma_length; } else { @@ -153,7 +147,6 @@ static int get_device_address(struct smem_client *smem_client, } } - dprintk(VIDC_DBG, "mapped ion handle %pK to %pa\n", hndl, iova); return 0; mem_map_sg_failed: dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL); @@ -166,38 +159,26 @@ static int get_device_address(struct smem_client *smem_client, return rc; } -static void put_device_address(struct smem_client *smem_client, +static int msm_ion_put_device_address(struct smem_client *smem_client, struct ion_handle *hndl, u32 flags, struct dma_mapping_info *mapping_info, enum hal_buffer buffer_type) { - struct ion_client *clnt = NULL; + int rc = 0; if (!hndl || !smem_client || !mapping_info) { dprintk(VIDC_WARN, "Invalid params: %pK, %pK\n", smem_client, hndl); - return; + return -EINVAL; } if (!mapping_info->dev || !mapping_info->table || !mapping_info->buf || !mapping_info->attach) { dprintk(VIDC_WARN, "Invalid params:\n"); - return; + return -EINVAL; } - clnt = smem_client->clnt; - if (!clnt) { - dprintk(VIDC_WARN, "Invalid client\n"); - return; - } if (is_iommu_present(smem_client->res)) { - dprintk(VIDC_DBG, - "Calling dma_unmap_sg - device: %pK, address: %pa, buf: %pK, table: %pK, attach: %pK\n", - mapping_info->dev, - &mapping_info->table->sgl->dma_address, - mapping_info->buf, mapping_info->table, - mapping_info->attach); - trace_msm_smem_buffer_iommu_op_start("UNMAP", 0, 0, 0, 0, 0); msm_dma_unmap_sg(mapping_info->dev, mapping_info->table->sgl, mapping_info->table->nents, DMA_BIDIRECTIONAL, @@ -207,68 +188,257 @@ static void put_device_address(struct smem_client *smem_client, dma_buf_detach(mapping_info->buf, mapping_info->attach); dma_buf_put(mapping_info->buf); trace_msm_smem_buffer_iommu_op_end("UNMAP", 0, 0, 0, 0, 0); + + mapping_info->dev = NULL; + mapping_info->mapping = NULL; + mapping_info->table = NULL; + mapping_info->attach = NULL; + mapping_info->buf = NULL; } + + return rc; } -static int ion_user_to_kernel(struct smem_client *client, int fd, u32 size, - struct msm_smem *mem, enum hal_buffer buffer_type) +static void *msm_ion_get_dma_buf(int fd) +{ + struct dma_buf *dma_buf; + + dma_buf = dma_buf_get(fd); + if (IS_ERR_OR_NULL(dma_buf)) { + dprintk(VIDC_ERR, "Failed to get dma_buf for %d, error %ld\n", + fd, PTR_ERR(dma_buf)); + dma_buf = NULL; + } + + return dma_buf; +} + +void *msm_smem_get_dma_buf(int fd) +{ + return (void *)msm_ion_get_dma_buf(fd); +} + +static void msm_ion_put_dma_buf(struct dma_buf *dma_buf) +{ + if (!dma_buf) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK\n", + __func__, dma_buf); + return; + } + + dma_buf_put(dma_buf); +} + +void msm_smem_put_dma_buf(void 
*dma_buf) +{ + return msm_ion_put_dma_buf((struct dma_buf *)dma_buf); +} + +static struct ion_handle *msm_ion_get_handle(void *ion_client, + struct dma_buf *dma_buf) +{ + struct ion_handle *handle; + + if (!ion_client || !dma_buf) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n", + __func__, ion_client, dma_buf); + return NULL; + } + + handle = ion_import_dma_buf(ion_client, dma_buf); + if (IS_ERR_OR_NULL(handle)) { + dprintk(VIDC_ERR, "Failed to get ion_handle: %pK, %pK, %ld\n", + ion_client, dma_buf, PTR_ERR(handle)); + handle = NULL; + } + + return handle; +} + +void *msm_smem_get_handle(struct smem_client *client, void *dma_buf) +{ + if (!client) + return NULL; + + return (void *)msm_ion_get_handle(client->clnt, + (struct dma_buf *)dma_buf); +} + +static void msm_ion_put_handle(struct ion_client *ion_client, + struct ion_handle *ion_handle) +{ + if (!ion_client || !ion_handle) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n", + __func__, ion_client, ion_handle); + return; + } + + ion_free(ion_client, ion_handle); +} + +void msm_smem_put_handle(struct smem_client *client, void *handle) +{ + if (!client) { + dprintk(VIDC_ERR, "%s: Invalid params %pK %pK\n", + __func__, client, handle); + return; + } + return msm_ion_put_handle(client->clnt, (struct ion_handle *)handle); +} + +static int msm_ion_map_dma_buf(struct msm_vidc_inst *inst, + struct msm_smem *smem) { - struct ion_handle *hndl = NULL; - ion_phys_addr_t iova = 0; - unsigned long buffer_size = size; int rc = 0; + ion_phys_addr_t iova = 0; + u32 temp = 0; + unsigned long buffer_size = 0; unsigned long align = SZ_4K; unsigned long ion_flags = 0; + struct ion_client *ion_client; + struct ion_handle *ion_handle; + struct dma_buf *dma_buf; -#ifdef CONFIG_ION - hndl = ion_import_dma_buf_fd(client->clnt, fd); -#endif - dprintk(VIDC_DBG, "%s ion handle: %pK\n", __func__, hndl); - if (IS_ERR_OR_NULL(hndl)) { - dprintk(VIDC_ERR, "Failed to get handle: %pK, %d, %d, %pK\n", - client, fd, size, hndl); - rc = -ENOMEM; - goto fail_import_fd; + if (!inst || !inst->mem_client || !inst->mem_client->clnt) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n", + __func__, inst, smem); + return -EINVAL; } - mem->kvaddr = NULL; - rc = ion_handle_get_flags(client->clnt, hndl, &ion_flags); + ion_client = inst->mem_client->clnt; + dma_buf = msm_ion_get_dma_buf(smem->fd); + if (!dma_buf) + return -EINVAL; + ion_handle = msm_ion_get_handle(ion_client, dma_buf); + if (!ion_handle) + return -EINVAL; + + smem->dma_buf = dma_buf; + smem->handle = ion_handle; + rc = ion_handle_get_flags(ion_client, ion_handle, &ion_flags); if (rc) { dprintk(VIDC_ERR, "Failed to get ion flags: %d\n", rc); - goto fail_device_address; + goto exit; } - mem->buffer_type = buffer_type; if (ion_flags & ION_FLAG_CACHED) - mem->flags |= SMEM_CACHED; + smem->flags |= SMEM_CACHED; if (ion_flags & ION_FLAG_SECURE) - mem->flags |= SMEM_SECURE; + smem->flags |= SMEM_SECURE; - rc = get_device_address(client, hndl, align, &iova, &buffer_size, - mem->flags, buffer_type, &mem->mapping_info); + rc = msm_ion_get_device_address(inst->mem_client, ion_handle, + align, &iova, &buffer_size, smem->flags, + smem->buffer_type, &smem->mapping_info); if (rc) { dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc); - goto fail_device_address; + goto exit; + } + temp = (u32)iova; + if ((ion_phys_addr_t)temp != iova) { + dprintk(VIDC_ERR, "iova(%pa) truncated to %#x", &iova, temp); + rc = -EINVAL; + goto exit; } - mem->mem_type = client->mem_type; - mem->smem_priv = hndl; - mem->device_addr = 
iova; - mem->size = buffer_size; - if ((u32)mem->device_addr != iova) { - dprintk(VIDC_ERR, "iova(%pa) truncated to %#x", - &iova, (u32)mem->device_addr); - goto fail_device_address; + smem->device_addr = (u32)iova + smem->offset; + +exit: + return rc; +} + +int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem) +{ + int rc = 0; + + if (!inst || !smem) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n", + __func__, inst, smem); + return -EINVAL; } - dprintk(VIDC_DBG, - "%s: ion_handle = %pK, fd = %d, device_addr = %pa, size = %zx, kvaddr = %pK, buffer_type = %d, flags = %#lx\n", - __func__, mem->smem_priv, fd, &mem->device_addr, mem->size, - mem->kvaddr, mem->buffer_type, mem->flags); + + if (smem->refcount) { + smem->refcount++; + return rc; + } + + switch (inst->mem_client->mem_type) { + case SMEM_ION: + rc = msm_ion_map_dma_buf(inst, smem); + break; + default: + dprintk(VIDC_ERR, "%s: Unknown mem_type %d\n", + __func__, inst->mem_client->mem_type); + rc = -EINVAL; + break; + } + if (!rc) + smem->refcount++; + return rc; -fail_device_address: - ion_free(client->clnt, hndl); -fail_import_fd: +} + +static int msm_ion_unmap_dma_buf(struct msm_vidc_inst *inst, + struct msm_smem *smem) +{ + int rc = 0; + + if (!inst || !inst->mem_client || !smem) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n", + __func__, inst, smem); + return -EINVAL; + } + + rc = msm_ion_put_device_address(inst->mem_client, smem->handle, + smem->flags, &smem->mapping_info, smem->buffer_type); + if (rc) { + dprintk(VIDC_ERR, "Failed to put device address: %d\n", rc); + goto exit; + } + + msm_ion_put_handle(inst->mem_client->clnt, smem->handle); + msm_ion_put_dma_buf(smem->dma_buf); + + smem->device_addr = 0x0; + smem->handle = NULL; + smem->dma_buf = NULL; + +exit: + return rc; +} + +int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem) +{ + int rc = 0; + + if (!inst || !smem) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n", + __func__, inst, smem); + return -EINVAL; + } + + if (smem->refcount) { + smem->refcount--; + } else { + dprintk(VIDC_WARN, + "unmap called while refcount is zero already\n"); + return -EINVAL; + } + + if (smem->refcount) + return rc; + + switch (inst->mem_client->mem_type) { + case SMEM_ION: + rc = msm_ion_unmap_dma_buf(inst, smem); + break; + default: + dprintk(VIDC_ERR, "%s: Unknown mem_type %d\n", + __func__, inst->mem_client->mem_type); + rc = -EINVAL; + break; + } + return rc; } @@ -321,6 +491,12 @@ static int alloc_ion_mem(struct smem_client *client, size_t size, u32 align, int rc = 0; int ion_flags = 0; + if (!client || !mem) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n", + __func__, client, mem); + return -EINVAL; + } + align = ALIGN(align, SZ_4K); size = ALIGN(size, SZ_4K); @@ -366,10 +542,13 @@ static int alloc_ion_mem(struct smem_client *client, size_t size, u32 align, } trace_msm_smem_buffer_ion_op_end("ALLOC", (u32)buffer_type, heap_mask, size, align, flags, map_kernel); - mem->mem_type = client->mem_type; - mem->smem_priv = hndl; + + mem->handle = hndl; mem->flags = flags; mem->buffer_type = buffer_type; + mem->offset = 0; + mem->size = size; + if (map_kernel) { mem->kvaddr = ion_map_kernel(client->clnt, hndl); if (IS_ERR_OR_NULL(mem->kvaddr)) { @@ -382,24 +561,23 @@ static int alloc_ion_mem(struct smem_client *client, size_t size, u32 align, mem->kvaddr = NULL; } - rc = get_device_address(client, hndl, align, &iova, &buffer_size, - flags, buffer_type, &mem->mapping_info); + rc = msm_ion_get_device_address(client, hndl, 
align, &iova, + &buffer_size, flags, buffer_type, &mem->mapping_info); if (rc) { dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc); goto fail_device_address; } - mem->device_addr = iova; - if ((u32)mem->device_addr != iova) { + mem->device_addr = (u32)iova; + if ((ion_phys_addr_t)mem->device_addr != iova) { dprintk(VIDC_ERR, "iova(%pa) truncated to %#x", - &iova, (u32)mem->device_addr); + &iova, mem->device_addr); goto fail_device_address; } - mem->size = size; dprintk(VIDC_DBG, - "%s: ion_handle = %pK, device_addr = %pa, size = %#zx, kvaddr = %pK, buffer_type = %#x, flags = %#lx\n", - __func__, mem->smem_priv, &mem->device_addr, - mem->size, mem->kvaddr, mem->buffer_type, mem->flags); + "%s: ion_handle = %pK, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x, flags = %#lx\n", + __func__, mem->handle, mem->device_addr, mem->size, + mem->kvaddr, mem->buffer_type, mem->flags); return rc; fail_device_address: if (mem->kvaddr) @@ -410,30 +588,40 @@ static int alloc_ion_mem(struct smem_client *client, size_t size, u32 align, return rc; } -static void free_ion_mem(struct smem_client *client, struct msm_smem *mem) +static int free_ion_mem(struct smem_client *client, struct msm_smem *mem) { + int rc = 0; + + if (!client || !mem) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n", + __func__, client, mem); + return -EINVAL; + } + dprintk(VIDC_DBG, - "%s: ion_handle = %pK, device_addr = %pa, size = %#zx, kvaddr = %pK, buffer_type = %#x\n", - __func__, mem->smem_priv, &mem->device_addr, - mem->size, mem->kvaddr, mem->buffer_type); + "%s: ion_handle = %pK, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x\n", + __func__, mem->handle, mem->device_addr, mem->size, + mem->kvaddr, mem->buffer_type); if (mem->device_addr) - put_device_address(client, mem->smem_priv, mem->flags, + msm_ion_put_device_address(client, mem->handle, mem->flags, &mem->mapping_info, mem->buffer_type); if (mem->kvaddr) - ion_unmap_kernel(client->clnt, mem->smem_priv); - if (mem->smem_priv) { + ion_unmap_kernel(client->clnt, mem->handle); + + if (mem->handle) { trace_msm_smem_buffer_ion_op_start("FREE", (u32)mem->buffer_type, -1, mem->size, -1, mem->flags, -1); - dprintk(VIDC_DBG, - "%s: Freeing handle %pK, client: %pK\n", - __func__, mem->smem_priv, client->clnt); - ion_free(client->clnt, mem->smem_priv); + ion_free(client->clnt, mem->handle); trace_msm_smem_buffer_ion_op_end("FREE", (u32)mem->buffer_type, -1, mem->size, -1, mem->flags, -1); + } else { + dprintk(VIDC_ERR, "%s: invalid ion_handle\n", __func__); } + + return rc; } static void *ion_new_client(void) @@ -443,135 +631,105 @@ static void *ion_new_client(void) client = msm_ion_client_create("video_client"); if (!client) dprintk(VIDC_ERR, "Failed to create smem client\n"); + + dprintk(VIDC_DBG, "%s: client %pK\n", __func__, client); + return client; }; static void ion_delete_client(struct smem_client *client) { + if (!client) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK\n", + __func__, client); + return; + } + + dprintk(VIDC_DBG, "%s: client %pK\n", __func__, client->clnt); ion_client_destroy(client->clnt); + client->clnt = NULL; } -struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 size, - enum hal_buffer buffer_type) +static int msm_ion_cache_operations(void *ion_client, void *ion_handle, + unsigned long offset, unsigned long size, + enum smem_cache_ops cache_op) { - struct smem_client *client = clt; int rc = 0; - struct msm_smem *mem; + unsigned long flags = 0; + int msm_cache_ops = 0; - if (fd < 0) { - dprintk(VIDC_ERR, 
"Invalid fd: %d\n", fd); - return NULL; + if (!ion_client || !ion_handle) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n", + __func__, ion_client, ion_handle); + return -EINVAL; } - mem = kzalloc(sizeof(*mem), GFP_KERNEL); - if (!mem) { - dprintk(VIDC_ERR, "Failed to allocate shared mem\n"); - return NULL; + + rc = ion_handle_get_flags(ion_client, ion_handle, &flags); + if (rc) { + dprintk(VIDC_ERR, + "%s: ion_handle_get_flags failed: %d, ion client %pK, ion handle %pK\n", + __func__, rc, ion_client, ion_handle); + goto exit; } - switch (client->mem_type) { - case SMEM_ION: - rc = ion_user_to_kernel(clt, fd, size, mem, buffer_type); + + if (!ION_IS_CACHED(flags)) + goto exit; + + switch (cache_op) { + case SMEM_CACHE_CLEAN: + msm_cache_ops = ION_IOC_CLEAN_CACHES; + break; + case SMEM_CACHE_INVALIDATE: + msm_cache_ops = ION_IOC_INV_CACHES; + break; + case SMEM_CACHE_CLEAN_INVALIDATE: + msm_cache_ops = ION_IOC_CLEAN_INV_CACHES; break; default: - dprintk(VIDC_ERR, "Mem type not supported\n"); + dprintk(VIDC_ERR, "%s: cache (%d) operation not supported\n", + __func__, cache_op); rc = -EINVAL; - break; - } - if (rc) { - dprintk(VIDC_ERR, "Failed to allocate shared memory\n"); - kfree(mem); - mem = NULL; + goto exit; } - return mem; -} - -bool msm_smem_compare_buffers(void *clt, int fd, void *priv) -{ - struct smem_client *client = clt; - struct ion_handle *handle = NULL; - bool ret = false; - - if (!clt || !priv) { - dprintk(VIDC_ERR, "Invalid params: %pK, %pK\n", - clt, priv); - return false; - } -#ifdef CONFIG_ION - handle = ion_import_dma_buf_fd(client->clnt, fd); -#endif - ret = handle == priv; - (!IS_ERR_OR_NULL(handle)) ? ion_free(client->clnt, handle) : 0; - return ret; -} -static int ion_cache_operations(struct smem_client *client, - struct msm_smem *mem, enum smem_cache_ops cache_op) -{ - unsigned long ionflag = 0; - int rc = 0; - int msm_cache_ops = 0; - - if (!mem || !client) { - dprintk(VIDC_ERR, "Invalid params: %pK, %pK\n", - mem, client); - return -EINVAL; - } - rc = ion_handle_get_flags(client->clnt, mem->smem_priv, - &ionflag); + rc = msm_ion_do_cache_offset_op(ion_client, ion_handle, NULL, + offset, size, msm_cache_ops); if (rc) { dprintk(VIDC_ERR, - "ion_handle_get_flags failed: %d\n", rc); - goto cache_op_failed; - } - if (ION_IS_CACHED(ionflag)) { - switch (cache_op) { - case SMEM_CACHE_CLEAN: - msm_cache_ops = ION_IOC_CLEAN_CACHES; - break; - case SMEM_CACHE_INVALIDATE: - msm_cache_ops = ION_IOC_INV_CACHES; - break; - case SMEM_CACHE_CLEAN_INVALIDATE: - msm_cache_ops = ION_IOC_CLEAN_INV_CACHES; - break; - default: - dprintk(VIDC_ERR, "cache operation not supported\n"); - rc = -EINVAL; - goto cache_op_failed; - } - rc = msm_ion_do_cache_op(client->clnt, - (struct ion_handle *)mem->smem_priv, - 0, (unsigned long)mem->size, - msm_cache_ops); - if (rc) { - dprintk(VIDC_ERR, - "cache operation failed %d\n", rc); - goto cache_op_failed; - } + "%s: cache operation failed %d, ion client %pK, ion handle %pK, offset %lu, size %lu, msm_cache_ops %u\n", + __func__, rc, ion_client, ion_handle, offset, + size, msm_cache_ops); + goto exit; } -cache_op_failed: + +exit: return rc; } -int msm_smem_cache_operations(void *clt, struct msm_smem *mem, +int msm_smem_cache_operations(struct smem_client *client, + void *handle, unsigned long offset, unsigned long size, enum smem_cache_ops cache_op) { - struct smem_client *client = clt; int rc = 0; - if (!client) { - dprintk(VIDC_ERR, "Invalid params: %pK\n", - client); + if (!client || !handle) { + dprintk(VIDC_ERR, "%s: Invalid params: %pK 
%pK\n", + __func__, client, handle); return -EINVAL; } + switch (client->mem_type) { case SMEM_ION: - rc = ion_cache_operations(client, mem, cache_op); + rc = msm_ion_cache_operations(client->clnt, handle, + offset, size, cache_op); if (rc) dprintk(VIDC_ERR, - "Failed cache operations: %d\n", rc); + "%s: Failed cache operations: %d\n", __func__, rc); break; default: - dprintk(VIDC_ERR, "Mem type not supported\n"); + dprintk(VIDC_ERR, "%s: Mem type (%d) not supported\n", + __func__, client->mem_type); + rc = -EINVAL; break; } return rc; @@ -607,32 +765,22 @@ void *msm_smem_new_client(enum smem_type mtype, return client; } -struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags, - enum hal_buffer buffer_type, int map_kernel) +int msm_smem_alloc(struct smem_client *client, size_t size, + u32 align, u32 flags, enum hal_buffer buffer_type, + int map_kernel, struct msm_smem *smem) { - struct smem_client *client; int rc = 0; - struct msm_smem *mem; - client = clt; - if (!client) { - dprintk(VIDC_ERR, "Invalid client passed\n"); - return NULL; - } - if (!size) { - dprintk(VIDC_ERR, "No need to allocate memory of size: %zx\n", - size); - return NULL; - } - mem = kzalloc(sizeof(*mem), GFP_KERNEL); - if (!mem) { - dprintk(VIDC_ERR, "Failed to allocate shared mem\n"); - return NULL; + if (!client || !smem || !size) { + dprintk(VIDC_ERR, "%s: Invalid params %pK %pK %d\n", + __func__, client, smem, (u32)size); + return -EINVAL; } + switch (client->mem_type) { case SMEM_ION: rc = alloc_ion_mem(client, size, align, flags, buffer_type, - mem, map_kernel); + smem, map_kernel); break; default: dprintk(VIDC_ERR, "Mem type not supported\n"); @@ -640,30 +788,34 @@ struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags, break; } if (rc) { - dprintk(VIDC_ERR, "Failed to allocate shared memory\n"); - kfree(mem); - mem = NULL; + dprintk(VIDC_ERR, "Failed to allocate memory\n"); } - return mem; + + return rc; } -void msm_smem_free(void *clt, struct msm_smem *mem) +int msm_smem_free(void *clt, struct msm_smem *smem) { + int rc = 0; struct smem_client *client = clt; - if (!client || !mem) { + if (!client || !smem) { dprintk(VIDC_ERR, "Invalid client/handle passed\n"); - return; + return -EINVAL; } switch (client->mem_type) { case SMEM_ION: - free_ion_mem(client, mem); + rc = free_ion_mem(client, smem); break; default: dprintk(VIDC_ERR, "Mem type not supported\n"); + rc = -EINVAL; break; } - kfree(mem); + if (rc) + dprintk(VIDC_ERR, "Failed to free memory\n"); + + return rc; }; void msm_smem_delete_client(void *clt) @@ -692,7 +844,7 @@ struct context_bank_info *msm_smem_get_context_bank(void *clt, struct context_bank_info *cb = NULL, *match = NULL; if (!clt) { - dprintk(VIDC_ERR, "%s - invalid params\n", __func__); + dprintk(VIDC_ERR, "%s: invalid params\n", __func__); return NULL; } @@ -713,12 +865,13 @@ struct context_bank_info *msm_smem_get_context_bank(void *clt, if (cb->is_secure == is_secure && cb->buffer_type & buffer_type) { match = cb; - dprintk(VIDC_DBG, - "context bank found for CB : %s, device: %pK mapping: %pK\n", - match->name, match->dev, match->mapping); break; } } + if (!match) + dprintk(VIDC_ERR, + "%s: cb not found for buffer_type %x, is_secure %d\n", + __func__, buffer_type, is_secure); return match; } diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index ede51b679d79..97c8889e9072 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -23,6 +23,7 @@ 
#include #include "vidc_hfi_api.h" #include "msm_vidc_clocks.h" +#include #define MAX_EVENTS 30 @@ -383,507 +384,6 @@ int msm_vidc_reqbufs(void *instance, struct v4l2_requestbuffers *b) } EXPORT_SYMBOL(msm_vidc_reqbufs); -struct buffer_info *get_registered_buf(struct msm_vidc_inst *inst, - struct v4l2_buffer *b, int idx, int *plane) -{ - struct buffer_info *temp; - struct buffer_info *ret = NULL; - int i; - int fd = b->m.planes[idx].reserved[0]; - u32 buff_off = b->m.planes[idx].reserved[1]; - u32 size = b->m.planes[idx].length; - ion_phys_addr_t device_addr = b->m.planes[idx].m.userptr; - - if (fd < 0 || !plane) { - dprintk(VIDC_ERR, "Invalid input\n"); - goto err_invalid_input; - } - - WARN(!mutex_is_locked(&inst->registeredbufs.lock), - "Registered buf lock is not acquired for %s", __func__); - - *plane = 0; - list_for_each_entry(temp, &inst->registeredbufs.list, list) { - for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) { - bool ion_hndl_matches = temp->handle[i] ? - msm_smem_compare_buffers(inst->mem_client, fd, - temp->handle[i]->smem_priv) : false; - bool device_addr_matches = device_addr == - temp->device_addr[i]; - bool contains_within = CONTAINS(temp->buff_off[i], - temp->size[i], buff_off) || - CONTAINS(buff_off, size, temp->buff_off[i]); - bool overlaps = OVERLAPS(buff_off, size, - temp->buff_off[i], temp->size[i]); - - if (!temp->inactive && - (ion_hndl_matches || device_addr_matches) && - (contains_within || overlaps)) { - dprintk(VIDC_DBG, - "This memory region is already mapped\n"); - ret = temp; - *plane = i; - break; - } - } - if (ret) - break; - } - -err_invalid_input: - return ret; -} - -static struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst, int fd) -{ - struct buffer_info *temp; - struct msm_smem *same_fd_handle = NULL; - int i; - - if (!fd) - return NULL; - - if (!inst || fd < 0) { - dprintk(VIDC_ERR, "%s: Invalid input\n", __func__); - goto err_invalid_input; - } - - mutex_lock(&inst->registeredbufs.lock); - list_for_each_entry(temp, &inst->registeredbufs.list, list) { - for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) { - bool ion_hndl_matches = temp->handle[i] ? 
- msm_smem_compare_buffers(inst->mem_client, fd, - temp->handle[i]->smem_priv) : false; - if (ion_hndl_matches && temp->mapped[i]) { - temp->same_fd_ref[i]++; - dprintk(VIDC_INFO, - "Found same fd buffer\n"); - same_fd_handle = temp->handle[i]; - break; - } - } - if (same_fd_handle) - break; - } - mutex_unlock(&inst->registeredbufs.lock); - -err_invalid_input: - return same_fd_handle; -} - -struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list, - ion_phys_addr_t device_addr) -{ - struct buffer_info *temp = NULL; - bool found = false; - int i; - - if (!buf_list || !device_addr) { - dprintk(VIDC_ERR, - "Invalid input- device_addr: %pa buf_list: %pK\n", - &device_addr, buf_list); - goto err_invalid_input; - } - - mutex_lock(&buf_list->lock); - list_for_each_entry(temp, &buf_list->list, list) { - for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) { - if (!temp->inactive && - temp->device_addr[i] == device_addr) { - dprintk(VIDC_INFO, - "Found same fd buffer\n"); - found = true; - break; - } - } - - if (found) - break; - } - mutex_unlock(&buf_list->lock); - -err_invalid_input: - return temp; -} - -static inline void populate_buf_info(struct buffer_info *binfo, - struct v4l2_buffer *b, u32 i) -{ - if (i >= VIDEO_MAX_PLANES) { - dprintk(VIDC_ERR, "%s: Invalid input\n", __func__); - return; - } - binfo->type = b->type; - binfo->fd[i] = b->m.planes[i].reserved[0]; - binfo->buff_off[i] = b->m.planes[i].reserved[1]; - binfo->size[i] = b->m.planes[i].length; - binfo->uvaddr[i] = b->m.planes[i].m.userptr; - binfo->num_planes = b->length; - binfo->memory = b->memory; - binfo->v4l2_index = b->index; - binfo->timestamp.tv_sec = b->timestamp.tv_sec; - binfo->timestamp.tv_usec = b->timestamp.tv_usec; - dprintk(VIDC_DBG, "%s: fd[%d] = %d b->index = %d", - __func__, i, binfo->fd[i], b->index); -} - -static inline void repopulate_v4l2_buffer(struct v4l2_buffer *b, - struct buffer_info *binfo) -{ - int i = 0; - - b->type = binfo->type; - b->length = binfo->num_planes; - b->memory = binfo->memory; - b->index = binfo->v4l2_index; - b->timestamp.tv_sec = binfo->timestamp.tv_sec; - b->timestamp.tv_usec = binfo->timestamp.tv_usec; - binfo->dequeued = false; - for (i = 0; i < binfo->num_planes; ++i) { - b->m.planes[i].reserved[0] = binfo->fd[i]; - b->m.planes[i].reserved[1] = binfo->buff_off[i]; - b->m.planes[i].length = binfo->size[i]; - b->m.planes[i].m.userptr = binfo->device_addr[i]; - dprintk(VIDC_DBG, "%s %d %d %d %pa\n", __func__, binfo->fd[i], - binfo->buff_off[i], binfo->size[i], - &binfo->device_addr[i]); - } -} - -static struct msm_smem *map_buffer(struct msm_vidc_inst *inst, - struct v4l2_plane *p, enum hal_buffer buffer_type) -{ - struct msm_smem *handle = NULL; - - handle = msm_comm_smem_user_to_kernel(inst, - p->reserved[0], - p->length, - buffer_type); - if (!handle) { - dprintk(VIDC_ERR, - "%s: Failed to get device buffer address\n", __func__); - return NULL; - } - return handle; -} - -static inline enum hal_buffer get_hal_buffer_type( - struct msm_vidc_inst *inst, struct v4l2_buffer *b) -{ - if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) - return HAL_BUFFER_INPUT; - else if (b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) - return HAL_BUFFER_OUTPUT; - else - return -EINVAL; -} - -static inline bool is_dynamic_buffer_mode(struct v4l2_buffer *b, - struct msm_vidc_inst *inst) -{ - enum vidc_ports port = b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? 
- OUTPUT_PORT : CAPTURE_PORT; - return inst->buffer_mode_set[port] == HAL_BUFFER_MODE_DYNAMIC; -} - - -static inline void save_v4l2_buffer(struct v4l2_buffer *b, - struct buffer_info *binfo) -{ - int i = 0; - - for (i = 0; i < b->length; ++i) { - if (EXTRADATA_IDX(b->length) && - (i == EXTRADATA_IDX(b->length)) && - !b->m.planes[i].length) { - continue; - } - populate_buf_info(binfo, b, i); - } -} - -int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b) -{ - struct buffer_info *binfo = NULL; - struct buffer_info *temp = NULL, *iterator = NULL; - int plane = 0; - int i = 0, rc = 0; - struct msm_smem *same_fd_handle = NULL; - - if (!b || !inst) { - dprintk(VIDC_ERR, "%s: invalid input\n", __func__); - return -EINVAL; - } - - binfo = kzalloc(sizeof(*binfo), GFP_KERNEL); - if (!binfo) { - dprintk(VIDC_ERR, "Out of memory\n"); - rc = -ENOMEM; - goto exit; - } - if (b->length > VIDEO_MAX_PLANES) { - dprintk(VIDC_ERR, "Num planes exceeds max: %d, %d\n", - b->length, VIDEO_MAX_PLANES); - rc = -EINVAL; - goto exit; - } - - dprintk(VIDC_DBG, - "[MAP] Create binfo = %pK fd = %d size = %d type = %d\n", - binfo, b->m.planes[0].reserved[0], - b->m.planes[0].length, b->type); - - for (i = 0; i < b->length; ++i) { - rc = 0; - if (EXTRADATA_IDX(b->length) && - (i == EXTRADATA_IDX(b->length)) && - !b->m.planes[i].length) { - continue; - } - mutex_lock(&inst->registeredbufs.lock); - temp = get_registered_buf(inst, b, i, &plane); - if (temp && !is_dynamic_buffer_mode(b, inst)) { - dprintk(VIDC_DBG, - "This memory region has already been prepared\n"); - rc = 0; - mutex_unlock(&inst->registeredbufs.lock); - goto exit; - } - - if (temp && is_dynamic_buffer_mode(b, inst) && !i) { - /* - * Buffer is already present in registered list - * increment ref_count, populate new values of v4l2 - * buffer in existing buffer_info struct. - * - * We will use the saved buffer info and queue it when - * we receive RELEASE_BUFFER_REFERENCE EVENT from f/w. - */ - dprintk(VIDC_DBG, "[MAP] Buffer already prepared\n"); - temp->inactive = false; - list_for_each_entry(iterator, - &inst->registeredbufs.list, list) { - if (iterator == temp) { - rc = buf_ref_get(inst, temp); - save_v4l2_buffer(b, temp); - break; - } - } - } - mutex_unlock(&inst->registeredbufs.lock); - /* - * rc == 1, - * buffer is mapped, fw has released all reference, so skip - * mapping and queue it immediately. 
- * - * rc == 2, - * buffer is mapped and fw is holding a reference, hold it in - * the driver and queue it later when fw has released - */ - if (rc == 1) { - rc = 0; - goto exit; - } else if (rc >= 2) { - rc = -EEXIST; - goto exit; - } - - same_fd_handle = get_same_fd_buffer( - inst, b->m.planes[i].reserved[0]); - - populate_buf_info(binfo, b, i); - if (same_fd_handle) { - binfo->device_addr[i] = - same_fd_handle->device_addr + binfo->buff_off[i]; - b->m.planes[i].m.userptr = binfo->device_addr[i]; - binfo->mapped[i] = false; - binfo->handle[i] = same_fd_handle; - } else { - binfo->handle[i] = map_buffer(inst, &b->m.planes[i], - get_hal_buffer_type(inst, b)); - if (!binfo->handle[i]) { - rc = -EINVAL; - goto exit; - } - - binfo->mapped[i] = true; - binfo->device_addr[i] = binfo->handle[i]->device_addr + - binfo->buff_off[i]; - b->m.planes[i].m.userptr = binfo->device_addr[i]; - } - - /* We maintain one ref count for all planes*/ - if (!i && is_dynamic_buffer_mode(b, inst)) { - rc = buf_ref_get(inst, binfo); - if (rc < 0) - goto exit; - } - dprintk(VIDC_DBG, - "%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", - __func__, binfo, i, binfo->handle[i], - &binfo->device_addr[i], binfo->fd[i], - binfo->buff_off[i], binfo->mapped[i]); - } - - mutex_lock(&inst->registeredbufs.lock); - list_add_tail(&binfo->list, &inst->registeredbufs.list); - mutex_unlock(&inst->registeredbufs.lock); - return 0; - -exit: - kfree(binfo); - return rc; -} -int unmap_and_deregister_buf(struct msm_vidc_inst *inst, - struct buffer_info *binfo) -{ - int i = 0; - struct buffer_info *temp = NULL; - bool found = false, keep_node = false; - - if (!inst || !binfo) { - dprintk(VIDC_ERR, "%s invalid param: %pK %pK\n", - __func__, inst, binfo); - return -EINVAL; - } - - WARN(!mutex_is_locked(&inst->registeredbufs.lock), - "Registered buf lock is not acquired for %s", __func__); - - /* - * Make sure the buffer to be unmapped and deleted - * from the registered list is present in the list. - */ - list_for_each_entry(temp, &inst->registeredbufs.list, list) { - if (temp == binfo) { - found = true; - break; - } - } - - /* - * Free the buffer info only if - * - buffer info has not been deleted from registered list - * - vidc client has called dqbuf on the buffer - * - no references are held on the buffer - */ - if (!found || !temp || !temp->pending_deletion || !temp->dequeued) - goto exit; - - for (i = 0; i < temp->num_planes; i++) { - dprintk(VIDC_DBG, - "%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", - __func__, temp, i, temp->handle[i], - &temp->device_addr[i], temp->fd[i], - temp->buff_off[i], temp->mapped[i]); - /* - * Unmap the handle only if the buffer has been mapped and no - * other buffer has a reference to this buffer. - * In case of buffers with same fd, we will map the buffer only - * once and subsequent buffers will refer to the mapped buffer's - * device address. - * For buffers which share the same fd, do not unmap and keep - * the buffer info in registered list. 
- */ - if (temp->handle[i] && temp->mapped[i] && - !temp->same_fd_ref[i]) { - msm_comm_smem_free(inst, - temp->handle[i]); - } - - if (temp->same_fd_ref[i]) - keep_node = true; - else { - temp->fd[i] = 0; - temp->handle[i] = 0; - temp->device_addr[i] = 0; - temp->uvaddr[i] = 0; - } - } - if (!keep_node) { - dprintk(VIDC_DBG, "[UNMAP] AND-FREED binfo: %pK\n", temp); - list_del(&temp->list); - kfree(temp); - } else { - temp->inactive = true; - dprintk(VIDC_DBG, "[UNMAP] NOT-FREED binfo: %pK\n", temp); - } -exit: - return 0; -} - - -int qbuf_dynamic_buf(struct msm_vidc_inst *inst, - struct buffer_info *binfo) -{ - struct v4l2_buffer b = {0}; - struct v4l2_plane plane[VIDEO_MAX_PLANES] = { {0} }; - struct buf_queue *q = NULL; - int rc = 0; - - if (!binfo) { - dprintk(VIDC_ERR, "%s invalid param: %pK\n", __func__, binfo); - return -EINVAL; - } - dprintk(VIDC_DBG, "%s fd[0] = %d\n", __func__, binfo->fd[0]); - - b.m.planes = plane; - repopulate_v4l2_buffer(&b, binfo); - - q = msm_comm_get_vb2q(inst, (&b)->type); - if (!q) { - dprintk(VIDC_ERR, "Failed to find buffer queue for type = %d\n" - , (&b)->type); - return -EINVAL; - } - - mutex_lock(&q->lock); - rc = vb2_qbuf(&q->vb2_bufq, &b); - mutex_unlock(&q->lock); - - if (rc) - dprintk(VIDC_ERR, "Failed to qbuf, %d\n", rc); - return rc; -} - -int output_buffer_cache_invalidate(struct msm_vidc_inst *inst, - struct buffer_info *binfo) -{ - int i = 0; - int rc = 0; - - if (!inst) { - dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst); - return -EINVAL; - } - - if (!binfo) { - dprintk(VIDC_ERR, "%s: invalid buffer info: %pK\n", - __func__, inst); - return -EINVAL; - } - - for (i = 0; i < binfo->num_planes; i++) { - if (binfo->handle[i]) { - struct msm_smem smem = *binfo->handle[i]; - - smem.offset = (unsigned int)(binfo->buff_off[i]); - smem.size = binfo->size[i]; - rc = msm_comm_smem_cache_operations(inst, - &smem, SMEM_CACHE_INVALIDATE); - if (rc) { - dprintk(VIDC_ERR, - "%s: Failed to clean caches: %d\n", - __func__, rc); - return -EINVAL; - } - } else - dprintk(VIDC_DBG, "%s: NULL handle for plane %d\n", - __func__, i); - } - return 0; -} - static bool valid_v4l2_buffer(struct v4l2_buffer *b, struct msm_vidc_inst *inst) { enum vidc_ports port = @@ -896,17 +396,16 @@ static bool valid_v4l2_buffer(struct v4l2_buffer *b, inst->bufq[port].num_planes == b->length; } -int msm_vidc_release_buffer(void *instance, int buffer_type, - unsigned int buffer_index) +int msm_vidc_release_buffer(void *instance, int type, unsigned int index) { + int rc = 0; struct msm_vidc_inst *inst = instance; - struct buffer_info *bi, *dummy; - int i, rc = 0; - int found_buf = 0; - struct vb2_buf_entry *temp, *next; + struct msm_vidc_buffer *mbuf, *dummy; - if (!inst) + if (!inst) { + dprintk(VIDC_ERR, "%s: invalid inst\n", __func__); return -EINVAL; + } if (!inst->in_reconfig && inst->state > MSM_VIDC_LOAD_RESOURCES && @@ -914,63 +413,25 @@ int msm_vidc_release_buffer(void *instance, int buffer_type, rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE); if (rc) { dprintk(VIDC_ERR, - "Failed to move inst: %pK to release res done\n", - inst); + "%s: Failed to move inst: %pK to release res done\n", + __func__, inst); } } mutex_lock(&inst->registeredbufs.lock); - list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) { - if (bi->type == buffer_type && bi->v4l2_index == buffer_index) { - found_buf = 1; - list_del(&bi->list); - for (i = 0; i < bi->num_planes; i++) { - if (bi->handle[i] && bi->mapped[i]) { - dprintk(VIDC_DBG, - "%s: [UNMAP] binfo = 
%pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n", - __func__, bi, i, bi->handle[i], - &bi->device_addr[i], bi->fd[i], - bi->buff_off[i], bi->mapped[i]); - msm_comm_smem_free(inst, - bi->handle[i]); - found_buf = 2; - } - } - kfree(bi); - break; - } - } - mutex_unlock(&inst->registeredbufs.lock); + list_for_each_entry_safe(mbuf, dummy, &inst->registeredbufs.list, + list) { + struct vb2_buffer *vb2 = &mbuf->vvb.vb2_buf; - switch (found_buf) { - case 0: - dprintk(VIDC_DBG, - "%s: No buffer(type: %d) found for index %d\n", - __func__, buffer_type, buffer_index); - break; - case 1: - dprintk(VIDC_WARN, - "%s: Buffer(type: %d) found for index %d.", - __func__, buffer_type, buffer_index); - dprintk(VIDC_WARN, "zero planes mapped.\n"); - break; - case 2: - dprintk(VIDC_DBG, - "%s: Released buffer(type: %d) for index %d\n", - __func__, buffer_type, buffer_index); - break; - default: - break; - } + if (vb2->type != type || vb2->index != index) + continue; - mutex_lock(&inst->pendingq.lock); - list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) { - if (temp->vb->type == buffer_type) { - list_del(&temp->list); - kfree(temp); - } + print_vidc_buffer(VIDC_DBG, "release buf", inst, mbuf); + msm_comm_unmap_vidc_buffer(inst, mbuf); + list_del(&mbuf->list); + kfree(mbuf); } - mutex_unlock(&inst->pendingq.lock); + mutex_unlock(&inst->registeredbufs.lock); return rc; } @@ -979,65 +440,20 @@ EXPORT_SYMBOL(msm_vidc_release_buffer); int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b) { struct msm_vidc_inst *inst = instance; - struct buffer_info *binfo; - int plane = 0; - int rc = 0; - int i; + int rc = 0, i = 0; struct buf_queue *q = NULL; - if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst)) - return -EINVAL; - - if (inst->state == MSM_VIDC_CORE_INVALID || - inst->core->state == VIDC_CORE_INVALID) + if (!inst || !inst->core || !b || !valid_v4l2_buffer(b, inst)) { + dprintk(VIDC_ERR, "%s: invalid params, inst %pK\n", + __func__, inst); return -EINVAL; - - rc = map_and_register_buf(inst, b); - if (rc == -EEXIST) { - if (atomic_read(&inst->in_flush) && - is_dynamic_buffer_mode(b, inst)) { - dprintk(VIDC_ERR, - "Flush in progress, do not hold any buffers in driver\n"); - msm_comm_flush_dynamic_buffers(inst); - } - return 0; } - if (rc) - return rc; - for (i = 0; i < b->length; ++i) { - if (EXTRADATA_IDX(b->length) && - (i == EXTRADATA_IDX(b->length)) && - !b->m.planes[i].length) { - b->m.planes[i].m.userptr = 0; - continue; - } - mutex_lock(&inst->registeredbufs.lock); - binfo = get_registered_buf(inst, b, i, &plane); - mutex_unlock(&inst->registeredbufs.lock); - if (!binfo) { - dprintk(VIDC_ERR, - "This buffer is not registered: %d, %d, %d\n", - b->m.planes[i].reserved[0], - b->m.planes[i].reserved[1], - b->m.planes[i].length); - goto err_invalid_buff; - } - b->m.planes[i].m.userptr = binfo->device_addr[i]; - dprintk(VIDC_DBG, "Queueing device address = %pa\n", - &binfo->device_addr[i]); - - if (binfo->handle[i] && - (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) { - rc = msm_comm_smem_cache_operations(inst, - binfo->handle[i], SMEM_CACHE_CLEAN); - if (rc) { - dprintk(VIDC_ERR, - "Failed to clean caches: %d\n", rc); - goto err_invalid_buff; - } - } + for (i = 0; i < b->length; i++) { + b->m.planes[i].m.fd = b->m.planes[i].reserved[0]; + b->m.planes[i].data_offset = b->m.planes[i].reserved[1]; } + msm_comm_qbuf_cache_operations(inst, b); q = msm_comm_get_vb2q(inst, b->type); if (!q) { @@ -1045,27 +461,28 @@ int msm_vidc_qbuf(void *instance, struct 
v4l2_buffer *b) "Failed to find buffer queue for type = %d\n", b->type); return -EINVAL; } + mutex_lock(&q->lock); rc = vb2_qbuf(&q->vb2_bufq, b); mutex_unlock(&q->lock); if (rc) dprintk(VIDC_ERR, "Failed to qbuf, %d\n", rc); - return rc; -err_invalid_buff: - return -EINVAL; + return rc; } EXPORT_SYMBOL(msm_vidc_qbuf); int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b) { struct msm_vidc_inst *inst = instance; - struct buffer_info *buffer_info = NULL; - int i = 0, rc = 0; + int rc = 0, i = 0; struct buf_queue *q = NULL; - if (!inst || !b || !valid_v4l2_buffer(b, inst)) + if (!inst || !b || !valid_v4l2_buffer(b, inst)) { + dprintk(VIDC_ERR, "%s: invalid params, inst %pK\n", + __func__, inst); return -EINVAL; + } q = msm_comm_get_vb2q(inst, b->type); if (!q) { @@ -1073,54 +490,21 @@ int msm_vidc_dqbuf(void *instance, struct v4l2_buffer *b) "Failed to find buffer queue for type = %d\n", b->type); return -EINVAL; } + mutex_lock(&q->lock); rc = vb2_dqbuf(&q->vb2_bufq, b, true); mutex_unlock(&q->lock); - if (rc) { - dprintk(VIDC_DBG, "Failed to dqbuf, %d\n", rc); + if (rc == -EAGAIN) { + return rc; + } else if (rc) { + dprintk(VIDC_ERR, "Failed to dqbuf, %d\n", rc); return rc; } + msm_comm_dqbuf_cache_operations(inst, b); for (i = 0; i < b->length; i++) { - if (EXTRADATA_IDX(b->length) && - i == EXTRADATA_IDX(b->length)) { - continue; - } - buffer_info = device_to_uvaddr(&inst->registeredbufs, - b->m.planes[i].m.userptr); - - if (!buffer_info) { - dprintk(VIDC_ERR, - "%s no buffer info registered for buffer addr: %#lx\n", - __func__, b->m.planes[i].m.userptr); - return -EINVAL; - } - - b->m.planes[i].m.userptr = buffer_info->uvaddr[i]; - b->m.planes[i].reserved[0] = buffer_info->fd[i]; - b->m.planes[i].reserved[1] = buffer_info->buff_off[i]; - } - - if (!buffer_info) { - dprintk(VIDC_ERR, - "%s: error - no buffer info found in registered list\n", - __func__); - return -EINVAL; - } - - rc = output_buffer_cache_invalidate(inst, buffer_info); - if (rc) - return rc; - - - if (is_dynamic_buffer_mode(b, inst)) { - buffer_info->dequeued = true; - - dprintk(VIDC_DBG, "[DEQUEUED]: fd[0] = %d\n", - buffer_info->fd[0]); - mutex_lock(&inst->registeredbufs.lock); - rc = unmap_and_deregister_buf(inst, buffer_info); - mutex_unlock(&inst->registeredbufs.lock); + b->m.planes[i].reserved[0] = b->m.planes[i].m.fd; + b->m.planes[i].reserved[1] = b->m.planes[i].data_offset; } return rc; @@ -1419,7 +803,6 @@ static inline int start_streaming(struct msm_vidc_inst *inst) int rc = 0; struct hfi_device *hdev; struct hal_buffer_size_minimum b; - struct vb2_buf_entry *temp, *next; hdev = inst->core->device; @@ -1536,15 +919,22 @@ static inline int start_streaming(struct msm_vidc_inst *inst) fail_start: if (rc) { - mutex_lock(&inst->pendingq.lock); - list_for_each_entry_safe(temp, next, &inst->pendingq.list, - list) { - vb2_buffer_done(temp->vb, - VB2_BUF_STATE_QUEUED); + struct msm_vidc_buffer *temp, *next; + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, next, + &inst->registeredbufs.list, list) { + struct vb2_buffer *vb; + + print_vidc_buffer(VIDC_ERR, "return buf", inst, temp); + vb = msm_comm_get_vb_using_vidc_buffer(inst, temp); + if (vb) + vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED); + msm_comm_unmap_vidc_buffer(inst, temp); list_del(&temp->list); kfree(temp); } - mutex_unlock(&inst->pendingq.lock); + mutex_unlock(&inst->registeredbufs.lock); } return rc; } @@ -1651,12 +1041,29 @@ static void msm_vidc_stop_streaming(struct vb2_queue *q) inst, q->type); } -static void 
msm_vidc_buf_queue(struct vb2_buffer *vb) +static void msm_vidc_buf_queue(struct vb2_buffer *vb2) { - int rc = msm_comm_qbuf(vb2_get_drv_priv(vb->vb2_queue), vb); + int rc = 0; + struct msm_vidc_inst *inst = NULL; + struct msm_vidc_buffer *mbuf = NULL; + + inst = vb2_get_drv_priv(vb2->vb2_queue); + if (!inst) { + dprintk(VIDC_ERR, "%s: invalid inst\n", __func__); + return; + } + + mbuf = msm_comm_get_vidc_buffer(inst, vb2); + if (IS_ERR_OR_NULL(mbuf)) { + if (PTR_ERR(mbuf) != -EEXIST) + print_vb2_buffer(VIDC_ERR, "failed to get vidc-buf", + inst, vb2); + return; + } + rc = msm_comm_qbuf(inst, mbuf); if (rc) - dprintk(VIDC_ERR, "Failed to queue buffer: %d\n", rc); + print_vidc_buffer(VIDC_ERR, "failed qbuf", inst, mbuf); } static const struct vb2_ops msm_vidc_vb2q_ops = { @@ -2085,7 +1492,6 @@ void *msm_vidc_open(int core_id, int session_type) mutex_init(&inst->bufq[OUTPUT_PORT].lock); mutex_init(&inst->lock); - INIT_MSM_VIDC_LIST(&inst->pendingq); INIT_MSM_VIDC_LIST(&inst->scratchbufs); INIT_MSM_VIDC_LIST(&inst->freqs); INIT_MSM_VIDC_LIST(&inst->persistbufs); @@ -2192,7 +1598,6 @@ void *msm_vidc_open(int core_id, int session_type) mutex_destroy(&inst->bufq[OUTPUT_PORT].lock); mutex_destroy(&inst->lock); - DEINIT_MSM_VIDC_LIST(&inst->pendingq); DEINIT_MSM_VIDC_LIST(&inst->scratchbufs); DEINIT_MSM_VIDC_LIST(&inst->persistbufs); DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq); @@ -2208,55 +1613,43 @@ EXPORT_SYMBOL(msm_vidc_open); static void cleanup_instance(struct msm_vidc_inst *inst) { - struct vb2_buf_entry *entry, *dummy; - - if (inst) { - - mutex_lock(&inst->pendingq.lock); - list_for_each_entry_safe(entry, dummy, &inst->pendingq.list, - list) { - list_del(&entry->list); - kfree(entry); - } - mutex_unlock(&inst->pendingq.lock); + if (!inst) { + dprintk(VIDC_ERR, "%s: invalid params\n", __func__); + return; + } - msm_comm_free_freq_table(inst); + msm_comm_free_freq_table(inst); - if (msm_comm_release_scratch_buffers(inst, false)) { - dprintk(VIDC_ERR, - "Failed to release scratch buffers\n"); - } + if (msm_comm_release_scratch_buffers(inst, false)) + dprintk(VIDC_ERR, + "Failed to release scratch buffers\n"); - if (msm_comm_release_recon_buffers(inst)) { - dprintk(VIDC_ERR, - "Failed to release recon buffers\n"); - } + if (msm_comm_release_recon_buffers(inst)) + dprintk(VIDC_ERR, + "Failed to release recon buffers\n"); - if (msm_comm_release_persist_buffers(inst)) { - dprintk(VIDC_ERR, - "Failed to release persist buffers\n"); - } + if (msm_comm_release_persist_buffers(inst)) + dprintk(VIDC_ERR, + "Failed to release persist buffers\n"); - /* - * At this point all buffes should be with driver - * irrespective of scenario - */ - msm_comm_validate_output_buffers(inst); + /* + * At this point all buffes should be with driver + * irrespective of scenario + */ + msm_comm_validate_output_buffers(inst); - if (msm_comm_release_output_buffers(inst, true)) { - dprintk(VIDC_ERR, - "Failed to release output buffers\n"); - } + if (msm_comm_release_output_buffers(inst, true)) + dprintk(VIDC_ERR, + "Failed to release output buffers\n"); - if (inst->extradata_handle) - msm_comm_smem_free(inst, inst->extradata_handle); + if (inst->extradata_handle) + msm_comm_smem_free(inst, inst->extradata_handle); - debugfs_remove_recursive(inst->debugfs_root); + debugfs_remove_recursive(inst->debugfs_root); - mutex_lock(&inst->pending_getpropq.lock); - WARN_ON(!list_empty(&inst->pending_getpropq.list)); - mutex_unlock(&inst->pending_getpropq.lock); - } + mutex_lock(&inst->pending_getpropq.lock); + 
WARN_ON(!list_empty(&inst->pending_getpropq.list)); + mutex_unlock(&inst->pending_getpropq.lock); } int msm_vidc_destroy(struct msm_vidc_inst *inst) @@ -2264,8 +1657,10 @@ int msm_vidc_destroy(struct msm_vidc_inst *inst) struct msm_vidc_core *core; int i = 0; - if (!inst || !inst->core) + if (!inst || !inst->core) { + dprintk(VIDC_ERR, "%s: invalid params\n", __func__); return -EINVAL; + } core = inst->core; @@ -2276,7 +1671,6 @@ int msm_vidc_destroy(struct msm_vidc_inst *inst) msm_comm_ctrl_deinit(inst); - DEINIT_MSM_VIDC_LIST(&inst->pendingq); DEINIT_MSM_VIDC_LIST(&inst->scratchbufs); DEINIT_MSM_VIDC_LIST(&inst->persistbufs); DEINIT_MSM_VIDC_LIST(&inst->pending_getpropq); @@ -2300,22 +1694,24 @@ int msm_vidc_destroy(struct msm_vidc_inst *inst) return 0; } -int msm_vidc_close(void *instance) +static void close_helper(struct kref *kref) { - void close_helper(struct kref *kref) - { - struct msm_vidc_inst *inst = container_of(kref, - struct msm_vidc_inst, kref); + struct msm_vidc_inst *inst = container_of(kref, + struct msm_vidc_inst, kref); - msm_vidc_destroy(inst); - } + msm_vidc_destroy(inst); +} +int msm_vidc_close(void *instance) +{ struct msm_vidc_inst *inst = instance; - struct buffer_info *bi, *dummy; + struct msm_vidc_buffer *temp, *dummy; int rc = 0; - if (!inst || !inst->core) + if (!inst || !inst->core) { + dprintk(VIDC_ERR, "%s: invalid params\n", __func__); return -EINVAL; + } /* * Make sure that HW stop working on these buffers that @@ -2327,19 +1723,13 @@ int msm_vidc_close(void *instance) MSM_VIDC_RELEASE_RESOURCES_DONE); mutex_lock(&inst->registeredbufs.lock); - list_for_each_entry_safe(bi, dummy, &inst->registeredbufs.list, list) { - int i = 0; - - list_del(&bi->list); - - for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES); - i++) { - if (bi->handle[i] && bi->mapped[i]) - msm_comm_smem_free(inst, bi->handle[i]); - } - - kfree(bi); - } + list_for_each_entry_safe(temp, dummy, &inst->registeredbufs.list, + list) { + print_vidc_buffer(VIDC_ERR, "undequeud buf", inst, temp); + msm_comm_unmap_vidc_buffer(inst, temp); + list_del(&temp->list); + kfree(temp); + } mutex_unlock(&inst->registeredbufs.lock); cleanup_instance(inst); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c index 60262a17788d..5e366d0acde0 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c @@ -190,7 +190,7 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst) { - int fw_out_qsize = 0, buffers_in_driver = 0; + int fw_out_qsize = 0; /* * DCVS always operates on Uncompressed buffers. 
@@ -203,11 +203,9 @@ static inline int get_pending_bufs_fw(struct msm_vidc_inst *inst) fw_out_qsize = inst->count.ftb - inst->count.fbd; else fw_out_qsize = inst->count.etb - inst->count.ebd; - - buffers_in_driver = inst->buffers_held_in_driver; } - return fw_out_qsize + buffers_in_driver; + return fw_out_qsize; } static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst) @@ -266,7 +264,7 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst) } static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst, - unsigned long freq, ion_phys_addr_t device_addr) + unsigned long freq, u32 device_addr) { struct vidc_freq_data *temp, *next; bool found = false; @@ -292,7 +290,7 @@ static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst, // TODO this needs to be removed later and use queued_list void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst, - ion_phys_addr_t device_addr) + u32 device_addr) { struct vidc_freq_data *temp, *next; @@ -515,10 +513,10 @@ int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst) int msm_comm_scale_clocks(struct msm_vidc_inst *inst) { - struct vb2_buf_entry *temp, *next; + struct msm_vidc_buffer *temp, *next; unsigned long freq = 0; u32 filled_len = 0; - ion_phys_addr_t device_addr = 0; + u32 device_addr = 0; if (!inst || !inst->core) { dprintk(VIDC_ERR, "%s Invalid args: Inst = %pK\n", @@ -526,15 +524,17 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst) return -EINVAL; } - mutex_lock(&inst->pendingq.lock); - list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) { - if (temp->vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) { + if (temp->vvb.vb2_buf.type == + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + temp->deferred) { filled_len = max(filled_len, - temp->vb->planes[0].bytesused); - device_addr = temp->vb->planes[0].m.userptr; + temp->vvb.vb2_buf.planes[0].bytesused); + device_addr = temp->smem[0].device_addr; } } - mutex_unlock(&inst->pendingq.lock); + mutex_unlock(&inst->registeredbufs.lock); if (!filled_len || !device_addr) { dprintk(VIDC_PROF, "No Change in frequency\n"); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h index db57647f4a49..e1226e4ceb6a 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h @@ -42,7 +42,7 @@ void msm_comm_free_freq_table(struct msm_vidc_inst *inst); int msm_vidc_decide_work_mode(struct msm_vidc_inst *inst); int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst); void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst, - ion_phys_addr_t device_addr); + u32 device_addr); void update_recon_stats(struct msm_vidc_inst *inst, struct recon_stats_type *recon_stats); #endif diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index 12b7cc48cecb..ee2a0e117058 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -1068,9 +1068,9 @@ static void handle_session_release_buf_done(enum hal_command_response cmd, mutex_lock(&inst->scratchbufs.lock); list_for_each_safe(ptr, next, &inst->scratchbufs.list) { buf = list_entry(ptr, struct internal_buf, list); - if (address == (u32)buf->handle->device_addr) { - dprintk(VIDC_DBG, "releasing scratch: %pa\n", - &buf->handle->device_addr); + if (address == buf->smem.device_addr) 
{ + dprintk(VIDC_DBG, "releasing scratch: %x\n", + buf->smem.device_addr); buf_found = true; } } @@ -1079,9 +1079,9 @@ static void handle_session_release_buf_done(enum hal_command_response cmd, mutex_lock(&inst->persistbufs.lock); list_for_each_safe(ptr, next, &inst->persistbufs.list) { buf = list_entry(ptr, struct internal_buf, list); - if (address == (u32)buf->handle->device_addr) { - dprintk(VIDC_DBG, "releasing persist: %pa\n", - &buf->handle->device_addr); + if (address == buf->smem.device_addr) { + dprintk(VIDC_DBG, "releasing persist: %x\n", + buf->smem.device_addr); buf_found = true; } } @@ -1447,6 +1447,20 @@ static void handle_session_init_done(enum hal_command_response cmd, void *data) put_inst(inst); } +static void msm_vidc_queue_rbr_event(struct msm_vidc_inst *inst, + int fd, u32 offset) +{ + struct v4l2_event buf_event = {0}; + u32 *ptr; + + buf_event.type = V4L2_EVENT_RELEASE_BUFFER_REFERENCE; + ptr = (u32 *)buf_event.u.data; + ptr[0] = fd; + ptr[1] = offset; + + v4l2_event_queue_fh(&inst->event_handler, &buf_event); +} + static void handle_event_change(enum hal_command_response cmd, void *data) { struct msm_vidc_inst *inst = NULL; @@ -1479,65 +1493,17 @@ static void handle_event_change(enum hal_command_response cmd, void *data) break; case HAL_EVENT_RELEASE_BUFFER_REFERENCE: { - struct v4l2_event buf_event = {0}; - struct buffer_info *binfo = NULL, *temp = NULL; - u32 *ptr = NULL; - - dprintk(VIDC_DBG, "%s - inst: %pK buffer: %pa extra: %pa\n", - __func__, inst, &event_notify->packet_buffer, - &event_notify->extra_data_buffer); - - if (inst->state == MSM_VIDC_CORE_INVALID || - inst->core->state == VIDC_CORE_INVALID) { - dprintk(VIDC_DBG, - "Event release buf ref received in invalid state - discard\n"); - goto err_bad_event; - } - - /* - * Get the buffer_info entry for the - * device address. - */ - binfo = device_to_uvaddr(&inst->registeredbufs, - event_notify->packet_buffer); - if (!binfo) { - dprintk(VIDC_ERR, - "%s buffer not found in registered list\n", - __func__); - goto err_bad_event; - } - - /* Fill event data to be sent to client*/ - buf_event.type = V4L2_EVENT_RELEASE_BUFFER_REFERENCE; - ptr = (u32 *)buf_event.u.data; - ptr[0] = binfo->fd[0]; - ptr[1] = binfo->buff_off[0]; + u32 planes[VIDEO_MAX_PLANES] = {0}; dprintk(VIDC_DBG, - "RELEASE REFERENCE EVENT FROM F/W - fd = %d offset = %d\n", - ptr[0], ptr[1]); - - /* Decrement buffer reference count*/ - mutex_lock(&inst->registeredbufs.lock); - list_for_each_entry(temp, &inst->registeredbufs.list, - list) { - if (temp == binfo) { - buf_ref_put(inst, binfo); - break; - } - } + "%s: inst: %pK data_buffer: %x extradata_buffer: %x\n", + __func__, inst, event_notify->packet_buffer, + event_notify->extra_data_buffer); - /* - * Release buffer and remove from list - * if reference goes to zero. 
- */ - if (unmap_and_deregister_buf(inst, binfo)) - dprintk(VIDC_ERR, - "%s: buffer unmap failed\n", __func__); - mutex_unlock(&inst->registeredbufs.lock); + planes[0] = event_notify->packet_buffer; + planes[1] = event_notify->extra_data_buffer; + handle_release_buffer_reference(inst, planes); - /*send event to client*/ - v4l2_event_queue_fh(&inst->event_handler, &buf_event); goto err_bad_event; } default: @@ -1782,8 +1748,8 @@ void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst) list_for_each_entry(binfo, &inst->outputbufs.list, list) { if (binfo->buffer_ownership != DRIVER) { dprintk(VIDC_DBG, - "This buffer is with FW %pa\n", - &binfo->handle->device_addr); + "This buffer is with FW %x\n", + binfo->smem.device_addr); continue; } buffers_owned_by_driver++; @@ -1803,7 +1769,6 @@ int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst) { struct internal_buf *binfo; struct hfi_device *hdev; - struct msm_smem *handle; struct vidc_frame_data frame_data = {0}; struct hal_buffer_requirements *output_buf, *extra_buf; int rc = 0; @@ -1833,13 +1798,12 @@ int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst) list_for_each_entry(binfo, &inst->outputbufs.list, list) { if (binfo->buffer_ownership != DRIVER) continue; - handle = binfo->handle; frame_data.alloc_len = output_buf->buffer_size; frame_data.filled_len = 0; frame_data.offset = 0; - frame_data.device_addr = handle->device_addr; + frame_data.device_addr = binfo->smem.device_addr; frame_data.flags = 0; - frame_data.extradata_addr = handle->device_addr + + frame_data.extradata_addr = binfo->smem.device_addr + output_buf->buffer_size; frame_data.buffer_type = HAL_BUFFER_OUTPUT; frame_data.extradata_size = extra_buf ? @@ -1890,7 +1854,7 @@ static void handle_session_flush(enum hal_command_response cmd, void *data) } } } - atomic_dec(&inst->in_flush); + inst->in_flush = false; flush_event.type = V4L2_EVENT_MSM_VIDC_FLUSH_DONE; ptr = (u32 *)flush_event.u.data; @@ -2113,82 +2077,84 @@ static void handle_session_close(enum hal_command_response cmd, void *data) put_inst(inst); } -static struct vb2_buffer *get_vb_from_device_addr(struct buf_queue *bufq, - unsigned long dev_addr) +struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer( + struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf) { + u32 port = 0; struct vb2_buffer *vb = NULL; struct vb2_queue *q = NULL; - int found = 0; + bool found = false; - if (!bufq) { - dprintk(VIDC_ERR, "Invalid parameter\n"); + if (mbuf->vvb.vb2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + port = CAPTURE_PORT; + } else if (mbuf->vvb.vb2_buf.type == + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + port = OUTPUT_PORT; + } else { + dprintk(VIDC_ERR, "%s: invalid type %d\n", + __func__, mbuf->vvb.vb2_buf.type); return NULL; } - q = &bufq->vb2_bufq; - mutex_lock(&bufq->lock); + + q = &inst->bufq[port].vb2_bufq; + mutex_lock(&inst->bufq[port].lock); + found = false; list_for_each_entry(vb, &q->queued_list, queued_entry) { - if (vb->planes[0].m.userptr == dev_addr && - vb->state == VB2_BUF_STATE_ACTIVE) { - found = 1; - dprintk(VIDC_DBG, "Found v4l2_buf index : %d\n", - vb->index); + if (msm_comm_compare_vb2_planes(inst, mbuf, vb)) { + found = true; break; } } - mutex_unlock(&bufq->lock); + mutex_unlock(&inst->bufq[port].lock); if (!found) { - dprintk(VIDC_DBG, - "Failed to find buffer in queued list: %#lx, qtype = %d\n", - dev_addr, q->type); - vb = NULL; + print_vidc_buffer(VIDC_ERR, "vb2 not found for", inst, mbuf); + return NULL; } + return vb; } -static void handle_dynamic_buffer(struct 
msm_vidc_inst *inst, - ion_phys_addr_t device_addr, u32 flags) +int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst, + struct vb2_buffer *vb) { - struct buffer_info *binfo = NULL, *temp = NULL; + u32 port; - /* - * Update reference count and release OR queue back the buffer, - * only when firmware is not holding a reference. - */ - binfo = device_to_uvaddr(&inst->registeredbufs, device_addr); - if (!binfo) { - dprintk(VIDC_ERR, - "%s buffer not found in registered list\n", - __func__); - return; + if (!inst || !vb) { + dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", + __func__, inst, vb); + return -EINVAL; } - if (flags & HAL_BUFFERFLAG_READONLY) { - dprintk(VIDC_DBG, - "FBD fd[0] = %d -> Reference with f/w, addr: %pa\n", - binfo->fd[0], &device_addr); + + if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + port = CAPTURE_PORT; + } else if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + port = OUTPUT_PORT; } else { - dprintk(VIDC_DBG, - "FBD fd[0] = %d -> FBD_ref_released, addr: %pa\n", - binfo->fd[0], &device_addr); - - mutex_lock(&inst->registeredbufs.lock); - list_for_each_entry(temp, &inst->registeredbufs.list, - list) { - if (temp == binfo) { - buf_ref_put(inst, binfo); - break; - } - } - mutex_unlock(&inst->registeredbufs.lock); + dprintk(VIDC_ERR, "%s: invalid type %d\n", + __func__, vb->type); + return -EINVAL; } + msm_vidc_debugfs_update(inst, port == CAPTURE_PORT ? + MSM_VIDC_DEBUGFS_EVENT_FBD : + MSM_VIDC_DEBUGFS_EVENT_EBD); + + mutex_lock(&inst->bufq[port].lock); + vb2_buffer_done(vb, VB2_BUF_STATE_DONE); + mutex_unlock(&inst->bufq[port].lock); + + return 0; } static void handle_ebd(enum hal_command_response cmd, void *data) { struct msm_vidc_cb_data_done *response = data; + struct msm_vidc_buffer *mbuf; struct vb2_buffer *vb; struct msm_vidc_inst *inst; struct vidc_hal_ebd *empty_buf_done; - struct vb2_v4l2_buffer *vbuf = NULL; + struct vb2_v4l2_buffer *vbuf; + u32 planes[VIDEO_MAX_PLANES] = {0}; + u32 extra_idx = 0, i; if (!response) { dprintk(VIDC_ERR, "Invalid response from vidc_hal\n"); @@ -2201,140 +2167,79 @@ static void handle_ebd(enum hal_command_response cmd, void *data) dprintk(VIDC_WARN, "Got a response for an inactive session\n"); return; } - if (inst->buffer_mode_set[OUTPUT_PORT] == HAL_BUFFER_MODE_DYNAMIC) - handle_dynamic_buffer(inst, - response->input_done.packet_buffer, 0); - - vb = get_vb_from_device_addr(&inst->bufq[OUTPUT_PORT], - response->input_done.packet_buffer); - if (vb) { - vbuf = to_vb2_v4l2_buffer(vb); - vb->planes[0].bytesused = response->input_done.filled_len; - vb->planes[0].data_offset = response->input_done.offset; - if (vb->planes[0].data_offset > vb->planes[0].length) - dprintk(VIDC_INFO, "data_offset overflow length\n"); - if (vb->planes[0].bytesused > vb->planes[0].length) - dprintk(VIDC_INFO, "bytesused overflow length\n"); - if (vb->planes[0].m.userptr != - response->clnt_data) - dprintk(VIDC_INFO, "Client data != bufaddr\n"); - empty_buf_done = (struct vidc_hal_ebd *)&response->input_done; - if (empty_buf_done) { - if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) { - dprintk(VIDC_INFO, - "Failed : Unsupported input stream\n"); - vbuf->flags |= - V4L2_QCOM_BUF_INPUT_UNSUPPORTED; - } - if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) { - dprintk(VIDC_INFO, - "Failed : Corrupted input stream\n"); - vbuf->flags |= - V4L2_QCOM_BUF_DATA_CORRUPT; - } - if (empty_buf_done->flags & HAL_BUFFERFLAG_SYNCFRAME) - vbuf->flags |= - V4L2_QCOM_BUF_FLAG_IDRFRAME | - V4L2_BUF_FLAG_KEYFRAME; - } - update_recon_stats(inst, 
&empty_buf_done->recon_stats); + empty_buf_done = (struct vidc_hal_ebd *)&response->input_done; + planes[0] = empty_buf_done->packet_buffer; + planes[1] = empty_buf_done->extra_data_buffer; - dprintk(VIDC_DBG, - "Got ebd from hal: device_addr: %pa, alloc: %d, status: %#x, pic_type: %#x, flags: %#x\n", - &empty_buf_done->packet_buffer, - empty_buf_done->alloc_len, empty_buf_done->status, - empty_buf_done->picture_type, empty_buf_done->flags); - - msm_vidc_clear_freq_entry(inst, empty_buf_done->packet_buffer); - - mutex_lock(&inst->bufq[OUTPUT_PORT].lock); - vb2_buffer_done(vb, VB2_BUF_STATE_DONE); - mutex_unlock(&inst->bufq[OUTPUT_PORT].lock); - msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_EBD); + mbuf = msm_comm_get_buffer_using_device_planes(inst, planes); + if (!mbuf) { + dprintk(VIDC_ERR, + "%s: data_addr %x, extradata_addr %x not found\n", + __func__, planes[0], planes[1]); + goto exit; } + vb = &mbuf->vvb.vb2_buf; - put_inst(inst); -} - -int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo) -{ - int cnt = 0; - - if (!inst || !binfo) - return -EINVAL; - - atomic_inc(&binfo->ref_count); - cnt = atomic_read(&binfo->ref_count); - if (cnt >= 2) - inst->buffers_held_in_driver++; + vb->planes[0].bytesused = response->input_done.filled_len; + if (vb->planes[0].bytesused > vb->planes[0].length) + dprintk(VIDC_INFO, "bytesused overflow length\n"); - dprintk(VIDC_DBG, "REF_GET[%d] fd[0] = %d\n", cnt, binfo->fd[0]); - - return cnt; -} + if (empty_buf_done->status == VIDC_ERR_NOT_SUPPORTED) { + dprintk(VIDC_INFO, "Failed : Unsupported input stream\n"); + mbuf->vvb.flags |= V4L2_QCOM_BUF_INPUT_UNSUPPORTED; + } + if (empty_buf_done->status == VIDC_ERR_BITSTREAM_ERR) { + dprintk(VIDC_INFO, "Failed : Corrupted input stream\n"); + mbuf->vvb.flags |= V4L2_QCOM_BUF_DATA_CORRUPT; + } + if (empty_buf_done->flags & HAL_BUFFERFLAG_SYNCFRAME) + mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME | + V4L2_BUF_FLAG_KEYFRAME; -int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo) -{ - int rc = 0; - int cnt; - bool release_buf = false; - bool qbuf_again = false; + extra_idx = EXTRADATA_IDX(inst->bufq[OUTPUT_PORT].num_planes); + if (extra_idx && extra_idx < VIDEO_MAX_PLANES) + vb->planes[extra_idx].bytesused = vb->planes[extra_idx].length; - if (!inst || !binfo) - return -EINVAL; + update_recon_stats(inst, &empty_buf_done->recon_stats); + msm_vidc_clear_freq_entry(inst, mbuf->smem[0].device_addr); - atomic_dec(&binfo->ref_count); - cnt = atomic_read(&binfo->ref_count); - dprintk(VIDC_DBG, "REF_PUT[%d] fd[0] = %d\n", cnt, binfo->fd[0]); - if (!cnt) - release_buf = true; - else if (cnt >= 1) - qbuf_again = true; - else { - dprintk(VIDC_DBG, "%s: invalid ref_cnt: %d\n", __func__, cnt); - cnt = -EINVAL; + vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf); + if (vb) { + vbuf = to_vb2_v4l2_buffer(vb); + vbuf->flags |= mbuf->vvb.flags; + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) + vb->planes[i].bytesused = + mbuf->vvb.vb2_buf.planes[i].bytesused; } + /* + * put_buffer should be done before vb2_buffer_done else + * client might queue the same buffer before it is unmapped + * in put_buffer. also don't use mbuf after put_buffer + * as it may be freed in put_buffer. + */ + msm_comm_put_vidc_buffer(inst, mbuf); + msm_comm_vb2_buffer_done(inst, vb); - if (cnt < 0) - return cnt; - - if (release_buf) { - /* - * We can not delete binfo here as we need to set the user - * virtual address saved in binfo->uvaddr to the dequeued v4l2 - * buffer. 
- * - * We will set the pending_deletion flag to true here and delete - * binfo from registered list in dqbuf after setting the uvaddr. - */ - dprintk(VIDC_DBG, "fd[0] = %d -> pending_deletion = true\n", - binfo->fd[0]); - binfo->pending_deletion = true; - } else if (qbuf_again) { - inst->buffers_held_in_driver--; - rc = qbuf_dynamic_buf(inst, binfo); - if (!rc) - return rc; - } - return cnt; +exit: + put_inst(inst); } static int handle_multi_stream_buffers(struct msm_vidc_inst *inst, - ion_phys_addr_t dev_addr) + u32 dev_addr) { struct internal_buf *binfo; - struct msm_smem *handle; + struct msm_smem *smem; bool found = false; mutex_lock(&inst->outputbufs.lock); list_for_each_entry(binfo, &inst->outputbufs.list, list) { - handle = binfo->handle; - if (handle && dev_addr == handle->device_addr) { + smem = &binfo->smem; + if (smem && dev_addr == smem->device_addr) { if (binfo->buffer_ownership == DRIVER) { dprintk(VIDC_ERR, - "FW returned same buffer: %pa\n", - &dev_addr); + "FW returned same buffer: %x\n", + dev_addr); break; } binfo->buffer_ownership = DRIVER; @@ -2346,8 +2251,8 @@ static int handle_multi_stream_buffers(struct msm_vidc_inst *inst, if (!found) { dprintk(VIDC_ERR, - "Failed to find output buffer in queued list: %pa\n", - &dev_addr); + "Failed to find output buffer in queued list: %x\n", + dev_addr); } return 0; @@ -2365,13 +2270,15 @@ enum hal_buffer msm_comm_get_hal_output_buffer(struct msm_vidc_inst *inst) static void handle_fbd(enum hal_command_response cmd, void *data) { struct msm_vidc_cb_data_done *response = data; + struct msm_vidc_buffer *mbuf; struct msm_vidc_inst *inst; struct vb2_buffer *vb = NULL; struct vidc_hal_fbd *fill_buf_done; + struct vb2_v4l2_buffer *vbuf; enum hal_buffer buffer_type; - int extra_idx = 0; u64 time_usec = 0; - struct vb2_v4l2_buffer *vbuf = NULL; + u32 planes[VIDEO_MAX_PLANES] = {0}; + u32 extra_idx, i; if (!response) { dprintk(VIDC_ERR, "Invalid response from vidc_hal\n"); @@ -2386,132 +2293,117 @@ static void handle_fbd(enum hal_command_response cmd, void *data) } fill_buf_done = (struct vidc_hal_fbd *)&response->output_done; + planes[0] = fill_buf_done->packet_buffer1; + planes[1] = fill_buf_done->extra_data_buffer; + buffer_type = msm_comm_get_hal_output_buffer(inst); if (fill_buf_done->buffer_type == buffer_type) { - vb = get_vb_from_device_addr(&inst->bufq[CAPTURE_PORT], - fill_buf_done->packet_buffer1); + mbuf = msm_comm_get_buffer_using_device_planes(inst, planes); + if (!mbuf) { + dprintk(VIDC_ERR, + "%s: data_addr %x, extradata_addr %x not found\n", + __func__, planes[0], planes[1]); + goto exit; + } } else { if (handle_multi_stream_buffers(inst, fill_buf_done->packet_buffer1)) dprintk(VIDC_ERR, "Failed : Output buffer not found %pa\n", &fill_buf_done->packet_buffer1); - goto err_handle_fbd; + goto exit; } + vb = &mbuf->vvb.vb2_buf; - if (vb) { - vbuf = to_vb2_v4l2_buffer(vb); - if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME || - fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY) - fill_buf_done->filled_len1 = 0; - vb->planes[0].bytesused = fill_buf_done->filled_len1; - vb->planes[0].data_offset = fill_buf_done->offset1; - if (vb->planes[0].data_offset > vb->planes[0].length) - dprintk(VIDC_INFO, - "fbd:Overflow data_offset = %d; length = %d\n", - vb->planes[0].data_offset, - vb->planes[0].length); - if (vb->planes[0].bytesused > vb->planes[0].length) - dprintk(VIDC_INFO, - "fbd:Overflow bytesused = %d; length = %d\n", - vb->planes[0].bytesused, - vb->planes[0].length); - if (!(fill_buf_done->flags1 & - 
HAL_BUFFERFLAG_TIMESTAMPINVALID)) { - time_usec = fill_buf_done->timestamp_hi; - time_usec = (time_usec << 32) | - fill_buf_done->timestamp_lo; - } else { - time_usec = 0; - dprintk(VIDC_DBG, - "Set zero timestamp for buffer %pa, filled: %d, (hi:%u, lo:%u)\n", - &fill_buf_done->packet_buffer1, - fill_buf_done->filled_len1, - fill_buf_done->timestamp_hi, - fill_buf_done->timestamp_lo); - } - vbuf->flags = 0; - vb->timestamp = (time_usec * NSEC_PER_USEC); - - extra_idx = - EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes); - if (extra_idx && extra_idx < VIDEO_MAX_PLANES) { - vb->planes[extra_idx].m.userptr = - (unsigned long)fill_buf_done->extra_data_buffer; - vb->planes[extra_idx].bytesused = - vb->planes[extra_idx].length; - vb->planes[extra_idx].data_offset = 0; - } - - if (inst->buffer_mode_set[CAPTURE_PORT] == - HAL_BUFFER_MODE_DYNAMIC) - handle_dynamic_buffer(inst, fill_buf_done->packet_buffer1, - fill_buf_done->flags1); - if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY) - vbuf->flags |= V4L2_QCOM_BUF_FLAG_READONLY; - if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS) - vbuf->flags |= V4L2_QCOM_BUF_FLAG_EOS; - if (fill_buf_done->flags1 & HAL_BUFFERFLAG_CODECCONFIG) - vbuf->flags |= V4L2_QCOM_BUF_FLAG_CODECCONFIG; - if (fill_buf_done->flags1 & HAL_BUFFERFLAG_SYNCFRAME) - vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME; - if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOSEQ) - vbuf->flags |= V4L2_QCOM_BUF_FLAG_EOSEQ; - if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY || - fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME) - vbuf->flags |= V4L2_QCOM_BUF_FLAG_DECODEONLY; - if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DATACORRUPT) - vbuf->flags |= V4L2_QCOM_BUF_DATA_CORRUPT; - - switch (fill_buf_done->picture_type) { - case HAL_PICTURE_IDR: - vbuf->flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME; - vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME; - break; - case HAL_PICTURE_I: - vbuf->flags |= V4L2_BUF_FLAG_KEYFRAME; - break; - case HAL_PICTURE_P: - vbuf->flags |= V4L2_BUF_FLAG_PFRAME; - break; - case HAL_PICTURE_B: - vbuf->flags |= V4L2_BUF_FLAG_BFRAME; - break; - case HAL_FRAME_NOTCODED: - case HAL_UNUSED_PICT: - /* Do we need to care about these? 
*/ - case HAL_FRAME_YUV: - break; - default: - break; - } - - inst->count.fbd++; - - if (extra_idx && extra_idx < VIDEO_MAX_PLANES) { - dprintk(VIDC_DBG, - "extradata: userptr = %pK;" - " bytesused = %d; length = %d\n", - (u8 *)vb->planes[extra_idx].m.userptr, - vb->planes[extra_idx].bytesused, - vb->planes[extra_idx].length); - } + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME || + fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY) + fill_buf_done->filled_len1 = 0; + vb->planes[0].bytesused = fill_buf_done->filled_len1; + if (vb->planes[0].bytesused > vb->planes[0].length) + dprintk(VIDC_INFO, + "fbd:Overflow bytesused = %d; length = %d\n", + vb->planes[0].bytesused, + vb->planes[0].length); + if (vb->planes[0].data_offset != fill_buf_done->offset1) + dprintk(VIDC_ERR, "%s: data_offset %d vs %d\n", + __func__, vb->planes[0].data_offset, + fill_buf_done->offset1); + if (!(fill_buf_done->flags1 & HAL_BUFFERFLAG_TIMESTAMPINVALID)) { + time_usec = fill_buf_done->timestamp_hi; + time_usec = (time_usec << 32) | fill_buf_done->timestamp_lo; + } else { + time_usec = 0; dprintk(VIDC_DBG, - "Got fbd from hal: device_addr: %pa, alloc: %d, filled: %d, offset: %d, ts: %lld, flags: %#x, crop: %d %d %d %d, pic_type: %#x, mark_data: %#x\n", - &fill_buf_done->packet_buffer1, fill_buf_done->alloc_len1, - fill_buf_done->filled_len1, fill_buf_done->offset1, time_usec, - fill_buf_done->flags1, fill_buf_done->start_x_coord, - fill_buf_done->start_y_coord, fill_buf_done->frame_width, - fill_buf_done->frame_height, fill_buf_done->picture_type, - fill_buf_done->mark_data); + "Set zero timestamp for buffer %pa, filled: %d, (hi:%u, lo:%u)\n", + &fill_buf_done->packet_buffer1, + fill_buf_done->filled_len1, + fill_buf_done->timestamp_hi, + fill_buf_done->timestamp_lo); + } + vb->timestamp = (time_usec * NSEC_PER_USEC); + + extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes); + if (extra_idx && extra_idx < VIDEO_MAX_PLANES) + vb->planes[extra_idx].bytesused = vb->planes[extra_idx].length; + + mbuf->vvb.flags = 0; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_READONLY) + mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_READONLY; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOS) + mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_EOS; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_CODECCONFIG) + mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_CODECCONFIG; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_SYNCFRAME) + mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_EOSEQ) + mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_EOSEQ; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DECODEONLY || + fill_buf_done->flags1 & HAL_BUFFERFLAG_DROP_FRAME) + mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_DECODEONLY; + if (fill_buf_done->flags1 & HAL_BUFFERFLAG_DATACORRUPT) + mbuf->vvb.flags |= V4L2_QCOM_BUF_DATA_CORRUPT; + switch (fill_buf_done->picture_type) { + case HAL_PICTURE_IDR: + mbuf->vvb.flags |= V4L2_QCOM_BUF_FLAG_IDRFRAME; + mbuf->vvb.flags |= V4L2_BUF_FLAG_KEYFRAME; + break; + case HAL_PICTURE_I: + mbuf->vvb.flags |= V4L2_BUF_FLAG_KEYFRAME; + break; + case HAL_PICTURE_P: + mbuf->vvb.flags |= V4L2_BUF_FLAG_PFRAME; + break; + case HAL_PICTURE_B: + mbuf->vvb.flags |= V4L2_BUF_FLAG_BFRAME; + break; + case HAL_FRAME_NOTCODED: + case HAL_UNUSED_PICT: + /* Do we need to care about these? 
*/ + case HAL_FRAME_YUV: + break; + default: + break; + } - mutex_lock(&inst->bufq[CAPTURE_PORT].lock); - vb2_buffer_done(vb, VB2_BUF_STATE_DONE); - mutex_unlock(&inst->bufq[CAPTURE_PORT].lock); - msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FBD); + vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf); + if (vb) { + vbuf = to_vb2_v4l2_buffer(vb); + vbuf->flags = mbuf->vvb.flags; + vb->timestamp = mbuf->vvb.vb2_buf.timestamp; + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) + vb->planes[i].bytesused = + mbuf->vvb.vb2_buf.planes[i].bytesused; } + /* + * put_buffer should be done before vb2_buffer_done else + * client might queue the same buffer before it is unmapped + * in put_buffer. also don't use mbuf after put_buffer + * as it may be freed in put_buffer. + */ + msm_comm_put_vidc_buffer(inst, mbuf); + msm_comm_vb2_buffer_done(inst, vb); -err_handle_fbd: +exit: put_inst(inst); } @@ -3247,7 +3139,6 @@ static int set_output_buffers(struct msm_vidc_inst *inst, enum hal_buffer buffer_type) { int rc = 0; - struct msm_smem *handle; struct internal_buf *binfo; u32 smem_flags = 0, buffer_size; struct hal_buffer_requirements *output_buf, *extradata_buf; @@ -3295,33 +3186,30 @@ static int set_output_buffers(struct msm_vidc_inst *inst, if (output_buf->buffer_size) { for (i = 0; i < output_buf->buffer_count_actual; i++) { - handle = msm_comm_smem_alloc(inst, + binfo = kzalloc(sizeof(*binfo), GFP_KERNEL); + if (!binfo) { + dprintk(VIDC_ERR, "Out of memory\n"); + rc = -ENOMEM; + goto fail_kzalloc; + } + rc = msm_comm_smem_alloc(inst, buffer_size, 1, smem_flags, - buffer_type, 0); - if (!handle) { + buffer_type, 0, &binfo->smem); + if (rc) { dprintk(VIDC_ERR, "Failed to allocate output memory\n"); - rc = -ENOMEM; goto err_no_mem; } rc = msm_comm_smem_cache_operations(inst, - handle, SMEM_CACHE_CLEAN); + &binfo->smem, SMEM_CACHE_CLEAN); if (rc) { dprintk(VIDC_WARN, "Failed to clean cache may cause undefined behavior\n"); } - binfo = kzalloc(sizeof(*binfo), GFP_KERNEL); - if (!binfo) { - dprintk(VIDC_ERR, "Out of memory\n"); - rc = -ENOMEM; - goto fail_kzalloc; - } - - binfo->handle = handle; binfo->buffer_type = buffer_type; binfo->buffer_ownership = DRIVER; - dprintk(VIDC_DBG, "Output buffer address: %pa\n", - &handle->device_addr); + dprintk(VIDC_DBG, "Output buffer address: %#x\n", + binfo->smem.device_addr); if (inst->buffer_mode_set[CAPTURE_PORT] == HAL_BUFFER_MODE_STATIC) { @@ -3332,9 +3220,9 @@ static int set_output_buffers(struct msm_vidc_inst *inst, buffer_info.buffer_type = buffer_type; buffer_info.num_buffers = 1; buffer_info.align_device_addr = - handle->device_addr; + binfo->smem.device_addr; buffer_info.extradata_addr = - handle->device_addr + + binfo->smem.device_addr + output_buf->buffer_size; if (extradata_buf) buffer_info.extradata_size = @@ -3357,7 +3245,7 @@ static int set_output_buffers(struct msm_vidc_inst *inst, fail_set_buffers: kfree(binfo); fail_kzalloc: - msm_comm_smem_free(inst, handle); + msm_comm_smem_free(inst, &binfo->smem); err_no_mem: return rc; } @@ -3407,10 +3295,10 @@ static int set_internal_buf_on_fw(struct msm_vidc_inst *inst, buffer_info.buffer_type = buffer_type; buffer_info.num_buffers = 1; buffer_info.align_device_addr = handle->device_addr; - dprintk(VIDC_DBG, "%s %s buffer : %pa\n", + dprintk(VIDC_DBG, "%s %s buffer : %x\n", reuse ? 
"Reusing" : "Allocated", get_buffer_name(buffer_type), - &buffer_info.align_device_addr); + buffer_info.align_device_addr); rc = call_hfi_op(hdev, session_set_buffers, (void *) inst->session, &buffer_info); @@ -3436,11 +3324,6 @@ static bool reuse_internal_buffers(struct msm_vidc_inst *inst, mutex_lock(&buf_list->lock); list_for_each_entry(buf, &buf_list->list, list) { - if (!buf->handle) { - reused = false; - break; - } - if (buf->buffer_type != buffer_type) continue; @@ -3456,7 +3339,7 @@ static bool reuse_internal_buffers(struct msm_vidc_inst *inst, && buffer_type != HAL_BUFFER_INTERNAL_PERSIST_1) { rc = set_internal_buf_on_fw(inst, buffer_type, - buf->handle, true); + &buf->smem, true); if (rc) { dprintk(VIDC_ERR, "%s: session_set_buffers failed\n", @@ -3477,7 +3360,6 @@ static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst, struct hal_buffer_requirements *internal_bufreq, struct msm_vidc_list *buf_list) { - struct msm_smem *handle; struct internal_buf *binfo; u32 smem_flags = 0; int rc = 0; @@ -3493,27 +3375,25 @@ static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst, smem_flags |= SMEM_SECURE; for (i = 0; i < internal_bufreq->buffer_count_actual; i++) { - handle = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size, - 1, smem_flags, internal_bufreq->buffer_type, 0); - if (!handle) { - dprintk(VIDC_ERR, - "Failed to allocate scratch memory\n"); - rc = -ENOMEM; - goto err_no_mem; - } - binfo = kzalloc(sizeof(*binfo), GFP_KERNEL); if (!binfo) { dprintk(VIDC_ERR, "Out of memory\n"); rc = -ENOMEM; goto fail_kzalloc; } + rc = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size, + 1, smem_flags, internal_bufreq->buffer_type, + 0, &binfo->smem); + if (rc) { + dprintk(VIDC_ERR, + "Failed to allocate scratch memory\n"); + goto err_no_mem; + } - binfo->handle = handle; binfo->buffer_type = internal_bufreq->buffer_type; rc = set_internal_buf_on_fw(inst, internal_bufreq->buffer_type, - handle, false); + &binfo->smem, false); if (rc) goto fail_set_buffers; @@ -3524,10 +3404,10 @@ static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst, return rc; fail_set_buffers: + msm_comm_smem_free(inst, &binfo->smem); +err_no_mem: kfree(binfo); fail_kzalloc: - msm_comm_smem_free(inst, handle); -err_no_mem: return rc; } @@ -3766,25 +3646,32 @@ int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd) } static void populate_frame_data(struct vidc_frame_data *data, - const struct vb2_buffer *vb, struct msm_vidc_inst *inst) + struct msm_vidc_buffer *mbuf, struct msm_vidc_inst *inst) { u64 time_usec; int extra_idx; - enum v4l2_buf_type type = vb->type; - enum vidc_ports port = type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? 
- OUTPUT_PORT : CAPTURE_PORT; - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct vb2_buffer *vb; + struct vb2_v4l2_buffer *vbuf; + + if (!inst || !mbuf || !data) { + dprintk(VIDC_ERR, "%s: invalid params %pK %pK %pK\n", + __func__, inst, mbuf, data); + return; + } + + vb = &mbuf->vvb.vb2_buf; + vbuf = to_vb2_v4l2_buffer(vb); time_usec = vb->timestamp; do_div(time_usec, NSEC_PER_USEC); data->alloc_len = vb->planes[0].length; - data->device_addr = vb->planes[0].m.userptr; + data->device_addr = mbuf->smem[0].device_addr; data->timestamp = time_usec; data->flags = 0; data->clnt_data = data->device_addr; - if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { bool pic_decoding_mode = msm_comm_g_ctrl_for_id(inst, V4L2_CID_MPEG_VIDC_VIDEO_PICTYPE_DEC_MODE); @@ -3812,59 +3699,64 @@ static void populate_frame_data(struct vidc_frame_data *data, data->mark_data = data->mark_target = pic_decoding_mode ? 0xdeadbeef : 0; - } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + } else if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { data->buffer_type = msm_comm_get_hal_output_buffer(inst); } - extra_idx = EXTRADATA_IDX(inst->bufq[port].num_planes); - if (extra_idx && extra_idx < VIDEO_MAX_PLANES && - vb->planes[extra_idx].m.userptr) { - data->extradata_addr = vb->planes[extra_idx].m.userptr; + extra_idx = EXTRADATA_IDX(vb->num_planes); + if (extra_idx && extra_idx < VIDEO_MAX_PLANES) { + data->extradata_addr = mbuf->smem[extra_idx].device_addr; data->extradata_size = vb->planes[extra_idx].length; data->flags |= HAL_BUFFERFLAG_EXTRADATA; } } -static unsigned int count_single_batch(struct msm_vidc_list *list, +static unsigned int count_single_batch(struct msm_vidc_inst *inst, enum v4l2_buf_type type) { - struct vb2_buf_entry *buf; int count = 0; - struct vb2_v4l2_buffer *vbuf = NULL; + struct msm_vidc_buffer *mbuf = NULL; + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (mbuf->vvb.vb2_buf.type != type) + continue; - mutex_lock(&list->lock); - list_for_each_entry(buf, &list->list, list) { - if (buf->vb->type != type) + /* count only deferred buffers */ + if (!mbuf->deferred) continue; ++count; - vbuf = to_vb2_v4l2_buffer(buf->vb); - if (!(vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER)) + if (!(mbuf->vvb.flags & V4L2_MSM_BUF_FLAG_DEFER)) goto found_batch; } - /* don't have a full batch */ + /* don't have a full batch */ count = 0; found_batch: - mutex_unlock(&list->lock); + mutex_unlock(&inst->registeredbufs.lock); return count; } -static unsigned int count_buffers(struct msm_vidc_list *list, +static unsigned int count_buffers(struct msm_vidc_inst *inst, enum v4l2_buf_type type) { - struct vb2_buf_entry *buf; + struct msm_vidc_buffer *mbuf; int count = 0; - mutex_lock(&list->lock); - list_for_each_entry(buf, &list->list, list) { - if (buf->vb->type != type) + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (mbuf->vvb.vb2_buf.type != type) + continue; + + /* count only deferred buffers */ + if (!mbuf->deferred) continue; ++count; } - mutex_unlock(&list->lock); + mutex_unlock(&inst->registeredbufs.lock); return count; } @@ -3875,27 +3767,45 @@ static void log_frame(struct msm_vidc_inst *inst, struct vidc_frame_data *data, if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { dprintk(VIDC_DBG, - "Sending etb (%pa) to hal: filled: %d, ts: %lld, flags = %#x\n", - &data->device_addr, data->filled_len, + "Sending etb (%x) to hal: 
filled: %d, ts: %lld, flags = %#x\n", + data->device_addr, data->filled_len, data->timestamp, data->flags); msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_ETB); } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { dprintk(VIDC_DBG, - "Sending ftb (%pa) to hal: size: %d, ts: %lld, flags = %#x\n", - &data->device_addr, data->alloc_len, + "Sending ftb (%x) to hal: size: %d, ts: %lld, flags = %#x\n", + data->device_addr, data->alloc_len, data->timestamp, data->flags); msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FTB); } } +enum hal_buffer get_hal_buffer_type(unsigned int type, + unsigned int plane_num) +{ + if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + if (plane_num == 0) + return HAL_BUFFER_INPUT; + else + return HAL_BUFFER_EXTRADATA_INPUT; + } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (plane_num == 0) + return HAL_BUFFER_OUTPUT; + else + return HAL_BUFFER_EXTRADATA_OUTPUT; + } else { + return -EINVAL; + } +} + /* * Attempts to queue `vb` to hardware. If, for various reasons, the buffer * cannot be queued to hardware, the buffer will be staged for commit in the * pending queue. Once the hardware reaches a good state (or if `vb` is NULL, * the subsequent *_qbuf will commit the previously staged buffers to hardware. */ -int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb) +int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf) { int rc = 0, capture_count, output_count; struct msm_vidc_core *core; @@ -3905,8 +3815,7 @@ int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb) int count; } etbs, ftbs; bool defer = false, batch_mode; - struct vb2_buf_entry *temp, *next; - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); + struct msm_vidc_buffer *temp = NULL, *next = NULL; if (!inst) { dprintk(VIDC_ERR, "%s: Invalid arguments\n", __func__); @@ -3916,36 +3825,21 @@ int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb) core = inst->core; hdev = core->device; - if (inst->state == MSM_VIDC_CORE_INVALID || - core->state == VIDC_CORE_INVALID || - core->state == VIDC_CORE_UNINIT) { - dprintk(VIDC_ERR, "Core is in bad state. Can't Queue\n"); + if (inst->state == MSM_VIDC_CORE_INVALID) { + dprintk(VIDC_ERR, "%s: inst is in bad state\n", __func__); return -EINVAL; } - /* - * Stick the buffer into the pendinq, we'll pop it out later on - * if we want to commit it to hardware - */ - if (vb) { - temp = kzalloc(sizeof(*temp), GFP_KERNEL); - if (!temp) { - dprintk(VIDC_ERR, "Out of memory\n"); - goto err_no_mem; - } - - temp->vb = vb; - mutex_lock(&inst->pendingq.lock); - list_add_tail(&temp->list, &inst->pendingq.list); - mutex_unlock(&inst->pendingq.lock); - } + /* initially assume every buffer is going to be deferred */ + if (mbuf) + mbuf->deferred = true; batch_mode = msm_comm_g_ctrl_for_id(inst, V4L2_CID_VIDC_QBUF_MODE) == V4L2_VIDC_QBUF_BATCHED; capture_count = (batch_mode ? &count_single_batch : &count_buffers) - (&inst->pendingq, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); + (inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); output_count = (batch_mode ? &count_single_batch : &count_buffers) - (&inst->pendingq, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); + (inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); /* * Somewhat complicated logic to prevent queuing the buffer to hardware. @@ -3959,13 +3853,18 @@ int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb) * buffer to be batched with future frames. The batch size (on both * capabilities) is completely determined by the client. */ - defer = defer ? 
defer : (vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER); + defer = defer ? defer : + (mbuf && mbuf->vvb.flags & V4L2_MSM_BUF_FLAG_DEFER); /* 3) If we're in batch mode, we must have full batches of both types */ defer = defer ? defer:(batch_mode && (!output_count || !capture_count)); if (defer) { - dprintk(VIDC_DBG, "Deferring queue of %pK\n", vb); + if (mbuf) { + mbuf->deferred = true; + print_vidc_buffer(VIDC_DBG, "deferred qbuf", + inst, mbuf); + } return 0; } @@ -3995,15 +3894,18 @@ int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb) etbs.count = ftbs.count = 0; /* - * Try to collect all pending buffers into 2 batches of ftb and etb + * Try to collect all deferred buffers into 2 batches of ftb and etb * Note that these "batches" might be empty if we're no in batching mode - * and the pendingq is empty + * and the deferred is not set for buffers. */ - mutex_lock(&inst->pendingq.lock); - list_for_each_entry_safe(temp, next, &inst->pendingq.list, list) { + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) { struct vidc_frame_data *frame_data = NULL; - switch (temp->vb->type) { + if (!temp->deferred) + continue; + + switch (temp->vvb.vb2_buf.type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: if (ftbs.count < capture_count && ftbs.data) frame_data = &ftbs.data[ftbs.count++]; @@ -4019,12 +3921,14 @@ int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb) if (!frame_data) continue; - populate_frame_data(frame_data, temp->vb, inst); + populate_frame_data(frame_data, temp, inst); - list_del(&temp->list); - kfree(temp); + /* this buffer going to be queued (not deferred) */ + temp->deferred = false; + + print_vidc_buffer(VIDC_DBG, "qbuf", inst, temp); } - mutex_unlock(&inst->pendingq.lock); + mutex_unlock(&inst->registeredbufs.lock); /* Finally commit all our frame(s) to H/W */ if (batch_mode) { @@ -4344,11 +4248,7 @@ int msm_comm_release_output_buffers(struct msm_vidc_inst *inst, } mutex_lock(&inst->outputbufs.lock); list_for_each_entry_safe(buf, dummy, &inst->outputbufs.list, list) { - handle = buf->handle; - if (!handle) { - dprintk(VIDC_ERR, "%s - invalid handle\n", __func__); - goto exit; - } + handle = &buf->smem; if ((buf->buffer_ownership == FIRMWARE) && !force_release) { dprintk(VIDC_INFO, "DPB is with f/w. 
Can't free it\n"); @@ -4368,18 +4268,17 @@ int msm_comm_release_output_buffers(struct msm_vidc_inst *inst, (void *)inst->session, &buffer_info); if (rc) { dprintk(VIDC_WARN, - "Rel output buf fail:%pa, %d\n", - &buffer_info.align_device_addr, + "Rel output buf fail:%x, %d\n", + buffer_info.align_device_addr, buffer_info.buffer_size); } } list_del(&buf->list); - msm_comm_smem_free(inst, buf->handle); + msm_comm_smem_free(inst, &buf->smem); kfree(buf); } -exit: mutex_unlock(&inst->outputbufs.lock); return rc; } @@ -4404,13 +4303,8 @@ static enum hal_buffer scratch_buf_sufficient(struct msm_vidc_inst *inst, mutex_lock(&inst->scratchbufs.lock); list_for_each_entry(buf, &inst->scratchbufs.list, list) { - if (!buf->handle) { - dprintk(VIDC_ERR, "%s: invalid buf handle\n", __func__); - mutex_unlock(&inst->scratchbufs.lock); - goto not_sufficient; - } if (buf->buffer_type == buffer_type && - buf->handle->size >= bufreq->buffer_size) + buf->smem.size >= bufreq->buffer_size) count++; } mutex_unlock(&inst->scratchbufs.lock); @@ -4469,13 +4363,7 @@ int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst, mutex_lock(&inst->scratchbufs.lock); list_for_each_entry_safe(buf, dummy, &inst->scratchbufs.list, list) { - if (!buf->handle) { - dprintk(VIDC_ERR, "%s - buf->handle NULL\n", __func__); - rc = -EINVAL; - goto exit; - } - - handle = buf->handle; + handle = &buf->smem; buffer_info.buffer_size = handle->size; buffer_info.buffer_type = buf->buffer_type; buffer_info.num_buffers = 1; @@ -4487,8 +4375,8 @@ int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst, (void *)inst->session, &buffer_info); if (rc) { dprintk(VIDC_WARN, - "Rel scrtch buf fail:%pa, %d\n", - &buffer_info.align_device_addr, + "Rel scrtch buf fail:%x, %d\n", + buffer_info.align_device_addr, buffer_info.buffer_size); } mutex_unlock(&inst->scratchbufs.lock); @@ -4507,11 +4395,10 @@ int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst, continue; list_del(&buf->list); - msm_comm_smem_free(inst, buf->handle); + msm_comm_smem_free(inst, handle); kfree(buf); } -exit: mutex_unlock(&inst->scratchbufs.lock); return rc; } @@ -4567,7 +4454,7 @@ int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst) mutex_lock(&inst->persistbufs.lock); list_for_each_safe(ptr, next, &inst->persistbufs.list) { buf = list_entry(ptr, struct internal_buf, list); - handle = buf->handle; + handle = &buf->smem; buffer_info.buffer_size = handle->size; buffer_info.buffer_type = buf->buffer_type; buffer_info.num_buffers = 1; @@ -4579,8 +4466,8 @@ int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst) (void *)inst->session, &buffer_info); if (rc) { dprintk(VIDC_WARN, - "Rel prst buf fail:%pa, %d\n", - &buffer_info.align_device_addr, + "Rel prst buf fail:%x, %d\n", + buffer_info.align_device_addr, buffer_info.buffer_size); } mutex_unlock(&inst->persistbufs.lock); @@ -4593,7 +4480,7 @@ int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst) mutex_lock(&inst->persistbufs.lock); } list_del(&buf->list); - msm_comm_smem_free(inst, buf->handle); + msm_comm_smem_free(inst, handle); kfree(buf); } mutex_unlock(&inst->persistbufs.lock); @@ -4771,116 +4658,20 @@ static void msm_comm_flush_in_invalid_state(struct msm_vidc_inst *inst) for (c = 0; c < ARRAY_SIZE(ports); ++c) { enum vidc_ports port = ports[c]; - dprintk(VIDC_DBG, "Flushing buffers of type %d in bad state\n", - port); mutex_lock(&inst->bufq[port].lock); - list_for_each_safe(ptr, next, &inst->bufq[port]. 
- vb2_bufq.queued_list) { + list_for_each_safe(ptr, next, + &inst->bufq[port].vb2_bufq.queued_list) { struct vb2_buffer *vb = container_of(ptr, struct vb2_buffer, queued_entry); - vb->planes[0].bytesused = 0; - vb->planes[0].data_offset = 0; - + print_vb2_buffer(VIDC_ERR, "flush in invalid", + inst, vb); vb2_buffer_done(vb, VB2_BUF_STATE_DONE); } mutex_unlock(&inst->bufq[port].lock); } - msm_vidc_queue_v4l2_event(inst, V4L2_EVENT_MSM_VIDC_FLUSH_DONE); -} - -void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst) -{ - struct buffer_info *binfo = NULL; - - if (inst->buffer_mode_set[CAPTURE_PORT] != HAL_BUFFER_MODE_DYNAMIC) - return; - - /* - * dynamic buffer mode:- if flush is called during seek - * driver should not queue any new buffer it has been holding. - * - * Each dynamic o/p buffer can have one of following ref_count: - * ref_count : 0 - f/w has released reference and sent dynamic - * buffer back. The buffer has been returned - * back to client. - * - * ref_count : 1 - f/w is holding reference. f/w may have released - * dynamic buffer as read_only OR dynamic buffer is - * pending. f/w will release reference before sending - * flush_done. - * - * ref_count : >=2 - f/w is holding reference, f/w has released dynamic - * buffer as read_only, which client has queued back - * to driver. Driver holds this buffer and will queue - * back only when f/w releases the reference. During - * flush_done, f/w will release the reference but - * driver should not queue back the buffer to f/w. - * Flush all buffers with ref_count >= 2. - */ - mutex_lock(&inst->registeredbufs.lock); - if (!list_empty(&inst->registeredbufs.list)) { - struct v4l2_event buf_event = {0}; - u32 *ptr = NULL; - - list_for_each_entry(binfo, &inst->registeredbufs.list, list) { - if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && - atomic_read(&binfo->ref_count) >= 2) { - - atomic_dec(&binfo->ref_count); - buf_event.type = - V4L2_EVENT_MSM_VIDC_RELEASE_UNQUEUED_BUFFER; - ptr = (u32 *)buf_event.u.data; - ptr[0] = binfo->fd[0]; - ptr[1] = binfo->buff_off[0]; - ptr[2] = binfo->uvaddr[0]; - ptr[3] = (u32) binfo->timestamp.tv_sec; - ptr[4] = (u32) binfo->timestamp.tv_usec; - ptr[5] = binfo->v4l2_index; - dprintk(VIDC_DBG, - "released buffer held in driver before issuing flush: %pa fd[0]: %d\n", - &binfo->device_addr[0], binfo->fd[0]); - /*send event to client*/ - v4l2_event_queue_fh(&inst->event_handler, - &buf_event); - } - } - } - mutex_unlock(&inst->registeredbufs.lock); -} - -void msm_comm_flush_pending_dynamic_buffers(struct msm_vidc_inst *inst) -{ - struct buffer_info *binfo = NULL; - - if (!inst) - return; - - if (inst->buffer_mode_set[CAPTURE_PORT] != HAL_BUFFER_MODE_DYNAMIC) - return; - - if (list_empty(&inst->pendingq.list) || - list_empty(&inst->registeredbufs.list)) - return; - - /* - * Dynamic Buffer mode - Since pendingq is not empty - * no output buffers have been sent to firmware yet. - * Hence remove reference to all pendingq o/p buffers - * before flushing them. 
- */ - - mutex_lock(&inst->registeredbufs.lock); - list_for_each_entry(binfo, &inst->registeredbufs.list, list) { - if (binfo->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { - dprintk(VIDC_DBG, - "%s: binfo = %pK device_addr = %pa\n", - __func__, binfo, &binfo->device_addr[0]); - buf_ref_put(inst, binfo); - } - } - mutex_unlock(&inst->registeredbufs.lock); + return; } int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags) @@ -4888,33 +4679,25 @@ int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags) int rc = 0; bool ip_flush = false; bool op_flush = false; - struct vb2_buf_entry *temp, *next; - struct mutex *lock; + struct msm_vidc_buffer *mbuf, *next; struct msm_vidc_core *core; struct hfi_device *hdev; - if (!inst) { + if (!inst || !inst->core || !inst->core->device) { dprintk(VIDC_ERR, - "Invalid instance pointer = %pK\n", inst); + "Invalid params, inst %pK\n", inst); return -EINVAL; } core = inst->core; - if (!core) { - dprintk(VIDC_ERR, - "Invalid core pointer = %pK\n", core); - return -EINVAL; - } hdev = core->device; - if (!hdev) { - dprintk(VIDC_ERR, "Invalid device pointer = %pK\n", hdev); - return -EINVAL; - } ip_flush = flags & V4L2_QCOM_CMD_FLUSH_OUTPUT; op_flush = flags & V4L2_QCOM_CMD_FLUSH_CAPTURE; if (ip_flush && !op_flush) { - dprintk(VIDC_INFO, "Input only flush not supported\n"); + dprintk(VIDC_WARN, + "Input only flush not supported, making it flush all\n"); + op_flush = true; return 0; } @@ -4922,11 +4705,7 @@ int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags) msm_clock_data_reset(inst); - msm_comm_flush_dynamic_buffers(inst); - - if (inst->state == MSM_VIDC_CORE_INVALID || - core->state == VIDC_CORE_INVALID || - core->state == VIDC_CORE_UNINIT) { + if (inst->state == MSM_VIDC_CORE_INVALID) { dprintk(VIDC_ERR, "Core %pK and inst %pK are in bad state\n", core, inst); @@ -4934,68 +4713,52 @@ int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags) return 0; } - if (inst->in_reconfig && !ip_flush && op_flush) { - mutex_lock(&inst->pendingq.lock); - if (!list_empty(&inst->pendingq.list)) { - /* - * Execution can never reach here since port reconfig - * wont happen unless pendingq is emptied out - * (both pendingq and flush being secured with same - * lock). Printing a message here incase this breaks. - */ - dprintk(VIDC_WARN, - "FLUSH BUG: Pending q not empty! 
It should be empty\n"); - } - mutex_unlock(&inst->pendingq.lock); - atomic_inc(&inst->in_flush); - dprintk(VIDC_DBG, "Send flush Output to firmware\n"); + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(mbuf, next, &inst->registeredbufs.list, list) { + /* flush only deferred buffers (which are not queued yet) */ + if (!mbuf->deferred) + continue; + + /* don't flush input buffers if flush not requested on it */ + if (!ip_flush && mbuf->vvb.vb2_buf.type == + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) + continue; + + print_vidc_buffer(VIDC_DBG, "flush buf", inst, mbuf); + msm_comm_flush_vidc_buffer(inst, mbuf); + msm_comm_unmap_vidc_buffer(inst, mbuf); + + /* remove from list */ + list_del(&mbuf->list); + kfree(mbuf); + mbuf = NULL; + } + mutex_unlock(&inst->registeredbufs.lock); + + /* enable in flush */ + inst->in_flush = true; + + hdev = inst->core->device; + if (ip_flush) { + dprintk(VIDC_DBG, "Send flush on all ports to firmware\n"); rc = call_hfi_op(hdev, session_flush, inst->session, - HAL_FLUSH_OUTPUT); + HAL_FLUSH_ALL); } else { - msm_comm_flush_pending_dynamic_buffers(inst); - /* - * If flush is called after queueing buffers but before - * streamon driver should flush the pending queue - */ - mutex_lock(&inst->pendingq.lock); - list_for_each_entry_safe(temp, next, - &inst->pendingq.list, list) { - enum v4l2_buf_type type = temp->vb->type; - - if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) - lock = &inst->bufq[CAPTURE_PORT].lock; - else - lock = &inst->bufq[OUTPUT_PORT].lock; - - temp->vb->planes[0].bytesused = 0; - - mutex_lock(lock); - vb2_buffer_done(temp->vb, VB2_BUF_STATE_DONE); - msm_vidc_debugfs_update(inst, - type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ? - MSM_VIDC_DEBUGFS_EVENT_FBD : - MSM_VIDC_DEBUGFS_EVENT_EBD); - list_del(&temp->list); - mutex_unlock(lock); - - kfree(temp); - } - mutex_unlock(&inst->pendingq.lock); - - /*Do not send flush in case of session_error */ - if (!(inst->state == MSM_VIDC_CORE_INVALID && - core->state != VIDC_CORE_INVALID)) { - atomic_inc(&inst->in_flush); - dprintk(VIDC_DBG, "Send flush all to firmware\n"); - rc = call_hfi_op(hdev, session_flush, inst->session, - HAL_FLUSH_ALL); - } + dprintk(VIDC_DBG, "Send flush on output port to firmware\n"); + rc = call_hfi_op(hdev, session_flush, inst->session, + HAL_FLUSH_OUTPUT); + } + if (rc) { + dprintk(VIDC_ERR, + "Sending flush to firmware failed, flush out all buffers\n"); + msm_comm_flush_in_invalid_state(inst); + /* disable in_flush */ + inst->in_flush = false; } return rc; } - enum hal_extradata_id msm_comm_get_hal_extradata_index( enum v4l2_mpeg_vidc_extradata index) { @@ -5371,19 +5134,19 @@ int msm_comm_kill_session(struct msm_vidc_inst *inst) return rc; } -struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst, - size_t size, u32 align, u32 flags, - enum hal_buffer buffer_type, int map_kernel) +int msm_comm_smem_alloc(struct msm_vidc_inst *inst, + size_t size, u32 align, u32 flags, enum hal_buffer buffer_type, + int map_kernel, struct msm_smem *smem) { - struct msm_smem *m = NULL; + int rc = 0; if (!inst || !inst->core) { dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst); - return NULL; + return -EINVAL; } - m = msm_smem_alloc(inst->mem_client, size, align, - flags, buffer_type, map_kernel); - return m; + rc = msm_smem_alloc(inst->mem_client, size, align, + flags, buffer_type, map_kernel, smem); + return rc; } void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem) @@ -5404,28 +5167,138 @@ int msm_comm_smem_cache_operations(struct msm_vidc_inst 
*inst, "%s: invalid params: %pK %pK\n", __func__, inst, mem); return -EINVAL; } - return msm_smem_cache_operations(inst->mem_client, mem, cache_ops); + return msm_smem_cache_operations(inst->mem_client, mem->handle, + mem->offset, mem->size, cache_ops); } -struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst, - int fd, u32 offset, enum hal_buffer buffer_type) +int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst, + struct v4l2_buffer *b) { - struct msm_smem *m = NULL; + int rc = 0, i; + void *dma_buf; + void *handle; + bool skip; - if (!inst || !inst->core) { - dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst); - return NULL; + if (!inst || !b) { + dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", + __func__, inst, b); + return -EINVAL; } - if (inst->state == MSM_VIDC_CORE_INVALID) { - dprintk(VIDC_ERR, "Core in Invalid state, returning from %s\n", - __func__); - return NULL; + for (i = 0; i < b->length; i++) { + unsigned long offset, size; + enum smem_cache_ops cache_ops; + + dma_buf = msm_smem_get_dma_buf(b->m.planes[i].m.fd); + handle = msm_smem_get_handle(inst->mem_client, dma_buf); + + offset = b->m.planes[i].data_offset; + size = b->m.planes[i].length; + cache_ops = SMEM_CACHE_INVALIDATE; + skip = false; + + if (inst->session_type == MSM_VIDC_DECODER) { + if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + if (!i) { /* bitstream */ + size = b->m.planes[i].bytesused; + cache_ops = SMEM_CACHE_CLEAN_INVALIDATE; + } + } else if (b->type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (!i) { /* yuv */ + /* all values are correct */ + } + } + } else if (inst->session_type == MSM_VIDC_ENCODER) { + if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + if (!i) { /* yuv */ + size = b->m.planes[i].bytesused; + cache_ops = SMEM_CACHE_CLEAN_INVALIDATE; + } else { /* extradata */ + cache_ops = SMEM_CACHE_CLEAN_INVALIDATE; + } + } else if (b->type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (!i) { /* bitstream */ + /* all values are correct */ + } + } + } + + if (!skip) { + rc = msm_smem_cache_operations(inst->mem_client, handle, + offset, size, cache_ops); + if (rc) + print_v4l2_buffer(VIDC_ERR, + "qbuf cache ops failed", inst, b); + } + + msm_smem_put_handle(inst->mem_client, handle); + msm_smem_put_dma_buf(dma_buf); } - m = msm_smem_user_to_kernel(inst->mem_client, - fd, offset, buffer_type); - return m; + return rc; +} + +int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst, + struct v4l2_buffer *b) +{ + int rc = 0, i; + void *dma_buf; + void *handle; + bool skip; + + if (!inst || !b) { + dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", + __func__, inst, b); + return -EINVAL; + } + + for (i = 0; i < b->length; i++) { + unsigned long offset, size; + enum smem_cache_ops cache_ops; + + dma_buf = msm_smem_get_dma_buf(b->m.planes[i].m.fd); + handle = msm_smem_get_handle(inst->mem_client, dma_buf); + + offset = b->m.planes[i].data_offset; + size = b->m.planes[i].length; + cache_ops = SMEM_CACHE_INVALIDATE; + skip = false; + + if (inst->session_type == MSM_VIDC_DECODER) { + if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + if (!i) /* bitstream */ + skip = true; + } else if (b->type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (!i) /* yuv */ + skip = true; + } + } else if (inst->session_type == MSM_VIDC_ENCODER) { + if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + if (!i) /* yuv */ + skip = true; + } else if (b->type == + V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { + if (!i) /* bitstream */ + skip = true; + } + } + + if (!skip) { + rc = 
msm_smem_cache_operations(inst->mem_client, handle, + offset, size, cache_ops); + if (rc) + print_v4l2_buffer(VIDC_ERR, + "dqbuf cache ops failed", inst, b); + } + + msm_smem_put_handle(inst->mem_client, handle); + msm_smem_put_dma_buf(dma_buf); + } + + return rc; } void msm_vidc_fw_unload_handler(struct work_struct *work) @@ -5582,9 +5455,8 @@ int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a) void msm_comm_print_inst_info(struct msm_vidc_inst *inst) { - struct buffer_info *temp; + struct msm_vidc_buffer *mbuf; struct internal_buf *buf; - int i = 0; bool is_decode = false; enum vidc_ports port; bool is_secure = false; @@ -5612,37 +5484,32 @@ void msm_comm_print_inst_info(struct msm_vidc_inst *inst) inst, inst->session_type); mutex_lock(&inst->registeredbufs.lock); dprintk(VIDC_ERR, "registered buffer list:\n"); - list_for_each_entry(temp, &inst->registeredbufs.list, list) - for (i = 0; i < temp->num_planes; i++) - dprintk(VIDC_ERR, - "type: %d plane: %d addr: %pa size: %d\n", - temp->type, i, &temp->device_addr[i], - temp->size[i]); - + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) + print_vidc_buffer(VIDC_ERR, "buf", inst, mbuf); mutex_unlock(&inst->registeredbufs.lock); mutex_lock(&inst->scratchbufs.lock); dprintk(VIDC_ERR, "scratch buffer list:\n"); list_for_each_entry(buf, &inst->scratchbufs.list, list) - dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n", - buf->buffer_type, &buf->handle->device_addr, - buf->handle->size); + dprintk(VIDC_ERR, "type: %d addr: %x size: %u\n", + buf->buffer_type, buf->smem.device_addr, + buf->smem.size); mutex_unlock(&inst->scratchbufs.lock); mutex_lock(&inst->persistbufs.lock); dprintk(VIDC_ERR, "persist buffer list:\n"); list_for_each_entry(buf, &inst->persistbufs.list, list) - dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n", - buf->buffer_type, &buf->handle->device_addr, - buf->handle->size); + dprintk(VIDC_ERR, "type: %d addr: %x size: %u\n", + buf->buffer_type, buf->smem.device_addr, + buf->smem.size); mutex_unlock(&inst->persistbufs.lock); mutex_lock(&inst->outputbufs.lock); dprintk(VIDC_ERR, "dpb buffer list:\n"); list_for_each_entry(buf, &inst->outputbufs.list, list) - dprintk(VIDC_ERR, "type: %d addr: %pa size: %zu\n", - buf->buffer_type, &buf->handle->device_addr, - buf->handle->size); + dprintk(VIDC_ERR, "type: %d addr: %x size: %u\n", + buf->buffer_type, buf->smem.device_addr, + buf->smem.size); mutex_unlock(&inst->outputbufs.lock); } @@ -5739,3 +5606,540 @@ u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width) return VENUS_BUFFER_SIZE(COLOR_FMT_NV12_BPP10_UBWC, width, height); } + +void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + struct vb2_buffer *vb2 = NULL; + + if (!(tag & msm_vidc_debug) || !inst || !mbuf) + return; + + vb2 = &mbuf->vvb.vb2_buf; + + if (vb2->num_planes == 1) + dprintk(tag, + "%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d\n", + str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? 
+ "OUTPUT" : "CAPTURE", hash32_ptr(inst->session), + vb2->index, vb2->planes[0].m.fd, + vb2->planes[0].data_offset, mbuf->smem[0].device_addr, + vb2->planes[0].length, vb2->planes[0].bytesused, + mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp, + mbuf->smem[0].refcount); + else + dprintk(tag, + "%s: %s: %x : idx %2d fd %d off %d daddr %x size %d filled %d flags 0x%x ts %lld refcnt %d, extradata: fd %d off %d daddr %x size %d filled %d refcnt %d\n", + str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? + "OUTPUT" : "CAPTURE", hash32_ptr(inst->session), + vb2->index, vb2->planes[0].m.fd, + vb2->planes[0].data_offset, mbuf->smem[0].device_addr, + vb2->planes[0].length, vb2->planes[0].bytesused, + mbuf->vvb.flags, mbuf->vvb.vb2_buf.timestamp, + mbuf->smem[0].refcount, vb2->planes[1].m.fd, + vb2->planes[1].data_offset, mbuf->smem[1].device_addr, + vb2->planes[1].length, vb2->planes[1].bytesused, + mbuf->smem[1].refcount); +} + +void print_vb2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, + struct vb2_buffer *vb2) +{ + if (!(tag & msm_vidc_debug) || !inst || !vb2) + return; + + if (vb2->num_planes == 1) + dprintk(tag, + "%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n", + str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? + "OUTPUT" : "CAPTURE", hash32_ptr(inst->session), + vb2->index, vb2->planes[0].m.fd, + vb2->planes[0].data_offset, vb2->planes[0].length, + vb2->planes[0].bytesused); + else + dprintk(tag, + "%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d\n", + str, vb2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? + "OUTPUT" : "CAPTURE", hash32_ptr(inst->session), + vb2->index, vb2->planes[0].m.fd, + vb2->planes[0].data_offset, vb2->planes[0].length, + vb2->planes[0].bytesused, vb2->planes[1].m.fd, + vb2->planes[1].data_offset, vb2->planes[1].length); +} + +void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, + struct v4l2_buffer *v4l2) +{ + if (!(tag & msm_vidc_debug) || !inst || !v4l2) + return; + + if (v4l2->length == 1) + dprintk(tag, + "%s: %s: %x : idx %2d fd %d off %d size %d filled %d\n", + str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? + "OUTPUT" : "CAPTURE", hash32_ptr(inst->session), + v4l2->index, v4l2->m.planes[0].m.fd, + v4l2->m.planes[0].data_offset, + v4l2->m.planes[0].length, + v4l2->m.planes[0].bytesused); + else + dprintk(tag, + "%s: %s: %x : idx %2d fd %d off %d size %d filled %d, extradata: fd %d off %d size %d\n", + str, v4l2->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ? 
+ "OUTPUT" : "CAPTURE", hash32_ptr(inst->session), + v4l2->index, v4l2->m.planes[0].m.fd, + v4l2->m.planes[0].data_offset, + v4l2->m.planes[0].length, + v4l2->m.planes[0].bytesused, + v4l2->m.planes[1].m.fd, + v4l2->m.planes[1].data_offset, + v4l2->m.planes[1].length); +} + +bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2, u32 i) +{ + struct vb2_buffer *vb; + + if (!inst || !mbuf || !vb2) { + dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n", + __func__, inst, mbuf, vb2); + return false; + } + + vb = &mbuf->vvb.vb2_buf; + if (vb->planes[i].m.fd == vb2->planes[i].m.fd && + vb->planes[i].data_offset == vb2->planes[i].data_offset && + vb->planes[i].length == vb2->planes[i].length) { + return true; + } + + return false; +} + +bool msm_comm_compare_vb2_planes(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2) +{ + int i = 0; + struct vb2_buffer *vb; + + if (!inst || !mbuf || !vb2) { + dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n", + __func__, inst, mbuf, vb2); + return false; + } + + vb = &mbuf->vvb.vb2_buf; + + if (vb->num_planes != vb2->num_planes) + return false; + + for (i = 0; i < vb->num_planes; i++) { + if (!msm_comm_compare_vb2_plane(inst, mbuf, vb2, i)) + return false; + } + + return true; +} + +bool msm_comm_compare_dma_plane(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, unsigned long *dma_planes, u32 i) +{ + if (!inst || !mbuf || !dma_planes) { + dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n", + __func__, inst, mbuf, dma_planes); + return false; + } + + if ((unsigned long)mbuf->smem[i].dma_buf == dma_planes[i]) + return true; + + return false; +} + +bool msm_comm_compare_dma_planes(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, unsigned long *dma_planes) +{ + int i = 0; + struct vb2_buffer *vb; + + if (!inst || !mbuf || !dma_planes) { + dprintk(VIDC_ERR, "%s: invalid params, %pK %pK %pK\n", + __func__, inst, mbuf, dma_planes); + return false; + } + + vb = &mbuf->vvb.vb2_buf; + for (i = 0; i < vb->num_planes; i++) { + if (!msm_comm_compare_dma_plane(inst, mbuf, dma_planes, i)) + return false; + } + + return true; +} + + +bool msm_comm_compare_device_plane(struct msm_vidc_buffer *mbuf, + u32 *planes, u32 i) +{ + if (!mbuf || !planes) { + dprintk(VIDC_ERR, "%s: invalid params, %pK %pK\n", + __func__, mbuf, planes); + return false; + } + + if (mbuf->smem[i].device_addr == planes[i]) + return true; + + return false; +} + +bool msm_comm_compare_device_planes(struct msm_vidc_buffer *mbuf, + u32 *planes) +{ + int i = 0; + + if (!mbuf || !planes) + return false; + + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + if (!msm_comm_compare_device_plane(mbuf, planes, i)) + return false; + } + + return true; +} + +struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes( + struct msm_vidc_inst *inst, u32 *planes) +{ + struct msm_vidc_buffer *mbuf; + bool found = false; + + mutex_lock(&inst->registeredbufs.lock); + found = false; + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (msm_comm_compare_device_planes(mbuf, planes)) { + found = true; + break; + } + } + mutex_unlock(&inst->registeredbufs.lock); + if (!found) { + dprintk(VIDC_ERR, + "%s: data_addr %x, extradata_addr %x not found\n", + __func__, planes[0], planes[1]); + mbuf = NULL; + } + + return mbuf; +} + +int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc; + struct vb2_buffer *vb; + + if (!inst || !mbuf) { + 
dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return -EINVAL; + } + + vb = msm_comm_get_vb_using_vidc_buffer(inst, mbuf); + if (!vb) { + print_vidc_buffer(VIDC_ERR, + "vb not found for buf", inst, mbuf); + return -EINVAL; + } + + vb->planes[0].bytesused = 0; + rc = msm_comm_vb2_buffer_done(inst, vb); + if (rc) + print_vidc_buffer(VIDC_ERR, + "vb2_buffer_done failed for", inst, mbuf); + + return rc; +} + +struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, + struct vb2_buffer *vb2) +{ + int rc = 0; + struct vb2_v4l2_buffer *vbuf; + struct vb2_buffer *vb; + unsigned long dma_planes[VB2_MAX_PLANES] = {0}; + struct msm_vidc_buffer *mbuf; + bool found = false; + int i; + + if (!inst || !vb2) { + dprintk(VIDC_ERR, "%s: invalid params\n", __func__); + return NULL; + } + + for (i = 0; i < vb2->num_planes; i++) { + /* + * always compare dma_buf addresses which is guaranteed + * to be same across the processes (duplicate fds). + */ + dma_planes[i] = (unsigned long)dma_buf_get(vb2->planes[i].m.fd); + dma_buf_put((struct dma_buf *)dma_planes[i]); + } + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (msm_comm_compare_dma_planes(inst, mbuf, dma_planes)) { + found = true; + break; + } + } + + if (!found) { + /* this is new vb2_buffer */ + mbuf = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL); + if (!mbuf) { + dprintk(VIDC_ERR, "%s: alloc msm_vidc_buffer failed\n", + __func__); + rc = -ENOMEM; + goto exit; + } + } + + vbuf = to_vb2_v4l2_buffer(vb2); + memcpy(&mbuf->vvb, vbuf, sizeof(struct vb2_v4l2_buffer)); + vb = &mbuf->vvb.vb2_buf; + + for (i = 0; i < vb->num_planes; i++) { + mbuf->smem[i].buffer_type = get_hal_buffer_type(vb->type, i); + mbuf->smem[i].fd = vb->planes[i].m.fd; + mbuf->smem[i].offset = vb->planes[i].data_offset; + mbuf->smem[i].size = vb->planes[i].length; + rc = msm_smem_map_dma_buf(inst, &mbuf->smem[i]); + if (rc) { + dprintk(VIDC_ERR, "%s: map failed.\n", __func__); + goto exit; + } + /* increase refcount as we get both fbd and rbr */ + rc = msm_smem_map_dma_buf(inst, &mbuf->smem[i]); + if (rc) { + dprintk(VIDC_ERR, "%s: map failed..\n", __func__); + goto exit; + } + } + + /* special handling for decoder */ + if (inst->session_type == MSM_VIDC_DECODER) { + if (found) { + rc = -EEXIST; + } else { + bool found_plane0 = false; + struct msm_vidc_buffer *temp; + /* + * client might have queued same plane[0] but different + * plane[1] search plane[0] and if found don't queue the + * buffer, the buffer will be queued when rbr event + * arrived. 
+ */ + list_for_each_entry(temp, &inst->registeredbufs.list, + list) { + if (msm_comm_compare_dma_plane(inst, temp, + dma_planes, 0)) { + found_plane0 = true; + break; + } + } + if (found_plane0) + rc = -EEXIST; + } + } + + /* add the new buffer to list */ + if (!found) + list_add_tail(&mbuf->list, &inst->registeredbufs.list); + + mutex_unlock(&inst->registeredbufs.lock); + if (rc == -EEXIST) { + print_vidc_buffer(VIDC_DBG, "qbuf upon rbr", inst, mbuf); + return ERR_PTR(rc); + } + + return mbuf; + +exit: + mutex_unlock(&inst->registeredbufs.lock); + dprintk(VIDC_ERR, "%s: rc %d\n", __func__, rc); + msm_comm_unmap_vidc_buffer(inst, mbuf); + if (!found) + kfree(mbuf); + + return ERR_PTR(rc); +} + +void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + struct msm_vidc_buffer *temp; + bool found = false; + int i = 0; + + if (!inst || !mbuf) { + dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return; + } + + mutex_lock(&inst->registeredbufs.lock); + /* check if mbuf was not removed by any chance */ + list_for_each_entry(temp, &inst->registeredbufs.list, list) { + if (msm_comm_compare_vb2_planes(inst, mbuf, + &temp->vvb.vb2_buf)) { + found = true; + break; + } + } + if (!found) { + print_vidc_buffer(VIDC_ERR, "buf was removed", inst, mbuf); + goto unlock; + } + + print_vidc_buffer(VIDC_DBG, "dqbuf", inst, mbuf); + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i])) + print_vidc_buffer(VIDC_ERR, + "dqbuf: unmap failed.", inst, mbuf); + + if (!(mbuf->vvb.flags & V4L2_QCOM_BUF_FLAG_READONLY)) { + /* rbr won't come for this buffer */ + if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i])) + print_vidc_buffer(VIDC_ERR, + "dqbuf: unmap failed..", inst, mbuf); + } /* else RBR event expected */ + } + /* + * remove the entry if plane[0].refcount is zero else + * don't remove as client queued same buffer that's why + * plane[0].refcount is not zero + */ + if (!mbuf->smem[0].refcount) { + list_del(&mbuf->list); + kfree(mbuf); + mbuf = NULL; + } +unlock: + mutex_unlock(&inst->registeredbufs.lock); +} + +void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes) +{ + int rc = 0; + struct msm_vidc_buffer *mbuf = NULL; + bool found = false; + int i = 0; + + mutex_lock(&inst->registeredbufs.lock); + found = false; + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (msm_comm_compare_device_planes(mbuf, planes)) { + found = true; + break; + } + } + if (found) { + msm_vidc_queue_rbr_event(inst, + mbuf->vvb.vb2_buf.planes[0].m.fd, + mbuf->vvb.vb2_buf.planes[0].data_offset); + + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i])) + print_vidc_buffer(VIDC_ERR, + "rbr unmap failed.", inst, mbuf); + } + /* refcount is not zero if client queued the same buffer */ + if (!mbuf->smem[0].refcount) { + list_del(&mbuf->list); + kfree(mbuf); + mbuf = NULL; + } + } else { + dprintk(VIDC_ERR, + "%s: data_addr %x extradata_addr %x not found\n", + __func__, planes[0], planes[1]); + goto unlock; + } + + /* + * 1. client might have pushed same planes in which case mbuf will be + * same and refcounts are positive and buffer wouldn't have been + * removed from the registeredbufs list. + * 2. client might have pushed same planes[0] but different planes[1] + * in which case mbuf will be different. + * 3. in either case we can search mbuf->smem[0].device_addr in the list + * and if found queue it to video hw (if not flushing). 
+ */ + found = false; + list_for_each_entry(mbuf, &inst->registeredbufs.list, list) { + if (msm_comm_compare_device_plane(mbuf, planes, 0)) { + found = true; + break; + } + } + if (!found) + goto unlock; + + /* found means client queued the buffer already */ + if (inst->in_reconfig || inst->in_flush) { + print_vidc_buffer(VIDC_DBG, "rbr flush buf", inst, mbuf); + msm_comm_flush_vidc_buffer(inst, mbuf); + msm_comm_unmap_vidc_buffer(inst, mbuf); + /* remove from list */ + list_del(&mbuf->list); + kfree(mbuf); + mbuf = NULL; + + /* don't queue the buffer */ + found = false; + } +unlock: + mutex_unlock(&inst->registeredbufs.lock); + + if (found) { + print_vidc_buffer(VIDC_DBG, "rbr qbuf", inst, mbuf); + rc = msm_comm_qbuf(inst, mbuf); + if (rc) + print_vidc_buffer(VIDC_ERR, + "rbr qbuf failed", inst, mbuf); + } +} + +int msm_comm_unmap_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf) +{ + int rc = 0, i; + + if (!inst || !mbuf) { + dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n", + __func__, inst, mbuf); + return -EINVAL; + } + if (mbuf->vvb.vb2_buf.num_planes > VIDEO_MAX_PLANES) { + dprintk(VIDC_ERR, "%s: invalid num_planes %d\n", __func__, + mbuf->vvb.vb2_buf.num_planes); + return -EINVAL; + } + + for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) { + u32 refcount = mbuf->smem[i].refcount; + + while (refcount) { + if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i])) + print_vidc_buffer(VIDC_ERR, + "unmap failed for buf", inst, mbuf); + refcount--; + } + } + + return rc; +} + diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h index 52925eb66ab7..5c653f5c1e49 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h @@ -14,6 +14,7 @@ #ifndef _MSM_VIDC_COMMON_H_ #define _MSM_VIDC_COMMON_H_ #include "msm_vidc_internal.h" + struct vb2_buf_entry { struct list_head list; struct vb2_buffer *vb; @@ -28,6 +29,8 @@ enum load_calc_quirks { LOAD_CALC_IGNORE_NON_REALTIME_LOAD = 1 << 2, }; +enum hal_buffer get_hal_buffer_type(unsigned int type, + unsigned int plane_num); struct msm_vidc_core *get_vidc_core(int core_id); const struct msm_vidc_format *msm_comm_get_pixel_fmt_index( const struct msm_vidc_format fmt[], int size, int index, int fmt_type); @@ -46,7 +49,7 @@ int msm_comm_set_scratch_buffers(struct msm_vidc_inst *inst); int msm_comm_set_persist_buffers(struct msm_vidc_inst *inst); int msm_comm_set_output_buffers(struct msm_vidc_inst *inst); int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst); -int msm_comm_qbuf(struct msm_vidc_inst *inst, struct vb2_buffer *vb); +int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); void msm_comm_flush_dynamic_buffers(struct msm_vidc_inst *inst); int msm_comm_flush(struct msm_vidc_inst *inst, u32 flags); int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst, @@ -69,14 +72,12 @@ void msm_comm_session_clean(struct msm_vidc_inst *inst); int msm_comm_kill_session(struct msm_vidc_inst *inst); enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst); enum hal_buffer msm_comm_get_hal_output_buffer(struct msm_vidc_inst *inst); -struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst, - size_t size, u32 align, u32 flags, - enum hal_buffer buffer_type, int map_kernel); -void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem); +int msm_comm_smem_alloc(struct msm_vidc_inst *inst, size_t size, u32 align, + u32 flags, enum hal_buffer buffer_type, 
int map_kernel, + struct msm_smem *smem); +void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *smem); int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst, struct msm_smem *mem, enum smem_cache_ops cache_ops); -struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst, - int fd, u32 offset, enum hal_buffer buffer_type); enum hal_video_codec get_hal_codec(int fourcc); enum hal_domain get_hal_domain(int session_type); int msm_comm_check_core_init(struct msm_vidc_core *core); @@ -107,4 +108,41 @@ u32 get_frame_size_rgba(int plane, u32 height, u32 width); u32 get_frame_size_nv21(int plane, u32 height, u32 width); u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width); void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst); +struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer( + struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); +struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes( + struct msm_vidc_inst *inst, u32 *planes); +struct msm_vidc_buffer *msm_comm_get_vidc_buffer(struct msm_vidc_inst *inst, + struct vb2_buffer *vb2); +void msm_comm_put_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +void handle_release_buffer_reference(struct msm_vidc_inst *inst, u32 *planes); +int msm_comm_vb2_buffer_done(struct msm_vidc_inst *inst, + struct vb2_buffer *vb); +int msm_comm_flush_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +int msm_comm_unmap_vidc_buffer(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +bool msm_comm_compare_dma_plane(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, unsigned long *dma_planes, u32 i); +bool msm_comm_compare_dma_planes(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, unsigned long *dma_planes); +bool msm_comm_compare_vb2_plane(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2, u32 i); +bool msm_comm_compare_vb2_planes(struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf, struct vb2_buffer *vb2); +bool msm_comm_compare_device_plane(struct msm_vidc_buffer *mbuf, + u32 *planes, u32 i); +bool msm_comm_compare_device_planes(struct msm_vidc_buffer *mbuf, + u32 *planes); +int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst, + struct v4l2_buffer *b); +int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst, + struct v4l2_buffer *b); +void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, + struct msm_vidc_buffer *mbuf); +void print_vb2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, + struct vb2_buffer *vb2); +void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst, + struct v4l2_buffer *v4l2); #endif diff --git a/drivers/media/platform/msm/vidc/msm_vidc_debug.c b/drivers/media/platform/msm/vidc/msm_vidc_debug.c index 3b1d08d2b18c..58c3b0f921db 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_debug.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_debug.c @@ -265,7 +265,7 @@ static int inst_info_open(struct inode *inode, struct file *file) static int publish_unreleased_reference(struct msm_vidc_inst *inst) { - struct buffer_info *temp = NULL; + struct msm_vidc_buffer *temp = NULL; if (!inst) { dprintk(VIDC_ERR, "%s: invalid param\n", __func__); @@ -277,14 +277,15 @@ static int publish_unreleased_reference(struct msm_vidc_inst *inst) mutex_lock(&inst->registeredbufs.lock); list_for_each_entry(temp, &inst->registeredbufs.list, list) { - if (temp->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && - 
!temp->inactive && atomic_read(&temp->ref_count)) { + struct vb2_buffer *vb2 = &temp->vvb.vb2_buf; + + if (vb2->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { write_str(&dbg_buf, - "\tpending buffer: %#lx fd[0] = %d ref_count = %d held by: %s\n", - temp->device_addr[0], - temp->fd[0], - atomic_read(&temp->ref_count), - DYNAMIC_BUF_OWNER(temp)); + "\tbuffer: %#x fd[0] = %d size %d refcount = %d\n", + temp->smem[0].device_addr, + vb2->planes[0].m.fd, + vb2->planes[0].length, + temp->smem[0].refcount); } } mutex_unlock(&inst->registeredbufs.lock); @@ -403,18 +404,14 @@ void msm_vidc_debugfs_update(struct msm_vidc_inst *inst, switch (e) { case MSM_VIDC_DEBUGFS_EVENT_ETB: - mutex_lock(&inst->lock); inst->count.etb++; - mutex_unlock(&inst->lock); if (inst->count.ebd && inst->count.ftb > inst->count.fbd) { d->pdata[FRAME_PROCESSING].name[0] = '\0'; tic(inst, FRAME_PROCESSING, a); } break; case MSM_VIDC_DEBUGFS_EVENT_EBD: - mutex_lock(&inst->lock); inst->count.ebd++; - mutex_unlock(&inst->lock); if (inst->count.ebd && inst->count.ebd == inst->count.etb) { toc(inst, FRAME_PROCESSING); dprintk(VIDC_PROF, "EBD: FW needs input buffers\n"); @@ -431,6 +428,7 @@ void msm_vidc_debugfs_update(struct msm_vidc_inst *inst, } break; case MSM_VIDC_DEBUGFS_EVENT_FBD: + inst->count.fbd++; inst->debug.samples++; if (inst->count.ebd && inst->count.fbd == inst->count.ftb) { toc(inst, FRAME_PROCESSING); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h index ca61708e82df..22772ef021bc 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h @@ -33,7 +33,6 @@ #include #include #include - #include "vidc_hfi_api.h" #define MSM_VIDC_DRV_NAME "msm_vidc_driver" @@ -141,7 +140,7 @@ enum buffer_owner { struct vidc_freq_data { struct list_head list; - ion_phys_addr_t device_addr; + u32 device_addr; unsigned long freq; }; @@ -155,7 +154,7 @@ struct recon_buf { struct internal_buf { struct list_head list; enum hal_buffer buffer_type; - struct msm_smem *handle; + struct msm_smem smem; enum buffer_owner buffer_ownership; }; @@ -322,7 +321,6 @@ struct msm_vidc_inst { enum instance_state state; struct msm_vidc_format fmts[MAX_PORT_NUM]; struct buf_queue bufq[MAX_PORT_NUM]; - struct msm_vidc_list pendingq; struct msm_vidc_list freqs; struct msm_vidc_list scratchbufs; struct msm_vidc_list persistbufs; @@ -331,7 +329,7 @@ struct msm_vidc_inst { struct msm_vidc_list reconbufs; struct msm_vidc_list registeredbufs; struct buffer_requirements buff_req; - void *mem_client; + struct smem_client *mem_client; struct v4l2_ctrl_handler ctrl_handler; struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1]; struct v4l2_ctrl **cluster; @@ -352,8 +350,7 @@ struct msm_vidc_inst { struct v4l2_ctrl **ctrls; enum msm_vidc_pixel_depth bit_depth; struct kref kref; - u32 buffers_held_in_driver; - atomic_t in_flush; + bool in_flush; u32 pic_struct; u32 colour_space; u32 profile; @@ -389,53 +386,33 @@ int msm_vidc_check_session_supported(struct msm_vidc_inst *inst); int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst); void msm_vidc_queue_v4l2_event(struct msm_vidc_inst *inst, int event_type); -struct buffer_info { +struct msm_vidc_buffer { struct list_head list; - int type; - int num_planes; - int fd[VIDEO_MAX_PLANES]; - int buff_off[VIDEO_MAX_PLANES]; - int size[VIDEO_MAX_PLANES]; - unsigned long uvaddr[VIDEO_MAX_PLANES]; - ion_phys_addr_t device_addr[VIDEO_MAX_PLANES]; - struct msm_smem 
*handle[VIDEO_MAX_PLANES]; - enum v4l2_memory memory; - u32 v4l2_index; - bool pending_deletion; - atomic_t ref_count; - bool dequeued; - bool inactive; - bool mapped[VIDEO_MAX_PLANES]; - int same_fd_ref[VIDEO_MAX_PLANES]; - struct timeval timestamp; + struct msm_smem smem[VIDEO_MAX_PLANES]; + struct vb2_v4l2_buffer vvb; + bool deferred; }; -struct buffer_info *device_to_uvaddr(struct msm_vidc_list *buf_list, - ion_phys_addr_t device_addr); -int buf_ref_get(struct msm_vidc_inst *inst, struct buffer_info *binfo); -int buf_ref_put(struct msm_vidc_inst *inst, struct buffer_info *binfo); -int output_buffer_cache_invalidate(struct msm_vidc_inst *inst, - struct buffer_info *binfo); -int qbuf_dynamic_buf(struct msm_vidc_inst *inst, - struct buffer_info *binfo); -int unmap_and_deregister_buf(struct msm_vidc_inst *inst, - struct buffer_info *binfo); - void msm_comm_handle_thermal_event(void); void *msm_smem_new_client(enum smem_type mtype, void *platform_resources, enum session_type stype); -struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags, - enum hal_buffer buffer_type, int map_kernel); -void msm_smem_free(void *clt, struct msm_smem *mem); +int msm_smem_alloc(struct smem_client *client, + size_t size, u32 align, u32 flags, enum hal_buffer buffer_type, + int map_kernel, struct msm_smem *smem); +int msm_smem_free(void *clt, struct msm_smem *mem); void msm_smem_delete_client(void *clt); -int msm_smem_cache_operations(void *clt, struct msm_smem *mem, - enum smem_cache_ops); -struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset, - enum hal_buffer buffer_type); struct context_bank_info *msm_smem_get_context_bank(void *clt, bool is_secure, enum hal_buffer buffer_type); +int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem); +int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem); +void *msm_smem_get_dma_buf(int fd); +void msm_smem_put_dma_buf(void *dma_buf); +void *msm_smem_get_handle(struct smem_client *client, void *dma_buf); +void msm_smem_put_handle(struct smem_client *client, void *handle); +int msm_smem_cache_operations(struct smem_client *client, + void *handle, unsigned long offset, unsigned long size, + enum smem_cache_ops cache_op); void msm_vidc_fw_unload_handler(struct work_struct *work); -bool msm_smem_compare_buffers(void *clt, int fd, void *priv); /* * XXX: normally should be in msm_vidc.h, but that's meant for public APIs, * whereas this is private diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c index 62dcc59aa5a6..f8d88429cd2e 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.c +++ b/drivers/media/platform/msm/vidc/venus_hfi.c @@ -560,7 +560,7 @@ static int __smem_alloc(struct venus_hfi_device *dev, struct vidc_mem_addr *mem, u32 size, u32 align, u32 flags, u32 usage) { - struct msm_smem *alloc = NULL; + struct msm_smem *alloc = &mem->mem_data; int rc = 0; if (!dev || !dev->hal_client || !mem || !size) { @@ -569,8 +569,9 @@ static int __smem_alloc(struct venus_hfi_device *dev, } dprintk(VIDC_INFO, "start to alloc size: %d, flags: %d\n", size, flags); - alloc = msm_smem_alloc(dev->hal_client, size, align, flags, usage, 1); - if (!alloc) { + rc = msm_smem_alloc(dev->hal_client, size, align, flags, + usage, 1, alloc); + if (rc) { dprintk(VIDC_ERR, "Alloc failed\n"); rc = -ENOMEM; goto fail_smem_alloc; @@ -578,17 +579,16 @@ static int __smem_alloc(struct venus_hfi_device *dev, dprintk(VIDC_DBG, "__smem_alloc: ptr = %pK, size = %d\n", 
alloc->kvaddr, size); - rc = msm_smem_cache_operations(dev->hal_client, alloc, - SMEM_CACHE_CLEAN); + rc = msm_smem_cache_operations(dev->hal_client, alloc->handle, 0, + alloc->size, SMEM_CACHE_CLEAN); if (rc) { dprintk(VIDC_WARN, "Failed to clean cache\n"); - dprintk(VIDC_WARN, "This may result in undefined behavior\n"); } mem->mem_size = alloc->size; - mem->mem_data = alloc; mem->align_virtual_addr = alloc->kvaddr; mem->align_device_addr = alloc->device_addr; + return rc; fail_smem_alloc: return rc; @@ -1312,7 +1312,7 @@ static void __interface_queues_release(struct venus_hfi_device *device) unsigned long mem_map_table_base_addr; struct context_bank_info *cb; - if (device->qdss.mem_data) { + if (device->qdss.align_virtual_addr) { qdss = (struct hfi_mem_map_table *) device->qdss.align_virtual_addr; qdss->mem_map_num_entries = num_entries; @@ -1338,32 +1338,27 @@ static void __interface_queues_release(struct venus_hfi_device *device) mem_map[i].size); } - __smem_free(device, device->qdss.mem_data); + __smem_free(device, &device->qdss.mem_data); } - __smem_free(device, device->iface_q_table.mem_data); - __smem_free(device, device->sfr.mem_data); + __smem_free(device, &device->iface_q_table.mem_data); + __smem_free(device, &device->sfr.mem_data); for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) { device->iface_queues[i].q_hdr = NULL; - device->iface_queues[i].q_array.mem_data = NULL; device->iface_queues[i].q_array.align_virtual_addr = NULL; device->iface_queues[i].q_array.align_device_addr = 0; } - device->iface_q_table.mem_data = NULL; device->iface_q_table.align_virtual_addr = NULL; device->iface_q_table.align_device_addr = 0; - device->qdss.mem_data = NULL; device->qdss.align_virtual_addr = NULL; device->qdss.align_device_addr = 0; - device->sfr.mem_data = NULL; device->sfr.align_virtual_addr = NULL; device->sfr.align_device_addr = 0; - device->mem_addr.mem_data = NULL; device->mem_addr.align_virtual_addr = NULL; device->mem_addr.align_device_addr = 0; @@ -1452,7 +1447,6 @@ static int __interface_queues_init(struct venus_hfi_device *dev) struct vidc_mem_addr *mem_addr; int offset = 0; int num_entries = dev->res->qdss_addr_set.count; - u32 value = 0; phys_addr_t fw_bias = 0; size_t q_size; unsigned long mem_map_table_base_addr; @@ -1483,7 +1477,6 @@ static int __interface_queues_init(struct venus_hfi_device *dev) iface_q->q_array.align_virtual_addr = mem_addr->align_virtual_addr + offset; iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE; - iface_q->q_array.mem_data = NULL; offset += iface_q->q_array.mem_size; iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR( dev->iface_q_table.align_virtual_addr, i); @@ -1535,65 +1528,34 @@ static int __interface_queues_init(struct venus_hfi_device *dev) iface_q = &dev->iface_queues[VIDC_IFACEQ_CMDQ_IDX]; q_hdr = iface_q->q_hdr; - q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr; + q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr; q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q; - if ((ion_phys_addr_t)q_hdr->qhdr_start_addr != - iface_q->q_array.align_device_addr) { - dprintk(VIDC_ERR, "Invalid CMDQ device address (%pa)", - &iface_q->q_array.align_device_addr); - } iface_q = &dev->iface_queues[VIDC_IFACEQ_MSGQ_IDX]; q_hdr = iface_q->q_hdr; - q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr; + q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr; q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q; - if ((ion_phys_addr_t)q_hdr->qhdr_start_addr != - iface_q->q_array.align_device_addr) { - dprintk(VIDC_ERR, "Invalid MSGQ 
device address (%pa)", - &iface_q->q_array.align_device_addr); - } iface_q = &dev->iface_queues[VIDC_IFACEQ_DBGQ_IDX]; q_hdr = iface_q->q_hdr; - q_hdr->qhdr_start_addr = (u32)iface_q->q_array.align_device_addr; + q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr; q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q; /* * Set receive request to zero on debug queue as there is no * need of interrupt from video hardware for debug messages */ q_hdr->qhdr_rx_req = 0; - if ((ion_phys_addr_t)q_hdr->qhdr_start_addr != - iface_q->q_array.align_device_addr) { - dprintk(VIDC_ERR, "Invalid DBGQ device address (%pa)", - &iface_q->q_array.align_device_addr); - } - value = (u32)dev->iface_q_table.align_device_addr; - if ((ion_phys_addr_t)value != - dev->iface_q_table.align_device_addr) { - dprintk(VIDC_ERR, - "Invalid iface_q_table device address (%pa)", - &dev->iface_q_table.align_device_addr); - } - - if (dev->qdss.mem_data) { + if (dev->qdss.align_virtual_addr) { qdss = (struct hfi_mem_map_table *)dev->qdss.align_virtual_addr; qdss->mem_map_num_entries = num_entries; mem_map_table_base_addr = dev->qdss.align_device_addr + sizeof(struct hfi_mem_map_table); - qdss->mem_map_table_base_addr = - (u32)mem_map_table_base_addr; - if ((ion_phys_addr_t)qdss->mem_map_table_base_addr != - mem_map_table_base_addr) { - dprintk(VIDC_ERR, - "Invalid mem_map_table_base_addr (%#lx)", - mem_map_table_base_addr); - } + qdss->mem_map_table_base_addr = mem_map_table_base_addr; mem_map = (struct hfi_mem_map *)(qdss + 1); cb = msm_smem_get_context_bank(dev->hal_client, false, HAL_BUFFER_INTERNAL_CMD_QUEUE); - if (!cb) { dprintk(VIDC_ERR, "%s: failed to get context bank\n", __func__); @@ -1604,28 +1566,14 @@ static int __interface_queues_init(struct venus_hfi_device *dev) if (rc) { dprintk(VIDC_ERR, "IOMMU mapping failed, Freeing qdss memdata\n"); - __smem_free(dev, dev->qdss.mem_data); - dev->qdss.mem_data = NULL; + __smem_free(dev, &dev->qdss.mem_data); dev->qdss.align_virtual_addr = NULL; dev->qdss.align_device_addr = 0; } - - value = (u32)dev->qdss.align_device_addr; - if ((ion_phys_addr_t)value != - dev->qdss.align_device_addr) { - dprintk(VIDC_ERR, "Invalid qdss device address (%pa)", - &dev->qdss.align_device_addr); - } } vsfr = (struct hfi_sfr_struct *) dev->sfr.align_virtual_addr; vsfr->bufSize = ALIGNED_SFR_SIZE; - value = (u32)dev->sfr.align_device_addr; - if ((ion_phys_addr_t)value != - dev->sfr.align_device_addr) { - dprintk(VIDC_ERR, "Invalid sfr device address (%pa)", - &dev->sfr.align_device_addr); - } __setup_ucregion_memory_map(dev); return 0; @@ -1911,7 +1859,6 @@ static void __core_clear_interrupt(struct venus_hfi_device *device) __write_register(device, VIDC_CPU_CS_A2HSOFTINTCLR, 1); __write_register(device, VIDC_WRAPPER_INTR_CLEAR, intr_status); - dprintk(VIDC_DBG, "Cleared WRAPPER/A2H interrupt\n"); } static int venus_hfi_core_ping(void *device) @@ -3088,7 +3035,7 @@ static void venus_hfi_core_work_handler(struct work_struct *work) mutex_lock(&device->lock); - dprintk(VIDC_INFO, "Handling interrupt\n"); + dprintk(VIDC_DBG, "Handling interrupt\n"); if (!__core_in_valid_state(device)) { dprintk(VIDC_DBG, "%s - Core not in init state\n", __func__); @@ -3123,7 +3070,8 @@ static void venus_hfi_core_work_handler(struct work_struct *work) for (i = 0; !IS_ERR_OR_NULL(device->response_pkt) && i < num_responses; ++i) { struct msm_vidc_cb_info *r = &device->response_pkt[i]; - + dprintk(VIDC_DBG, "Processing response %d of %d, type %d\n", + (i + 1), num_responses, r->response_type); 
device->callback(r->response_type, &r->response); } @@ -3131,6 +3079,7 @@ static void venus_hfi_core_work_handler(struct work_struct *work) if (!(intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK)) enable_irq(device->hal_data->irq); + dprintk(VIDC_DBG, "Handling interrupt done\n"); /* * XXX: Don't add any code beyond here. Reacquiring locks after release * it above doesn't guarantee the atomicity that we're aiming for. @@ -3991,7 +3940,6 @@ static inline int __resume(struct venus_hfi_device *device) dprintk(VIDC_ERR, "Invalid params: %pK\n", device); return -EINVAL; } else if (device->power_enabled) { - dprintk(VIDC_DBG, "Power is already enabled\n"); goto exit; } else if (!__core_in_valid_state(device)) { dprintk(VIDC_DBG, "venus_hfi_device in deinit state."); diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h index 925918c42b6a..4c4cb06924d2 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.h +++ b/drivers/media/platform/msm/vidc/venus_hfi.h @@ -126,10 +126,10 @@ enum vidc_hw_reg { }; struct vidc_mem_addr { - ion_phys_addr_t align_device_addr; + u32 align_device_addr; u8 *align_virtual_addr; u32 mem_size; - struct msm_smem *mem_data; + struct msm_smem mem_data; }; struct vidc_iface_q_info { diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 695c563de3b4..47ce0ba3f0f1 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include "msm_vidc_resources.h" @@ -882,8 +884,8 @@ struct vidc_buffer_addr_info { enum hal_buffer buffer_type; u32 buffer_size; u32 num_buffers; - ion_phys_addr_t align_device_addr; - ion_phys_addr_t extradata_addr; + u32 align_device_addr; + u32 extradata_addr; u32 extradata_size; u32 response_required; }; @@ -910,8 +912,8 @@ struct vidc_uncompressed_frame_config { struct vidc_frame_data { enum hal_buffer buffer_type; - ion_phys_addr_t device_addr; - ion_phys_addr_t extradata_addr; + u32 device_addr; + u32 extradata_addr; int64_t timestamp; u32 flags; u32 offset; @@ -1111,8 +1113,8 @@ struct vidc_hal_ebd { u32 filled_len; enum hal_picture picture_type; struct recon_stats_type recon_stats; - ion_phys_addr_t packet_buffer; - ion_phys_addr_t extra_data_buffer; + u32 packet_buffer; + u32 extra_data_buffer; }; struct vidc_hal_fbd { @@ -1134,18 +1136,18 @@ struct vidc_hal_fbd { u32 input_tag; u32 input_tag1; enum hal_picture picture_type; - ion_phys_addr_t packet_buffer1; - ion_phys_addr_t extra_data_buffer; + u32 packet_buffer1; + u32 extra_data_buffer; u32 flags2; u32 alloc_len2; u32 filled_len2; u32 offset2; - ion_phys_addr_t packet_buffer2; + u32 packet_buffer2; u32 flags3; u32 alloc_len3; u32 filled_len3; u32 offset3; - ion_phys_addr_t packet_buffer3; + u32 packet_buffer3; enum hal_buffer buffer_type; }; @@ -1247,8 +1249,8 @@ struct msm_vidc_cb_event { u32 width; enum msm_vidc_pixel_depth bit_depth; u32 hal_event_type; - ion_phys_addr_t packet_buffer; - ion_phys_addr_t extra_data_buffer; + u32 packet_buffer; + u32 extra_data_buffer; u32 pic_struct; u32 colour_space; u32 profile; diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h index 237fb4a5bb3f..bb5a21cb682d 100644 --- a/include/media/msm_vidc.h +++ b/include/media/msm_vidc.h @@ -61,15 +61,17 @@ struct dma_mapping_info { }; struct msm_smem { - int mem_type; - size_t size; + u32 refcount; + int fd; + void *dma_buf; + void *handle; void *kvaddr; - 
ion_phys_addr_t device_addr; + u32 device_addr; + unsigned int offset; + unsigned int size; unsigned long flags; - void *smem_priv; enum hal_buffer buffer_type; struct dma_mapping_info mapping_info; - unsigned int offset; }; enum smem_cache_ops { -- GitLab From 9cd18ff1c195c1d4cc2200b893b96ba63f8ac99b Mon Sep 17 00:00:00 2001 From: Karthikeyan Ramasubramanian Date: Tue, 9 May 2017 17:11:26 -0600 Subject: [PATCH 171/786] slimbus: Add support for IOMMU S1 translation Add support for IOMMU stage 1 translation to protect the kernel memory during slimbus transfer. Change-Id: I01db86e767d0b350c175534e69371986e9a024c7 Signed-off-by: Karthikeyan Ramasubramanian Signed-off-by: Sagar Dharia --- .../bindings/slimbus/slim-msm-ctrl.txt | 16 +++ arch/arm64/boot/dts/qcom/sdm845.dtsi | 15 +++ drivers/slimbus/slim-msm-ngd.c | 66 +++++++++-- drivers/slimbus/slim-msm.c | 111 ++++++++++++++++-- drivers/slimbus/slim-msm.h | 9 +- 5 files changed, 201 insertions(+), 16 deletions(-) diff --git a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt index 95cc85ae58ed..7711b8b34207 100644 --- a/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt +++ b/Documentation/devicetree/bindings/slimbus/slim-msm-ctrl.txt @@ -65,6 +65,17 @@ Optional property: and follow appropriate steps to ensure communication on the bus can be resumed after subsytem restart. By default slimbus driver register with ADSP subsystem. + - qcom,iommu-s1-bypass: Boolean flag to bypass IOMMU stage 1 translation. + +Optional subnodes: +qcom,iommu_slim_ctrl_cb : Child node representing the Slimbus controller + context bank. + +Subnode Required properties: +- compatible : Must be "qcom,slim-ctrl-cb"; +- iommus : A list of phandle and IOMMU specifier pairs that + describe the IOMMU master interfaces of the device. + Example: slim@fe12f000 { cell-index = <1>; @@ -78,4 +89,9 @@ Example: qcom,rxreg-access; qcom,apps-ch-pipes = <0x60000000>; qcom,ea-pc = <0x30>; + + iommu_slim_ctrl_cb: qcom,iommu_slim_ctrl_cb { + compatible = "qcom,iommu-slim-ctrl-cb"; + iommus = <&apps_smmu 0x1 0x0>; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 7ea200e5e1a1..77674b8c518d 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -1832,6 +1832,15 @@ interrupt-names = "slimbus_irq", "slimbus_bam_irq"; qcom,apps-ch-pipes = <0x780000>; qcom,ea-pc = <0x270>; + qcom,iommu-s1-bypass; + + iommu_slim_aud_ctrl_cb: qcom,iommu_slim_ctrl_cb { + compatible = "qcom,iommu-slim-ctrl-cb"; + iommus = <&apps_smmu 0x1806 0x0>, + <&apps_smmu 0x180d 0x0>, + <&apps_smmu 0x180e 0x1>, + <&apps_smmu 0x1810 0x1>; + }; }; slim_qca: slim@17240000 { @@ -1843,6 +1852,12 @@ reg-names = "slimbus_physical", "slimbus_bam_physical"; interrupts = <0 291 0>, <0 292 0>; interrupt-names = "slimbus_irq", "slimbus_bam_irq"; + qcom,iommu-s1-bypass; + + iommu_slim_qca_ctrl_cb: qcom,iommu_slim_ctrl_cb { + compatible = "qcom,iommu-slim-ctrl-cb"; + iommus = <&apps_smmu 0x1813 0x0>; + }; /* Slimbus Slave DT for WCN3990 */ btfmslim_codec: wcn3990 { diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c index a72cb174a10f..1b7b591175aa 100644 --- a/drivers/slimbus/slim-msm-ngd.c +++ b/drivers/slimbus/slim-msm-ngd.c @@ -9,11 +9,13 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ +#include #include #include #include #include #include +#include #include #include #include @@ -23,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -1665,6 +1668,43 @@ static ssize_t set_mask(struct device *device, struct device_attribute *attr, static DEVICE_ATTR(debug_mask, 0644, show_mask, set_mask); +static const struct of_device_id ngd_slim_dt_match[] = { + { + .compatible = "qcom,slim-ngd", + }, + { + .compatible = "qcom,iommu-slim-ctrl-cb", + }, + {} +}; + +static int ngd_slim_iommu_probe(struct device *dev) +{ + struct platform_device *pdev; + struct msm_slim_ctrl *ctrl_dev; + + if (unlikely(!dev->parent)) { + dev_err(dev, "%s no parent for this device\n", __func__); + return -EINVAL; + } + + pdev = to_platform_device(dev->parent); + if (!pdev) { + dev_err(dev, "%s Parent platform device not found\n", __func__); + return -EINVAL; + } + + ctrl_dev = platform_get_drvdata(pdev); + if (!ctrl_dev) { + dev_err(dev, "%s NULL controller device\n", __func__); + return -EINVAL; + + } + ctrl_dev->iommu_desc.cb_dev = dev; + SLIM_INFO(ctrl_dev, "NGD IOMMU initialization complete\n"); + return 0; +} + static int ngd_slim_probe(struct platform_device *pdev) { struct msm_slim_ctrl *dev; @@ -1676,6 +1716,10 @@ static int ngd_slim_probe(struct platform_device *pdev) bool slim_mdm = false; const char *ext_modem_id = NULL; + if (of_device_is_compatible(pdev->dev.of_node, + "qcom,iommu-slim-ctrl-cb")) + return ngd_slim_iommu_probe(&pdev->dev); + slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slimbus_physical"); if (!slim_mem) { @@ -1774,6 +1818,17 @@ static int ngd_slim_probe(struct platform_device *pdev) "qcom,slim-mdm", &ext_modem_id); if (!ret) slim_mdm = true; + + dev->iommu_desc.s1_bypass = of_property_read_bool( + pdev->dev.of_node, + "qcom,iommu-s1-bypass"); + ret = of_platform_populate(pdev->dev.of_node, ngd_slim_dt_match, + NULL, &pdev->dev); + if (ret) { + dev_err(dev->dev, "%s: Failed to of_platform_populate %d\n", + __func__, ret); + goto err_ctrl_failed; + } } else { dev->ctrl.nr = pdev->id; } @@ -1920,6 +1975,10 @@ static int ngd_slim_remove(struct platform_device *pdev) struct msm_slim_ctrl *dev = platform_get_drvdata(pdev); ngd_slim_enable(dev, false); + if (!IS_ERR_OR_NULL(dev->iommu_desc.iommu_map)) { + arm_iommu_detach_device(dev->iommu_desc.cb_dev); + arm_iommu_release_mapping(dev->iommu_desc.iommu_map); + } if (dev->sysfs_created) sysfs_remove_file(&dev->dev->kobj, &dev_attr_debug_mask.attr); @@ -2091,13 +2150,6 @@ static const struct dev_pm_ops ngd_slim_dev_pm_ops = { ) }; -static const struct of_device_id ngd_slim_dt_match[] = { - { - .compatible = "qcom,slim-ngd", - }, - {} -}; - static struct platform_driver ngd_slim_driver = { .probe = ngd_slim_probe, .remove = ngd_slim_remove, diff --git a/drivers/slimbus/slim-msm.c b/drivers/slimbus/slim-msm.c index ef10e644fc03..d8c5ea8a47b6 100644 --- a/drivers/slimbus/slim-msm.c +++ b/drivers/slimbus/slim-msm.c @@ -9,17 +9,21 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ -#include -#include +#include #include +#include +#include +#include +#include +#include #include #include -#include -#include #include "slim-msm.h" /* Pipe Number Offset Mask */ #define P_OFF_MASK 0x3FC +#define MSM_SLIM_VA_START (0x40000000) +#define MSM_SLIM_VA_SIZE (0xC0000000) int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len) { @@ -164,17 +168,61 @@ void msm_slim_free_endpoint(struct msm_slim_endp *ep) ep->sps = NULL; } +static int msm_slim_iommu_attach(struct msm_slim_ctrl *ctrl_dev) +{ + struct dma_iommu_mapping *iommu_map; + dma_addr_t va_start = MSM_SLIM_VA_START; + size_t va_size = MSM_SLIM_VA_SIZE; + int bypass = 1; + struct device *dev; + + if (unlikely(!ctrl_dev)) + return -EINVAL; + + if (!ctrl_dev->iommu_desc.cb_dev) + return 0; + + dev = ctrl_dev->iommu_desc.cb_dev; + iommu_map = arm_iommu_create_mapping(&platform_bus_type, + va_start, va_size); + if (IS_ERR(iommu_map)) { + dev_err(dev, "%s iommu_create_mapping failure\n", __func__); + return PTR_ERR(iommu_map); + } + + if (ctrl_dev->iommu_desc.s1_bypass) { + if (iommu_domain_set_attr(iommu_map->domain, + DOMAIN_ATTR_S1_BYPASS, &bypass)) { + dev_err(dev, "%s Can't bypass s1 translation\n", + __func__); + arm_iommu_release_mapping(iommu_map); + return -EIO; + } + } + + if (arm_iommu_attach_device(dev, iommu_map)) { + dev_err(dev, "%s can't arm_iommu_attach_device\n", __func__); + arm_iommu_release_mapping(iommu_map); + return -EIO; + } + ctrl_dev->iommu_desc.iommu_map = iommu_map; + SLIM_INFO(ctrl_dev, "NGD IOMMU Attach complete\n"); + return 0; +} + int msm_slim_sps_mem_alloc( struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len) { dma_addr_t phys; + struct device *dma_dev = dev->iommu_desc.cb_dev ? + dev->iommu_desc.cb_dev : dev->dev; mem->size = len; mem->min_size = 0; - mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL); + mem->base = dma_alloc_coherent(dma_dev, mem->size, &phys, GFP_KERNEL); if (!mem->base) { - dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len); + dev_err(dma_dev, "dma_alloc_coherent(%d) failed\n", len); return -ENOMEM; } @@ -387,6 +435,10 @@ int msm_alloc_port(struct slim_controller *ctrl, u8 pn) if (pn >= dev->port_nums) return -ENODEV; + ret = msm_slim_iommu_attach(dev); + if (ret) + return ret; + endpoint = &dev->pipes[pn]; ret = msm_slim_init_endpoint(dev, endpoint); dev_dbg(dev->dev, "sps register bam error code:%x\n", ret); @@ -435,9 +487,37 @@ enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr, return SLIM_P_INPROGRESS; } -static void msm_slim_port_cb(struct sps_event_notify *ev) +static int msm_slim_iommu_map(struct msm_slim_ctrl *dev, phys_addr_t iobuf, + u32 len) { + int ret; + + if (!dev->iommu_desc.cb_dev) + return 0; + + ret = iommu_map(dev->iommu_desc.iommu_map->domain, + rounddown(iobuf, PAGE_SIZE), + rounddown(iobuf, PAGE_SIZE), + roundup((len + (iobuf - rounddown(iobuf, PAGE_SIZE))), + PAGE_SIZE), IOMMU_READ | IOMMU_WRITE); + return ret; +} + +static void msm_slim_iommu_unmap(struct msm_slim_ctrl *dev, phys_addr_t iobuf, + u32 len) +{ + if (!dev->iommu_desc.cb_dev) + return; + + iommu_unmap(dev->iommu_desc.iommu_map->domain, + rounddown(iobuf, PAGE_SIZE), + roundup((len + (iobuf - rounddown(iobuf, PAGE_SIZE))), + PAGE_SIZE)); +} +static void msm_slim_port_cb(struct sps_event_notify *ev) +{ + struct msm_slim_ctrl *dev = ev->user; struct completion *comp = ev->data.transfer.user; struct sps_iovec *iovec = &ev->data.transfer.iovec; @@ -450,6 +530,8 @@ static void msm_slim_port_cb(struct sps_event_notify *ev) 
pr_err("%s: ERR event %d\n", __func__, ev->event_id); } + if (dev) + msm_slim_iommu_unmap(dev, iovec->addr, iovec->size); if (comp) complete(comp); } @@ -467,14 +549,19 @@ int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf, if (!dev->pipes[pn].connected) return -ENOTCONN; + ret = msm_slim_iommu_map(dev, iobuf, len); + if (ret) + return ret; + sreg.options = (SPS_EVENT_DESC_DONE|SPS_EVENT_ERROR); sreg.mode = SPS_TRIGGER_WAIT; sreg.xfer_done = NULL; sreg.callback = msm_slim_port_cb; - sreg.user = NULL; + sreg.user = dev; ret = sps_register_event(dev->pipes[pn].sps, &sreg); if (ret) { dev_dbg(dev->dev, "sps register event error:%x\n", ret); + msm_slim_iommu_unmap(dev, iobuf, len); return ret; } ret = sps_transfer_one(dev->pipes[pn].sps, iobuf, len, comp, @@ -490,6 +577,8 @@ int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf, PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver)); /* Make sure that port registers are updated before returning */ mb(); + } else { + msm_slim_iommu_unmap(dev, iobuf, len); } return ret; @@ -1102,6 +1191,12 @@ int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem, } init_msgq: + ret = msm_slim_iommu_attach(dev); + if (ret) { + sps_deregister_bam_device(bam_handle); + return ret; + } + ret = msm_slim_init_rx_msgq(dev, pipe_reg); if (ret) dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret); diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h index ee0f6255fa07..5859c5f19786 100644 --- a/drivers/slimbus/slim-msm.h +++ b/drivers/slimbus/slim-msm.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -263,10 +263,17 @@ struct msm_slim_bulk_wr { bool in_progress; }; +struct msm_slim_iommu { + struct device *cb_dev; + struct dma_iommu_mapping *iommu_map; + bool s1_bypass; +}; + struct msm_slim_ctrl { struct slim_controller ctrl; struct slim_framer framer; struct device *dev; + struct msm_slim_iommu iommu_desc; void __iomem *base; struct resource *slew_mem; struct resource *bam_mem; -- GitLab From 243b94b602d93cab7a406d4755d6c1a141ae4ea4 Mon Sep 17 00:00:00 2001 From: Gaurav Singhal Date: Tue, 20 Jun 2017 14:16:59 +0530 Subject: [PATCH 172/786] ARM: dts: msm: Fix NFC PMIC gpio configuration For SDM845 PMIC GPIO configurations are handled in different way from what was done on earlier targets. Incorrect GPIO configuration is fixed and now PMIC GPIO is configured based on pinctrl. 
Change-Id: I9e8f5c8879cb2b71181cd8f22d1248bb047bc1f5 Signed-off-by: Gaurav Singhal --- arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi | 4 +++- arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi | 4 +++- arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi | 17 +++++++++-------- arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi | 4 +++- 4 files changed, 18 insertions(+), 11 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi index c8f84fda8428..0430ea4cddc7 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi @@ -334,7 +334,9 @@ interrupts = <63 0>; interrupt-names = "nfc_irq"; pinctrl-names = "nfc_active", "nfc_suspend"; - pinctrl-0 = <&nfc_int_active &nfc_enable_active>; + pinctrl-0 = <&nfc_int_active + &nfc_enable_active + &nfc_clk_default>; pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>; clocks = <&clock_rpmh RPMH_LN_BB_CLK3>; clock-names = "ref_clk"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi index c75eb481aefd..c3217e77e0c7 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi @@ -308,7 +308,9 @@ interrupts = <63 0>; interrupt-names = "nfc_irq"; pinctrl-names = "nfc_active", "nfc_suspend"; - pinctrl-0 = <&nfc_int_active &nfc_enable_active>; + pinctrl-0 = <&nfc_int_active + &nfc_enable_active + &nfc_clk_default>; pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>; clocks = <&clock_rpmh RPMH_LN_BB_CLK3>; clock-names = "ref_clk"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi index 9946a25b88b9..dc58f9c36da3 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi @@ -2800,14 +2800,6 @@ }; &pm8998_gpios { - gpio@d400 { - qcom,mode = <0>; - qcom,vin-sel = <1>; - qcom,src-sel = <0>; - qcom,master-en = <1>; - status = "okay"; - }; - key_home { key_home_default: key_home_default { pins = "gpio5"; @@ -2865,6 +2857,15 @@ output-low; }; }; + + nfc_clk { + nfc_clk_default: nfc_clk_default { + pins = "gpio21"; + function = "normal"; + input-enable; + power-source = <1>; + }; + }; }; &pmi8998_gpios { diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi index c2fbed52c159..cc2b3847b198 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi @@ -73,7 +73,9 @@ interrupts = <63 0>; interrupt-names = "nfc_irq"; pinctrl-names = "nfc_active", "nfc_suspend"; - pinctrl-0 = <&nfc_int_active &nfc_enable_active>; + pinctrl-0 = <&nfc_int_active + &nfc_enable_active + &nfc_clk_default>; pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>; clocks = <&clock_rpmh RPMH_LN_BB_CLK3>; clock-names = "ref_clk"; -- GitLab From 80ada7ff0423136aa02957b2b12813ba50d31261 Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Thu, 4 May 2017 09:55:21 -0700 Subject: [PATCH 173/786] drm/msm/dsi-staging: enable interrupt support Enable basic interrupt support for DSI driver. A generic ISR is provided within dsi_ctrl that clears status interrupts; each individual DSI status interrupt may be enabled/disabled from the rest of the DSI driver via a provided API function. 
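The expected calling sequence, sketched roughly below, mirrors the dsi_message_tx()
and dsi_ctrl_cmd_tx_trigger() changes in this patch: a caller arms the status
interrupt of interest, kicks off the transfer, then blocks on the matching
completion with a timeout. This is an illustrative sketch only and is not part of
the applied diff; the helper name wait_cmd_dma_done() and the local rc/dsi_ctrl
variables are assumed for the example.

	/* Illustrative sketch only; not introduced by this patch. */
	static int wait_cmd_dma_done(struct dsi_ctrl *dsi_ctrl)
	{
		int rc = 0;

		/* arm the DMA-done status interrupt before triggering */
		dsi_ctrl_enable_status_interrupt(dsi_ctrl,
				DSI_SINT_CMD_MODE_DMA_DONE, NULL);
		reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done);

		/* trigger the command DMA */
		dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);

		/*
		 * On success the ISR disables the interrupt and signals the
		 * completion; on timeout the caller drops its own request.
		 */
		if (!wait_for_completion_timeout(
				&dsi_ctrl->irq_info.cmd_dma_done,
				msecs_to_jiffies(DSI_CTRL_TX_TO_MS))) {
			dsi_ctrl_disable_status_interrupt(dsi_ctrl,
					DSI_SINT_CMD_MODE_DMA_DONE);
			rc = -ETIMEDOUT;
		}

		return rc;
	}
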
CRs-Fixed: 2027542 Change-Id: I2645b9e705fa03e18a584e0ee04c05f6c1ce787e Signed-off-by: Clarence Ip Signed-off-by: Sandeep Panda --- drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c | 373 +++++++++++++++--- drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h | 51 ++- drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h | 188 +++++++-- .../gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c | 2 + drivers/gpu/drm/msm/dsi-staging/dsi_defs.h | 10 +- drivers/gpu/drm/msm/dsi-staging/dsi_display.c | 46 ++- drivers/gpu/drm/msm/dsi-staging/dsi_display.h | 11 + drivers/gpu/drm/msm/dsi-staging/dsi_drm.c | 13 + drivers/gpu/drm/msm/dsi-staging/dsi_drm.h | 10 + 9 files changed, 578 insertions(+), 126 deletions(-) diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index da7a7c0a4f58..2709d5c62976 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -9,7 +9,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * */ #define pr_fmt(fmt) "dsi-ctrl:[%s] " fmt, __func__ @@ -876,7 +875,7 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, const struct mipi_dsi_msg *msg, u32 flags) { - int rc = 0; + int rc = 0, ret = 0; struct mipi_dsi_packet packet; struct dsi_ctrl_cmd_dma_fifo_info cmd; struct dsi_ctrl_cmd_dma_info cmd_mem; @@ -940,42 +939,59 @@ static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl, hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ? DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0; - if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) - reinit_completion(&dsi_ctrl->int_info.cmd_dma_done); - - if (flags & DSI_CTRL_CMD_FETCH_MEMORY) { - dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw, - &cmd_mem, - hw_flags); - } else if (flags & DSI_CTRL_CMD_FIFO_STORE) { - dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw, - &cmd, - hw_flags); + if (flags & DSI_CTRL_CMD_DEFER_TRIGGER) { + if (flags & DSI_CTRL_CMD_FETCH_MEMORY) { + dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw, + &cmd_mem, + hw_flags); + } else if (flags & DSI_CTRL_CMD_FIFO_STORE) { + dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw, + &cmd, + hw_flags); + } } if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) { - u32 retry = 10; - u32 status = 0; - u64 error = 0; - u32 mask = (DSI_CMD_MODE_DMA_DONE); + dsi_ctrl_enable_status_interrupt(dsi_ctrl, + DSI_SINT_CMD_MODE_DMA_DONE, NULL); + reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done); - while ((status == 0) && (retry > 0)) { - udelay(1000); - status = dsi_ctrl->hw.ops.get_interrupt_status( - &dsi_ctrl->hw); - error = dsi_ctrl->hw.ops.get_error_status( - &dsi_ctrl->hw); - status &= mask; - retry--; - dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, + if (flags & DSI_CTRL_CMD_FETCH_MEMORY) { + dsi_ctrl->hw.ops.kickoff_command(&dsi_ctrl->hw, + &cmd_mem, + hw_flags); + } else if (flags & DSI_CTRL_CMD_FIFO_STORE) { + dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw, + &cmd, + hw_flags); + } + + ret = wait_for_completion_timeout( + &dsi_ctrl->irq_info.cmd_dma_done, + msecs_to_jiffies(DSI_CTRL_TX_TO_MS)); + + if (ret == 0) { + u32 status = 0; + u32 mask = DSI_CMD_MODE_DMA_DONE; + + if (status & mask) { + status |= (DSI_CMD_MODE_DMA_DONE | + DSI_BTA_DONE); + dsi_ctrl->hw.ops.clear_interrupt_status( + &dsi_ctrl->hw, status); - dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw, - error); + dsi_ctrl_disable_status_interrupt(dsi_ctrl, + DSI_SINT_CMD_MODE_DMA_DONE); + complete_all(&dsi_ctrl->irq_info.cmd_dma_done); + 
pr_warn("dma_tx done but irq not triggered\n"); + } else { + rc = -ETIMEDOUT; + dsi_ctrl_disable_status_interrupt(dsi_ctrl, + DSI_SINT_CMD_MODE_DMA_DONE); + pr_err("[DSI_%d]Command transfer failed\n", + dsi_ctrl->cell_index); + } } - pr_debug("INT STATUS = %x, retry = %d\n", status, retry); - if (retry == 0) - pr_err("[DSI_%d]Command transfer failed\n", - dsi_ctrl->cell_index); dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw); } @@ -1144,15 +1160,6 @@ static int dsi_ctrl_drv_state_init(struct dsi_ctrl *dsi_ctrl) return rc; } -int dsi_ctrl_intr_deinit(struct dsi_ctrl *dsi_ctrl) -{ - struct dsi_ctrl_interrupts *ints = &dsi_ctrl->int_info; - - devm_free_irq(&dsi_ctrl->pdev->dev, ints->irq, dsi_ctrl); - - return 0; -} - static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl) { if (dsi_ctrl->tx_cmd_buf) { @@ -1251,6 +1258,10 @@ static int dsi_ctrl_dev_probe(struct platform_device *pdev) dsi_ctrl->cell_index = index; dsi_ctrl->version = version; + dsi_ctrl->irq_info.irq_num = -1; + dsi_ctrl->irq_info.irq_stat_mask = 0x0; + + spin_lock_init(&dsi_ctrl->irq_info.irq_lock); dsi_ctrl->name = of_get_property(pdev->dev.of_node, "label", NULL); if (!dsi_ctrl->name) @@ -1670,6 +1681,236 @@ int dsi_ctrl_phy_reset_config(struct dsi_ctrl *dsi_ctrl, bool enable) return 0; } +static void dsi_ctrl_handle_error_status(struct dsi_ctrl *dsi_ctrl, + unsigned long int error) +{ + pr_err("%s: %lu\n", __func__, error); + + /* DTLN PHY error */ + if (error & 0x3000e00) + if (dsi_ctrl->hw.ops.clear_error_status) + dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw, + 0x3000e00); + + /* DSI FIFO OVERFLOW error */ + if (error & 0xf0000) { + if (dsi_ctrl->hw.ops.clear_error_status) + dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw, + 0xf0000); + } + + /* DSI FIFO UNDERFLOW error */ + if (error & 0xf00000) { + if (dsi_ctrl->hw.ops.clear_error_status) + dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw, + 0xf00000); + } + + /* DSI PLL UNLOCK error */ + if (error & BIT(8)) + if (dsi_ctrl->hw.ops.clear_error_status) + dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw, + BIT(8)); +} + +/** + * dsi_ctrl_isr - interrupt service routine for DSI CTRL component + * @irq: Incoming IRQ number + * @ptr: Pointer to user data structure (struct dsi_ctrl) + * Returns: IRQ_HANDLED if no further action required + */ +static irqreturn_t dsi_ctrl_isr(int irq, void *ptr) +{ + struct dsi_ctrl *dsi_ctrl; + struct dsi_event_cb_info cb_info; + unsigned long flags; + uint32_t cell_index, status, i; + uint64_t errors; + + if (!ptr) + return IRQ_NONE; + dsi_ctrl = ptr; + + /* clear status interrupts */ + if (dsi_ctrl->hw.ops.get_interrupt_status) + status = dsi_ctrl->hw.ops.get_interrupt_status(&dsi_ctrl->hw); + else + status = 0x0; + + if (dsi_ctrl->hw.ops.clear_interrupt_status) + dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, status); + + spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags); + cell_index = dsi_ctrl->cell_index; + spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags); + + /* clear error interrupts */ + if (dsi_ctrl->hw.ops.get_error_status) + errors = dsi_ctrl->hw.ops.get_error_status(&dsi_ctrl->hw); + else + errors = 0x0; + + if (errors) { + /* handle DSI error recovery */ + dsi_ctrl_handle_error_status(dsi_ctrl, errors); + if (dsi_ctrl->hw.ops.clear_error_status) + dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw, + errors); + } + + if (status & DSI_CMD_MODE_DMA_DONE) { + dsi_ctrl_disable_status_interrupt(dsi_ctrl, + DSI_SINT_CMD_MODE_DMA_DONE); + complete_all(&dsi_ctrl->irq_info.cmd_dma_done); + } + + if 
(status & DSI_CMD_FRAME_DONE) { + dsi_ctrl_disable_status_interrupt(dsi_ctrl, + DSI_SINT_CMD_FRAME_DONE); + complete_all(&dsi_ctrl->irq_info.cmd_frame_done); + } + + if (status & DSI_VIDEO_MODE_FRAME_DONE) { + dsi_ctrl_disable_status_interrupt(dsi_ctrl, + DSI_SINT_VIDEO_MODE_FRAME_DONE); + complete_all(&dsi_ctrl->irq_info.vid_frame_done); + } + + if (status & DSI_BTA_DONE) { + dsi_ctrl_disable_status_interrupt(dsi_ctrl, + DSI_SINT_BTA_DONE); + complete_all(&dsi_ctrl->irq_info.bta_done); + } + + for (i = 0; status && i < DSI_STATUS_INTERRUPT_COUNT; ++i) { + if (status & 0x1) { + spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags); + cb_info = dsi_ctrl->irq_info.irq_stat_cb[i]; + spin_unlock_irqrestore( + &dsi_ctrl->irq_info.irq_lock, flags); + + if (cb_info.event_cb) + (void)cb_info.event_cb(cb_info.event_usr_ptr, + cb_info.event_idx, + cell_index, irq, 0, 0, 0); + } + status >>= 1; + } + + return IRQ_HANDLED; +} + +/** + * _dsi_ctrl_setup_isr - register ISR handler + * @dsi_ctrl: Pointer to associated dsi_ctrl structure + * Returns: Zero on success + */ +static int dsi_ctrl_setup_isr(struct dsi_ctrl *dsi_ctrl) +{ + int irq_num, rc; + + if (!dsi_ctrl) + return -EINVAL; + if (dsi_ctrl->irq_info.irq_num != -1) + return 0; + + init_completion(&dsi_ctrl->irq_info.cmd_dma_done); + init_completion(&dsi_ctrl->irq_info.vid_frame_done); + init_completion(&dsi_ctrl->irq_info.cmd_frame_done); + init_completion(&dsi_ctrl->irq_info.bta_done); + + irq_num = platform_get_irq(dsi_ctrl->pdev, 0); + if (irq_num < 0) { + pr_err("[DSI_%d] Failed to get IRQ number, %d\n", + dsi_ctrl->cell_index, irq_num); + rc = irq_num; + } else { + rc = devm_request_threaded_irq(&dsi_ctrl->pdev->dev, irq_num, + dsi_ctrl_isr, NULL, 0, "dsi_ctrl", dsi_ctrl); + if (rc) { + pr_err("[DSI_%d] Failed to request IRQ, %d\n", + dsi_ctrl->cell_index, rc); + } else { + dsi_ctrl->irq_info.irq_num = irq_num; + disable_irq_nosync(irq_num); + + pr_info("[DSI_%d] IRQ %d registered\n", + dsi_ctrl->cell_index, irq_num); + } + } + return rc; +} + +/** + * _dsi_ctrl_destroy_isr - unregister ISR handler + * @dsi_ctrl: Pointer to associated dsi_ctrl structure + */ +static void _dsi_ctrl_destroy_isr(struct dsi_ctrl *dsi_ctrl) +{ + if (!dsi_ctrl || !dsi_ctrl->pdev || dsi_ctrl->irq_info.irq_num < 0) + return; + + if (dsi_ctrl->irq_info.irq_num != -1) { + devm_free_irq(&dsi_ctrl->pdev->dev, + dsi_ctrl->irq_info.irq_num, dsi_ctrl); + dsi_ctrl->irq_info.irq_num = -1; + } +} + +void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl, + uint32_t intr_idx, struct dsi_event_cb_info *event_info) +{ + unsigned long flags; + + if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 || + intr_idx >= DSI_STATUS_INTERRUPT_COUNT) + return; + + spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags); + + if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx] == 0) { + /* enable irq on first request */ + if (dsi_ctrl->irq_info.irq_stat_mask == 0) + enable_irq(dsi_ctrl->irq_info.irq_num); + + /* update hardware mask */ + dsi_ctrl->irq_info.irq_stat_mask |= BIT(intr_idx); + dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, + dsi_ctrl->irq_info.irq_stat_mask); + } + ++(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]); + + if (event_info) + dsi_ctrl->irq_info.irq_stat_cb[intr_idx] = *event_info; + + spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags); +} + +void dsi_ctrl_disable_status_interrupt(struct dsi_ctrl *dsi_ctrl, + uint32_t intr_idx) +{ + unsigned long flags; + + if (!dsi_ctrl || dsi_ctrl->irq_info.irq_num == -1 || + intr_idx >= 
DSI_STATUS_INTERRUPT_COUNT) + return; + + spin_lock_irqsave(&dsi_ctrl->irq_info.irq_lock, flags); + + if (dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]) + if (--(dsi_ctrl->irq_info.irq_stat_refcount[intr_idx]) == 0) { + dsi_ctrl->irq_info.irq_stat_mask &= ~BIT(intr_idx); + dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, + dsi_ctrl->irq_info.irq_stat_mask); + + /* don't need irq if no lines are enabled */ + if (dsi_ctrl->irq_info.irq_stat_mask == 0) + disable_irq_nosync(dsi_ctrl->irq_info.irq_num); + } + + spin_unlock_irqrestore(&dsi_ctrl->irq_info.irq_lock, flags); +} + /** * dsi_ctrl_host_init() - Initialize DSI host hardware. * @dsi_ctrl: DSI controller handle. @@ -1722,7 +1963,7 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl) &dsi_ctrl->host_config.video_timing); } - + dsi_ctrl_setup_isr(dsi_ctrl); dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0); dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0); @@ -1770,6 +2011,8 @@ int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl) mutex_lock(&dsi_ctrl->ctrl_lock); + _dsi_ctrl_destroy_isr(dsi_ctrl); + rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0); if (rc) { pr_err("[DSI_%d] Controller state check failed, rc=%d\n", @@ -1926,7 +2169,7 @@ int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl, */ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags) { - int rc = 0; + int rc = 0, ret = 0; u32 status = 0; u32 mask = (DSI_CMD_MODE_DMA_DONE); @@ -1937,27 +2180,43 @@ int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags) mutex_lock(&dsi_ctrl->ctrl_lock); - reinit_completion(&dsi_ctrl->int_info.cmd_dma_done); - - dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw); + if (!(flags & DSI_CTRL_CMD_BROADCAST_MASTER)) + dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw); if ((flags & DSI_CTRL_CMD_BROADCAST) && - (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) { - u32 retry = 10; + (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) { + dsi_ctrl_enable_status_interrupt(dsi_ctrl, + DSI_SINT_CMD_MODE_DMA_DONE, NULL); + reinit_completion(&dsi_ctrl->irq_info.cmd_dma_done); - while ((status == 0) && (retry > 0)) { - udelay(1000); + /* trigger command */ + dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw); + + ret = wait_for_completion_timeout( + &dsi_ctrl->irq_info.cmd_dma_done, + msecs_to_jiffies(DSI_CTRL_TX_TO_MS)); + + if (ret == 0) { status = dsi_ctrl->hw.ops.get_interrupt_status( &dsi_ctrl->hw); - status &= mask; - retry--; - dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw, + if (status & mask) { + status |= (DSI_CMD_MODE_DMA_DONE | + DSI_BTA_DONE); + dsi_ctrl->hw.ops.clear_interrupt_status( + &dsi_ctrl->hw, status); + dsi_ctrl_disable_status_interrupt(dsi_ctrl, + DSI_SINT_CMD_MODE_DMA_DONE); + complete_all(&dsi_ctrl->irq_info.cmd_dma_done); + pr_warn("dma_tx done but irq not triggered\n"); + } else { + rc = -ETIMEDOUT; + dsi_ctrl_disable_status_interrupt(dsi_ctrl, + DSI_SINT_CMD_MODE_DMA_DONE); + pr_err("[DSI_%d]Command transfer failed\n", + dsi_ctrl->cell_index); + } } - pr_debug("INT STATUS = %x, retry = %d\n", status, retry); - if (retry == 0) - pr_err("[DSI_%d]Command transfer failed\n", - dsi_ctrl->cell_index); } mutex_unlock(&dsi_ctrl->ctrl_lock); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h index 7f36fde078f3..ec535ce112d5 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h @@ -138,33 +138,26 @@ struct dsi_ctrl_state_info { /** * struct dsi_ctrl_interrupts - define interrupt 
information - * @irq: IRQ id for the DSI controller. - * @intr_lock: Spinlock to protect access to interrupt registers. - * @interrupt_status: Status interrupts which need to be serviced. - * @error_status: Error interurpts which need to be serviced. - * @interrupts_enabled: Status interrupts which are enabled. - * @errors_enabled: Error interrupts which are enabled. + * @irq_lock: Spinlock for ISR handler. + * @irq_num: Linux interrupt number associated with device. + * @irq_stat_mask: Hardware mask of currently enabled interrupts. + * @irq_stat_refcount: Number of times each interrupt has been requested. + * @irq_stat_cb: Status IRQ callback definitions. * @cmd_dma_done: Completion signal for DSI_CMD_MODE_DMA_DONE interrupt * @vid_frame_done: Completion signal for DSI_VIDEO_MODE_FRAME_DONE int. * @cmd_frame_done: Completion signal for DSI_CMD_FRAME_DONE interrupt. - * @interrupt_done_work: Work item for servicing status interrupts. - * @error_status_work: Work item for servicing error interrupts. */ struct dsi_ctrl_interrupts { - u32 irq; - spinlock_t intr_lock; /* protects access to interrupt registers */ - u32 interrupt_status; - u64 error_status; - - u32 interrupts_enabled; - u64 errors_enabled; + spinlock_t irq_lock; + int irq_num; + uint32_t irq_stat_mask; + int irq_stat_refcount[DSI_STATUS_INTERRUPT_COUNT]; + struct dsi_event_cb_info irq_stat_cb[DSI_STATUS_INTERRUPT_COUNT]; struct completion cmd_dma_done; struct completion vid_frame_done; struct completion cmd_frame_done; - - struct work_struct interrupt_done_work; - struct work_struct error_status_work; + struct completion bta_done; }; /** @@ -180,7 +173,7 @@ struct dsi_ctrl_interrupts { * @hw: DSI controller hardware object. * @current_state: Current driver and hardware state. * @clk_cb: Callback for DSI clock control. - * @int_info: Interrupt information. + * @irq_info: Interrupt information. * @clk_info: Clock information. * @clk_freq: DSi Link clock frequency information. * @pwr_info: Power information. @@ -212,7 +205,8 @@ struct dsi_ctrl { struct dsi_ctrl_state_info current_state; struct clk_ctrl_cb clk_cb; - struct dsi_ctrl_interrupts int_info; + struct dsi_ctrl_interrupts irq_info; + /* Clock and power states */ struct dsi_ctrl_clk_info clk_info; struct link_clk_freq clk_freq; @@ -559,6 +553,23 @@ int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_Ctrl, int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl, struct dsi_clk_link_set *source_clks); +/** + * dsi_ctrl_enable_status_interrupt() - enable status interrupts + * @dsi_ctrl: DSI controller handle. + * @intr_idx: Index interrupt to disable. + * @event_info: Pointer to event callback definition + */ +void dsi_ctrl_enable_status_interrupt(struct dsi_ctrl *dsi_ctrl, + uint32_t intr_idx, struct dsi_event_cb_info *event_info); + +/** + * dsi_ctrl_disable_status_interrupt() - disable status interrupts + * @dsi_ctrl: DSI controller handle. + * @intr_idx: Index interrupt to disable. + */ +void dsi_ctrl_disable_status_interrupt( + struct dsi_ctrl *dsi_ctrl, uint32_t intr_idx); + /** * dsi_ctrl_drv_register() - register platform driver for dsi controller */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h index bb728076e57a..74be279af298 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h @@ -9,7 +9,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. - * */ #ifndef _DSI_CTRL_HW_H_ @@ -83,6 +82,36 @@ enum dsi_test_pattern { DSI_TEST_PATTERN_MAX }; +/** + * enum dsi_status_int_index - index of interrupts generated by DSI controller + * @DSI_SINT_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out. + * @DSI_SINT_CMD_STREAM0_FRAME_DONE: A frame of cmd mode stream0 is sent out. + * @DSI_SINT_CMD_STREAM1_FRAME_DONE: A frame of cmd mode stream1 is sent out. + * @DSI_SINT_CMD_STREAM2_FRAME_DONE: A frame of cmd mode stream2 is sent out. + * @DSI_SINT_VIDEO_MODE_FRAME_DONE: A frame of video mode stream is sent out. + * @DSI_SINT_BTA_DONE: A BTA is completed. + * @DSI_SINT_CMD_FRAME_DONE: A frame of selected cmd mode stream is + * sent out by MDP. + * @DSI_SINT_DYN_REFRESH_DONE: The dynamic refresh operation completed. + * @DSI_SINT_DESKEW_DONE: The deskew calibration operation done. + * @DSI_SINT_DYN_BLANK_DMA_DONE: The dynamic blankin DMA operation has + * completed. + */ +enum dsi_status_int_index { + DSI_SINT_CMD_MODE_DMA_DONE = 0, + DSI_SINT_CMD_STREAM0_FRAME_DONE = 1, + DSI_SINT_CMD_STREAM1_FRAME_DONE = 2, + DSI_SINT_CMD_STREAM2_FRAME_DONE = 3, + DSI_SINT_VIDEO_MODE_FRAME_DONE = 4, + DSI_SINT_BTA_DONE = 5, + DSI_SINT_CMD_FRAME_DONE = 6, + DSI_SINT_DYN_REFRESH_DONE = 7, + DSI_SINT_DESKEW_DONE = 8, + DSI_SINT_DYN_BLANK_DMA_DONE = 9, + + DSI_STATUS_INTERRUPT_COUNT +}; + /** * enum dsi_status_int_type - status interrupts generated by DSI controller * @DSI_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out. @@ -99,16 +128,89 @@ enum dsi_test_pattern { * completed. */ enum dsi_status_int_type { - DSI_CMD_MODE_DMA_DONE = BIT(0), - DSI_CMD_STREAM0_FRAME_DONE = BIT(1), - DSI_CMD_STREAM1_FRAME_DONE = BIT(2), - DSI_CMD_STREAM2_FRAME_DONE = BIT(3), - DSI_VIDEO_MODE_FRAME_DONE = BIT(4), - DSI_BTA_DONE = BIT(5), - DSI_CMD_FRAME_DONE = BIT(6), - DSI_DYN_REFRESH_DONE = BIT(7), - DSI_DESKEW_DONE = BIT(8), - DSI_DYN_BLANK_DMA_DONE = BIT(9) + DSI_CMD_MODE_DMA_DONE = BIT(DSI_SINT_CMD_MODE_DMA_DONE), + DSI_CMD_STREAM0_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM0_FRAME_DONE), + DSI_CMD_STREAM1_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM1_FRAME_DONE), + DSI_CMD_STREAM2_FRAME_DONE = BIT(DSI_SINT_CMD_STREAM2_FRAME_DONE), + DSI_VIDEO_MODE_FRAME_DONE = BIT(DSI_SINT_VIDEO_MODE_FRAME_DONE), + DSI_BTA_DONE = BIT(DSI_SINT_BTA_DONE), + DSI_CMD_FRAME_DONE = BIT(DSI_SINT_CMD_FRAME_DONE), + DSI_DYN_REFRESH_DONE = BIT(DSI_SINT_DYN_REFRESH_DONE), + DSI_DESKEW_DONE = BIT(DSI_SINT_DESKEW_DONE), + DSI_DYN_BLANK_DMA_DONE = BIT(DSI_SINT_DYN_BLANK_DMA_DONE) +}; + +/** + * enum dsi_error_int_index - index of error interrupts from DSI controller + * @DSI_EINT_RDBK_SINGLE_ECC_ERR: Single bit ECC error in read packet. + * @DSI_EINT_RDBK_MULTI_ECC_ERR: Multi bit ECC error in read packet. + * @DSI_EINT_RDBK_CRC_ERR: CRC error in read packet. + * @DSI_EINT_RDBK_INCOMPLETE_PKT: Incomplete read packet. + * @DSI_EINT_PERIPH_ERROR_PKT: Error packet returned from peripheral, + * @DSI_EINT_LP_RX_TIMEOUT: Low power reverse transmission timeout. + * @DSI_EINT_HS_TX_TIMEOUT: High speed fwd transmission timeout. + * @DSI_EINT_BTA_TIMEOUT: BTA timeout. + * @DSI_EINT_PLL_UNLOCK: PLL has unlocked. + * @DSI_EINT_DLN0_ESC_ENTRY_ERR: Incorrect LP Rx escape entry. + * @DSI_EINT_DLN0_ESC_SYNC_ERR: LP Rx data is not byte aligned. + * @DSI_EINT_DLN0_LP_CONTROL_ERR: Incorrect LP Rx state sequence. + * @DSI_EINT_PENDING_HS_TX_TIMEOUT: Pending High-speed transfer timeout. + * @DSI_EINT_INTERLEAVE_OP_CONTENTION: Interleave operation contention. 
+ * @DSI_EINT_CMD_DMA_FIFO_UNDERFLOW: Command mode DMA FIFO underflow. + * @DSI_EINT_CMD_MDP_FIFO_UNDERFLOW: Command MDP FIFO underflow (failed to + * receive one complete line from MDP). + * @DSI_EINT_DLN0_HS_FIFO_OVERFLOW: High speed FIFO data lane 0 overflows. + * @DSI_EINT_DLN1_HS_FIFO_OVERFLOW: High speed FIFO data lane 1 overflows. + * @DSI_EINT_DLN2_HS_FIFO_OVERFLOW: High speed FIFO data lane 2 overflows. + * @DSI_EINT_DLN3_HS_FIFO_OVERFLOW: High speed FIFO data lane 3 overflows. + * @DSI_EINT_DLN0_HS_FIFO_UNDERFLOW: High speed FIFO data lane 0 underflows. + * @DSI_EINT_DLN1_HS_FIFO_UNDERFLOW: High speed FIFO data lane 1 underflows. + * @DSI_EINT_DLN2_HS_FIFO_UNDERFLOW: High speed FIFO data lane 2 underflows. + * @DSI_EINT_DLN3_HS_FIFO_UNDERFLOW: High speed FIFO data lane 3 undeflows. + * @DSI_EINT_DLN0_LP0_CONTENTION: PHY level contention while lane 0 low. + * @DSI_EINT_DLN1_LP0_CONTENTION: PHY level contention while lane 1 low. + * @DSI_EINT_DLN2_LP0_CONTENTION: PHY level contention while lane 2 low. + * @DSI_EINT_DLN3_LP0_CONTENTION: PHY level contention while lane 3 low. + * @DSI_EINT_DLN0_LP1_CONTENTION: PHY level contention while lane 0 high. + * @DSI_EINT_DLN1_LP1_CONTENTION: PHY level contention while lane 1 high. + * @DSI_EINT_DLN2_LP1_CONTENTION: PHY level contention while lane 2 high. + * @DSI_EINT_DLN3_LP1_CONTENTION: PHY level contention while lane 3 high. + */ +enum dsi_error_int_index { + DSI_EINT_RDBK_SINGLE_ECC_ERR = 0, + DSI_EINT_RDBK_MULTI_ECC_ERR = 1, + DSI_EINT_RDBK_CRC_ERR = 2, + DSI_EINT_RDBK_INCOMPLETE_PKT = 3, + DSI_EINT_PERIPH_ERROR_PKT = 4, + DSI_EINT_LP_RX_TIMEOUT = 5, + DSI_EINT_HS_TX_TIMEOUT = 6, + DSI_EINT_BTA_TIMEOUT = 7, + DSI_EINT_PLL_UNLOCK = 8, + DSI_EINT_DLN0_ESC_ENTRY_ERR = 9, + DSI_EINT_DLN0_ESC_SYNC_ERR = 10, + DSI_EINT_DLN0_LP_CONTROL_ERR = 11, + DSI_EINT_PENDING_HS_TX_TIMEOUT = 12, + DSI_EINT_INTERLEAVE_OP_CONTENTION = 13, + DSI_EINT_CMD_DMA_FIFO_UNDERFLOW = 14, + DSI_EINT_CMD_MDP_FIFO_UNDERFLOW = 15, + DSI_EINT_DLN0_HS_FIFO_OVERFLOW = 16, + DSI_EINT_DLN1_HS_FIFO_OVERFLOW = 17, + DSI_EINT_DLN2_HS_FIFO_OVERFLOW = 18, + DSI_EINT_DLN3_HS_FIFO_OVERFLOW = 19, + DSI_EINT_DLN0_HS_FIFO_UNDERFLOW = 20, + DSI_EINT_DLN1_HS_FIFO_UNDERFLOW = 21, + DSI_EINT_DLN2_HS_FIFO_UNDERFLOW = 22, + DSI_EINT_DLN3_HS_FIFO_UNDERFLOW = 23, + DSI_EINT_DLN0_LP0_CONTENTION = 24, + DSI_EINT_DLN1_LP0_CONTENTION = 25, + DSI_EINT_DLN2_LP0_CONTENTION = 26, + DSI_EINT_DLN3_LP0_CONTENTION = 27, + DSI_EINT_DLN0_LP1_CONTENTION = 28, + DSI_EINT_DLN1_LP1_CONTENTION = 29, + DSI_EINT_DLN2_LP1_CONTENTION = 30, + DSI_EINT_DLN3_LP1_CONTENTION = 31, + + DSI_ERROR_INTERRUPT_COUNT }; /** @@ -148,38 +250,38 @@ enum dsi_status_int_type { * @DSI_DLN3_LP1_CONTENTION: PHY level contention while lane 3 is high. 
*/ enum dsi_error_int_type { - DSI_RDBK_SINGLE_ECC_ERR = BIT(0), - DSI_RDBK_MULTI_ECC_ERR = BIT(1), - DSI_RDBK_CRC_ERR = BIT(2), - DSI_RDBK_INCOMPLETE_PKT = BIT(3), - DSI_PERIPH_ERROR_PKT = BIT(4), - DSI_LP_RX_TIMEOUT = BIT(5), - DSI_HS_TX_TIMEOUT = BIT(6), - DSI_BTA_TIMEOUT = BIT(7), - DSI_PLL_UNLOCK = BIT(8), - DSI_DLN0_ESC_ENTRY_ERR = BIT(9), - DSI_DLN0_ESC_SYNC_ERR = BIT(10), - DSI_DLN0_LP_CONTROL_ERR = BIT(11), - DSI_PENDING_HS_TX_TIMEOUT = BIT(12), - DSI_INTERLEAVE_OP_CONTENTION = BIT(13), - DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14), - DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15), - DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16), - DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17), - DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18), - DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19), - DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20), - DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21), - DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22), - DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23), - DSI_DLN0_LP0_CONTENTION = BIT(24), - DSI_DLN1_LP0_CONTENTION = BIT(25), - DSI_DLN2_LP0_CONTENTION = BIT(26), - DSI_DLN3_LP0_CONTENTION = BIT(27), - DSI_DLN0_LP1_CONTENTION = BIT(28), - DSI_DLN1_LP1_CONTENTION = BIT(29), - DSI_DLN2_LP1_CONTENTION = BIT(30), - DSI_DLN3_LP1_CONTENTION = BIT(31), + DSI_RDBK_SINGLE_ECC_ERR = BIT(DSI_EINT_RDBK_SINGLE_ECC_ERR), + DSI_RDBK_MULTI_ECC_ERR = BIT(DSI_EINT_RDBK_MULTI_ECC_ERR), + DSI_RDBK_CRC_ERR = BIT(DSI_EINT_RDBK_CRC_ERR), + DSI_RDBK_INCOMPLETE_PKT = BIT(DSI_EINT_RDBK_INCOMPLETE_PKT), + DSI_PERIPH_ERROR_PKT = BIT(DSI_EINT_PERIPH_ERROR_PKT), + DSI_LP_RX_TIMEOUT = BIT(DSI_EINT_LP_RX_TIMEOUT), + DSI_HS_TX_TIMEOUT = BIT(DSI_EINT_HS_TX_TIMEOUT), + DSI_BTA_TIMEOUT = BIT(DSI_EINT_BTA_TIMEOUT), + DSI_PLL_UNLOCK = BIT(DSI_EINT_PLL_UNLOCK), + DSI_DLN0_ESC_ENTRY_ERR = BIT(DSI_EINT_DLN0_ESC_ENTRY_ERR), + DSI_DLN0_ESC_SYNC_ERR = BIT(DSI_EINT_DLN0_ESC_SYNC_ERR), + DSI_DLN0_LP_CONTROL_ERR = BIT(DSI_EINT_DLN0_LP_CONTROL_ERR), + DSI_PENDING_HS_TX_TIMEOUT = BIT(DSI_EINT_PENDING_HS_TX_TIMEOUT), + DSI_INTERLEAVE_OP_CONTENTION = BIT(DSI_EINT_INTERLEAVE_OP_CONTENTION), + DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_DMA_FIFO_UNDERFLOW), + DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(DSI_EINT_CMD_MDP_FIFO_UNDERFLOW), + DSI_DLN0_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_OVERFLOW), + DSI_DLN1_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_OVERFLOW), + DSI_DLN2_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_OVERFLOW), + DSI_DLN3_HS_FIFO_OVERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_OVERFLOW), + DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN0_HS_FIFO_UNDERFLOW), + DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN1_HS_FIFO_UNDERFLOW), + DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN2_HS_FIFO_UNDERFLOW), + DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(DSI_EINT_DLN3_HS_FIFO_UNDERFLOW), + DSI_DLN0_LP0_CONTENTION = BIT(DSI_EINT_DLN0_LP0_CONTENTION), + DSI_DLN1_LP0_CONTENTION = BIT(DSI_EINT_DLN1_LP0_CONTENTION), + DSI_DLN2_LP0_CONTENTION = BIT(DSI_EINT_DLN2_LP0_CONTENTION), + DSI_DLN3_LP0_CONTENTION = BIT(DSI_EINT_DLN3_LP0_CONTENTION), + DSI_DLN0_LP1_CONTENTION = BIT(DSI_EINT_DLN0_LP1_CONTENTION), + DSI_DLN1_LP1_CONTENTION = BIT(DSI_EINT_DLN1_LP1_CONTENTION), + DSI_DLN2_LP1_CONTENTION = BIT(DSI_EINT_DLN2_LP1_CONTENTION), + DSI_DLN3_LP1_CONTENTION = BIT(DSI_EINT_DLN3_LP1_CONTENTION), }; /** diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c index a024c437a9ad..0af6f25def9c 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_cmn.c @@ -706,6 +706,8 @@ void dsi_ctrl_hw_cmn_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, 
u32 ints) { u32 reg = 0; + reg = DSI_R32(ctrl, DSI_INT_CTRL); + if (ints & DSI_CMD_MODE_DMA_DONE) reg |= BIT(0); if (ints & DSI_CMD_FRAME_DONE) diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h index cf3631558806..3709c67ca21a 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h @@ -9,7 +9,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * */ #ifndef _DSI_DEFS_H_ @@ -446,5 +445,14 @@ static inline bool dsi_rect_is_equal(struct dsi_rect *r1, r1->h == r2->h; } +struct dsi_event_cb_info { + uint32_t event_idx; + void *event_usr_ptr; + + int (*event_cb)(void *event_usr_ptr, + uint32_t event_idx, uint32_t instance_idx, + uint32_t data0, uint32_t data1, + uint32_t data2, uint32_t data3); +}; #endif /* _DSI_DEFS_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index 3dd49504d6fe..1bbe1fac78a5 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -19,6 +19,7 @@ #include #include "msm_drv.h" +#include "sde_connector.h" #include "dsi_display.h" #include "dsi_panel.h" #include "dsi_ctrl.h" @@ -499,7 +500,45 @@ static int dsi_display_phy_idle_off(struct dsi_display *display) return 0; } +void dsi_display_enable_event(struct dsi_display *display, + uint32_t event_idx, struct dsi_event_cb_info *event_info, + bool enable) +{ + uint32_t irq_status_idx = DSI_STATUS_INTERRUPT_COUNT; + int i; + + if (!display) { + pr_err("invalid display\n"); + return; + } + + if (event_info) + event_info->event_idx = event_idx; + switch (event_idx) { + case SDE_CONN_EVENT_VID_DONE: + irq_status_idx = DSI_SINT_VIDEO_MODE_FRAME_DONE; + break; + case SDE_CONN_EVENT_CMD_DONE: + irq_status_idx = DSI_SINT_CMD_FRAME_DONE; + break; + default: + /* nothing to do */ + pr_debug("[%s] unhandled event %d\n", display->name, event_idx); + return; + } + + if (enable) { + for (i = 0; i < display->ctrl_count; i++) + dsi_ctrl_enable_status_interrupt( + display->ctrl[i].ctrl, irq_status_idx, + event_info); + } else { + for (i = 0; i < display->ctrl_count; i++) + dsi_ctrl_disable_status_interrupt( + display->ctrl[i].ctrl, irq_status_idx); + } +} static int dsi_display_ctrl_power_on(struct dsi_display *display) { @@ -1215,8 +1254,7 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display, goto error; } - rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl, - DSI_CTRL_CMD_BROADCAST); + rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl, flags); if (rc) { pr_err("[%s] cmd trigger failed, rc=%d\n", display->name, rc); @@ -1224,9 +1262,7 @@ static int dsi_display_broadcast_cmd(struct dsi_display *display, } } - rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl, - (DSI_CTRL_CMD_BROADCAST_MASTER | - DSI_CTRL_CMD_BROADCAST)); + rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl, m_flags); if (rc) { pr_err("[%s] cmd trigger failed for master, rc=%d\n", display->name, rc); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h index 9aa31139fe91..38af37b59b6a 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h @@ -444,6 +444,17 @@ int dsi_display_set_tpg_state(struct dsi_display *display, bool enable); int dsi_display_clock_gate(struct dsi_display *display, bool enable); int dsi_dispaly_static_frame(struct dsi_display 
*display, bool enable); +/** + * dsi_display_enable_event() - enable interrupt based connector event + * @display: Handle to display. + * @event_idx: Event index. + * @event_info: Event callback definition. + * @enable: Whether to enable/disable the event interrupt. + */ +void dsi_display_enable_event(struct dsi_display *display, + uint32_t event_idx, struct dsi_event_cb_info *event_info, + bool enable); + int dsi_display_set_backlight(void *display, u32 bl_lvl); /** diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c index 4e09cfb2f10e..b499bd6a98c5 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c @@ -546,6 +546,19 @@ int dsi_conn_pre_kickoff(struct drm_connector *connector, return dsi_display_pre_kickoff(display, params); } +void dsi_conn_enable_event(struct drm_connector *connector, + uint32_t event_idx, bool enable, void *display) +{ + struct dsi_event_cb_info event_info; + + memset(&event_info, 0, sizeof(event_info)); + + event_info.event_cb = sde_connector_trigger_event; + event_info.event_usr_ptr = connector; + + dsi_display_enable_event(display, event_idx, &event_info, enable); +} + struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display, struct drm_device *dev, struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h index 68520a8e7e60..45feec9e263d 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h @@ -85,6 +85,16 @@ enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode, void *display); +/** + * dsi_conn_enable_event - callback to notify DSI driver of event registeration + * @connector: Pointer to drm connector structure + * @event_idx: Connector event index + * @enable: Whether or not the event is enabled + * @display: Pointer to private display handle + */ +void dsi_conn_enable_event(struct drm_connector *connector, + uint32_t event_idx, bool enable, void *display); + struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display, struct drm_device *dev, struct drm_encoder *encoder); -- GitLab From ab5e2b68a775bff59dd4d18016122741a0960df7 Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Tue, 9 May 2017 13:06:51 -0400 Subject: [PATCH 174/786] drm/msm/sde: update connector roi print to match crtc print Print connector region of interest values in decimal notation which matches the CRTC printing and is easier to follow. 
Change-Id: I32dd1dec051abed4b7ae4967a549f906099ed88d Signed-off-by: Lloyd Atkinson --- drivers/gpu/drm/msm/sde/sde_connector.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 655125799e8d..4ede27121627 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -546,7 +546,7 @@ static int _sde_connector_set_roi_v1( return rc; c_state->rois.roi[i] = roi_v1.roi[i]; - SDE_DEBUG_CONN(c_conn, "roi%d: roi 0x%x 0x%x 0x%x 0x%x\n", i, + SDE_DEBUG_CONN(c_conn, "roi%d: roi (%d,%d) (%d,%d)\n", i, c_state->rois.roi[i].x1, c_state->rois.roi[i].y1, c_state->rois.roi[i].x2, -- GitLab From 9d9ff91973ea2f8f858a1e1c4c71ef03c015d1c8 Mon Sep 17 00:00:00 2001 From: Veera Sundaram Sankaran Date: Tue, 20 Jun 2017 10:41:21 -0700 Subject: [PATCH 175/786] drm/msm/sde: fix excl_rect validation in crtc u16 flag is passed incorrectly while populating the plane's src rect to compare against excl_rect. Fix it to pass the correct flag. Change-Id: Icc8686e4fe4644b1a7d7b479af211c29bec927bd Signed-off-by: Veera Sundaram Sankaran --- drivers/gpu/drm/msm/sde/sde_crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index e70829023d5e..c2111b26f657 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -2576,7 +2576,7 @@ static int _sde_crtc_excl_rect_overlap_check(struct plane_state pstates[], for (i = curr_cnt; i < cnt; i++) { pstate = pstates[i].drm_pstate; POPULATE_RECT(&dst_rect, pstate->crtc_x, pstate->crtc_y, - pstate->crtc_w, pstate->crtc_h, true); + pstate->crtc_w, pstate->crtc_h, false); sde_kms_rect_intersect(&dst_rect, excl_rect, &intersect); if (intersect.w == excl_rect->w && intersect.h == excl_rect->h -- GitLab From fbee3e0e6108cadc65566ac456aa1d9d6f7032a8 Mon Sep 17 00:00:00 2001 From: Siba Prasad Date: Wed, 10 May 2017 12:22:07 +0530 Subject: [PATCH 176/786] scsi: ufs: ufs-qcom-ice: Initialize local variable in ice_cfg_start() Previously local variable ice_set in ufs_qcom_ice_cfg_start() function was initialized only if qcom_host->ice.vops->config_start was set. If that was not set, then the uninitialized value of ice_set might have lead to incorrect configuration of ICE. So initialize the variable regardless the value of config_start. Change-Id: I16692902d9f5b8df00ae2ffc7c363413d13e932a Signed-off-by: Siba Prasad --- drivers/scsi/ufs/ufs-qcom-ice.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c index 814d1dcfe90e..0c862639fa3e 100644 --- a/drivers/scsi/ufs/ufs-qcom-ice.c +++ b/drivers/scsi/ufs/ufs-qcom-ice.c @@ -394,8 +394,8 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host, } + memset(&ice_set, 0, sizeof(ice_set)); if (qcom_host->ice.vops->config_start) { - memset(&ice_set, 0, sizeof(ice_set)); spin_lock_irqsave( &qcom_host->ice_work_lock, flags); -- GitLab From b5b8fc30aacf3faeb9aaf1cfe9fc9432ad28497b Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Wed, 21 Jun 2017 08:59:11 -0700 Subject: [PATCH 177/786] msm: pcie: Fix lots of static checker errors This file has so many sparse warnings that sparse fails with an error message about too many warnings. Fix them all so that we can find future sparse errors in this file. 
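The warnings fall into three recurring patterns, and the snippet below is only a schematic condensation of the fixes in the hunks that follow (no additional changes are implied): file-local symbols gain static, pointers used for MMIO gain the __iomem annotation that writel_relaxed() expects, and NULL replaces a plain 0 wherever a pointer is meant.

	/* "symbol 'pcie_drv' was not declared. Should it be static?" */
	static struct pcie_drv_sta {
		u32 rc_num;
		struct mutex drv_lock;
	} pcie_drv;

	/* MMIO pointers are annotated so address-space checks pass */
	static inline void msm_pcie_write_reg(void __iomem *base, u32 offset,
						u32 value)
	{
		writel_relaxed(value, base + offset);
		wmb(); /* ensure the write reaches the hardware */
	}

	/* "Using plain integer as NULL pointer" */
	dent_msm_pcie = debugfs_create_dir("pci-msm", NULL);
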
Change-Id: I719153ff680627e2e6b3a326085239acd7fcf0a0 Signed-off-by: Stephen Boyd --- drivers/pci/host/pci-msm.c | 90 +++++++++++++++++++------------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c index 771a1f9a4715..361d7dd060e0 100644 --- a/drivers/pci/host/pci-msm.c +++ b/drivers/pci/host/pci-msm.c @@ -564,7 +564,7 @@ static struct msm_pcie_device_info msm_pcie_dev_tbl[MAX_RC_NUM * MAX_DEVICE_NUM]; /* PCIe driver state */ -struct pcie_drv_sta { +static struct pcie_drv_sta { u32 rc_num; struct mutex drv_lock; } pcie_drv; @@ -690,14 +690,14 @@ static struct msm_pcie_clk_info_t /* resources */ static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = { - {"parf", 0, 0}, - {"phy", 0, 0}, - {"dm_core", 0, 0}, - {"elbi", 0, 0}, - {"conf", 0, 0}, - {"io", 0, 0}, - {"bars", 0, 0}, - {"tcsr", 0, 0} + {"parf", NULL, NULL}, + {"phy", NULL, NULL}, + {"dm_core", NULL, NULL}, + {"elbi", NULL, NULL}, + {"conf", NULL, NULL}, + {"io", NULL, NULL}, + {"bars", NULL, NULL}, + {"tcsr", NULL, NULL} }; /* irqs */ @@ -763,14 +763,14 @@ static inline void msm_pcie_fixup_irqs(struct msm_pcie_dev_t *dev) } #endif -static inline void msm_pcie_write_reg(void *base, u32 offset, u32 value) +static inline void msm_pcie_write_reg(void __iomem *base, u32 offset, u32 value) { writel_relaxed(value, base + offset); /* ensure that changes propagated to the hardware */ wmb(); } -static inline void msm_pcie_write_reg_field(void *base, u32 offset, +static inline void msm_pcie_write_reg_field(void __iomem *base, u32 offset, const u32 mask, u32 val) { u32 shift = find_first_bit((void *)&mask, 32); @@ -976,7 +976,7 @@ static void msm_pcie_cfg_recover(struct msm_pcie_dev_t *dev, bool rc) int i, j; u32 val = 0; u32 *shadow; - void *cfg = dev->conf; + void __iomem *cfg = dev->conf; for (i = 0; i < MAX_DEVICE_NUM; i++) { if (!rc && !dev->pcidev_table[i].bdf) @@ -1764,7 +1764,7 @@ static ssize_t msm_pcie_cmd_debug(struct file *file, return count; } -const struct file_operations msm_pcie_cmd_debug_ops = { +static const struct file_operations msm_pcie_cmd_debug_ops = { .write = msm_pcie_cmd_debug, }; @@ -1807,7 +1807,7 @@ static ssize_t msm_pcie_set_rc_sel(struct file *file, return count; } -const struct file_operations msm_pcie_rc_sel_ops = { +static const struct file_operations msm_pcie_rc_sel_ops = { .write = msm_pcie_set_rc_sel, }; @@ -1865,7 +1865,7 @@ static ssize_t msm_pcie_set_base_sel(struct file *file, return count; } -const struct file_operations msm_pcie_base_sel_ops = { +static const struct file_operations msm_pcie_base_sel_ops = { .write = msm_pcie_set_base_sel, }; @@ -1911,7 +1911,7 @@ static ssize_t msm_pcie_set_linkdown_panic(struct file *file, return count; } -const struct file_operations msm_pcie_linkdown_panic_ops = { +static const struct file_operations msm_pcie_linkdown_panic_ops = { .write = msm_pcie_set_linkdown_panic, }; @@ -1938,7 +1938,7 @@ static ssize_t msm_pcie_set_wr_offset(struct file *file, return count; } -const struct file_operations msm_pcie_wr_offset_ops = { +static const struct file_operations msm_pcie_wr_offset_ops = { .write = msm_pcie_set_wr_offset, }; @@ -1965,7 +1965,7 @@ static ssize_t msm_pcie_set_wr_mask(struct file *file, return count; } -const struct file_operations msm_pcie_wr_mask_ops = { +static const struct file_operations msm_pcie_wr_mask_ops = { .write = msm_pcie_set_wr_mask, }; static ssize_t msm_pcie_set_wr_value(struct file *file, @@ -1991,7 +1991,7 @@ static ssize_t 
msm_pcie_set_wr_value(struct file *file, return count; } -const struct file_operations msm_pcie_wr_value_ops = { +static const struct file_operations msm_pcie_wr_value_ops = { .write = msm_pcie_set_wr_value, }; @@ -2035,7 +2035,7 @@ static ssize_t msm_pcie_set_boot_option(struct file *file, return count; } -const struct file_operations msm_pcie_boot_option_ops = { +static const struct file_operations msm_pcie_boot_option_ops = { .write = msm_pcie_set_boot_option, }; @@ -2091,7 +2091,7 @@ static ssize_t msm_pcie_set_aer_enable(struct file *file, return count; } -const struct file_operations msm_pcie_aer_enable_ops = { +static const struct file_operations msm_pcie_aer_enable_ops = { .write = msm_pcie_set_aer_enable, }; @@ -2118,7 +2118,7 @@ static ssize_t msm_pcie_set_corr_counter_limit(struct file *file, return count; } -const struct file_operations msm_pcie_corr_counter_limit_ops = { +static const struct file_operations msm_pcie_corr_counter_limit_ops = { .write = msm_pcie_set_corr_counter_limit, }; @@ -2127,14 +2127,14 @@ static void msm_pcie_debugfs_init(void) rc_sel_max = (0x1 << MAX_RC_NUM) - 1; wr_mask = 0xffffffff; - dent_msm_pcie = debugfs_create_dir("pci-msm", 0); + dent_msm_pcie = debugfs_create_dir("pci-msm", NULL); if (IS_ERR(dent_msm_pcie)) { pr_err("PCIe: fail to create the folder for debug_fs.\n"); return; } dfile_rc_sel = debugfs_create_file("rc_sel", 0664, - dent_msm_pcie, 0, + dent_msm_pcie, NULL, &msm_pcie_rc_sel_ops); if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) { pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n"); @@ -2142,7 +2142,7 @@ static void msm_pcie_debugfs_init(void) } dfile_case = debugfs_create_file("case", 0664, - dent_msm_pcie, 0, + dent_msm_pcie, NULL, &msm_pcie_cmd_debug_ops); if (!dfile_case || IS_ERR(dfile_case)) { pr_err("PCIe: fail to create the file for debug_fs case.\n"); @@ -2150,7 +2150,7 @@ static void msm_pcie_debugfs_init(void) } dfile_base_sel = debugfs_create_file("base_sel", 0664, - dent_msm_pcie, 0, + dent_msm_pcie, NULL, &msm_pcie_base_sel_ops); if (!dfile_base_sel || IS_ERR(dfile_base_sel)) { pr_err("PCIe: fail to create the file for debug_fs base_sel.\n"); @@ -2158,7 +2158,7 @@ static void msm_pcie_debugfs_init(void) } dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644, - dent_msm_pcie, 0, + dent_msm_pcie, NULL, &msm_pcie_linkdown_panic_ops); if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) { pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n"); @@ -2166,7 +2166,7 @@ static void msm_pcie_debugfs_init(void) } dfile_wr_offset = debugfs_create_file("wr_offset", 0664, - dent_msm_pcie, 0, + dent_msm_pcie, NULL, &msm_pcie_wr_offset_ops); if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) { pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n"); @@ -2174,7 +2174,7 @@ static void msm_pcie_debugfs_init(void) } dfile_wr_mask = debugfs_create_file("wr_mask", 0664, - dent_msm_pcie, 0, + dent_msm_pcie, NULL, &msm_pcie_wr_mask_ops); if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) { pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n"); @@ -2182,7 +2182,7 @@ static void msm_pcie_debugfs_init(void) } dfile_wr_value = debugfs_create_file("wr_value", 0664, - dent_msm_pcie, 0, + dent_msm_pcie, NULL, &msm_pcie_wr_value_ops); if (!dfile_wr_value || IS_ERR(dfile_wr_value)) { pr_err("PCIe: fail to create the file for debug_fs wr_value.\n"); @@ -2190,7 +2190,7 @@ static void msm_pcie_debugfs_init(void) } dfile_boot_option = debugfs_create_file("boot_option", 0664, - dent_msm_pcie, 0, + 
dent_msm_pcie, NULL, &msm_pcie_boot_option_ops); if (!dfile_boot_option || IS_ERR(dfile_boot_option)) { pr_err("PCIe: fail to create the file for debug_fs boot_option.\n"); @@ -2198,7 +2198,7 @@ static void msm_pcie_debugfs_init(void) } dfile_aer_enable = debugfs_create_file("aer_enable", 0664, - dent_msm_pcie, 0, + dent_msm_pcie, NULL, &msm_pcie_aer_enable_ops); if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) { pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n"); @@ -2206,7 +2206,7 @@ static void msm_pcie_debugfs_init(void) } dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit", - 0664, dent_msm_pcie, 0, + 0664, dent_msm_pcie, NULL, &msm_pcie_corr_counter_limit_ops); if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) { pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n"); @@ -2609,7 +2609,7 @@ static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev) gpio_free(dev->gpio[i].num); } -int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev) +static int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev) { int i, rc = 0; struct regulator *vreg; @@ -3229,7 +3229,7 @@ static void msm_pcie_config_link_state(struct msm_pcie_dev_t *dev) } } -void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev) +static void msm_pcie_config_msi_controller(struct msm_pcie_dev_t *dev) { int i; @@ -3638,7 +3638,7 @@ static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev) dev->dev_io_res = NULL; } -int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options) +static int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options) { int ret = 0; uint32_t val; @@ -3895,7 +3895,7 @@ int msm_pcie_enable(struct msm_pcie_dev_t *dev, u32 options) return ret; } -void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options) +static void msm_pcie_disable(struct msm_pcie_dev_t *dev, u32 options) { PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx); @@ -4721,7 +4721,7 @@ static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev, } } -void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev) +static void msm_pcie_destroy_irq(unsigned int irq, struct pci_dev *pdev) { int pos; struct msi_desc *entry = irq_get_msi_desc(irq); @@ -5093,7 +5093,7 @@ static const struct irq_domain_ops msm_pcie_msi_ops = { .map = msm_pcie_msi_map, }; -int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev) +static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev) { int rc; int msi_start = 0; @@ -5233,7 +5233,7 @@ int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev) return 0; } -void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev) +static void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev) { PCIE_DBG(dev, "RC%d\n", dev->rc_idx); @@ -5575,7 +5575,7 @@ static int msm_pcie_probe(struct platform_device *pdev) msm_pcie_dev[rc_idx].pcidev_table[i].short_bdf = 0; msm_pcie_dev[rc_idx].pcidev_table[i].sid = 0; msm_pcie_dev[rc_idx].pcidev_table[i].domain = rc_idx; - msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = 0; + msm_pcie_dev[rc_idx].pcidev_table[i].conf_base = NULL; msm_pcie_dev[rc_idx].pcidev_table[i].phy_address = 0; msm_pcie_dev[rc_idx].pcidev_table[i].dev_ctrlstts_offset = 0; msm_pcie_dev[rc_idx].pcidev_table[i].event_reg = NULL; @@ -5725,7 +5725,7 @@ static struct platform_driver msm_pcie_driver = { }, }; -int __init pcie_init(void) +static int __init pcie_init(void) { int ret = 0, i; char rc_name[MAX_RC_NAME_LEN]; @@ -5784,7 +5784,7 @@ int __init pcie_init(void) msm_pcie_dev_tbl[i].short_bdf = 0; msm_pcie_dev_tbl[i].sid = 0; msm_pcie_dev_tbl[i].domain = -1; - 
msm_pcie_dev_tbl[i].conf_base = 0; + msm_pcie_dev_tbl[i].conf_base = NULL; msm_pcie_dev_tbl[i].phy_address = 0; msm_pcie_dev_tbl[i].dev_ctrlstts_offset = 0; msm_pcie_dev_tbl[i].event_reg = NULL; @@ -5999,7 +5999,7 @@ static int msm_pcie_pm_resume(struct pci_dev *dev, return ret; } -void msm_pcie_fixup_resume(struct pci_dev *dev) +static void msm_pcie_fixup_resume(struct pci_dev *dev) { int ret; struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus); @@ -6022,7 +6022,7 @@ void msm_pcie_fixup_resume(struct pci_dev *dev) DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP, msm_pcie_fixup_resume); -void msm_pcie_fixup_resume_early(struct pci_dev *dev) +static void msm_pcie_fixup_resume_early(struct pci_dev *dev) { int ret; struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus); -- GitLab From 5573e7367f72411e5d777259f5d11e5e58f14eed Mon Sep 17 00:00:00 2001 From: Rohit Gupta Date: Wed, 21 Jun 2017 10:08:07 -0700 Subject: [PATCH 178/786] cpufreq: schedutil: Fix hispeed_freq condition Remove a timestamp that wasn't being used and an unnecessary conditional check. Change-Id: I9ad1f6b0e41e3f5ac668b5c6053a48e61c72b4df Signed-off-by: Rohit Gupta --- kernel/sched/cpufreq_schedutil.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index c42380ac8959..0a0e9aa9526a 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -36,7 +36,6 @@ struct sugov_policy { raw_spinlock_t update_lock; /* For shared policies */ u64 last_freq_update_time; s64 freq_update_delay_ns; - u64 hispeed_jmp_ts; unsigned int next_freq; unsigned int cached_raw_freq; unsigned long hispeed_util; @@ -216,11 +215,8 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util, HISPEED_LOAD, 100)); - if (is_hiload && !is_migration && - sg_policy->next_freq < sg_policy->tunables->hispeed_freq) { + if (is_hiload && !is_migration) *util = max(*util, sg_policy->hispeed_util); - sg_policy->hispeed_jmp_ts = sg_cpu->last_update; - } if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100)) *util = *max; -- GitLab From 9a3bf9a8010a0906d3bd97294ea18ee4c543709d Mon Sep 17 00:00:00 2001 From: Siddartha Mohanadoss Date: Tue, 20 Jun 2017 13:08:40 -0700 Subject: [PATCH 179/786] ARM: dts: msm: Add ADC nodes for SDM845 QRD Support clients to read vph_pwr, vcoin and thermistor channels such as msm, PA and quiet thermistor through VADC. Clients can set thresholds for VADC channels and receive notification using ADC_TM peripheral. 
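Because each ADC_TM channel below carries "qcom,thermal-node", it is registered with the thermal framework, so in-kernel clients can read it through the standard thermal API instead of talking to the VADC directly. A minimal sketch, assuming the of-thermal zone name matches the thermal_zones node name added here (the wrapper function itself is only illustrative):

	#include <linux/err.h>
	#include <linux/thermal.h>

	static int example_read_quiet_therm(int *temp_mdegc)
	{
		struct thermal_zone_device *tz;

		tz = thermal_zone_get_zone_by_name("quiet-therm-adc");
		if (IS_ERR(tz))
			return PTR_ERR(tz);

		/* of-thermal reports temperature in millidegrees Celsius */
		return thermal_zone_get_temp(tz, temp_mdegc);
	}

Userspace can read the equivalent value through /sys/class/thermal/thermal_zone*/temp once the zones register.
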
Change-Id: I668f61f2de06141eed74b2219a3321fcd3f91c16 Signed-off-by: Siddartha Mohanadoss --- arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi | 186 +++++++++++++++++++++++ 1 file changed, 186 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi index c2fbed52c159..c0cbe77798fc 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi @@ -265,3 +265,189 @@ &ext_5v_boost { status = "ok"; }; + +&pm8998_vadc { + chan@83 { + label = "vph_pwr"; + reg = <0x83>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + }; + + chan@85 { + label = "vcoin"; + reg = <0x85>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,fast-avg-setup = <0>; + }; + + chan@4c { + label = "xo_therm"; + reg = <0x4c>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <4>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@4d { + label = "msm_therm"; + reg = <0x4d>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@4f { + label = "pa_therm1"; + reg = <0x4f>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; + + chan@51 { + label = "quiet_therm"; + reg = <0x51>; + qcom,decimation = <2>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,fast-avg-setup = <0>; + }; +}; + +&pm8998_adc_tm { + chan@83 { + label = "vph_pwr"; + reg = <0x83>; + qcom,pre-div-channel-scaling = <1>; + qcom,calibration-type = "absolute"; + qcom,scale-function = <0>; + qcom,hw-settle-time = <0>; + qcom,btm-channel-number = <0x60>; + }; + + chan@4c { + label = "xo_therm"; + reg = <0x4c>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <4>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x68>; + qcom,thermal-node; + }; + + chan@4d { + label = "msm_therm"; + reg = <0x4d>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x70>; + qcom,thermal-node; + }; + + chan@4f { + label = "pa_therm1"; + reg = <0x4f>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x78>; + qcom,thermal-node; + }; + + chan@51 { + label = "quiet_therm"; + reg = <0x51>; + qcom,pre-div-channel-scaling = <0>; + qcom,calibration-type = "ratiometric"; + qcom,scale-function = <2>; + qcom,hw-settle-time = <2>; + qcom,btm-channel-number = <0x80>; + qcom,thermal-node; + }; +}; + +&thermal_zones { + xo-therm-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&pm8998_adc_tm 0x4c>; + thermal-governor = "user_space"; + + trips { + active-config0 { + temperature = <65000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + msm-therm-adc { + 
polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&pm8998_adc_tm 0x4d>; + thermal-governor = "user_space"; + + trips { + active-config0 { + temperature = <65000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + pa-therm1-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&pm8998_adc_tm 0x4f>; + thermal-governor = "user_space"; + + trips { + active-config0 { + temperature = <65000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; + + quiet-therm-adc { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-sensors = <&pm8998_adc_tm 0x51>; + thermal-governor = "user_space"; + + trips { + active-config0 { + temperature = <65000>; + hysteresis = <1000>; + type = "passive"; + }; + }; + }; +}; -- GitLab From c3b15ce3bde40c7a50ed96337553e740129242ed Mon Sep 17 00:00:00 2001 From: Shrenuj Bansal Date: Thu, 15 Jun 2017 14:48:05 -0700 Subject: [PATCH 180/786] msm: kgsl: Fix print log for SMMU API failure Fix print log for SMMU API failure. Change-Id: Iadbf4603191c63d2ae1c3a5604d04fa40ab37d6a Signed-off-by: Shrenuj Bansal --- drivers/gpu/msm/kgsl_iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index b3d02e646a43..73c0d71873e3 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -1255,7 +1255,7 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) ret = iommu_domain_get_attr(iommu_pt->domain, DOMAIN_ATTR_CONTEXT_BANK, &cb_num); if (ret) { - KGSL_CORE_ERR("get DOMAIN_ATTR_PROCID failed: %d\n", + KGSL_CORE_ERR("get DOMAIN_ATTR_CONTEXT_BANK failed: %d\n", ret); goto done; } -- GitLab From a98e7f34a3cc46425c6ff320d98b99c069ffaf93 Mon Sep 17 00:00:00 2001 From: Shrenuj Bansal Date: Thu, 11 May 2017 15:19:18 -0700 Subject: [PATCH 181/786] msm: kgsl: Provide the context bank in SMMU_TABLE_UPDATE packet The SMMU driver allocates context banks dynamically on a first come first serve basis. As a result, the KGSL user context may not get context bank 0 depending on the attach order. Program the context bank number for the SMMU_TABLE_UPDATE packet so the CP can program the IOMMU registers for that particular context bank. 
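For context, the context bank number programmed into the packet is simply whatever the SMMU driver assigned at attach time; KGSL already queries it through the standard domain-attribute interface, roughly as sketched below (condensed for illustration, not an additional change):

	unsigned int cb_num;
	int ret;

	/* ask the SMMU driver which context bank this domain was given */
	ret = iommu_domain_get_attr(iommu_pt->domain,
			DOMAIN_ATTR_CONTEXT_BANK, &cb_num);
	if (ret)
		return ret;

That value is then carried as the fourth dword of the CP_SMMU_TABLE_UPDATE payload (ttbr0 low, ttbr0 high, contextidr, context bank) so the CP targets the correct set of IOMMU registers.
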
Change-Id: I06c2794c5bfd5fc4c2f1383db57893500e53aca6 Signed-off-by: Shrenuj Bansal --- drivers/gpu/msm/adreno_iommu.c | 41 +++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c index 80a04bc45124..1a2f8ff22c87 100644 --- a/drivers/gpu/msm/adreno_iommu.c +++ b/drivers/gpu/msm/adreno_iommu.c @@ -574,6 +574,40 @@ static unsigned int _adreno_iommu_set_pt_v2_a5xx(struct kgsl_device *device, return cmds - cmds_orig; } +static unsigned int _adreno_iommu_set_pt_v2_a6xx(struct kgsl_device *device, + unsigned int *cmds_orig, + u64 ttbr0, u32 contextidr, + struct adreno_ringbuffer *rb, + unsigned int cb_num) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + unsigned int *cmds = cmds_orig; + + cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds); + cmds += cp_wait_for_me(adreno_dev, cmds); + + /* CP switches the pagetable and flushes the Caches */ + *cmds++ = cp_packet(adreno_dev, CP_SMMU_TABLE_UPDATE, 4); + *cmds++ = lower_32_bits(ttbr0); + *cmds++ = upper_32_bits(ttbr0); + *cmds++ = contextidr; + *cmds++ = cb_num; + + *cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 4, 1); + cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr + + PT_INFO_OFFSET(ttbr0))); + *cmds++ = lower_32_bits(ttbr0); + *cmds++ = upper_32_bits(ttbr0); + *cmds++ = contextidr; + + /* release all commands with wait_for_me */ + cmds += cp_wait_for_me(adreno_dev, cmds); + + cmds += _adreno_iommu_add_idle_cmds(adreno_dev, cmds); + + return cmds - cmds_orig; +} + /** * adreno_iommu_set_pt_generate_cmds() - Generate commands to change pagetable * @rb: The RB pointer in which these commaands are to be submitted @@ -588,6 +622,7 @@ unsigned int adreno_iommu_set_pt_generate_cmds( struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb); struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device); + struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER]; u64 ttbr0; u32 contextidr; unsigned int *cmds_orig = cmds; @@ -601,7 +636,11 @@ unsigned int adreno_iommu_set_pt_generate_cmds( iommu->setstate.gpuaddr + KGSL_IOMMU_SETSTATE_NOP_OFFSET); if (iommu->version >= 2) { - if (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) + if (adreno_is_a6xx(adreno_dev)) + cmds += _adreno_iommu_set_pt_v2_a6xx(device, cmds, + ttbr0, contextidr, rb, + ctx->cb_num); + else if (adreno_is_a5xx(adreno_dev)) cmds += _adreno_iommu_set_pt_v2_a5xx(device, cmds, ttbr0, contextidr, rb); else if (adreno_is_a4xx(adreno_dev)) -- GitLab From 873e9302b9557270d4dcbf2ecb297903be409af1 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Mon, 15 Aug 2016 18:17:15 -0700 Subject: [PATCH 182/786] defconfig: sdm845: enable vendor device specific HID support This change enables plantronics usb head set specific HID driver support. 
Change-Id: I5b25a99269d3374bc4bc58c734bb64f0418f08f8 Signed-off-by: Hemant Kumar --- arch/arm64/configs/sdm845-perf_defconfig | 1 + arch/arm64/configs/sdm845_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index c69e0153fea4..15d3529d6894 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -365,6 +365,7 @@ CONFIG_SND_SOC_SDM845=y CONFIG_UHID=y CONFIG_HID_APPLE=y CONFIG_HID_MICROSOFT=y +CONFIG_HID_PLANTRONICS=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_XHCI_HCD=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 2da9d07b6b70..ecb89bc70d71 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -374,6 +374,7 @@ CONFIG_SND_SOC_SDM845=y CONFIG_UHID=y CONFIG_HID_APPLE=y CONFIG_HID_MICROSOFT=y +CONFIG_HID_PLANTRONICS=y CONFIG_USB=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_EHCI_HCD=y -- GitLab From c4949de3793fd484bb4804ab6c5811e1030238df Mon Sep 17 00:00:00 2001 From: Benjamin Chan Date: Wed, 21 Jun 2017 16:53:15 -0400 Subject: [PATCH 183/786] msm: sde: remove error message for probe defer handling For all the probe defer handling, rotator driver does not need to post error message. CRs-Fixed: 2062947 Change-Id: Ica6c7bbbd3c4a0c4fc485909e4df65a6d8fcebea Signed-off-by: Benjamin Chan --- drivers/media/platform/msm/sde/rotator/sde_rotator_core.c | 2 +- drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c | 5 ++++- drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c | 3 ++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c index a850bc0f33c0..a195c15b2e8c 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c @@ -2958,7 +2958,7 @@ int sde_rotator_core_init(struct sde_rot_mgr **pmgr, *pmgr = mgr; ret = sde_rotator_footswitch_ctrl(mgr, true); if (ret) { - SDEROT_ERR("res_init failed %d\n", ret); + SDEROT_INFO("res_init failed %d, use probe defer\n", ret); ret = -EPROBE_DEFER; goto error_fs_en_fail; } diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c index f2778b014ed8..d300de2b47fb 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c @@ -3291,7 +3291,10 @@ static int sde_rotator_probe(struct platform_device *pdev) ret = sde_rotator_core_init(&rot_dev->mgr, pdev); if (ret < 0) { - SDEDEV_ERR(&pdev->dev, "fail init core %d\n", ret); + if (ret == -EPROBE_DEFER) + SDEDEV_INFO(&pdev->dev, "probe defer for core init\n"); + else + SDEDEV_ERR(&pdev->dev, "fail init core %d\n", ret); goto error_rotator_core_init; } diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c index e2091921dce3..9e4718769573 100644 --- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c +++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c @@ -519,7 +519,8 @@ int sde_smmu_probe(struct platform_device *pdev) char name[MAX_CLIENT_NAME_LEN]; if (!mdata) { - SDEROT_ERR("probe failed as mdata is not initialized\n"); + SDEROT_INFO( + "probe failed as mdata is not initializedi, probe defer\n"); return -EPROBE_DEFER; } -- GitLab From 
b1886f443e256d6e870ee6f478982314048876ad Mon Sep 17 00:00:00 2001 From: Deepak Katragadda Date: Mon, 19 Jun 2017 11:52:32 -0700 Subject: [PATCH 184/786] clk: qcom: gcc-sdm845: Populate the hwcg fields of branch clocks Populate the hwcg_reg and hwcg_bit fields of the branch clock structures for clocks which can be hardware clock gated. Change-Id: I2b59a4455cbcf82c77a4610bd63b28d93b32c5a8 Signed-off-by: Deepak Katragadda --- drivers/clk/qcom/gcc-sdm845.c | 66 +++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index 13de253b542a..796c3e469770 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -1240,6 +1240,8 @@ static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = { static struct clk_branch gcc_aggre_ufs_card_axi_clk = { .halt_reg = 0x82028, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x82028, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x82028, .enable_mask = BIT(0), @@ -1275,6 +1277,8 @@ static struct clk_branch gcc_aggre_ufs_card_axi_hw_ctl_clk = { static struct clk_branch gcc_aggre_ufs_phy_axi_clk = { .halt_reg = 0x82024, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x82024, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x82024, .enable_mask = BIT(0), @@ -1346,6 +1350,8 @@ static struct clk_branch gcc_aggre_usb3_sec_axi_clk = { static struct clk_branch gcc_boot_rom_ahb_clk = { .halt_reg = 0x38004, .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x38004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(10), @@ -1359,6 +1365,8 @@ static struct clk_branch gcc_boot_rom_ahb_clk = { static struct clk_branch gcc_camera_ahb_clk = { .halt_reg = 0xb008, .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb008, + .hwcg_bit = 1, .clkr = { .enable_reg = 0xb008, .enable_mask = BIT(0), @@ -1398,6 +1406,8 @@ static struct clk_branch gcc_camera_xo_clk = { static struct clk_branch gcc_ce1_ahb_clk = { .halt_reg = 0x4100c, .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x4100c, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(3), @@ -1504,6 +1514,8 @@ static struct clk_branch gcc_cpuss_dvm_bus_clk = { static struct clk_branch gcc_cpuss_gnoc_clk = { .halt_reg = 0x48004, .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x48004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(22), @@ -1548,6 +1560,8 @@ static struct clk_branch gcc_ddrss_gpu_axi_clk = { static struct clk_branch gcc_disp_ahb_clk = { .halt_reg = 0xb00c, .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb00c, + .hwcg_bit = 1, .clkr = { .enable_reg = 0xb00c, .enable_mask = BIT(0), @@ -1675,6 +1689,8 @@ static struct clk_branch gcc_gp3_clk = { static struct clk_branch gcc_gpu_cfg_ahb_clk = { .halt_reg = 0x71004, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x71004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x71004, .enable_mask = BIT(0), @@ -1774,6 +1790,8 @@ static struct clk_branch gcc_mss_axis2_clk = { static struct clk_branch gcc_mss_cfg_ahb_clk = { .halt_reg = 0x8a000, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x8a000, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x8a000, .enable_mask = BIT(0), @@ -1799,6 +1817,8 @@ static struct clk_gate2 gcc_mss_gpll0_div_clk_src = { static struct clk_branch gcc_mss_mfab_axis_clk = { .halt_reg = 0x8a004, .halt_check = BRANCH_VOTED, + .hwcg_reg = 0x8a004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x8a004, .enable_mask = BIT(0), @@ -1856,6 +1876,8 @@ static struct clk_branch gcc_pcie_0_aux_clk = { static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { .halt_reg = 0x6b018, 
.halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b018, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x5200c, .enable_mask = BIT(2), @@ -1907,6 +1929,8 @@ static struct clk_gate2 gcc_pcie_0_pipe_clk = { static struct clk_branch gcc_pcie_0_slv_axi_clk = { .halt_reg = 0x6b010, .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b010, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x5200c, .enable_mask = BIT(0), @@ -1951,6 +1975,8 @@ static struct clk_branch gcc_pcie_1_aux_clk = { static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { .halt_reg = 0x8d018, .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8d018, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(28), @@ -2002,6 +2028,8 @@ static struct clk_gate2 gcc_pcie_1_pipe_clk = { static struct clk_branch gcc_pcie_1_slv_axi_clk = { .halt_reg = 0x8d010, .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8d010, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(26), @@ -2082,6 +2110,8 @@ static struct clk_branch gcc_pdm2_clk = { static struct clk_branch gcc_pdm_ahb_clk = { .halt_reg = 0x33004, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x33004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x33004, .enable_mask = BIT(0), @@ -2108,6 +2138,8 @@ static struct clk_branch gcc_pdm_xo4_clk = { static struct clk_branch gcc_prng_ahb_clk = { .halt_reg = 0x34004, .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x34004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(13), @@ -2121,6 +2153,8 @@ static struct clk_branch gcc_prng_ahb_clk = { static struct clk_branch gcc_qmip_camera_ahb_clk = { .halt_reg = 0xb014, .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb014, + .hwcg_bit = 1, .clkr = { .enable_reg = 0xb014, .enable_mask = BIT(0), @@ -2134,6 +2168,8 @@ static struct clk_branch gcc_qmip_camera_ahb_clk = { static struct clk_branch gcc_qmip_disp_ahb_clk = { .halt_reg = 0xb018, .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb018, + .hwcg_bit = 1, .clkr = { .enable_reg = 0xb018, .enable_mask = BIT(0), @@ -2147,6 +2183,8 @@ static struct clk_branch gcc_qmip_disp_ahb_clk = { static struct clk_branch gcc_qmip_video_ahb_clk = { .halt_reg = 0xb010, .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb010, + .hwcg_bit = 1, .clkr = { .enable_reg = 0xb010, .enable_mask = BIT(0), @@ -2461,6 +2499,8 @@ static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = { static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = { .halt_reg = 0x17008, .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17008, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x5200c, .enable_mask = BIT(7), @@ -2487,6 +2527,8 @@ static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = { static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = { .halt_reg = 0x18010, .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x18010, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x5200c, .enable_mask = BIT(21), @@ -2624,6 +2666,8 @@ static struct clk_branch gcc_tsif_ref_clk = { static struct clk_branch gcc_ufs_card_ahb_clk = { .halt_reg = 0x75010, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75010, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x75010, .enable_mask = BIT(0), @@ -2637,6 +2681,8 @@ static struct clk_branch gcc_ufs_card_ahb_clk = { static struct clk_branch gcc_ufs_card_axi_clk = { .halt_reg = 0x7500c, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7500c, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x7500c, .enable_mask = BIT(0), @@ -2685,6 +2731,8 @@ static struct clk_branch gcc_ufs_card_clkref_clk = { static struct clk_branch gcc_ufs_card_ice_core_clk = { .halt_reg = 0x75058, .halt_check = BRANCH_HALT, + .hwcg_reg = 
0x75058, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x75058, .enable_mask = BIT(0), @@ -2720,6 +2768,8 @@ static struct clk_branch gcc_ufs_card_ice_core_hw_ctl_clk = { static struct clk_branch gcc_ufs_card_phy_aux_clk = { .halt_reg = 0x7508c, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7508c, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x7508c, .enable_mask = BIT(0), @@ -2791,6 +2841,8 @@ static struct clk_gate2 gcc_ufs_card_tx_symbol_0_clk = { static struct clk_branch gcc_ufs_card_unipro_core_clk = { .halt_reg = 0x75054, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75054, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x75054, .enable_mask = BIT(0), @@ -2839,6 +2891,8 @@ static struct clk_branch gcc_ufs_mem_clkref_clk = { static struct clk_branch gcc_ufs_phy_ahb_clk = { .halt_reg = 0x77010, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77010, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x77010, .enable_mask = BIT(0), @@ -2852,6 +2906,8 @@ static struct clk_branch gcc_ufs_phy_ahb_clk = { static struct clk_branch gcc_ufs_phy_axi_clk = { .halt_reg = 0x7700c, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7700c, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x7700c, .enable_mask = BIT(0), @@ -2887,6 +2943,8 @@ static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = { static struct clk_branch gcc_ufs_phy_ice_core_clk = { .halt_reg = 0x77058, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77058, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x77058, .enable_mask = BIT(0), @@ -2922,6 +2980,8 @@ static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = { static struct clk_branch gcc_ufs_phy_phy_aux_clk = { .halt_reg = 0x7708c, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7708c, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x7708c, .enable_mask = BIT(0), @@ -2993,6 +3053,8 @@ static struct clk_gate2 gcc_ufs_phy_tx_symbol_0_clk = { static struct clk_branch gcc_ufs_phy_unipro_core_clk = { .halt_reg = 0x77054, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77054, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x77054, .enable_mask = BIT(0), @@ -3248,6 +3310,8 @@ static struct clk_gate2 gcc_usb3_sec_phy_pipe_clk = { static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = { .halt_reg = 0x6a004, .halt_check = BRANCH_HALT, + .hwcg_reg = 0x6a004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x6a004, .enable_mask = BIT(0), @@ -3261,6 +3325,8 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = { static struct clk_branch gcc_video_ahb_clk = { .halt_reg = 0xb004, .halt_check = BRANCH_HALT, + .hwcg_reg = 0xb004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0xb004, .enable_mask = BIT(0), -- GitLab From afe7e12443a708f89806704dfc5973d56e82f2ed Mon Sep 17 00:00:00 2001 From: Sai Krishna Juturi Date: Wed, 17 May 2017 11:06:35 +0530 Subject: [PATCH 185/786] usb : dwc3: Initialize kernel stack variables properly If kernel stack variables are not initialized properly, there is a chance of kernel information disclosure. So, initialize kernel stack variables with null characters. 
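The change amounts to zero-initializing each on-stack buffer at its declaration, so any bytes that copy_from_user() does not overwrite are guaranteed to be NUL rather than stale stack data; in essence:

    char buf[32] = {};	/* was: char buf[32]; */

    if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
        return -EFAULT;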
CRs-fixed: 2042592 Change-Id: I213c0e5c7f67824c2cecace276ff2f8f81599d51 Signed-off-by: Sai Krishna Juturi Signed-off-by: Mayank Rana --- drivers/usb/dwc3/debugfs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index a7105afd82e0..260092c5d49a 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -324,7 +324,7 @@ static ssize_t dwc3_mode_write(struct file *file, struct dwc3 *dwc = s->private; unsigned long flags; u32 mode = 0; - char buf[32]; + char buf[32] = {}; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -404,7 +404,7 @@ static ssize_t dwc3_testmode_write(struct file *file, struct dwc3 *dwc = s->private; unsigned long flags; u32 testmode = 0; - char buf[32]; + char buf[32] = {}; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -511,7 +511,7 @@ static ssize_t dwc3_link_state_write(struct file *file, struct dwc3 *dwc = s->private; unsigned long flags; enum dwc3_link_state state = 0; - char buf[32]; + char buf[32] = {}; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; -- GitLab From b4aff8b0c2103180a7c4f782c0bc7a3a1b8da62e Mon Sep 17 00:00:00 2001 From: David Dai Date: Tue, 11 Apr 2017 19:36:53 -0700 Subject: [PATCH 186/786] msm: msm_bus: Introduce QoS config for QNOC4 Add support for new QNOC QoS programming scheme. Configure default priority, regulator, limiter and forwarding settings based using dt bindings in order to ensure correct priority of masters across the system to satisfy their usecases. Add new single BCM commit request interface to toggle on/off clock domains controlled by RPMH in order to ensure master specific QoS register space is clocked before access. Change-Id: I1b4757c63c718a909775003c01fc1f5902e4f349 Signed-off-by: David Dai --- .../bindings/arm/msm/msm_bus_adhoc.txt | 20 + .../soc/qcom/msm_bus/msm_bus_fabric_rpmh.c | 176 +++++- drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c | 564 ++++++------------ drivers/soc/qcom/msm_bus/msm_bus_of.c | 4 +- drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c | 174 +++--- drivers/soc/qcom/msm_bus/msm_bus_rpmh.h | 47 +- 6 files changed, 473 insertions(+), 512 deletions(-) diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt index 6bf6a57ca629..8aeaf774e48e 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt @@ -121,6 +121,20 @@ qcom,blacklist: An array of phandles that represent devices that this de cannot connect to either directly or via any number of intermediate nodes. qcom,agg-ports: The number of aggregation ports on the bus. +qcom,node-qos-bcms: Optional property to target specific BCMs to toggle during QoS configuration, + this is to ensure QoS register space is clocked and accessible. Array is + defined as follows: BCM node ID, VoteX, VoteY. The vectors must be defined in + sets of the three values aforementioned. +qcom,prio: Default fixed priority for bus master. +qcom,qos-lim-params: Array containing QoS limiter configurations defined as: Bandwidth, Saturation. + Must define "qcom,qos-lim-en" for these settings to take effect. +qcom,qos-lim-en: Boolean to enable limiter settings, default is disabled. +qcom,qos-reg-params: Array containing QoS regulator configurations defined as: Low Priority, High + Priority, Bandwidth, Saturation. 
Must define "qcom,qos-reg-regmode" for these + settings to take effect. +qcom,qos-reg-mode: Array containing QoS regulator mode enablement: Read Enable, Write Enable, + default is disabled. +qcom,forwarding: Boolean indicate Urgent Forwarding enablement. The following properties are optional as collecting data via coresight might and are present on child nodes that represent NOC devices. The documentation @@ -172,6 +186,12 @@ Example: <&clock_gcc clk_q1_clk>; q0-clk-supply = <&gdsc_q0_clk>; }; + qcom,node-qos-bcms = <0x7011 0 1>; + qcom,prio = 1; + qcom,qos-lim-params = <1000 1000>; + qcom,qos-lim-en: + qcom,qos-reg-params = <1 2 1000 1000>; + qcom,qos-reg-mode = <1 1>; }; mm_int_bimc: mm-int-bimc { diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c index 144b1a12c8db..b331e74c2cb1 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c @@ -272,6 +272,9 @@ static int tcs_cmd_gen(struct msm_bus_node_device_type *cur_bcm, int ret = 0; bool valid = true; + if (!cmd) + return ret; + if (vec_a == 0 && vec_b == 0) valid = false; @@ -670,7 +673,30 @@ int msm_bus_query_gen(struct list_head *query_list, return ret; } +static void bcm_commit_single_req(struct msm_bus_node_device_type *cur_bcm, + uint64_t vec_a, uint64_t vec_b) +{ + struct msm_bus_node_device_type *cur_rsc = NULL; + struct rpmh_client *cur_mbox = NULL; + struct tcs_cmd *cmd_active = NULL; + + if (!cur_bcm->node_info->num_rsc_devs) + return; + + cmd_active = kzalloc(sizeof(struct tcs_cmd), GFP_KERNEL); + + if (!cmd_active) + return; + + cur_rsc = to_msm_bus_node(cur_bcm->node_info->rsc_devs[0]); + cur_mbox = cur_rsc->rscdev->mbox; + + tcs_cmd_gen(cur_bcm, cmd_active, vec_a, vec_b, true); + rpmh_write_single(cur_mbox, RPMH_ACTIVE_ONLY_STATE, + cmd_active->addr, cmd_active->data); + kfree(cmd_active); +} void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size, size_t new_size, gfp_t flags) @@ -733,29 +759,22 @@ static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node) static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node) { - struct msm_bus_node_device_type *bus_node = NULL; int i; int ret; long rounded_rate; - if (!node || (!to_msm_bus_node(node->node_info->bus_device))) { - ret = -ENXIO; - goto exit_enable_node_qos_clk; - } - bus_node = to_msm_bus_node(node->node_info->bus_device); - - for (i = 0; i < bus_node->num_node_qos_clks; i++) { - if (!bus_node->node_qos_clks[i].enable_only_clk) { + for (i = 0; i < node->num_node_qos_clks; i++) { + if (!node->node_qos_clks[i].enable_only_clk) { rounded_rate = clk_round_rate( - bus_node->node_qos_clks[i].clk, 1); - ret = setrate_nodeclk(&bus_node->node_qos_clks[i], + node->node_qos_clks[i].clk, 1); + ret = setrate_nodeclk(&node->node_qos_clks[i], rounded_rate); if (ret) MSM_BUS_DBG("%s: Failed set rate clk,node %d\n", __func__, node->node_info->id); } - ret = enable_nodeclk(&bus_node->node_qos_clks[i], + ret = enable_nodeclk(&node->node_qos_clks[i], node->node_info->bus_device); if (ret) { MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n", @@ -763,12 +782,85 @@ static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node) msm_bus_disable_node_qos_clk(node); goto exit_enable_node_qos_clk; } - } exit_enable_node_qos_clk: return ret; } +static int msm_bus_vote_qos_bcms(struct msm_bus_node_device_type *node) +{ + struct msm_bus_node_device_type *cur_dev = NULL; + struct msm_bus_node_device_type *cur_bcm = 
NULL; + int i; + struct device *dev = NULL; + + if (!node || (!to_msm_bus_node(node->node_info->bus_device))) + return -ENXIO; + + cur_dev = node; + + for (i = 0; i < cur_dev->num_qos_bcms; i++) { + dev = bus_find_device(&msm_bus_type, NULL, + (void *) &cur_dev->qos_bcms[i].qos_bcm_id, + msm_bus_device_match_adhoc); + + if (!dev) { + MSM_BUS_ERR("Can't find dev node for %d", + cur_dev->qos_bcms[i].qos_bcm_id); + return -ENODEV; + } + + cur_bcm = to_msm_bus_node(dev); + if (cur_bcm->node_vec[ACTIVE_CTX].vec_a != 0 || + cur_bcm->node_vec[ACTIVE_CTX].vec_b != 0 || + cur_bcm->node_vec[DUAL_CTX].vec_a != 0 || + cur_bcm->node_vec[DUAL_CTX].vec_b != 0) + return 0; + + bcm_commit_single_req(cur_bcm, + cur_dev->qos_bcms[i].vec.vec_a, + cur_dev->qos_bcms[i].vec.vec_b); + } + + return 0; +} + +static int msm_bus_rm_vote_qos_bcms(struct msm_bus_node_device_type *node) +{ + struct msm_bus_node_device_type *cur_dev = NULL; + struct msm_bus_node_device_type *cur_bcm = NULL; + int i; + struct device *dev = NULL; + + if (!node || (!to_msm_bus_node(node->node_info->bus_device))) + return -ENXIO; + + cur_dev = node; + + for (i = 0; i < cur_dev->num_qos_bcms; i++) { + dev = bus_find_device(&msm_bus_type, NULL, + (void *) &cur_dev->qos_bcms[i].qos_bcm_id, + msm_bus_device_match_adhoc); + + if (!dev) { + MSM_BUS_ERR("Can't find dev node for %d", + cur_dev->qos_bcms[i].qos_bcm_id); + return -ENODEV; + } + + cur_bcm = to_msm_bus_node(dev); + if (cur_bcm->node_vec[ACTIVE_CTX].vec_a != 0 || + cur_bcm->node_vec[ACTIVE_CTX].vec_b != 0 || + cur_bcm->node_vec[DUAL_CTX].vec_a != 0 || + cur_bcm->node_vec[DUAL_CTX].vec_b != 0) + return 0; + + bcm_commit_single_req(cur_bcm, 0, 0); + } + + return 0; +} + int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev, int enable, uint64_t lim_bw) { @@ -847,12 +939,11 @@ static int msm_bus_dev_init_qos(struct device *dev, void *data) bus_node_info->fabdev->noc_ops.qos_init) { int ret = 0; - if (node_dev->ap_owned && - (node_dev->node_info->qos_params.mode) != -1) { - + if (node_dev->ap_owned) { if (bus_node_info->fabdev->bypass_qos_prg) goto exit_init_qos; + ret = msm_bus_vote_qos_bcms(node_dev); ret = msm_bus_enable_node_qos_clk(node_dev); if (ret < 0) { MSM_BUS_DBG("Can't Enable QoS clk %d\n", @@ -868,6 +959,7 @@ static int msm_bus_dev_init_qos(struct device *dev, void *data) bus_node_info->fabdev->qos_off, bus_node_info->fabdev->qos_freq); ret = msm_bus_disable_node_qos_clk(node_dev); + ret = msm_bus_rm_vote_qos_bcms(node_dev); node_dev->node_info->defer_qos = false; } } else @@ -1136,18 +1228,27 @@ static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata, node_info->is_fab_dev = pdata_node_info->is_fab_dev; node_info->is_bcm_dev = pdata_node_info->is_bcm_dev; node_info->is_rsc_dev = pdata_node_info->is_rsc_dev; - node_info->qos_params.mode = pdata_node_info->qos_params.mode; - node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1; - node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0; - node_info->qos_params.reg_prio1 = pdata_node_info->qos_params.reg_prio1; - node_info->qos_params.reg_prio0 = pdata_node_info->qos_params.reg_prio0; - node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl; - node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd; - node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr; - node_info->qos_params.gp = pdata_node_info->qos_params.gp; - node_info->qos_params.thmp = pdata_node_info->qos_params.thmp; - node_info->qos_params.ws = pdata_node_info->qos_params.ws; - 
node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer; + node_info->qos_params.prio_dflt = pdata_node_info->qos_params.prio_dflt; + node_info->qos_params.limiter.bw = + pdata_node_info->qos_params.limiter.bw; + node_info->qos_params.limiter.sat = + pdata_node_info->qos_params.limiter.sat; + node_info->qos_params.limiter_en = + pdata_node_info->qos_params.limiter_en; + node_info->qos_params.reg.low_prio = + pdata_node_info->qos_params.reg.low_prio; + node_info->qos_params.reg.hi_prio = + pdata_node_info->qos_params.reg.hi_prio; + node_info->qos_params.reg.bw = + pdata_node_info->qos_params.reg.bw; + node_info->qos_params.reg.sat = + pdata_node_info->qos_params.reg.sat; + node_info->qos_params.reg_mode.read = + pdata_node_info->qos_params.reg_mode.read; + node_info->qos_params.reg_mode.write = + pdata_node_info->qos_params.reg_mode.write; + node_info->qos_params.urg_fwd_en = + pdata_node_info->qos_params.urg_fwd_en; node_info->agg_params.buswidth = pdata_node_info->agg_params.buswidth; node_info->agg_params.agg_scheme = pdata_node_info->agg_params.agg_scheme; @@ -1299,7 +1400,7 @@ static struct device *msm_bus_device_init( struct device *bus_dev = NULL; struct msm_bus_node_device_type *bus_node = NULL; struct msm_bus_node_info_type *node_info = NULL; - int ret = 0; + int ret = 0, i = 0; /** * Init here so we can use devm calls @@ -1328,6 +1429,23 @@ static struct device *msm_bus_device_init( bus_node->node_info = node_info; bus_node->ap_owned = pdata->ap_owned; bus_node->dirty = false; + bus_node->num_qos_bcms = pdata->num_qos_bcms; + if (bus_node->num_qos_bcms) { + bus_node->qos_bcms = devm_kzalloc(bus_dev, + (sizeof(struct qos_bcm_type) * + bus_node->num_qos_bcms), GFP_KERNEL); + if (!bus_node->qos_bcms) + goto exit_device_init; + for (i = 0; i < bus_node->num_qos_bcms; i++) { + bus_node->qos_bcms[i].qos_bcm_id = + pdata->qos_bcms[i].qos_bcm_id; + bus_node->qos_bcms[i].vec.vec_a = + pdata->qos_bcms[i].vec.vec_a; + bus_node->qos_bcms[i].vec.vec_b = + pdata->qos_bcms[i].vec.vec_b; + } + } + bus_dev->of_node = pdata->of_node; if (msm_bus_copy_node_info(pdata, bus_dev) < 0) { diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c index c501e80bb8e8..996c7194b508 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -32,15 +32,46 @@ #define NOC_QOS_REG_BASE(b, o) ((b) + (o)) -#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d) \ +#define NOC_QOS_MAINCTL_LOWn_ADDR(b, o, n, d) \ (NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n)) -enum noc_qos_id_priorityn { - NOC_QOS_PRIORITYn_RMSK = 0x0000000f, - NOC_QOS_PRIORITYn_MAXn = 32, - NOC_QOS_PRIORITYn_P1_BMSK = 0xc, - NOC_QOS_PRIORITYn_P1_SHFT = 0x2, - NOC_QOS_PRIORITYn_P0_BMSK = 0x3, - NOC_QOS_PRIORITYn_P0_SHFT = 0x0, +enum noc_qos_id_mainctl_lown { + NOC_QOS_MCTL_DFLT_PRIOn_BMSK = 0x00000070, + NOC_QOS_MCTL_DFLT_PRIOn_SHFT = 0x4, + NOC_QOS_MCTL_URGFWD_ENn_BMSK = 0x00000008, + NOC_QOS_MCTL_URGFWD_ENn_SHFT = 0x3, + NOC_QOS_MCTL_LIMIT_ENn_BMSK = 0x00000001, + NOC_QOS_MCTL_LIMIT_ENn_SHFT = 0x0, +}; + +#define NOC_QOS_LIMITBWn_ADDR(b, o, n, d) \ + (NOC_QOS_REG_BASE(b, o) + 0x18 + (d) * (n)) +enum noc_qos_id_limitbwn { + NOC_QOS_LIMITBW_BWn_BMSK = 0x000007FF, + NOC_QOS_LIMITBW_BWn_SHFT = 0x0, + NOC_QOS_LIMITBW_SATn_BMSK = 0x03FF0000, + NOC_QOS_LIMITBW_SATn_SHFT = 0x11, +}; + +#define NOC_QOS_REGUL0CTLn_ADDR(b, o, n, d) \ + (NOC_QOS_REG_BASE(b, o) + 0x40 + (d) * (n)) +enum noc_qos_id_regul0ctln { + NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK = 0x00007000, + NOC_QOS_REGUL0CTL_HI_PRIOn_SHFT = 0x8, + NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK = 0x00000700, + NOC_QOS_REGUL0CTL_LW_PRIOn_SHFT = 0xC, + NOC_QOS_REGUL0CTL_WRENn_BMSK = 0x00000002, + NOC_QOS_REGUL0CTL_WRENn_SHFT = 0x1, + NOC_QOS_REGUL0CTL_RDENn_BMSK = 0x00000001, + NOC_QOS_REGUL0CTL_RDENn_SHFT = 0x0, +}; + +#define NOC_QOS_REGUL0BWn_ADDR(b, o, n, d) \ + (NOC_QOS_REG_BASE(b, o) + 0x48 + (d) * (n)) +enum noc_qos_id_regul0bwbwn { + NOC_QOS_REGUL0BW_BWn_BMSK = 0x000007FF, + NOC_QOS_REGUL0BW_BWn_SHFT = 0x0, + NOC_QOS_REGUL0BW_SATn_BMSK = 0x03FF0000, + NOC_QOS_REGUL0BW_SATn_SHFT = 0x11, }; #define NOC_QOS_MODEn_ADDR(b, o, n, d) \ @@ -100,14 +131,6 @@ static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq) /** * Calculate the max BW in Bytes/s for a given time-base. 
*/ -static uint32_t noc_bw_ceil(long int bw_field, uint32_t qos_freq_khz) -{ - uint64_t bw_temp = 2 * qos_freq_khz * bw_field; - uint32_t scale = 1000 * BW_SCALE; - - noc_div(&bw_temp, scale); - return bw_temp * 1000000; -} #define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase)) /** @@ -129,190 +152,147 @@ static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq) } #define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase)) -/* Calculate bandwidth field value for requested bandwidth */ -static uint32_t noc_bw_field(uint64_t bw_bps, uint32_t qos_freq_khz) +static void noc_set_qos_dflt_prio(void __iomem *base, uint32_t qos_off, + uint32_t mport, uint32_t qos_delta, + uint32_t prio) { - uint32_t bw_field = 0; - - if (bw_bps) { - uint32_t rem; - uint64_t bw_capped = min_t(uint64_t, bw_bps, - MAX_BW(qos_freq_khz)); - uint64_t bwc = bw_capped * BW_SCALE; - uint64_t qf = 2 * qos_freq_khz * 1000; - - rem = noc_div(&bwc, qf); - bw_field = (uint32_t)max_t(unsigned long, bwc, MIN_BW_FIELD); - bw_field = (uint32_t)min_t(unsigned long, bw_field, - MAX_BW_FIELD); - } + uint32_t reg_val, val; - MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field); - return bw_field; + reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, + qos_delta)); + val = prio << NOC_QOS_MCTL_DFLT_PRIOn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_DFLT_PRIOn_BMSK))) | + (val & NOC_QOS_MCTL_DFLT_PRIOn_BMSK)), + NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta)); + + /* Ensure qos priority is set before exiting */ + wmb(); } -static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq) +static void noc_set_qos_limiter(void __iomem *base, uint32_t qos_off, + uint32_t mport, uint32_t qos_delta, + struct msm_bus_noc_limiter *lim, uint32_t lim_en) { - uint32_t sat_field = 0; - - if (bw) { - /* Limit to max bw and scale bw to 100 KB increments */ - uint64_t tbw, tscale; - uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq)); - uint32_t rem = noc_div(&bw_scaled, 100000); - - /** - * SATURATION = - * (BW [MBps] * integration window [us] * - * time base frequency [MHz]) / (256 * 16) - */ - tbw = bw_scaled * ws * qos_freq; - tscale = BW_SCALE * SAT_SCALE * 1000000LL; - rem = noc_div(&tbw, tscale); - sat_field = (uint32_t)max_t(unsigned long, tbw, MIN_SAT_FIELD); - sat_field = (uint32_t)min_t(unsigned long, sat_field, - MAX_SAT_FIELD); - } + uint32_t reg_val, val; - MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field); - return sat_field; -} + reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, + qos_delta)); + + writel_relaxed((reg_val & (~(NOC_QOS_MCTL_LIMIT_ENn_BMSK))), + NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta)); + + /* Ensure we disable limiter before config*/ + wmb(); + + reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, + qos_delta)); + val = lim->bw << NOC_QOS_LIMITBW_BWn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_BWn_BMSK))) | + (val & NOC_QOS_LIMITBW_BWn_BMSK)), + NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta)); + + reg_val = readl_relaxed(NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, + qos_delta)); + val = lim->sat << NOC_QOS_LIMITBW_SATn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_LIMITBW_SATn_BMSK))) | + (val & NOC_QOS_LIMITBW_SATn_BMSK)), + NOC_QOS_LIMITBWn_ADDR(base, qos_off, mport, qos_delta)); + + /* Ensure qos limiter settings in place before possibly enabling */ + wmb(); + + reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, + qos_delta)); + val = lim_en << 
NOC_QOS_MCTL_LIMIT_ENn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_LIMIT_ENn_BMSK))) | + (val & NOC_QOS_MCTL_LIMIT_ENn_BMSK)), + NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta)); -static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off, - uint32_t mport, uint32_t qos_delta, uint8_t mode, - uint8_t perm_mode) -{ - if (mode < NOC_QOS_MODE_MAX && - ((1 << mode) & perm_mode)) { - uint32_t reg_val; - - reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, - mport, qos_delta)) & NOC_QOS_MODEn_RMSK; - writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) | - (mode & NOC_QOS_MODEn_MODE_BMSK)), - NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta)); - } - /* Ensure qos mode is set before exiting */ wmb(); } -static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off, +static void noc_set_qos_regulator(void __iomem *base, uint32_t qos_off, uint32_t mport, uint32_t qos_delta, - struct msm_bus_noc_qos_priority *priority) + struct msm_bus_noc_regulator *reg, + struct msm_bus_noc_regulator_mode *reg_mode) { uint32_t reg_val, val; - reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, - qos_delta)) & NOC_QOS_PRIORITYn_RMSK; - val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT; - writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) | - (val & NOC_QOS_PRIORITYn_P1_BMSK)), - NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta)); - - reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, - qos_delta)) - & NOC_QOS_PRIORITYn_RMSK; - writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) | - (priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)), - NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta)); - /* Ensure qos priority is set before exiting */ - wmb(); -} + reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, + qos_delta)) & (NOC_QOS_REGUL0CTL_WRENn_BMSK | + NOC_QOS_REGUL0CTL_RDENn_BMSK); -static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off, - uint32_t qos_freq, uint32_t mport, uint32_t qos_delta, - uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw) -{ - uint32_t reg_val, val, mode; + writel_relaxed((reg_val & (~(NOC_QOS_REGUL0CTL_WRENn_BMSK | + NOC_QOS_REGUL0CTL_RDENn_BMSK))), + NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta)); - if (!qos_freq) { - MSM_BUS_DBG("Zero QoS Freq\n"); - return; - } + /* Ensure qos regulator is disabled before configuring */ + wmb(); - /* If Limiter or Regulator modes are not supported, bw not available*/ - if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER | - NOC_QOS_PERM_MODE_REGULATOR)) { - uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq); - uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws, - qos_freq); - - MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n", - perm_mode, bw_val, sat_val); - /* - * If in Limiter/Regulator mode, first go to fixed mode. 
- * Clear QoS accumulator - **/ - mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, - mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK; - if (mode == NOC_QOS_MODE_REGULATOR || mode == - NOC_QOS_MODE_LIMITER) { - reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR( - base, qos_off, mport, qos_delta)); - val = NOC_QOS_MODE_FIXED; - writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) - | (val & NOC_QOS_MODEn_MODE_BMSK), - NOC_QOS_MODEn_ADDR(base, qos_off, mport, - qos_delta)); - } - - reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport, - qos_delta)); - val = bw_val << NOC_QOS_BWn_BW_SHFT; - writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) | - (val & NOC_QOS_BWn_BW_BMSK)), - NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta)); - - MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val & - (~NOC_QOS_BWn_BW_BMSK)) | (val & - NOC_QOS_BWn_BW_BMSK))); - - reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off, - mport, qos_delta)); - val = sat_val << NOC_QOS_SATn_SAT_SHFT; - writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) | - (val & NOC_QOS_SATn_SAT_BMSK)), - NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta)); - - MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val & - (~NOC_QOS_SATn_SAT_BMSK)) | (val & - NOC_QOS_SATn_SAT_BMSK))); - - /* Set mode back to what it was initially */ - reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, - mport, qos_delta)); - writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) - | (mode & NOC_QOS_MODEn_MODE_BMSK), - NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta)); - /* Ensure that all writes for bandwidth registers have - * completed before returning - */ - wmb(); - } -} + reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, + qos_delta)) & NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK; + val = reg->hi_prio << NOC_QOS_REGUL0CTL_HI_PRIOn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK))) | + (val & NOC_QOS_REGUL0CTL_HI_PRIOn_BMSK)), + NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta)); + + reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, + qos_delta)) & NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK; + val = reg->low_prio << NOC_QOS_REGUL0CTL_LW_PRIOn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK))) | + (val & NOC_QOS_REGUL0CTL_LW_PRIOn_BMSK)), + NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta)); + + reg_val = readl_relaxed(NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, + qos_delta)) & NOC_QOS_REGUL0BW_BWn_BMSK; + val = reg->bw << NOC_QOS_REGUL0BW_BWn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0BW_BWn_BMSK))) | + (val & NOC_QOS_REGUL0BW_BWn_BMSK)), + NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, qos_delta)); + + reg_val = readl_relaxed(NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, + qos_delta)) & NOC_QOS_REGUL0BW_SATn_BMSK; + val = reg->sat << NOC_QOS_REGUL0BW_SATn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0BW_SATn_BMSK))) | + (val & NOC_QOS_REGUL0BW_SATn_BMSK)), + NOC_QOS_REGUL0BWn_ADDR(base, qos_off, mport, qos_delta)); + + /* Ensure regulator is configured before possibly enabling */ + wmb(); -uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off, - uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode) -{ - if (perm_mode == NOC_QOS_MODES_ALL_PERM) - return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off, - mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK; - else - return 31 - __CLZ(mode & - NOC_QOS_MODES_ALL_PERM); + reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, + qos_delta)); + val = 
reg_mode->write << NOC_QOS_REGUL0CTL_WRENn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_WRENn_BMSK))) | + (val & NOC_QOS_REGUL0CTL_WRENn_BMSK)), + NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta)); + + reg_val = readl_relaxed(NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, + qos_delta)); + val = reg_mode->read << NOC_QOS_REGUL0CTL_RDENn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_REGUL0CTL_RDENn_BMSK))) | + (val & NOC_QOS_REGUL0CTL_RDENn_BMSK)), + NOC_QOS_REGUL0CTLn_ADDR(base, qos_off, mport, qos_delta)); + + /* Ensure regulator is ready before exiting */ + wmb(); } -void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off, - uint32_t mport, uint32_t qos_delta, - struct msm_bus_noc_qos_priority *priority) +static void noc_set_qos_forwarding(void __iomem *base, uint32_t qos_off, + uint32_t mport, uint32_t qos_delta, + bool urg_fwd_en) { - priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, - mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >> - NOC_QOS_PRIORITYn_P1_SHFT; + uint32_t reg_val, val; - priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, - mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >> - NOC_QOS_PRIORITYn_P0_SHFT; + reg_val = readl_relaxed(NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, + qos_delta)); + val = (urg_fwd_en ? 1:0) << NOC_QOS_MCTL_URGFWD_ENn_SHFT; + writel_relaxed(((reg_val & (~(NOC_QOS_MCTL_URGFWD_ENn_BMSK))) | + (val & NOC_QOS_MCTL_URGFWD_ENn_BMSK)), + NOC_QOS_MAINCTL_LOWn_ADDR(base, qos_off, mport, qos_delta)); + + /* Ensure qos priority is set before exiting */ + wmb(); } void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off, @@ -336,28 +316,16 @@ void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off, } } -static bool msm_bus_noc_update_bw_reg(int mode) -{ - bool ret = false; - - if ((mode == NOC_QOS_MODE_LIMITER) || - (mode == NOC_QOS_MODE_REGULATOR)) - ret = true; - - return ret; -} - static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info, void __iomem *qos_base, uint32_t qos_off, uint32_t qos_delta, uint32_t qos_freq) { - struct msm_bus_noc_qos_priority prio; + struct msm_bus_noc_qos_params *qos_params; int ret = 0; int i; - prio.p1 = info->node_info->qos_params.prio1; - prio.p0 = info->node_info->qos_params.prio0; + qos_params = &info->node_info->qos_params; if (!info->node_info->qport) { MSM_BUS_DBG("No QoS Ports to init\n"); @@ -366,212 +334,29 @@ static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info, } for (i = 0; i < info->node_info->num_qports; i++) { - if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) { - noc_set_qos_priority(qos_base, qos_off, - info->node_info->qport[i], qos_delta, - &prio); - - if (info->node_info->qos_params.mode != - NOC_QOS_MODE_FIXED) { - struct msm_bus_noc_qos_bw qbw; - - qbw.ws = info->node_info->qos_params.ws; - qbw.bw = 0; - msm_bus_noc_set_qos_bw(qos_base, qos_off, - qos_freq, + noc_set_qos_dflt_prio(qos_base, qos_off, info->node_info->qport[i], qos_delta, - info->node_info->qos_params.mode, - &qbw); - } - } - - noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i], - qos_delta, info->node_info->qos_params.mode, - (1 << info->node_info->qos_params.mode)); - } -err_qos_init: - return ret; -} - -static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev, - void __iomem *qos_base, - uint32_t qos_off, uint32_t qos_delta, - uint32_t qos_freq) -{ - int ret = 0; - uint64_t bw = 0; - int i; - struct msm_bus_node_info_type *info = dev->node_info; - - if (info && info->num_qports && - 
((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) || - (info->qos_params.mode == - NOC_QOS_MODE_LIMITER))) { - struct msm_bus_noc_qos_bw qos_bw; - - bw = msm_bus_div64(info->num_qports, - dev->node_bw[ACTIVE_CTX].sum_ab); - - for (i = 0; i < info->num_qports; i++) { - if (!info->qport) { - MSM_BUS_DBG("No qos ports to update!\n"); - break; - } - - qos_bw.bw = bw; - qos_bw.ws = info->qos_params.ws; - msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq, - info->qport[i], qos_delta, - (1 << info->qos_params.mode), &qos_bw); - MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n", - qos_bw.ws); - } - } - return ret; -} - -static int msm_bus_noc_set_lim_mode(struct msm_bus_node_device_type *info, - void __iomem *qos_base, uint32_t qos_off, - uint32_t qos_delta, uint32_t qos_freq, - u64 lim_bw) -{ - int i; + qos_params->prio_dflt); - if (info && info->node_info->num_qports) { - struct msm_bus_noc_qos_bw qos_bw; - - if (lim_bw != info->node_info->lim_bw) { - for (i = 0; i < info->node_info->num_qports; i++) { - qos_bw.bw = lim_bw; - qos_bw.ws = info->node_info->qos_params.ws; - msm_bus_noc_set_qos_bw(qos_base, - qos_off, qos_freq, - info->node_info->qport[i], qos_delta, - (1 << NOC_QOS_MODE_LIMITER), &qos_bw); - } - info->node_info->lim_bw = lim_bw; - } - - for (i = 0; i < info->node_info->num_qports; i++) { - noc_set_qos_mode(qos_base, qos_off, + noc_set_qos_limiter(qos_base, qos_off, info->node_info->qport[i], qos_delta, - NOC_QOS_MODE_LIMITER, - (1 << NOC_QOS_MODE_LIMITER)); - } - } - - return 0; -} + &qos_params->limiter, + qos_params->limiter_en); -static int msm_bus_noc_set_reg_mode(struct msm_bus_node_device_type *info, - void __iomem *qos_base, uint32_t qos_off, - uint32_t qos_delta, uint32_t qos_freq, - u64 lim_bw) -{ - int i; - - if (info && info->node_info->num_qports) { - struct msm_bus_noc_qos_priority prio; - struct msm_bus_noc_qos_bw qos_bw; - - for (i = 0; i < info->node_info->num_qports; i++) { - prio.p1 = - info->node_info->qos_params.reg_prio1; - prio.p0 = - info->node_info->qos_params.reg_prio0; - noc_set_qos_priority(qos_base, qos_off, - info->node_info->qport[i], - qos_delta, - &prio); - } - - if (lim_bw != info->node_info->lim_bw) { - for (i = 0; i < info->node_info->num_qports; i++) { - qos_bw.bw = lim_bw; - qos_bw.ws = info->node_info->qos_params.ws; - msm_bus_noc_set_qos_bw(qos_base, qos_off, - qos_freq, - info->node_info->qport[i], qos_delta, - (1 << NOC_QOS_MODE_REGULATOR), &qos_bw); - } - info->node_info->lim_bw = lim_bw; - } - - for (i = 0; i < info->node_info->num_qports; i++) { - noc_set_qos_mode(qos_base, qos_off, + noc_set_qos_regulator(qos_base, qos_off, info->node_info->qport[i], qos_delta, - NOC_QOS_MODE_REGULATOR, - (1 << NOC_QOS_MODE_REGULATOR)); - } - } - return 0; -} - -static int msm_bus_noc_set_def_mode(struct msm_bus_node_device_type *info, - void __iomem *qos_base, uint32_t qos_off, - uint32_t qos_delta, uint32_t qos_freq, - u64 lim_bw) -{ - int i; + &qos_params->reg, + &qos_params->reg_mode); - for (i = 0; i < info->node_info->num_qports; i++) { - if (info->node_info->qos_params.mode == - NOC_QOS_MODE_FIXED) { - struct msm_bus_noc_qos_priority prio; - - prio.p1 = - info->node_info->qos_params.prio1; - prio.p0 = - info->node_info->qos_params.prio0; - noc_set_qos_priority(qos_base, qos_off, + noc_set_qos_forwarding(qos_base, qos_off, info->node_info->qport[i], - qos_delta, &prio); - } - noc_set_qos_mode(qos_base, qos_off, - info->node_info->qport[i], - qos_delta, - info->node_info->qos_params.mode, - (1 << info->node_info->qos_params.mode)); - } - return 0; 
-} - -static int msm_bus_noc_limit_mport(struct msm_bus_node_device_type *info, - void __iomem *qos_base, uint32_t qos_off, - uint32_t qos_delta, uint32_t qos_freq, - int enable_lim, u64 lim_bw) -{ - int ret = 0; - - if (!(info && info->node_info->num_qports)) { - MSM_BUS_ERR("Invalid Node info or no Qports to program"); - ret = -ENXIO; - goto exit_limit_mport; + qos_delta, + qos_params->urg_fwd_en); } - - if (lim_bw) { - switch (enable_lim) { - case THROTTLE_REG: - msm_bus_noc_set_reg_mode(info, qos_base, qos_off, - qos_delta, qos_freq, lim_bw); - break; - case THROTTLE_ON: - msm_bus_noc_set_lim_mode(info, qos_base, qos_off, - qos_delta, qos_freq, lim_bw); - break; - default: - msm_bus_noc_set_def_mode(info, qos_base, qos_off, - qos_delta, qos_freq, lim_bw); - break; - } - } else - msm_bus_noc_set_def_mode(info, qos_base, qos_off, - qos_delta, qos_freq, lim_bw); - -exit_limit_mport: +err_qos_init: return ret; } @@ -581,9 +366,6 @@ int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev) return -ENODEV; bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init; - bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw; - bus_dev->fabdev->noc_ops.limit_mport = msm_bus_noc_limit_mport; - bus_dev->fabdev->noc_ops.update_bw_reg = msm_bus_noc_update_bw_reg; return 0; } diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of.c b/drivers/soc/qcom/msm_bus/msm_bus_of.c index fd72ae6c28db..34ba05f35f05 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_of.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_of.c @@ -113,9 +113,9 @@ static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev, int index = i * 2; usecase_lat[i].fal_ns = (uint64_t) - KBTOB(be32_to_cpu(vec_arr[index])); + be32_to_cpu(vec_arr[index]); usecase_lat[i].idle_t_ns = (uint64_t) - KBTOB(be32_to_cpu(vec_arr[index + 1])); + be32_to_cpu(vec_arr[index + 1]); } pdata->usecase_lat = usecase_lat; diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c index 5710bca8cd9f..42a6f58af760 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c @@ -31,31 +31,6 @@ #define DEFAULT_VRAIL_COMP 100 #define DEFAULT_AGG_SCHEME AGG_SCHEME_LEG -static int get_qos_mode(struct platform_device *pdev, - struct device_node *node, const char *qos_mode) -{ - static char const *qos_names[] = {"fixed", "limiter", - "bypass", "regulator"}; - int i = 0; - int ret = -1; - - if (!qos_mode) - goto exit_get_qos_mode; - - for (i = 0; i < ARRAY_SIZE(qos_names); i++) { - if (!strcmp(qos_mode, qos_names[i])) - break; - } - if (i == ARRAY_SIZE(qos_names)) - dev_err(&pdev->dev, "Cannot match mode qos %s using Bypass", - qos_mode); - else - ret = i; - -exit_get_qos_mode: - return ret; -} - static int *get_arr(struct platform_device *pdev, struct device_node *node, const char *prop, int *nports) @@ -210,7 +185,6 @@ static struct msm_bus_fab_device_type *get_fab_device_info( fab_dev->qos_freq = DEFAULT_QOS_FREQ; } - return fab_dev; fab_dev_err: @@ -224,54 +198,48 @@ static void get_qos_params( struct platform_device * const pdev, struct msm_bus_node_info_type *node_info) { - const char *qos_mode = NULL; - unsigned int ret; - unsigned int temp; - - ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode); - - if (ret) - node_info->qos_params.mode = -1; - else - node_info->qos_params.mode = get_qos_mode(pdev, dev_node, - qos_mode); + const uint32_t *vec_arr = NULL; + int len; - of_property_read_u32(dev_node, "qcom,prio-lvl", - &node_info->qos_params.prio_lvl); + 
of_property_read_u32(dev_node, "qcom,prio", + &node_info->qos_params.prio_dflt); - of_property_read_u32(dev_node, "qcom,prio1", - &node_info->qos_params.prio1); - - of_property_read_u32(dev_node, "qcom,prio0", - &node_info->qos_params.prio0); - - of_property_read_u32(dev_node, "qcom,reg-prio1", - &node_info->qos_params.reg_prio1); - - of_property_read_u32(dev_node, "qcom,reg-prio0", - &node_info->qos_params.reg_prio0); - - of_property_read_u32(dev_node, "qcom,prio-rd", - &node_info->qos_params.prio_rd); - - of_property_read_u32(dev_node, "qcom,prio-wr", - &node_info->qos_params.prio_wr); - - of_property_read_u32(dev_node, "qcom,gp", - &node_info->qos_params.gp); + vec_arr = of_get_property(dev_node, "qcom,lim-params", &len); + if (vec_arr != NULL && len == sizeof(uint32_t) * 2) { + node_info->qos_params.limiter.bw = be32_to_cpu(vec_arr[0]); + node_info->qos_params.limiter.sat = be32_to_cpu(vec_arr[1]); + } else { + node_info->qos_params.limiter.bw = 0; + node_info->qos_params.limiter.sat = 0; + } - of_property_read_u32(dev_node, "qcom,thmp", - &node_info->qos_params.thmp); + node_info->qos_params.limiter_en = of_property_read_bool(dev_node, + "qcom,lim-en"); - of_property_read_u32(dev_node, "qcom,ws", - &node_info->qos_params.ws); + vec_arr = of_get_property(dev_node, "qcom,qos-reg-params", &len); + if (vec_arr != NULL && len == sizeof(uint32_t) * 4) { + node_info->qos_params.reg.low_prio = be32_to_cpu(vec_arr[0]); + node_info->qos_params.reg.hi_prio = be32_to_cpu(vec_arr[1]); + node_info->qos_params.reg.bw = be32_to_cpu(vec_arr[2]); + node_info->qos_params.reg.sat = be32_to_cpu(vec_arr[3]); + } else { + node_info->qos_params.reg.low_prio = 0; + node_info->qos_params.reg.hi_prio = 0; + node_info->qos_params.reg.bw = 0; + node_info->qos_params.reg.sat = 0; + } - ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp); + vec_arr = of_get_property(dev_node, "qcom,qos-reg-mode", &len); + if (vec_arr != NULL && len == sizeof(uint32_t) * 2) { + node_info->qos_params.reg_mode.read = be32_to_cpu(vec_arr[0]); + node_info->qos_params.reg_mode.write = be32_to_cpu(vec_arr[1]); + } else { + node_info->qos_params.reg_mode.read = 0; + node_info->qos_params.reg_mode.write = 0; + } - if (ret) - node_info->qos_params.bw_buffer = 0; - else - node_info->qos_params.bw_buffer = KBTOB(temp); + node_info->qos_params.urg_fwd_en = of_property_read_bool(dev_node, + "qcom,forwarding"); } @@ -308,13 +276,9 @@ static int msm_bus_of_parse_clk_array(struct device_node *dev_node, char gdsc_string[MAX_REG_NAME]; (*clk_arr)[idx].clk = of_clk_get_by_name(dev_node, clk_name); + if (IS_ERR_OR_NULL((*clk_arr)[idx].clk)) + goto exit_of_parse_clk_array; - if (IS_ERR_OR_NULL((*clk_arr)[idx].clk)) { - dev_err(&pdev->dev, - "Failed to get clk %s for bus%d ", clk_name, - id); - continue; - } if (strnstr(clk_name, "no-rate", strlen(clk_name))) (*clk_arr)[idx].enable_only_clk = true; @@ -532,6 +496,10 @@ static int get_bus_node_device_data( { bool enable_only; bool setrate_only; + int num_elems = 0, num_bcms = 0, i = 0, ret = 0; + uint32_t *vec_arr = NULL; + struct qos_bcm_type *qos_bcms = NULL; + struct device_node *qos_clk_node = NULL; node_device->node_info = get_node_info_data(dev_node, pdev); if (IS_ERR_OR_NULL(node_device->node_info)) { @@ -566,8 +534,6 @@ static int get_bus_node_device_data( } if (node_device->node_info->is_fab_dev) { - struct device_node *qos_clk_node; - dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id); if (!node_device->node_info->virt_dev) { @@ -615,6 +581,48 @@ static int 
get_bus_node_device_data( of_node_put(qos_clk_node); } } else { + num_elems = of_property_count_elems_of_size(dev_node, + "qcom,node-qos-bcms", sizeof(uint32_t)); + + if (num_elems > 0) { + if (num_elems % 3 != 0) { + pr_err("Error: Length-error on getting vectors\n"); + return -ENODATA; + } + + vec_arr = devm_kzalloc(&pdev->dev, (sizeof(uint32_t) * + num_elems), GFP_KERNEL); + if (!vec_arr) + return -ENOMEM; + + ret = of_property_read_u32_array(dev_node, + "qcom,node-qos-bcms", vec_arr, + num_elems); + if (ret) { + pr_err("Error: problem reading qos-bcm vectors\n"); + return ret; + } + num_bcms = num_elems / 3; + node_device->num_qos_bcms = num_bcms; + + qos_bcms = devm_kzalloc(&pdev->dev, + (sizeof(struct qos_bcm_type) * + num_bcms), GFP_KERNEL); + if (!qos_bcms) + return -ENOMEM; + + for (i = 0; i < num_bcms; i++) { + int index = i * 3; + + qos_bcms[i].qos_bcm_id = vec_arr[index]; + qos_bcms[i].vec.vec_a = + (uint64_t)KBTOB(vec_arr[index + 1]); + qos_bcms[i].vec.vec_b = + (uint64_t)KBTOB(vec_arr[index + 2]); + } + node_device->qos_bcms = qos_bcms; + } + enable_only = of_property_read_bool(dev_node, "qcom,enable-only-clk"); node_device->clk[DUAL_CTX].enable_only_clk = enable_only; @@ -632,6 +640,20 @@ static int get_bus_node_device_data( setrate_only; } + qos_clk_node = of_get_child_by_name(dev_node, + "qcom,node-qos-clks"); + + if (qos_clk_node) { + if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node, + pdev, + &node_device->node_qos_clks, + &node_device->num_node_qos_clks, + node_device->node_info->id)) { + dev_dbg(&pdev->dev, "Bypass QoS programming"); + node_device->fabdev->bypass_qos_prg = true; + } + of_node_put(qos_clk_node); + } node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node, "node_clk"); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h index cd5281a9a9b0..17657e55bc8b 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h +++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h @@ -19,6 +19,7 @@ #include #include #include "msm_bus_core.h" +#include "msm_bus_noc.h" #define VCD_MAX_CNT 16 @@ -75,6 +76,11 @@ struct nodevector { uint64_t query_vec_b; }; +struct qos_bcm_type { + int qos_bcm_id; + struct nodevector vec; +}; + struct msm_bus_rsc_device_type { struct rpmh_client *mbox; struct list_head bcm_clist[VCD_MAX_CNT]; @@ -106,19 +112,30 @@ struct msm_bus_fab_device_type { bool bypass_qos_prg; }; -struct qos_params_type { - int mode; - unsigned int prio_lvl; - unsigned int prio_rd; - unsigned int prio_wr; - unsigned int prio1; - unsigned int prio0; - unsigned int reg_prio1; - unsigned int reg_prio0; - unsigned int gp; - unsigned int thmp; - unsigned int ws; - u64 bw_buffer; +struct msm_bus_noc_limiter { + uint32_t bw; + uint32_t sat; +}; + +struct msm_bus_noc_regulator { + uint32_t low_prio; + uint32_t hi_prio; + uint32_t bw; + uint32_t sat; +}; + +struct msm_bus_noc_regulator_mode { + uint32_t read; + uint32_t write; +}; + +struct msm_bus_noc_qos_params { + uint32_t prio_dflt; + struct msm_bus_noc_limiter limiter; + bool limiter_en; + struct msm_bus_noc_regulator reg; + struct msm_bus_noc_regulator_mode reg_mode; + bool urg_fwd_en; }; struct node_util_levels_type { @@ -143,7 +160,7 @@ struct msm_bus_node_info_type { int num_ports; int num_qports; int *qport; - struct qos_params_type qos_params; + struct msm_bus_noc_qos_params qos_params; unsigned int num_connections; unsigned int num_blist; unsigned int num_bcm_devs; @@ -185,6 +202,8 @@ struct msm_bus_node_device_type { struct nodeclk bus_qos_clk; uint32_t num_node_qos_clks; 
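/*
 * The qos_bcms fields added below record which BCMs must be voted on
 * (through a single RPMH commit) before this node's QoS registers are
 * touched, so the QoS register space is guaranteed to be clocked
 * while it is being configured.
 */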
struct nodeclk *node_qos_clks; + uint32_t num_qos_bcms; + struct qos_bcm_type *qos_bcms; unsigned int ap_owned; struct device_node *of_node; struct device dev; -- GitLab From d039a4e438df824f3213f2e63295d334e3ad935f Mon Sep 17 00:00:00 2001 From: Runmin Wang Date: Tue, 20 Jun 2017 14:56:56 -0700 Subject: [PATCH 187/786] ARM: dts: msm: Add imem and restart DT node for sdxpoorwills Add imem and restart DT node for sdxpoorwills to enable restart and DLOAD mode. Change-Id: I8a436373f26f647a5518f17c7535044d887d07a5 Signed-off-by: Runmin Wang --- arch/arm/boot/dts/qcom/sdxpoorwills.dtsi | 31 ++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi index 961adc9f2186..0093c1663e8e 100644 --- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi +++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi @@ -180,6 +180,37 @@ reg = <0x00137004 0x4>; status = "ok"; }; + + qcom,msm-imem@8600000 { + compatible = "qcom,msm-imem"; + reg = <0x8600000 0x1000>; /* Address and size of IMEM */ + ranges = <0x0 0x8600000 0x1000>; + #address-cells = <1>; + #size-cells = <1>; + + mem_dump_table@10 { + compatible = "qcom,msm-imem-mem_dump_table"; + reg = <0x10 8>; + }; + + restart_reason@65c { + compatible = "qcom,msm-imem-restart_reason"; + reg = <0x65c 4>; + }; + + boot_stats@6b0 { + compatible = "qcom,msm-imem-boot_stats"; + reg = <0x6b0 32>; + }; + }; + + restart@4ab000 { + compatible = "qcom,pshold"; + reg = <0x4ab000 0x4>, + <0x193d100 0x4>; + reg-names = "pshold-base", "tcsr-boot-misc-detect"; + }; + }; #include "sdxpoorwills-regulator.dtsi" -- GitLab From 26182b50fb53aedba4a4a1bb60b8464e14964be9 Mon Sep 17 00:00:00 2001 From: Jeevan Shriram Date: Mon, 12 Jun 2017 16:21:45 -0700 Subject: [PATCH 188/786] defconfig: sdxpoorwills: enable POWER_RESET and DLOAD_MODE Enable POWER_RESET and DLOAD_MODE for sdxpoorwills target. Change-Id: Id4360930a0122baf7582ca18262c57578a4d87f4 Signed-off-by: Runmin Wang Signed-off-by: Jeevan Shriram --- arch/arm/configs/sdxpoorwills-perf_defconfig | 2 ++ arch/arm/configs/sdxpoorwills_defconfig | 2 ++ 2 files changed, 4 insertions(+) diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig index 40289a8dc299..0afbc259f291 100644 --- a/arch/arm/configs/sdxpoorwills-perf_defconfig +++ b/arch/arm/configs/sdxpoorwills-perf_defconfig @@ -207,6 +207,8 @@ CONFIG_PINCTRL_SDXPOORWILLS=y CONFIG_DEBUG_GPIO=y CONFIG_GPIO_SYSFS=y CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y CONFIG_POWER_SUPPLY=y CONFIG_THERMAL=y CONFIG_REGULATOR=y diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig index d91f5f625b02..4b4099158e1a 100644 --- a/arch/arm/configs/sdxpoorwills_defconfig +++ b/arch/arm/configs/sdxpoorwills_defconfig @@ -200,6 +200,8 @@ CONFIG_SPI_SPIDEV=m CONFIG_SLIMBUS=y CONFIG_PINCTRL_SDXPOORWILLS=y CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y CONFIG_POWER_SUPPLY=y CONFIG_THERMAL=y CONFIG_MSM_CDC_PINCTRL=y -- GitLab From eddcf45d62c7c6034858f51de69205c2eabe5f3a Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Wed, 5 Oct 2016 15:01:58 -0700 Subject: [PATCH 189/786] diag: Update msg, log and event information Update the diag header with latest log, message and event information to match the list maintained by the peripherals. 
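Each MSG_SSID_n / MSG_SSID_n_LAST pair defines an inclusive SSID range, and the corresponding msg_bld_masks_n[] table is expected to carry one build-mask entry per SSID in that range, which is why raising a _LAST value also appends entries to the matching array. For example, after this change:

    #define MSG_SSID_0       0
    #define MSG_SSID_0_LAST  120	/* 121 SSIDs, one msg_bld_masks_0[] entry each */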
CRs-Fixed: 1074803 Change-Id: I2df3304029a05ce0f4cb11551e9ed768cd89f8b5 Signed-off-by: Chris Lew --- include/linux/diagchar.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h index 3b5c7bfc4df4..e65d87130971 100644 --- a/include/linux/diagchar.h +++ b/include/linux/diagchar.h @@ -145,10 +145,10 @@ * a new RANGE of SSIDs to the msg_mask_tbl. */ #define MSG_MASK_TBL_CNT 25 -#define APPS_EVENT_LAST_ID 0x0B14 +#define APPS_EVENT_LAST_ID 0x0B2A #define MSG_SSID_0 0 -#define MSG_SSID_0_LAST 118 +#define MSG_SSID_0_LAST 120 #define MSG_SSID_1 500 #define MSG_SSID_1_LAST 506 #define MSG_SSID_2 1000 @@ -164,7 +164,7 @@ #define MSG_SSID_7 4600 #define MSG_SSID_7_LAST 4615 #define MSG_SSID_8 5000 -#define MSG_SSID_8_LAST 5032 +#define MSG_SSID_8_LAST 5033 #define MSG_SSID_9 5500 #define MSG_SSID_9_LAST 5516 #define MSG_SSID_10 6000 @@ -347,7 +347,9 @@ static const uint32_t msg_bld_masks_0[] = { MSG_LVL_FATAL, MSG_LVL_MED, MSG_LVL_MED, - MSG_LVL_HIGH + MSG_LVL_HIGH, + MSG_LVL_LOW, + MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL }; static const uint32_t msg_bld_masks_1[] = { @@ -546,7 +548,8 @@ static const uint32_t msg_bld_masks_8[] = { MSG_LVL_MED, MSG_LVL_MED, MSG_LVL_MED, - MSG_LVL_MED + MSG_LVL_MED, + MSG_LVL_HIGH }; static const uint32_t msg_bld_masks_9[] = { @@ -869,7 +872,7 @@ static const uint32_t msg_bld_masks_23[] = { /* LOG CODES */ static const uint32_t log_code_last_tbl[] = { 0x0, /* EQUIP ID 0 */ - 0x1966, /* EQUIP ID 1 */ + 0x1A02, /* EQUIP ID 1 */ 0x0, /* EQUIP ID 2 */ 0x0, /* EQUIP ID 3 */ 0x4910, /* EQUIP ID 4 */ -- GitLab From 5b5cd0ac67039c2316cd52d71db86a12171bae8e Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Tue, 11 Oct 2016 17:47:48 -0700 Subject: [PATCH 190/786] diag: Add MSG SSID for DPM Update diag header with new MSG SSID for DPM. CRs-Fixed: 1076696 Change-Id: I681509b653c419159f6ca4172c5ff66a1cae1b79 Signed-off-by: Chris Lew --- include/linux/diagchar.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h index e65d87130971..fd57cad947a9 100644 --- a/include/linux/diagchar.h +++ b/include/linux/diagchar.h @@ -194,7 +194,7 @@ #define MSG_SSID_22 10350 #define MSG_SSID_22_LAST 10377 #define MSG_SSID_23 10400 -#define MSG_SSID_23_LAST 10415 +#define MSG_SSID_23_LAST 10416 #define MSG_SSID_24 0xC000 #define MSG_SSID_24_LAST 0xC063 @@ -866,6 +866,7 @@ static const uint32_t msg_bld_masks_23[] = { MSG_LVL_LOW, MSG_LVL_LOW, MSG_LVL_LOW, + MSG_LVL_LOW, MSG_LVL_LOW }; -- GitLab From 2aaae994d5beb78ae64321538181d3a1b1452d64 Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Thu, 16 Mar 2017 17:20:45 -0700 Subject: [PATCH 191/786] diag: Update msg, log and event information Update the diag header with latest log, message and event information to match the list maintained by the peripherals. CRs-Fixed: 2020864 Change-Id: Icbed01bb4f90fb7d72fe7517ee6964d799f1d48e Signed-off-by: Chris Lew --- include/linux/diagchar.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h index fd57cad947a9..483b7ce6d86a 100644 --- a/include/linux/diagchar.h +++ b/include/linux/diagchar.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -145,10 +145,10 @@ * a new RANGE of SSIDs to the msg_mask_tbl. */ #define MSG_MASK_TBL_CNT 25 -#define APPS_EVENT_LAST_ID 0x0B2A +#define APPS_EVENT_LAST_ID 0x0B3F #define MSG_SSID_0 0 -#define MSG_SSID_0_LAST 120 +#define MSG_SSID_0_LAST 121 #define MSG_SSID_1 500 #define MSG_SSID_1_LAST 506 #define MSG_SSID_2 1000 @@ -160,7 +160,7 @@ #define MSG_SSID_5 4000 #define MSG_SSID_5_LAST 4010 #define MSG_SSID_6 4500 -#define MSG_SSID_6_LAST 4573 +#define MSG_SSID_6_LAST 4583 #define MSG_SSID_7 4600 #define MSG_SSID_7_LAST 4615 #define MSG_SSID_8 5000 @@ -184,7 +184,7 @@ #define MSG_SSID_17 9000 #define MSG_SSID_17_LAST 9008 #define MSG_SSID_18 9500 -#define MSG_SSID_18_LAST 9510 +#define MSG_SSID_18_LAST 9521 #define MSG_SSID_19 10200 #define MSG_SSID_19_LAST 10210 #define MSG_SSID_20 10251 @@ -349,7 +349,8 @@ static const uint32_t msg_bld_masks_0[] = { MSG_LVL_MED, MSG_LVL_HIGH, MSG_LVL_LOW, - MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL + MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL, + MSG_LVL_HIGH }; static const uint32_t msg_bld_masks_1[] = { @@ -873,7 +874,7 @@ static const uint32_t msg_bld_masks_23[] = { /* LOG CODES */ static const uint32_t log_code_last_tbl[] = { 0x0, /* EQUIP ID 0 */ - 0x1A02, /* EQUIP ID 1 */ + 0x1A11, /* EQUIP ID 1 */ 0x0, /* EQUIP ID 2 */ 0x0, /* EQUIP ID 3 */ 0x4910, /* EQUIP ID 4 */ -- GitLab From d731640274d3220efd224879ccf27f1620ce9b1f Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Mon, 19 Jun 2017 15:42:50 -0700 Subject: [PATCH 192/786] diag: Update msg mask tables for new SSID Update the diag headers and msg mask tables to account for a new ssid group. CRs-Fixed: 2055789 Change-Id: I3abd816d5e99bff263b753fb1070a2672ecea46c Signed-off-by: Chris Lew --- drivers/char/diag/diag_masks.c | 3 ++- include/linux/diagchar.h | 21 ++++++++++++++++++--- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c index c4d378edf8de..b5a594a86991 100644 --- a/drivers/char/diag/diag_masks.c +++ b/drivers/char/diag/diag_masks.c @@ -60,7 +60,8 @@ static const struct diag_ssid_range_t msg_mask_tbl[] = { { .ssid_first = MSG_SSID_21, .ssid_last = MSG_SSID_21_LAST }, { .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST }, { .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST }, - { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST } + { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST }, + { .ssid_first = MSG_SSID_25, .ssid_last = MSG_SSID_25_LAST } }; static int diag_apps_responds(void) diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h index 483b7ce6d86a..1731c3a0e815 100644 --- a/include/linux/diagchar.h +++ b/include/linux/diagchar.h @@ -144,7 +144,7 @@ /* This needs to be modified manually now, when we add * a new RANGE of SSIDs to the msg_mask_tbl. 
*/ -#define MSG_MASK_TBL_CNT 25 +#define MSG_MASK_TBL_CNT 26 #define APPS_EVENT_LAST_ID 0x0B3F #define MSG_SSID_0 0 @@ -195,8 +195,10 @@ #define MSG_SSID_22_LAST 10377 #define MSG_SSID_23 10400 #define MSG_SSID_23_LAST 10416 -#define MSG_SSID_24 0xC000 -#define MSG_SSID_24_LAST 0xC063 +#define MSG_SSID_24 10500 +#define MSG_SSID_24_LAST 10505 +#define MSG_SSID_25 0xC000 +#define MSG_SSID_25_LAST 0xC063 static const uint32_t msg_bld_masks_0[] = { MSG_LVL_LOW, @@ -871,6 +873,19 @@ static const uint32_t msg_bld_masks_23[] = { MSG_LVL_LOW }; +static const uint32_t msg_bld_masks_24[] = { + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH, + MSG_LVL_HIGH +}; + +static const uint32_t msg_bld_masks_25[] = { + MSG_LVL_LOW +}; + /* LOG CODES */ static const uint32_t log_code_last_tbl[] = { 0x0, /* EQUIP ID 0 */ -- GitLab From c8ad70ff2482124e95d85087e431b3d488e5d443 Mon Sep 17 00:00:00 2001 From: Sayali Lokhande Date: Wed, 14 Dec 2016 11:10:55 +0530 Subject: [PATCH 193/786] mmc: sdhci-msm: Cache mmc data structures for debugging Add debug RAM to store few important mmc data structures on first point of failure(like mmc_card, mmc_host, sdhci_host) for debugging purpose, otherwise it will be overwritten as BUG_ON is removed now. Change-Id: Ia1388a77aeed60d4d49a63b8798d3a925a60ac2c Signed-off-by: Sayali Lokhande --- drivers/mmc/host/sdhci-msm.c | 16 ++++++++++++++++ drivers/mmc/host/sdhci-msm.h | 7 +++++++ 2 files changed, 23 insertions(+) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index caf8dd1ec380..05d8a52e63df 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -3270,6 +3270,21 @@ static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host) pr_err("-------------------------\n"); } +static void sdhci_msm_cache_debug_data(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data; + + memcpy(&cached_data->copy_mmc, msm_host->mmc, + sizeof(struct mmc_host)); + if (msm_host->mmc->card) + memcpy(&cached_data->copy_card, msm_host->mmc->card, + sizeof(struct mmc_card)); + memcpy(&cached_data->copy_host, host, + sizeof(struct sdhci_host)); +} + void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -3281,6 +3296,7 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host) u32 test_bus_val = 0; u32 debug_reg[MAX_TEST_BUS] = {0}; + sdhci_msm_cache_debug_data(host); pr_info("----------- VENDOR REGISTER DUMP -----------\n"); if (host->cq_host) sdhci_msm_cmdq_dump_debug_ram(host); diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h index a7577d9ab91f..2b907e94f7d1 100644 --- a/drivers/mmc/host/sdhci-msm.h +++ b/drivers/mmc/host/sdhci-msm.h @@ -170,6 +170,12 @@ struct sdhci_msm_ice_data { int state; }; +struct sdhci_msm_debug_data { + struct mmc_host copy_mmc; + struct mmc_card copy_card; + struct sdhci_host copy_host; +}; + struct sdhci_msm_host { struct platform_device *pdev; void __iomem *core_mem; /* MSM SDCC mapped address */ @@ -185,6 +191,7 @@ struct sdhci_msm_host { atomic_t clks_on; /* Set if clocks are enabled */ struct sdhci_msm_pltfm_data *pdata; struct mmc_host *mmc; + struct sdhci_msm_debug_data cached_data; struct sdhci_pltfm_data sdhci_msm_pdata; u32 curr_pwr_state; u32 curr_io_level; -- GitLab From bf795c53a4f26f6a500d830cd9d62ad39bc63106 Mon Sep 17 00:00:00 2001 From: 
Veerabhadrarao Badiganti Date: Thu, 9 Feb 2017 20:23:17 +0530 Subject: [PATCH 194/786] mmc: core: Retry claim host in mmc_sd_detect Use mmc_try_claim_host with a timeout instead of mmc_claim_host in mmc_sd_detect. This is to ensure that the mmc rescan work item doesn't get blocked on claim_host for a longer period. In the pm_suspend path, we cancel the mmc_rescan work item. If this work item is already scheduled, suspend would be blocked till mmc_rescan gets finished. In case mmc_rescan is blocked on the claim_host lock, pm_suspend could get blocked for a longer period. This can result in a momentary UI freeze since pm_suspend is blocked for a longer duration. This change is to prevent this scenario. Change-Id: Ib93bae6745a153bad3579ae42f46c3c3a7c1b95a Signed-off-by: Veerabhadrarao Badiganti --- drivers/mmc/core/core.c | 32 ++++++++++++++++++++++++++++++++ drivers/mmc/core/sd.c | 12 +++++++++++- include/linux/mmc/core.h | 1 + 3 files changed, 44 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 3910d2d2aa81..545e26eabd84 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2271,6 +2271,38 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) } EXPORT_SYMBOL(__mmc_claim_host); +/** + * mmc_try_claim_host - try exclusively to claim a host + * and keep trying for given time, with a gap of 10ms + * @host: mmc host to claim + * @delay_ms: delay in ms + * + * Returns %1 if the host is claimed, %0 otherwise. + */ +int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms) +{ + int claimed_host = 0; + unsigned long flags; + int retry_cnt = delay_ms/10; + + do { + spin_lock_irqsave(&host->lock, flags); + if (!host->claimed || host->claimer == current) { + host->claimed = 1; + host->claimer = current; + host->claim_cnt += 1; + claimed_host = 1; + } + spin_unlock_irqrestore(&host->lock, flags); + if (!claimed_host) + mmc_delay(10); + } while (!claimed_host && retry_cnt--); + if (host->ops->enable && claimed_host && host->claim_cnt == 1) + host->ops->enable(host); + return claimed_host; +} +EXPORT_SYMBOL(mmc_try_claim_host); + /** * mmc_release_host - release a host * @host: mmc host to release diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 7112f9fcfe58..650f658f0d94 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1185,7 +1185,17 @@ static void mmc_sd_detect(struct mmc_host *host) BUG_ON(!host); BUG_ON(!host->card); - mmc_get_card(host->card); + /* + * Try to acquire claim host. If failed to get the lock in 2 sec, + * just return; This is to ensure that when this call is invoked + * due to pm_suspend, not to block suspend for longer duration. + */ + pm_runtime_get_sync(&host->card->dev); + if (!mmc_try_claim_host(host, 2000)) { + pm_runtime_mark_last_busy(&host->card->dev); + pm_runtime_put_autosuspend(&host->card->dev); + return; + } /* * Just check if our card has been removed.
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 227b1e2befcf..b71810518922 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -214,6 +214,7 @@ extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); extern int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); extern void mmc_release_host(struct mmc_host *host); +extern int mmc_try_claim_host(struct mmc_host *host, unsigned int delay); extern void mmc_get_card(struct mmc_card *card); extern void mmc_put_card(struct mmc_card *card); -- GitLab From 7a714cdb48de1d4f9e8a8398569ac65bdd0df4d5 Mon Sep 17 00:00:00 2001 From: Kyle Piefer Date: Wed, 21 Jun 2017 15:55:47 -0700 Subject: [PATCH 195/786] msm: kgsl: Use GMU cold boot sequence for hard reset During GPU hang recovery, KGSL goes to STATE_RESET. When starting GMU from STATE_RESET, use the cold boot initialization sequence instead of the warm boot sequence. This provides a completely clean starting point. CRs-Fixed: 2062271 Change-Id: Ie9b628775e61e05fd2231e167657882581b681d1 Signed-off-by: Kyle Piefer --- drivers/gpu/msm/adreno_a6xx.c | 11 ++++++----- drivers/gpu/msm/kgsl_gmu.c | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 33854eac5126..e9ea04676df9 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -1348,6 +1348,8 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device, int ret, i; switch (boot_state) { + case GMU_RESET: + /* fall through */ case GMU_COLD_BOOT: /* Turn on the HM and SPTP head switches */ ret = a6xx_hm_sptprac_enable(device); @@ -1363,6 +1365,10 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device, ret = a6xx_hm_sptprac_enable(device); if (ret) return ret; + } else if (boot_state == GMU_RESET) { + ret = a6xx_hm_sptprac_enable(device); + if (ret) + return ret; } else { ret = a6xx_rpmh_power_on_gpu(device); if (ret) @@ -1390,11 +1396,6 @@ static int a6xx_gmu_fw_start(struct kgsl_device *device, if (ret) return ret; break; - case GMU_RESET: - /* Turn on the HM and SPTP head switches */ - ret = a6xx_hm_sptprac_enable(device); - if (ret) - return ret; default: break; } diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 2a6e7ddee5c0..3d0dd77556af 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -1418,7 +1418,7 @@ int gmu_start(struct kgsl_device *device) gmu_irq_enable(device); - ret = hfi_start(gmu, GMU_WARM_BOOT); + ret = hfi_start(gmu, GMU_COLD_BOOT); if (ret) goto error_gpu; -- GitLab From db362e6e229edccffd12238e3ff181d7267e6163 Mon Sep 17 00:00:00 2001 From: Liangliang Lu Date: Sat, 3 Dec 2016 10:34:26 +0800 Subject: [PATCH 196/786] mmc: mmc-debugfs: add error state This change adds support to allow user space query if low level eMMC driver has encountered any error or not, this state can be read/cleared via debugfs. 
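For example, a user space test could poll and then clear the flag through debugfs. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug and the host of interest is mmc0 (both paths are assumptions, not part of this change):

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          char buf[8] = {0};
          /* Illustrative path: <debugfs mount>/mmc<N>/err_state */
          int fd = open("/sys/kernel/debug/mmc0/err_state", O_RDWR);

          if (fd < 0)
                  return 1;
          if (read(fd, buf, sizeof(buf) - 1) > 0)
                  printf("err_state: %s", buf); /* "1" means an error was captured */
          write(fd, "0", 1); /* any write clears err_occurred */
          close(fd);
          return 0;
  }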
CRs-Fixed: 1056483 Change-Id: Idc4ea375e9f308446dec04d443d062fe502658bd Signed-off-by: Liangliang Lu Signed-off-by: Sayali Lokhande --- drivers/mmc/core/debugfs.c | 31 +++++++++++++++++++++++++++++++ include/linux/mmc/host.h | 2 ++ 2 files changed, 33 insertions(+) diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 0c8ff8637fa5..d1a0235e85c5 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -354,6 +354,33 @@ static int mmc_force_err_set(void *data, u64 val) DEFINE_SIMPLE_ATTRIBUTE(mmc_force_err_fops, NULL, mmc_force_err_set, "%llu\n"); +static int mmc_err_state_get(void *data, u64 *val) +{ + struct mmc_host *host = data; + + if (!host) + return -EINVAL; + + *val = host->err_occurred ? 1 : 0; + + return 0; +} + +static int mmc_err_state_clear(void *data, u64 val) +{ + struct mmc_host *host = data; + + if (!host) + return -EINVAL; + + host->err_occurred = false; + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(mmc_err_state, mmc_err_state_get, + mmc_err_state_clear, "%llu\n"); + void mmc_add_host_debugfs(struct mmc_host *host) { struct dentry *root; @@ -399,6 +426,10 @@ void mmc_add_host_debugfs(struct mmc_host *host) root, host, &mmc_ring_buffer_fops)) goto err_node; #endif + if (!debugfs_create_file("err_state", S_IRUSR | S_IWUSR, root, host, + &mmc_err_state)) + goto err_node; + #ifdef CONFIG_MMC_CLKGATE if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR), root, &host->clk_delay)) diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index df841cff8331..9200069d41e5 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -559,6 +559,8 @@ struct mmc_host { struct dentry *debugfs_root; + bool err_occurred; + struct mmc_async_req *areq; /* active async req */ struct mmc_context_info context_info; /* async synchronization info */ -- GitLab From 092caac3acee3bf61d0a12a4a6efccca660037aa Mon Sep 17 00:00:00 2001 From: Liangliang Lu Date: Wed, 21 Dec 2016 14:02:02 +0800 Subject: [PATCH 197/786] mmc: sdhci: add err_state to sdhci_dumpregs func This change sets err_state in sdhci_dumpregs func indicating driver errors captured, which can be read out from debugfs. CRs-Fixed: 1056483 Change-Id: If6323f4e2cf9c835139ea92753ae8407709b8a70 Signed-off-by: Liangliang Lu Signed-off-by: Sayali Lokhande --- drivers/mmc/host/sdhci.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 4bb4c183318c..8fbcdae33570 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -148,6 +148,8 @@ static void sdhci_dumpregs(struct sdhci_host *host) readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); } + host->mmc->err_occurred = true; + if (host->ops->dump_vendor_regs) host->ops->dump_vendor_regs(host); sdhci_dump_state(host); -- GitLab From f364b545143559bb797b410d171802a8bdc0ecaa Mon Sep 17 00:00:00 2001 From: George Shen Date: Tue, 20 Jun 2017 17:02:43 -0700 Subject: [PATCH 198/786] msm: kgsl: Wait longer for CX headswitch off Due to scheduler latency, GPU driver has to wait longer for SMMU driver to release CX headswitch vote. The change also calls cond_resched() API to yield to scheduler to provide SMMU driver better chance to finish its job. 
CRs-Fixed: 2064195 Change-Id: Iee630f14a9958db42cb317686c90aed284471bf5 Signed-off-by: George Shen --- drivers/gpu/msm/kgsl_gmu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 2a6e7ddee5c0..cf6009495abe 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -1238,7 +1238,7 @@ static int gmu_enable_gdsc(struct gmu_device *gmu) return ret; } -#define CX_GDSC_TIMEOUT 10 /* ms */ +#define CX_GDSC_TIMEOUT 500 /* ms */ static int gmu_disable_gdsc(struct gmu_device *gmu) { int ret; @@ -1264,7 +1264,7 @@ static int gmu_disable_gdsc(struct gmu_device *gmu) do { if (!regulator_is_enabled(gmu->cx_gdsc)) return 0; - udelay(100); + cond_resched(); } while (!(time_after(jiffies, t))); -- GitLab From 28befbdf2baaa83eea9f71a32e0e9eb42ad90fa3 Mon Sep 17 00:00:00 2001 From: George Shen Date: Wed, 21 Jun 2017 13:59:34 -0700 Subject: [PATCH 199/786] Revert "msm: kgsl: Enable SPTP power collapse" This reverts commit c3b616abaa517a8d031d057b40178732ff1766bc. Enabling SPTP PC feature caused instability issue in PDT. Disable it until it's fixed. CRs-Fixed: 2052606 Change-Id: I6bb25816e01dbe170197599a169b4c9e4dffb1e4 Signed-off-by: George Shen --- drivers/gpu/msm/adreno-gpulist.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h index 86a1e88b2942..1e94372b2867 100644 --- a/drivers/gpu/msm/adreno-gpulist.h +++ b/drivers/gpu/msm/adreno-gpulist.h @@ -327,8 +327,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = { .minor = 0, .patchid = ANY_ID, .features = ADRENO_64BIT | ADRENO_RPMH | - ADRENO_GPMU | ADRENO_CONTENT_PROTECTION | - ADRENO_SPTP_PC, + ADRENO_GPMU | ADRENO_CONTENT_PROTECTION, .sqefw_name = "a630_sqe.fw", .zap_name = "a630_zap", .gpudev = &adreno_a6xx_gpudev, -- GitLab From 6f18847ea153ca161ce389f2f865436394aa2fd6 Mon Sep 17 00:00:00 2001 From: Joonwoo Park Date: Fri, 16 Jun 2017 11:32:17 -0700 Subject: [PATCH 200/786] sched: WALT: introduce sched_group_{up,down}_migrate knobs Introduce sched_group_upmigrate and sched_group_downmigrate to control these tunables at a runtime. 
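As an illustration of the intended usage (the values are made-up percentages, not tuning recommendations), a user space tuner could adjust the thresholds at runtime through the /proc/sys/kernel/ entries added below:

  #include <stdio.h>

  int main(void)
  {
          /* Group upmigrate must stay >= group downmigrate per the handler below. */
          FILE *f = fopen("/proc/sys/kernel/sched_group_upmigrate", "w");

          if (!f)
                  return 1;
          fprintf(f, "95\n");
          fclose(f);

          f = fopen("/proc/sys/kernel/sched_group_downmigrate", "w");
          if (!f)
                  return 1;
          fprintf(f, "85\n");
          fclose(f);
          return 0;
  }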
Change-Id: I8d88b0ca15dc1daec99b03de116192acc2660ba7 Signed-off-by: Joonwoo Park --- include/linux/sched/sysctl.h | 12 +++++++++-- kernel/sched/walt.c | 30 ++++++++++++++++++++++++++ kernel/sysctl.c | 41 ++++++++++++++++++++++-------------- 3 files changed, 65 insertions(+), 18 deletions(-) diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 190bf3b95141..fda695bd2b3d 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -33,6 +33,8 @@ extern unsigned int sysctl_sched_cpu_high_irqload; extern unsigned int sysctl_sched_use_walt_cpu_util; extern unsigned int sysctl_sched_use_walt_task_util; extern unsigned int sysctl_sched_boost; +extern unsigned int sysctl_sched_group_upmigrate_pct; +extern unsigned int sysctl_sched_group_downmigrate_pct; #endif #ifdef CONFIG_SCHED_HMP @@ -53,8 +55,6 @@ extern unsigned int sysctl_sched_spill_nr_run; extern unsigned int sysctl_sched_spill_load_pct; extern unsigned int sysctl_sched_upmigrate_pct; extern unsigned int sysctl_sched_downmigrate_pct; -extern unsigned int sysctl_sched_group_upmigrate_pct; -extern unsigned int sysctl_sched_group_downmigrate_pct; extern unsigned int sysctl_early_detection_duration; extern unsigned int sysctl_sched_small_wakee_task_load_pct; extern unsigned int sysctl_sched_big_waker_task_load_pct; @@ -67,6 +67,14 @@ extern unsigned int sysctl_sched_freq_aggregate_threshold_pct; extern unsigned int sysctl_sched_prefer_sync_wakee_to_waker; extern unsigned int sysctl_sched_short_burst; extern unsigned int sysctl_sched_short_sleep; + +#elif defined(CONFIG_SCHED_WALT) + +extern int +walt_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + #endif /* CONFIG_SCHED_HMP */ enum sched_tunable_scaling { diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 65f41483c48e..786686f97077 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -3065,3 +3065,33 @@ void walt_irq_work(struct irq_work *irq_work) core_ctl_check(this_rq()->window_start); } + +#ifndef CONFIG_SCHED_HMP +int walt_proc_update_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + unsigned int *data = (unsigned int *)table->data; + static DEFINE_MUTEX(mutex); + + mutex_lock(&mutex); + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (ret || !write) { + mutex_unlock(&mutex); + return ret; + } + + if (data == &sysctl_sched_group_upmigrate_pct) + sched_group_upmigrate = + pct_to_real(sysctl_sched_group_upmigrate_pct); + else if (data == &sysctl_sched_group_downmigrate_pct) + sched_group_downmigrate = + pct_to_real(sysctl_sched_group_downmigrate_pct); + else + ret = -EINVAL; + mutex_unlock(&mutex); + + return ret; +} +#endif diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 534431af102f..0e0a4bdd6f39 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -300,6 +300,31 @@ static struct ctl_table kern_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "sched_group_upmigrate", + .data = &sysctl_sched_group_upmigrate_pct, + .maxlen = sizeof(unsigned int), + .mode = 0644, +#ifdef CONFIG_SCHED_HMP + .proc_handler = sched_hmp_proc_update_handler, +#else + .proc_handler = walt_proc_update_handler, +#endif + .extra1 = &sysctl_sched_group_downmigrate_pct, + }, + { + .procname = "sched_group_downmigrate", + .data = &sysctl_sched_group_downmigrate_pct, + .maxlen = sizeof(unsigned int), + .mode = 0644, +#ifdef CONFIG_SCHED_HMP + .proc_handler = 
sched_hmp_proc_update_handler, +#else + .proc_handler = walt_proc_update_handler, +#endif + .extra1 = &zero, + .extra2 = &sysctl_sched_group_upmigrate_pct, + }, #endif #ifdef CONFIG_SCHED_HMP { @@ -376,22 +401,6 @@ static struct ctl_table kern_table[] = { .extra1 = &zero, .extra2 = &one_hundred, }, - { - .procname = "sched_group_upmigrate", - .data = &sysctl_sched_group_upmigrate_pct, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = sched_hmp_proc_update_handler, - .extra1 = &zero, - }, - { - .procname = "sched_group_downmigrate", - .data = &sysctl_sched_group_downmigrate_pct, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = sched_hmp_proc_update_handler, - .extra1 = &zero, - }, { .procname = "sched_init_task_load", .data = &sysctl_sched_init_task_load_pct, -- GitLab From b02fc008c890b23347ae39e0e698578efd180764 Mon Sep 17 00:00:00 2001 From: Joonwoo Park Date: Fri, 16 Jun 2017 11:58:58 -0700 Subject: [PATCH 201/786] sched: introduce sched_{up,down}migrate knobs Introduce knobs sched_upmigrate and sched_downmigrate to control thresholds to up and down migrate tasks between clusters at a runtime for ease of power and performance tuning. Change-Id: I17a464388b404b61d9a9dbe1beee4883c4db8e3f Signed-off-by: Joonwoo Park --- include/linux/sched/sysctl.h | 6 +++++ include/linux/sysctl.h | 3 +++ kernel/sched/core.c | 26 ++++++++++++++++++ kernel/sched/fair.c | 16 ++++++----- kernel/sysctl.c | 52 ++++++++++++++++++++++++++++++++++++ 5 files changed, 97 insertions(+), 6 deletions(-) diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index fda695bd2b3d..f0ba8e6c8425 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -22,6 +22,8 @@ extern unsigned int sysctl_sched_is_big_little; extern unsigned int sysctl_sched_sync_hint_enable; extern unsigned int sysctl_sched_initial_task_util; extern unsigned int sysctl_sched_cstate_aware; +extern unsigned int sysctl_sched_capacity_margin; +extern unsigned int sysctl_sched_capacity_margin_down; #ifdef CONFIG_SCHED_WALT extern unsigned int sysctl_sched_use_walt_cpu_util; extern unsigned int sysctl_sched_use_walt_task_util; @@ -156,6 +158,10 @@ extern int sched_rt_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); +extern int sched_updown_migrate_handler(struct ctl_table *table, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos); + extern int sysctl_numa_balancing(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index adf4e51cf597..8f84c84ea00f 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -59,6 +59,9 @@ extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void __user *, size_t *, loff_t *); extern int proc_do_large_bitmap(struct ctl_table *, int, void __user *, size_t *, loff_t *); +extern int proc_douintvec_capacity(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); /* * Register a set of sysctl names by calling register_sysctl_table diff --git a/kernel/sched/core.c b/kernel/sched/core.c index de1b3b7eedb4..e5abfcf41c77 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9137,6 +9137,32 @@ int sched_rr_handler(struct ctl_table *table, int write, return ret; } +#ifdef CONFIG_PROC_SYSCTL +int sched_updown_migrate_handler(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + unsigned int 
*data = (unsigned int *)table->data; + unsigned int old_val; + static DEFINE_MUTEX(mutex); + + mutex_lock(&mutex); + old_val = *data; + + ret = proc_douintvec_capacity(table, write, buffer, lenp, ppos); + + if (!ret && write && + sysctl_sched_capacity_margin > sysctl_sched_capacity_margin_down) { + ret = -EINVAL; + *data = old_val; + } + mutex_unlock(&mutex); + + return ret; +} +#endif + #ifdef CONFIG_CGROUP_SCHED inline struct task_group *css_tg(struct cgroup_subsys_state *css) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index cd406daa8c00..893385567508 100755 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -221,8 +221,8 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; * The margin used when comparing utilization with CPU capacity: * util * 1024 < capacity * margin */ -unsigned int capacity_margin = 1078; /* ~5% margin */ -unsigned int capacity_margin_down = 1205; /* ~15% margin */ +unsigned int sysctl_sched_capacity_margin = 1078; /* ~5% margin */ +unsigned int sysctl_sched_capacity_margin_down = 1205; /* ~15% margin */ static inline void update_load_add(struct load_weight *lw, unsigned long inc) { @@ -5931,9 +5931,9 @@ static inline bool __task_fits(struct task_struct *p, int cpu, int util) util += boosted_task_util(p); if (capacity_orig_of(task_cpu(p)) > capacity_orig_of(cpu)) - margin = capacity_margin_down; + margin = sysctl_sched_capacity_margin_down; else - margin = capacity_margin; + margin = sysctl_sched_capacity_margin; return (capacity_orig_of(cpu) * 1024) > (util * margin); } @@ -5961,7 +5961,7 @@ static inline bool task_fits_spare(struct task_struct *p, int cpu) static bool __cpu_overutilized(int cpu, int delta) { return (capacity_orig_of(cpu) * 1024) < - ((cpu_util(cpu) + delta) * capacity_margin); + ((cpu_util(cpu) + delta) * sysctl_sched_capacity_margin); } bool cpu_overutilized(int cpu) @@ -6098,10 +6098,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, struct sched_group *fit_group = NULL, *spare_group = NULL; unsigned long min_load = ULONG_MAX, this_load = 0; unsigned long fit_capacity = ULONG_MAX; - unsigned long max_spare_capacity = capacity_margin - SCHED_CAPACITY_SCALE; + unsigned long max_spare_capacity; + int load_idx = sd->forkexec_idx; int imbalance = 100 + (sd->imbalance_pct-100)/2; + max_spare_capacity = sysctl_sched_capacity_margin - + SCHED_CAPACITY_SCALE; + if (sd_flag & SD_BALANCE_WAKE) load_idx = sd->wake_idx; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0e0a4bdd6f39..b076cba857fb 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -586,6 +586,20 @@ static struct ctl_table kern_table[] = { .extra1 = &min_wakeup_granularity_ns, .extra2 = &max_wakeup_granularity_ns, }, + { + .procname = "sched_upmigrate", + .data = &sysctl_sched_capacity_margin, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_updown_migrate_handler, + }, + { + .procname = "sched_downmigrate", + .data = &sysctl_sched_capacity_margin_down, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_updown_migrate_handler, + }, #ifdef CONFIG_SMP { .procname = "sched_tunable_scaling", @@ -3181,6 +3195,39 @@ int proc_do_large_bitmap(struct ctl_table *table, int write, } } +static int do_proc_douintvec_capacity_conv(bool *negp, unsigned long *lvalp, + int *valp, int write, void *data) +{ + if (write) { + if (*negp) + return -EINVAL; + *valp = SCHED_FIXEDPOINT_SCALE * 100 / *lvalp; + } else { + *negp = false; + *lvalp = SCHED_FIXEDPOINT_SCALE * 100 / *valp; + } + + return 0; +} + +/** + * 
proc_douintvec_capacity - read a vector of integers in percentage and convert + * into sched capacity + * @table: the sysctl table + * @write: %TRUE if this is a write to the sysctl file + * @buffer: the user buffer + * @lenp: the size of the user buffer + * @ppos: file position + * + * Returns 0 on success. + */ +int proc_douintvec_capacity(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return do_proc_dointvec(table, write, buffer, lenp, ppos, + do_proc_douintvec_capacity_conv, NULL); +} + #else /* CONFIG_PROC_SYSCTL */ int proc_dostring(struct ctl_table *table, int write, @@ -3238,6 +3285,11 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, return -ENOSYS; } +int proc_douintvec_capacity(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return -ENOSYS; +} #endif /* CONFIG_PROC_SYSCTL */ -- GitLab From c34b9e322155717a67f3fa5b663e19de08aecb1d Mon Sep 17 00:00:00 2001 From: George Shen Date: Tue, 20 Jun 2017 11:42:19 -0700 Subject: [PATCH 202/786] msm: kgsl: update TP clock gating setting To fix a GPU hang issue with Unreal Soul demo. CRs-Fixed: 2062271 Change-Id: I5f6acb8461fd72dc3b9671f8e37d440ae3abac46 Signed-off-by: George Shen --- drivers/gpu/msm/adreno_a6xx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 314b2d8f576b..ee9dbea153a8 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -79,10 +79,10 @@ static const struct kgsl_hwcg_reg a630_hwcg_regs[] = { {A6XX_RBBM_CLOCK_HYST_SP1, 0x00000080}, {A6XX_RBBM_CLOCK_HYST_SP2, 0x00000080}, {A6XX_RBBM_CLOCK_HYST_SP3, 0x00000080}, - {A6XX_RBBM_CLOCK_CNTL_TP0, 0x22222222}, - {A6XX_RBBM_CLOCK_CNTL_TP1, 0x22222222}, - {A6XX_RBBM_CLOCK_CNTL_TP2, 0x22222222}, - {A6XX_RBBM_CLOCK_CNTL_TP3, 0x22222222}, + {A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222}, + {A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222}, + {A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222}, + {A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222}, {A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222}, {A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222}, {A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222}, -- GitLab From 081400edf9d7ab7ca692b3f50a5a330c4dc5a0df Mon Sep 17 00:00:00 2001 From: Dhaval Patel Date: Wed, 21 Jun 2017 19:24:48 -0700 Subject: [PATCH 203/786] drm/msm/dsi-staging: remove display from active list Remove display from active list if it is selected from command line instead of dtsi. This will avoid boot failure for display configuration from dtsi and select valid display as primary display. 
Change-Id: I7da2d42d584a4b8dda174bc50d319f711b69f5e0 Signed-off-by: Dhaval Patel --- drivers/gpu/drm/msm/dsi-staging/dsi_display.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index 3dd49504d6fe..ab65397c940d 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -2780,6 +2780,9 @@ int dsi_display_dev_probe(struct platform_device *pdev) (void)_dsi_display_dev_deinit(main_display); component_del(&main_display->pdev->dev, &dsi_display_comp_ops); + mutex_lock(&dsi_display_list_lock); + list_del(&main_display->list); + mutex_unlock(&dsi_display_list_lock); comp_add_success = false; default_active_node = NULL; pr_debug("removed the existing comp ops\n"); -- GitLab From 05737fde4198a59ba65328cf6dc0f41ea259d705 Mon Sep 17 00:00:00 2001 From: Sahitya Tummala Date: Mon, 5 Jun 2017 13:43:05 +0530 Subject: [PATCH 204/786] defconfig: msm: Enable QPIC NAND driver for sdxpoorwills Enable QPIC NAND driver support for sdxpoorwills. This is needed to support NAND devices. Change-Id: I9a4f914dc49d286ae5ec16adfb973e142b81b0d4 Signed-off-by: Sahitya Tummala --- arch/arm/configs/sdxpoorwills-perf_defconfig | 1 + arch/arm/configs/sdxpoorwills_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig index 40289a8dc299..9824f82f11cd 100644 --- a/arch/arm/configs/sdxpoorwills-perf_defconfig +++ b/arch/arm/configs/sdxpoorwills-perf_defconfig @@ -156,6 +156,7 @@ CONFIG_MTD=y CONFIG_MTD_TESTS=m CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y +CONFIG_MTD_MSM_QPIC_NAND=y CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig index d91f5f625b02..ece96b4d875f 100644 --- a/arch/arm/configs/sdxpoorwills_defconfig +++ b/arch/arm/configs/sdxpoorwills_defconfig @@ -148,6 +148,7 @@ CONFIG_MTD=y CONFIG_MTD_TESTS=m CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y +CONFIG_MTD_MSM_QPIC_NAND=y CONFIG_MTD_UBI=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y -- GitLab From 4713dedc33e5a41366381ac561180e78f3d746d5 Mon Sep 17 00:00:00 2001 From: Banajit Goswami Date: Wed, 21 Jun 2017 20:41:33 -0700 Subject: [PATCH 205/786] soc: qcom: add config options for Audio SSR and PDR features Add configuration options for Audio SSR and Audio PDR features. Change-Id: Ib2fb40dbf5981f04ceb063bc0e58947497326b36 Signed-off-by: Banajit Goswami --- drivers/soc/qcom/Kconfig | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 8317c0992ad1..07c44f717d1c 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -545,6 +545,38 @@ config MSM_QDSP6_APRV3_GLINK used by audio driver to configure QDSP6v2's ASM, ADM and AFE. +config MSM_QDSP6_SSR + bool "Audio QDSP6 SSR support" + depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK + help + Enable Subsystem Restart. Reset audio + clients when the ADSP subsystem is + restarted. Subsystem Restart for audio + is only used for processes on the ADSP + and signals audio drivers through APR. + + +config MSM_QDSP6_PDR + bool "Audio QDSP6 PDR support" + depends on MSM_QDSP6_APRV2_GLINK || MSM_QDSP6_APRV3_GLINK + help + Enable Protection Domain Restart. Reset + audio clients when a process on the ADSP + is restarted. 
PDR for audio is only used + for processes on the ADSP and signals + audio drivers through APR. + +config MSM_QDSP6_NOTIFIER + bool "Audio QDSP6 PDR support" + depends on MSM_QDSP6_SSR || MSM_QDSP6_PDR + help + Enable notifier which decides whether + to use SSR or PDR and notifies all + audio clients of the event. Both SSR + and PDR are recovery methods when + there is a crash on ADSP. Audio drivers + are contacted by ADSP through APR. + config MSM_ADSP_LOADER tristate "ADSP loader support" select SND_SOC_MSM_APRV2_INTF -- GitLab From e3361f9415c594c85040c7e6f1af1a6e354440cc Mon Sep 17 00:00:00 2001 From: Tharun Kumar Merugu Date: Thu, 22 Jun 2017 10:45:43 +0530 Subject: [PATCH 206/786] msm: ADSPRPC: Initialize FastRPC invoke metadata Initializing metadata buffer to zero before reusing the buffer for next invoke. Change-Id: Iaab3478732b83427a475e95afa0e031cb76f60d9 Acked-by: Viswanatham Paduchuri Signed-off-by: Tharun Kumar Merugu --- drivers/char/adsprpc.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index efaa9d13a821..a5e82229e113 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -992,6 +992,9 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) if (err) goto bail; } + if (ctx->buf->virt && metalen <= copylen) + memset(ctx->buf->virt, 0, metalen); + /* copy metadata */ rpra = ctx->buf->virt; ctx->rpra = rpra; -- GitLab From d5eae9d3d879c4439547cd823c14a053b0a4f9ad Mon Sep 17 00:00:00 2001 From: Padmanabhan Komanduru Date: Thu, 22 Jun 2017 19:23:36 +0530 Subject: [PATCH 207/786] drm: dp: add support to validate the modes reported by the sink The Display Port driver might not support all the modes returned by the sink upon reading its EDID. This can be due to a combination of restrictions related to link rate/number of lanes/bpp/maximum pixel clock supported by the SDM hardware. Add support to validate these modes before passing them to display userspace system. 
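For illustration (assumed example numbers, not values taken from this change): with a 4-lane link trained at HBR2, dp_link->rate reads 540000, so for an 8-bpc (24 bpp) sink the limit computed below is 4 * 540000 * 8 / 24 = 720000 kHz; that result is then clamped against the qcom,max-pclk-frequency-khz property (576000 kHz in the dtsi change below), and any mode whose pixel clock exceeds the final limit is reported as MODE_BAD.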
Change-Id: Ia1cfb8dcd293b89405718dd68fb53e6d3b000ad5 Signed-off-by: Padmanabhan Komanduru --- .../boot/dts/qcom/sdm845-sde-display.dtsi | 1 + drivers/gpu/drm/msm/dp/dp_display.c | 6 ++++ drivers/gpu/drm/msm/dp/dp_display.h | 1 + drivers/gpu/drm/msm/dp/dp_drm.c | 14 +++++++- drivers/gpu/drm/msm/dp/dp_panel.c | 29 ++++++++++++++++ drivers/gpu/drm/msm/dp/dp_panel.h | 2 ++ drivers/gpu/drm/msm/sde_edid_parser.c | 34 +++++++++++++++++++ drivers/gpu/drm/msm/sde_edid_parser.h | 8 +++++ 8 files changed, 94 insertions(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi index 726a63f02be8..2a907ef7e48b 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-sde-display.dtsi @@ -412,6 +412,7 @@ qcom,dp-usbpd-detection = <&pmi8998_pdphy>; qcom,aux-cfg-settings = [00 13 04 00 0a 26 0a 03 bb 03]; + qcom,max-pclk-frequency-khz = <576000>; qcom,core-supply-entries { #address-cells = <1>; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index a3c6f58614f8..5a33fdb29bb4 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -261,6 +261,7 @@ static const struct component_ops dp_display_comp_ops = { static int dp_display_process_hpd_high(struct dp_display_private *dp) { int rc = 0; + u32 max_pclk_from_edid = 0; rc = dp->panel->read_dpcd(dp->panel); if (rc) @@ -269,6 +270,11 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp) sde_get_edid(dp->dp_display.connector, &dp->aux->drm_aux->ddc, (void **)&dp->panel->edid_ctrl); + max_pclk_from_edid = dp->panel->get_max_pclk(dp->panel); + + dp->dp_display.max_pclk_khz = min(max_pclk_from_edid, + dp->parser->max_pclk_khz); + dp->dp_display.is_connected = true; drm_helper_hpd_irq_event(dp->dp_display.connector->dev); diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h index 877287a2a09a..3caa2773ae32 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.h +++ b/drivers/gpu/drm/msm/dp/dp_display.h @@ -29,6 +29,7 @@ struct dp_display { struct dp_bridge *bridge; struct drm_connector *connector; bool is_connected; + u32 max_pclk_khz; int (*enable)(struct dp_display *dp_display); int (*post_enable)(struct dp_display *dp_display); diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 78c04c4cca9f..91aafdd2c445 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -450,5 +450,17 @@ enum drm_mode_status dp_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode, void *display) { - return MODE_OK; + struct dp_display *dp_disp; + + if (!mode || !display) { + pr_err("invalid params\n"); + return MODE_ERROR; + } + + dp_disp = display; + + if (mode->clock > dp_disp->max_pclk_khz) + return MODE_BAD; + else + return MODE_OK; } diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c index fed1dbb44094..4496e9a0e6b5 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.c +++ b/drivers/gpu/drm/msm/dp/dp_panel.c @@ -72,6 +72,34 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel) return rc; } +static u32 dp_panel_get_max_pclk(struct dp_panel *dp_panel) +{ + struct dp_panel_private *panel; + struct drm_dp_link *dp_link; + u32 bpc, bpp, max_data_rate_khz, max_pclk_rate_khz; + const u8 num_components = 3; + + if (!dp_panel) { + pr_err("invalid input\n"); + return 0; + } + + panel = container_of(dp_panel, struct dp_panel_private, 
dp_panel); + dp_link = &dp_panel->dp_link; + + bpc = sde_get_sink_bpc(dp_panel->edid_ctrl); + bpp = bpc * num_components; + + max_data_rate_khz = (dp_link->num_lanes * dp_link->rate * 8); + max_pclk_rate_khz = max_data_rate_khz / bpp; + + pr_debug("bpp=%d, max_lane_cnt=%d\n", bpp, dp_link->num_lanes); + pr_debug("max_data_rate=%dKHz, max_pclk_rate=%dKHz\n", + max_data_rate_khz, max_pclk_rate_khz); + + return max_pclk_rate_khz; +} + static int dp_panel_timing_cfg(struct dp_panel *dp_panel) { int rc = 0; @@ -276,6 +304,7 @@ struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux, dp_panel->timing_cfg = dp_panel_timing_cfg; dp_panel->read_dpcd = dp_panel_read_dpcd; dp_panel->get_link_rate = dp_panel_get_link_rate; + dp_panel->get_max_pclk = dp_panel_get_max_pclk; return dp_panel; error: diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h index 5852c70e1d51..b63c51f3c7d7 100644 --- a/drivers/gpu/drm/msm/dp/dp_panel.h +++ b/drivers/gpu/drm/msm/dp/dp_panel.h @@ -46,6 +46,7 @@ struct dp_panel { struct dp_panel_info pinfo; u32 vic; + u32 max_pclk_khz; int (*sde_edid_register)(struct dp_panel *dp_panel); void (*sde_edid_deregister)(struct dp_panel *dp_panel); @@ -53,6 +54,7 @@ struct dp_panel { int (*timing_cfg)(struct dp_panel *dp_panel); int (*read_dpcd)(struct dp_panel *dp_panel); u32 (*get_link_rate)(struct dp_panel *dp_panel); + u32 (*get_max_pclk)(struct dp_panel *dp_panel); }; struct dp_panel *dp_panel_get(struct device *dev, struct dp_aux *aux, diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c index 12165e8b99e3..db36069b8322 100644 --- a/drivers/gpu/drm/msm/sde_edid_parser.c +++ b/drivers/gpu/drm/msm/sde_edid_parser.c @@ -484,6 +484,40 @@ int _sde_edid_update_modes(struct drm_connector *connector, return rc; } +u32 sde_get_sink_bpc(void *input) +{ + struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input); + struct edid *edid = edid_ctrl->edid; + + if ((edid->revision < 3) || !(edid->input & DRM_EDID_INPUT_DIGITAL)) + return 0; + + if (edid->revision < 4) { + if (edid->input & DRM_EDID_DIGITAL_TYPE_DVI) + return 8; + else + return 0; + } + + switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { + case DRM_EDID_DIGITAL_DEPTH_6: + return 6; + case DRM_EDID_DIGITAL_DEPTH_8: + return 8; + case DRM_EDID_DIGITAL_DEPTH_10: + return 10; + case DRM_EDID_DIGITAL_DEPTH_12: + return 12; + case DRM_EDID_DIGITAL_DEPTH_14: + return 14; + case DRM_EDID_DIGITAL_DEPTH_16: + return 16; + case DRM_EDID_DIGITAL_DEPTH_UNDEF: + default: + return 0; + } +} + bool sde_detect_hdmi_monitor(void *input) { struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input); diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h index 1143dc2c7bec..eb68439ec4f2 100644 --- a/drivers/gpu/drm/msm/sde_edid_parser.h +++ b/drivers/gpu/drm/msm/sde_edid_parser.h @@ -135,6 +135,14 @@ void sde_free_edid(void **edid_ctrl); */ bool sde_detect_hdmi_monitor(void *edid_ctrl); +/** + * sde_get_sink_bpc() - return the bpc of sink device. + * @edid_ctrl: Handle to the edid_ctrl structure. + * + * Return: bpc supported by the sink. + */ +u32 sde_get_sink_bpc(void *edid_ctrl); + /** * _sde_edid_update_modes() - populate EDID modes. * @edid_ctrl: Handle to the edid_ctrl structure. 
-- GitLab From 372596d6647e29f10898f72fa4c43660f243b7bf Mon Sep 17 00:00:00 2001 From: Veera Sundaram Sankaran Date: Wed, 21 Jun 2017 17:57:25 -0700 Subject: [PATCH 208/786] drm/msm/sde: return error code on multirect check failure Return error code is missed for one of the multirect checks, which makes the crtc atomic check return early with success. This might lead to wrong hardware configuration. Fix it by returning proper error code. Change-Id: Id4b7860a233b65cabfb834d068a5e92e435ac57c Signed-off-by: Veera Sundaram Sankaran --- drivers/gpu/drm/msm/sde/sde_crtc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index e70829023d5e..56e4618c6b65 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -2747,8 +2747,10 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc, sde_plane_clear_multirect(pipe_staged[i]); if (is_sde_plane_virtual(pipe_staged[i]->plane)) { - SDE_ERROR("invalid use of virtual plane: %d\n", + SDE_ERROR( + "r1 only virt plane:%d not supported\n", pipe_staged[i]->plane->base.id); + rc = -EINVAL; goto end; } } -- GitLab From 3692fa7387da7fcecce6de5f7cb8a5df53bc956e Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Tue, 25 Apr 2017 18:57:05 -0700 Subject: [PATCH 209/786] ASoC: wcd-mbhc: Add support for unloading mbhc Add lock and unlock for mbhc resource lock while calling wcd_cancel_hs_detect_plug to avoid kernel warning messages from deinit function. CRs-fixed: 2039099 Change-Id: I732163fae68bc675e72142b3287d11a0c21f7375 Signed-off-by: Karthikeyan Mani --- sound/soc/codecs/wcd-mbhc-v2.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c index cb96f2b995a9..3b2426dc7234 100644 --- a/sound/soc/codecs/wcd-mbhc-v2.c +++ b/sound/soc/codecs/wcd-mbhc-v2.c @@ -2058,9 +2058,12 @@ void wcd_mbhc_deinit(struct wcd_mbhc *mbhc) mbhc->mbhc_cb->free_irq(codec, mbhc->intr_ids->hph_right_ocp, mbhc); if (mbhc->mbhc_cb && mbhc->mbhc_cb->register_notifier) mbhc->mbhc_cb->register_notifier(mbhc, &mbhc->nblock, false); - if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug) + if (mbhc->mbhc_fn->wcd_cancel_hs_detect_plug) { + WCD_MBHC_RSC_LOCK(mbhc); mbhc->mbhc_fn->wcd_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch); + WCD_MBHC_RSC_UNLOCK(mbhc); + } mutex_destroy(&mbhc->codec_resource_lock); mutex_destroy(&mbhc->hphl_pa_lock); mutex_destroy(&mbhc->hphr_pa_lock); -- GitLab From 1af8e86109fb929696706f3b119b66d45c4986bc Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Tue, 20 Jun 2017 16:16:36 -0700 Subject: [PATCH 210/786] usb: phy-msm-qusb-v2: Use phy autoresume register offset from device tree Phy auto resume register offset may change with every target. Hence read this register offset from device tree. Change-Id: Ibbdb14eb6c8510b422161959c1978864b19360dc Signed-off-by: Hemant Kumar --- .../devicetree/bindings/usb/msm-phy.txt | 1 + drivers/usb/phy/phy-msm-qusb-v2.c | 25 ++++++++++++------- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt index e508a4fed2ab..6f67899c7849 100644 --- a/Documentation/devicetree/bindings/usb/msm-phy.txt +++ b/Documentation/devicetree/bindings/usb/msm-phy.txt @@ -140,6 +140,7 @@ Optional properties: - qcom,hold-reset: Indicates that hold QUSB PHY into reset state. 
- qcom,phy-clk-scheme: Should be one of "cml" or "cmos" if ref_clk_addr is provided. - qcom,major-rev: provide major revision number to differentiate power up sequence. default is 2.0 + - qcom,phy-auto-resume-offset: Provides phy auto-resume register offset. Example: qusb_phy: qusb@f9b39000 { diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c index 4f0a455d50c5..1210188e5735 100644 --- a/drivers/usb/phy/phy-msm-qusb-v2.c +++ b/drivers/usb/phy/phy-msm-qusb-v2.c @@ -48,7 +48,6 @@ #define DPSE_INTERRUPT BIT(0) #define QUSB2PHY_PORT_TUNE1 0x23c -#define QUSB2PHY_TEST1 0x24C #define QUSB2PHY_PLL_CORE_INPUT_OVERRIDE 0x0a8 #define CORE_PLL_RATE BIT(0) @@ -94,6 +93,7 @@ struct qusb_phy { int *qusb_phy_host_init_seq; u32 tune_val; + u32 phy_auto_resume_offset; int efuse_bit_pos; int efuse_num_of_bits; @@ -551,14 +551,15 @@ static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend) CORE_RESET | CORE_RESET_MUX, qphy->base + QUSB2PHY_PLL_CORE_INPUT_OVERRIDE); - /* enable phy auto-resume */ - writel_relaxed(0x91, - qphy->base + QUSB2PHY_TEST1); - /* flush the previous write before next write */ - wmb(); - writel_relaxed(0x90, - qphy->base + QUSB2PHY_TEST1); - + if (qphy->phy_auto_resume_offset) { + /* enable phy auto-resume */ + writel_relaxed(0x91, + qphy->base + qphy->phy_auto_resume_offset); + /* flush the previous write before next write */ + wmb(); + writel_relaxed(0x90, + qphy->base + qphy->phy_auto_resume_offset); + } dev_dbg(phy->dev, "%s: intr_mask = %x\n", __func__, intr_mask); @@ -916,6 +917,12 @@ static int qusb_phy_probe(struct platform_device *pdev) return ret; } + ret = of_property_read_u32(dev->of_node, "qcom,phy-auto-resume-offset", + &qphy->phy_auto_resume_offset); + if (ret) + dev_dbg(dev, "error reading qcom,phy-auto-resume-offset %d\n", + ret); + qphy->vdd = devm_regulator_get(dev, "vdd"); if (IS_ERR(qphy->vdd)) { dev_err(dev, "unable to get vdd supply\n"); -- GitLab From 6f555350e7e7081bc631117593e572e1f1f436f0 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Tue, 20 Jun 2017 16:13:16 -0700 Subject: [PATCH 211/786] ARM: dts: msm: Add support to pass phy auto resume offset for SDM845 Phy auto resume register offset may change for every target. Hence pass register offset value from device tree. Change-Id: Ic52ab9ee0b16c4f358fd2d69af07fcb6b4ac2c01 Signed-off-by: Hemant Kumar --- arch/arm64/boot/dts/qcom/sdm845-usb.dtsi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi index df6ffadf2fd0..53cb27e62c26 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi @@ -141,6 +141,8 @@ 0x00 0x23c /* CHG_CTRL2 */ 0x22 0x210>; /* PWR_CTRL1 */ + qcom,phy-auto-resume-offset = <0x254>; + phy_type= "utmi"; clocks = <&clock_rpmh RPMH_CXO_CLK>, <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>; -- GitLab From b8d20e978b68fcdd9530627a19e5ca694a2cc7e2 Mon Sep 17 00:00:00 2001 From: Dustin Brown Date: Tue, 13 Jun 2017 13:54:38 -0700 Subject: [PATCH 212/786] arm/arm64: Export save_stack_trace_tsk() The kernel watchdog is a great debugging tool for finding tasks that consume a disproportionate amount of CPU time in contiguous chunks. One can imagine building a similar watchdog for arbitrary driver threads using save_stack_trace_tsk() and print_stack_trace(). However, this is not viable for dynamically loaded driver modules on ARM platforms because save_stack_trace_tsk() is not exported for those architectures. 
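As a sketch of that use case (illustrative only; the helper name is made up), a driver could periodically dump the stack of a worker thread it suspects is stuck:

  #include <linux/kernel.h>
  #include <linux/sched.h>
  #include <linux/stacktrace.h>

  /* Hypothetical watchdog helper inside a loadable driver module. */
  static void drv_watchdog_dump_task(struct task_struct *tsk)
  {
          unsigned long entries[16];
          struct stack_trace trace = {
                  .entries        = entries,
                  .max_entries    = ARRAY_SIZE(entries),
                  .nr_entries     = 0,
                  .skip           = 0,
          };

          save_stack_trace_tsk(tsk, &trace); /* callable from a module once exported */
          pr_err("drv watchdog: task %s (%d) looks stuck\n", tsk->comm, tsk->pid);
          print_stack_trace(&trace, 0);
  }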
Export save_stack_trace_tsk() for the ARM64 architecture to align with x86 and support various debugging use cases such as arbitrary driver thread watchdog timers. Change-Id: I61e9d2afc4703a786fa6dcaf82fe46c0ed250045 CRs-Fixed: 2061326 Signed-off-by: Dustin Brown --- arch/arm/kernel/stacktrace.c | 1 + arch/arm64/kernel/stacktrace.c | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c index 92b72375c4c7..fe76010f75cf 100644 --- a/arch/arm/kernel/stacktrace.c +++ b/arch/arm/kernel/stacktrace.c @@ -170,6 +170,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { __save_stack_trace(tsk, trace, 1); } +EXPORT_SYMBOL(save_stack_trace_tsk); void save_stack_trace(struct stack_trace *trace) { diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index c2efddfca18c..bedf97d7fe20 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -203,6 +203,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) if (trace->nr_entries < trace->max_entries) trace->entries[trace->nr_entries++] = ULONG_MAX; } +EXPORT_SYMBOL(save_stack_trace_tsk); void save_stack_trace(struct stack_trace *trace) { -- GitLab From 886617ea7fe671545401b3933aa87149c4562831 Mon Sep 17 00:00:00 2001 From: David Dai Date: Mon, 17 Apr 2017 14:46:59 -0700 Subject: [PATCH 213/786] ARM: dts: msm: Add QoS settings for sdm845 Add QoS settings specific to master ports and QoS base and offset addresses to fabrics. Change-Id: Iaedd33ad97836079060cb6936a508b1f088873b6 Signed-off-by: David Dai --- arch/arm64/boot/dts/qcom/sdm845-bus.dtsi | 116 ++++++++++++++++++++--- include/dt-bindings/msm/msm-bus-ids.h | 2 +- 2 files changed, 102 insertions(+), 16 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi index e26f888443ff..b33b525a30ff 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi @@ -12,6 +12,7 @@ #include #include +#include &soc { ad_hoc_bus: ad-hoc-bus { @@ -355,7 +356,8 @@ label = "fab-aggre1_noc"; qcom,fab-dev; qcom,base-name = "aggre1_noc-base"; - qcom,bypass-qos-prg; + qcom,qos-off = <4096>; + qcom,base-offset = <16384>; qcom,bus-type = <1>; clocks = <>; }; @@ -365,7 +367,8 @@ label = "fab-aggre2_noc"; qcom,fab-dev; qcom,base-name = "aggre2_noc-base"; - qcom,bypass-qos-prg; + qcom,qos-off = <4096>; + qcom,base-offset = <16384>; qcom,bus-type = <1>; clocks = <>; }; @@ -432,7 +435,8 @@ label = "fab-mem_noc"; qcom,fab-dev; qcom,base-name = "mem_noc-base"; - qcom,bypass-qos-prg; + qcom,qos-off = <4096>; + qcom,base-offset = <65536>; qcom,bus-type = <1>; clocks = <>; }; @@ -442,7 +446,8 @@ label = "fab-mmss_noc"; qcom,fab-dev; qcom,base-name = "mmss_noc-base"; - qcom,bypass-qos-prg; + qcom,qos-off = <4096>; + qcom,base-offset = <36864>; qcom,bus-type = <1>; clocks = <>; }; @@ -452,7 +457,8 @@ label = "fab-system_noc"; qcom,fab-dev; qcom,base-name = "system_noc-base"; - qcom,bypass-qos-prg; + qcom,qos-off = <4096>; + qcom,base-offset = <36864>; qcom,bus-type = <1>; clocks = <>; }; @@ -471,6 +477,8 @@ label = "fab-mem_noc_display"; qcom,fab-dev; qcom,base-name = "mem_noc-base"; + qcom,qos-off = <4096>; + qcom,base-offset = <65536>; qcom,bypass-qos-prg; qcom,bus-type = <1>; clocks = <>; @@ -524,6 +532,8 @@ qcom,qport = <1>; qcom,connections = <&slv_qns_a1noc_snoc>; qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <1>; }; mas_xm_sdc4: mas-xm-sdc4 { @@ -534,6 +544,8 
@@ qcom,qport = <2>; qcom,connections = <&slv_qns_a1noc_snoc>; qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <1>; }; mas_xm_ufs_card: mas-xm-ufs-card { @@ -544,6 +556,8 @@ qcom,qport = <3>; qcom,connections = <&slv_qns_a1noc_snoc>; qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; }; mas_xm_ufs_mem: mas-xm-ufs-mem { @@ -554,6 +568,8 @@ qcom,qport = <4>; qcom,connections = <&slv_qns_a1noc_snoc>; qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; }; mas_qhm_a2noc_cfg: mas-qhm-a2noc-cfg { @@ -592,6 +608,8 @@ qcom,qport = <0>; qcom,connections = <&slv_qns_a2noc_snoc>; qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; + qcom,prio = <1>; }; mas_qxm_crypto: mas-qxm-crypto { @@ -603,6 +621,8 @@ qcom,connections = <&slv_qns_a2noc_snoc>; qcom,bus-dev = <&fab_aggre2_noc>; qcom,bcms = <&bcm_ce0>; + qcom,ap-owned; + qcom,prio = <2>; }; mas_qxm_ipa: mas-qxm-ipa { @@ -613,6 +633,7 @@ qcom,qport = <2>; qcom,connections = <&slv_qns_a2noc_snoc>; qcom,bus-dev = <&fab_aggre2_noc>; + qcom,prio = <2>; }; mas_xm_pcie3_1: mas-xm-pcie3-1 { @@ -623,6 +644,8 @@ qcom,qport = <6>; qcom,connections = <&slv_qns_pcie_snoc>; qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; + qcom,prio = <2>; }; mas_xm_pcie_0: mas-xm-pcie-0 { @@ -632,7 +655,9 @@ qcom,agg-ports = <1>; qcom,qport = <5>; qcom,connections = <&slv_qns_pcie_snoc>; - qcom,bus-dev = <&fab_aggre2_noc>; + qcom,bus-dev = <&fab_aggre1_noc>; + qcom,ap-owned; + qcom,prio = <2>; }; mas_xm_qdss_etr: mas-xm-qdss-etr { @@ -643,6 +668,8 @@ qcom,qport = <7>; qcom,connections = <&slv_qns_a2noc_snoc>; qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; + qcom,prio = <2>; }; mas_xm_usb3_0: mas-xm-usb3-0 { @@ -653,6 +680,7 @@ qcom,qport = <10>; qcom,connections = <&slv_qns_a2noc_snoc>; qcom,bus-dev = <&fab_aggre2_noc>; + qcom,prio = <2>; }; mas_xm_usb3_1: mas-xm-usb3-1 { @@ -663,6 +691,7 @@ qcom,qport = <11>; qcom,connections = <&slv_qns_a2noc_snoc>; qcom,bus-dev = <&fab_aggre2_noc>; + qcom,prio = <2>; }; mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp { @@ -825,12 +854,12 @@ qcom,bus-dev = <&fab_gladiator_noc>; }; - mas_ipa_core: mas-ipa-core { + mas_ipa_core_master: mas-ipa-core-master { cell-id = ; - label = "mas-ipa-core"; - qcom,buswidth = <1>; + label = "mas-ipa-core-master"; + qcom,buswidth = <8>; qcom,agg-ports = <1>; - qcom,connections = <&slv_ipa_core>; + qcom,connections = <&slv_ipa_core_slave>; qcom,bus-dev = <&fab_ipa_virt>; }; @@ -853,6 +882,8 @@ &slv_qns_memnoc_snoc>; qcom,bus-dev = <&fab_mem_noc>; qcom,bcms = <&bcm_sh3>; + qcom,ap-owned; + qcom,prio = <6>; }; mas_qhm_memnoc_cfg: mas-qhm-memnoc-cfg { @@ -874,6 +905,8 @@ qcom,connections = <&slv_qns_llcc>; qcom,bus-dev = <&fab_mem_noc>; qcom,bcms = <&bcm_sh5>; + qcom,ap-owned; + qcom,prio = <0>; }; mas_qnm_mnoc_hf: mas-qnm-mnoc-hf { @@ -884,6 +917,10 @@ qcom,qport = <4 5>; qcom,connections = <&slv_qns_apps_io &slv_qns_llcc>; qcom,bus-dev = <&fab_mem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qnm_mnoc_sf: mas-qnm-mnoc-sf { @@ -895,6 +932,10 @@ qcom,connections = <&slv_qns_apps_io &slv_qns_llcc &slv_qns_memnoc_snoc>; qcom,bus-dev = <&fab_mem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qnm_snoc_gc: mas-qnm-snoc-gc { @@ -905,6 +946,9 @@ qcom,qport = <8>; qcom,connections = <&slv_qns_llcc>; qcom,bus-dev = <&fab_mem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; }; mas_qnm_snoc_sf: mas-qnm-snoc-sf { @@ -915,6 +959,9 @@ 
qcom,qport = <9>; qcom,connections = <&slv_qns_apps_io &slv_qns_llcc>; qcom,bus-dev = <&fab_mem_noc>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; }; mas_qxm_gpu: mas-qxm-gpu { @@ -926,7 +973,8 @@ qcom,connections = <&slv_qns_apps_io &slv_qns_llcc &slv_qns_memnoc_snoc>; qcom,bus-dev = <&fab_mem_noc>; - qcom,bcms = <&bcm_sh4>; + qcom,ap-owned; + qcom,prio = <0>; }; mas_qhm_mnoc_cfg: mas-qhm-mnoc-cfg { @@ -947,6 +995,10 @@ qcom,connections = <&slv_qns_mem_noc_hf>; qcom,bus-dev = <&fab_mmss_noc>; qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qxm_camnoc_hf1: mas-qxm-camnoc-hf1 { @@ -958,6 +1010,10 @@ qcom,connections = <&slv_qns_mem_noc_hf>; qcom,bus-dev = <&fab_mmss_noc>; qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qxm_camnoc_sf: mas-qxm-camnoc-sf { @@ -969,6 +1025,10 @@ qcom,connections = <&slv_qns2_mem_noc>; qcom,bus-dev = <&fab_mmss_noc>; qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qxm_mdp0: mas-qxm-mdp0 { @@ -980,6 +1040,10 @@ qcom,connections = <&slv_qns_mem_noc_hf>; qcom,bus-dev = <&fab_mmss_noc>; qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qxm_mdp1: mas-qxm-mdp1 { @@ -991,6 +1055,10 @@ qcom,connections = <&slv_qns_mem_noc_hf>; qcom,bus-dev = <&fab_mmss_noc>; qcom,bcms = <&bcm_mm1>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qxm_rot: mas-qxm-rot { @@ -1002,6 +1070,10 @@ qcom,connections = <&slv_qns2_mem_noc>; qcom,bus-dev = <&fab_mmss_noc>; qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qxm_venus0: mas-qxm-venus0 { @@ -1013,6 +1085,10 @@ qcom,connections = <&slv_qns2_mem_noc>; qcom,bus-dev = <&fab_mmss_noc>; qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qxm_venus1: mas-qxm-venus1 { @@ -1024,6 +1100,10 @@ qcom,connections = <&slv_qns2_mem_noc>; qcom,bus-dev = <&fab_mmss_noc>; qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qxm_venus_arm9: mas-qxm-venus-arm9 { @@ -1035,6 +1115,10 @@ qcom,connections = <&slv_qns2_mem_noc>; qcom,bus-dev = <&fab_mmss_noc>; qcom,bcms = <&bcm_mm3>; + qcom,ap-owned; + qcom,prio = <0>; + qcom,forwarding; + qcom,node-qos-bcms = <7012 0 1>; }; mas_qhm_snoc_cfg: mas-qhm-snoc-cfg { @@ -1120,6 +1204,8 @@ qcom,connections = <&slv_qxs_imem &slv_qns_memnoc_gc>; qcom,bus-dev = <&fab_system_noc>; qcom,bcms = <&bcm_sn4>; + qcom,ap-owned; + qcom,prio = <2>; }; mas_alc: mas-alc { @@ -1687,10 +1773,10 @@ qcom,bus-dev = <&fab_gladiator_noc>; }; - slv_ipa_core:slv-ipa-core { - cell-id = ; - label = "slv-ipa-core"; - qcom,buswidth = <1>; + slv_ipa_core_slave:slv-ipa-core-slave { + cell-id = ; + label = "slv-ipa-core-slave"; + qcom,buswidth = <8>; qcom,agg-ports = <1>; qcom,bus-dev = <&fab_ipa_virt>; qcom,bcms = <&bcm_ip0>; diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h index 9d52d2ee53d4..8bd30d4ec339 100644 --- a/include/dt-bindings/msm/msm-bus-ids.h +++ b/include/dt-bindings/msm/msm-bus-ids.h @@ -588,7 +588,7 @@ #define MSM_BUS_SLAVE_SNOC_MEM_NOC_GC 774 #define MSM_BUS_SLAVE_SNOC_MEM_NOC_SF 775 #define MSM_BUS_SLAVE_MEM_NOC_SNOC 776 -#define 
MSM_BUS_SLAVE_IPA 777 +#define MSM_BUS_SLAVE_IPA_CORE 777 #define MSM_BUS_SLAVE_CAMNOC_UNCOMP 778 #define MSM_BUS_SLAVE_LAST 779 -- GitLab From e9b257b0babc0797d6ea3433ea2c946f6f3f7df1 Mon Sep 17 00:00:00 2001 From: Alan Kwong Date: Tue, 16 May 2017 11:40:50 -0700 Subject: [PATCH 214/786] ARM: dts: msm: add display bus paths for split voting to sdm845 Add new display data bus paths to describe display split voting topology on sdm845. CRs-Fixed: 2037879 Change-Id: I46a7face3861f734c72582c25d8b7b7a5b5f832a Signed-off-by: Alan Kwong --- arch/arm64/boot/dts/qcom/sdm845-sde.dtsi | 58 ++++++++++++++++++++---- 1 file changed, 50 insertions(+), 8 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi index 2ae38320c237..e31f8fdd1cd5 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi @@ -209,13 +209,33 @@ /* data and reg bus scale settings */ qcom,sde-data-bus { - qcom,msm-bus,name = "mdss_sde"; + qcom,msm-bus,name = "mdss_sde_mnoc"; qcom,msm-bus,num-cases = <3>; qcom,msm-bus,num-paths = <2>; qcom,msm-bus,vectors-KBps = - <22 512 0 0>, <23 512 0 0>, - <22 512 0 6400000>, <23 512 0 6400000>, - <22 512 0 6400000>, <23 512 0 6400000>; + <22 773 0 0>, <23 773 0 0>, + <22 773 0 6400000>, <23 773 0 6400000>, + <22 773 0 6400000>, <23 773 0 6400000>; + }; + + qcom,sde-llcc-bus { + qcom,msm-bus,name = "mdss_sde_llcc"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <132 770 0 0>, + <132 770 0 6400000>, + <132 770 0 6400000>; + }; + + qcom,sde-ebi-bus { + qcom,msm-bus,name = "mdss_sde_ebi"; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <129 512 0 0>, + <129 512 0 6400000>, + <129 512 0 6400000>; }; qcom,sde-reg-bus { @@ -252,14 +272,36 @@ /* data and reg bus scale settings */ qcom,sde-data-bus { - qcom,msm-bus,name = "disp_rsc"; + qcom,msm-bus,name = "disp_rsc_mnoc"; qcom,msm-bus,active-only; qcom,msm-bus,num-cases = <3>; qcom,msm-bus,num-paths = <2>; qcom,msm-bus,vectors-KBps = - <20003 20512 0 0>, <20004 20512 0 0>, - <20003 20512 0 6400000>, <20004 20512 0 6400000>, - <20003 20512 0 6400000>, <20004 20512 0 6400000>; + <20003 20515 0 0>, <20004 20515 0 0>, + <20003 20515 0 6400000>, <20004 20515 0 6400000>, + <20003 20515 0 6400000>, <20004 20515 0 6400000>; + }; + + qcom,sde-llcc-bus { + qcom,msm-bus,name = "disp_rsc_llcc"; + qcom,msm-bus,active-only; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <20001 20513 0 0>, + <20001 20513 0 6400000>, + <20001 20513 0 6400000>; + }; + + qcom,sde-ebi-bus { + qcom,msm-bus,name = "disp_rsc_ebi"; + qcom,msm-bus,active-only; + qcom,msm-bus,num-cases = <3>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <20000 20512 0 0>, + <20000 20512 0 6400000>, + <20000 20512 0 6400000>; }; }; -- GitLab From 397ed0b0d74767525a1a1641a0e124ce8ee5498a Mon Sep 17 00:00:00 2001 From: Haynes Mathew George Date: Fri, 5 May 2017 16:04:43 -0700 Subject: [PATCH 215/786] ASoC: msm: qdsp6v2: Apply gain based on app type Stream volume cannot be applied for usecases where the volume gain module resides in the copp topology. Expose control to set adm gain based on app type. 
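As an illustration (the app type value and gain numbers are assumptions, not part of this change): user space could write four integers such as <0, 69937, 8192, 8192> to the new 'App Type Gain' mixer control to apply a gain to every active RX copp opened with that app type. Per the handler below, the first value selects RX or TX, the second is the app type, and the last two values are averaged before being passed to adm_set_volume().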
CRs-Fixed: 2046719 Change-Id: I9f9fdbc87cc8449ddf30e3f699507a9b65438d0f Signed-off-by: Haynes Mathew George --- sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 50 ++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c index 019cbaecba49..d67296f238f0 100644 --- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c +++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c @@ -10605,10 +10605,60 @@ static int msm_routing_put_app_type_cfg_control(struct snd_kcontrol *kcontrol, return 0; } +static int msm_routing_put_app_type_gain_control(struct snd_kcontrol *kcontrol, + struct snd_ctl_elem_value *ucontrol) +{ + int j, fe_id, be_id, port_type; + int ret = 0; + unsigned long copp; + struct msm_pcm_routing_bdai_data *bedai; + int dir = ucontrol->value.integer.value[0] ? SESSION_TYPE_TX : + SESSION_TYPE_RX; + int app_type = ucontrol->value.integer.value[1]; + int gain = (ucontrol->value.integer.value[2] + + ucontrol->value.integer.value[3])/2; + + port_type = (dir == SESSION_TYPE_RX) ? MSM_AFE_PORT_TYPE_RX : + MSM_AFE_PORT_TYPE_TX; + + mutex_lock(&routing_lock); + for (be_id = 0; be_id < MSM_BACKEND_DAI_MAX; be_id++) { + if (is_be_dai_extproc(be_id)) + continue; + + bedai = &msm_bedais[be_id]; + if (afe_get_port_type(bedai->port_id) != port_type) + continue; + + if (!bedai->active) + continue; + + for (fe_id = 0; fe_id < MSM_FRONTEND_DAI_MAX; fe_id++) { + if (!test_bit(fe_id, &bedai->fe_sessions[0])) + continue; + + if (app_type != + fe_dai_app_type_cfg[fe_id][dir][be_id].app_type) + continue; + + copp = session_copp_map[fe_id][dir][be_id]; + for (j = 0; j < MAX_COPPS_PER_PORT; j++) { + if (!test_bit(j, &copp)) + continue; + ret |= adm_set_volume(bedai->port_id, j, gain); + } + } + } + mutex_unlock(&routing_lock); + return ret ? -EINVAL : 0; +} + static const struct snd_kcontrol_new app_type_cfg_controls[] = { SOC_SINGLE_MULTI_EXT("App Type Config", SND_SOC_NOPM, 0, 0xFFFFFFFF, 0, 128, msm_routing_get_app_type_cfg_control, msm_routing_put_app_type_cfg_control), + SOC_SINGLE_MULTI_EXT("App Type Gain", SND_SOC_NOPM, 0, + 0x2000, 0, 4, NULL, msm_routing_put_app_type_gain_control) }; static int msm_routing_get_lsm_app_type_cfg_control( -- GitLab From 72ec8bbf4cdb54794210035ab203f30500a19d0d Mon Sep 17 00:00:00 2001 From: George Shen Date: Thu, 22 Jun 2017 14:45:54 -0700 Subject: [PATCH 216/786] msm: kgsl: Fix racing in HFI message deletion Move more HFI message processing into spinlock protected area in tasklet function that receives HFI message and notifies GMU driver thread. The racing happens when GPU driver times out waiting for HFI message. Receiver function just received the message and processing it. 
CRs-Fixed: 2052606 Change-Id: I495ba923961f8c91f9806eee0766a96f2b4371d6 Signed-off-by: George Shen --- drivers/gpu/msm/kgsl_hfi.c | 3 ++- drivers/gpu/msm/kgsl_hfi.h | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c index e91550a67d45..067b276f92d1 100644 --- a/drivers/gpu/msm/kgsl_hfi.c +++ b/drivers/gpu/msm/kgsl_hfi.c @@ -188,9 +188,9 @@ static void receive_ack_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp) msg->seqnum == rsp->ret_hdr.seqnum) break; } - spin_unlock(&hfi->msglock); if (msg == NULL) { + spin_unlock(&hfi->msglock); dev_err(&gmu->pdev->dev, "Cannot find receiver of ack msg with id=%d\n", rsp->ret_hdr.id); @@ -199,6 +199,7 @@ static void receive_ack_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp) memcpy(&msg->results, (void *) rsp, rsp->hdr.size << 2); complete(&msg->msg_complete); + spin_unlock(&hfi->msglock); } static void receive_err_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp) diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h index 83abec4cf06f..8eedbfa217f8 100644 --- a/drivers/gpu/msm/kgsl_hfi.h +++ b/drivers/gpu/msm/kgsl_hfi.h @@ -115,7 +115,7 @@ enum hfi_f2h_qpri { HFI_F2H_QPRI_DEBUG = 40, }; -#define HFI_RSP_TIMEOUT 50 /* msec */ +#define HFI_RSP_TIMEOUT 100 /* msec */ #define HFI_H2F_CMD_IRQ_MASK BIT(0) enum hfi_msg_type { -- GitLab From 60242815ae1f18714afc6b2ef1497bf82fcd22ae Mon Sep 17 00:00:00 2001 From: Nicholas Troast Date: Tue, 20 Jun 2017 09:33:21 -0700 Subject: [PATCH 217/786] power: qcom: qpnp-fg-gen3: add missing break in switch statement A break is missing in a switch statement. Add it. Change-Id: Ib298194957ff2cc1aaba5bd4335afe9d0f9cf6f5 Signed-off-by: Nicholas Troast --- drivers/power/supply/qcom/qpnp-fg-gen3.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index 73d54c6b6d60..75e79bbf9c7c 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -3083,6 +3083,7 @@ static int fg_psy_set_property(struct power_supply *psy, pval->intval); return -EINVAL; } + break; case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: rc = fg_set_constant_chg_voltage(chip, pval->intval); break; -- GitLab From 37328aacce884cd76864c1d0e72aba41b354d133 Mon Sep 17 00:00:00 2001 From: Matt Wagantall Date: Sun, 14 Sep 2014 17:19:50 -0700 Subject: [PATCH 218/786] arm64: Dump memory surrounding PC, LR and SP registers only Due to the verbosity of printing, dumping memory regions for all register when many CPUs are online may contribute to flooded kernel logs. Spinlock lockups due to the printing have also been seen to result, compounding the problem due to additional prints. Additionally, the other registers may contain pointers to device memory. The region around a particular device memory location may not be physically implemented, or may have side-effects on access which result in crashes which obscure the original issue. 
Change-Id: I7440b2d77f03fd34f36816f549588fa89322ce5f Signed-off-by: Matt Wagantall Signed-off-by: Abhimanyu Kapur Signed-off-by: Patrick Daly --- arch/arm64/kernel/process.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index fe8f94a434a9..fc1a28633244 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -225,18 +225,12 @@ static void show_data(unsigned long addr, int nbytes, const char *name) static void show_extra_register_data(struct pt_regs *regs, int nbytes) { mm_segment_t fs; - unsigned int i; fs = get_fs(); set_fs(KERNEL_DS); show_data(regs->pc - nbytes, nbytes * 2, "PC"); show_data(regs->regs[30] - nbytes, nbytes * 2, "LR"); show_data(regs->sp - nbytes, nbytes * 2, "SP"); - for (i = 0; i < 30; i++) { - char name[4]; - snprintf(name, sizeof(name), "X%u", i); - show_data(regs->regs[i] - nbytes, nbytes * 2, name); - } set_fs(fs); } -- GitLab From f8c82541f4d298bf00e4776e46f7f01cd0689e90 Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Thu, 22 Jun 2017 16:45:59 -0700 Subject: [PATCH 219/786] soc: qcom: glink: Change dummy xprt allocation to kzalloc Change allocation from kmalloc to kzalloc to prevent any usage of function pointers that are not initialized. CRs-Fixed: 2065710 Change-Id: I3a7589158e799e19072ca6a6847986b99da5d9ab Signed-off-by: Chris Lew --- drivers/soc/qcom/glink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c index fd4c6042d635..dcf6654293d7 100644 --- a/drivers/soc/qcom/glink.c +++ b/drivers/soc/qcom/glink.c @@ -4095,7 +4095,7 @@ static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx( xprt_ptr = kzalloc(sizeof(*xprt_ptr), GFP_KERNEL); if (!xprt_ptr) return ERR_PTR(-ENOMEM); - if_ptr = kmalloc(sizeof(*if_ptr), GFP_KERNEL); + if_ptr = kzalloc(sizeof(*if_ptr), GFP_KERNEL); if (!if_ptr) { kfree(xprt_ptr); return ERR_PTR(-ENOMEM); -- GitLab From e9e3b40246a1ddf407cf1ec416aaafba6ca38d98 Mon Sep 17 00:00:00 2001 From: Kyle Yan Date: Thu, 22 Jun 2017 16:37:17 -0700 Subject: [PATCH 220/786] ARM: dts: msm: Remove votes for LVS2 on SDM845 On SDM845, LVS2 no longer needs to be enabled by PIL, as it is now used only by physical sensors, which the SSC can enable and disable on its own once it is up. Change-Id: I5b37305a0aac61076d4cd1296efdd78e2c9d0897 Signed-off-by: Kyle Yan --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 83f116688524..1eaeb59d9785 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -1771,9 +1771,8 @@ interrupts = <0 494 1>; vdd_cx-supply = <&pm8998_l27_level>; - vdd_px-supply = <&pm8998_lvs2>; qcom,vdd_cx-uV-uA = ; - qcom,proxy-reg-names = "vdd_cx", "vdd_px"; + qcom,proxy-reg-names = "vdd_cx"; qcom,keep-proxy-regs-on; clocks = <&clock_rpmh RPMH_CXO_CLK>; -- GitLab From 51096c4082431c4aa27d30132a5633b307ecb348 Mon Sep 17 00:00:00 2001 From: Kyle Yan Date: Mon, 19 Jun 2017 17:25:05 -0700 Subject: [PATCH 221/786] drivers: edac: Fix error code parsing and initial check for bus errors Primary error code reporting is out of sync with the latest documentation. Update the error values to properly reflect what is documented. Further, add an initial check for bus errors before reporting any uncorrectable error so that a bus error is not confused with an ECC error.
Change-Id: If5e6adee3420751e8ad399c57568985fc63bf66c Signed-off-by: Kyle Yan --- drivers/edac/kryo3xx_arm64_edac.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c index 4ac880bb97a2..f5bb3ed2bb65 100644 --- a/drivers/edac/kryo3xx_arm64_edac.c +++ b/drivers/edac/kryo3xx_arm64_edac.c @@ -62,7 +62,7 @@ static inline void set_errxctlr_el1(void) static inline void set_errxmisc_overflow(void) { - u64 val = 0x7F7F00000000; + u64 val = 0x7F7F00000000ULL; asm volatile("msr s3_0_c5_c5_0, %0" : : "r" (val)); } @@ -118,8 +118,9 @@ static const struct errors_edac errors[] = { #define DATA_BUF_ERR 0x2 #define CACHE_DATA_ERR 0x6 #define CACHE_TAG_DIRTY_ERR 0x7 -#define TLB_PARITY_ERR 0x8 -#define BUS_ERROR 0x18 +#define TLB_PARITY_ERR_DATA 0x8 +#define TLB_PARITY_ERR_TAG 0x9 +#define BUS_ERROR 0x12 struct erp_drvdata { struct edac_device_ctl_info *edev_ctl; @@ -217,10 +218,13 @@ static void dump_err_reg(int errorcode, int level, u64 errxstatus, u64 errxmisc, edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from cache tag or dirty RAM\n"); break; - case TLB_PARITY_ERR: + case TLB_PARITY_ERR_DATA: edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB RAM\n"); break; + case TLB_PARITY_ERR_TAG: + edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB DATA\n"); + case BUS_ERROR: edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n"); break; @@ -283,6 +287,16 @@ static void kryo3xx_check_l1_l2_ecc(void *info) spin_unlock_irqrestore(&local_handler_lock, flags); } +static bool l3_is_bus_error(u64 errxstatus) +{ + if (KRYO3XX_ERRXSTATUS_SERR(errxstatus) == BUS_ERROR) { + edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n"); + return true; + } + + return false; +} + static void kryo3xx_check_l3_scu_error(struct edac_device_ctl_info *edev_ctl) { u64 errxstatus = 0; @@ -296,6 +310,11 @@ static void kryo3xx_check_l3_scu_error(struct edac_device_ctl_info *edev_ctl) if (KRYO3XX_ERRXSTATUS_VALID(errxstatus) && KRYO3XX_ERRXMISC_LVL(errxmisc) == L3) { + if (l3_is_bus_error(errxstatus)) { + if (edev_ctl->panic_on_ue) + panic("Causing panic due to Bus Error\n"); + return; + } if (KRYO3XX_ERRXSTATUS_UE(errxstatus)) { edac_printk(KERN_CRIT, EDAC_CPU, "Detected L3 uncorrectable error\n"); dump_err_reg(KRYO3XX_L3_UE, L3, errxstatus, errxmisc, -- GitLab From ac170f9aebf919550fa47e0afb87236f50b84293 Mon Sep 17 00:00:00 2001 From: Shivendra Kakrania Date: Mon, 22 May 2017 13:08:09 -0700 Subject: [PATCH 222/786] msm: vidc: Enable LLC bandwidth configuration Configure llc bw according to current load. It also fixes double bw voting when governor is enabled. 
CRs-Fixed: 2050534 Change-Id: Ib2ea7f341ef0cbe5be036f8af2e36e6d016a7805 Signed-off-by: Shivendra Kakrania --- .../msm/vidc/governors/msm_vidc_dyn_gov.c | 1 + .../media/platform/msm/vidc/msm_vidc_clocks.c | 4 +++ .../media/platform/msm/vidc/msm_vidc_common.h | 1 - .../platform/msm/vidc/msm_vidc_res_parse.c | 9 ++++- .../platform/msm/vidc/msm_vidc_resources.h | 1 + drivers/media/platform/msm/vidc/venus_hfi.c | 34 +++++++++---------- .../media/platform/msm/vidc/vidc_hfi_api.h | 4 +-- 7 files changed, 32 insertions(+), 22 deletions(-) diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c index d329a8b78caa..06187687beb0 100644 --- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c +++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c @@ -871,6 +871,7 @@ static int __event_handler(struct devfreq *devfreq, unsigned int event, switch (event) { case DEVFREQ_GOV_START: case DEVFREQ_GOV_RESUME: + case DEVFREQ_GOV_SUSPEND: mutex_lock(&devfreq->lock); rc = update_devfreq(devfreq); mutex_unlock(&devfreq->lock); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c index 5e366d0acde0..6867735aeca7 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c @@ -178,6 +178,10 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) } vote_data[i].work_mode = inst->clk_data.work_mode; fill_recon_stats(inst, &vote_data[i]); + + if (core->resources.sys_cache_enabled) + vote_data[i].use_sys_cache = true; + i++; } mutex_unlock(&core->lock); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h index 5c653f5c1e49..7bed81144361 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h @@ -107,7 +107,6 @@ u32 get_frame_size_nv12_ubwc(int plane, u32 height, u32 width); u32 get_frame_size_rgba(int plane, u32 height, u32 width); u32 get_frame_size_nv21(int plane, u32 height, u32 width); u32 get_frame_size_tp10_ubwc(int plane, u32 height, u32 width); -void msm_comm_set_use_sys_cache(struct msm_vidc_inst *inst); struct vb2_buffer *msm_comm_get_vb_using_vidc_buffer( struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf); struct msm_vidc_buffer *msm_comm_get_buffer_using_device_planes( diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c index 062795fb99c3..afb88936329b 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c @@ -27,6 +27,8 @@ enum clock_properties { CLOCK_PROP_HAS_MEM_RETENTION = 1 << 1, }; +#define PERF_GOV "performance" + static inline struct device *msm_iommu_get_ctx(const char *ctx_name) { return NULL; @@ -426,6 +428,8 @@ static int msm_vidc_populate_bus(struct device *dev, buses->bus_tbl = temp_table; bus = &buses->bus_tbl[buses->count]; + memset(bus, 0x0, sizeof(struct bus_info)); + rc = of_property_read_string(dev->of_node, "label", &temp_name); if (rc) { dprintk(VIDC_ERR, "'label' not found in node\n"); @@ -457,9 +461,12 @@ static int msm_vidc_populate_bus(struct device *dev, rc = 0; dprintk(VIDC_DBG, "'qcom,bus-governor' not found, default to performance governor\n"); - bus->governor = "performance"; + bus->governor = PERF_GOV; } + if (!strcmp(bus->governor, PERF_GOV)) + bus->is_prfm_gov_used = true; + rc = 
of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps", range, ARRAY_SIZE(range)); if (rc) { diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h index dda5e80a9ebc..755f0c86f9d3 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h @@ -112,6 +112,7 @@ struct bus_info { struct devfreq_dev_profile devfreq_prof; struct devfreq *devfreq; struct msm_bus_client_handle *client; + bool is_prfm_gov_used; }; struct bus_set { diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c index f8d88429cd2e..dad4b608fd47 100644 --- a/drivers/media/platform/msm/vidc/venus_hfi.c +++ b/drivers/media/platform/msm/vidc/venus_hfi.c @@ -809,21 +809,22 @@ static int __unvote_buses(struct venus_hfi_device *device) int rc = 0; struct bus_info *bus = NULL; + kfree(device->bus_vote.data); + device->bus_vote.data = NULL; + device->bus_vote.data_count = 0; + venus_hfi_for_each_bus(device, bus) { - int local_rc = 0; unsigned long zero = 0; - rc = devfreq_suspend_device(bus->devfreq); + if (!bus->is_prfm_gov_used) + rc = devfreq_suspend_device(bus->devfreq); + else + rc = __devfreq_target(bus->dev, &zero, 0); + if (rc) goto err_unknown_device; - - local_rc = __devfreq_target(bus->dev, &zero, 0); - rc = rc ?: local_rc; } - if (rc) - dprintk(VIDC_WARN, "Failed to unvote some buses\n"); - err_unknown_device: return rc; } @@ -857,15 +858,14 @@ static int __vote_buses(struct venus_hfi_device *device, venus_hfi_for_each_bus(device, bus) { if (bus && bus->devfreq) { - /* NOP if already resume */ - rc = devfreq_resume_device(bus->devfreq); - if (rc) - goto err_no_mem; - - /* Kick devfreq awake incase _resume() didn't do it */ - - bus->devfreq->nb.notifier_call( - &bus->devfreq->nb, 0, NULL); + if (!bus->is_prfm_gov_used) { + rc = devfreq_resume_device(bus->devfreq); + if (rc) + goto err_no_mem; + } else { + bus->devfreq->nb.notifier_call( + &bus->devfreq->nb, 0, NULL); + } } } diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 47ce0ba3f0f1..29a7fe333949 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -1340,10 +1340,8 @@ struct vidc_bus_vote_data { int complexity_factor; unsigned int lcu_size; enum msm_vidc_power_mode power_mode; - struct imem_ab_table *imem_ab_tbl; enum hal_work_mode work_mode; - unsigned long bitrate; - u32 imem_ab_tbl_size; + bool use_sys_cache; }; struct vidc_clk_scale_data { -- GitLab From d4810dbeb6826c6d909317bfbc543fcabc1bb487 Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Wed, 21 Jun 2017 15:35:40 -0700 Subject: [PATCH 223/786] ARM: dts: msm: Fix support for WSA device detection on QRD SDM845 Add WSA device nodes with correct gpio reset pin. This overwrites any WSA node specific to QRD with properties specific to QRD. 
CRs-fixed: 2063578 Change-Id: I00529cf79bc981c4d45388b28f65b7227daff21f Signed-off-by: Karthikeyan Mani --- arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi index c2fbed52c159..7a1191d85a43 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi @@ -143,6 +143,24 @@ }; }; +&slim_aud { + tavil_codec { + swr_master { + wsa881x_0211: wsa881x@20170211 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x20170211>; + qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>; + }; + + wsa881x_0213: wsa881x@21170213 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x21170213>; + qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>; + }; + }; + }; +}; + &ufsphy_mem { compatible = "qcom,ufs-phy-qmp-v3"; -- GitLab From e2f8af3d91063d48cdf7a5886a9036c51e123353 Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Sun, 4 Dec 2016 21:37:41 +0530 Subject: [PATCH 224/786] mmc: mmc: Handle error case in mmc_suspend Currently, if suspend fails for some reason, the error case is not handled properly: 1. With respect to suspend_clk_scaling, the claim_host counters go bad. 2. If any error occurs after halting and disabling cmdq, the error is returned to the PM framework, which assumes the card is not suspended, whereas CQ remains disabled. This causes I/O requests to hang since no request can be processed any further. Fix this by handling the error cases properly. Change-Id: I8691eebcb0e2d089720505475aa0297efce5cca5 Signed-off-by: Ritesh Harjani Signed-off-by: Sayali Lokhande Signed-off-by: Siba Prasad --- drivers/mmc/core/mmc.c | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 564b5c97f8b3..cf00d1a4baba 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -2610,7 +2610,7 @@ static int mmc_test_awake_ext_csd(struct mmc_host *host) static int _mmc_suspend(struct mmc_host *host, bool is_suspend) { - int err = 0; + int err = 0, ret; BUG_ON(!host); BUG_ON(!host->card); @@ -2619,6 +2619,8 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend) if (err) { pr_err("%s: %s: fail to suspend clock scaling (%d)\n", mmc_hostname(host), __func__, err); + if (host->card->cmdq_init) + wake_up(&host->cmdq_ctx.wait); return err; } @@ -2643,12 +2645,12 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend) if (mmc_card_doing_bkops(host->card)) { err = mmc_stop_bkops(host->card); if (err) - goto out; + goto out_err; } err = mmc_flush_cache(host->card); if (err) - goto out; + goto out_err; if (mmc_can_sleepawake(host)) { /* @@ -2665,16 +2667,38 @@ static int _mmc_suspend(struct mmc_host *host, bool is_suspend) err = mmc_deselect_cards(host); } - if (!err) { - mmc_power_off(host); - mmc_card_set_suspended(host->card); + if (err) + goto out_err; + mmc_power_off(host); + mmc_card_set_suspended(host->card); + + goto out; + +out_err: + /* + * In case of err let's put controller back in cmdq mode and unhalt + * the controller. + * We expect cmdq_enable and unhalt won't return any error + * since it is anyway enabling few registers.
+ */ + if (host->card->cmdq_init) { + mmc_host_clk_hold(host); + ret = host->cmdq_ops->enable(host); + if (ret) + pr_err("%s: %s: enabling CMDQ mode failed (%d)\n", + mmc_hostname(host), __func__, ret); + mmc_host_clk_release(host); + mmc_cmdq_halt(host, false); } + out: /* Kick CMDQ thread to process any requests came in while suspending */ if (host->card->cmdq_init) wake_up(&host->cmdq_ctx.wait); mmc_release_host(host); + if (err) + mmc_resume_clk_scaling(host); return err; } -- GitLab From 055c827b949fd816f6be4b3c511e5119ed8c9a64 Mon Sep 17 00:00:00 2001 From: Siba Prasad Date: Mon, 5 Dec 2016 18:29:39 +0530 Subject: [PATCH 225/786] mmc: bus: Handle error in case bus_ops suspend fails bus_ops->suspend may fail for some reason (e.g. due to a flush timeout). In such cases, if we return the error to the PM framework from here without calling pm_generic_resume, mmc requests may get stuck since the PM framework will assume that the mmc bus is not suspended (because of the error) and it won't call resume again. Fix this by calling pm_generic_resume when bus_ops->suspend returns an error in mmc_bus_suspend. Change-Id: Iaef485d0b47b005aa88e61cd77a2b7b65931def1 Signed-off-by: Ritesh Harjani Signed-off-by: Sayali Lokhande Signed-off-by: Siba Prasad --- drivers/mmc/core/bus.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 1c28cf87d71c..e3696c5c2dac 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -167,6 +167,19 @@ static int mmc_bus_suspend(struct device *dev) if (mmc_bus_needs_resume(host)) return 0; ret = host->bus_ops->suspend(host); + + /* + * bus_ops->suspend may fail due to some reason + * In such cases if we return error to PM framework + * from here without calling pm_generic_resume then mmc + * request may get stuck since PM framework will assume + * that mmc bus is not suspended (because of error) and + * it won't call resume again. + * + * So in case of error call pm_generic_resume(). + */ + if (ret) + pm_generic_resume(dev); return ret; } -- GitLab From 615439d4c91c4dc10efded0fd1051460cf53af6a Mon Sep 17 00:00:00 2001 From: Hareesh Gundu Date: Fri, 16 Jun 2017 17:06:57 +0530 Subject: [PATCH 226/786] Revert "msm: kgsl: Offload mementry destroy work to separate thread" This reverts commit 281fcb5e184b9d1074dd404016cebacce12a8664. This is done to address an issue where the OOM killer ends up killing the foreground application.
Change-Id: Ie4c078d706fdf1c13ad45840f72b414ddc37c1d0 Signed-off-by: Hareesh Gundu Signed-off-by: Venkateswara Rao Tadikonda --- drivers/gpu/msm/kgsl.c | 35 ++++++++--------------------------- 1 file changed, 8 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 6bd212d66a72..364e32fb7bfa 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -256,13 +256,6 @@ static void _deferred_put(struct work_struct *work) kgsl_mem_entry_put(entry); } -static inline void -kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry) -{ - if (entry) - queue_work(kgsl_driver.mem_workqueue, &entry->work); -} - static inline struct kgsl_mem_entry * kgsl_mem_entry_create(void) { @@ -272,7 +265,6 @@ kgsl_mem_entry_create(void) kref_init(&entry->refcount); /* put this ref in userspace memory alloc and map ioctls */ kref_get(&entry->refcount); - INIT_WORK(&entry->work, _deferred_put); } return entry; @@ -1882,7 +1874,7 @@ long kgsl_ioctl_sharedmem_free(struct kgsl_device_private *dev_priv, return -EINVAL; ret = gpumem_free_entry(entry); - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return ret; } @@ -1900,7 +1892,7 @@ long kgsl_ioctl_gpumem_free_id(struct kgsl_device_private *dev_priv, return -EINVAL; ret = gpumem_free_entry(entry); - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return ret; } @@ -1937,7 +1929,8 @@ static void gpuobj_free_fence_func(void *priv) { struct kgsl_mem_entry *entry = priv; - kgsl_mem_entry_put_deferred(entry); + INIT_WORK(&entry->work, _deferred_put); + queue_work(kgsl_driver.mem_workqueue, &entry->work); } static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv, @@ -2001,7 +1994,7 @@ long kgsl_ioctl_gpuobj_free(struct kgsl_device_private *dev_priv, else ret = -EINVAL; - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return ret; } @@ -3381,13 +3374,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv, if (entry == NULL) return -EINVAL; - if (!kgsl_mem_entry_set_pend(entry)) { - kgsl_mem_entry_put(entry); - return -EBUSY; - } - if (entry->memdesc.cur_bindings != 0) { - kgsl_mem_entry_unset_pend(entry); kgsl_mem_entry_put(entry); return -EINVAL; } @@ -3396,7 +3383,7 @@ long kgsl_ioctl_sparse_phys_free(struct kgsl_device_private *dev_priv, /* One put for find_id(), one put for the kgsl_mem_entry_create() */ kgsl_mem_entry_put(entry); - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return 0; } @@ -3456,13 +3443,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv, if (entry == NULL) return -EINVAL; - if (!kgsl_mem_entry_set_pend(entry)) { - kgsl_mem_entry_put(entry); - return -EBUSY; - } - if (entry->bind_tree.rb_node != NULL) { - kgsl_mem_entry_unset_pend(entry); kgsl_mem_entry_put(entry); return -EINVAL; } @@ -3471,7 +3452,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv, /* One put for find_id(), one put for the kgsl_mem_entry_create() */ kgsl_mem_entry_put(entry); - kgsl_mem_entry_put_deferred(entry); + kgsl_mem_entry_put(entry); return 0; } @@ -4869,7 +4850,7 @@ static int __init kgsl_core_init(void) WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry", - WQ_MEM_RECLAIM, 0); + WQ_UNBOUND | WQ_MEM_RECLAIM, 0); kgsl_events_init(); -- GitLab From a40bed26b8bbd762f115ac3d0e14dbd3449a0215 Mon Sep 17 00:00:00 2001 From: Amit Nischal Date: Wed, 14 Jun 2017 10:45:46 +0530 Subject: [PATCH 227/786] clk: qcom: Check for errors 
during RCG read There could be instances where the RCG configuration update or readback could fail. Notify the caller of the failure. Add support to change scope of mux_div_get_src_div() in order to use it globally. Change-Id: Ica07c28cede695785db81697effcb40ab6b717d4 Signed-off-by: Amit Nischal --- drivers/clk/qcom/clk-regmap-mux-div.c | 22 +++++++++++++++------- drivers/clk/qcom/clk-regmap-mux-div.h | 3 ++- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/drivers/clk/qcom/clk-regmap-mux-div.c b/drivers/clk/qcom/clk-regmap-mux-div.c index 9593aefb0bf6..942a68e2a650 100644 --- a/drivers/clk/qcom/clk-regmap-mux-div.c +++ b/drivers/clk/qcom/clk-regmap-mux-div.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2015, Linaro Limited - * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -64,20 +64,26 @@ int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div) return -EBUSY; } -static void __mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, +int mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, u32 *div) { + int ret = 0; u32 val, __div, __src; const char *name = clk_hw_get_name(&md->clkr.hw); - regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val); + ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val); + if (ret) + return ret; if (val & CMD_RCGR_DIRTY_CFG) { pr_err("%s: RCG configuration is pending\n", name); - return; + return -EBUSY; } - regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val); + ret = regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val); + if (ret) + return ret; + __src = (val >> md->src_shift); __src &= BIT(md->src_width) - 1; *src = __src; @@ -85,6 +91,8 @@ static void __mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, __div = (val >> md->hid_shift); __div &= BIT(md->hid_width) - 1; *div = __div; + + return ret; } static int mux_div_enable(struct clk_hw *hw) @@ -181,7 +189,7 @@ static u8 mux_div_get_parent(struct clk_hw *hw) const char *name = clk_hw_get_name(hw); u32 i, div, src = 0; - __mux_div_get_src_div(md, &src, &div); + mux_div_get_src_div(md, &src, &div); for (i = 0; i < clk_hw_get_num_parents(hw); i++) if (src == md->parent_map[i].cfg) @@ -222,7 +230,7 @@ static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate) int i, num_parents = clk_hw_get_num_parents(hw); const char *name = clk_hw_get_name(hw); - __mux_div_get_src_div(md, &src, &div); + mux_div_get_src_div(md, &src, &div); for (i = 0; i < num_parents; i++) if (src == md->parent_map[i].cfg) { struct clk_hw *p = clk_hw_get_parent_by_index(hw, i); diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h index 6fac5c54a824..63a696a96033 100644 --- a/drivers/clk/qcom/clk-regmap-mux-div.h +++ b/drivers/clk/qcom/clk-regmap-mux-div.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2015, Linaro Limited - * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -61,5 +61,6 @@ struct clk_regmap_mux_div { extern const struct clk_ops clk_regmap_mux_div_ops; int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div); +int mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src, u32 *div); #endif -- GitLab From b41e057a26c90a0f002c2c715bff54f0c19c9438 Mon Sep 17 00:00:00 2001 From: Ritesh Harjani Date: Tue, 28 Mar 2017 13:19:26 +0530 Subject: [PATCH 228/786] mmc: sdhci-msm: Change pm_qos cpu groups latency to PM_QOS_DEFAULT_VALUE In current pm_qos implementation - when the new pm_qos vote request comes, the previous cpu group where pm_qos vote was put is determined and removed if the current cpu group is different than previous. If the pm_qos vote of performance mode latency is put during initialization, there can be a case where this vote will never be released - since during init phase we can't cache the prev pm_qos cpu group (pm_qos_prev_cpu = -1). Thus during the actual I/O sdhci_request the pm_qos_prev_cpu will be -1 and unless the request comes once onto each of those cpu group, the pm_qos voting can never be released. Hence change this pm_qos vote for all cpu groups to PM_QOS_DEFAULT_VALUE during init phase. Change-Id: I71249b58f41850a8a84e6165d6df936eba13b218 Signed-off-by: Ritesh Harjani --- drivers/mmc/host/sdhci-msm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index caf8dd1ec380..795d0a075e7f 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -3814,8 +3814,8 @@ void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host, group->req.type = PM_QOS_REQ_AFFINE_CORES; cpumask_copy(&group->req.cpus_affine, &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]); - /* For initialization phase, set the performance mode latency */ - group->latency = latency[i].latency[SDHCI_PERFORMANCE_MODE]; + /* We set default latency here for all pm_qos cpu groups. */ + group->latency = PM_QOS_DEFAULT_VALUE; pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY, group->latency); pr_info("%s (): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n", -- GitLab From 07b40c513d430d0e5d69e506cd6c44eee082cc92 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Fri, 23 Jun 2017 16:17:20 +0530 Subject: [PATCH 229/786] sched: improve the scheduler This change is for general scheduler improvement. Change-Id: Ib4795668b27709c053046f26de72140f237a9ebe Signed-off-by: Pavankumar Kondeti --- kernel/sched/boost.c | 2 -- kernel/sched/walt.c | 50 ++++++++------------------------------------ 2 files changed, 9 insertions(+), 43 deletions(-) diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c index f5e877919d85..1a3309b1a312 100644 --- a/kernel/sched/boost.c +++ b/kernel/sched/boost.c @@ -10,7 +10,6 @@ * GNU General Public License for more details. 
*/ -#include #include "sched.h" #include #include @@ -140,7 +139,6 @@ static void _sched_set_boost(int old_val, int type) case RESTRAINED_BOOST: freq_aggr_threshold_backup = update_freq_aggregate_threshold(1); - mod_timer(&sched_grp_timer, jiffies + 1); break; default: diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index e4bd0f4231f8..50f889b1197e 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -100,45 +100,6 @@ static void release_rq_locks_irqrestore(const cpumask_t *cpus, local_irq_restore(*flags); } -struct timer_list sched_grp_timer; -static void sched_agg_grp_load(unsigned long data) -{ - struct sched_cluster *cluster; - unsigned long flags; - int cpu; - - acquire_rq_locks_irqsave(cpu_possible_mask, &flags); - - for_each_sched_cluster(cluster) { - u64 aggr_grp_load = 0; - - for_each_cpu(cpu, &cluster->cpus) { - struct rq *rq = cpu_rq(cpu); - - if (rq->curr) - update_task_ravg(rq->curr, rq, TASK_UPDATE, - sched_ktime_clock(), 0); - aggr_grp_load += - rq->grp_time.prev_runnable_sum; - } - - cluster->aggr_grp_load = aggr_grp_load; - } - - release_rq_locks_irqrestore(cpu_possible_mask, &flags); - - if (sched_boost() == RESTRAINED_BOOST) - mod_timer(&sched_grp_timer, jiffies + 1); -} - -static int __init setup_sched_grp_timer(void) -{ - init_timer_deferrable(&sched_grp_timer); - sched_grp_timer.function = sched_agg_grp_load; - return 0; -} -late_initcall(setup_sched_grp_timer); - /* 1 -> use PELT based load stats, 0 -> use window-based load stats */ unsigned int __read_mostly walt_disabled = 0; @@ -3058,6 +3019,8 @@ void walt_irq_work(struct irq_work *irq_work) wc = sched_ktime_clock(); for_each_sched_cluster(cluster) { + u64 aggr_grp_load = 0; + raw_spin_lock(&cluster->load_lock); for_each_cpu(cpu, &cluster->cpus) { @@ -3066,14 +3029,19 @@ void walt_irq_work(struct irq_work *irq_work) update_task_ravg(rq->curr, rq, TASK_UPDATE, wc, 0); account_load_subtractions(rq); + aggr_grp_load += rq->grp_time.prev_runnable_sum; } - - cpufreq_update_util(rq, 0); } + cluster->aggr_grp_load = aggr_grp_load; + raw_spin_unlock(&cluster->load_lock); } + for_each_sched_cluster(cluster) + for_each_cpu(cpu, &cluster->cpus) + cpufreq_update_util(cpu_rq(cpu), 0); + for_each_cpu(cpu, cpu_possible_mask) raw_spin_unlock(&cpu_rq(cpu)->lock); -- GitLab From 030473ed982b0b9409e24e4fcdfbef8dabfed0d2 Mon Sep 17 00:00:00 2001 From: Lynus Vaz Date: Thu, 22 Jun 2017 17:33:06 +0530 Subject: [PATCH 230/786] msm: kgsl: Dump some A6XX registers before crashdump The VBIF and certain status registers are modified when the crashdump script runs. Dump these registers before running the crashdump script. 
Change-Id: I0d3f8df553273f399f5faf66d1ff52b33ae07a01 Signed-off-by: Lynus Vaz --- drivers/gpu/msm/adreno_a6xx_snapshot.c | 55 ++++++++++++++++++++------ 1 file changed, 42 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c index 54acd73998e2..216108354828 100644 --- a/drivers/gpu/msm/adreno_a6xx_snapshot.c +++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c @@ -257,16 +257,17 @@ static const unsigned int a6xx_registers[] = { 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001B, 0x001e, 0x0032, 0x0038, 0x003C, 0x0042, 0x0042, 0x0044, 0x0044, 0x0047, 0x0047, 0x0056, 0x0056, 0x00AD, 0x00AE, 0x00B0, 0x00FB, - 0x0100, 0x011D, 0x0200, 0x020D, 0x0210, 0x0213, 0x0218, 0x023D, - 0x0400, 0x04F9, 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, - 0x0533, 0x0533, 0x0540, 0x0555, + 0x0100, 0x011D, 0x0200, 0x020D, 0x0218, 0x023D, 0x0400, 0x04F9, + 0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533, + 0x0540, 0x0555, /* CP */ - 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0827, - 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, 0x0880, 0x088A, - 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, 0x08F0, 0x08F3, - 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, 0x0942, 0x094D, - 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, 0x09A0, 0x09A6, - 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, 0x0A00, 0x0A03, + 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824, + 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F, + 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD, + 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E, + 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E, + 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8, + 0x0A00, 0x0A03, /* VSC */ 0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E, /* UCHE */ @@ -290,6 +291,18 @@ static const unsigned int a6xx_registers[] = { 0xA630, 0xA630, }; +/* + * Set of registers to dump for A6XX before actually triggering crash dumper. 
+ * Registers in pairs - first value is the start offset, second + * is the stop offset (inclusive) + */ +static const unsigned int a6xx_pre_crashdumper_registers[] = { + /* RBBM: RBBM_STATUS - RBBM_STATUS3 */ + 0x210, 0x213, + /* CP: CP_STATUS_1 */ + 0x825, 0x825, +}; + enum a6xx_debugbus_id { A6XX_DBGBUS_CP = 0x1, A6XX_DBGBUS_RBBM = 0x2, @@ -562,6 +575,17 @@ static size_t a6xx_snapshot_registers(struct kgsl_device *device, u8 *buf, return (count * 8) + sizeof(*header); } +static size_t a6xx_snapshot_pre_crashdump_regs(struct kgsl_device *device, + u8 *buf, size_t remain, void *priv) +{ + struct kgsl_snapshot_registers pre_cdregs = { + .regs = a6xx_pre_crashdumper_registers, + .count = ARRAY_SIZE(a6xx_pre_crashdumper_registers)/2, + }; + + return kgsl_snapshot_dump_registers(device, buf, remain, &pre_cdregs); +} + static size_t a6xx_snapshot_shader_memory(struct kgsl_device *device, u8 *buf, size_t remain, void *priv) { @@ -1385,16 +1409,21 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct adreno_snapshot_data *snap_data = gpudev->snapshot_data; - /* Try to run the crash dumper */ - _a6xx_do_crashdump(device); - + /* Dump the registers which get affected by crash dumper trigger */ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, - snapshot, a6xx_snapshot_registers, NULL); + snapshot, a6xx_snapshot_pre_crashdump_regs, NULL); + /* Dump vbif registers as well which get affected by crash dumper */ adreno_snapshot_vbif_registers(device, snapshot, a6xx_vbif_snapshot_registers, ARRAY_SIZE(a6xx_vbif_snapshot_registers)); + /* Try to run the crash dumper */ + _a6xx_do_crashdump(device); + + kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, + snapshot, a6xx_snapshot_registers, NULL); + /* CP_SQE indexed registers */ kgsl_snapshot_indexed_registers(device, snapshot, A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA, -- GitLab From 7214d7e8ad7153bba497ce4bb2577a69bef5517c Mon Sep 17 00:00:00 2001 From: AnilKumar Chimata Date: Fri, 23 Jun 2017 03:09:59 -0700 Subject: [PATCH 231/786] md: Add dm-req-crypt driver snapshot This is a snapshot of dm-req-crypt driver as of msm-4.4 commit <0293b8a7d07a2> (qcom: smb-lib: rerun APSD on insertion for micro USB mode). Change-Id: Ifa51a02d5ba0bd4a7b623f6dc8c18f1c921ae127 Signed-off-by: AnilKumar Chimata --- block/blk-core.c | 12 + block/blk-merge.c | 58 ++ block/blk.h | 1 - drivers/md/Kconfig | 17 + drivers/md/Makefile | 1 + drivers/md/dm-req-crypt.c | 1362 +++++++++++++++++++++++++++++++++ drivers/md/dm-rq.c | 11 +- include/linux/blk_types.h | 10 +- include/linux/blkdev.h | 3 + include/linux/device-mapper.h | 8 + 10 files changed, 1479 insertions(+), 4 deletions(-) create mode 100644 drivers/md/dm-req-crypt.c diff --git a/block/blk-core.c b/block/blk-core.c index 710c93ba11fe..d8fba675a6e4 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -1438,6 +1438,9 @@ void __blk_put_request(struct request_queue *q, struct request *req) /* this is a bio leak */ WARN_ON(req->bio != NULL); + /* this is a bio leak if the bio is not tagged with BIO_DONTFREE */ + WARN_ON(req->bio && !bio_flagged(req->bio, BIO_DONTFREE)); + /* * Request may not have originated from ll_rw_blk. 
if not, * it didn't come out of our reserved rq pools @@ -2619,6 +2622,15 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) blk_account_io_completion(req, nr_bytes); total_bytes = 0; + + /* + * Check for this if flagged, Req based dm needs to perform + * post processing, hence dont end bios or request.DM + * layer takes care. + */ + if (bio_flagged(req->bio, BIO_DONTFREE)) + return false; + while (req->bio) { struct bio *bio = req->bio; unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes); diff --git a/block/blk-merge.c b/block/blk-merge.c index 2642e5fc8b69..abde3707438d 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -492,6 +492,64 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, } EXPORT_SYMBOL(blk_rq_map_sg); +/* + * map a request to scatterlist without combining PHY CONT + * blocks, return number of sg entries setup. Caller + * must make sure sg can hold rq->nr_phys_segments entries + */ +int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct bio_vec bvec, bvprv = { NULL }; + struct req_iterator iter; + struct scatterlist *sg; + int nsegs, cluster = 0; + + nsegs = 0; + + /* + * for each bio in rq + */ + sg = NULL; + rq_for_each_segment(bvec, rq, iter) { + __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg, + &nsegs, &cluster); + } /* segments in rq */ + + + if (!sg) + return nsegs; + + if (unlikely(rq->cmd_flags & REQ_COPY_USER) && + (blk_rq_bytes(rq) & q->dma_pad_mask)) { + unsigned int pad_len = + (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; + + sg->length += pad_len; + rq->extra_len += pad_len; + } + + if (q->dma_drain_size && q->dma_drain_needed(rq)) { + if (rq->cmd_flags & REQ_OP_WRITE) + memset(q->dma_drain_buffer, 0, q->dma_drain_size); + + sg->page_link &= ~0x02; + sg = sg_next(sg); + sg_set_page(sg, virt_to_page(q->dma_drain_buffer), + q->dma_drain_size, + ((unsigned long)q->dma_drain_buffer) & + (PAGE_SIZE - 1)); + nsegs++; + rq->extra_len += q->dma_drain_size; + } + + if (sg) + sg_mark_end(sg); + + return nsegs; +} +EXPORT_SYMBOL(blk_rq_map_sg_no_cluster); + static inline int ll_new_hw_segment(struct request_queue *q, struct request *req, struct bio *bio) diff --git a/block/blk.h b/block/blk.h index 74444c49078f..ae076666cc23 100644 --- a/block/blk.h +++ b/block/blk.h @@ -207,7 +207,6 @@ int attempt_back_merge(struct request_queue *q, struct request *rq); int attempt_front_merge(struct request_queue *q, struct request *rq); int blk_attempt_req_merge(struct request_queue *q, struct request *rq, struct request *next); -void blk_recalc_rq_segments(struct request *rq); void blk_rq_set_mixed_merge(struct request *rq); bool blk_rq_merge_ok(struct request *rq, struct bio *bio); int blk_try_merge(struct request *rq, struct bio *bio); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index e7b8f49e060f..89fc93bc9009 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -277,6 +277,23 @@ config DM_CRYPT If unsure, say N. +config DM_REQ_CRYPT + tristate "Req Crypt target support" + depends on BLK_DEV_DM + select XTS + select CRYPTO_XTS + ---help--- + This request based device-mapper target allows you to create a device that + transparently encrypts the data on it. You'll need to activate + the ciphers you're going to use in the cryptoapi configuration. + The DM REQ CRYPT operates on requests (bigger payloads) to utilize + crypto hardware better. + + To compile this code as a module, choose M here: the module will + be called dm-req-crypt. 
+ + If unsure, say N. + config DM_SNAPSHOT tristate "Snapshot target" depends on BLK_DEV_DM diff --git a/drivers/md/Makefile b/drivers/md/Makefile index f26ce41af389..f14e2fce5023 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o obj-$(CONFIG_DM_ERA) += dm-era.o obj-$(CONFIG_DM_LOG_WRITES) += dm-log-writes.o +obj-$(CONFIG_DM_REQ_CRYPT) += dm-req-crypt.o obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o ifeq ($(CONFIG_DM_UEVENT),y) diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c new file mode 100644 index 000000000000..56f214a0b4fe --- /dev/null +++ b/drivers/md/dm-req-crypt.c @@ -0,0 +1,1362 @@ +/* + * DM request based crypto driver + * + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#define DM_MSG_PREFIX "req-crypt" + +#define MAX_SG_LIST 1024 +#define REQ_DM_512_KB (512*1024) +#define MAX_ENCRYPTION_BUFFERS 1 +#define MIN_IOS 256 +#define MIN_POOL_PAGES 32 +#define KEY_SIZE_XTS 32 +#define AES_XTS_IV_LEN 16 +#define MAX_MSM_ICE_KEY_LUT_SIZE 32 +#define SECTOR_SIZE 512 +#define MIN_CRYPTO_TRANSFER_SIZE (4 * 1024) + +#define DM_REQ_CRYPT_ERROR -1 +#define DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC -2 + +/* + * ENCRYPTION_MODE_CRYPTO means dm-req-crypt would invoke crypto operations + * for all of the requests. Crypto operations are performed by crypto engine + * plugged with Linux Kernel Crypto APIs + */ +#define DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO 0 +/* + * ENCRYPTION_MODE_TRANSPARENT means dm-req-crypt would not invoke crypto + * operations for any of the requests. 
Data would be encrypted or decrypted + * using Inline Crypto Engine(ICE) embedded in storage hardware + */ +#define DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT 1 + +#define DM_REQ_CRYPT_QUEUE_SIZE 256 + +struct req_crypt_result { + struct completion completion; + int err; +}; + +#define FDE_KEY_ID 0 +#define PFE_KEY_ID 1 + +static struct dm_dev *dev; +static struct kmem_cache *_req_crypt_io_pool; +static struct kmem_cache *_req_dm_scatterlist_pool; +static sector_t start_sector_orig; +static struct workqueue_struct *req_crypt_queue; +static struct workqueue_struct *req_crypt_split_io_queue; +static mempool_t *req_io_pool; +static mempool_t *req_page_pool; +static mempool_t *req_scatterlist_pool; +static bool is_fde_enabled; +static struct crypto_ablkcipher *tfm; +static unsigned int encryption_mode; +static struct ice_crypto_setting *ice_settings; + +unsigned int num_engines; +unsigned int num_engines_fde, fde_cursor; +unsigned int num_engines_pfe, pfe_cursor; +struct crypto_engine_entry *fde_eng, *pfe_eng; +DEFINE_MUTEX(engine_list_mutex); + +struct req_dm_crypt_io { + struct ice_crypto_setting ice_settings; + struct work_struct work; + struct request *cloned_request; + int error; + atomic_t pending; + struct timespec start_time; + bool should_encrypt; + bool should_decrypt; + u32 key_id; +}; + +struct req_dm_split_req_io { + struct work_struct work; + struct scatterlist *req_split_sg_read; + struct req_crypt_result result; + struct crypto_engine_entry *engine; + u8 IV[AES_XTS_IV_LEN]; + int size; + struct request *clone; +}; + +#ifdef CONFIG_FIPS_ENABLE +static struct qcrypto_func_set dm_qcrypto_func; +#else +static struct qcrypto_func_set dm_qcrypto_func = { + qcrypto_cipher_set_device_hw, + qcrypto_cipher_set_flag, + qcrypto_get_num_engines, + qcrypto_get_engine_list +}; +#endif +static void req_crypt_cipher_complete + (struct crypto_async_request *req, int err); +static void req_cryptd_split_req_queue_cb + (struct work_struct *work); +static void req_cryptd_split_req_queue + (struct req_dm_split_req_io *io); +static void req_crypt_split_io_complete + (struct req_crypt_result *res, int err); + +static bool req_crypt_should_encrypt(struct req_dm_crypt_io *req) +{ + int ret = 0; + bool should_encrypt = false; + struct bio *bio = NULL; + bool is_encrypted = false; + bool is_inplace = false; + + if (!req || !req->cloned_request || !req->cloned_request->bio) + return false; + + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) + return false; + bio = req->cloned_request->bio; + + /* req->key_id = key_id; @todo support more than 1 pfe key */ + if ((ret == 0) && (is_encrypted || is_inplace)) { + should_encrypt = true; + req->key_id = PFE_KEY_ID; + } else if (is_fde_enabled) { + should_encrypt = true; + req->key_id = FDE_KEY_ID; + } + + return should_encrypt; +} + +static bool req_crypt_should_deccrypt(struct req_dm_crypt_io *req) +{ + int ret = 0; + bool should_deccrypt = false; + struct bio *bio = NULL; + bool is_encrypted = false; + bool is_inplace = false; + + if (!req || !req->cloned_request || !req->cloned_request->bio) + return false; + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) + return false; + + bio = req->cloned_request->bio; + + /* req->key_id = key_id; @todo support more than 1 pfe key */ + if ((ret == 0) && (is_encrypted && !is_inplace)) { + should_deccrypt = true; + req->key_id = PFE_KEY_ID; + } else if (is_fde_enabled) { + should_deccrypt = true; + req->key_id = FDE_KEY_ID; + } + + return should_deccrypt; +} + +static void 
req_crypt_inc_pending(struct req_dm_crypt_io *io) +{ + atomic_inc(&io->pending); +} + +static void req_crypt_dec_pending_encrypt(struct req_dm_crypt_io *io) +{ + int error = 0; + struct request *clone = NULL; + + if (io) { + error = io->error; + if (io->cloned_request) { + clone = io->cloned_request; + } else { + DMERR("%s io->cloned_request is NULL\n", + __func__); + /* + * If Clone is NULL we cannot do anything, + * this should never happen + */ + WARN_ON(1); + } + } else { + DMERR("%s io is NULL\n", __func__); + /* + * If Clone is NULL we cannot do anything, + * this should never happen + */ + WARN_ON(1); + } + + atomic_dec(&io->pending); + + if (error < 0) { + dm_kill_unmapped_request(clone, error); + mempool_free(io, req_io_pool); + } else + dm_dispatch_request(clone); +} + +static void req_crypt_dec_pending_decrypt(struct req_dm_crypt_io *io) +{ + int error = 0; + struct request *clone = NULL; + + if (io) { + error = io->error; + if (io->cloned_request) { + clone = io->cloned_request; + } else { + DMERR("%s io->cloned_request is NULL\n", + __func__); + /* + * If Clone is NULL we cannot do anything, + * this should never happen + */ + WARN_ON(1); + } + } else { + DMERR("%s io is NULL\n", + __func__); + /* + * If Clone is NULL we cannot do anything, + * this should never happen + */ + WARN_ON(1); + } + + /* Should never get here if io or Clone is NULL */ + dm_end_request(clone, error); + atomic_dec(&io->pending); + mempool_free(io, req_io_pool); +} + +/* + * The callback that will be called by the worker queue to perform Decryption + * for reads and use the dm function to complete the bios and requests. + */ +static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io) +{ + struct request *clone = NULL; + int error = DM_REQ_CRYPT_ERROR; + int total_sg_len = 0, total_bytes_in_req = 0, temp_size = 0, i = 0; + struct scatterlist *sg = NULL; + struct scatterlist *req_sg_read = NULL; + + unsigned int engine_list_total = 0; + struct crypto_engine_entry *curr_engine_list = NULL; + bool split_transfers = 0; + sector_t tempiv; + struct req_dm_split_req_io *split_io = NULL; + + if (io) { + error = io->error; + if (io->cloned_request) { + clone = io->cloned_request; + } else { + DMERR("%s io->cloned_request is NULL\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + } else { + DMERR("%s io is NULL\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + + req_crypt_inc_pending(io); + + mutex_lock(&engine_list_mutex); + + engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde : + (io->key_id == PFE_KEY_ID ? + num_engines_pfe : 0)); + + curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng : + (io->key_id == PFE_KEY_ID ? 
+ pfe_eng : NULL)); + + mutex_unlock(&engine_list_mutex); + + req_sg_read = (struct scatterlist *)mempool_alloc(req_scatterlist_pool, + GFP_KERNEL); + if (!req_sg_read) { + DMERR("%s req_sg_read allocation failed\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + memset(req_sg_read, 0, sizeof(struct scatterlist) * MAX_SG_LIST); + + total_sg_len = blk_rq_map_sg_no_cluster(clone->q, clone, req_sg_read); + if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) { + DMERR("%s Request Error%d", __func__, total_sg_len); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + total_bytes_in_req = clone->__data_len; + if (total_bytes_in_req > REQ_DM_512_KB) { + DMERR("%s total_bytes_in_req > 512 MB %d", + __func__, total_bytes_in_req); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + + if ((clone->__data_len >= (MIN_CRYPTO_TRANSFER_SIZE * + engine_list_total)) + && (engine_list_total > 1)) + split_transfers = 1; + + if (split_transfers) { + split_io = kzalloc(sizeof(struct req_dm_split_req_io) + * engine_list_total, GFP_KERNEL); + if (!split_io) { + DMERR("%s split_io allocation failed\n", __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + split_io[0].req_split_sg_read = sg = req_sg_read; + split_io[engine_list_total - 1].size = total_bytes_in_req; + for (i = 0; i < (engine_list_total); i++) { + while ((sg) && i < (engine_list_total - 1)) { + split_io[i].size += sg->length; + split_io[engine_list_total - 1].size -= + sg->length; + if (split_io[i].size >= + (total_bytes_in_req / + engine_list_total)) { + split_io[i + 1].req_split_sg_read = + sg_next(sg); + sg_mark_end(sg); + break; + } + sg = sg_next(sg); + } + split_io[i].engine = &curr_engine_list[i]; + init_completion(&split_io[i].result.completion); + memset(&split_io[i].IV, 0, AES_XTS_IV_LEN); + tempiv = clone->__sector + (temp_size / SECTOR_SIZE); + memcpy(&split_io[i].IV, &tempiv, sizeof(sector_t)); + temp_size += split_io[i].size; + split_io[i].clone = clone; + req_cryptd_split_req_queue(&split_io[i]); + } + } else { + split_io = kzalloc(sizeof(struct req_dm_split_req_io), + GFP_KERNEL); + if (!split_io) { + DMERR("%s split_io allocation failed\n", __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + split_io->engine = &curr_engine_list[0]; + init_completion(&split_io->result.completion); + memcpy(split_io->IV, &clone->__sector, sizeof(sector_t)); + split_io->req_split_sg_read = req_sg_read; + split_io->size = total_bytes_in_req; + split_io->clone = clone; + req_cryptd_split_req_queue(split_io); + } + + if (!split_transfers) { + wait_for_completion_interruptible(&split_io->result.completion); + if (split_io->result.err) { + DMERR("%s error = %d for request\n", + __func__, split_io->result.err); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + } else { + for (i = 0; i < (engine_list_total); i++) { + wait_for_completion_interruptible( + &split_io[i].result.completion); + if (split_io[i].result.err) { + DMERR("%s error = %d for %dst request\n", + __func__, split_io[i].result.err, i); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + } + } + error = 0; +ablkcipher_req_alloc_failure: + + mempool_free(req_sg_read, req_scatterlist_pool); + kfree(split_io); +submit_request: + if (io) + io->error = error; + req_crypt_dec_pending_decrypt(io); +} + +/* + * This callback is called by the worker queue to perform non-decrypt reads + * and use the dm function 
to complete the bios and requests. + */ +static void req_cryptd_crypt_read_plain(struct req_dm_crypt_io *io) +{ + struct request *clone = NULL; + int error = 0; + + if (!io || !io->cloned_request) { + DMERR("%s io is invalid\n", __func__); + WARN_ON(1); /* should not happen */ + } + + clone = io->cloned_request; + + dm_end_request(clone, error); + mempool_free(io, req_io_pool); +} + +/* + * The callback that will be called by the worker queue to perform Encryption + * for writes and submit the request using the elevelator. + */ +static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) +{ + struct request *clone = NULL; + struct bio *bio_src = NULL; + unsigned int total_sg_len_req_in = 0, total_sg_len_req_out = 0, + total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0; + struct req_iterator iter; + struct req_iterator iter1; + struct ablkcipher_request *req = NULL; + struct req_crypt_result result; + struct bio_vec bvec; + struct scatterlist *req_sg_in = NULL; + struct scatterlist *req_sg_out = NULL; + int copy_bio_sector_to_req = 0; + gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; + struct page *page = NULL; + u8 IV[AES_XTS_IV_LEN]; + int remaining_size = 0, err = 0; + struct crypto_engine_entry engine; + unsigned int engine_list_total = 0; + struct crypto_engine_entry *curr_engine_list = NULL; + unsigned int *engine_cursor = NULL; + + + if (io) { + if (io->cloned_request) { + clone = io->cloned_request; + } else { + DMERR("%s io->cloned_request is NULL\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + } else { + DMERR("%s io is NULL\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + + req_crypt_inc_pending(io); + + req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + DMERR("%s ablkcipher request allocation failed\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + req_crypt_cipher_complete, &result); + + mutex_lock(&engine_list_mutex); + engine_list_total = (io->key_id == FDE_KEY_ID ? num_engines_fde : + (io->key_id == PFE_KEY_ID ? + num_engines_pfe : 0)); + + curr_engine_list = (io->key_id == FDE_KEY_ID ? fde_eng : + (io->key_id == PFE_KEY_ID ? + pfe_eng : NULL)); + + engine_cursor = (io->key_id == FDE_KEY_ID ? &fde_cursor : + (io->key_id == PFE_KEY_ID ? 
&pfe_cursor + : NULL)); + if ((engine_list_total < 1) || (curr_engine_list == NULL) || + (engine_cursor == NULL)) { + DMERR("%s Unknown Key ID!\n", __func__); + error = DM_REQ_CRYPT_ERROR; + mutex_unlock(&engine_list_mutex); + goto ablkcipher_req_alloc_failure; + } + + engine = curr_engine_list[*engine_cursor]; + (*engine_cursor)++; + (*engine_cursor) %= engine_list_total; + + err = (dm_qcrypto_func.cipher_set)(req, engine.ce_device, + engine.hw_instance); + if (err) { + DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n", + __func__, err); + mutex_unlock(&engine_list_mutex); + goto ablkcipher_req_alloc_failure; + } + mutex_unlock(&engine_list_mutex); + + init_completion(&result.completion); + + (dm_qcrypto_func.cipher_flag)(req, + QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B); + crypto_ablkcipher_clear_flags(tfm, ~0); + crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS); + + req_sg_in = (struct scatterlist *)mempool_alloc(req_scatterlist_pool, + GFP_KERNEL); + if (!req_sg_in) { + DMERR("%s req_sg_in allocation failed\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + memset(req_sg_in, 0, sizeof(struct scatterlist) * MAX_SG_LIST); + + req_sg_out = (struct scatterlist *)mempool_alloc(req_scatterlist_pool, + GFP_KERNEL); + if (!req_sg_out) { + DMERR("%s req_sg_out allocation failed\n", + __func__); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + memset(req_sg_out, 0, sizeof(struct scatterlist) * MAX_SG_LIST); + + total_sg_len_req_in = blk_rq_map_sg(clone->q, clone, req_sg_in); + if ((total_sg_len_req_in <= 0) || + (total_sg_len_req_in > MAX_SG_LIST)) { + DMERR("%s Request Error%d", __func__, total_sg_len_req_in); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + total_bytes_in_req = clone->__data_len; + if (total_bytes_in_req > REQ_DM_512_KB) { + DMERR("%s total_bytes_in_req > 512 MB %d", + __func__, total_bytes_in_req); + error = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + rq_for_each_segment(bvec, clone, iter) { + if (bvec.bv_len > remaining_size) { + page = NULL; + while (page == NULL) { + page = mempool_alloc(req_page_pool, gfp_mask); + if (!page) { + DMERR("%s Crypt page alloc failed", + __func__); + congestion_wait(BLK_RW_ASYNC, HZ/100); + } + } + + bvec.bv_page = page; + bvec.bv_offset = 0; + remaining_size = PAGE_SIZE - bvec.bv_len; + if (remaining_size < 0) + WARN_ON(1); + } else { + bvec.bv_page = page; + bvec.bv_offset = PAGE_SIZE - remaining_size; + remaining_size = remaining_size - bvec.bv_len; + } + } + + total_sg_len_req_out = blk_rq_map_sg(clone->q, clone, req_sg_out); + if ((total_sg_len_req_out <= 0) || + (total_sg_len_req_out > MAX_SG_LIST)) { + DMERR("%s Request Error %d", __func__, total_sg_len_req_out); + error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC; + goto ablkcipher_req_alloc_failure; + } + + memset(IV, 0, AES_XTS_IV_LEN); + memcpy(IV, &clone->__sector, sizeof(sector_t)); + + ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out, + total_bytes_in_req, (void *) IV); + + rc = crypto_ablkcipher_encrypt(req); + + switch (rc) { + case 0: + break; + + case -EBUSY: + /* + * Lets make this synchronous request by waiting on + * in progress as well + */ + case -EINPROGRESS: + wait_for_completion_interruptible(&result.completion); + if (result.err) { + DMERR("%s error = %d encrypting the request\n", + __func__, result.err); + error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC; + goto ablkcipher_req_alloc_failure; + } + break; + + default: + error = 
DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC; + goto ablkcipher_req_alloc_failure; + } + + __rq_for_each_bio(bio_src, clone) { + if (copy_bio_sector_to_req == 0) + copy_bio_sector_to_req++; + blk_queue_bounce(clone->q, &bio_src); + } + + /* + * Recalculate the phy_segments as we allocate new pages + * This is used by storage driver to fill the sg list. + */ + blk_recalc_rq_segments(clone); + +ablkcipher_req_alloc_failure: + if (req) + ablkcipher_request_free(req); + + if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) { + rq_for_each_segment(bvec, clone, iter1) { + if (bvec.bv_offset == 0) { + mempool_free(bvec.bv_page, req_page_pool); + bvec.bv_page = NULL; + } else + bvec.bv_page = NULL; + } + } + + mempool_free(req_sg_in, req_scatterlist_pool); + mempool_free(req_sg_out, req_scatterlist_pool); +submit_request: + if (io) + io->error = error; + req_crypt_dec_pending_encrypt(io); +} + +/* + * This callback is called by the worker queue to perform non-encrypted writes + * and submit the request using the elevelator. + */ +static void req_cryptd_crypt_write_plain(struct req_dm_crypt_io *io) +{ + struct request *clone = NULL; + + if (!io || !io->cloned_request) { + DMERR("%s io is invalid\n", __func__); + WARN_ON(1); /* should not happen */ + } + + clone = io->cloned_request; + io->error = 0; + dm_dispatch_request(clone); +} + +/* Queue callback function that will get triggered */ +static void req_cryptd_crypt(struct work_struct *work) +{ + struct req_dm_crypt_io *io = + container_of(work, struct req_dm_crypt_io, work); + + if (rq_data_dir(io->cloned_request) == WRITE) { + if (io->should_encrypt) + req_cryptd_crypt_write_convert(io); + else + req_cryptd_crypt_write_plain(io); + } else if (rq_data_dir(io->cloned_request) == READ) { + if (io->should_decrypt) + req_cryptd_crypt_read_convert(io); + else + req_cryptd_crypt_read_plain(io); + } else { + DMERR("%s received non-write request for Clone 0x%p\n", + __func__, io->cloned_request); + } +} + +static void req_cryptd_split_req_queue_cb(struct work_struct *work) +{ + struct req_dm_split_req_io *io = + container_of(work, struct req_dm_split_req_io, work); + struct ablkcipher_request *req = NULL; + struct req_crypt_result result; + int err = 0; + struct crypto_engine_entry *engine = NULL; + + if ((!io) || (!io->req_split_sg_read) || (!io->engine)) { + DMERR("%s Input invalid\n", + __func__); + err = DM_REQ_CRYPT_ERROR; + /* If io is not populated this should not be called */ + WARN_ON(1); + } + req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + if (!req) { + DMERR("%s ablkcipher request allocation failed\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + + ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + req_crypt_cipher_complete, &result); + + engine = io->engine; + + err = (dm_qcrypto_func.cipher_set)(req, engine->ce_device, + engine->hw_instance); + if (err) { + DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n", + __func__, err); + goto ablkcipher_req_alloc_failure; + } + init_completion(&result.completion); + (dm_qcrypto_func.cipher_flag)(req, + QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B); + + crypto_ablkcipher_clear_flags(tfm, ~0); + crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS); + + ablkcipher_request_set_crypt(req, io->req_split_sg_read, + io->req_split_sg_read, io->size, (void *) io->IV); + + err = crypto_ablkcipher_decrypt(req); + switch (err) { + case 0: + break; + + case -EBUSY: + /* + * Lets make this synchronous request by waiting on + * in progress as well + */ 
+ case -EINPROGRESS: + wait_for_completion_io(&result.completion); + if (result.err) { + DMERR("%s error = %d encrypting the request\n", + __func__, result.err); + err = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + break; + + default: + err = DM_REQ_CRYPT_ERROR; + goto ablkcipher_req_alloc_failure; + } + err = 0; +ablkcipher_req_alloc_failure: + if (req) + ablkcipher_request_free(req); + + req_crypt_split_io_complete(&io->result, err); +} + +static void req_cryptd_split_req_queue(struct req_dm_split_req_io *io) +{ + INIT_WORK(&io->work, req_cryptd_split_req_queue_cb); + queue_work(req_crypt_split_io_queue, &io->work); +} + +static void req_cryptd_queue_crypt(struct req_dm_crypt_io *io) +{ + INIT_WORK(&io->work, req_cryptd_crypt); + queue_work(req_crypt_queue, &io->work); +} + +/* + * Cipher complete callback, this is triggered by the Linux crypto api once + * the operation is done. This signals the waiting thread that the crypto + * operation is complete. + */ +static void req_crypt_cipher_complete(struct crypto_async_request *req, int err) +{ + struct req_crypt_result *res = req->data; + + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} + +static void req_crypt_split_io_complete(struct req_crypt_result *res, int err) +{ + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} +/* + * If bio->bi_dev is a partition, remap the location + */ +static inline void req_crypt_blk_partition_remap(struct bio *bio) +{ + struct block_device *bdev = bio->bi_bdev; + + if (bio_sectors(bio) && bdev != bdev->bd_contains) { + struct hd_struct *p = bdev->bd_part; + /* + * Check for integer overflow, should never happen. + */ + if (p->start_sect > (UINT_MAX - bio->bi_iter.bi_sector)) + WARN_ON(1); + + bio->bi_iter.bi_sector += p->start_sect; + bio->bi_bdev = bdev->bd_contains; + } +} + +/* + * The endio function is called from ksoftirqd context (atomic). + * For write operations the new pages created form the mempool + * is freed and returned. * For read operations, decryption is + * required, since this is called in a atomic * context, the + * request is sent to a worker queue to complete decryptiona and + * free the request once done. + */ +static int req_crypt_endio(struct dm_target *ti, struct request *clone, + int error, union map_info *map_context) +{ + int err = 0; + struct req_iterator iter1; + struct bio_vec bvec; + struct req_dm_crypt_io *req_io = map_context->ptr; + + /* If it is for ICE, free up req_io and return */ + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + mempool_free(req_io, req_io_pool); + err = error; + goto submit_request; + } + + if (rq_data_dir(clone) == WRITE) { + rq_for_each_segment(bvec, clone, iter1) { + if (req_io->should_encrypt && bvec.bv_offset == 0) { + mempool_free(bvec.bv_page, req_page_pool); + bvec.bv_page = NULL; + } else + bvec.bv_page = NULL; + } + mempool_free(req_io, req_io_pool); + goto submit_request; + } else if (rq_data_dir(clone) == READ) { + req_io->error = error; + req_cryptd_queue_crypt(req_io); + err = DM_ENDIO_INCOMPLETE; + goto submit_request; + } + +submit_request: + return err; +} + +/* + * This function is called with interrupts disabled + * The function remaps the clone for the underlying device. 
+ * If it is a write request, it calls into the worker queue to + * encrypt the data + * and submit the request directly using the elevator + * For a read request no pre-processing is required the request + * is returned to dm once mapping is done + */ +static int req_crypt_map(struct dm_target *ti, struct request *clone, + union map_info *map_context) +{ + struct req_dm_crypt_io *req_io = NULL; + int error = DM_REQ_CRYPT_ERROR, copy_bio_sector_to_req = 0; + struct bio *bio_src = NULL; + gfp_t gfp_flag = GFP_KERNEL; + + if (in_interrupt() || irqs_disabled()) + gfp_flag = GFP_NOWAIT; + + req_io = mempool_alloc(req_io_pool, gfp_flag); + if (!req_io) { + WARN_ON(1); + error = DM_REQ_CRYPT_ERROR; + goto submit_request; + } + + /* Save the clone in the req_io, the callback to the worker + * queue will get the req_io + */ + req_io->cloned_request = clone; + map_context->ptr = req_io; + atomic_set(&req_io->pending, 0); + + if (rq_data_dir(clone) == WRITE) + req_io->should_encrypt = req_crypt_should_encrypt(req_io); + if (rq_data_dir(clone) == READ) + req_io->should_decrypt = req_crypt_should_deccrypt(req_io); + + /* Get the queue of the underlying original device */ + clone->q = bdev_get_queue(dev->bdev); + clone->rq_disk = dev->bdev->bd_disk; + + __rq_for_each_bio(bio_src, clone) { + bio_src->bi_bdev = dev->bdev; + /* Currently the way req-dm works is that once the underlying + * device driver completes the request by calling into the + * block layer. The block layer completes the bios (clones) and + * then the cloned request. This is undesirable for req-dm-crypt + * hence added a flag BIO_DONTFREE, this flag will ensure that + * blk layer does not complete the cloned bios before completing + * the request. When the crypt endio is called, post-processing + * is done and then the dm layer will complete the bios (clones) + * and free them. + */ + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) + bio_src->bi_flags |= 1 << BIO_INLINECRYPT; + else + bio_src->bi_flags |= 1 << BIO_DONTFREE; + + /* + * If this device has partitions, remap block n + * of partition p to block n+start(p) of the disk. + */ + req_crypt_blk_partition_remap(bio_src); + if (copy_bio_sector_to_req == 0) { + clone->__sector = bio_src->bi_iter.bi_sector; + copy_bio_sector_to_req++; + } + blk_queue_bounce(clone->q, &bio_src); + } + + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + /* Set all crypto parameters for inline crypto engine */ + memcpy(&req_io->ice_settings, ice_settings, + sizeof(struct ice_crypto_setting)); + } else { + /* ICE checks for key_index which could be >= 0. If a chip has + * both ICE and GPCE and wanted to use GPCE, there could be + * issue. Storage driver send all requests to ICE driver. If + * it sees key_index as 0, it would assume it is for ICE while + * it is not. Hence set invalid key index by default. 
+ */ + req_io->ice_settings.key_index = -1; + + } + + if (rq_data_dir(clone) == READ || + encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + error = DM_MAPIO_REMAPPED; + goto submit_request; + } else if (rq_data_dir(clone) == WRITE) { + req_cryptd_queue_crypt(req_io); + error = DM_MAPIO_SUBMITTED; + goto submit_request; + } + +submit_request: + return error; + +} + +static void deconfigure_qcrypto(void) +{ + mempool_destroy(req_page_pool); + req_page_pool = NULL; + + mempool_destroy(req_scatterlist_pool); + req_scatterlist_pool = NULL; + + if (req_crypt_split_io_queue) { + destroy_workqueue(req_crypt_split_io_queue); + req_crypt_split_io_queue = NULL; + } + if (req_crypt_queue) { + destroy_workqueue(req_crypt_queue); + req_crypt_queue = NULL; + } + + kmem_cache_destroy(_req_dm_scatterlist_pool); + + mutex_lock(&engine_list_mutex); + kfree(pfe_eng); + pfe_eng = NULL; + kfree(fde_eng); + fde_eng = NULL; + mutex_unlock(&engine_list_mutex); + + if (tfm) { + crypto_free_ablkcipher(tfm); + tfm = NULL; + } +} + +static void req_crypt_dtr(struct dm_target *ti) +{ + DMDEBUG("dm-req-crypt Destructor.\n"); + + mempool_destroy(req_io_pool); + req_io_pool = NULL; + + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + kfree(ice_settings); + ice_settings = NULL; + } else { + deconfigure_qcrypto(); + } + + kmem_cache_destroy(_req_crypt_io_pool); + + if (dev) { + dm_put_device(ti, dev); + dev = NULL; + } +} + +static int configure_qcrypto(void) +{ + struct crypto_engine_entry *eng_list = NULL; + struct block_device *bdev = NULL; + int err = DM_REQ_CRYPT_ERROR, i; + struct request_queue *q = NULL; + + bdev = dev->bdev; + q = bdev_get_queue(bdev); + blk_queue_max_hw_sectors(q, DM_REQ_CRYPT_QUEUE_SIZE); + + /* Allocate the crypto alloc blk cipher and keep the handle */ + tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0); + if (IS_ERR(tfm)) { + DMERR("%s ablkcipher tfm allocation failed : error\n", + __func__); + tfm = NULL; + goto exit_err; + } + + num_engines_fde = num_engines_pfe = 0; + + mutex_lock(&engine_list_mutex); + num_engines = (dm_qcrypto_func.get_num_engines)(); + if (!num_engines) { + DMERR(KERN_INFO "%s qcrypto_get_num_engines failed\n", + __func__); + err = DM_REQ_CRYPT_ERROR; + mutex_unlock(&engine_list_mutex); + goto exit_err; + } + + eng_list = kcalloc(num_engines, sizeof(*eng_list), GFP_KERNEL); + if (eng_list == NULL) { + DMERR("%s engine list allocation failed\n", __func__); + err = DM_REQ_CRYPT_ERROR; + mutex_unlock(&engine_list_mutex); + goto exit_err; + } + + (dm_qcrypto_func.get_engine_list)(num_engines, eng_list); + + for (i = 0; i < num_engines; i++) { + if (eng_list[i].ce_device == FDE_KEY_ID) + num_engines_fde++; + if (eng_list[i].ce_device == PFE_KEY_ID) + num_engines_pfe++; + } + + fde_eng = kcalloc(num_engines_fde, sizeof(*fde_eng), GFP_KERNEL); + if (fde_eng == NULL) { + DMERR("%s fde engine list allocation failed\n", __func__); + mutex_unlock(&engine_list_mutex); + goto exit_err; + } + + pfe_eng = kcalloc(num_engines_pfe, sizeof(*pfe_eng), GFP_KERNEL); + if (pfe_eng == NULL) { + DMERR("%s pfe engine list allocation failed\n", __func__); + mutex_unlock(&engine_list_mutex); + goto exit_err; + } + + fde_cursor = 0; + pfe_cursor = 0; + + for (i = 0; i < num_engines; i++) { + if (eng_list[i].ce_device == FDE_KEY_ID) + fde_eng[fde_cursor++] = eng_list[i]; + if (eng_list[i].ce_device == PFE_KEY_ID) + pfe_eng[pfe_cursor++] = eng_list[i]; + } + + fde_cursor = 0; + pfe_cursor = 0; + mutex_unlock(&engine_list_mutex); + + _req_dm_scatterlist_pool = 
kmem_cache_create("req_dm_scatterlist", + sizeof(struct scatterlist) * MAX_SG_LIST, + __alignof__(struct scatterlist), 0, NULL); + if (!_req_dm_scatterlist_pool) + goto exit_err; + + req_crypt_queue = alloc_workqueue("req_cryptd", + WQ_UNBOUND | + WQ_CPU_INTENSIVE | + WQ_MEM_RECLAIM, + 0); + if (!req_crypt_queue) { + DMERR("%s req_crypt_queue not allocated\n", __func__); + goto exit_err; + } + + req_crypt_split_io_queue = alloc_workqueue("req_crypt_split", + WQ_UNBOUND | + WQ_CPU_INTENSIVE | + WQ_MEM_RECLAIM, + 0); + if (!req_crypt_split_io_queue) { + DMERR("%s req_crypt_split_io_queue not allocated\n", __func__); + goto exit_err; + } + req_scatterlist_pool = mempool_create_slab_pool(MIN_IOS, + _req_dm_scatterlist_pool); + if (!req_scatterlist_pool) { + DMERR("%s req_scatterlist_pool is not allocated\n", __func__); + err = -ENOMEM; + goto exit_err; + } + + req_page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); + if (!req_page_pool) { + DMERR("%s req_page_pool not allocated\n", __func__); + goto exit_err; + } + + err = 0; + +exit_err: + kfree(eng_list); + return err; +} + +/* + * Construct an encryption mapping: + * + */ +static int req_crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) +{ + int err = DM_REQ_CRYPT_ERROR; + unsigned long long tmpll; + char dummy; + int ret; + + DMDEBUG("dm-req-crypt Constructor.\n"); + + if (argc < 5) { + DMERR(" %s Not enough args\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + + if (argv[3]) { + if (dm_get_device(ti, argv[3], + dm_table_get_mode(ti->table), &dev)) { + DMERR(" %s Device Lookup failed\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + } else { + DMERR(" %s Arg[3] invalid\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + + if (argv[4]) { + if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { + DMERR("%s Invalid device sector\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + } else { + DMERR(" %s Arg[4] invalid\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + start_sector_orig = tmpll; + + /* Allow backward compatible */ + if (argc >= 6) { + if (argv[5]) { + if (!strcmp(argv[5], "fde_enabled")) + is_fde_enabled = true; + else + is_fde_enabled = false; + } else { + DMERR(" %s Arg[5] invalid\n", __func__); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + } else { + DMERR(" %s Arg[5] missing, set FDE enabled.\n", __func__); + is_fde_enabled = true; /* backward compatible */ + } + + _req_crypt_io_pool = KMEM_CACHE(req_dm_crypt_io, 0); + if (!_req_crypt_io_pool) { + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + + encryption_mode = DM_REQ_CRYPT_ENCRYPTION_MODE_CRYPTO; + if (argc >= 7 && argv[6]) { + if (!strcmp(argv[6], "ice")) + encryption_mode = + DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT; + } + + if (encryption_mode == DM_REQ_CRYPT_ENCRYPTION_MODE_TRANSPARENT) { + /* configure ICE settings */ + ice_settings = + kzalloc(sizeof(struct ice_crypto_setting), GFP_KERNEL); + if (!ice_settings) { + err = -ENOMEM; + goto ctr_exit; + } + ice_settings->key_size = ICE_CRYPTO_KEY_SIZE_128; + ice_settings->algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS; + ice_settings->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY; + if (kstrtou16(argv[1], 0, &ice_settings->key_index) || + ice_settings->key_index < 0 || + ice_settings->key_index > MAX_MSM_ICE_KEY_LUT_SIZE) { + DMERR("%s Err: key index %d received for ICE\n", + __func__, ice_settings->key_index); + err = DM_REQ_CRYPT_ERROR; + goto ctr_exit; + } + } else { + ret = configure_qcrypto(); + if (ret) { + DMERR("%s failed to 
configure qcrypto\n", __func__); + err = ret; + goto ctr_exit; + } + } + + req_io_pool = mempool_create_slab_pool(MIN_IOS, _req_crypt_io_pool); + if (!req_io_pool) { + DMERR("%s req_io_pool not allocated\n", __func__); + err = -ENOMEM; + goto ctr_exit; + } + + /* + * If underlying device supports flush/discard, mapped target + * should also allow it + */ + ti->num_flush_bios = 1; + ti->num_discard_bios = 1; + + err = 0; + DMINFO("%s: Mapping block_device %s to dm-req-crypt ok!\n", + __func__, argv[3]); +ctr_exit: + if (err) + req_crypt_dtr(ti); + + return err; +} + +static int req_crypt_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data) +{ + return fn(ti, dev, start_sector_orig, ti->len, data); +} +void set_qcrypto_func_dm(void *dev, + void *flag, + void *engines, + void *engine_list) +{ + dm_qcrypto_func.cipher_set = dev; + dm_qcrypto_func.cipher_flag = flag; + dm_qcrypto_func.get_num_engines = engines; + dm_qcrypto_func.get_engine_list = engine_list; +} +EXPORT_SYMBOL(set_qcrypto_func_dm); + +static struct target_type req_crypt_target = { + .name = "req-crypt", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = req_crypt_ctr, + .dtr = req_crypt_dtr, + .map_rq = req_crypt_map, + .rq_end_io = req_crypt_endio, + .iterate_devices = req_crypt_iterate_devices, +}; + +static int __init req_dm_crypt_init(void) +{ + int r; + + + r = dm_register_target(&req_crypt_target); + if (r < 0) { + DMERR("register failed %d", r); + return r; + } + + DMINFO("dm-req-crypt successfully initalized.\n"); + + return r; +} + +static void __exit req_dm_crypt_exit(void) +{ + dm_unregister_target(&req_crypt_target); +} + +module_init(req_dm_crypt_init); +module_exit(req_dm_crypt_exit); + +MODULE_DESCRIPTION(DM_NAME " target for request based transparent encryption / decryption"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index ba7c4c685db3..bca4c0e387a6 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -283,7 +283,7 @@ static void free_rq_clone(struct request *clone) * Must be called without clone's queue lock held, * see end_clone_request() for more details. */ -static void dm_end_request(struct request *clone, int error) +void dm_end_request(struct request *clone, int error) { int rw = rq_data_dir(clone); struct dm_rq_target_io *tio = clone->end_io_data; @@ -464,7 +464,7 @@ static void dm_complete_request(struct request *rq, int error) * Target's rq_end_io() function isn't called. * This may be used when the target's map_rq() or clone_and_map_rq() functions fail. */ -static void dm_kill_unmapped_request(struct request *rq, int error) +void dm_kill_unmapped_request(struct request *rq, int error) { rq->cmd_flags |= REQ_FAILED; dm_complete_request(rq, error); @@ -512,6 +512,13 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq) dm_complete_request(rq, r); } +void dm_dispatch_request(struct request *rq) +{ + struct dm_rq_target_io *tio = tio_from_request(rq); + + dm_dispatch_clone_request(tio->clone, rq); +} + static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, void *data) { diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 744ea4f87f7e..2b8b6e0d7745 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -125,7 +125,15 @@ struct bio { * BVEC_POOL_IDX() */ #define BIO_RESET_BITS 10 -#define BIO_INLINECRYPT 15 + + +/* + * Added for Req based dm which need to perform post processing. 
This flag + * ensures blk_update_request does not free the bios or request, this is done + * at the dm level + */ +#define BIO_DONTFREE 10 +#define BIO_INLINECRYPT 11 /* * We support 6 different bvec pools, the last one is magic in that it diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e47a7f7025a0..fb910c634382 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -816,6 +816,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, struct scsi_ioctl_command __user *); +extern void blk_recalc_rq_segments(struct request *rq); extern int blk_queue_enter(struct request_queue *q, bool nowait); extern void blk_queue_exit(struct request_queue *q); extern void blk_start_queue(struct request_queue *q); @@ -1031,6 +1032,8 @@ extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fu extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); +extern int blk_rq_map_sg_no_cluster(struct request_queue *q, struct request *rq, + struct scatterlist *sglist); extern void blk_dump_rq_flags(struct request *, char *); extern long nr_blockdev_pages(void); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index cf86f528e615..20e26d96720f 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -650,4 +650,12 @@ static inline unsigned long to_bytes(sector_t n) return (n << SECTOR_SHIFT); } +/*----------------------------------------------------------------- + * Helper for block layer and dm core operations + *----------------------------------------------------------------- + */ +void dm_dispatch_request(struct request *rq); +void dm_kill_unmapped_request(struct request *rq, int error); +void dm_end_request(struct request *clone, int error); + #endif /* _LINUX_DEVICE_MAPPER_H */ -- GitLab From ddc48128a87bf328b1d76627ffd523dedf009150 Mon Sep 17 00:00:00 2001 From: AnilKumar Chimata Date: Fri, 23 Jun 2017 03:12:57 -0700 Subject: [PATCH 232/786] md: dm-req-crypt: Fix compilation errors During the kernel upgrade few functions are deprecated which results in compilation errors on dm-req-crypt driver, this patch fixes those compilation errors. 
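The conversion itself is mechanical: every ablkcipher_* identifier in dm-req-crypt.c is replaced by its skcipher_* counterpart with the same call shape. Below is a minimal, self-contained sketch of the new request lifecycle for reference; the "xts(aes)" algorithm name and the synchronous error handling are placeholders for illustration only, while the driver itself keeps using the "qcom-xts(aes)" hardware transform, a pipe key and an asynchronous completion callback, as the hunks that follow show.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Sketch only: encrypt nbytes from src into dst via the skcipher API.
 * Keying and async (-EINPROGRESS/-EBUSY) completion handling are left
 * out for brevity; the driver still waits on a completion as before.
 */
static int example_skcipher_encrypt(struct scatterlist *src,
                                    struct scatterlist *dst,
                                    unsigned int nbytes, u8 *iv)
{
        struct crypto_skcipher *tfm;   /* was struct crypto_ablkcipher */
        struct skcipher_request *req;  /* was struct ablkcipher_request */
        int ret;

        tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_skcipher(tfm);
                return -ENOMEM;
        }

        skcipher_request_set_crypt(req, src, dst, nbytes, iv);
        ret = crypto_skcipher_encrypt(req);

        skcipher_request_free(req);
        crypto_free_skcipher(tfm);
        return ret;
}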
Change-Id: I8fadd8ca81e35292174199afd3c4508ecabf55b5 Signed-off-by: AnilKumar Chimata --- drivers/crypto/msm/qcrypto.c | 4 +- drivers/md/dm-req-crypt.c | 94 ++++++++++++++++++------------------ include/linux/qcrypto.h | 9 ++-- 3 files changed, 55 insertions(+), 52 deletions(-) diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c index 0f0da4fd9388..b979fb9a8683 100644 --- a/drivers/crypto/msm/qcrypto.c +++ b/drivers/crypto/msm/qcrypto.c @@ -4289,7 +4289,7 @@ int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev) }; EXPORT_SYMBOL(qcrypto_cipher_set_device); -int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, u32 dev, +int qcrypto_cipher_set_device_hw(struct skcipher_request *req, u32 dev, u32 hw_inst) { struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); @@ -4335,7 +4335,7 @@ int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev) }; EXPORT_SYMBOL(qcrypto_ahash_set_device); -int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags) +int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags) { struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct crypto_priv *cp = ctx->cp; diff --git a/drivers/md/dm-req-crypt.c b/drivers/md/dm-req-crypt.c index 56f214a0b4fe..3ffe7e5e1197 100644 --- a/drivers/md/dm-req-crypt.c +++ b/drivers/md/dm-req-crypt.c @@ -33,6 +33,8 @@ #include #include +#include +#include #include #include #include @@ -88,7 +90,7 @@ static mempool_t *req_io_pool; static mempool_t *req_page_pool; static mempool_t *req_scatterlist_pool; static bool is_fde_enabled; -static struct crypto_ablkcipher *tfm; +static struct crypto_skcipher *tfm; static unsigned int encryption_mode; static struct ice_crypto_setting *ice_settings; @@ -323,7 +325,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io) DMERR("%s req_sg_read allocation failed\n", __func__); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } memset(req_sg_read, 0, sizeof(struct scatterlist) * MAX_SG_LIST); @@ -331,7 +333,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io) if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) { DMERR("%s Request Error%d", __func__, total_sg_len); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } total_bytes_in_req = clone->__data_len; @@ -339,7 +341,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io) DMERR("%s total_bytes_in_req > 512 MB %d", __func__, total_bytes_in_req); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } @@ -354,7 +356,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io) if (!split_io) { DMERR("%s split_io allocation failed\n", __func__); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } split_io[0].req_split_sg_read = sg = req_sg_read; @@ -389,7 +391,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io) if (!split_io) { DMERR("%s split_io allocation failed\n", __func__); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } split_io->engine = &curr_engine_list[0]; init_completion(&split_io->result.completion); @@ -406,7 +408,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io) DMERR("%s error = %d for request\n", __func__, split_io->result.err); error = 
DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } } else { for (i = 0; i < (engine_list_total); i++) { @@ -416,12 +418,12 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io) DMERR("%s error = %d for %dst request\n", __func__, split_io[i].result.err, i); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } } } error = 0; -ablkcipher_req_alloc_failure: +skcipher_req_alloc_failure: mempool_free(req_sg_read, req_scatterlist_pool); kfree(split_io); @@ -463,7 +465,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0; struct req_iterator iter; struct req_iterator iter1; - struct ablkcipher_request *req = NULL; + struct skcipher_request *req = NULL; struct req_crypt_result result; struct bio_vec bvec; struct scatterlist *req_sg_in = NULL; @@ -497,15 +499,15 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) req_crypt_inc_pending(io); - req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { - DMERR("%s ablkcipher request allocation failed\n", + DMERR("%s skcipher request allocation failed\n", __func__); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } - ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, req_crypt_cipher_complete, &result); mutex_lock(&engine_list_mutex); @@ -525,7 +527,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) DMERR("%s Unknown Key ID!\n", __func__); error = DM_REQ_CRYPT_ERROR; mutex_unlock(&engine_list_mutex); - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } engine = curr_engine_list[*engine_cursor]; @@ -538,7 +540,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n", __func__, err); mutex_unlock(&engine_list_mutex); - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } mutex_unlock(&engine_list_mutex); @@ -546,8 +548,8 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) (dm_qcrypto_func.cipher_flag)(req, QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B); - crypto_ablkcipher_clear_flags(tfm, ~0); - crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS); + crypto_skcipher_clear_flags(tfm, ~0); + crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS); req_sg_in = (struct scatterlist *)mempool_alloc(req_scatterlist_pool, GFP_KERNEL); @@ -555,7 +557,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) DMERR("%s req_sg_in allocation failed\n", __func__); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } memset(req_sg_in, 0, sizeof(struct scatterlist) * MAX_SG_LIST); @@ -565,7 +567,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) DMERR("%s req_sg_out allocation failed\n", __func__); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } memset(req_sg_out, 0, sizeof(struct scatterlist) * MAX_SG_LIST); @@ -574,7 +576,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) (total_sg_len_req_in > MAX_SG_LIST)) { DMERR("%s Request Error%d", __func__, total_sg_len_req_in); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto 
skcipher_req_alloc_failure; } total_bytes_in_req = clone->__data_len; @@ -582,7 +584,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) DMERR("%s total_bytes_in_req > 512 MB %d", __func__, total_bytes_in_req); error = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } rq_for_each_segment(bvec, clone, iter) { @@ -614,16 +616,16 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) (total_sg_len_req_out > MAX_SG_LIST)) { DMERR("%s Request Error %d", __func__, total_sg_len_req_out); error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } memset(IV, 0, AES_XTS_IV_LEN); memcpy(IV, &clone->__sector, sizeof(sector_t)); - ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out, + skcipher_request_set_crypt(req, req_sg_in, req_sg_out, total_bytes_in_req, (void *) IV); - rc = crypto_ablkcipher_encrypt(req); + rc = crypto_skcipher_encrypt(req); switch (rc) { case 0: @@ -640,13 +642,13 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) DMERR("%s error = %d encrypting the request\n", __func__, result.err); error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } break; default: error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } __rq_for_each_bio(bio_src, clone) { @@ -661,9 +663,9 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io) */ blk_recalc_rq_segments(clone); -ablkcipher_req_alloc_failure: +skcipher_req_alloc_failure: if (req) - ablkcipher_request_free(req); + skcipher_request_free(req); if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) { rq_for_each_segment(bvec, clone, iter1) { @@ -727,7 +729,7 @@ static void req_cryptd_split_req_queue_cb(struct work_struct *work) { struct req_dm_split_req_io *io = container_of(work, struct req_dm_split_req_io, work); - struct ablkcipher_request *req = NULL; + struct skcipher_request *req = NULL; struct req_crypt_result result; int err = 0; struct crypto_engine_entry *engine = NULL; @@ -739,14 +741,14 @@ static void req_cryptd_split_req_queue_cb(struct work_struct *work) /* If io is not populated this should not be called */ WARN_ON(1); } - req = ablkcipher_request_alloc(tfm, GFP_KERNEL); + req = skcipher_request_alloc(tfm, GFP_KERNEL); if (!req) { - DMERR("%s ablkcipher request allocation failed\n", __func__); + DMERR("%s skcipher request allocation failed\n", __func__); err = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } - ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, req_crypt_cipher_complete, &result); engine = io->engine; @@ -756,19 +758,19 @@ static void req_cryptd_split_req_queue_cb(struct work_struct *work) if (err) { DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n", __func__, err); - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } init_completion(&result.completion); (dm_qcrypto_func.cipher_flag)(req, QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B); - crypto_ablkcipher_clear_flags(tfm, ~0); - crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS); + crypto_skcipher_clear_flags(tfm, ~0); + crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS); - ablkcipher_request_set_crypt(req, io->req_split_sg_read, + skcipher_request_set_crypt(req, io->req_split_sg_read, 
io->req_split_sg_read, io->size, (void *) io->IV); - err = crypto_ablkcipher_decrypt(req); + err = crypto_skcipher_decrypt(req); switch (err) { case 0: break; @@ -784,18 +786,18 @@ static void req_cryptd_split_req_queue_cb(struct work_struct *work) DMERR("%s error = %d encrypting the request\n", __func__, result.err); err = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } break; default: err = DM_REQ_CRYPT_ERROR; - goto ablkcipher_req_alloc_failure; + goto skcipher_req_alloc_failure; } err = 0; -ablkcipher_req_alloc_failure: +skcipher_req_alloc_failure: if (req) - ablkcipher_request_free(req); + skcipher_request_free(req); req_crypt_split_io_complete(&io->result, err); } @@ -1029,7 +1031,7 @@ static void deconfigure_qcrypto(void) mutex_unlock(&engine_list_mutex); if (tfm) { - crypto_free_ablkcipher(tfm); + crypto_free_skcipher(tfm); tfm = NULL; } } @@ -1068,9 +1070,9 @@ static int configure_qcrypto(void) blk_queue_max_hw_sectors(q, DM_REQ_CRYPT_QUEUE_SIZE); /* Allocate the crypto alloc blk cipher and keep the handle */ - tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0); + tfm = crypto_alloc_skcipher("qcom-xts(aes)", 0, 0); if (IS_ERR(tfm)) { - DMERR("%s ablkcipher tfm allocation failed : error\n", + DMERR("%s skcipher tfm allocation failed : error\n", __func__); tfm = NULL; goto exit_err; diff --git a/include/linux/qcrypto.h b/include/linux/qcrypto.h index 252464a0e0d5..ff0e64c82cda 100644 --- a/include/linux/qcrypto.h +++ b/include/linux/qcrypto.h @@ -15,6 +15,7 @@ #include #include +#include #define QCRYPTO_CTX_KEY_MASK 0x000000ff #define QCRYPTO_CTX_USE_HW_KEY 0x00000001 @@ -29,7 +30,7 @@ int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev); int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev); /*int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);*/ -int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags); +int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags); int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags); /*int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);*/ @@ -47,16 +48,16 @@ struct crypto_engine_entry { int qcrypto_get_num_engines(void); void qcrypto_get_engine_list(size_t num_engines, struct crypto_engine_entry *arr); -int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, +int qcrypto_cipher_set_device_hw(struct skcipher_request *req, unsigned int fde_pfe, unsigned int hw_inst); struct qcrypto_func_set { - int (*cipher_set)(struct ablkcipher_request *req, + int (*cipher_set)(struct skcipher_request *req, unsigned int fde_pfe, unsigned int hw_inst); - int (*cipher_flag)(struct ablkcipher_request *req, unsigned int flags); + int (*cipher_flag)(struct skcipher_request *req, unsigned int flags); int (*get_num_engines)(void); void (*get_engine_list)(size_t num_engines, struct crypto_engine_entry *arr); -- GitLab From c6856549a4d0f7013ae478f67730c5cfac1ff470 Mon Sep 17 00:00:00 2001 From: AnilKumar Chimata Date: Fri, 23 Jun 2017 02:59:52 -0700 Subject: [PATCH 233/786] defconfig: sdm845: Enable DM request driver Enable DM request driver for sdm845 to perform hardware based full disk encryption. 
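Enabling CONFIG_DM_REQ_CRYPT builds the req-crypt target, but the target only does useful (non-ICE) work once the qcrypto driver hands its engine hooks to dm-req-crypt through set_qcrypto_func_dm(). The sketch below shows one way that hand-off could look, using the prototypes from the qcrypto.h hunk above; whether qcrypto.c performs the registration exactly like this is an assumption, the snippet only illustrates how the dm_qcrypto_func table gets populated.

#include <linux/qcrypto.h>

/* Exported by dm-req-crypt.c; repeated here only to keep the sketch
 * self-contained. The parameters are void pointers, so the qcrypto
 * entry points are passed with explicit casts.
 */
extern void set_qcrypto_func_dm(void *cipher_set, void *cipher_flag,
                                void *get_num_engines,
                                void *get_engine_list);

static void example_register_qcrypto_hooks(void)
{
        set_qcrypto_func_dm((void *)qcrypto_cipher_set_device_hw,
                            (void *)qcrypto_cipher_set_flag,
                            (void *)qcrypto_get_num_engines,
                            (void *)qcrypto_get_engine_list);
}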
Change-Id: I39341da4dbd0282e30d355d4991bccd52b0b265c Signed-off-by: AnilKumar Chimata --- arch/arm64/configs/sdm845-perf_defconfig | 3 +++ arch/arm64/configs/sdm845_defconfig | 2 ++ 2 files changed, 5 insertions(+) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index 18b0a3ba1ae6..7a53ed4777d5 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -246,9 +246,12 @@ CONFIG_SCSI_UFSHCD=y CONFIG_SCSI_UFSHCD_PLATFORM=y CONFIG_SCSI_UFS_QCOM=y CONFIG_SCSI_UFS_QCOM_ICE=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y CONFIG_DM_CRYPT=y +CONFIG_DM_REQ_CRYPT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 1f1b5b46d522..711529447973 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -259,7 +259,9 @@ CONFIG_SCSI_UFS_QCOM_ICE=y CONFIG_SCSI_UFSHCD_CMD_LOGGING=y CONFIG_MD=y CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y CONFIG_DM_CRYPT=y +CONFIG_DM_REQ_CRYPT=y CONFIG_DM_UEVENT=y CONFIG_DM_VERITY=y CONFIG_DM_VERITY_FEC=y -- GitLab From 278923c4a5eb8fe3a2c1fabf9a569012be6a4f9e Mon Sep 17 00:00:00 2001 From: Siva Kumar Akkireddi Date: Thu, 11 May 2017 15:29:47 +0530 Subject: [PATCH 234/786] msm: sps: Fix race condition in SPS debugfs APIs SPS debugfs APIs can be called concurrently which can result in dangling pointer access. This change synchronizes access to the SPS debugfs buffer. Change-Id: I409b3f0618f760cb67eba47b43c81d166cdae4aa Signed-off-by: Siva Kumar Akkireddi --- drivers/platform/msm/sps/sps.c | 13 +++++++++++++ drivers/platform/msm/sps/spsi.h | 17 ----------------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c index 907c94e89c28..1b7681a3af72 100644 --- a/drivers/platform/msm/sps/sps.c +++ b/drivers/platform/msm/sps/sps.c @@ -67,6 +67,7 @@ static char *debugfs_buf; static u32 debugfs_buf_size; static u32 debugfs_buf_used; static int wraparound; +static struct mutex sps_debugfs_lock; struct dentry *dent; struct dentry *dfile_info; @@ -85,6 +86,7 @@ static struct sps_bam *phy2bam(phys_addr_t phys_addr); /* record debug info for debugfs */ void sps_debugfs_record(const char *msg) { + mutex_lock(&sps_debugfs_lock); if (debugfs_record_enabled) { if (debugfs_buf_used + MAX_MSG_LEN >= debugfs_buf_size) { debugfs_buf_used = 0; @@ -98,6 +100,7 @@ void sps_debugfs_record(const char *msg) debugfs_buf_size - debugfs_buf_used, "\n**** end line of sps log ****\n\n"); } + mutex_unlock(&sps_debugfs_lock); } /* read the recorded debug info to userspace */ @@ -107,6 +110,7 @@ static ssize_t sps_read_info(struct file *file, char __user *ubuf, int ret = 0; int size; + mutex_lock(&sps_debugfs_lock); if (debugfs_record_enabled) { if (wraparound) size = debugfs_buf_size - MAX_MSG_LEN; @@ -116,6 +120,7 @@ static ssize_t sps_read_info(struct file *file, char __user *ubuf, ret = simple_read_from_buffer(ubuf, count, ppos, debugfs_buf, size); } + mutex_unlock(&sps_debugfs_lock); return ret; } @@ -160,12 +165,14 @@ static ssize_t sps_set_info(struct file *file, const char __user *buf, new_buf_size = buf_size_kb * SZ_1K; + mutex_lock(&sps_debugfs_lock); if (debugfs_record_enabled) { if (debugfs_buf_size == new_buf_size) { /* need do nothing */ pr_info( "sps:debugfs: input buffer size is the same as before.\n" ); + mutex_unlock(&sps_debugfs_lock); return count; } /* 
release the current buffer */ @@ -183,12 +190,14 @@ static ssize_t sps_set_info(struct file *file, const char __user *buf, if (!debugfs_buf) { debugfs_buf_size = 0; pr_err("sps:fail to allocate memory for debug_fs.\n"); + mutex_unlock(&sps_debugfs_lock); return -ENOMEM; } debugfs_buf_used = 0; wraparound = false; debugfs_record_enabled = true; + mutex_unlock(&sps_debugfs_lock); return count; } @@ -237,6 +246,7 @@ static ssize_t sps_set_logging_option(struct file *file, const char __user *buf, return count; } + mutex_lock(&sps_debugfs_lock); if (((option == 0) || (option == 2)) && ((logging_option == 1) || (logging_option == 3))) { debugfs_record_enabled = false; @@ -248,6 +258,7 @@ static ssize_t sps_set_logging_option(struct file *file, const char __user *buf, } logging_option = option; + mutex_unlock(&sps_debugfs_lock); return count; } @@ -595,6 +606,8 @@ static void sps_debugfs_init(void) goto bam_log_level_err; } + mutex_init(&sps_debugfs_lock); + return; bam_log_level_err: diff --git a/drivers/platform/msm/sps/spsi.h b/drivers/platform/msm/sps/spsi.h index 2e57f7d3bdcc..0c1b8ea1a60e 100644 --- a/drivers/platform/msm/sps/spsi.h +++ b/drivers/platform/msm/sps/spsi.h @@ -145,11 +145,6 @@ extern u8 print_limit_option; pr_info(msg, ##args); \ } \ } while (0) -#define SPS_DEBUGFS(msg, args...) do { \ - char buf[MAX_MSG_LEN]; \ - snprintf(buf, MAX_MSG_LEN, msg"\n", ##args); \ - sps_debugfs_record(buf); \ - } while (0) #define SPS_ERR(dev, msg, args...) do { \ if (logging_option != 1) { \ if (unlikely(print_limit_option > 2)) \ @@ -157,8 +152,6 @@ extern u8 print_limit_option; else \ pr_err(msg, ##args); \ } \ - if (unlikely(debugfs_record_enabled)) \ - SPS_DEBUGFS(msg, ##args); \ SPS_IPC(3, dev, msg, args); \ } while (0) #define SPS_INFO(dev, msg, args...) do { \ @@ -168,8 +161,6 @@ extern u8 print_limit_option; else \ pr_info(msg, ##args); \ } \ - if (unlikely(debugfs_record_enabled)) \ - SPS_DEBUGFS(msg, ##args); \ SPS_IPC(3, dev, msg, args); \ } while (0) #define SPS_DBG(dev, msg, args...) do { \ @@ -181,8 +172,6 @@ extern u8 print_limit_option; pr_info(msg, ##args); \ } else \ pr_debug(msg, ##args); \ - if (unlikely(debugfs_record_enabled)) \ - SPS_DEBUGFS(msg, ##args); \ if (dev) { \ if ((dev)->ipc_loglevel <= 0) \ SPS_IPC(0, dev, msg, args); \ @@ -197,8 +186,6 @@ extern u8 print_limit_option; pr_info(msg, ##args); \ } else \ pr_debug(msg, ##args); \ - if (unlikely(debugfs_record_enabled)) \ - SPS_DEBUGFS(msg, ##args); \ if (dev) { \ if ((dev)->ipc_loglevel <= 1) \ SPS_IPC(1, dev, msg, args); \ @@ -213,8 +200,6 @@ extern u8 print_limit_option; pr_info(msg, ##args); \ } else \ pr_debug(msg, ##args); \ - if (unlikely(debugfs_record_enabled)) \ - SPS_DEBUGFS(msg, ##args); \ if (dev) { \ if ((dev)->ipc_loglevel <= 2) \ SPS_IPC(2, dev, msg, args); \ @@ -229,8 +214,6 @@ extern u8 print_limit_option; pr_info(msg, ##args); \ } else \ pr_debug(msg, ##args); \ - if (unlikely(debugfs_record_enabled)) \ - SPS_DEBUGFS(msg, ##args); \ if (dev) { \ if ((dev)->ipc_loglevel <= 3) \ SPS_IPC(3, dev, msg, args); \ -- GitLab From 2a3e7f59401379fa9b279a538424086ff4cc217e Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Thu, 1 Jun 2017 13:02:06 -0600 Subject: [PATCH 235/786] msm: kgsl: Increase the global memory size This is needed to accommodate the preemption buffers for a6xx. The per ringbuffer preemption buffer is little more than 2 mb, so increase the size to 20 mb. 
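As a rough check on the new budget: the A6XX preemption patch later in this series sizes each per-ringbuffer context record at 2112 KB, so with the usual four KGSL ringbuffers (an assumption, the count is not stated here) the records alone need about 8.25 MB, which already overflows the old 8 MB global window before any of the existing global buffers are counted. A small sketch of that arithmetic:

#include <stdio.h>

/* Illustrative arithmetic only. The 2112 KB record size comes from
 * A6XX_CP_CTXRECORD_SIZE_IN_BYTES later in this series; the
 * four-ringbuffer count is an assumption, not something this patch
 * states.
 */
int main(void)
{
        const unsigned long record_bytes = 2112UL * 1024;
        const unsigned int ringbuffers = 4;
        const double mb = 1024.0 * 1024.0;

        printf("preemption records: %.2f MB\n",
               ringbuffers * record_bytes / mb);
        printf("global window: old 8 MB -> new 20 MB\n");
        return 0;
}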
Change-Id: I968cb69c3b023f97a43ddb5e07c3a361bec06b2a Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/kgsl_iommu.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h index 6337a48633b9..acf8ae43d64d 100644 --- a/drivers/gpu/msm/kgsl_iommu.h +++ b/drivers/gpu/msm/kgsl_iommu.h @@ -23,7 +23,7 @@ * These defines control the address range for allocations that * are mapped into all pagetables. */ -#define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_8M +#define KGSL_IOMMU_GLOBAL_MEM_SIZE (20 * SZ_1M) #define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000 #define KGSL_IOMMU_SECURE_SIZE SZ_256M -- GitLab From 5a19df0d79c4fa59c6e53f7035ea846e1a58bff7 Mon Sep 17 00:00:00 2001 From: Ajay Agarwal Date: Thu, 22 Jun 2017 11:09:33 +0530 Subject: [PATCH 236/786] sound: usb: Add quirk to issue SET_CUR volume on 2nd channel Some audio devices with two channels require the host to issue SET_CUR command on the Volume control on both the channels in order to function (playback or record). Currently, the Linux Host driver issues SET_CUR only for the 1st channel. Hence, add the quirk for concerned devices for SET_CUR on 2nd channel. Change-Id: I6f0bbcdfd3a8b3ccb33a1d56c252c701849a442f Signed-off-by: Ajay Agarwal --- sound/usb/mixer.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index c3bf5ffe56e3..9e7861af23a2 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1013,6 +1013,17 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, cval->res = 384; } break; + + case USB_ID(0x1130, 0x1620): /* Logitech Speakers S150 */ + /* This audio device has 2 channels and it explicitly requires the + * host to send SET_CUR command on the volume control of both the + * channels. 7936 = 0x1F00 is the default value. + */ + if (cval->channels == 2) + snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, + (cval->control << 8) | 2, 7936); + break; + } } -- GitLab From 0cdc8993efcb50d4e81c49c064e76617408baf98 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Wed, 31 May 2017 15:44:05 -0600 Subject: [PATCH 237/786] msm: kgsl: Add A6XX preemption support This patch adds the basic a6xx preemption code, which includes: 1. New PM4 packets sequences which inform the CP of the outgoing and the incoming contexts. 2. The preemption initialization sequence. 3. Definitions of preemption related registers. 4. The fast preemption code. 
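To make item 2 concrete, the sketch below seeds one per-ringbuffer context record the way the field comments in adreno_a6xx.h (further down in this patch) prescribe. The helper, its gpuaddr parameters and the literal 0 standing in for A6XX_CP_CTXRECORD_ERROR_NONE are illustrative only; the real initialization is carried out by a6xx_preemption_init() added by this patch.

#include "adreno_a6xx.h"

/* Sketch: fill in a context record per the adreno_a6xx.h comments.
 * The record and gpuaddr arguments are hypothetical, and 0 is assumed
 * to be A6XX_CP_CTXRECORD_ERROR_NONE (not defined in the hunks shown).
 */
static void example_init_ctxrecord(struct a6xx_cp_preemption_record *rec,
                                   uint64_t rbase, uint64_t rptr_addr,
                                   uint64_t counter_addr)
{
        rec->magic = A6XX_CP_CTXRECORD_MAGIC_REF; /* checked by CP */
        rec->info = 0;          /* must be zero for every ringbuffer */
        rec->errno = 0;         /* no preemption error yet */
        rec->data = 0;          /* YIELD/SET_MARKER data, owned by CP */
        rec->cntl = A6XX_CP_RB_CNTL_DEFAULT;
        rec->rptr = 0;
        rec->wptr = 0;
        rec->rptr_addr = rptr_addr;   /* saved/restored RB_RPTR_ADDR */
        rec->rbase = rbase;           /* ringbuffer base GPU address */
        rec->counter = counter_addr;  /* preemption counter block */
}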
Change-Id: I37ae9d6cdd3d6c7fd5cb635502f5535713042732 Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/Makefile | 1 + drivers/gpu/msm/a6xx_reg.h | 10 + drivers/gpu/msm/adreno.h | 1 + drivers/gpu/msm/adreno_a6xx.c | 91 +++- drivers/gpu/msm/adreno_a6xx.h | 79 +++- drivers/gpu/msm/adreno_a6xx_preempt.c | 602 ++++++++++++++++++++++++++ drivers/gpu/msm/adreno_pm4types.h | 6 + drivers/gpu/msm/adreno_ringbuffer.c | 22 +- drivers/gpu/msm/adreno_ringbuffer.h | 5 +- 9 files changed, 804 insertions(+), 13 deletions(-) create mode 100644 drivers/gpu/msm/adreno_a6xx_preempt.c diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile index f51320701c9e..005822672101 100644 --- a/drivers/gpu/msm/Makefile +++ b/drivers/gpu/msm/Makefile @@ -38,6 +38,7 @@ msm_adreno-y += \ adreno_a6xx_snapshot.o \ adreno_a4xx_preempt.o \ adreno_a5xx_preempt.o \ + adreno_a6xx_preempt.o \ adreno_sysfs.o \ adreno.o \ adreno_cp_parser.o \ diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h index 58ef5eedebeb..f4552b61a4dc 100644 --- a/drivers/gpu/msm/a6xx_reg.h +++ b/drivers/gpu/msm/a6xx_reg.h @@ -70,6 +70,15 @@ #define A6XX_CP_ADDR_MODE_CNTL 0x842 #define A6XX_CP_PROTECT_CNTL 0x84F #define A6XX_CP_PROTECT_REG 0x850 +#define A6XX_CP_CONTEXT_SWITCH_CNTL 0x8A0 +#define A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x8A1 +#define A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x8A2 +#define A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO 0x8A3 +#define A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI 0x8A4 +#define A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO 0x8A5 +#define A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x8A6 +#define A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x8A7 +#define A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x8A8 #define A6XX_CP_PERFCTR_CP_SEL_0 0x8D0 #define A6XX_CP_PERFCTR_CP_SEL_1 0x8D1 #define A6XX_CP_PERFCTR_CP_SEL_2 0x8D2 @@ -590,6 +599,7 @@ #define A6XX_RB_PERFCTR_CMP_SEL_1 0x8E2D #define A6XX_RB_PERFCTR_CMP_SEL_2 0x8E2E #define A6XX_RB_PERFCTR_CMP_SEL_3 0x8E2F +#define A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x8E50 /* PC registers */ #define A6XX_PC_DBG_ECO_CNTL 0x9E00 diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index 7a6581c9480e..b3b4ccb2ecd1 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -845,6 +845,7 @@ struct adreno_gpudev { unsigned int *cmds, struct kgsl_context *context); int (*preemption_yield_enable)(unsigned int *); + unsigned int (*preemption_set_marker)(unsigned int *cmds, int start); unsigned int (*preemption_post_ibsubmit)( struct adreno_device *adreno_dev, unsigned int *cmds); diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 3cbb68e82971..2c46b934088c 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -29,9 +29,6 @@ #include "kgsl_gmu.h" #include "kgsl_trace.h" -#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \ - (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F)) - #define MIN_HBB 13 #define A6XX_LLC_NUM_GPU_SCIDS 5 @@ -482,6 +479,12 @@ static void a6xx_start(struct adreno_device *adreno_dev) if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI)) kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8)); + /* Enable the GMEM save/restore feature for preemption */ + if (adreno_is_preemption_enabled(adreno_dev)) + kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, + 0x1); + + a6xx_preemption_start(adreno_dev); a6xx_protect_init(adreno_dev); } @@ -611,6 +614,70 @@ static int a6xx_send_cp_init(struct adreno_device 
*adreno_dev, return ret; } +/* + * Follow the ME_INIT sequence with a preemption yield to allow the GPU to move + * to a different ringbuffer, if desired + */ +static int _preemption_init(struct adreno_device *adreno_dev, + struct adreno_ringbuffer *rb, unsigned int *cmds, + struct kgsl_context *context) +{ + unsigned int *cmds_orig = cmds; + + /* Turn CP protection OFF */ + *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1); + *cmds++ = 0; + + *cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 6); + *cmds++ = 1; + cmds += cp_gpuaddr(adreno_dev, cmds, + rb->preemption_desc.gpuaddr); + + *cmds++ = 2; + cmds += cp_gpuaddr(adreno_dev, cmds, 0); + + /* Turn CP protection ON */ + *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1); + *cmds++ = 1; + + *cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4); + cmds += cp_gpuaddr(adreno_dev, cmds, 0x0); + *cmds++ = 0; + /* generate interrupt on preemption completion */ + *cmds++ = 0; + + return cmds - cmds_orig; +} + +static int a6xx_post_start(struct adreno_device *adreno_dev) +{ + int ret; + unsigned int *cmds, *start; + struct adreno_ringbuffer *rb = adreno_dev->cur_rb; + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + + if (!adreno_is_preemption_enabled(adreno_dev)) + return 0; + + cmds = adreno_ringbuffer_allocspace(rb, 42); + if (IS_ERR(cmds)) { + KGSL_DRV_ERR(device, "error allocating preemption init cmds"); + return PTR_ERR(cmds); + } + start = cmds; + + cmds += _preemption_init(adreno_dev, rb, cmds, NULL); + + rb->_wptr = rb->_wptr - (42 - (cmds - start)); + + ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000); + if (ret) + adreno_spin_idle_debug(adreno_dev, + "hw preemption initialization failed to idle\n"); + + return ret; +} + /* * a6xx_rb_start() - Start the ringbuffer * @adreno_dev: Pointer to adreno device @@ -651,7 +718,11 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev, return ret; /* GPU comes up in secured mode, make it unsecured by default */ - return adreno_set_unsecured_mode(adreno_dev, rb); + ret = adreno_set_unsecured_mode(adreno_dev, rb); + if (ret) + return ret; + + return a6xx_post_start(adreno_dev); } static int _load_firmware(struct kgsl_device *device, const char *fwfile, @@ -2086,7 +2157,7 @@ static struct adreno_irq_funcs a6xx_irq_funcs[32] = { /* 6 - RBBM_ATB_ASYNC_OVERFLOW */ ADRENO_IRQ_CALLBACK(a6xx_err_callback), ADRENO_IRQ_CALLBACK(NULL), /* 7 - GPC_ERR */ - ADRENO_IRQ_CALLBACK(NULL),/* 8 - CP_SW */ + ADRENO_IRQ_CALLBACK(a6xx_preemption_callback),/* 8 - CP_SW */ ADRENO_IRQ_CALLBACK(a6xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */ ADRENO_IRQ_CALLBACK(NULL), /* 10 - CP_CCU_FLUSH_DEPTH_TS */ ADRENO_IRQ_CALLBACK(NULL), /* 11 - CP_CCU_FLUSH_COLOR_TS */ @@ -2580,6 +2651,11 @@ static unsigned int a6xx_register_offsets[ADRENO_REG_REGISTER_MAX] = { ADRENO_REG_DEFINE(ADRENO_REG_CP_IB2_BUFSZ, A6XX_CP_IB2_REM_SIZE), ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_ADDR, A6XX_CP_ROQ_DBG_ADDR), ADRENO_REG_DEFINE(ADRENO_REG_CP_ROQ_DATA, A6XX_CP_ROQ_DBG_DATA), + ADRENO_REG_DEFINE(ADRENO_REG_CP_PREEMPT, A6XX_CP_CONTEXT_SWITCH_CNTL), + ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO, + A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO), + ADRENO_REG_DEFINE(ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI, + A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS, A6XX_RBBM_STATUS), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_STATUS3, A6XX_RBBM_STATUS3), ADRENO_REG_DEFINE(ADRENO_REG_RBBM_PERFCTR_CTL, A6XX_RBBM_PERFCTR_CNTL), @@ -2693,4 +2769,9 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .iommu_fault_block = 
a6xx_iommu_fault_block, .reset = a6xx_reset, .soft_reset = a6xx_soft_reset, + .preemption_pre_ibsubmit = a6xx_preemption_pre_ibsubmit, + .preemption_post_ibsubmit = a6xx_preemption_post_ibsubmit, + .preemption_init = a6xx_preemption_init, + .preemption_schedule = a6xx_preemption_schedule, + .preemption_set_marker = a6xx_preemption_set_marker, }; diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h index 4b96f5616e7b..27382383087c 100644 --- a/drivers/gpu/msm/adreno_a6xx.h +++ b/drivers/gpu/msm/adreno_a6xx.h @@ -23,10 +23,87 @@ #define CP_CLUSTER_SP_PS 0x4 #define CP_CLUSTER_PS 0x5 +/** + * struct a6xx_cp_preemption_record - CP context record for + * preemption. + * @magic: (00) Value at this offset must be equal to + * A6XX_CP_CTXRECORD_MAGIC_REF. + * @info: (04) Type of record. Written non-zero (usually) by CP. + * we must set to zero for all ringbuffers. + * @errno: (08) Error code. Initialize this to A6XX_CP_CTXRECORD_ERROR_NONE. + * CP will update to another value if a preemption error occurs. + * @data: (12) DATA field in YIELD and SET_MARKER packets. + * Written by CP when switching out. Not used on switch-in. Initialized to 0. + * @cntl: (16) RB_CNTL, saved and restored by CP. We must initialize this. + * @rptr: (20) RB_RPTR, saved and restored by CP. We must initialize this. + * @wptr: (24) RB_WPTR, saved and restored by CP. We must initialize this. + * @_pad28: (28) Reserved/padding. + * @rptr_addr: (32) RB_RPTR_ADDR_LO|HI saved and restored. We must initialize. + * rbase: (40) RB_BASE_LO|HI saved and restored. + * counter: (48) Pointer to preemption counter. + */ +struct a6xx_cp_preemption_record { + uint32_t magic; + uint32_t info; + uint32_t errno; + uint32_t data; + uint32_t cntl; + uint32_t rptr; + uint32_t wptr; + uint32_t _pad28; + uint64_t rptr_addr; + uint64_t rbase; + uint64_t counter; +}; + +/** + * struct a6xx_cp_smmu_info - CP preemption SMMU info. + * @magic: (00) The value at this offset must be equal to + * A6XX_CP_SMMU_INFO_MAGIC_REF. + * @_pad4: (04) Reserved/padding + * @ttbr0: (08) Base address of the page table for the + * incoming context. + * @context_idr: (16) Context Identification Register value. 
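As a quick cross-check of the layout documented in the kernel-doc for struct a6xx_cp_preemption_record above, a short host-side sketch (not part of the driver) can restate the byte offsets with offsetof(); the struct below only mirrors the field list from the patch, and the field called errno in the driver is renamed here to avoid the C library macro.

#include <stddef.h>
#include <stdint.h>

/* Host-side mirror of struct a6xx_cp_preemption_record, layout check only */
struct preemption_record_sketch {
	uint32_t magic;		/* 00: A6XX_CP_CTXRECORD_MAGIC_REF */
	uint32_t info;		/* 04 */
	uint32_t err;		/* 08: 'errno' in the driver */
	uint32_t data;		/* 12 */
	uint32_t cntl;		/* 16: RB_CNTL */
	uint32_t rptr;		/* 20 */
	uint32_t wptr;		/* 24 */
	uint32_t _pad28;	/* 28 */
	uint64_t rptr_addr;	/* 32 */
	uint64_t rbase;		/* 40 */
	uint64_t counter;	/* 48 */
};

_Static_assert(offsetof(struct preemption_record_sketch, data) == 12, "DATA at 12");
_Static_assert(offsetof(struct preemption_record_sketch, rptr_addr) == 32, "rptr_addr at 32");
_Static_assert(offsetof(struct preemption_record_sketch, counter) == 48, "counter at 48");

The explicit _pad28 word makes the 8-byte alignment of the three 64-bit fields part of the declared layout, which is why the documented offsets jump from 24 to 32.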
+ */ +struct a6xx_cp_smmu_info { + uint32_t magic; + uint32_t _pad4; + uint64_t ttbr0; + uint32_t asid; + uint32_t context_idr; +}; + +#define A6XX_CP_SMMU_INFO_MAGIC_REF 0x3618CDA3UL + +#define A6XX_CP_CTXRECORD_MAGIC_REF 0xAE399D6EUL +/* Size of each CP preemption record */ +#define A6XX_CP_CTXRECORD_SIZE_IN_BYTES (2112 * 1024) +/* Size of the preemption counter block (in bytes) */ +#define A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE (16 * 4) +/* Size of the performance counter save/restore block (in bytes) */ +#define A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE (4 * 1024) + +#define A6XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \ + (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F)) + +/* Preemption functions */ +void a6xx_preemption_trigger(struct adreno_device *adreno_dev); +void a6xx_preemption_schedule(struct adreno_device *adreno_dev); +void a6xx_preemption_start(struct adreno_device *adreno_dev); +int a6xx_preemption_init(struct adreno_device *adreno_dev); + +unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev, + unsigned int *cmds); +unsigned int a6xx_preemption_pre_ibsubmit(struct adreno_device *adreno_dev, + struct adreno_ringbuffer *rb, + unsigned int *cmds, struct kgsl_context *context); + +unsigned int a6xx_preemption_set_marker(unsigned int *cmds, int start); + +void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit); void a6xx_snapshot(struct adreno_device *adreno_dev, struct kgsl_snapshot *snapshot); void a6xx_crashdump_init(struct adreno_device *adreno_dev); - #endif diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c new file mode 100644 index 000000000000..c37791a1aaf0 --- /dev/null +++ b/drivers/gpu/msm/adreno_a6xx_preempt.c @@ -0,0 +1,602 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "adreno.h" +#include "adreno_a6xx.h" +#include "a6xx_reg.h" +#include "adreno_trace.h" +#include "adreno_pm4types.h" + +#define PREEMPT_RECORD(_field) \ + offsetof(struct a6xx_cp_preemption_record, _field) + +#define PREEMPT_SMMU_RECORD(_field) \ + offsetof(struct a6xx_cp_smmu_info, _field) + +enum { + SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO = 0, + SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR, + SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR, + SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR, + SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER, +}; + +static void _update_wptr(struct adreno_device *adreno_dev, bool reset_timer) +{ + struct adreno_ringbuffer *rb = adreno_dev->cur_rb; + unsigned int wptr; + unsigned long flags; + + spin_lock_irqsave(&rb->preempt_lock, flags); + + adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr); + + if (wptr != rb->wptr) { + adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, + rb->wptr); + /* + * In case something got submitted while preemption was on + * going, reset the timer. 
+ */ + reset_timer = true; + } + + if (reset_timer) + rb->dispatch_q.expires = jiffies + + msecs_to_jiffies(adreno_drawobj_timeout); + + spin_unlock_irqrestore(&rb->preempt_lock, flags); +} + +static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev, + enum adreno_preempt_states old, enum adreno_preempt_states new) +{ + return (atomic_cmpxchg(&adreno_dev->preempt.state, old, new) == old); +} + +static void _a6xx_preemption_done(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + unsigned int status; + + /* + * In the very unlikely case that the power is off, do nothing - the + * state will be reset on power up and everybody will be happy + */ + + if (!kgsl_state_is_awake(device)) + return; + + adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status); + + if (status & 0x1) { + KGSL_DRV_ERR(device, + "Preemption not complete: status=%X cur=%d R/W=%X/%X next=%d R/W=%X/%X\n", + status, adreno_dev->cur_rb->id, + adreno_get_rptr(adreno_dev->cur_rb), + adreno_dev->cur_rb->wptr, adreno_dev->next_rb->id, + adreno_get_rptr(adreno_dev->next_rb), + adreno_dev->next_rb->wptr); + + /* Set a fault and restart */ + adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT); + adreno_dispatcher_schedule(device); + + return; + } + + del_timer_sync(&adreno_dev->preempt.timer); + + trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb); + + /* Clean up all the bits */ + adreno_dev->prev_rb = adreno_dev->cur_rb; + adreno_dev->cur_rb = adreno_dev->next_rb; + adreno_dev->next_rb = NULL; + + /* Update the wptr for the new command queue */ + _update_wptr(adreno_dev, true); + + /* Update the dispatcher timer for the new command queue */ + mod_timer(&adreno_dev->dispatcher.timer, + adreno_dev->cur_rb->dispatch_q.expires); + + /* Clear the preempt state */ + adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE); +} + +static void _a6xx_preemption_fault(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + unsigned int status; + + /* + * If the power is on check the preemption status one more time - if it + * was successful then just transition to the complete state + */ + if (kgsl_state_is_awake(device)) { + adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status); + + if (status == 0) { + adreno_set_preempt_state(adreno_dev, + ADRENO_PREEMPT_COMPLETE); + + adreno_dispatcher_schedule(device); + return; + } + } + + KGSL_DRV_ERR(device, + "Preemption timed out: cur=%d R/W=%X/%X, next=%d R/W=%X/%X\n", + adreno_dev->cur_rb->id, + adreno_get_rptr(adreno_dev->cur_rb), adreno_dev->cur_rb->wptr, + adreno_dev->next_rb->id, + adreno_get_rptr(adreno_dev->next_rb), + adreno_dev->next_rb->wptr); + + adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT); + adreno_dispatcher_schedule(device); +} + +static void _a6xx_preemption_worker(struct work_struct *work) +{ + struct adreno_preemption *preempt = container_of(work, + struct adreno_preemption, work); + struct adreno_device *adreno_dev = container_of(preempt, + struct adreno_device, preempt); + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + + /* Need to take the mutex to make sure that the power stays on */ + mutex_lock(&device->mutex); + + if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_FAULTED)) + _a6xx_preemption_fault(adreno_dev); + + mutex_unlock(&device->mutex); +} + +static void _a6xx_preemption_timer(unsigned long data) +{ + struct adreno_device *adreno_dev = (struct adreno_device *) data; + + /* We should only be here from a triggered 
state */ + if (!adreno_move_preempt_state(adreno_dev, + ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_FAULTED)) + return; + + /* Schedule the worker to take care of the details */ + queue_work(system_unbound_wq, &adreno_dev->preempt.work); +} + +/* Find the highest priority active ringbuffer */ +static struct adreno_ringbuffer *a6xx_next_ringbuffer( + struct adreno_device *adreno_dev) +{ + struct adreno_ringbuffer *rb; + unsigned long flags; + unsigned int i; + + FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { + bool empty; + + spin_lock_irqsave(&rb->preempt_lock, flags); + empty = adreno_rb_empty(rb); + spin_unlock_irqrestore(&rb->preempt_lock, flags); + + if (empty == false) + return rb; + } + + return NULL; +} + +void a6xx_preemption_trigger(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device); + struct adreno_ringbuffer *next; + uint64_t ttbr0; + unsigned int contextidr; + unsigned long flags; + uint32_t preempt_level = 0, usesgmem = 1, skipsaverestore = 0; + + /* Put ourselves into a possible trigger state */ + if (!adreno_move_preempt_state(adreno_dev, + ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START)) + return; + + /* Get the next ringbuffer to preempt in */ + next = a6xx_next_ringbuffer(adreno_dev); + + /* + * Nothing to do if every ringbuffer is empty or if the current + * ringbuffer is the only active one + */ + if (next == NULL || next == adreno_dev->cur_rb) { + /* + * Update any critical things that might have been skipped while + * we were looking for a new ringbuffer + */ + + if (next != NULL) { + _update_wptr(adreno_dev, false); + + mod_timer(&adreno_dev->dispatcher.timer, + adreno_dev->cur_rb->dispatch_q.expires); + } + + adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE); + return; + } + + /* Turn off the dispatcher timer */ + del_timer(&adreno_dev->dispatcher.timer); + + /* + * This is the most critical section - we need to take care not to race + * until we have programmed the CP for the switch + */ + + spin_lock_irqsave(&next->preempt_lock, flags); + + /* + * Get the pagetable from the pagetable info. + * The pagetable_desc is allocated and mapped at probe time, and + * preemption_desc at init time, so no need to check if + * sharedmem accesses to these memdescs succeed. 
+ */ + kgsl_sharedmem_readq(&next->pagetable_desc, &ttbr0, + PT_INFO_OFFSET(ttbr0)); + kgsl_sharedmem_readl(&next->pagetable_desc, &contextidr, + PT_INFO_OFFSET(contextidr)); + + kgsl_sharedmem_writel(device, &next->preemption_desc, + PREEMPT_RECORD(wptr), next->wptr); + + spin_unlock_irqrestore(&next->preempt_lock, flags); + + /* And write it to the smmu info */ + kgsl_sharedmem_writeq(device, &iommu->smmu_info, + PREEMPT_SMMU_RECORD(ttbr0), ttbr0); + kgsl_sharedmem_writel(device, &iommu->smmu_info, + PREEMPT_SMMU_RECORD(context_idr), contextidr); + + kgsl_regwrite(device, + A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO, + lower_32_bits(next->preemption_desc.gpuaddr)); + kgsl_regwrite(device, + A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI, + upper_32_bits(next->preemption_desc.gpuaddr)); + + adreno_dev->next_rb = next; + + /* Start the timer to detect a stuck preemption */ + mod_timer(&adreno_dev->preempt.timer, + jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT)); + + trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb); + + adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED); + + /* Trigger the preemption */ + adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, + ((preempt_level << 6) & 0xC0) | + ((skipsaverestore << 9) & 0x200) | + ((usesgmem << 8) & 0x100) | 0x1); +} + +void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit) +{ + unsigned int status; + + if (!adreno_move_preempt_state(adreno_dev, + ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_PENDING)) + return; + + adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status); + + if (status & 0x1) { + KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev), + "preempt interrupt with non-zero status: %X\n", status); + + /* + * Under the assumption that this is a race between the + * interrupt and the register, schedule the worker to clean up. + * If the status still hasn't resolved itself by the time we get + * there then we have to assume something bad happened + */ + adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE); + adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev)); + return; + } + + del_timer(&adreno_dev->preempt.timer); + + trace_adreno_preempt_done(adreno_dev->cur_rb, + adreno_dev->next_rb); + + adreno_dev->prev_rb = adreno_dev->cur_rb; + adreno_dev->cur_rb = adreno_dev->next_rb; + adreno_dev->next_rb = NULL; + + /* Update the wptr if it changed while preemption was ongoing */ + _update_wptr(adreno_dev, true); + + /* Update the dispatcher timer for the new command queue */ + mod_timer(&adreno_dev->dispatcher.timer, + adreno_dev->cur_rb->dispatch_q.expires); + + adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE); + + a6xx_preemption_trigger(adreno_dev); +} + +void a6xx_preemption_schedule(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + + if (!adreno_is_preemption_enabled(adreno_dev)) + return; + + mutex_lock(&device->mutex); + + if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE)) + _a6xx_preemption_done(adreno_dev); + + a6xx_preemption_trigger(adreno_dev); + + mutex_unlock(&device->mutex); +} + +unsigned int a6xx_preemption_set_marker(unsigned int *cmds, int start) +{ + *cmds++ = cp_type7_packet(CP_SET_MARKER, 1); + + /* + * Indicate the beginning and end of the IB1 list with a SET_MARKER. + * Among other things, this will implicitly enable and disable + * preemption respectively. 
+ */ + if (start) + *cmds++ = 0xD; + else + *cmds++ = 0xE; + + return 2; +} + +unsigned int a6xx_preemption_pre_ibsubmit( + struct adreno_device *adreno_dev, + struct adreno_ringbuffer *rb, + unsigned int *cmds, struct kgsl_context *context) +{ + unsigned int *cmds_orig = cmds; + + *cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 12); + + /* NULL SMMU_INFO buffer - we track in KMD */ + *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO; + cmds += cp_gpuaddr(adreno_dev, cmds, 0x0); + + *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_NON_SECURE_SAVE_ADDR; + cmds += cp_gpuaddr(adreno_dev, cmds, rb->preemption_desc.gpuaddr); + + *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR; + cmds += cp_gpuaddr(adreno_dev, cmds, 0); + + + /* + * There is no need to specify this address when we are about to + * trigger preemption. This is because CP internally stores this + * address specified here in the CP_SET_PSEUDO_REGISTER payload to + * the context record and thus knows from where to restore + * the saved perfcounters for the new ringbuffer. + */ + *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_COUNTER; + cmds += cp_gpuaddr(adreno_dev, cmds, + rb->perfcounter_save_restore_desc.gpuaddr); + + return (unsigned int) (cmds - cmds_orig); +} + +unsigned int a6xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev, + unsigned int *cmds) +{ + unsigned int *cmds_orig = cmds; + + *cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4); + cmds += cp_gpuaddr(adreno_dev, cmds, 0x0); + *cmds++ = 1; + *cmds++ = 0; + + return (unsigned int) (cmds - cmds_orig); +} + +void a6xx_preemption_start(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device); + struct adreno_ringbuffer *rb; + unsigned int i; + + if (!adreno_is_preemption_enabled(adreno_dev)) + return; + + /* Force the state to be clear */ + adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE); + + /* smmu_info is allocated and mapped in a6xx_preemption_iommu_init */ + kgsl_sharedmem_writel(device, &iommu->smmu_info, + PREEMPT_SMMU_RECORD(magic), A6XX_CP_SMMU_INFO_MAGIC_REF); + kgsl_sharedmem_writeq(device, &iommu->smmu_info, + PREEMPT_SMMU_RECORD(ttbr0), MMU_DEFAULT_TTBR0(device)); + + /* The CP doesn't use the asid record, so poison it */ + kgsl_sharedmem_writel(device, &iommu->smmu_info, + PREEMPT_SMMU_RECORD(asid), 0xDECAFBAD); + kgsl_sharedmem_writel(device, &iommu->smmu_info, + PREEMPT_SMMU_RECORD(context_idr), + MMU_DEFAULT_CONTEXTIDR(device)); + + adreno_writereg64(adreno_dev, + ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO, + ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI, + iommu->smmu_info.gpuaddr); + + FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { + /* + * preemption_desc is allocated and mapped at init time, + * so no need to check sharedmem_writel return value + */ + kgsl_sharedmem_writel(device, &rb->preemption_desc, + PREEMPT_RECORD(rptr), 0); + kgsl_sharedmem_writel(device, &rb->preemption_desc, + PREEMPT_RECORD(wptr), 0); + + adreno_ringbuffer_set_pagetable(rb, + device->mmu.defaultpagetable); + } +} + +static int a6xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev, + struct adreno_ringbuffer *rb, uint64_t counteraddr) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + int ret; + + ret = kgsl_allocate_global(device, &rb->preemption_desc, + A6XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED, + "preemption_desc"); + if (ret) + return ret; + + ret = kgsl_allocate_global(device, &rb->perfcounter_save_restore_desc, + 
A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE, 0, + KGSL_MEMDESC_PRIVILEGED, "perfcounter_save_restore_desc"); + if (ret) + return ret; + + kgsl_sharedmem_writel(device, &rb->preemption_desc, + PREEMPT_RECORD(magic), A6XX_CP_CTXRECORD_MAGIC_REF); + kgsl_sharedmem_writel(device, &rb->preemption_desc, + PREEMPT_RECORD(info), 0); + kgsl_sharedmem_writel(device, &rb->preemption_desc, + PREEMPT_RECORD(data), 0); + kgsl_sharedmem_writel(device, &rb->preemption_desc, + PREEMPT_RECORD(cntl), A6XX_CP_RB_CNTL_DEFAULT); + kgsl_sharedmem_writel(device, &rb->preemption_desc, + PREEMPT_RECORD(rptr), 0); + kgsl_sharedmem_writel(device, &rb->preemption_desc, + PREEMPT_RECORD(wptr), 0); + kgsl_sharedmem_writeq(device, &rb->preemption_desc, + PREEMPT_RECORD(rptr_addr), SCRATCH_RPTR_GPU_ADDR(device, + rb->id)); + kgsl_sharedmem_writeq(device, &rb->preemption_desc, + PREEMPT_RECORD(rbase), rb->buffer_desc.gpuaddr); + kgsl_sharedmem_writeq(device, &rb->preemption_desc, + PREEMPT_RECORD(counter), counteraddr); + + return 0; +} + +#ifdef CONFIG_QCOM_KGSL_IOMMU +static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device); + + /* Allocate mem for storing preemption smmu record */ + return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE, + KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED, + "smmu_info"); +} + +static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device); + + kgsl_free_global(device, &iommu->smmu_info); +} +#else +static int a6xx_preemption_iommu_init(struct adreno_device *adreno_dev) +{ + return -ENODEV; +} + +static void a6xx_preemption_iommu_close(struct adreno_device *adreno_dev) +{ +} +#endif + +static void a6xx_preemption_close(struct kgsl_device *device) +{ + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_preemption *preempt = &adreno_dev->preempt; + struct adreno_ringbuffer *rb; + unsigned int i; + + del_timer(&preempt->timer); + kgsl_free_global(device, &preempt->counters); + a6xx_preemption_iommu_close(adreno_dev); + + FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { + kgsl_free_global(device, &rb->preemption_desc); + kgsl_free_global(device, &rb->perfcounter_save_restore_desc); + } +} + +int a6xx_preemption_init(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + struct adreno_preemption *preempt = &adreno_dev->preempt; + struct adreno_ringbuffer *rb; + int ret; + unsigned int i; + uint64_t addr; + + /* We are dependent on IOMMU to make preemption go on the CP side */ + if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU) + return -ENODEV; + + INIT_WORK(&preempt->work, _a6xx_preemption_worker); + + setup_timer(&preempt->timer, _a6xx_preemption_timer, + (unsigned long) adreno_dev); + + /* Allocate mem for storing preemption counters */ + ret = kgsl_allocate_global(device, &preempt->counters, + adreno_dev->num_ringbuffers * + A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0, + "preemption_counters"); + if (ret) + goto err; + + addr = preempt->counters.gpuaddr; + + /* Allocate mem for storing preemption switch record */ + FOR_EACH_RINGBUFFER(adreno_dev, rb, i) { + ret = a6xx_preemption_ringbuffer_init(adreno_dev, rb, addr); + if (ret) + goto err; + + addr += A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE; + } + + ret = a6xx_preemption_iommu_init(adreno_dev); + +err: + if (ret) + 
a6xx_preemption_close(device); + + return ret; +} diff --git a/drivers/gpu/msm/adreno_pm4types.h b/drivers/gpu/msm/adreno_pm4types.h index fceceda6785c..2a330b4474aa 100644 --- a/drivers/gpu/msm/adreno_pm4types.h +++ b/drivers/gpu/msm/adreno_pm4types.h @@ -55,6 +55,12 @@ /* switches SMMU pagetable, used on a5xx only */ #define CP_SMMU_TABLE_UPDATE 0x53 +/* Set internal CP registers, used to indicate context save data addresses */ +#define CP_SET_PSEUDO_REGISTER 0x56 + +/* Tell CP the current operation mode, indicates save and restore procedure */ +#define CP_SET_MARKER 0x65 + /* register read/modify/write */ #define CP_REG_RMW 0x21 diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index bff1fdaf5456..15c68fb443f8 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -864,9 +864,12 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, dwords += 2; } - if (gpudev->preemption_yield_enable && - adreno_is_preemption_enabled(adreno_dev)) - dwords += 8; + if (adreno_is_preemption_enabled(adreno_dev)) { + if (gpudev->preemption_set_marker) + dwords += 4; + else if (gpudev->preemption_yield_enable) + dwords += 8; + } link = kcalloc(dwords, sizeof(unsigned int), GFP_KERNEL); if (!link) { @@ -897,6 +900,10 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, gpu_ticks_submitted)); } + if (gpudev->preemption_set_marker && + adreno_is_preemption_enabled(adreno_dev)) + cmds += gpudev->preemption_set_marker(cmds, 1); + if (numibs) { list_for_each_entry(ib, &cmdobj->cmdlist, node) { /* @@ -918,9 +925,12 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev, } } - if (gpudev->preemption_yield_enable && - adreno_is_preemption_enabled(adreno_dev)) - cmds += gpudev->preemption_yield_enable(cmds); + if (adreno_is_preemption_enabled(adreno_dev)) { + if (gpudev->preemption_set_marker) + cmds += gpudev->preemption_set_marker(cmds, 0); + else if (gpudev->preemption_yield_enable) + cmds += gpudev->preemption_yield_enable(cmds); + } if (kernel_profiling) { cmds += _get_alwayson_counter(adreno_dev, cmds, diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h index 63374af1e3f7..72fc5bf38461 100644 --- a/drivers/gpu/msm/adreno_ringbuffer.h +++ b/drivers/gpu/msm/adreno_ringbuffer.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -92,6 +92,8 @@ struct adreno_ringbuffer_pagetable_info { * @drawctxt_active: The last pagetable that this ringbuffer is set to * @preemption_desc: The memory descriptor containing * preemption info written/read by CP + * @perfcounter_save_restore_desc: Used by CP to save/restore the perfcounter + * values across preemption * @pagetable_desc: Memory to hold information about the pagetables being used * and the commands to switch pagetable on the RB * @dispatch_q: The dispatcher side queue for this ringbuffer @@ -118,6 +120,7 @@ struct adreno_ringbuffer { struct kgsl_event_group events; struct adreno_context *drawctxt_active; struct kgsl_memdesc preemption_desc; + struct kgsl_memdesc perfcounter_save_restore_desc; struct kgsl_memdesc pagetable_desc; struct adreno_dispatcher_drawqueue dispatch_q; wait_queue_head_t ts_expire_waitq; -- GitLab From 2e42f121b804386879114501deb3e7d9aa4ef3c2 Mon Sep 17 00:00:00 2001 From: Harshdeep Dhatt Date: Wed, 31 May 2017 17:27:19 -0600 Subject: [PATCH 238/786] msm: kgsl: Add the per context preemption buffer This buffer is used by CP to save/restore the VPC data for the outgoing/incoming context respectively. So allocate and map it during context initialization and then specify the gpu address of this buffer in the preemption packets. Change-Id: I3bb73322848e2f19f1f8e511fa5c303e57898cc8 Signed-off-by: Harshdeep Dhatt --- drivers/gpu/msm/adreno.h | 2 + drivers/gpu/msm/adreno_a6xx.c | 2 + drivers/gpu/msm/adreno_a6xx.h | 6 +++ drivers/gpu/msm/adreno_a6xx_preempt.c | 54 ++++++++++++++++++++++++++- drivers/gpu/msm/adreno_drawctxt.c | 19 ++++++++++ drivers/gpu/msm/kgsl.c | 8 ++-- drivers/gpu/msm/kgsl.h | 4 ++ drivers/gpu/msm/kgsl_device.h | 5 +++ 8 files changed, 94 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index b3b4ccb2ecd1..c7e3ad7e6974 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -851,6 +851,8 @@ struct adreno_gpudev { unsigned int *cmds); int (*preemption_init)(struct adreno_device *); void (*preemption_schedule)(struct adreno_device *); + int (*preemption_context_init)(struct kgsl_context *); + void (*preemption_context_destroy)(struct kgsl_context *); void (*enable_64bit)(struct adreno_device *); void (*clk_set_options)(struct adreno_device *, const char *, struct clk *, bool on); diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 2c46b934088c..ad0ce44baabe 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -2774,4 +2774,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .preemption_init = a6xx_preemption_init, .preemption_schedule = a6xx_preemption_schedule, .preemption_set_marker = a6xx_preemption_set_marker, + .preemption_context_init = a6xx_preemption_context_init, + .preemption_context_destroy = a6xx_preemption_context_destroy, }; diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h index 27382383087c..ddf89d6ada39 100644 --- a/drivers/gpu/msm/adreno_a6xx.h +++ b/drivers/gpu/msm/adreno_a6xx.h @@ -80,6 +80,8 @@ struct a6xx_cp_smmu_info { #define A6XX_CP_CTXRECORD_SIZE_IN_BYTES (2112 * 1024) /* Size of the preemption counter block (in bytes) */ #define A6XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE (16 * 4) +/* Size of the user context record block (in bytes) */ +#define A6XX_CP_CTXRECORD_USER_RESTORE_SIZE (192 * 1024) /* Size of the performance counter 
save/restore block (in bytes) */ #define A6XX_CP_PERFCOUNTER_SAVE_RESTORE_SIZE (4 * 1024) @@ -102,6 +104,10 @@ unsigned int a6xx_preemption_set_marker(unsigned int *cmds, int start); void a6xx_preemption_callback(struct adreno_device *adreno_dev, int bit); +int a6xx_preemption_context_init(struct kgsl_context *context); + +void a6xx_preemption_context_destroy(struct kgsl_context *context); + void a6xx_snapshot(struct adreno_device *adreno_dev, struct kgsl_snapshot *snapshot); diff --git a/drivers/gpu/msm/adreno_a6xx_preempt.c b/drivers/gpu/msm/adreno_a6xx_preempt.c index c37791a1aaf0..00325e57ff8f 100644 --- a/drivers/gpu/msm/adreno_a6xx_preempt.c +++ b/drivers/gpu/msm/adreno_a6xx_preempt.c @@ -277,6 +277,18 @@ void a6xx_preemption_trigger(struct adreno_device *adreno_dev) A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI, upper_32_bits(next->preemption_desc.gpuaddr)); + if (next->drawctxt_active) { + struct kgsl_context *context = &next->drawctxt_active->base; + uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr; + + kgsl_regwrite(device, + A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO, + lower_32_bits(gpuaddr)); + kgsl_regwrite(device, + A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI, + upper_32_bits(gpuaddr)); + } + adreno_dev->next_rb = next; /* Start the timer to detect a stuck preemption */ @@ -381,7 +393,10 @@ unsigned int a6xx_preemption_pre_ibsubmit( { unsigned int *cmds_orig = cmds; - *cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 12); + if (context) + *cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 15); + else + *cmds++ = cp_type7_packet(CP_SET_PSEUDO_REGISTER, 12); /* NULL SMMU_INFO buffer - we track in KMD */ *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_SMMU_INFO; @@ -393,6 +408,12 @@ unsigned int a6xx_preemption_pre_ibsubmit( *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_PRIV_SECURE_SAVE_ADDR; cmds += cp_gpuaddr(adreno_dev, cmds, 0); + if (context) { + uint64_t gpuaddr = context->user_ctxt_record->memdesc.gpuaddr; + + *cmds++ = SET_PSEUDO_REGISTER_SAVE_REGISTER_NON_PRIV_SAVE_ADDR; + cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr); + } /* * There is no need to specify this address when we are about to @@ -600,3 +621,34 @@ int a6xx_preemption_init(struct adreno_device *adreno_dev) return ret; } + +void a6xx_preemption_context_destroy(struct kgsl_context *context) +{ + struct kgsl_device *device = context->device; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + if (!adreno_is_preemption_enabled(adreno_dev)) + return; + + gpumem_free_entry(context->user_ctxt_record); +} + +int a6xx_preemption_context_init(struct kgsl_context *context) +{ + struct kgsl_device *device = context->device; + struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + + if (!adreno_is_preemption_enabled(adreno_dev)) + return 0; + + context->user_ctxt_record = gpumem_alloc_entry(context->dev_priv, + A6XX_CP_CTXRECORD_USER_RESTORE_SIZE, 0); + if (IS_ERR(context->user_ctxt_record)) { + int ret = PTR_ERR(context->user_ctxt_record); + + context->user_ctxt_record = NULL; + return ret; + } + + return 0; +} diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c index f217822fab41..c6df7bb503e2 100644 --- a/drivers/gpu/msm/adreno_drawctxt.c +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -341,6 +341,7 @@ adreno_drawctxt_create(struct kgsl_device_private *dev_priv, struct adreno_context *drawctxt; struct kgsl_device *device = dev_priv->device; struct adreno_device *adreno_dev = ADRENO_DEVICE(device); + struct adreno_gpudev *gpudev = 
ADRENO_GPU_DEVICE(adreno_dev); int ret; unsigned int local; @@ -421,6 +422,16 @@ adreno_drawctxt_create(struct kgsl_device_private *dev_priv, return ERR_PTR(ret); } + if (gpudev->preemption_context_init) { + ret = gpudev->preemption_context_init(&drawctxt->base); + if (ret != 0) { + kgsl_context_detach(&drawctxt->base); + kgsl_context_put(&drawctxt->base); + kfree(drawctxt); + return ERR_PTR(ret); + } + } + kgsl_sharedmem_writel(device, &device->memstore, KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp), 0); @@ -545,10 +556,18 @@ void adreno_drawctxt_detach(struct kgsl_context *context) void adreno_drawctxt_destroy(struct kgsl_context *context) { struct adreno_context *drawctxt; + struct adreno_device *adreno_dev; + struct adreno_gpudev *gpudev; if (context == NULL) return; + adreno_dev = ADRENO_DEVICE(context->device); + gpudev = ADRENO_GPU_DEVICE(adreno_dev); + + if (gpudev->preemption_context_destroy) + gpudev->preemption_context_destroy(context); + drawctxt = ADRENO_CONTEXT(context); debugfs_remove_recursive(drawctxt->debug_root); kfree(drawctxt); diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index 6bd212d66a72..7b8cdc2e679b 100644 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -245,8 +245,6 @@ int kgsl_readtimestamp(struct kgsl_device *device, void *priv, } EXPORT_SYMBOL(kgsl_readtimestamp); -static long gpumem_free_entry(struct kgsl_mem_entry *entry); - /* Scheduled by kgsl_mem_entry_put_deferred() */ static void _deferred_put(struct work_struct *work) { @@ -608,7 +606,7 @@ EXPORT_SYMBOL(kgsl_context_init); * detached by checking the KGSL_CONTEXT_PRIV_DETACHED bit in * context->priv. */ -static void kgsl_context_detach(struct kgsl_context *context) +void kgsl_context_detach(struct kgsl_context *context) { struct kgsl_device *device; @@ -1812,7 +1810,7 @@ long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv, return 0; } -static long gpumem_free_entry(struct kgsl_mem_entry *entry) +long gpumem_free_entry(struct kgsl_mem_entry *entry) { pid_t ptname = 0; @@ -3054,7 +3052,7 @@ static uint64_t kgsl_filter_cachemode(uint64_t flags) /* The largest allowable alignment for a GPU object is 32MB */ #define KGSL_MAX_ALIGN (32 * SZ_1M) -static struct kgsl_mem_entry *gpumem_alloc_entry( +struct kgsl_mem_entry *gpumem_alloc_entry( struct kgsl_device_private *dev_priv, uint64_t size, uint64_t flags) { diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 3f1c86ef74b1..c54e51efe647 100644 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -445,6 +445,10 @@ extern const struct dev_pm_ops kgsl_pm_ops; int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state); int kgsl_resume_driver(struct platform_device *pdev); +struct kgsl_mem_entry *gpumem_alloc_entry(struct kgsl_device_private *dev_priv, + uint64_t size, uint64_t flags); +long gpumem_free_entry(struct kgsl_mem_entry *entry); + static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc, uint64_t gpuaddr, uint64_t size) { diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index ca1f181560f3..b621ada6406e 100644 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -378,6 +378,8 @@ struct kgsl_process_private; * @pwr_constraint: power constraint from userspace for this context * @fault_count: number of times gpu hanged in last _context_throttle_time ms * @fault_time: time of the first gpu hang in last _context_throttle_time ms + * @user_ctxt_record: memory descriptor used by CP to save/restore VPC 
data + * across preemption */ struct kgsl_context { struct kref refcount; @@ -395,6 +397,7 @@ struct kgsl_context { struct kgsl_pwr_constraint pwr_constraint; unsigned int fault_count; unsigned long fault_time; + struct kgsl_mem_entry *user_ctxt_record; }; #define _context_comm(_c) \ @@ -689,6 +692,8 @@ void kgsl_snapshot_save_frozen_objs(struct work_struct *work); void kgsl_events_init(void); void kgsl_events_exit(void); +void kgsl_context_detach(struct kgsl_context *context); + void kgsl_del_event_group(struct kgsl_event_group *group); void kgsl_add_event_group(struct kgsl_event_group *group, -- GitLab From 060f60c4a80fde4bd9dda8dc969c300d79cb13f7 Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Fri, 2 Jun 2017 16:55:51 -0600 Subject: [PATCH 239/786] drivers: cpuidle: lpm-levels: Remove unused code All QTI chips support firmware configuration of Sleep modes through PSCI. Remove any legacy code not required for newer ARM targets. Change-Id: I414e90be54db8c86492e2d77ef02e226ac7c42e4 Signed-off-by: Mahesh Sivasubramanian --- drivers/cpuidle/lpm-levels-of.c | 118 +++++++++-------------- drivers/cpuidle/lpm-levels.c | 160 ++------------------------------ drivers/cpuidle/lpm-levels.h | 16 ---- include/soc/qcom/pm.h | 12 +-- 4 files changed, 52 insertions(+), 254 deletions(-) diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c index ed239c41797b..2201196c89a9 100644 --- a/drivers/cpuidle/lpm-levels-of.c +++ b/drivers/cpuidle/lpm-levels-of.c @@ -21,7 +21,6 @@ #include #include "lpm-levels.h" -bool use_psci; enum lpm_type { IDLE = 0, SUSPEND, @@ -431,30 +430,27 @@ static int parse_cluster_params(struct device_node *node, return ret; } - if (use_psci) { - key = "qcom,psci-mode-shift"; - ret = of_property_read_u32(node, key, - &c->psci_mode_shift); - if (ret) { - pr_err("%s(): Failed to read param: %s\n", - __func__, key); - return ret; - } + key = "qcom,psci-mode-shift"; + ret = of_property_read_u32(node, key, + &c->psci_mode_shift); + if (ret) { + pr_err("%s(): Failed to read param: %s\n", + __func__, key); + return ret; + } - key = "qcom,psci-mode-mask"; - ret = of_property_read_u32(node, key, - &c->psci_mode_mask); - if (ret) { - pr_err("%s(): Failed to read param: %s\n", - __func__, key); - return ret; - } + key = "qcom,psci-mode-mask"; + ret = of_property_read_u32(node, key, + &c->psci_mode_mask); + if (ret) { + pr_err("%s(): Failed to read param: %s\n", + __func__, key); + return ret; + } - /* Set ndevice to 1 as default */ - c->ndevices = 1; + /* Set ndevice to 1 as default */ + c->ndevices = 1; - } else - pr_warn("Target supports PSCI only\n"); return 0; } @@ -503,22 +499,14 @@ static int parse_cluster_level(struct device_node *node, if (ret) goto failed; - if (use_psci) { - char *k = "qcom,psci-mode"; - - ret = of_property_read_u32(node, k, &level->psci_id); - if (ret) - goto failed; - - level->is_reset = of_property_read_bool(node, "qcom,is-reset"); - } else - pr_warn("Build supports PSCI targets only"); + key = "qcom,psci-mode"; - key = "label"; - ret = of_property_read_string(node, key, &level->level_name); + ret = of_property_read_u32(node, key, &level->psci_id); if (ret) goto failed; + level->is_reset = of_property_read_bool(node, "qcom,is-reset"); + if (cluster->nlevels != cluster->default_level) { key = "min child idx"; ret = of_property_read_u32(node, "qcom,min-child-idx", @@ -531,10 +519,6 @@ static int parse_cluster_level(struct device_node *node, } level->notify_rpm = of_property_read_bool(node, "qcom,notify-rpm"); - 
level->disable_dynamic_routing = of_property_read_bool(node, - "qcom,disable-dynamic-int-routing"); - level->last_core_only = of_property_read_bool(node, - "qcom,last-core-only"); key = "parse_power_params"; ret = parse_power_params(node, &level->pwr); @@ -569,20 +553,16 @@ static int parse_cpu_mode(struct device_node *n, struct lpm_cpu_level *l) return ret; } - if (use_psci) { - key = "qcom,psci-cpu-mode"; - - ret = of_property_read_u32(n, key, &l->psci_id); - if (ret) { - pr_err("Failed reading %s on device %s\n", key, - n->name); - return ret; - } - key = "qcom,hyp-psci"; + key = "qcom,psci-cpu-mode"; + ret = of_property_read_u32(n, key, &l->psci_id); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + n->name); + return ret; + } + key = "qcom,hyp-psci"; - l->hyp_psci = of_property_read_bool(n, key); - } else - pr_warn("Build supports PSCI targets only"); + l->hyp_psci = of_property_read_bool(n, key); return 0; } @@ -651,24 +631,22 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) return ret; c->cpu->parent = c; - if (use_psci) { - key = "qcom,psci-mode-shift"; + key = "qcom,psci-mode-shift"; - ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift); - if (ret) { - pr_err("Failed reading %s on device %s\n", key, - node->name); - return ret; - } - key = "qcom,psci-mode-mask"; + ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + node->name); + return ret; + } + key = "qcom,psci-mode-mask"; - ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask); - if (ret) { - pr_err("Failed reading %s on device %s\n", key, - node->name); - return ret; - } + ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + node->name); + return ret; } for_each_child_of_node(node, n) { struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels]; @@ -709,7 +687,7 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) c->cpu->levels[i].pwr.residencies[j] = calculate_residency(&c->cpu->levels[i].pwr, - &c->cpu->levels[j].pwr); + &c->cpu->levels[j].pwr); pr_err("%s: idx %d %u\n", __func__, j, c->cpu->levels[i].pwr.residencies[j]); @@ -753,10 +731,8 @@ void free_cluster_node(struct lpm_cluster *cluster) } kfree(cluster->cpu); kfree(cluster->name); - kfree(cluster->lpm_dev); cluster->cpu = NULL; cluster->name = NULL; - cluster->lpm_dev = NULL; cluster->ndevices = 0; } @@ -795,7 +771,6 @@ struct lpm_cluster *parse_cluster(struct device_node *node, continue; key = "qcom,pm-cluster-level"; if (!of_node_cmp(n->name, key)) { - WARN_ON(!use_psci && c->no_saw_devices); if (parse_cluster_level(n, c)) goto failed_parse_cluster; continue; @@ -805,7 +780,6 @@ struct lpm_cluster *parse_cluster(struct device_node *node, if (!of_node_cmp(n->name, key)) { struct lpm_cluster *child; - WARN_ON(!use_psci && c->no_saw_devices); child = parse_cluster(n, c); if (!child) goto failed_parse_cluster; @@ -883,8 +857,6 @@ struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev) { struct device_node *top = NULL; - use_psci = of_property_read_bool(pdev->dev.of_node, "qcom,use-psci"); - top = of_find_node_by_name(pdev->dev.of_node, "qcom,pm-cluster"); if (!top) { pr_err("Failed to find root node\n"); diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 8b59beee4b4a..1a01e3fd30c8 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -53,10 +53,8 @@ 
#include #define SCLK_HZ (32768) -#define SCM_HANDOFF_LOCK_ID "S:7" #define PSCI_POWER_STATE(reset) (reset << 30) #define PSCI_AFFINITY_LEVEL(lvl) ((lvl & 0x3) << 24) -static remote_spinlock_t scm_handoff_lock; enum { MSM_LPM_LVL_DBG_SUSPEND_LIMITS = BIT(0), @@ -414,22 +412,6 @@ static void msm_pm_set_timer(uint32_t modified_time_us) hrtimer_start(&lpm_hrtimer, modified_ktime, HRTIMER_MODE_REL_PINNED); } -static int set_device_mode(struct lpm_cluster *cluster, int ndevice, - struct lpm_cluster_level *level) -{ - struct low_power_ops *ops; - - if (use_psci) - return 0; - - ops = &cluster->lpm_dev[ndevice]; - if (ops && ops->set_mode) - return ops->set_mode(ops, level->mode[ndevice], - level->notify_rpm); - else - return -EINVAL; -} - static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev, struct lpm_cpu *cpu, int *idx_restrict, uint32_t *idx_restrict_time) @@ -953,10 +935,6 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle, if (!lpm_cluster_mode_allow(cluster, i, from_idle)) continue; - if (level->last_core_only && - cpumask_weight(cpu_online_mask) > 1) - continue; - if (!cpumask_equal(&cluster->num_children_in_sync, &level->num_cpu_votes)) continue; @@ -1001,7 +979,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, bool from_idle, int predicted) { struct lpm_cluster_level *level = &cluster->levels[idx]; - int ret, i; if (!cpumask_equal(&cluster->num_children_in_sync, &cluster->child_cpus) || is_IPI_pending(&cluster->num_children_in_sync)) { @@ -1022,25 +999,12 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, ktime_to_us(ktime_get())); } - for (i = 0; i < cluster->ndevices; i++) { - ret = set_device_mode(cluster, i, level); - if (ret) - goto failed_set_mode; - } if (level->notify_rpm) { - struct cpumask nextcpu, *cpumask; uint64_t us; uint32_t pred_us; - us = get_cluster_sleep_time(cluster, &nextcpu, - from_idle, &pred_us); - cpumask = level->disable_dynamic_routing ? 
NULL : &nextcpu; - - if (ret) { - pr_info("Failed msm_rpm_enter_sleep() rc = %d\n", ret); - goto failed_set_mode; - } - + us = get_cluster_sleep_time(cluster, NULL, from_idle, + &pred_us); us = us + 1; clear_predict_history(); clear_cl_predict_history(); @@ -1062,17 +1026,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, } return 0; -failed_set_mode: - - for (i = 0; i < cluster->ndevices; i++) { - int rc = 0; - - level = &cluster->levels[cluster->default_level]; - // rc = set_device_mode(cluster, i, level); - WARN_ON(rc); - } - - return ret; } static void cluster_prepare(struct lpm_cluster *cluster, @@ -1152,7 +1105,7 @@ static void cluster_unprepare(struct lpm_cluster *cluster, { struct lpm_cluster_level *level; bool first_cpu; - int last_level, i, ret; + int last_level, i; if (!cluster) return; @@ -1202,13 +1155,8 @@ static void cluster_unprepare(struct lpm_cluster *cluster, last_level = cluster->last_level; cluster->last_level = cluster->default_level; - for (i = 0; i < cluster->ndevices; i++) { + for (i = 0; i < cluster->ndevices; i++) level = &cluster->levels[cluster->default_level]; - ret = set_device_mode(cluster, i, level); - - WARN_ON(ret); - - } cluster_notify(cluster, &cluster->levels[last_level], false); @@ -1305,8 +1253,8 @@ int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl) return state_id; } -#if !defined(CONFIG_CPU_V7) -bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) +static bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, + bool from_idle) { int affinity_level = 0; int state_id = get_cluster_id(cluster, &affinity_level); @@ -1336,41 +1284,6 @@ bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) success, 0xdeaffeed, true); return success; } -#elif defined(CONFIG_ARM_PSCI) -bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) -{ - int affinity_level = 0; - int state_id = get_cluster_id(cluster, &affinity_level); - int power_state = - PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset); - bool success = false; - - if (!idx) { - stop_critical_timings(); - wfi(); - start_critical_timings(); - return 1; - } - - affinity_level = PSCI_AFFINITY_LEVEL(affinity_level); - state_id |= (power_state | affinity_level - | cluster->cpu->levels[idx].psci_id); - - update_debug_pc_event(CPU_ENTER, state_id, - 0xdeaffeed, 0xdeaffeed, true); - stop_critical_timings(); - success = !arm_cpuidle_suspend(state_id); - start_critical_timings(); - update_debug_pc_event(CPU_EXIT, state_id, - success, 0xdeaffeed, true); -} -#else -bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, bool from_idle) -{ - WARN_ONCE(true, "PSCI cpu_suspend ops not supported\n"); - return false; -} -#endif static int lpm_cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) @@ -1444,7 +1357,6 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev, if (need_resched() || (idx < 0)) goto exit; - WARN_ON(!use_psci); success = psci_enter_sleep(cluster, idx, true); exit: @@ -1689,7 +1601,6 @@ static int lpm_suspend_enter(suspend_state_t state) * LPMs(XO and Vmin). 
*/ - WARN_ON(!use_psci); psci_enter_sleep(cluster, idx, true); if (idx > 0) @@ -1737,14 +1648,6 @@ static int lpm_probe(struct platform_device *pdev) hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cluster_timer_init(lpm_root_node); - ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID); - if (ret) { - pr_err("%s: Failed initializing scm_handoff_lock (%d)\n", - __func__, ret); - put_online_cpus(); - return ret; - } - size = num_dbg_elements * sizeof(struct lpm_debug); lpm_debug = dma_alloc_coherent(&pdev->dev, size, &lpm_debug_phys, GFP_KERNEL); @@ -1813,54 +1716,3 @@ static int __init lpm_levels_module_init(void) return rc; } late_initcall(lpm_levels_module_init); - -enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu) -{ - struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); - enum msm_pm_l2_scm_flag retflag = MSM_SCM_L2_ON; - - /* - * No need to acquire the lock if probe isn't completed yet - * In the event of the hotplug happening before lpm probe, we want to - * flush the cache to make sure that L2 is flushed. In particular, this - * could cause incoherencies for a cluster architecture. This wouldn't - * affect the idle case as the idle driver wouldn't be registered - * before the probe function - */ - if (!cluster) - return MSM_SCM_L2_OFF; - - /* - * Assumes L2 only. What/How parameters gets passed into TZ will - * determine how this function reports this info back in msm-pm.c - */ - spin_lock(&cluster->sync_lock); - - if (!cluster->lpm_dev) { - retflag = MSM_SCM_L2_OFF; - goto unlock_and_return; - } - - if (!cpumask_equal(&cluster->num_children_in_sync, - &cluster->child_cpus)) - goto unlock_and_return; - - if (cluster->lpm_dev) - retflag = cluster->lpm_dev->tz_flag; - /* - * The scm_handoff_lock will be release by the secure monitor. - * It is used to serialize power-collapses from this point on, - * so that both Linux and the secure context have a consistent - * view regarding the number of running cpus (cpu_count). - * - * It must be acquired before releasing the cluster lock. 
- */ -unlock_and_return: - update_debug_pc_event(PRE_PC_CB, retflag, 0xdeadbeef, 0xdeadbeef, - 0xdeadbeef); - trace_pre_pc_cb(retflag); - remote_spin_lock_rlock_id(&scm_handoff_lock, - REMOTE_SPINLOCK_TID_START + cpu); - spin_unlock(&cluster->sync_lock); - return retflag; -} diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h index 3d35ae9db3c4..9875edd0ef54 100644 --- a/drivers/cpuidle/lpm-levels.h +++ b/drivers/cpuidle/lpm-levels.h @@ -17,8 +17,6 @@ #define MAXSAMPLES 5 #define CLUST_SMPL_INVLD_TIME 40000 -extern bool use_psci; - struct lpm_lookup_table { uint32_t modes; const char *mode_name; @@ -74,21 +72,13 @@ struct lpm_cluster_level { struct cpumask num_cpu_votes; struct power_params pwr; bool notify_rpm; - bool disable_dynamic_routing; bool sync_level; - bool last_core_only; struct lpm_level_avail available; unsigned int psci_id; bool is_reset; int reset_level; }; -struct low_power_ops { - struct msm_spm_device *spm; - int (*set_mode)(struct low_power_ops *ops, int mode, bool notify_rpm); - enum msm_pm_l2_scm_flag tz_flag; -}; - struct cluster_history { uint32_t resi[MAXSAMPLES]; int mode[MAXSAMPLES]; @@ -108,11 +98,9 @@ struct lpm_cluster { const char *cluster_name; const char **name; unsigned long aff_level; /* Affinity level of the node */ - struct low_power_ops *lpm_dev; int ndevices; struct lpm_cluster_level levels[NR_LPM_LEVELS]; int nlevels; - enum msm_pm_l2_scm_flag l2_flag; int min_child_level; int default_level; int last_level; @@ -125,14 +113,10 @@ struct lpm_cluster { struct lpm_stats *stats; unsigned int psci_mode_shift; unsigned int psci_mode_mask; - bool no_saw_devices; struct cluster_history history; struct hrtimer histtimer; }; -int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm); -int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm); -int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm); void lpm_suspend_wake_time(uint64_t wakeup_time); struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev); diff --git a/include/soc/qcom/pm.h b/include/soc/qcom/pm.h index a82ada6abe99..58d011e5477f 100644 --- a/include/soc/qcom/pm.h +++ b/include/soc/qcom/pm.h @@ -1,6 +1,6 @@ /* * Copyright (C) 2007 Google, Inc. - * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved. * Author: San Mehat * * This software is licensed under the terms of the GNU General Public @@ -69,16 +69,6 @@ struct latency_level { const char *level_name; }; -/** - * lpm_cpu_pre_pc_cb(): API to get the L2 flag to pass to TZ - * - * @cpu: cpuid of the CPU going down. - * - * Returns the l2 flush flag enum that is passed down to TZ during power - * collaps - */ -enum msm_pm_l2_scm_flag lpm_cpu_pre_pc_cb(unsigned int cpu); - /** * msm_pm_sleep_mode_allow() - API to determine if sleep mode is allowed. * @cpu: CPU on which to check for the sleep mode. -- GitLab From 90cf3c71eaef655cedc8232128674b85b9a9103c Mon Sep 17 00:00:00 2001 From: Shivendra Kakrania Date: Fri, 23 Jun 2017 13:35:11 -0700 Subject: [PATCH 240/786] msm: vidc: Update correct HFI config for QP Updating the HFI config (HFI_PROPERTY_CONFIG_VENC_FRAME_QP) for configuring quantization parameter. 
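The payload format is not touched by this change: as the packetization hunk below shows, the per-frame-type QP values are still packed into one 32-bit word, with the I-frame QP in bits 0-7, the P-frame QP in bits 8-15 and the B-frame QP in bits 16-23. A minimal illustrative helper (the function name is made up here, not part of the driver):

#include <stdint.h>

/* Pack I/P/B frame QPs the way qp_packed is built in
 * create_pkt_cmd_session_set_property(): qpi | qpp << 8 | qpb << 16.
 */
static inline uint32_t pack_frame_qp(uint8_t qpi, uint8_t qpp, uint8_t qpb)
{
	return (uint32_t)qpi | ((uint32_t)qpp << 8) | ((uint32_t)qpb << 16);
}

The helper only restates what the existing packetization code does; the patch itself only switches which property macro is used when the packet is built.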
CRs-Fixed: 2066228 Change-Id: Ie7c5e4c0fafdf34e510ede1a311fa5f7fed81c1d Signed-off-by: Shivendra Kakrania --- drivers/media/platform/msm/vidc/hfi_packetization.c | 2 +- drivers/media/platform/msm/vidc/vidc_hfi_helper.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c index 8d54e206160a..18b2ccf4577c 100644 --- a/drivers/media/platform/msm/vidc/hfi_packetization.c +++ b/drivers/media/platform/msm/vidc/hfi_packetization.c @@ -1295,7 +1295,7 @@ int create_pkt_cmd_session_set_property( struct hal_quantization *hal_quant = (struct hal_quantization *) pdata; pkt->rg_property_data[0] = - HFI_PROPERTY_CONFIG_VENC_SESSION_QP; + HFI_PROPERTY_CONFIG_VENC_FRAME_QP; hfi = (struct hfi_quantization *) &pkt->rg_property_data[1]; hfi->qp_packed = hal_quant->qpi | hal_quant->qpp << 8 | hal_quant->qpb << 16; diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h index 616fc0960f3a..8169a9b5b7e0 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h @@ -339,7 +339,7 @@ struct hfi_buffer_info { (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00E) #define HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID \ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00F) -#define HFI_PROPERTY_CONFIG_VENC_SESSION_QP \ +#define HFI_PROPERTY_CONFIG_VENC_FRAME_QP \ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012) #define HFI_PROPERTY_PARAM_VPE_COMMON_START \ -- GitLab From 30ca8045fe12aedc231bad3d5877edb139075d20 Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Tue, 13 Jun 2017 16:05:47 -0600 Subject: [PATCH 241/786] drivers: thermal: cpu-cooling: unisolate CPU after hotplug If a CPU that has been isolated is offlined, as per the recommendation the CPU has to be unisolated. The existing CPU cooling device doesn't handle this scenario. Register for CPU hotplug state callback and unisolate the CPU after it is hotplugged. When the CPU is brought back online, veto it from coming online and bring it back online later when the isolation request is cleared. 
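The shape of that veto is the standard cpuhp callback pattern; below is a stripped-down sketch with placeholder names. The real patch keys the decision off the cooling device's cpufreq_state reaching max_level under core_isolate_lock (elided here), and sched_unisolate_cpu_unlocked() is the downstream scheduler helper the patch itself calls.

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

static struct cpumask sketch_isolated;		/* set while cooling isolates a CPU */
static struct cpumask sketch_pending_online;	/* CPUs whose bring-up was refused */

static int sketch_cpu_online(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &sketch_isolated)) {
		/* Veto the bring-up; it is onlined later when unthrottled */
		cpumask_set_cpu(cpu, &sketch_pending_online);
		return NOTIFY_BAD;
	}
	return 0;
}

static int sketch_cpu_offline(unsigned int cpu)
{
	/* An isolated CPU that goes offline must be unisolated */
	if (cpumask_test_cpu(cpu, &sketch_isolated))
		sched_unisolate_cpu_unlocked(cpu);
	return 0;
}

static int sketch_register_hotplug(void)
{
	int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpu_cooling/sketch",
				    sketch_cpu_online, sketch_cpu_offline);

	return ret < 0 ? ret : 0;	/* DYN returns the allocated state on success */
}

Returning NOTIFY_BAD from the online callback is what keeps an isolated CPU from slipping back online behind the cooling device's back; the pending mask is then drained from cpufreq_set_cur_state() via device_online(), as the hunk below shows.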
Change-Id: I1d73f3a0610b73b67e94900ec38d59402021289b Signed-off-by: Ram Chandrasekar --- drivers/thermal/cpu_cooling.c | 97 +++++++++++++++++++++++++++++++++-- 1 file changed, 92 insertions(+), 5 deletions(-) diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 355d0136ac2b..cd5bde3a5eff 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -118,6 +118,10 @@ static DEFINE_IDR(cpufreq_idr); static DEFINE_MUTEX(cooling_cpufreq_lock); static unsigned int cpufreq_dev_count; +static int8_t cpuhp_registered; +static struct work_struct cpuhp_register_work; +static struct cpumask cpus_pending_online; +static DEFINE_MUTEX(core_isolate_lock); static DEFINE_MUTEX(cooling_list_lock); static LIST_HEAD(cpufreq_dev_list); @@ -212,6 +216,49 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) } EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level); +static int cpufreq_hp_offline(unsigned int offline_cpu) +{ + struct cpufreq_cooling_device *cpufreq_dev; + + mutex_lock(&cooling_list_lock); + list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { + if (!cpumask_test_cpu(offline_cpu, &cpufreq_dev->allowed_cpus)) + continue; + + mutex_lock(&core_isolate_lock); + if (cpufreq_dev->cpufreq_state == cpufreq_dev->max_level) + sched_unisolate_cpu_unlocked(offline_cpu); + mutex_unlock(&core_isolate_lock); + break; + } + mutex_unlock(&cooling_list_lock); + + return 0; +} + +static int cpufreq_hp_online(unsigned int online_cpu) +{ + struct cpufreq_cooling_device *cpufreq_dev; + int ret = 0; + + mutex_lock(&cooling_list_lock); + list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { + if (!cpumask_test_cpu(online_cpu, &cpufreq_dev->allowed_cpus)) + continue; + + mutex_lock(&core_isolate_lock); + if (cpufreq_dev->cpufreq_state == cpufreq_dev->max_level) { + cpumask_set_cpu(online_cpu, &cpus_pending_online); + ret = NOTIFY_BAD; + } + mutex_unlock(&core_isolate_lock); + break; + } + mutex_unlock(&cooling_list_lock); + + return ret; +} + /** * cpufreq_thermal_notifier - notifier callback for cpufreq policy change. * @nb: struct notifier_block * with callback info. @@ -611,6 +658,9 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus); unsigned int clip_freq; + unsigned long prev_state; + struct device *cpu_dev; + int ret = 0; /* Request state should be less than max_level */ if (WARN_ON(state > cpufreq_device->max_level)) @@ -620,13 +670,34 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, if (cpufreq_device->cpufreq_state == state) return 0; + mutex_lock(&core_isolate_lock); + prev_state = cpufreq_device->cpufreq_state; cpufreq_device->cpufreq_state = state; /* If state is the last, isolate the CPU */ - if (state == cpufreq_device->max_level) - return sched_isolate_cpu(cpu); - else if (state < cpufreq_device->max_level) - sched_unisolate_cpu(cpu); - + if (state == cpufreq_device->max_level) { + if (cpu_online(cpu)) + sched_isolate_cpu(cpu); + mutex_unlock(&core_isolate_lock); + return ret; + } else if ((prev_state == cpufreq_device->max_level) + && (state < cpufreq_device->max_level)) { + if (cpumask_test_and_clear_cpu(cpu, &cpus_pending_online)) { + cpu_dev = get_cpu_device(cpu); + mutex_unlock(&core_isolate_lock); + /* + * Unlock before calling the device_online. + * Else, this will lead to deadlock, since the hp + * online callback will be blocked on this mutex. 
+ */ + ret = device_online(cpu_dev); + if (ret) + pr_err("CPU:%d online error:%d\n", cpu, ret); + goto update_frequency; + } else + sched_unisolate_cpu(cpu); + } + mutex_unlock(&core_isolate_lock); +update_frequency: clip_freq = cpufreq_device->freq_table[state]; cpufreq_device->clipped_freq = clip_freq; @@ -878,6 +949,16 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table, return max; } +static void register_cdev(struct work_struct *work) +{ + int ret = 0; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "cpu_cooling/no-sched", cpufreq_hp_online, + cpufreq_hp_offline); + if (ret < 0) + pr_err("Error registering for hotpug callback:%d\n", ret); +} /** * __cpufreq_cooling_register - helper function to create cpufreq cooling device * @np: a valid struct device_node to the cooling device device tree node @@ -1025,6 +1106,12 @@ __cpufreq_cooling_register(struct device_node *np, if (!cpufreq_dev_count++) cpufreq_register_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); + if (!cpuhp_registered) { + cpuhp_registered = 1; + cpumask_clear(&cpus_pending_online); + INIT_WORK(&cpuhp_register_work, register_cdev); + queue_work(system_wq, &cpuhp_register_work); + } mutex_unlock(&cooling_cpufreq_lock); goto put_policy; -- GitLab From 6d81556c99bcc9b2d20f727856fd5ca952dc41ac Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Tue, 13 Jun 2017 11:50:06 -0700 Subject: [PATCH 242/786] mailbox: msm_qmp: Add multi client functionality Enable data muxing to remote processor for a single mailbox. When multiple channels are opened for a single mailbox, receiving data from the remote proc is disabled. CRs-Fixed: 2052601 Change-Id: I83e86a36a621f4a8e9e2ac8d627842ee5d8a19e1 Signed-off-by: Chris Lew --- .../devicetree/bindings/arm/msm/msm_qmp.txt | 2 + drivers/mailbox/msm_qmp.c | 728 ++++++++++-------- 2 files changed, 428 insertions(+), 302 deletions(-) diff --git a/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt index 0a5c0b300e1a..5fb3e65cb321 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm_qmp.txt @@ -13,6 +13,7 @@ Required properties: - qcom,irq-mask : the bitmask to trigger an interrupt. - interrupt : the receiving interrupt line. - mbox-desc-offset : offset of mailbox descriptor from start of the msgram. +- priority : the priority of this mailbox compared to other mailboxes. - #mbox-cells: Common mailbox binding property to identify the number of cells required for the mailbox specifier, should be 1. 
@@ -33,6 +34,7 @@ Example: qcom,irq-mask = <0x1>; interrupt = <0 389 1>; mbox-desc-offset = <0x100>; + priority = <1>; mbox-offset = <0x500>; mbox-size = <0x400>; #mbox-cells = <1>; diff --git a/drivers/mailbox/msm_qmp.c b/drivers/mailbox/msm_qmp.c index dd022d3a0c1d..f0bb0bcfa0d7 100644 --- a/drivers/mailbox/msm_qmp.c +++ b/drivers/mailbox/msm_qmp.c @@ -26,7 +26,6 @@ #define QMP_MAGIC 0x4d41494c /* MAIL */ #define QMP_VERSION 0x1 #define QMP_FEATURES 0x0 -#define QMP_NUM_CHANS 0x1 #define QMP_TOUT_MS 5000 #define QMP_TX_TOUT_MS 2000 @@ -107,63 +106,89 @@ struct qmp_core_version { }; /** - * struct qmp_device - local information for managing a single mailbox - * @dev: The device that corresponds to this mailbox - * @mbox: The mbox controller for this mailbox - * @name: The name of this mailbox + * struct qmp_mbox - local information for managing a single mailbox + * @list: List head for adding mbox to linked list + * @ctrl: Controller for this mailbox + * @priority: Priority of mailbox in the linked list + * @num_assigned: Number of channels assigned for allocated pool + * @num_shutdown: Number of channels that have shutdown + * @desc: Reference to the mailbox descriptor in SMEM + * @rx_disabled: Disable rx if multiple client are sending from this mbox + * @tx_sent: True if tx is sent and remote proc has not sent ack + * @idx_in_flight: current channel idx whos tx is in flight + * @mcore_mbox_offset: Offset of mcore mbox from the msgram start + * @mcore_mbox_size: Size of the mcore mbox + * @rx_pkt: buffer to pass to client, holds copied data from mailbox + * @version: Version and features received during link negotiation * @local_state: Current state of the mailbox protocol + * @state_lock: Serialize mailbox state changes + * @tx_lock: Serialize access for writes to mailbox * @link_complete: Use to block until link negotiation with remote proc - * is complete * @ch_complete: Use to block until the channel is fully opened - * @tx_sent: True if tx is sent and remote proc has not sent ack * @ch_in_use: True if this mailbox's channel owned by a client - * @rx_buf: buffer to pass to client, holds copied data from mailbox - * @version: Version and features received during link negotiation - * @mcore_mbox_offset: Offset of mcore mbox from the msgram start - * @mcore_mbox_size: Size of the mcore mbox - * @desc: Reference to the mailbox descriptor in SMEM + * @dwork: Delayed work to detect timed out tx + */ +struct qmp_mbox { + struct list_head list; + struct mbox_controller ctrl; + int priority; + u32 num_assigned; + u32 num_shutdown; + + void __iomem *desc; + bool rx_disabled; + bool tx_sent; + u32 idx_in_flight; + u32 mcore_mbox_offset; + u32 mcore_mbox_size; + struct qmp_pkt rx_pkt; + + struct qmp_core_version version; + enum qmp_local_state local_state; + struct mutex state_lock; + spinlock_t tx_lock; + + struct completion link_complete; + struct completion ch_complete; + struct delayed_work dwork; + struct qmp_device *mdev; +}; + +/** + * struct qmp_device - local information for managing a single qmp edge + * @dev: The device that corresponds to this edge + * @name: The name of this mailbox + * @mboxes: The mbox controller for this mailbox * @msgram: Reference to the start of msgram - * @irq_mask: Mask written to @tx_irq_reg to trigger irq * @tx_irq_reg: Reference to the register to send an irq to remote proc * @rx_reset_reg: Reference to the register to reset the rx irq, if * applicable + * @kwork: kwork for rx handling + * @kworker: Handle to entitiy to process incoming data + * @task: 
Handle to task context used to run @kworker + * @irq_mask: Mask written to @tx_irq_reg to trigger irq * @rx_irq_line: The incoming interrupt line + * @rx_work: Work to be executed when an irq is received * @tx_irq_count: Number of tx interrupts triggered * @rx_irq_count: Number of rx interrupts received - * @kwork: Work to be executed when an irq is received - * @kworker: Handle to entitiy to process incoming data - * @task: Handle to task context used to run @kworker - * @state_lock: Serialize mailbox state changes - * @dwork: Delayed work to detect timed out tx - * @tx_lock: Serialize access for writes to mailbox */ struct qmp_device { struct device *dev; - struct mbox_controller *mbox; const char *name; - enum qmp_local_state local_state; - struct completion link_complete; - struct completion ch_complete; - bool tx_sent; - bool ch_in_use; - struct qmp_pkt rx_pkt; - struct qmp_core_version version; - u32 mcore_mbox_offset; - u32 mcore_mbox_size; - void __iomem *desc; + struct list_head mboxes; + void __iomem *msgram; - u32 irq_mask; void __iomem *tx_irq_reg; void __iomem *rx_reset_reg; - u32 rx_irq_line; - u32 tx_irq_count; - u32 rx_irq_count; + struct kthread_work kwork; struct kthread_worker kworker; struct task_struct *task; - struct mutex state_lock; - struct delayed_work dwork; - spinlock_t tx_lock; + + u32 irq_mask; + u32 rx_irq_line; + u32 tx_irq_count; + u32 rx_irq_count; }; /** @@ -181,25 +206,7 @@ static void send_irq(struct qmp_device *mdev) mdev->tx_irq_count++; } -/** - * qmp_irq_handler() - handle irq from remote entitity. - * @irq: irq number for the trggered interrupt. - * @priv: private pointer to qmp mbox device. - */ -irqreturn_t qmp_irq_handler(int irq, void *priv) -{ - struct qmp_device *mdev = (struct qmp_device *)priv; - - if (mdev->rx_reset_reg) - writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg); - - kthread_queue_work(&mdev->kworker, &mdev->kwork); - mdev->rx_irq_count++; - - return IRQ_HANDLED; -} - -static void memcpy32_toio(void *dest, void *src, size_t size) +static void memcpy32_toio(void __iomem *dest, void *src, size_t size) { u32 *dest_local = (u32 *)dest; u32 *src_local = (u32 *)src; @@ -210,7 +217,7 @@ static void memcpy32_toio(void *dest, void *src, size_t size) iowrite32(*src_local++, dest_local++); } -static void memcpy32_fromio(void *dest, void *src, size_t size) +static void memcpy32_fromio(void *dest, void __iomem *src, size_t size) { u32 *dest_local = (u32 *)dest; u32 *src_local = (u32 *)src; @@ -221,61 +228,74 @@ static void memcpy32_fromio(void *dest, void *src, size_t size) *dest_local++ = ioread32(src_local++); } +/** + * qmp_notify_timeout() - Notify client of tx timeout with -EIO + * @work: Structure for work that was scheduled. 
+ */ +static void qmp_notify_timeout(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct qmp_mbox *mbox = container_of(dwork, struct qmp_mbox, dwork); + struct mbox_chan *chan = &mbox->ctrl.chans[mbox->idx_in_flight]; + int err = -EIO; + unsigned long flags; + + spin_lock_irqsave(&mbox->tx_lock, flags); + if (!mbox->tx_sent) { + spin_unlock_irqrestore(&mbox->tx_lock, flags); + return; + } + pr_err("%s: qmp tx timeout for %d\n", __func__, mbox->idx_in_flight); + mbox->tx_sent = false; + spin_unlock_irqrestore(&mbox->tx_lock, flags); + mbox_chan_txdone(chan, err); +} + +static inline void qmp_schedule_tx_timeout(struct qmp_mbox *mbox) +{ + schedule_delayed_work(&mbox->dwork, msecs_to_jiffies(QMP_TX_TOUT_MS)); +} + /** * set_ucore_link_ack() - set the link ack in the ucore channel desc. - * @mdev: the mailbox for the field that is being set. + * @mbox: the mailbox for the field that is being set. * @state: the value to set the ack field to. */ -static void set_ucore_link_ack(struct qmp_device *mdev, u32 state) +static void set_ucore_link_ack(struct qmp_mbox *mbox, u32 state) { u32 offset; offset = offsetof(struct mbox_desc, ucore); offset += offsetof(struct channel_desc, link_state_ack); - iowrite32(state, mdev->desc + offset); + iowrite32(state, mbox->desc + offset); } /** * set_ucore_ch_ack() - set the channel ack in the ucore channel desc. - * @mdev: the mailbox for the field that is being set. + * @mbox: the mailbox for the field that is being set. * @state: the value to set the ack field to. */ -static void set_ucore_ch_ack(struct qmp_device *mdev, u32 state) +static void set_ucore_ch_ack(struct qmp_mbox *mbox, u32 state) { u32 offset; offset = offsetof(struct mbox_desc, ucore); offset += offsetof(struct channel_desc, ch_state_ack); - iowrite32(state, mdev->desc + offset); + iowrite32(state, mbox->desc + offset); } /** * set_mcore_ch() - set the channel state in the mcore channel desc. - * @mdev: the mailbox for the field that is being set. + * @mbox: the mailbox for the field that is being set. * @state: the value to set the channel field to. */ -static void set_mcore_ch(struct qmp_device *mdev, u32 state) +static void set_mcore_ch(struct qmp_mbox *mbox, u32 state) { u32 offset; offset = offsetof(struct mbox_desc, mcore); offset += offsetof(struct channel_desc, ch_state); - iowrite32(state, mdev->desc + offset); -} - -/** - * qmp_notify_timeout() - Notify client of tx timeout with -EIO - * @work: Structure for work that was scheduled. 
- */ -static void qmp_notify_timeout(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct qmp_device *mdev = container_of(dwork, struct qmp_device, dwork); - struct mbox_chan *chan = &mdev->mbox->chans[0]; - int err = -EIO; - - pr_err("%s: qmp tx timeout for %s\n", __func__, mdev->name); - mbox_chan_txdone(chan, err); + iowrite32(state, mbox->desc + offset); } /** @@ -288,35 +308,27 @@ static void qmp_notify_timeout(struct work_struct *work) */ static int qmp_startup(struct mbox_chan *chan) { - struct qmp_device *mdev = chan->con_priv; + struct qmp_mbox *mbox = chan->con_priv; - if (!mdev) + if (!mbox) return -EINVAL; - mutex_lock(&mdev->state_lock); - if (mdev->local_state == CHANNEL_CONNECTED) { - mutex_unlock(&mdev->state_lock); - return -EINVAL; - } - if (!completion_done(&mdev->link_complete)) { - mutex_unlock(&mdev->state_lock); + mutex_lock(&mbox->state_lock); + if (!completion_done(&mbox->link_complete)) { + mutex_unlock(&mbox->state_lock); return -EAGAIN; } - set_mcore_ch(mdev, QMP_MBOX_CH_CONNECTED); - mdev->local_state = LOCAL_CONNECTING; - mutex_unlock(&mdev->state_lock); + set_mcore_ch(mbox, QMP_MBOX_CH_CONNECTED); + mbox->local_state = LOCAL_CONNECTING; + mutex_unlock(&mbox->state_lock); - send_irq(mdev); - wait_for_completion_interruptible_timeout(&mdev->ch_complete, + send_irq(mbox->mdev); + wait_for_completion_interruptible_timeout(&mbox->ch_complete, msecs_to_jiffies(QMP_TOUT_MS)); return 0; } -static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev) -{ - schedule_delayed_work(&mdev->dwork, msecs_to_jiffies(QMP_TX_TOUT_MS)); -} /** * qmp_send_data() - Copy the data to the channel's mailbox and notify @@ -331,31 +343,39 @@ static inline void qmp_schedule_tx_timeout(struct qmp_device *mdev) */ static int qmp_send_data(struct mbox_chan *chan, void *data) { - struct qmp_device *mdev = chan->con_priv; + struct qmp_mbox *mbox = chan->con_priv; + struct qmp_device *mdev; struct qmp_pkt *pkt = (struct qmp_pkt *)data; void __iomem *addr; unsigned long flags; + int i; - if (!mdev || !data || mdev->local_state != CHANNEL_CONNECTED) + if (!mbox || !data || mbox->local_state != CHANNEL_CONNECTED) return -EINVAL; + mdev = mbox->mdev; - spin_lock_irqsave(&mdev->tx_lock, flags); - addr = mdev->msgram + mdev->mcore_mbox_offset; - if (ioread32(addr)) { - spin_unlock_irqrestore(&mdev->tx_lock, flags); + spin_lock_irqsave(&mbox->tx_lock, flags); + addr = mdev->msgram + mbox->mcore_mbox_offset; + if (mbox->tx_sent) { + spin_unlock_irqrestore(&mbox->tx_lock, flags); return -EBUSY; } - if (pkt->size + sizeof(pkt->size) > mdev->mcore_mbox_size) { - spin_unlock_irqrestore(&mdev->tx_lock, flags); + if (pkt->size + sizeof(pkt->size) > mbox->mcore_mbox_size) { + spin_unlock_irqrestore(&mbox->tx_lock, flags); return -EINVAL; } + memcpy32_toio(addr + sizeof(pkt->size), pkt->data, pkt->size); iowrite32(pkt->size, addr); - mdev->tx_sent = true; + mbox->tx_sent = true; + for (i = 0; i < mbox->ctrl.num_chans; i++) { + if (chan == &mbox->ctrl.chans[i]) + mbox->idx_in_flight = i; + } send_irq(mdev); - qmp_schedule_tx_timeout(mdev); - spin_unlock_irqrestore(&mdev->tx_lock, flags); + qmp_schedule_tx_timeout(mbox); + spin_unlock_irqrestore(&mbox->tx_lock, flags); return 0; } @@ -367,16 +387,23 @@ static int qmp_send_data(struct mbox_chan *chan, void *data) */ static void qmp_shutdown(struct mbox_chan *chan) { - struct qmp_device *mdev = chan->con_priv; + struct qmp_mbox *mbox = chan->con_priv; - mutex_lock(&mdev->state_lock); - if (mdev->local_state != 
LINK_DISCONNECTED) { - mdev->local_state = LOCAL_DISCONNECTING; - set_mcore_ch(mdev, QMP_MBOX_CH_DISCONNECTED); - send_irq(mdev); + mutex_lock(&mbox->state_lock); + mbox->num_shutdown++; + if (mbox->num_shutdown < mbox->num_assigned) { + mutex_unlock(&mbox->state_lock); + return; + } + + if (mbox->local_state != LINK_DISCONNECTED) { + mbox->local_state = LOCAL_DISCONNECTING; + set_mcore_ch(mbox, QMP_MBOX_CH_DISCONNECTED); + send_irq(mbox->mdev); } - mdev->ch_in_use = false; - mutex_unlock(&mdev->state_lock); + mbox->num_shutdown = 0; + mbox->num_assigned = 0; + mutex_unlock(&mbox->state_lock); } /** @@ -396,33 +423,34 @@ static bool qmp_last_tx_done(struct mbox_chan *chan) /** * qmp_recv_data() - received notification that data is available in the * mailbox. Copy data from mailbox and pass to client. - * @mdev: mailbox device that received the notification. + * @mbox: mailbox device that received the notification. * @mbox_of: offset of mailbox from msgram start. */ -static void qmp_recv_data(struct qmp_device *mdev, u32 mbox_of) +static void qmp_recv_data(struct qmp_mbox *mbox, u32 mbox_of) { void __iomem *addr; struct qmp_pkt *pkt; - addr = mdev->msgram + mbox_of; - pkt = &mdev->rx_pkt; + addr = mbox->mdev->msgram + mbox_of; + pkt = &mbox->rx_pkt; pkt->size = ioread32(addr); - if (pkt->size > mdev->mcore_mbox_size) + if (pkt->size > mbox->mcore_mbox_size) pr_err("%s: Invalid mailbox packet\n", __func__); else { memcpy32_fromio(pkt->data, addr + sizeof(pkt->size), pkt->size); - mbox_chan_received_data(&mdev->mbox->chans[0], &pkt); + mbox_chan_received_data(&mbox->ctrl.chans[mbox->idx_in_flight], + pkt); } iowrite32(0, addr); - send_irq(mdev); + send_irq(mbox->mdev); } /** * init_mcore_state() - initialize the mcore state of a mailbox. * @mdev: mailbox device to be initialized. */ -static void init_mcore_state(struct qmp_device *mdev) +static void init_mcore_state(struct qmp_mbox *mbox) { struct channel_desc mcore; u32 offset = offsetof(struct mbox_desc, mcore); @@ -431,40 +459,60 @@ static void init_mcore_state(struct qmp_device *mdev) mcore.link_state_ack = QMP_MBOX_LINK_DOWN; mcore.ch_state = QMP_MBOX_CH_DISCONNECTED; mcore.ch_state_ack = QMP_MBOX_CH_DISCONNECTED; - mcore.mailbox_size = mdev->mcore_mbox_size; - mcore.mailbox_offset = mdev->mcore_mbox_offset; - memcpy32_toio(mdev->desc + offset, &mcore, sizeof(mcore)); + mcore.mailbox_size = mbox->mcore_mbox_size; + mcore.mailbox_offset = mbox->mcore_mbox_offset; + memcpy32_toio(mbox->desc + offset, &mcore, sizeof(mcore)); +} + +/** + * qmp_irq_handler() - handle irq from remote entitity. + * @irq: irq number for the trggered interrupt. + * @priv: private pointer to qmp mbox device. + */ +static irqreturn_t qmp_irq_handler(int irq, void *priv) +{ + struct qmp_device *mdev = (struct qmp_device *)priv; + + if (mdev->rx_reset_reg) + writel_relaxed(mdev->irq_mask, mdev->rx_reset_reg); + + kthread_queue_work(&mdev->kworker, &mdev->kwork); + mdev->rx_irq_count++; + + return IRQ_HANDLED; } /** * __qmp_rx_worker() - Handle incoming messages from remote processor. - * @mdev: mailbox device that received notification. + * @mbox: mailbox device that received notification. 
*/ -static void __qmp_rx_worker(struct qmp_device *mdev) +static void __qmp_rx_worker(struct qmp_mbox *mbox) { - u32 msg_len; + u32 msg_len, idx; struct mbox_desc desc; + struct qmp_device *mdev = mbox->mdev; + unsigned long flags; - memcpy_fromio(&desc, mdev->desc, sizeof(desc)); + memcpy_fromio(&desc, mbox->desc, sizeof(desc)); if (desc.magic != QMP_MAGIC) return; - mutex_lock(&mdev->state_lock); - switch (mdev->local_state) { + mutex_lock(&mbox->state_lock); + switch (mbox->local_state) { case LINK_DISCONNECTED: - mdev->version.version = desc.version; - mdev->version.features = desc.features; - set_ucore_link_ack(mdev, desc.ucore.link_state); + mbox->version.version = desc.version; + mbox->version.features = desc.features; + set_ucore_link_ack(mbox, desc.ucore.link_state); if (desc.mcore.mailbox_size) { - mdev->mcore_mbox_size = desc.mcore.mailbox_size; - mdev->mcore_mbox_offset = desc.mcore.mailbox_offset; + mbox->mcore_mbox_size = desc.mcore.mailbox_size; + mbox->mcore_mbox_offset = desc.mcore.mailbox_offset; } - init_mcore_state(mdev); - mdev->local_state = LINK_NEGOTIATION; - mdev->rx_pkt.data = devm_kzalloc(mdev->dev, + init_mcore_state(mbox); + mbox->local_state = LINK_NEGOTIATION; + mbox->rx_pkt.data = devm_kzalloc(mdev->dev, desc.ucore.mailbox_size, GFP_KERNEL); - if (!mdev->rx_pkt.data) { + if (!mbox->rx_pkt.data) { pr_err("In %s: failed to allocate rx pkt\n", __func__); break; } @@ -477,8 +525,8 @@ static void __qmp_rx_worker(struct qmp_device *mdev) __func__); break; } - mdev->local_state = LINK_CONNECTED; - complete_all(&mdev->link_complete); + mbox->local_state = LINK_CONNECTED; + complete_all(&mbox->link_complete); break; case LINK_CONNECTED: if (desc.ucore.ch_state == desc.ucore.ch_state_ack) { @@ -486,23 +534,23 @@ static void __qmp_rx_worker(struct qmp_device *mdev) __func__); break; } - set_ucore_ch_ack(mdev, desc.ucore.ch_state); + set_ucore_ch_ack(mbox, desc.ucore.ch_state); send_irq(mdev); break; case LOCAL_CONNECTING: if (desc.mcore.ch_state_ack == QMP_MBOX_CH_CONNECTED && desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED) - mdev->local_state = LOCAL_CONNECTED; + mbox->local_state = LOCAL_CONNECTED; if (desc.ucore.ch_state != desc.ucore.ch_state_ack) { - set_ucore_ch_ack(mdev, desc.ucore.ch_state); + set_ucore_ch_ack(mbox, desc.ucore.ch_state); send_irq(mdev); } - if (mdev->local_state == LOCAL_CONNECTED && + if (mbox->local_state == LOCAL_CONNECTED && desc.mcore.ch_state == QMP_MBOX_CH_CONNECTED && desc.ucore.ch_state == QMP_MBOX_CH_CONNECTED) { - mdev->local_state = CHANNEL_CONNECTED; - complete_all(&mdev->ch_complete); + mbox->local_state = CHANNEL_CONNECTED; + complete_all(&mbox->ch_complete); } break; case LOCAL_CONNECTED: @@ -511,50 +559,58 @@ static void __qmp_rx_worker(struct qmp_device *mdev) __func__); break; } - set_ucore_ch_ack(mdev, desc.ucore.ch_state); - mdev->local_state = CHANNEL_CONNECTED; + set_ucore_ch_ack(mbox, desc.ucore.ch_state); + mbox->local_state = CHANNEL_CONNECTED; send_irq(mdev); - complete_all(&mdev->ch_complete); + complete_all(&mbox->ch_complete); break; case CHANNEL_CONNECTED: if (desc.ucore.ch_state == QMP_MBOX_CH_DISCONNECTED) { - set_ucore_ch_ack(mdev, desc.ucore.ch_state); - mdev->local_state = LOCAL_CONNECTED; + set_ucore_ch_ack(mbox, desc.ucore.ch_state); + mbox->local_state = LOCAL_CONNECTED; send_irq(mdev); } msg_len = ioread32(mdev->msgram + desc.ucore.mailbox_offset); - if (msg_len) - qmp_recv_data(mdev, desc.ucore.mailbox_offset); + if (msg_len && !mbox->rx_disabled) + qmp_recv_data(mbox, desc.ucore.mailbox_offset); - if 
(mdev->tx_sent) { + spin_lock_irqsave(&mbox->tx_lock, flags); + idx = mbox->idx_in_flight; + if (mbox->tx_sent) { msg_len = ioread32(mdev->msgram + - mdev->mcore_mbox_offset); + mbox->mcore_mbox_offset); if (msg_len == 0) { - mdev->tx_sent = false; - cancel_delayed_work(&mdev->dwork); - mbox_chan_txdone(&mdev->mbox->chans[0], 0); + mbox->tx_sent = false; + cancel_delayed_work(&mbox->dwork); + spin_unlock_irqrestore(&mbox->tx_lock, flags); + mbox_chan_txdone(&mbox->ctrl.chans[idx], 0); + spin_lock_irqsave(&mbox->tx_lock, flags); } } + spin_unlock_irqrestore(&mbox->tx_lock, flags); break; case LOCAL_DISCONNECTING: if (desc.mcore.ch_state_ack == QMP_MBOX_CH_DISCONNECTED && desc.mcore.ch_state == desc.mcore.ch_state_ack) - mdev->local_state = LINK_CONNECTED; - reinit_completion(&mdev->ch_complete); + mbox->local_state = LINK_CONNECTED; + reinit_completion(&mbox->ch_complete); break; default: pr_err("In %s: Local Channel State corrupted\n", __func__); } - mutex_unlock(&mdev->state_lock); + mutex_unlock(&mbox->state_lock); } static void rx_worker(struct kthread_work *work) { struct qmp_device *mdev; + struct qmp_mbox *mbox; mdev = container_of(work, struct qmp_device, kwork); - __qmp_rx_worker(mdev); + list_for_each_entry(mbox, &mdev->mboxes, list) { + __qmp_rx_worker(mbox); + } } /** @@ -566,48 +622,207 @@ static void rx_worker(struct kthread_work *work) static struct mbox_chan *qmp_mbox_of_xlate(struct mbox_controller *mbox, const struct of_phandle_args *spec) { - struct qmp_device *mdev = dev_get_drvdata(mbox->dev); - unsigned int channel = spec->args[0]; + struct qmp_mbox *dev = container_of(mbox, struct qmp_mbox, ctrl); + struct mbox_chan *chan; - if (!mdev || channel >= mbox->num_chans) - return ERR_PTR(-EINVAL); + if (dev->num_assigned >= mbox->num_chans || !dev->ctrl.chans) { + pr_err("%s: QMP out of channels\n", __func__); + return ERR_PTR(-ENOMEM); + } - mutex_lock(&mdev->state_lock); - if (mdev->ch_in_use) { - pr_err("%s, mbox channel already in use %s\n", __func__, - mdev->name); - mutex_unlock(&mdev->state_lock); - return ERR_PTR(-EBUSY); + mutex_lock(&dev->state_lock); + chan = &dev->ctrl.chans[dev->num_assigned++]; + mutex_unlock(&dev->state_lock); + + return chan; +} + +/** + * cleanup_workqueue() - Flush all work and stop the thread for this mailbox. + * @mdev: mailbox device to cleanup. + */ +static void cleanup_workqueue(struct qmp_device *mdev) +{ + kthread_flush_worker(&mdev->kworker); + kthread_stop(mdev->task); + mdev->task = NULL; +} + +static int qmp_mbox_remove(struct platform_device *pdev) +{ + struct qmp_device *mdev = platform_get_drvdata(pdev); + struct qmp_mbox *mbox = NULL; + + disable_irq(mdev->rx_irq_line); + cleanup_workqueue(mdev); + + list_for_each_entry(mbox, &mdev->mboxes, list) { + mbox_controller_unregister(&mbox->ctrl); } - mdev->ch_in_use = true; - mutex_unlock(&mdev->state_lock); - return &mbox->chans[0]; + return 0; } /** - * parse_devicetree() - Parse the device tree information for QMP, map io + * get_mbox_num_chans() - Find how many mbox channels need to be allocated + * + * @node: device node for this mailbox. 
+ * + * Return: the number of phandles referring to this device node + */ +static u32 get_mbox_num_chans(struct device_node *node) +{ + int i, j, ret; + u32 num_chans = 0; + struct device_node *np; + struct of_phandle_args p; + + for_each_node_with_property(np, "mboxes") { + if (!of_device_is_available(np)) + continue; + i = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); + for (j = 0; j < i; j++) { + ret = of_parse_phandle_with_args(np, "mboxes", + "#mbox-cells", j, &p); + if (!ret && p.np == node) { + num_chans++; + break; + } + } + } + if (num_chans) + return num_chans; + + return 1; +} + +/** + * mdev_add_mbox() - Add a mailbox to qmp device based on priority + * + * @mdev: qmp device to add mailbox to. + * @new: new mailbox to add to qmp device. + */ +static void mdev_add_mbox(struct qmp_device *mdev, struct qmp_mbox *new) +{ + struct qmp_mbox *mbox; + + list_for_each_entry(mbox, &mdev->mboxes, list) { + if (mbox->priority > new->priority) + continue; + list_add_tail(&new->list, &mbox->list); + return; + } + list_add_tail(&new->list, &mdev->mboxes); +} + +static struct mbox_chan_ops qmp_mbox_ops = { + .startup = qmp_startup, + .shutdown = qmp_shutdown, + .send_data = qmp_send_data, + .last_tx_done = qmp_last_tx_done, +}; + +static const struct of_device_id qmp_mbox_match_table[] = { + { .compatible = "qcom,qmp-mbox" }, + {}, +}; + +/** + * qmp_mbox_init() - Parse the device tree for qmp mailbox and init structure + * + * @n: child device node representing a mailbox. + * @mbox: device structure for this edge. + * + * Return: 0 on succes or standard Linux error code. + */ +static int qmp_mbox_init(struct device_node *n, struct qmp_device *mdev) +{ + int rc, i; + char *key; + struct qmp_mbox *mbox; + struct mbox_chan *chans; + u32 mbox_of, mbox_size, desc_of, priority, num_chans; + + key = "mbox-desc-offset"; + rc = of_property_read_u32(n, key, &desc_of); + if (rc) { + pr_err("%s: missing key %s\n", __func__, key); + return 0; + } + key = "priority"; + rc = of_property_read_u32(n, key, &priority); + if (rc) { + pr_err("%s: missing key %s\n", __func__, key); + return 0; + } + mbox = devm_kzalloc(mdev->dev, sizeof(*mbox), GFP_KERNEL); + if (!mbox) + return -ENOMEM; + + rc = of_property_read_u32(n, "mbox-offset", &mbox_of); + if (!rc) + mbox->mcore_mbox_offset = mbox_of; + rc = of_property_read_u32(n, "mbox-size", &mbox_size); + if (!rc) + mbox->mcore_mbox_size = mbox_size; + + mbox->mdev = mdev; + mbox->priority = priority; + mbox->desc = mdev->msgram + desc_of; + num_chans = get_mbox_num_chans(n); + mbox->rx_disabled = (num_chans > 1) ? 
true : false; + chans = devm_kzalloc(mdev->dev, sizeof(*chans) * num_chans, GFP_KERNEL); + if (!chans) + return -ENOMEM; + + for (i = 0; i < num_chans; i++) + chans[i].con_priv = mbox; + + mbox->ctrl.dev = mdev->dev; + mbox->ctrl.ops = &qmp_mbox_ops; + mbox->ctrl.chans = chans; + mbox->ctrl.num_chans = num_chans; + mbox->ctrl.txdone_irq = true; + mbox->ctrl.txdone_poll = false; + mbox->ctrl.of_xlate = qmp_mbox_of_xlate; + + rc = mbox_controller_register(&mbox->ctrl); + if (rc) { + pr_err("%s: failed to register mbox controller %d\n", __func__, + rc); + return rc; + } + spin_lock_init(&mbox->tx_lock); + mutex_init(&mbox->state_lock); + mbox->local_state = LINK_DISCONNECTED; + init_completion(&mbox->link_complete); + init_completion(&mbox->ch_complete); + mbox->tx_sent = false; + mbox->num_assigned = 0; + INIT_DELAYED_WORK(&mbox->dwork, qmp_notify_timeout); + + mdev_add_mbox(mdev, mbox); + return 0; +} + + +/** + * qmp_edge_init() - Parse the device tree information for QMP, map io * memory and register for needed interrupts * @pdev: platform device for this driver. - * @mdev: mailbox device to hold the device tree configuration. * * Return: 0 on succes or standard Linux error code. */ -static int qmp_parse_devicetree(struct platform_device *pdev, - struct qmp_device *mdev) +static int qmp_edge_init(struct platform_device *pdev) { + struct qmp_device *mdev = platform_get_drvdata(pdev); struct device_node *node = pdev->dev.of_node; + struct resource *msgram_r, *tx_irq_reg_r; char *key; int rc; - const char *subsys_name; - u32 rx_irq_line, tx_irq_mask; - u32 desc_of = 0; - u32 mbox_of = 0; - u32 mbox_size = 0; - struct resource *msgram_r, *tx_irq_reg_r; key = "label"; - subsys_name = of_get_property(node, key, NULL); - if (!subsys_name) { + mdev->name = of_get_property(node, key, NULL); + if (!mdev->name) { pr_err("%s: missing key %s\n", __func__, key); return -ENODEV; } @@ -627,143 +842,60 @@ static int qmp_parse_devicetree(struct platform_device *pdev, } key = "qcom,irq-mask"; - rc = of_property_read_u32(node, key, &tx_irq_mask); + rc = of_property_read_u32(node, key, &mdev->irq_mask); if (rc) { pr_err("%s: missing key %s\n", __func__, key); return -ENODEV; } key = "interrupts"; - rx_irq_line = irq_of_parse_and_map(node, 0); - if (!rx_irq_line) { + mdev->rx_irq_line = irq_of_parse_and_map(node, 0); + if (!mdev->rx_irq_line) { pr_err("%s: missing key %s\n", __func__, key); return -ENODEV; } - key = "mbox-desc-offset"; - rc = of_property_read_u32(node, key, &desc_of); - if (rc) { - pr_err("%s: missing key %s\n", __func__, key); - return -ENODEV; - } - - key = "mbox-offset"; - rc = of_property_read_u32(node, key, &mbox_of); - if (!rc) - mdev->mcore_mbox_offset = mbox_of; - - key = "mbox-size"; - rc = of_property_read_u32(node, key, &mbox_size); - if (!rc) - mdev->mcore_mbox_size = mbox_size; - - mdev->name = subsys_name; - mdev->msgram = devm_ioremap_nocache(&pdev->dev, msgram_r->start, - resource_size(msgram_r)); - if (!mdev->msgram) - return -ENOMEM; - - mdev->desc = mdev->msgram + desc_of; - if (!mdev->desc) - return -ENOMEM; - - mdev->irq_mask = tx_irq_mask; + mdev->dev = &pdev->dev; mdev->tx_irq_reg = devm_ioremap_nocache(&pdev->dev, tx_irq_reg_r->start, resource_size(tx_irq_reg_r)); - if (!mdev->tx_irq_reg) - return -ENOMEM; + mdev->msgram = devm_ioremap_nocache(&pdev->dev, msgram_r->start, + resource_size(msgram_r)); + if (!mdev->msgram || !mdev->tx_irq_reg) + return -EIO; - mdev->rx_irq_line = rx_irq_line; + INIT_LIST_HEAD(&mdev->mboxes); return 0; } -/** - * cleanup_workqueue() - Flush 
all work and stop the thread for this mailbox. - * @mdev: mailbox device to cleanup. - */ -static void cleanup_workqueue(struct qmp_device *mdev) -{ - kthread_flush_worker(&mdev->kworker); - kthread_stop(mdev->task); - mdev->task = NULL; -} - -static struct mbox_chan_ops qmp_mbox_ops = { - .startup = qmp_startup, - .shutdown = qmp_shutdown, - .send_data = qmp_send_data, - .last_tx_done = qmp_last_tx_done, -}; - -static const struct of_device_id qmp_mbox_match_table[] = { - { .compatible = "qcom,qmp-mbox" }, - {}, -}; - static int qmp_mbox_probe(struct platform_device *pdev) { - struct device_node *node = pdev->dev.of_node; - struct mbox_controller *mbox; + struct device_node *edge_node = pdev->dev.of_node; struct qmp_device *mdev; - struct mbox_chan *chans; int ret = 0; mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL); if (!mdev) return -ENOMEM; - platform_set_drvdata(pdev, mdev); - ret = qmp_parse_devicetree(pdev, mdev); + platform_set_drvdata(pdev, mdev); + ret = qmp_edge_init(pdev); if (ret) return ret; - mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL); - if (!mbox) - return -ENOMEM; - - chans = devm_kzalloc(&pdev->dev, sizeof(*chans) * QMP_NUM_CHANS, - GFP_KERNEL); - if (!chans) - return -ENOMEM; - - mbox->dev = &pdev->dev; - mbox->ops = &qmp_mbox_ops; - mbox->chans = chans; - mbox->chans[0].con_priv = mdev; - mbox->num_chans = QMP_NUM_CHANS; - mbox->txdone_irq = true; - mbox->txdone_poll = false; - mbox->of_xlate = qmp_mbox_of_xlate; + ret = qmp_mbox_init(edge_node, mdev); + if (ret) + return ret; - mdev->dev = &pdev->dev; - mdev->mbox = mbox; - spin_lock_init(&mdev->tx_lock); - mutex_init(&mdev->state_lock); - mdev->local_state = LINK_DISCONNECTED; kthread_init_work(&mdev->kwork, rx_worker); kthread_init_worker(&mdev->kworker); mdev->task = kthread_run(kthread_worker_fn, &mdev->kworker, "qmp_%s", mdev->name); - init_completion(&mdev->link_complete); - init_completion(&mdev->ch_complete); - mdev->tx_sent = false; - mdev->ch_in_use = false; - INIT_DELAYED_WORK(&mdev->dwork, qmp_notify_timeout); - - ret = mbox_controller_register(mbox); - if (ret) { - cleanup_workqueue(mdev); - pr_err("%s: failed to register mbox controller %d\n", __func__, - ret); - return ret; - } ret = devm_request_irq(&pdev->dev, mdev->rx_irq_line, qmp_irq_handler, IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED, - node->name, mdev); + edge_node->name, mdev); if (ret < 0) { - cleanup_workqueue(mdev); - mbox_controller_unregister(mdev->mbox); + qmp_mbox_remove(pdev); pr_err("%s: request irq on %d failed: %d\n", __func__, mdev->rx_irq_line, ret); return ret; @@ -773,19 +905,11 @@ static int qmp_mbox_probe(struct platform_device *pdev) pr_err("%s: enable_irq_wake on %d failed: %d\n", __func__, mdev->rx_irq_line, ret); + /* Trigger RX */ qmp_irq_handler(0, mdev); return 0; } -static int qmp_mbox_remove(struct platform_device *pdev) -{ - struct qmp_device *mdev = platform_get_drvdata(pdev); - - cleanup_workqueue(mdev); - mbox_controller_unregister(mdev->mbox); - return 0; -} - static struct platform_driver qmp_mbox_driver = { .probe = qmp_mbox_probe, .remove = qmp_mbox_remove, -- GitLab From 72829773f31ba4e1f12082d69743fe685e7eca25 Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Tue, 13 Jun 2017 17:08:03 -0700 Subject: [PATCH 243/786] ARM: dts: msm: Add priority to QMP device tree on sdm845 Add the priority field to the qmp device tree node for multi channel feature. Also change the node name to reflect the used memory. 
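For reference, a consumer references the controller with a single-cell mboxes specifier, and it is the number of such references that the multi-client mailbox driver counts when sizing its channel pool. A hedged sketch only; the node name and compatible string below are made up for illustration:

	example-qmp-client {
		compatible = "qcom,example-qmp-client";	/* illustrative only */
		mboxes = <&qmp_aop 0>;
	};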
CRs-Fixed: 2052601 Change-Id: Ide304458b46b6bd7fc31879632c26fc7efa4b982 Signed-off-by: Chris Lew --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 83f116688524..bac327da9f34 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -2272,7 +2272,7 @@ qcom,rx-ring-size = <0x400>; }; - qmp_aop: mailbox@1799000c { + qmp_aop: qcom,qmp-aop@c300000 { compatible = "qcom,qmp-mbox"; label = "aop"; reg = <0xc300000 0x100000>, @@ -2280,6 +2280,7 @@ reg-names = "msgram", "irq-reg-base"; qcom,irq-mask = <0x1>; interrupts = <0 389 1>; + priority = <0>; mbox-desc-offset = <0x0>; #mbox-cells = <1>; }; -- GitLab From e8d35de42a2a90e17df0e4b6a63906f05f084ad6 Mon Sep 17 00:00:00 2001 From: Rohit Gupta Date: Wed, 21 Jun 2017 10:51:39 -0700 Subject: [PATCH 244/786] ARM: dts: msm: Modify the l3 memlat tables for SDM845 Add more entries in the l3-cpu* memlat tables to account for additional frequencies available in speed bin 1. Change-Id: I7a42e90278d4fe489cda2a69589774634541f266 Signed-off-by: Rohit Gupta --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 7ea200e5e1a1..770fddf1e657 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -983,12 +983,13 @@ qcom,target-dev = <&l3_cpu0>; qcom,cachemiss-ev = <0x17>; qcom,core-dev-table = - < 300000 300000 >, - < 748800 576000 >, - < 979200 652800 >, - < 1209600 806400 >, - < 1516800 883200 >, - < 1593600 960000 >; + < 300000 300000 >, + < 748800 576000 >, + < 979200 652800 >, + < 1209600 806400 >, + < 1516800 883200 >, + < 1593600 960000 >, + < 1708800 1094400 >; }; devfreq_l3lat_4: qcom,cpu4-l3lat-mon { @@ -997,11 +998,12 @@ qcom,target-dev = <&l3_cpu4>; qcom,cachemiss-ev = <0x17>; qcom,core-dev-table = - < 300000 300000 >, - < 1036800 652800 >, - < 1190400 806400 >, - < 1574400 883200 >, - < 1651200 960000 >; + < 300000 300000 >, + < 1036800 576000 >, + < 1190400 806400 >, + < 1574400 883200 >, + < 1804800 960000 >, + < 2092800 1094400 >; }; cpu_pmu: cpu-pmu { -- GitLab From 9e653b1cced74c55d1cbabf35a1da889e7327168 Mon Sep 17 00:00:00 2001 From: Devdutt Patnaik Date: Tue, 30 May 2017 22:13:22 -0700 Subject: [PATCH 245/786] usb: phy: snps: Implement PHY driver for femto PHY Introduce PHY driver for HS USB femto phy. 
Change-Id: I4c15312f6aa611e94cac27fa44297b6f5164024a Signed-off-by: Devdutt Patnaik --- .../devicetree/bindings/usb/msm-phy.txt | 34 + drivers/usb/phy/Kconfig | 9 + drivers/usb/phy/Makefile | 1 + drivers/usb/phy/phy-msm-snps-hs.c | 646 ++++++++++++++++++ 4 files changed, 690 insertions(+) create mode 100644 drivers/usb/phy/phy-msm-snps-hs.c diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt index e508a4fed2ab..99929f119b71 100644 --- a/Documentation/devicetree/bindings/usb/msm-phy.txt +++ b/Documentation/devicetree/bindings/usb/msm-phy.txt @@ -1,5 +1,39 @@ MSM USB PHY transceivers +HSUSB PHY + +Required properties: + - compatible: Should be "qcom,usb-hsphy-snps-femto" + - reg: Address and length of the register set for the device + Required regs are: + "hsusb_phy_base" : the base register for the PHY + - -supply: phandle to the regulator device tree node + Required "supply-name" examples are: + "vdd" : vdd supply for HSPHY digital circuit operation + "vdda18" : 1.8v supply for HSPHY + "vdda33" : 3.3v supply for HSPHY + - clocks: a list of phandles to the PHY clocks. Use as per + Documentation/devicetree/bindings/clock/clock-bindings.txt + - clock-names: Names of the clocks in 1-1 correspondence with the "clocks" + property. "ref_clk_src" is a mandatory clock. + - qcom,vdd-voltage-level: This property must be a list of three integer + values (no, min, max) where each value represents either a voltage in + microvolts or a value corresponding to voltage corner + - resets: reset specifier pair consists of phandle for the reset controller + and reset lines used by this controller. + - reset-names: reset signal name strings sorted in the same order as the resets + property. + +Example: + hsphy@f9200000 { + compatible = "qcom,usb-hsphy-snps-femto"; + reg = <0xff1000 0x400>; + vdd-supply = <&pm8841_s2_corner>; + vdda18-supply = <&pm8941_l6>; + vdda33-supply = <&pm8941_l24>; + qcom,vdd-voltage-level = <0 872000 872000>; + }; + SSUSB-QMP PHY Required properties: diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index d2c48766d58e..b1b74ff724e6 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -245,4 +245,13 @@ config MSM_QUSB_PHY Synopsys DWC3 USB IPs on MSM SOCs. This driver expects to configure the PHY with a dedicated register I/O memory region. +config MSM_HSUSB_PHY + tristate "MSM HSUSB PHY Driver" + depends on ARCH_QCOM + select USB_PHY + help + Enable this to support the HSUSB PHY on MSM chips. This driver supports + the high-speed PHY which is usually paired with either the ChipIdea or + Synopsys DWC3 USB IPs on MSM SOCs. This driver expects to configure the + PHY with a dedicated register I/O memory region. endmenu diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index ce98866e9d38..5b748a6909ab 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -30,3 +30,4 @@ obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o obj-$(CONFIG_USB_MSM_SSPHY_QMP) += phy-msm-ssusb-qmp.o obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb.o phy-msm-qusb-v2.o +obj-$(CONFIG_MSM_HSUSB_PHY) += phy-msm-snps-hs.o diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c new file mode 100644 index 000000000000..2d18faf19471 --- /dev/null +++ b/drivers/usb/phy/phy-msm-snps-hs.c @@ -0,0 +1,646 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define USB2_PHY_USB_PHY_UTMI_CTRL0 (0x3c) +#define SLEEPM BIT(0) + +#define USB2_PHY_USB_PHY_UTMI_CTRL5 (0x50) +#define ATERESET BIT(0) +#define POR BIT(1) + +#define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0 (0x54) +#define VATESTENB_MASK (0x3 << 0) +#define RETENABLEN BIT(3) +#define FSEL_MASK (0x7 << 4) +#define FSEL_DEFAULT (0x3 << 4) + +#define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON1 (0x58) +#define VBUSVLDEXTSEL0 BIT(4) +#define PLLBTUNE BIT(5) + +#define USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2 (0x5c) +#define VREGBYPASS BIT(0) + +#define USB2_PHY_USB_PHY_HS_PHY_CTRL1 (0x60) +#define VBUSVLDEXT0 BIT(0) + +#define USB2_PHY_USB_PHY_HS_PHY_CTRL2 (0x64) +#define USB2_SUSPEND_N BIT(2) +#define USB2_SUSPEND_N_SEL BIT(3) + +#define USB2_PHY_USB_PHY_HS_PHY_TEST0 (0x80) +#define TESTDATAIN_MASK (0xff << 0) + +#define USB2_PHY_USB_PHY_HS_PHY_TEST1 (0x84) +#define TESTDATAOUTSEL BIT(4) +#define TOGGLE_2WR BIT(6) + +#define USB2_PHY_USB_PHY_CFG0 (0x94) +#define UTMI_PHY_CMN_CTRL_OVERRIDE_EN BIT(1) + +#define USB2_PHY_USB_PHY_REFCLK_CTRL (0xa0) +#define REFCLK_SEL_MASK (0x3 << 0) +#define REFCLK_SEL_DEFAULT (0x2 << 0) + +#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */ +#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */ +#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */ +#define USB_HSPHY_3P3_VOL_FSHOST 3150000 /* uV */ + +#define USB_HSPHY_1P8_VOL_MIN 1800000 /* uV */ +#define USB_HSPHY_1P8_VOL_MAX 1800000 /* uV */ +#define USB_HSPHY_1P8_HPM_LOAD 19000 /* uA */ + +struct msm_hsphy { + struct usb_phy phy; + void __iomem *base; + + struct clk *ref_clk_src; + struct clk *cfg_ahb_clk; + struct reset_control *phy_reset; + + struct regulator *vdd; + struct regulator *vdda33; + struct regulator *vdda18; + int vdd_levels[3]; /* none, low, high */ + + bool clocks_enabled; + bool power_enabled; + bool suspended; + bool cable_connected; + + /* emulation targets specific */ + void __iomem *emu_phy_base; + int *emu_init_seq; + int emu_init_seq_len; + int *emu_dcm_reset_seq; + int emu_dcm_reset_seq_len; +}; + +static void msm_hsphy_enable_clocks(struct msm_hsphy *phy, bool on) +{ + dev_dbg(phy->phy.dev, "%s(): clocks_enabled:%d on:%d\n", + __func__, phy->clocks_enabled, on); + + if (!phy->clocks_enabled && on) { + clk_prepare_enable(phy->ref_clk_src); + + if (phy->cfg_ahb_clk) + clk_prepare_enable(phy->cfg_ahb_clk); + + phy->clocks_enabled = true; + } + + if (phy->clocks_enabled && !on) { + if (phy->cfg_ahb_clk) + clk_disable_unprepare(phy->cfg_ahb_clk); + + clk_disable_unprepare(phy->ref_clk_src); + phy->clocks_enabled = false; + } + +} +static int msm_hsphy_config_vdd(struct msm_hsphy *phy, int high) +{ + int min, ret; + + min = high ? 1 : 0; /* low or none? 
*/ + ret = regulator_set_voltage(phy->vdd, phy->vdd_levels[min], + phy->vdd_levels[2]); + if (ret) { + dev_err(phy->phy.dev, "unable to set voltage for hsusb vdd\n"); + return ret; + } + + dev_dbg(phy->phy.dev, "%s: min_vol:%d max_vol:%d\n", __func__, + phy->vdd_levels[min], phy->vdd_levels[2]); + + return ret; +} + +static int msm_hsphy_enable_power(struct msm_hsphy *phy, bool on) +{ + int ret = 0; + + dev_dbg(phy->phy.dev, "%s turn %s regulators. power_enabled:%d\n", + __func__, on ? "on" : "off", phy->power_enabled); + + if (phy->power_enabled == on) { + dev_dbg(phy->phy.dev, "PHYs' regulators are already ON.\n"); + return 0; + } + + if (!on) + goto disable_vdda33; + + ret = msm_hsphy_config_vdd(phy, true); + if (ret) { + dev_err(phy->phy.dev, "Unable to config VDD:%d\n", + ret); + goto err_vdd; + } + + ret = regulator_enable(phy->vdd); + if (ret) { + dev_err(phy->phy.dev, "Unable to enable VDD\n"); + goto unconfig_vdd; + } + + ret = regulator_set_load(phy->vdda18, USB_HSPHY_1P8_HPM_LOAD); + if (ret < 0) { + dev_err(phy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret); + goto disable_vdd; + } + + ret = regulator_set_voltage(phy->vdda18, USB_HSPHY_1P8_VOL_MIN, + USB_HSPHY_1P8_VOL_MAX); + if (ret) { + dev_err(phy->phy.dev, + "Unable to set voltage for vdda18:%d\n", ret); + goto put_vdda18_lpm; + } + + ret = regulator_enable(phy->vdda18); + if (ret) { + dev_err(phy->phy.dev, "Unable to enable vdda18:%d\n", ret); + goto unset_vdda18; + } + + ret = regulator_set_load(phy->vdda33, USB_HSPHY_3P3_HPM_LOAD); + if (ret < 0) { + dev_err(phy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret); + goto disable_vdda18; + } + + ret = regulator_set_voltage(phy->vdda33, USB_HSPHY_3P3_VOL_MIN, + USB_HSPHY_3P3_VOL_MAX); + if (ret) { + dev_err(phy->phy.dev, + "Unable to set voltage for vdda33:%d\n", ret); + goto put_vdda33_lpm; + } + + ret = regulator_enable(phy->vdda33); + if (ret) { + dev_err(phy->phy.dev, "Unable to enable vdda33:%d\n", ret); + goto unset_vdd33; + } + + phy->power_enabled = true; + + pr_debug("%s(): HSUSB PHY's regulators are turned ON.\n", __func__); + return ret; + +disable_vdda33: + ret = regulator_disable(phy->vdda33); + if (ret) + dev_err(phy->phy.dev, "Unable to disable vdda33:%d\n", ret); + +unset_vdd33: + ret = regulator_set_voltage(phy->vdda33, 0, USB_HSPHY_3P3_VOL_MAX); + if (ret) + dev_err(phy->phy.dev, + "Unable to set (0) voltage for vdda33:%d\n", ret); + +put_vdda33_lpm: + ret = regulator_set_load(phy->vdda33, 0); + if (ret < 0) + dev_err(phy->phy.dev, "Unable to set (0) HPM of vdda33\n"); + +disable_vdda18: + ret = regulator_disable(phy->vdda18); + if (ret) + dev_err(phy->phy.dev, "Unable to disable vdda18:%d\n", ret); + +unset_vdda18: + ret = regulator_set_voltage(phy->vdda18, 0, USB_HSPHY_1P8_VOL_MAX); + if (ret) + dev_err(phy->phy.dev, + "Unable to set (0) voltage for vdda18:%d\n", ret); + +put_vdda18_lpm: + ret = regulator_set_load(phy->vdda18, 0); + if (ret < 0) + dev_err(phy->phy.dev, "Unable to set LPM of vdda18\n"); + +disable_vdd: + if (ret) + dev_err(phy->phy.dev, "Unable to disable vdd:%d\n", + ret); + +unconfig_vdd: + ret = msm_hsphy_config_vdd(phy, false); + if (ret) + dev_err(phy->phy.dev, "Unable unconfig VDD:%d\n", + ret); +err_vdd: + phy->power_enabled = false; + dev_dbg(phy->phy.dev, "HSUSB PHY's regulators are turned OFF.\n"); + return ret; +} + +static void msm_usb_write_readback(void __iomem *base, u32 offset, + const u32 mask, u32 val) +{ + u32 write_val, tmp = readl_relaxed(base + offset); + + tmp &= ~mask; /* retain other bits */ + write_val = tmp 
| val; + + writel_relaxed(write_val, base + offset); + + /* Read back to see if val was written */ + tmp = readl_relaxed(base + offset); + tmp &= mask; /* clear other bits */ + + if (tmp != val) + pr_err("%s: write: %x to QSCRATCH: %x FAILED\n", + __func__, val, offset); +} + +static void msm_hsphy_reset(struct msm_hsphy *phy) +{ + int ret; + + ret = reset_control_assert(phy->phy_reset); + if (ret) + dev_err(phy->phy.dev, "%s: phy_reset assert failed\n", + __func__); + usleep_range(100, 150); + + ret = reset_control_deassert(phy->phy_reset); + if (ret) + dev_err(phy->phy.dev, "%s: phy_reset deassert failed\n", + __func__); +} + +static void hsusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt, + unsigned long delay) +{ + int i; + + pr_debug("Seq count:%d\n", cnt); + for (i = 0; i < cnt; i = i+2) { + pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]); + writel_relaxed(seq[i], base + seq[i+1]); + if (delay) + usleep_range(delay, (delay + 2000)); + } +} + +static int msm_hsphy_emu_init(struct usb_phy *uphy) +{ + struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy); + int ret; + + dev_dbg(uphy->dev, "%s\n", __func__); + + ret = msm_hsphy_enable_power(phy, true); + if (ret) + return ret; + + msm_hsphy_enable_clocks(phy, true); + msm_hsphy_reset(phy); + + if (phy->emu_init_seq) { + hsusb_phy_write_seq(phy->base, + phy->emu_init_seq, + phy->emu_init_seq_len, 10000); + + /* Wait for 5ms as per QUSB2 RUMI sequence */ + usleep_range(5000, 7000); + + if (phy->emu_dcm_reset_seq) + hsusb_phy_write_seq(phy->emu_phy_base, + phy->emu_dcm_reset_seq, + phy->emu_dcm_reset_seq_len, 10000); + } + + return 0; +} + +static int msm_hsphy_init(struct usb_phy *uphy) +{ + struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy); + int ret; + + dev_dbg(uphy->dev, "%s\n", __func__); + + ret = msm_hsphy_enable_power(phy, true); + if (ret) + return ret; + + msm_hsphy_enable_clocks(phy, true); + msm_hsphy_reset(phy); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0, + UTMI_PHY_CMN_CTRL_OVERRIDE_EN, UTMI_PHY_CMN_CTRL_OVERRIDE_EN); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL5, + POR, POR); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0, + FSEL_MASK, 0); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON1, + PLLBTUNE, PLLBTUNE); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_REFCLK_CTRL, + REFCLK_SEL_MASK, REFCLK_SEL_DEFAULT); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON1, + VBUSVLDEXTSEL0, VBUSVLDEXTSEL0); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL1, + VBUSVLDEXT0, VBUSVLDEXT0); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2, + VREGBYPASS, VREGBYPASS); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL5, + ATERESET, ATERESET); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST1, + TESTDATAOUTSEL, TESTDATAOUTSEL); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST1, + TOGGLE_2WR, TOGGLE_2WR); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON0, + VATESTENB_MASK, 0); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_TEST0, + TESTDATAIN_MASK, 0); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2, + USB2_SUSPEND_N_SEL, USB2_SUSPEND_N_SEL); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2, + USB2_SUSPEND_N, USB2_SUSPEND_N); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL0, + SLEEPM, SLEEPM); + + 
msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL5, + POR, 0); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2, + USB2_SUSPEND_N_SEL, 0); + + msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0, + UTMI_PHY_CMN_CTRL_OVERRIDE_EN, 0); + + return 0; +} + +static int msm_hsphy_set_suspend(struct usb_phy *uphy, int suspend) +{ + return 0; +} + +static int msm_hsphy_notify_connect(struct usb_phy *uphy, + enum usb_device_speed speed) +{ + struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy); + + phy->cable_connected = true; + + return 0; +} + +static int msm_hsphy_notify_disconnect(struct usb_phy *uphy, + enum usb_device_speed speed) +{ + struct msm_hsphy *phy = container_of(uphy, struct msm_hsphy, phy); + + phy->cable_connected = false; + + return 0; +} + +static int msm_hsphy_probe(struct platform_device *pdev) +{ + struct msm_hsphy *phy; + struct device *dev = &pdev->dev; + struct resource *res; + int ret = 0, size = 0; + + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) { + ret = -ENOMEM; + goto err_ret; + } + + phy->phy.dev = dev; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "hsusb_phy_base"); + if (!res) { + dev_err(dev, "missing memory base resource\n"); + ret = -ENODEV; + goto err_ret; + } + + phy->base = devm_ioremap_resource(dev, res); + if (IS_ERR(phy->base)) { + dev_err(dev, "ioremap failed\n"); + ret = -ENODEV; + goto err_ret; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "emu_phy_base"); + if (res) { + phy->emu_phy_base = devm_ioremap_resource(dev, res); + if (IS_ERR(phy->emu_phy_base)) { + dev_dbg(dev, "couldn't ioremap emu_phy_base\n"); + phy->emu_phy_base = NULL; + } + } + + /* ref_clk_src is needed irrespective of SE_CLK or DIFF_CLK usage */ + phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src"); + if (IS_ERR(phy->ref_clk_src)) { + dev_dbg(dev, "clk get failed for ref_clk_src\n"); + ret = PTR_ERR(phy->ref_clk_src); + return ret; + } + + if (of_property_match_string(pdev->dev.of_node, + "clock-names", "cfg_ahb_clk") >= 0) { + phy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk"); + if (IS_ERR(phy->cfg_ahb_clk)) { + ret = PTR_ERR(phy->cfg_ahb_clk); + if (ret != -EPROBE_DEFER) + dev_err(dev, + "clk get failed for cfg_ahb_clk ret %d\n", ret); + return ret; + } + } + + phy->phy_reset = devm_reset_control_get(dev, "phy_reset"); + if (IS_ERR(phy->phy_reset)) + return PTR_ERR(phy->phy_reset); + + of_get_property(dev->of_node, "qcom,emu-init-seq", &size); + if (size) { + phy->emu_init_seq = devm_kzalloc(dev, + size, GFP_KERNEL); + if (phy->emu_init_seq) { + phy->emu_init_seq_len = + (size / sizeof(*phy->emu_init_seq)); + if (phy->emu_init_seq_len % 2) { + dev_err(dev, "invalid emu_init_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,emu-init-seq", + phy->emu_init_seq, + phy->emu_init_seq_len); + } else { + dev_dbg(dev, + "error allocating memory for emu_init_seq\n"); + } + } + + size = 0; + of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size); + if (size) { + phy->emu_dcm_reset_seq = devm_kzalloc(dev, + size, GFP_KERNEL); + if (phy->emu_dcm_reset_seq) { + phy->emu_dcm_reset_seq_len = + (size / sizeof(*phy->emu_dcm_reset_seq)); + if (phy->emu_dcm_reset_seq_len % 2) { + dev_err(dev, "invalid emu_dcm_reset_seq_len\n"); + return -EINVAL; + } + + of_property_read_u32_array(dev->of_node, + "qcom,emu-dcm-reset-seq", + phy->emu_dcm_reset_seq, + phy->emu_dcm_reset_seq_len); + } else { + dev_dbg(dev, + "error allocating memory for 
emu_dcm_reset_seq\n"); + } + } + + ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level", + (u32 *) phy->vdd_levels, + ARRAY_SIZE(phy->vdd_levels)); + if (ret) { + dev_err(dev, "error reading qcom,vdd-voltage-level property\n"); + goto err_ret; + } + + + phy->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(phy->vdd)) { + dev_err(dev, "unable to get vdd supply\n"); + ret = PTR_ERR(phy->vdd); + goto err_ret; + } + + phy->vdda33 = devm_regulator_get(dev, "vdda33"); + if (IS_ERR(phy->vdda33)) { + dev_err(dev, "unable to get vdda33 supply\n"); + ret = PTR_ERR(phy->vdda33); + goto err_ret; + } + + phy->vdda18 = devm_regulator_get(dev, "vdda18"); + if (IS_ERR(phy->vdda18)) { + dev_err(dev, "unable to get vdda18 supply\n"); + ret = PTR_ERR(phy->vdda18); + goto err_ret; + } + + platform_set_drvdata(pdev, phy); + + if (phy->emu_init_seq) + phy->phy.init = msm_hsphy_emu_init; + else + phy->phy.init = msm_hsphy_init; + phy->phy.set_suspend = msm_hsphy_set_suspend; + phy->phy.notify_connect = msm_hsphy_notify_connect; + phy->phy.notify_disconnect = msm_hsphy_notify_disconnect; + phy->phy.type = USB_PHY_TYPE_USB2; + + ret = usb_add_phy_dev(&phy->phy); + if (ret) + return ret; + + return 0; + +err_ret: + return ret; +} + +static int msm_hsphy_remove(struct platform_device *pdev) +{ + struct msm_hsphy *phy = platform_get_drvdata(pdev); + + if (!phy) + return 0; + + usb_remove_phy(&phy->phy); + clk_disable_unprepare(phy->ref_clk_src); + + msm_hsphy_enable_clocks(phy, false); + msm_hsphy_enable_power(phy, false); + + kfree(phy); + + return 0; +} + +static const struct of_device_id msm_usb_id_table[] = { + { + .compatible = "qcom,usb-hsphy-snps-femto", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, msm_usb_id_table); + +static struct platform_driver msm_hsphy_driver = { + .probe = msm_hsphy_probe, + .remove = msm_hsphy_remove, + .driver = { + .name = "msm-usb-hsphy", + .of_match_table = of_match_ptr(msm_usb_id_table), + }, +}; + +module_platform_driver(msm_hsphy_driver); + +MODULE_DESCRIPTION("MSM USB HS PHY driver"); +MODULE_LICENSE("GPL v2"); -- GitLab From 4ff5bcd6f02cd543d211ed0ab22e97b1df7cb9f3 Mon Sep 17 00:00:00 2001 From: Devdutt Patnaik Date: Fri, 5 May 2017 19:45:01 -0700 Subject: [PATCH 246/786] ARM: dts: msm: Enable dtsi entries for USB driver for sdxpoorwills Enable dtsi for USB driver on sdxpoorwills platform Change-Id: I13951654515b05329720785cc18e8f53099b287a Signed-off-by: Devdutt Patnaik --- arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts | 32 +++ arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi | 219 +++++++++++++++++++ arch/arm/boot/dts/qcom/sdxpoorwills.dtsi | 1 + 3 files changed, 252 insertions(+) create mode 100644 arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts index fc4ff372ab45..b3103cdf8a18 100644 --- a/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts +++ b/arch/arm/boot/dts/qcom/sdxpoorwills-rumi.dts @@ -36,3 +36,35 @@ &gdsc_pcie { compatible = "regulator-fixed"; }; + +&usb { + /delete-property/ qcom,usb-dbm; + qcom,charging-disabled; + dwc3@a600000 { + usb-phy = <&usb2_phy>, <&usb_nop_phy>; + maximum-speed = "high-speed"; + }; +}; + +&usb2_phy { + reg = <0xff1000 0x1000>, + <0x0a60cd00 0x40>; + reg-names = "hsusb_phy_base", + "emu_phy_base"; + qcom,emu-init-seq = <0x19 0x404 + 0x20 0x414 + 0x79 0x410 + 0x00 0x418 + 0x99 0x404 + 0x04 0x408 + 0xd9 0x404>; + + qcom,emu-dcm-reset-seq = <0x100000 0x20 + 0x0 0x20 + 0x1e0 0x20 + 0x5 0x14>; +}; + +&usb3_qmp_phy { + status = 
"disabled"; +}; diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi new file mode 100644 index 000000000000..be2b63ea78aa --- /dev/null +++ b/arch/arm/boot/dts/qcom/sdxpoorwills-usb.dtsi @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include + +&soc { + /* USB port for DWC3 controller */ + usb: ssusb@a600000 { + compatible = "qcom,dwc-usb3-msm"; + reg = <0x0a600000 0xf8c00>; + reg-names = "core_base"; + #address-cells = <1>; + #size-cells = <1>; + ranges; + + interrupts = <0 131 0>, <0 130 0>, <0 59 0>; + interrupt-names = "hs_phy_irq", "pwr_event_irq", "ss_phy_irq"; + + USB3_GDSC-supply = <&gdsc_usb30>; + qcom,usb-dbm = <&dbm_1p5>; + qcom,dwc-usb3-msm-tx-fifo-size = <21288>; + qcom,num-gsi-evt-buffs = <0x3>; + + clocks = <&clock_gcc GCC_USB30_MASTER_CLK>, + <&clock_gcc GCC_SYS_NOC_USB3_CLK>, + <&clock_gcc GCC_USB30_MOCK_UTMI_CLK>, + <&clock_gcc GCC_USB30_SLEEP_CLK>, + <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, + <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>; + + clock-names = "core_clk", "iface_clk", "utmi_clk", "sleep_clk", + "cfg_ahb_clk", "xo"; + + qcom,core-clk-rate = <133333333>; + qcom,core-clk-rate-hs = <66666667>; + + resets = <&clock_gcc GCC_USB30_BCR>; + reset-names = "core_reset"; + + dwc3@a600000 { + compatible = "snps,dwc3"; + reg = <0x0a600000 0xcd00>; + interrupt-parent = <&intc>; + interrupts = <0 133 0>; + usb-phy = <&usb2_phy>, <&usb3_qmp_phy>; + tx-fifo-resize; + linux,sysdev_is_parent; + snps,disable-clk-gating; + snps,has-lpm-erratum; + snps,hird-threshold = /bits/ 8 <0x10>; + }; + }; + + /* USB port for High Speed PHY */ + usb2_phy: hsphy@ff1000 { + compatible = "qcom,usb-hsphy-snps-femto"; + reg = <0xff1000 0x400>; + reg-names = "hsusb_phy_base"; + + vdd-supply = <&pmxpoorwills_l4>; + vdda18-supply = <&pmxpoorwills_l5>; + vdda33-supply = <&pmxpoorwills_l10>; + qcom,vdd-voltage-level = <0 872000 872000>; + clocks = <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>; + clock-names = "ref_clk_src", "cfg_ahb_clk"; + + resets = <&clock_gcc GCC_QUSB2PHY_BCR>; + reset-names = "phy_reset"; + }; + + dbm_1p5: dbm@a6f8000 { + compatible = "qcom,usb-dbm-1p5"; + reg = <0xa6f8000 0x400>; + qcom,reset-ep-after-lpm-resume; + }; + + usb_nop_phy: usb_nop_phy { + compatible = "usb-nop-xceiv"; + }; + + /* USB port for Super Speed PHY */ + usb3_qmp_phy: ssphy@ff0000 { + compatible = "qcom,usb-ssphy-qmp-v2"; + reg = <0xff0000 0x1000>; + reg-names = "qmp_phy_base"; + + vdd-supply = <&pmxpoorwills_l4>; + core-supply = <&pmxpoorwills_l1>; + qcom,vdd-voltage-level = <0 872000 872000>; + qcom,vbus-valid-override; + qcom,qmp-phy-init-seq = + /* */ + <0x048 0x07 0x00 /* QSERDES_COM_PLL_IVCO */ + 0x080 0x14 0x00 /* QSERDES_COM_SYSCLK_EN_SEL */ + 0x034 0x04 0x00 /* QSERDES_COM_BIAS_EN_CLKBUFLR_EN */ + 0x138 0x30 0x00 /* QSERDES_COM_CLK_SELECT */ + 0x03c 0x02 0x00 /* QSERDES_COM_SYS_CLK_CTRL */ + 0x08c 0x08 0x00 /* QSERDES_COM_RESETSM_CNTRL2 */ + 0x15c 0x06 0x00 /* QSERDES_COM_CMN_CONFIG */ + 0x164 0x01 0x00 /* 
QSERDES_COM_SVS_MODE_CLK_SEL */ + 0x13c 0x80 0x00 /* QSERDES_COM_HSCLK_SEL */ + 0x0b0 0x82 0x00 /* QSERDES_COM_DEC_START_MODE0 */ + 0x0b8 0xab 0x00 /* QSERDES_COM_DIV_FRAC_START1_MODE0 */ + 0x0bc 0xea 0x00 /* QSERDES_COM_DIV_FRAC_START2_MODE0 */ + 0x0c0 0x02 0x00 /* QSERDES_COM_DIV_FRAC_START3_MODE0 */ + 0x060 0x06 0x00 /* QSERDES_COM_CP_CTRL_MODE0 */ + 0x068 0x16 0x00 /* QSERDES_COM_PLL_RCTRL_MODE0 */ + 0x070 0x36 0x00 /* QSERDES_COM_PLL_CCTRL_MODE0 */ + 0x0dc 0x00 0x00 /* QSERDES_COM_INTEGLOOP_GAIN1_MODE0 */ + 0x0d8 0x3f 0x00 /* QSERDES_COM_INTEGLOOP_GAIN0_MODE0 */ + 0x0f8 0x01 0x00 /* QSERDES_COM_VCO_TUNE2_MODE0 */ + 0x0f4 0xc9 0x00 /* QSERDES_COM_VCO_TUNE1_MODE0 */ + 0x148 0x0a 0x00 /* QSERDES_COM_CORECLK_DIV_MODE0 */ + 0x0a0 0x00 0x00 /* QSERDES_COM_LOCK_CMP3_MODE0 */ + 0x09c 0x34 0x00 /* QSERDES_COM_LOCK_CMP2_MODE0 */ + 0x098 0x15 0x00 /* QSERDES_COM_LOCK_CMP1_MODE0 */ + 0x090 0x04 0x00 /* QSERDES_COM_LOCK_CMP_EN */ + 0x154 0x00 0x00 /* QSERDES_COM_CORE_CLK_EN */ + 0x094 0x00 0x00 /* QSERDES_COM_LOCK_CMP_CFG */ + 0x0f0 0x00 0x00 /* QSERDES_COM_VCO_TUNE_MAP */ + 0x040 0x0a 0x00 /* QSERDES_COM_SYSCLK_BUF_ENABLE */ + 0x0d0 0x80 0x00 /* QSERDES_COM_INTEGLOOP_INITVAL */ + 0x010 0x01 0x00 /* QSERDES_COM_SSC_EN_CENTER */ + 0x01c 0x31 0x00 /* QSERDES_COM_SSC_PER1 */ + 0x020 0x01 0x00 /* QSERDES_COM_SSC_PER2 */ + 0x014 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER1 */ + 0x018 0x00 0x00 /* QSERDES_COM_SSC_ADJ_PER2 */ + 0x024 0x85 0x00 /* QSERDES_COM_SSC_STEP_SIZE1 */ + 0x028 0x07 0x00 /* QSERDES_COM_SSC_STEP_SIZE2 */ + 0x4c0 0x0c 0x00 /* QSERDES_RX_VGA_CAL_CNTRL2 */ + 0x564 0x50 0x00 /* QSERDES_RX_RX_MODE_00 */ + 0x430 0x0b 0x00 /* QSERDES_RX_UCDR_FASTLOCK_FO_GAIN */ + 0x4d4 0x0e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 */ + 0x4d8 0x4e 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 */ + 0x4dc 0x18 0x00 /* QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 */ + 0x4f8 0x77 0x00 /* RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */ + 0x4fc 0x80 0x00 /* RX_RX_OFFSET_ADAPTOR_CNTRL2 */ + 0x504 0x03 0x00 /* QSERDES_RX_SIGDET_CNTRL */ + 0x50c 0x1c 0x00 /* QSERDES_RX_SIGDET_DEGLITCH_CNTRL */ + 0x434 0x75 0x00 /* RX_UCDR_SO_SATURATION_AND_ENABLE */ + 0x444 0x80 0x00 /* QSERDES_RX_UCDR_PI_CONTROLS */ + 0x408 0x0a 0x00 /* QSERDES_RX_UCDR_FO_GAIN */ + 0x40c 0x06 0x00 /* QSERDES_RX_UCDR_SO_GAIN */ + 0x500 0x00 0x00 /* QSERDES_RX_SIGDET_ENABLES */ + 0x260 0x10 0x00 /* QSERDES_TX_HIGHZ_DRVR_EN */ + 0x2a4 0x12 0x00 /* QSERDES_TX_RCV_DETECT_LVL_2 */ + 0x28c 0xc6 0x00 /* QSERDES_TX_LANE_MODE_1 */ + 0x248 0x09 0x00 /* TX_RES_CODE_LANE_OFFSET_RX */ + 0x244 0x0d 0x00 /* TX_RES_CODE_LANE_OFFSET_TX */ + 0x8c8 0x83 0x00 /* USB3_UNI_PCS_FLL_CNTRL2 */ + 0x8cc 0x09 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_L */ + 0x8d0 0xa2 0x00 /* USB3_UNI_PCS_FLL_CNT_VAL_H_TOL */ + 0x8d4 0x40 0x00 /* USB3_UNI_PCS_FLL_MAN_CODE */ + 0x8c4 0x02 0x00 /* USB3_UNI_PCS_FLL_CNTRL1 */ + 0x864 0x1b 0x00 /* USB3_UNI_PCS_POWER_STATE_CONFIG2 */ + 0x80c 0x9f 0x00 /* USB3_UNI_PCS_TXMGN_V0 */ + 0x810 0x9f 0x00 /* USB3_UNI_PCS_TXMGN_V1 */ + 0x814 0xb5 0x00 /* USB3_UNI_PCS_TXMGN_V2 */ + 0x818 0x4c 0x00 /* USB3_UNI_PCS_TXMGN_V3 */ + 0x81c 0x64 0x00 /* USB3_UNI_PCS_TXMGN_V4 */ + 0x820 0x6a 0x00 /* USB3_UNI_PCS_TXMGN_LS */ + 0x824 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V0 */ + 0x828 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V0 */ + 0x82c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V1 */ + 0x830 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V1 */ + 0x834 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V2 */ + 0x838 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V2 */ + 0x83c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V3 */ + 0x840 0x0d 
0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V3 */ + 0x844 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_V4 */ + 0x848 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_V4 */ + 0x84c 0x15 0x00 /* USB3_UNI_PCS_TXDEEMPH_M6DB_LS */ + 0x850 0x0d 0x00 /* USB3_UNI_PCS_TXDEEMPH_M3P5DB_LS */ + 0x85c 0x02 0x00 /* USB3_UNI_PCS_RATE_SLEW_CNTRL */ + 0x8a0 0x04 0x00 /* PCS_PWRUP_RESET_DLY_TIME_AUXCLK */ + 0x88c 0x44 0x00 /* USB3_UNI_PCS_TSYNC_RSYNC_TIME */ + 0x880 0xd1 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG1 */ + 0x884 0x1f 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG2 */ + 0x888 0x47 0x00 /* USB3_UNI_PCS_LOCK_DETECT_CONFIG3 */ + 0x870 0xe7 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_L */ + 0x874 0x03 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_H */ + 0x878 0x40 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_U3_L */ + 0x87c 0x00 0x00 /* USB3_UNI_PCS_RCVR_DTCT_DLY_U3_H */ + 0x9d8 0xba 0x00 /* USB3_UNI_PCS_RX_SIGDET_LVL */ + 0x8b8 0x75 0x00 /* RXEQTRAINING_WAIT_TIME */ + 0x8b0 0x86 0x00 /* PCS_LFPS_TX_ECSTART_EQTLOCK */ + 0x8bc 0x13 0x00 /* PCS_RXEQTRAINING_RUN_TIME */ + 0xa0c 0x21 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG1 */ + 0xa10 0x60 0x00 /* USB3_UNI_PCS_REFGEN_REQ_CONFIG2 */ + 0xffffffff 0xffffffff 0x00>; + + qcom,qmp-phy-reg-offset = + <0x974 /* USB3_UNI_PCS_PCS_STATUS */ + 0x8d8 /* USB3_UNI_PCS_AUTONOMOUS_MODE_CTRL */ + 0x8dc /* USB3_UNI_PCS_LFPS_RXTERM_IRQ_CLEAR */ + 0x804 /* USB3_UNI_PCS_POWER_DOWN_CONTROL */ + 0x800 /* USB3_UNI_PCS_SW_RESET */ + 0x808>; /* USB3_UNI_PCS_START_CONTROL */ + + clocks = <&clock_gcc GCC_USB3_PHY_AUX_CLK>, + <&clock_gcc GCC_USB3_PHY_PIPE_CLK>, + <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>; + + clock-names = "aux_clk", "pipe_clk", "ref_clk_src", + "cfg_ahb_clk"; + }; +}; diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi index 961adc9f2186..8aa6779bdf29 100644 --- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi +++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi @@ -183,3 +183,4 @@ }; #include "sdxpoorwills-regulator.dtsi" +#include "sdxpoorwills-usb.dtsi" -- GitLab From 5167bfc18a4e1420c03fffdc76f82c0de6d36359 Mon Sep 17 00:00:00 2001 From: Devdutt Patnaik Date: Fri, 2 Jun 2017 15:29:17 -0700 Subject: [PATCH 247/786] defconfig: sdxpoorwills: Enable USB driver for sdxpoorwills Enable components for USB driver for sdxpoorwills platform Change-Id: I1059a31501e764e2aa6bd01ba0d799df34994798 Signed-off-by: Devdutt Patnaik --- arch/arm/configs/sdxpoorwills-perf_defconfig | 9 +++++++++ arch/arm/configs/sdxpoorwills_defconfig | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig index 40289a8dc299..9bdde423bffe 100644 --- a/arch/arm/configs/sdxpoorwills-perf_defconfig +++ b/arch/arm/configs/sdxpoorwills-perf_defconfig @@ -240,11 +240,20 @@ CONFIG_USB_STORAGE_ONETOUCH=y CONFIG_USB_STORAGE_KARMA=y CONFIG_USB_STORAGE_CYPRESS_ATACB=y CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y CONFIG_NOP_USB_XCEIV=y CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_HSUSB_PHY=y CONFIG_USB_GADGET=y CONFIG_USB_GADGET_DEBUG_FILES=y CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_GSI=y CONFIG_MMC=y CONFIG_MMC_PARANOID_SD_INIT=y CONFIG_MMC_BLOCK_MINORS=32 diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig index d91f5f625b02..e035b243207b 100644 --- 
a/arch/arm/configs/sdxpoorwills_defconfig +++ b/arch/arm/configs/sdxpoorwills_defconfig @@ -236,11 +236,21 @@ CONFIG_USB_STORAGE_ONETOUCH=y CONFIG_USB_STORAGE_KARMA=y CONFIG_USB_STORAGE_CYPRESS_ATACB=y CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y CONFIG_NOP_USB_XCEIV=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_HSUSB_PHY=y CONFIG_USB_GADGET=y CONFIG_USB_GADGET_DEBUG_FILES=y CONFIG_USB_GADGET_DEBUG_FS=y CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_GSI=y CONFIG_MMC=y CONFIG_MMC_PARANOID_SD_INIT=y CONFIG_MMC_BLOCK_MINORS=32 -- GitLab From bb03ec547ac0f1470899d611d39dcd7248135610 Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Fri, 23 Jun 2017 19:42:09 -0700 Subject: [PATCH 248/786] lib/Kconfig.debug: add UFS_FAULT_INJECTION config UFS_FAULT_INJECTION config definition was present on msm-4.4 kernel but was missed on msm-4.9 while taking snapshot from msm-4.4. Add this config now so it can be enabled later for debug purposes. Change-Id: Ia62e30f2f123c0a8c79affb205d1fb956f8f6d57 Signed-off-by: Subhash Jadavani --- lib/Kconfig.debug | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 22eff0624bc8..6878aa856799 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1724,6 +1724,20 @@ config FAIL_MMC_REQUEST and to test how the mmc host driver handles retries from the block device. +config UFS_FAULT_INJECTION + bool "Fault-injection capability for UFS IO" + select DEBUG_FS + depends on FAULT_INJECTION && SCSI_UFSHCD + help + Provide fault-injection capability for UFS IO. + This will make the UFS host controller driver to randomly + abort ongoing commands in the host controller, update OCS + field according to the injected fatal error and can also + forcefully hang the command indefinitely till upper layer + timeout occurs. This is useful to test error handling in + the UFS contoller driver and test how the driver handles + the retries from block/SCSI mid layer. + config FAIL_FUTEX bool "Fault-injection capability for futexes" select DEBUG_FS -- GitLab From 7bafa17e6cdec8cf2d6629ddc489185a102708c6 Mon Sep 17 00:00:00 2001 From: Subhash Jadavani Date: Fri, 23 Jun 2017 19:51:13 -0700 Subject: [PATCH 249/786] defconfig: sdm845: enable CONFIG_UFS_FAULT_INJECTION CONFIG_UFS_FAULT_INJECTION would enable fault-injection framework to simulate error conditions in the UFS controller and verify error handling mechanisms implemented in UFS host controller driver. Change-Id: I3d026f8b083f0d94e0c9f2e3c5a21eb94a11a7f2 Signed-off-by: Subhash Jadavani --- arch/arm64/configs/sdm845_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 1f1b5b46d522..7f1e7700057a 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -601,6 +601,7 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_LIST=y CONFIG_FAULT_INJECTION=y CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_UFS_FAULT_INJECTION=y CONFIG_FAULT_INJECTION_DEBUG_FS=y CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y CONFIG_IPC_LOGGING=y -- GitLab From 222aa34e5d791217aaba7096eae78d5bba42b30b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 16 Jun 2017 11:08:24 +0200 Subject: [PATCH 250/786] fs: pass on flags in compat_writev commit 20223f0f39ea9d31ece08f04ac79f8c4e8d98246 upstream. 
Fixes: 793b80ef14af ("vfs: pass a flags argument to vfs_readv/vfs_writev") Signed-off-by: Christoph Hellwig Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/read_write.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/read_write.c b/fs/read_write.c index 190e0d362581..e479e24dcd4c 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1232,7 +1232,7 @@ static size_t compat_writev(struct file *file, if (!(file->f_mode & FMODE_CAN_WRITE)) goto out; - ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, 0); + ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos, flags); out: if (ret > 0) -- GitLab From a6d6282040b7196a58fba47f89b05b560fadde2b Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 8 Jun 2017 04:51:54 +0000 Subject: [PATCH 251/786] configfs: Fix race between create_link and configfs_rmdir commit ba80aa909c99802c428682c352b0ee0baac0acd3 upstream. This patch closes a long standing race in configfs between the creation of a new symlink in create_link(), while the symlink target's config_item is being concurrently removed via configfs_rmdir(). This can happen because the symlink target's reference is obtained by config_item_get() in create_link() before the CONFIGFS_USET_DROPPING bit set by configfs_detach_prep() during configfs_rmdir() shutdown is actually checked.. This originally manifested itself on ppc64 on v4.8.y under heavy load using ibmvscsi target ports with Novalink API: [ 7877.289863] rpadlpar_io: slot U8247.22L.212A91A-V1-C8 added [ 7879.893760] ------------[ cut here ]------------ [ 7879.893768] WARNING: CPU: 15 PID: 17585 at ./include/linux/kref.h:46 config_item_get+0x7c/0x90 [configfs] [ 7879.893811] CPU: 15 PID: 17585 Comm: targetcli Tainted: G O 4.8.17-customv2.22 #12 [ 7879.893812] task: c00000018a0d3400 task.stack: c0000001f3b40000 [ 7879.893813] NIP: d000000002c664ec LR: d000000002c60980 CTR: c000000000b70870 [ 7879.893814] REGS: c0000001f3b43810 TRAP: 0700 Tainted: G O (4.8.17-customv2.22) [ 7879.893815] MSR: 8000000000029033 CR: 28222242 XER: 00000000 [ 7879.893820] CFAR: d000000002c664bc SOFTE: 1 GPR00: d000000002c60980 c0000001f3b43a90 d000000002c70908 c0000000fbc06820 GPR04: c0000001ef1bd900 0000000000000004 0000000000000001 0000000000000000 GPR08: 0000000000000000 0000000000000001 d000000002c69560 d000000002c66d80 GPR12: c000000000b70870 c00000000e798700 c0000001f3b43ca0 c0000001d4949d40 GPR16: c00000014637e1c0 0000000000000000 0000000000000000 c0000000f2392940 GPR20: c0000001f3b43b98 0000000000000041 0000000000600000 0000000000000000 GPR24: fffffffffffff000 0000000000000000 d000000002c60be0 c0000001f1dac490 GPR28: 0000000000000004 0000000000000000 c0000001ef1bd900 c0000000f2392940 [ 7879.893839] NIP [d000000002c664ec] config_item_get+0x7c/0x90 [configfs] [ 7879.893841] LR [d000000002c60980] check_perm+0x80/0x2e0 [configfs] [ 7879.893842] Call Trace: [ 7879.893844] [c0000001f3b43ac0] [d000000002c60980] check_perm+0x80/0x2e0 [configfs] [ 7879.893847] [c0000001f3b43b10] [c000000000329770] do_dentry_open+0x2c0/0x460 [ 7879.893849] [c0000001f3b43b70] [c000000000344480] path_openat+0x210/0x1490 [ 7879.893851] [c0000001f3b43c80] [c00000000034708c] do_filp_open+0xfc/0x170 [ 7879.893853] [c0000001f3b43db0] [c00000000032b5bc] do_sys_open+0x1cc/0x390 [ 7879.893856] [c0000001f3b43e30] [c000000000009584] system_call+0x38/0xec [ 7879.893856] Instruction dump: [ 7879.893858] 409d0014 38210030 e8010010 7c0803a6 4e800020 3d220000 e94981e0 892a0000 [ 7879.893861] 2f890000 409effe0 39200001 992a0000 <0fe00000> 
4bffffd0 60000000 60000000 [ 7879.893866] ---[ end trace 14078f0b3b5ad0aa ]--- To close this race, go ahead and obtain the symlink's target config_item reference only after the existing CONFIGFS_USET_DROPPING check succeeds. This way, if configfs_rmdir() wins create_link() will return -ENONET, and if create_link() wins configfs_rmdir() will return -EBUSY. Reported-by: Bryant G. Ly Tested-by: Bryant G. Ly Signed-off-by: Nicholas Bellinger Signed-off-by: Christoph Hellwig Signed-off-by: Greg Kroah-Hartman --- fs/configfs/symlink.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c index db6d69289608..314b4edac72b 100644 --- a/fs/configfs/symlink.c +++ b/fs/configfs/symlink.c @@ -83,14 +83,13 @@ static int create_link(struct config_item *parent_item, ret = -ENOMEM; sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL); if (sl) { - sl->sl_target = config_item_get(item); spin_lock(&configfs_dirent_lock); if (target_sd->s_type & CONFIGFS_USET_DROPPING) { spin_unlock(&configfs_dirent_lock); - config_item_put(item); kfree(sl); return -ENOENT; } + sl->sl_target = config_item_get(item); list_add(&sl->sl_list, &target_sd->s_links); spin_unlock(&configfs_dirent_lock); ret = configfs_create_link(sl, parent_item->ci_dentry, -- GitLab From 95f47cd7d7d8f40a1cc744a4a3116b44a43e6514 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Sun, 4 Jun 2017 14:03:42 +0200 Subject: [PATCH 252/786] can: gs_usb: fix memory leak in gs_cmd_reset() commit 5cda3ee5138e91ac369ed9d0b55eab0dab077686 upstream. This patch adds the missing kfree() in gs_cmd_reset() to free the memory that is not used anymore after usb_control_msg(). Cc: Maximilian Schneider Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/gs_usb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 7ab24c5262f3..05369dc9dd09 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -265,6 +265,8 @@ static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev) sizeof(*dm), 1000); + kfree(dm); + return rc; } -- GitLab From 47537bceb7b7de66a2cf8d0f3c1b15da7be7ef4d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 8 Jun 2017 09:54:24 +0200 Subject: [PATCH 253/786] ila_xlat: add missing hash secret initialization commit 0db47e3d323411beeb6ea97f2c4d19395c91fd8b upstream. While discussing the possible merits of clang warning about unused initialized functions, I found one function that was clearly meant to be called but never actually is. __ila_hash_secret_init() initializes the hash value for the ila locator, apparently this is intended to prevent hash collision attacks, but this ends up being a read-only zero constant since there is no caller. I could find no indication of why it was never called, the earliest patch submission for the module already was like this. If my interpretation is right, we certainly want to backport the patch to stable kernels as well. I considered adding it to the ila_xlat_init callback, but for best effect the random data is read as late as possible, just before it is first used. The underlying net_get_random_once() is already highly optimized to avoid overhead when called frequently. Fixes: 7f00feaf1076 ("ila: Add generic ILA translation facility") Link: https://www.spinics.net/lists/kernel/msg2527243.html Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ila/ila_xlat.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index e604013dd814..7a5b9812af10 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -68,6 +68,7 @@ static inline u32 ila_locator_hash(struct ila_locator loc) { u32 *v = (u32 *)loc.v32; + __ila_hash_secret_init(); return jhash_2words(v[0], v[1], hashrnd); } -- GitLab From 5d5605cc5833d6b83db393e2d178e10ef307bb40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Wilczy=C5=84ski?= Date: Sun, 11 Jun 2017 17:28:39 +0900 Subject: [PATCH 254/786] cpufreq: conservative: Allow down_threshold to take values from 1 to 10 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit b8e11f7d2791bd9320be1c6e772a60b2aa093e45 upstream. Commit 27ed3cd2ebf4 (cpufreq: conservative: Fix the logic in frequency decrease checking) removed the 10 point substraction when comparing the load against down_threshold but did not remove the related limit for the down_threshold value. As a result, down_threshold lower than 11 is not allowed even though values from 1 to 10 do work correctly too. The comment ("cannot be lower than 11 otherwise freq will not fall") also does not apply after removing the substraction. For this reason, allow down_threshold to take any value from 1 to 99 and fix the related comment. Fixes: 27ed3cd2ebf4 (cpufreq: conservative: Fix the logic in frequency decrease checking) Signed-off-by: Tomasz Wilczyński Acked-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/cpufreq/cpufreq_conservative.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 13475890d792..00a74351f623 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -169,8 +169,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set, int ret; ret = sscanf(buf, "%u", &input); - /* cannot be lower than 11 otherwise freq will not fall */ - if (ret != 1 || input < 11 || input > 100 || + /* cannot be lower than 1 otherwise freq will not fall */ + if (ret != 1 || input < 1 || input > 100 || input >= dbs_data->up_threshold) return -EINVAL; -- GitLab From 63d34ea7042af3ffe25cb19094cb5e766e7b67ca Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 28 Apr 2017 01:51:40 -0300 Subject: [PATCH 255/786] vb2: Fix an off by one error in 'vb2_plane_vaddr' commit 5ebb6dd36c9f5fb37b1077b393c254d70a14cb46 upstream. We should ensure that 'plane_no' is '< vb->num_planes' as done in 'vb2_plane_cookie' just a few lines below. 
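For illustration only (not the videobuf2 code itself): with a zero-based index, plane_no == num_planes already points one past the last element, so a '>' test lets exactly that out-of-range value through. A minimal standalone sketch with a hypothetical plane_name() lookup:

    #include <stdio.h>

    #define NUM_PLANES 2

    /* Hypothetical lookup over a fixed array; only the bounds check matters here. */
    static const char *plane_name(unsigned int plane_no)
    {
            static const char * const names[NUM_PLANES] = { "luma", "chroma" };

            if (plane_no >= NUM_PLANES)  /* a '>' test would wrongly accept plane_no == 2 */
                    return NULL;
            return names[plane_no];
    }

    int main(void)
    {
            const char *name = plane_name(NUM_PLANES);

            printf("plane %u -> %s\n", NUM_PLANES, name ? name : "(out of range)");
            return 0;
    }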
Fixes: e23ccc0ad925 ("[media] v4l: add videobuf2 Video for Linux 2 driver framework") Signed-off-by: Christophe JAILLET Reviewed-by: Sakari Ailus Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/v4l2-core/videobuf2-core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index 21900202ff83..9ccf7f5e0e2e 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -868,7 +868,7 @@ EXPORT_SYMBOL_GPL(vb2_core_create_bufs); void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) { - if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) + if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv) return NULL; return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv); -- GitLab From 2ec5b68bf62ef47daf526967f0fd069caf581e31 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 8 Jun 2017 14:00:49 +0300 Subject: [PATCH 256/786] mac80211: don't look at the PM bit of BAR frames commit 769dc04db3ed8484798aceb015b94deacc2ba557 upstream. When a peer sends a BAR frame with PM bit clear, we should not modify its PM state as madated by the spec in 802.11-20012 10.2.1.2. Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/mac80211/rx.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index acaaf616da71..c0731650c170 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1585,12 +1585,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) */ if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && !ieee80211_has_morefrags(hdr->frame_control) && + !ieee80211_is_back_req(hdr->frame_control) && !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && (rx->sdata->vif.type == NL80211_IFTYPE_AP || rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && - /* PM bit is only checked in frames where it isn't reserved, + /* + * PM bit is only checked in frames where it isn't reserved, * in AP mode it's reserved in non-bufferable management frames * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) + * BAR frames should be ignored as specified in + * IEEE 802.11-2012 10.2.1.2. */ (!ieee80211_is_mgmt(hdr->frame_control) || ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { -- GitLab From f6e99a2efc03eea6444f59f158e1b0ce7c51ef36 Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Sat, 10 Jun 2017 04:59:12 +0200 Subject: [PATCH 257/786] mac80211/wpa: use constant time memory comparison for MACs commit 98c67d187db7808b1f3c95f2110dd4392d034182 upstream. Otherwise, we enable all sorts of forgeries via timing attack. Signed-off-by: Jason A. 
Donenfeld Cc: Johannes Berg Cc: linux-wireless@vger.kernel.org Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/mac80211/wpa.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 42ce9bd4426f..5c71d60f3a64 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "ieee80211_i.h" #include "michael.h" @@ -153,7 +154,7 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; michael_mic(key, hdr, data, data_len, mic); - if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) + if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN)) goto mic_fail; /* remove Michael MIC from payload */ @@ -1047,7 +1048,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) bip_aad(skb, aad); ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mic); - if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { + if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_cmac.icverrors++; return RX_DROP_UNUSABLE; } @@ -1097,7 +1098,7 @@ ieee80211_crypto_aes_cmac_256_decrypt(struct ieee80211_rx_data *rx) bip_aad(skb, aad); ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mic); - if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { + if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_cmac.icverrors++; return RX_DROP_UNUSABLE; } @@ -1201,7 +1202,7 @@ ieee80211_crypto_aes_gmac_decrypt(struct ieee80211_rx_data *rx) if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce, skb->data + 24, skb->len - 24, mic) < 0 || - memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { + crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) { key->u.aes_gmac.icverrors++; return RX_DROP_UNUSABLE; } -- GitLab From 7f7bb1173db826a292727ee6d7a499cef4e6e0bc Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Tue, 13 Jun 2017 07:17:10 +0200 Subject: [PATCH 258/786] drm/amdgpu: Fix overflow of watermark calcs at > 4k resolutions. commit bea10413934dcf98cb9b2dfcdc56e1d28f192897 upstream. Commit d63c277dc672e0 ("drm/amdgpu: Make display watermark calculations more accurate") made watermark calculations more accurate, but not for > 4k resolutions on 32-Bit architectures, as it introduced an integer overflow for those setups and resolutions. Fix this by proper u64 casting and division. 
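To see why the 32-bit product is the problem, here is a minimal standalone sketch of the arithmetic only (example mode numbers, not driver code). The kernel patch performs the same division with div_u64(), since a plain 64-by-32 divide is not available on every 32-bit architecture.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Example numbers only: a 5120-pixel-wide mode, 600 MHz pixel clock (in kHz). */
            uint32_t crtc_hdisplay = 5120;
            uint32_t clock = 600000;

            uint32_t prod32 = crtc_hdisplay * 1000000u;           /* wraps: 5.12e9 exceeds UINT32_MAX */
            uint64_t prod64 = (uint64_t)crtc_hdisplay * 1000000;  /* what the fix computes */

            /* With the clock in kHz, the quotient is the line-active time in ns. */
            printf("active_time, 32-bit math: %u ns\n", prod32 / clock);
            printf("active_time, 64-bit math: %u ns\n", (uint32_t)(prod64 / clock));
            return 0;
    }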
Signed-off-by: Mario Kleiner Reported-by: Ben Hutchings Fixes: d63c277dc672 ("drm/amdgpu: Make display watermark calculations more accurate") Cc: Ben Hutchings Cc: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 7 +++++-- 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 42448c7c5ff5..db9b79a8b160 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1290,8 +1290,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 904dabdc3a1e..36d5128a2aad 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1257,8 +1257,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 6d02bdb25d98..75689a2c2de6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1054,8 +1054,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, fixed20_12 a, b, c; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); priority_a_cnt = 0; priority_b_cnt = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index b1fb60107cfa..ba2321ea800e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1211,8 +1211,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, u32 tmp, wm_mask, lb_vblank_lead_lines = 0; if (amdgpu_crtc->base.enabled && num_heads && mode) { - active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; - line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / 
(u32)mode->clock), (u32)65535); + active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, + (u32)mode->clock); + line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, + (u32)mode->clock); + line_time = min(line_time, (u32)65535); /* watermark for high clocks */ if (adev->pm.dpm_enabled) { -- GitLab From a317afc0c1ce9f49573ae8182d2a0f38d0171962 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Fri, 9 Jun 2017 15:48:05 +0800 Subject: [PATCH 259/786] drm/i915: Fix GVT-g PVINFO version compatibility check commit c380f681245d7ae57f17d9ebbbe8f8f1557ee1fb upstream. Current it's strictly checked if PVINFO version matches 1.0 for GVT-g i915 guest which doesn't help for compatibility at all and forces GVT-g host can't extend PVINFO easily with version bump for real compatibility check. This fixes that to check minimal required PVINFO version instead. v2: - drop unneeded version macro - use only major version for sanity check v3: - fix up PVInfo value with kernel type - one indent fix Reviewed-by: Joonas Lahtinen Cc: Chuanxiao Dong Cc: Joonas Lahtinen Signed-off-by: Zhenyu Wang Signed-off-by: Joonas Lahtinen Link: http://patchwork.freedesktop.org/patch/msgid/20170609074805.5101-1-zhenyuw@linux.intel.com (cherry picked from commit 0c8792d00d38de85b6ceb1dd67d3ee009d7c8e42) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_pvinfo.h | 8 ++------ drivers/gpu/drm/i915/i915_vgpu.c | 10 ++++------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index c0cb2974caac..2cfe96d3e5d1 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -36,10 +36,6 @@ #define VGT_VERSION_MAJOR 1 #define VGT_VERSION_MINOR 0 -#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) -#define INTEL_VGT_IF_VERSION \ - INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) - /* * notifications from guest to vgpu device model */ @@ -55,8 +51,8 @@ enum vgt_g2v_type { struct vgt_if { u64 magic; /* VGT_MAGIC */ - uint16_t version_major; - uint16_t version_minor; + u16 version_major; + u16 version_minor; u32 vgt_id; /* ID of vGT instance */ u32 rsv1[12]; /* pad to offset 0x40 */ /* diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index dae340cfc6c7..125adcc6d6ca 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -60,8 +60,8 @@ */ void i915_check_vgpu(struct drm_i915_private *dev_priv) { - uint64_t magic; - uint32_t version; + u64 magic; + u16 version_major; BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); @@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv) if (magic != VGT_MAGIC) return; - version = INTEL_VGT_IF_VERSION_ENCODE( - __raw_i915_read16(dev_priv, vgtif_reg(version_major)), - __raw_i915_read16(dev_priv, vgtif_reg(version_minor))); - if (version != INTEL_VGT_IF_VERSION) { + version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major)); + if (version_major < VGT_VERSION_MAJOR) { DRM_INFO("VGT interface version mismatch!\n"); return; } -- GitLab From bd3f89002e526bb954d8b8a5620bd6810423f040 Mon Sep 17 00:00:00 2001 From: Bin Liu Date: Thu, 25 May 2017 13:42:39 -0500 Subject: [PATCH 260/786] usb: musb: dsps: keep VBUS on for host-only mode commit b3addcf0d1f04f53fcc302577d5a5e964c18531a upstream. Currently VBUS is turned off while a usb device is detached, and turned on again by the polling routine. 
This short period VBUS loss prevents usb modem to switch mode. VBUS should be constantly on for host-only mode, so this changes the driver to not turn off VBUS for host-only mode. Fixes: 2f3fd2c5bde1 ("usb: musb: Prepare dsps glue layer for PM runtime support") Reported-by: Moreno Bartalucci Acked-by: Tony Lindgren Signed-off-by: Bin Liu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/musb/musb_dsps.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 9f125e179acd..39666fb911b0 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -213,6 +213,12 @@ static int dsps_check_status(struct musb *musb, void *unused) msecs_to_jiffies(wrp->poll_timeout)); break; case OTG_STATE_A_WAIT_BCON: + /* keep VBUS on for host-only mode */ + if (musb->port_mode == MUSB_PORT_MODE_HOST) { + mod_timer(&glue->timer, jiffies + + msecs_to_jiffies(wrp->poll_timeout)); + break; + } musb_writeb(musb->mregs, MUSB_DEVCTL, 0); skip_session = 1; /* fall */ -- GitLab From f79d740f3289d747b5563da98b778c0021a8fc32 Mon Sep 17 00:00:00 2001 From: Koen Vandeputte Date: Wed, 8 Feb 2017 15:32:05 +0100 Subject: [PATCH 261/786] mac80211: fix CSA in IBSS mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit f181d6a3bcc35633facf5f3925699021c13492c5 upstream. Add the missing IBSS capability flag during capability init as it needs to be inserted into the generated beacon in order for CSA to work. Fixes: cd7760e62c2ac ("mac80211: add support for CSA in IBSS mode") Signed-off-by: Piotr Gawlowicz Signed-off-by: Mikołaj Chwalisz Tested-by: Koen Vandeputte Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/mac80211/ibss.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index a31d30713d08..98999d3d5262 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -487,14 +487,14 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata, struct beacon_data *presp, *old_presp; struct cfg80211_bss *cbss; const struct cfg80211_bss_ies *ies; - u16 capability = 0; + u16 capability = WLAN_CAPABILITY_IBSS; u64 tsf; int ret = 0; sdata_assert_lock(sdata); if (ifibss->privacy) - capability = WLAN_CAPABILITY_PRIVACY; + capability |= WLAN_CAPABILITY_PRIVACY; cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan, ifibss->bssid, ifibss->ssid, -- GitLab From 841e4e775bb1229dce3404a459b8353c08c70f49 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 20 Feb 2017 08:59:16 +0100 Subject: [PATCH 262/786] mac80211: fix packet statistics for fast-RX commit 0328edc77d4f35014b35f32b46be0a7e16aae74f upstream. When adding per-CPU statistics, which added statistics back to mac80211 for the fast-RX path, I evidently forgot to add the "stats->packets++" line. The reason for that is likely that I didn't see it since it's done in defragmentation for the regular RX path. Add the missing line to properly count received packets in the fast-RX case. 
Fixes: c9c5962b56c1 ("mac80211: enable collecting station statistics per-CPU") Reported-by: Oren Givon Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/mac80211/rx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c0731650c170..d3738b475f7c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3953,6 +3953,7 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, stats->last_rate = sta_stats_encode_rate(status); stats->fragments++; + stats->packets++; if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { stats->last_signal = status->signal; -- GitLab From c8143269c9c48172c832f25b5ae38163147244d2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 27 Apr 2017 13:19:04 +0200 Subject: [PATCH 263/786] mac80211: fix IBSS presp allocation size commit f1f3e9e2a50a70de908f9dfe0d870e9cdc67e042 upstream. When VHT IBSS support was added, the size of the extra elements wasn't considered in ieee80211_ibss_build_presp(), which makes it possible that it would overrun the allocated buffer. Fix it by allocating the necessary space. Fixes: abcff6ef01f9 ("mac80211: add VHT support for IBSS") Reported-by: Shaul Triebitz Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/mac80211/ibss.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 98999d3d5262..62d13eabe17f 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -66,6 +66,8 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata, 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2 + sizeof(struct ieee80211_ht_cap) + 2 + sizeof(struct ieee80211_ht_operation) + + 2 + sizeof(struct ieee80211_vht_cap) + + 2 + sizeof(struct ieee80211_vht_operation) + ifibss->ie_len; presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL); if (!presp) -- GitLab From 6568f8f70152b787ded9fe9f7d88aaeac379b6d3 Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Sun, 14 May 2017 21:41:55 -0700 Subject: [PATCH 264/786] mac80211: strictly check mesh address extension mode commit 5667c86acf021e6dcf02584408b4484a273ac68f upstream. Mesh forwarding path checks for address extension mode to fetch appropriate proxied address and MPP address. Existing condition that looks for 6 address format is not strict enough so that frames with improper values are processed and invalid entries are added into MPP table. Fix that by adding a stricter check before processing the packet. Per IEEE Std 802.11s-2011 spec. Table 7-6g1 lists address extension mode 0x3 as reserved one. And also Table Table 9-13 does not specify 0x3 as valid address field. 
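The difference between the loose and the strict test can be shown with a small standalone sketch (flag values spelled out locally to keep it self-contained; in the kernel they come from ieee80211.h): the reserved mode 0x3 has the A5/A6 bit set, so a plain bit-test accepts it while the masked comparison does not.

    #include <stdio.h>

    #define MESH_FLAGS_AE_A4     0x1
    #define MESH_FLAGS_AE_A5_A6  0x2
    #define MESH_FLAGS_AE        0x3

    int main(void)
    {
            unsigned char flags = 0x3;      /* reserved address extension mode */

            /* Loose test: true, because the A5/A6 bit happens to be set in 0x3. */
            printf("flags & AE_A5_A6         -> %d\n", !!(flags & MESH_FLAGS_AE_A5_A6));

            /* Strict test from the fix: false, only the exact A5/A6 mode passes. */
            printf("(flags & AE) == AE_A5_A6 -> %d\n",
                   (flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6);
            return 0;
    }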
Fixes: 9b395bc3be1c ("mac80211: verify that skb data is present") Signed-off-by: Rajkumar Manoharan Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/mac80211/rx.c | 3 ++- net/wireless/util.c | 10 ++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index d3738b475f7c..c45a0fcfb3e7 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2471,7 +2471,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) if (is_multicast_ether_addr(hdr->addr1)) { mpp_addr = hdr->addr3; proxied_addr = mesh_hdr->eaddr1; - } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { + } else if ((mesh_hdr->flags & MESH_FLAGS_AE) == + MESH_FLAGS_AE_A5_A6) { /* has_a4 already checked in ieee80211_rx_mesh_check */ mpp_addr = hdr->addr4; proxied_addr = mesh_hdr->eaddr2; diff --git a/net/wireless/util.c b/net/wireless/util.c index 659b507b347d..c921c2eed15d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -454,6 +454,8 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, if (iftype == NL80211_IFTYPE_MESH_POINT) skb_copy_bits(skb, hdrlen, &mesh_flags, 1); + mesh_flags &= MESH_FLAGS_AE; + switch (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { case cpu_to_le16(IEEE80211_FCTL_TODS): @@ -469,9 +471,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, iftype != NL80211_IFTYPE_STATION)) return -1; if (iftype == NL80211_IFTYPE_MESH_POINT) { - if (mesh_flags & MESH_FLAGS_AE_A4) + if (mesh_flags == MESH_FLAGS_AE_A4) return -1; - if (mesh_flags & MESH_FLAGS_AE_A5_A6) { + if (mesh_flags == MESH_FLAGS_AE_A5_A6) { skb_copy_bits(skb, hdrlen + offsetof(struct ieee80211s_hdr, eaddr1), tmp.h_dest, 2 * ETH_ALEN); @@ -487,9 +489,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, ether_addr_equal(tmp.h_source, addr))) return -1; if (iftype == NL80211_IFTYPE_MESH_POINT) { - if (mesh_flags & MESH_FLAGS_AE_A5_A6) + if (mesh_flags == MESH_FLAGS_AE_A5_A6) return -1; - if (mesh_flags & MESH_FLAGS_AE_A4) + if (mesh_flags == MESH_FLAGS_AE_A4) skb_copy_bits(skb, hdrlen + offsetof(struct ieee80211s_hdr, eaddr1), tmp.h_source, ETH_ALEN); -- GitLab From 3e8c503d0a1626cb54c01c04f6485683d8d76f28 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 1 Jun 2017 21:26:03 +0200 Subject: [PATCH 265/786] mac80211: fix dropped counter in multiqueue RX commit e165bc02a02c70e40d5c811c705ba269aeca0497 upstream. In the commit enabling per-CPU station statistics, I inadvertedly copy-pasted some code to update rx_packets and forgot to change it to update rx_dropped_misc. Fix that. This addresses https://bugzilla.kernel.org/show_bug.cgi?id=195953. 
Fixes: c9c5962b56c1 ("mac80211: enable collecting station statistics per-CPU") Reported-by: Petru-Florin Mihancea Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/mac80211/sta_info.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 8e05032689f0..b2c823ffad74 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2148,7 +2148,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) struct ieee80211_sta_rx_stats *cpurxs; cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); - sinfo->rx_packets += cpurxs->dropped; + sinfo->rx_dropped_misc += cpurxs->dropped; } } -- GitLab From 5b754c994f3893f64973aecb467c38cd9bfc25e5 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sat, 10 Jun 2017 13:52:45 +0300 Subject: [PATCH 266/786] mac80211: don't send SMPS action frame in AP mode when not needed commit b3dd8279659f14f3624bb32559782d699fa6f7d1 upstream. mac80211 allows to modify the SMPS state of an AP both, when it is started, and after it has been started. Such a change will trigger an action frame to all the peers that are currently connected, and will be remembered so that new peers will get notified as soon as they connect (since the SMPS setting in the beacon may not be the right one). This means that we need to remember the SMPS state currently requested as well as the SMPS state that was configured initially (and advertised in the beacon). The former is bss->req_smps and the latter is sdata->smps_mode. Initially, the AP interface could only be started with SMPS_OFF, which means that sdata->smps_mode was SMPS_OFF always. Later, a nl80211 API was added to be able to start an AP with a different AP mode. That code forgot to update bss->req_smps and because of that, if the AP interface was started with SMPS_DYNAMIC, we had: sdata->smps_mode = SMPS_DYNAMIC bss->req_smps = SMPS_OFF That configuration made mac80211 think it needs to fire off an action frame to any new station connecting to the AP in order to let it know that the actual SMPS configuration is SMPS_OFF. Fix that by properly setting bss->req_smps in ieee80211_start_ap. Fixes: f69931748730 ("mac80211: set smps_mode according to ap params") Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- net/mac80211/cfg.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fd6541f3ade3..07001b6d36cc 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -865,6 +865,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, default: return -EINVAL; } + sdata->u.ap.req_smps = sdata->smps_mode; + sdata->needed_rx_chains = sdata->local->rx_chains; mutex_lock(&local->mtx); -- GitLab From 5899b635ecc7a8c4ae5007f2e74250ec4297feba Mon Sep 17 00:00:00 2001 From: YYS Date: Tue, 21 Mar 2017 16:27:03 +0800 Subject: [PATCH 267/786] drm/mediatek: fix mtk_hdmi_setup_vendor_specific_infoframe mistake commit 014580ffab654bb83256783a2b185cf6c06dffaa upstream. mtk_hdmi_setup_vendor_specific_infoframe will return before handle mtk_hdmi_hw_send_info_frame.Because hdmi_vendor_infoframe_pack returns the number of bytes packed into the binary buffer or a negative error code on failure. So correct it. 
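The pitfall is the usual count-or-negative-errno convention: a successful pack returns a positive byte count, which 'if (err)' treats as failure. A minimal standalone sketch with a hypothetical pack_frame() helper (payload bytes are arbitrary):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    /* Hypothetical packer following the same convention: returns the number of
     * bytes written on success or a negative error code on failure. */
    static int pack_frame(unsigned char *buf, size_t len)
    {
            static const unsigned char payload[] = { 0x81, 0x01, 0x04 };

            if (len < sizeof(payload))
                    return -EINVAL;
            memcpy(buf, payload, sizeof(payload));
            return (int)sizeof(payload);
    }

    int main(void)
    {
            unsigned char buf[16];
            int err = pack_frame(buf, sizeof(buf));

            if (err)        /* wrong: a successful pack (err == 3) also lands here */
                    printf("'if (err)' would report failure, err=%d\n", err);

            if (err < 0)    /* right: only negative values are errors */
                    printf("pack failed: %d\n", err);
            else
                    printf("packed %d bytes\n", err);
            return 0;
    }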
Fixes: 8f83f26891e1 ("drm/mediatek: Add HDMI support") Signed-off-by: Nickey Yang Signed-off-by: CK Hu Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/mediatek/mtk_hdmi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 0e8c4d9af340..e097780752f6 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1061,7 +1061,7 @@ static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi, } err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer)); - if (err) { + if (err < 0) { dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n", err); return err; -- GitLab From 879d61f218a94afdc53a050e97c59209b64687c4 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 1 Mar 2017 10:56:02 -0800 Subject: [PATCH 268/786] drm/vc4: Fix OOPSes from trying to cache a partially constructed BO. commit ca39b449f6d03e8235969f12f5dd25b8eb4304d6 upstream. If a CMA allocation failed, the partially constructed BO would be unreferenced through the normal path, and we might choose to put it in the BO cache. If we then reused it before it expired from the cache, the kernel would OOPS. Signed-off-by: Eric Anholt Fixes: c826a6e10644 ("drm/vc4: Add a BO cache.") Reviewed-by: Boris Brezillon Link: http://patchwork.freedesktop.org/patch/msgid/20170301185602.6873-2-eric@anholt.net Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vc4/vc4_bo.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 3f6704cf6608..ec9023bd935b 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -313,6 +313,14 @@ void vc4_free_object(struct drm_gem_object *gem_bo) goto out; } + /* If this object was partially constructed but CMA allocation + * had failed, just free it. + */ + if (!bo->base.vaddr) { + vc4_bo_destroy(bo); + goto out; + } + cache_list = vc4_get_cache_list_for_size(dev, gem_bo->size); if (!cache_list) { vc4_bo_destroy(bo); -- GitLab From a50aacf5de522a5af2a9cd61ba38b0bd8b50cad6 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Fri, 12 May 2017 16:35:45 +0200 Subject: [PATCH 269/786] serial: efm32: Fix parity management in 'efm32_uart_console_get_options()' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit be40597a1bc173bf9dadccdf5388b956f620ae8f upstream. UARTn_FRAME_PARITY_ODD is 0x0300 UARTn_FRAME_PARITY_EVEN is 0x0200 So if the UART is configured for EVEN parity, it would be reported as ODD. Fix it by correctly testing if the 2 bits are set. 
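Because the two parity settings share a bit, a plain bit-test cannot tell them apart. A standalone sketch using the register values quoted above:

    #include <stdio.h>

    #define UARTn_FRAME_PARITY__MASK  0x0300
    #define UARTn_FRAME_PARITY_EVEN   0x0200
    #define UARTn_FRAME_PARITY_ODD    0x0300

    static char parity_char(unsigned int frame)
    {
            /* ODD (0x0300) and EVEN (0x0200) share bit 9, so 'frame & PARITY_ODD'
             * is also true for an EVEN setting; mask the 2-bit field and compare. */
            switch (frame & UARTn_FRAME_PARITY__MASK) {
            case UARTn_FRAME_PARITY_ODD:
                    return 'o';
            case UARTn_FRAME_PARITY_EVEN:
                    return 'e';
            default:
                    return 'n';
            }
    }

    int main(void)
    {
            printf("EVEN frame decodes as '%c'\n", parity_char(UARTn_FRAME_PARITY_EVEN));
            printf("ODD  frame decodes as '%c'\n", parity_char(UARTn_FRAME_PARITY_ODD));
            return 0;
    }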
Fixes: 3afbd89c9639 ("serial/efm32: add new driver") Signed-off-by: Christophe JAILLET Acked-by: Uwe Kleine-König Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/efm32-uart.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c index 195acc868763..5d476916191b 100644 --- a/drivers/tty/serial/efm32-uart.c +++ b/drivers/tty/serial/efm32-uart.c @@ -27,6 +27,7 @@ #define UARTn_FRAME 0x04 #define UARTn_FRAME_DATABITS__MASK 0x000f #define UARTn_FRAME_DATABITS(n) ((n) - 3) +#define UARTn_FRAME_PARITY__MASK 0x0300 #define UARTn_FRAME_PARITY_NONE 0x0000 #define UARTn_FRAME_PARITY_EVEN 0x0200 #define UARTn_FRAME_PARITY_ODD 0x0300 @@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port, 16 * (4 + (clkdiv >> 6))); frame = efm32_uart_read32(efm_port, UARTn_FRAME); - if (frame & UARTn_FRAME_PARITY_ODD) + switch (frame & UARTn_FRAME_PARITY__MASK) { + case UARTn_FRAME_PARITY_ODD: *parity = 'o'; - else if (frame & UARTn_FRAME_PARITY_EVEN) + break; + case UARTn_FRAME_PARITY_EVEN: *parity = 'e'; - else + break; + default: *parity = 'n'; + } *bits = (frame & UARTn_FRAME_DATABITS__MASK) - UARTn_FRAME_DATABITS(4) + 4; -- GitLab From ad3faea03fdfeed4cb1f8fc1b3b28eb8282bd231 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 28 Mar 2017 11:13:45 +0200 Subject: [PATCH 270/786] serial: sh-sci: Fix late enablement of AUTORTS commit 5f76895e4c712b1b5af450cf344389b8c53ac2c2 upstream. When changing hardware control flow for a UART with dedicated RTS/CTS pins, the new AUTORTS state is not immediately reflected in the hardware, but only when RTS is raised. However, the serial core does not call .set_mctrl() after .set_termios(), hence AUTORTS may only become effective when the port is closed, and reopened later. Note that this problem does not happen when manually using stty to change CRTSCTS, as AUTORTS will work fine on next open. To fix this, call .set_mctrl() from .set_termios() when dedicated RTS/CTS pins are present, to refresh the AUTORTS or RTS state. This is similar to what other drivers supporting AUTORTS do (e.g. omap-serial). Reported-by: Baumann, Christoph (C.) Fixes: 33f50ffc253854cf ("serial: sh-sci: Fix support for hardware-assisted RTS/CTS") Signed-off-by: Geert Uytterhoeven Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/sh-sci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index ee84f89391ca..7e97a1ccab23 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -2366,6 +2366,10 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios, */ udelay(DIV_ROUND_UP(10 * 1000000, baud)); } + if (port->flags & UPF_HARD_FLOW) { + /* Refresh (Auto) RTS */ + sci_set_mctrl(port, port->mctrl); + } #ifdef CONFIG_SERIAL_SH_SCI_DMA /* -- GitLab From caa6f1c7bcbe15e8cc009c689a10b1c67de48833 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Mon, 8 May 2017 14:23:16 -0700 Subject: [PATCH 271/786] x86/mm/32: Set the '__vmalloc_start_set' flag in initmem_init() commit 861ce4a3244c21b0af64f880d5bfe5e6e2fb9e4a upstream. '__vmalloc_start_set' currently only gets set in initmem_init() when !CONFIG_NEED_MULTIPLE_NODES. This breaks detection of vmalloc address with virt_addr_valid() with CONFIG_NEED_MULTIPLE_NODES=y, causing a kernel crash: [mm/usercopy] 517e1fbeb6: kernel BUG at arch/x86/mm/physaddr.c:78! 
Set '__vmalloc_start_set' appropriately for that case as well. Reported-by: kbuild test robot Signed-off-by: Laura Abbott Reviewed-by: Kees Cook Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: dc16ecf7fd1f ("x86-32: use specific __vmalloc_start_set flag in __virt_addr_valid") Link: http://lkml.kernel.org/r/1494278596-30373-1-git-send-email-labbott@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/numa_32.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 6b7ce6279133..aca6295350f3 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -100,5 +100,6 @@ void __init initmem_init(void) printk(KERN_DEBUG "High memory starts at vaddr %08lx\n", (ulong) pfn_to_kaddr(highstart_pfn)); + __vmalloc_start_set = true; setup_bootmem_allocator(); } -- GitLab From ed13a9c6464b49b654baeba96c833b1129dc4ce4 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Sat, 15 Apr 2017 10:05:08 -0700 Subject: [PATCH 272/786] mfd: omap-usb-tll: Fix inverted bit use for USB TLL mode commit 8b8a84c54aff4256d592dc18346c65ecf6811b45 upstream. Commit 16fa3dc75c22 ("mfd: omap-usb-tll: HOST TLL platform driver") added support for USB TLL, but uses OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF bit the wrong way. The comments in the code are correct, but the inverted use of OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF causes the register to be enabled instead of disabled unlike what the comments say. Without this change the Wrigley 3G LTE modem on droid 4 EHCI bus can be only pinged few times before it stops responding. Fixes: 16fa3dc75c22 ("mfd: omap-usb-tll: HOST TLL platform driver") Signed-off-by: Tony Lindgren Acked-by: Roger Quadros Signed-off-by: Lee Jones Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/omap-usb-tll.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index 1aa74c4c3ced..9d167c9af2c6 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -377,8 +377,8 @@ int omap_tll_init(struct usbhs_omap_platform_data *pdata) * and use SDR Mode */ reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE - | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE); + reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF; } else if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_HSIC) { /* -- GitLab From 773fdcdc0957527577b2f1de994448894f1e3305 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 22 Apr 2017 13:47:23 +0300 Subject: [PATCH 273/786] staging: rtl8188eu: prevent an underflow in rtw_check_beacon_data() commit 784047eb2d3405a35087af70cba46170c5576b25 upstream. The "len" could be as low as -14 so we should check for negatives. 
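A short standalone sketch of why the lower bound matters (MAX_IE_SZ stood in by a placeholder value): a signed negative length slips past the upper-bound test and only turns into a huge size once it is used as an unsigned copy length.

    #include <stdio.h>

    #define MAX_IE_SZ 768   /* placeholder bound for the sketch */

    static int check_beacon_len(int len)
    {
            /* The upper-bound test alone lets a negative length through; the
             * explicit 'len < 0' check is what the fix adds. */
            if (len < 0 || len > MAX_IE_SZ)
                    return -1;
            return 0;
    }

    int main(void)
    {
            int len = -14;

            printf("len=%d -> %s\n", len,
                   check_beacon_len(len) ? "rejected" : "accepted");
            printf("the same value as an unsigned copy length: %zu\n", (size_t)len);
            return 0;
    }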
Fixes: 9a7fe54ddc3a ("staging: r8188eu: Add source files for new driver - part 1") Signed-off-by: Dan Carpenter Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8188eu/core/rtw_ap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c index 553e8d50352f..6513ace1fce6 100644 --- a/drivers/staging/rtl8188eu/core/rtw_ap.c +++ b/drivers/staging/rtl8188eu/core/rtw_ap.c @@ -890,7 +890,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len) return _FAIL; - if (len > MAX_IE_SZ) + if (len < 0 || len > MAX_IE_SZ) return _FAIL; pbss_network->IELength = len; -- GitLab From a1d51f7abf710729822bafeb82c664f6657a4db6 Mon Sep 17 00:00:00 2001 From: Eva Rachel Retuya Date: Mon, 20 Mar 2017 19:27:05 +0800 Subject: [PATCH 274/786] staging: iio: tsl2x7x_core: Fix standard deviation calculation commit cf6c77323a96fc40309cc8a4921ef206cccdd961 upstream. Standard deviation is calculated as the square root of the variance where variance is the mean of sample_sum and length. Correct the computation of statP->stddev in accordance to the proper calculation. Fixes: 3c97c08b5735 ("staging: iio: add TAOS tsl2x7x driver") Reported-by: Abhiram Balasubramanian Signed-off-by: Eva Rachel Retuya Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/staging/iio/light/tsl2x7x_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c index ea15bc1c300c..197201a70d59 100644 --- a/drivers/staging/iio/light/tsl2x7x_core.c +++ b/drivers/staging/iio/light/tsl2x7x_core.c @@ -854,7 +854,7 @@ void tsl2x7x_prox_calculate(int *data, int length, tmp = data[i] - statP->mean; sample_sum += tmp * tmp; } - statP->stddev = int_sqrt((long)sample_sum) / length; + statP->stddev = int_sqrt((long)sample_sum / length); } /** -- GitLab From 56251d138570b4c789ee8e55c4f58f8b658e8ea0 Mon Sep 17 00:00:00 2001 From: Marcin Niestroj Date: Mon, 12 Dec 2016 17:58:42 +0100 Subject: [PATCH 275/786] iio: st_pressure: Fix data sign commit 1b211d48abaa0e12e6e6177c0316ff55d11fdfce upstream. Datasheet of each device (lps331ap, lps25h, lps001wp, lps22hb) says that the pressure and temperature data is a 2's complement. I'm sending this the slow way, as negative pressures on these are pretty unusual and the nature of the fixing of multiple device introduction patches will make it hard to apply to older kernels - Jonathan. 
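For illustration, a self-contained sketch (plain C, independent of the IIO core) of how a raw 24-bit two's complement word turns into a negative value once the channel is treated as signed, which is what changing the scan_type sign from 'u' to 's' in the patch below asks the core to do:

    #include <stdint.h>
    #include <stdio.h>

    /* Sign-extend a raw "width"-bit two's complement sample stored in the
     * low bits of a 32-bit word. Valid for widths up to 31 bits; 16 and 24
     * are the widths that matter for these pressure/temperature channels. */
    static int32_t sample_to_signed(uint32_t raw, unsigned int width)
    {
        uint32_t sign_bit = 1u << (width - 1);

        return (int32_t)(raw ^ sign_bit) - (int32_t)sign_bit;
    }

    int main(void)
    {
        uint32_t raw = 0xFFFFF6; /* -10 in 24-bit two's complement */

        printf("%d\n", sample_to_signed(raw, 24)); /* prints -10 */
        return 0;
    }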
Fixes: 217494e5b780 ("iio:pressure: Add STMicroelectronics pressures driver") Fixes: 2f5effcbd097 ("iio: pressure-core: st: Expand and rename LPS331AP's channel descriptor") Fixes: 7885a8ce6800 ("iio: pressure: st: Add support for new LPS001WP pressure sensor") Fixes: e039e2f5b4da ("iio:st_pressure:initial lps22hb sensor support") Signed-off-by: Marcin Niestroj Reviewed-by: Linus Walleij Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/pressure/st_pressure_core.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 55df9a75eb3a..44e46c159a7e 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -227,7 +227,7 @@ static const struct iio_chan_spec st_press_1_channels[] = { .address = ST_PRESS_1_OUT_XL_ADDR, .scan_index = 0, .scan_type = { - .sign = 'u', + .sign = 's', .realbits = 24, .storagebits = 32, .endianness = IIO_LE, @@ -240,7 +240,7 @@ static const struct iio_chan_spec st_press_1_channels[] = { .address = ST_TEMP_1_OUT_L_ADDR, .scan_index = 1, .scan_type = { - .sign = 'u', + .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_LE, @@ -259,7 +259,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = { .address = ST_PRESS_LPS001WP_OUT_L_ADDR, .scan_index = 0, .scan_type = { - .sign = 'u', + .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_LE, @@ -273,7 +273,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = { .address = ST_TEMP_LPS001WP_OUT_L_ADDR, .scan_index = 1, .scan_type = { - .sign = 'u', + .sign = 's', .realbits = 16, .storagebits = 16, .endianness = IIO_LE, @@ -291,7 +291,7 @@ static const struct iio_chan_spec st_press_lps22hb_channels[] = { .address = ST_PRESS_1_OUT_XL_ADDR, .scan_index = 0, .scan_type = { - .sign = 'u', + .sign = 's', .realbits = 24, .storagebits = 32, .endianness = IIO_LE, -- GitLab From cf308c15103e9b5e13a7719decec86b20d538863 Mon Sep 17 00:00:00 2001 From: Matt Ranostay Date: Fri, 14 Apr 2017 16:38:19 -0700 Subject: [PATCH 276/786] iio: proximity: as3935: recalibrate RCO after resume commit 6272c0de13abf1480f701d38288f28a11b4301c4 upstream. According to the datasheet the RCO must be recalibrated on every power-on-reset. Also remove mutex locking in the calibration function since callers other than the probe function (which doesn't need it) will have a lock. 
Fixes: 24ddb0e4bba4 ("iio: Add AS3935 lightning sensor support") Cc: George McCollister Signed-off-by: Matt Ranostay Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/proximity/as3935.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index 268210ea4990..24fb54398a3b 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c @@ -269,8 +269,6 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private) static void calibrate_as3935(struct as3935_state *st) { - mutex_lock(&st->lock); - /* mask disturber interrupt bit */ as3935_write(st, AS3935_INT, BIT(5)); @@ -280,8 +278,6 @@ static void calibrate_as3935(struct as3935_state *st) mdelay(2); as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); - - mutex_unlock(&st->lock); } #ifdef CONFIG_PM_SLEEP @@ -318,6 +314,8 @@ static int as3935_resume(struct device *dev) val &= ~AS3935_AFE_PWR_BIT; ret = as3935_write(st, AS3935_AFE_GAIN, val); + calibrate_as3935(st); + err_resume: mutex_unlock(&st->lock); -- GitLab From dca02651cee78260ad9b6f1dce8f40b28367b7e2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 10 May 2017 08:51:09 +0300 Subject: [PATCH 277/786] iio: adc: ti_am335x_adc: allocating too much in probe commit 5ba5b437efaa7a502eec393c045d3bf90c92c4e9 upstream. We should be allocating enough information for a tiadc_device struct which is about 400 bytes but instead we allocate enough for a second iio_dev struct which is over 2000 bytes. Fixes: fea89e2dfcea ("iio: adc: ti_am335x_adc: use variable names for sizeof() operator") Signed-off-by: Dan Carpenter Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ti_am335x_adc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 2de1f52f1b19..62b0dec6d777 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -484,7 +484,7 @@ static int tiadc_probe(struct platform_device *pdev) return -EINVAL; } - indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev)); + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev)); if (indio_dev == NULL) { dev_err(&pdev->dev, "failed to allocate iio device\n"); return -ENOMEM; -- GitLab From b51e4b0ac6618814bb1b294086b84aa9a51aaf48 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Tue, 3 Jan 2017 23:55:19 +0200 Subject: [PATCH 278/786] IB/mlx5: Fix kernel to user leak prevention logic commit de8d6e02efbdb259c67832ccf027d7ace9b91d5d upstream. The logic was broken as it failed to update the response length for architectures with PAGE_SIZE larger than 4kB. As a result further extension of the ucontext response struct would fail. Fixes: d69e3bcf7976 ('IB/mlx5: Mmap the HCA's core clock register to user-space') Signed-off-by: Eli Cohen Reviewed-by: Matan Barak Signed-off-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/mlx5/main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 11bfa27b022c..282c9fb0ba95 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1105,13 +1105,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, * pretend we don't support reading the HCA's core clock. This is also * forced by mmap function. 
*/ - if (PAGE_SIZE <= 4096 && - field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { - resp.comp_mask |= - MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; - resp.hca_core_clock_offset = - offsetof(struct mlx5_init_seg, internal_timer_h) % - PAGE_SIZE; + if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { + if (PAGE_SIZE <= 4096) { + resp.comp_mask |= + MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; + resp.hca_core_clock_offset = + offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE; + } resp.response_length += sizeof(resp.hca_core_clock_offset) + sizeof(resp.reserved2); } -- GitLab From 723bd3b9f83fe4ba903351cdaa1e6dfbec8faac0 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 26 Apr 2017 20:50:07 +0900 Subject: [PATCH 279/786] usb: gadget: udc: renesas_usb3: fix pm_runtime functions calling commit cdc876877ebc3f0677b267756d4564e2a429e730 upstream. This patch fixes an issue that this driver is possible to access the registers before pm_runtime_get_sync() if a gadget driver is installed first. After that, oops happens on R-Car Gen3 environment. To avoid it, this patch changes the pm_runtime call timing from probe/remove to udc_start/udc_stop. Fixes: 746bfe63bba3 ("usb: gadget: renesas_usb3: add support for Renesas USB3.0 peripheral controller") Signed-off-by: Yoshihiro Shimoda Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/renesas_usb3.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index fb8fc34827ab..6be61139ce15 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -1707,6 +1707,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget, /* hook up the driver */ usb3->driver = driver; + pm_runtime_enable(usb3_to_dev(usb3)); + pm_runtime_get_sync(usb3_to_dev(usb3)); + renesas_usb3_init_controller(usb3); return 0; @@ -1724,6 +1727,9 @@ static int renesas_usb3_stop(struct usb_gadget *gadget) renesas_usb3_stop_controller(usb3); spin_unlock_irqrestore(&usb3->lock, flags); + pm_runtime_put(usb3_to_dev(usb3)); + pm_runtime_disable(usb3_to_dev(usb3)); + return 0; } @@ -1761,9 +1767,6 @@ static int renesas_usb3_remove(struct platform_device *pdev) { struct renesas_usb3 *usb3 = platform_get_drvdata(pdev); - pm_runtime_put(&pdev->dev); - pm_runtime_disable(&pdev->dev); - usb_del_gadget_udc(&usb3->gadget); __renesas_usb3_ep_free_request(usb3->ep0_req); @@ -1948,9 +1951,6 @@ static int renesas_usb3_probe(struct platform_device *pdev) usb3->workaround_for_vbus = priv->workaround_for_vbus; - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); - dev_info(&pdev->dev, "probed\n"); return 0; -- GitLab From dd65c0958b77d97a9419f1ccfab3fa8c30f61338 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 26 Apr 2017 20:50:08 +0900 Subject: [PATCH 280/786] usb: gadget: udc: renesas_usb3: fix deadlock by spinlock commit 067d6fdc558d2c43f0bfdc7af99630dd5eb08dc5 upstream. This patch fixes an issue that this driver is possible to cause deadlock by double-spinclocked in renesas_usb3_stop_controller(). So, this patch removes spinlock API calling in renesas_usb3_stop(). (In other words, the previous code had a redundant lock.) 
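The underlying rule is that a non-recursive spinlock may be taken by the caller or by the callee on a given path, but never by both; the patch below settles on locking only inside the helper. A condensed kernel-style sketch of the opposite, equally valid convention, with made-up names (not the driver's functions):

    #include <linux/spinlock.h>

    struct ctrl {
        spinlock_t lock;
        /* ... device state ... */
    };

    /* Caller must already hold ctrl->lock; do not take it again here. */
    static void stop_controller_locked(struct ctrl *c)
    {
        /* touch registers/state under the lock held by the caller */
    }

    static int udc_stop(struct ctrl *c)
    {
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        stop_controller_locked(c);   /* no second spin_lock on this path */
        spin_unlock_irqrestore(&c->lock, flags);

        return 0;
    }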
Fixes: 746bfe63bba3 ("usb: gadget: renesas_usb3: add support for Renesas USB3.0 peripheral controller") Signed-off-by: Yoshihiro Shimoda Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/renesas_usb3.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 6be61139ce15..82e301e4da89 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -1718,14 +1718,11 @@ static int renesas_usb3_start(struct usb_gadget *gadget, static int renesas_usb3_stop(struct usb_gadget *gadget) { struct renesas_usb3 *usb3 = gadget_to_renesas_usb3(gadget); - unsigned long flags; - spin_lock_irqsave(&usb3->lock, flags); usb3->softconnect = false; usb3->gadget.speed = USB_SPEED_UNKNOWN; usb3->driver = NULL; renesas_usb3_stop_controller(usb3); - spin_unlock_irqrestore(&usb3->lock, flags); pm_runtime_put(usb3_to_dev(usb3)); pm_runtime_disable(usb3_to_dev(usb3)); -- GitLab From cb53a4e03b2f2341f1b24d8c6785d4ff57928f41 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 26 Apr 2017 20:50:09 +0900 Subject: [PATCH 281/786] usb: gadget: udc: renesas_usb3: lock for PN_ registers access commit 940f538a100c84c6e72813e4ac88bd1753a86945 upstream. This controller disallows to change the PIPE until reading/writing a packet finishes. However. the previous code is not enough to hold the lock in some functions. So, this patch fixes it. Fixes: 746bfe63bba3 ("usb: gadget: renesas_usb3: add support for Renesas USB3.0 peripheral controller") Signed-off-by: Yoshihiro Shimoda Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/renesas_usb3.c | 28 ++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 82e301e4da89..ba78e3f7aea8 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -1401,7 +1401,13 @@ static void usb3_request_done_pipen(struct renesas_usb3 *usb3, struct renesas_usb3_request *usb3_req, int status) { - usb3_pn_stop(usb3); + unsigned long flags; + + spin_lock_irqsave(&usb3->lock, flags); + if (usb3_pn_change(usb3, usb3_ep->num)) + usb3_pn_stop(usb3); + spin_unlock_irqrestore(&usb3->lock, flags); + usb3_disable_pipe_irq(usb3, usb3_ep->num); usb3_request_done(usb3_ep, usb3_req, status); @@ -1430,30 +1436,46 @@ static void usb3_irq_epc_pipen_bfrdy(struct renesas_usb3 *usb3, int num) { struct renesas_usb3_ep *usb3_ep = usb3_get_ep(usb3, num); struct renesas_usb3_request *usb3_req = usb3_get_request(usb3_ep); + bool done = false; if (!usb3_req) return; + spin_lock(&usb3->lock); + if (usb3_pn_change(usb3, num)) + goto out; + if (usb3_ep->dir_in) { /* Do not stop the IN pipe here to detect LSTTR interrupt */ if (!usb3_write_pipe(usb3_ep, usb3_req, USB3_PN_WRITE)) usb3_clear_bit(usb3, PN_INT_BFRDY, USB3_PN_INT_ENA); } else { if (!usb3_read_pipe(usb3_ep, usb3_req, USB3_PN_READ)) - usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); + done = true; } + +out: + /* need to unlock because usb3_request_done_pipen() locks it */ + spin_unlock(&usb3->lock); + + if (done) + usb3_request_done_pipen(usb3, usb3_ep, usb3_req, 0); } static void usb3_irq_epc_pipen(struct renesas_usb3 *usb3, int num) { u32 pn_int_sta; - if (usb3_pn_change(usb3, num) < 0) + spin_lock(&usb3->lock); + if (usb3_pn_change(usb3, num) < 0) { + spin_unlock(&usb3->lock); return; + } pn_int_sta = usb3_read(usb3, 
USB3_PN_INT_STA); pn_int_sta &= usb3_read(usb3, USB3_PN_INT_ENA); usb3_write(usb3, pn_int_sta, USB3_PN_INT_STA); + spin_unlock(&usb3->lock); if (pn_int_sta & PN_INT_LSTTR) usb3_irq_epc_pipen_lsttr(usb3, num); if (pn_int_sta & PN_INT_BFRDY) -- GitLab From 12bfbe157d066696a8b34406cb0e3353346f6fc6 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 10 May 2017 18:18:29 +0200 Subject: [PATCH 282/786] USB: hub: fix SS max number of ports commit 93491ced3c87c94b12220dbac0527e1356702179 upstream. Add define for the maximum number of ports on a SuperSpeed hub as per USB 3.1 spec Table 10-5, and use it when verifying the retrieved hub descriptor. This specifically avoids benign attempts to update the DeviceRemovable mask for non-existing ports (should we get that far). Fixes: dbe79bbe9dcb ("USB 3.0 Hub Changes") Acked-by: Alan Stern Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hub.c | 8 +++++++- include/uapi/linux/usb/ch11.h | 3 +++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 8714b352e57f..f953d6d647f2 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1334,7 +1334,13 @@ static int hub_configure(struct usb_hub *hub, if (ret < 0) { message = "can't read hub descriptor"; goto fail; - } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) { + } + + maxchild = USB_MAXCHILDREN; + if (hub_is_superspeed(hdev)) + maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS); + + if (hub->descriptor->bNbrPorts > maxchild) { message = "hub has too many ports!"; ret = -ENODEV; goto fail; diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h index 361297e96f58..576c704e3fb8 100644 --- a/include/uapi/linux/usb/ch11.h +++ b/include/uapi/linux/usb/ch11.h @@ -22,6 +22,9 @@ */ #define USB_MAXCHILDREN 31 +/* See USB 3.1 spec Table 10-5 */ +#define USB_SS_MAXPORTS 15 + /* * Hub request types */ -- GitLab From 7b5bce3a5128108ec14dc22e8a9e46cd1a3b6c54 Mon Sep 17 00:00:00 2001 From: Anton Bondarenko Date: Sun, 7 May 2017 01:53:46 +0200 Subject: [PATCH 283/786] usb: core: fix potential memory leak in error path during hcd creation commit 1a744d2eb76aaafb997fda004ae3ae62a1538f85 upstream. Free memory allocated for address0_mutex if allocation of bandwidth_mutex failed. Fixes: feb26ac31a2a ("usb: core: hub: hub_port_init lock controller instead of bus") Signed-off-by: Anton Bondarenko Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/hcd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 3b9735abf2e0..8a7c6bbaed7e 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -2535,6 +2535,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver, hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex), GFP_KERNEL); if (!hcd->bandwidth_mutex) { + kfree(hcd->address0_mutex); kfree(hcd); dev_dbg(dev, "hcd bandwidth mutex alloc failed\n"); return NULL; -- GitLab From 9ae5dac225e28ce165464d8880ecbc147708e12b Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 10 May 2017 18:18:26 +0200 Subject: [PATCH 284/786] USB: usbip: fix nonconforming hub descriptor commit ec963b412a54aac8e527708ecad06a6988a86fb4 upstream. Fix up the root-hub descriptor to accommodate the variable-length DeviceRemovable and PortPwrCtrlMask fields, while marking all ports as removable (and leaving the reserved bit zero unset). 
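The arithmetic behind those variable-length fields can be checked with a short standalone program (plain C, not the usbip code): port bits start at bit 1 because bit 0 is reserved, so each per-port bitmap needs bNbrPorts/8 + 1 bytes:

    #include <stdio.h>

    /* Bytes needed for a per-port bitmap in a hub descriptor: bit 0 is
     * reserved, ports occupy bits 1..N, so the field is N/8 + 1 bytes. */
    static unsigned int port_bitmap_bytes(unsigned int nports)
    {
        return nports / 8 + 1;
    }

    int main(void)
    {
        unsigned int n;

        for (n = 1; n <= 16; n++)
            printf("%2u ports -> %u byte(s) per bitmap\n",
                   n, port_bitmap_bytes(n));
        return 0;
    }

Seven ports fit in one byte; the eighth port is what pushes each bitmap to two bytes, which is where a fixed-size descriptor layout goes wrong.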
Also add a build-time constraint on VHCI_HC_PORTS which must never be greater than USB_MAXCHILDREN (but this was only enforced through a KConfig constant). This specifically fixes the descriptor layout whenever VHCI_HC_PORTS is greater than seven (default is 8). Fixes: 04679b3489e0 ("Staging: USB/IP: add client driver") Cc: Takahiro Hirofuchi Cc: Valentina Manea Signed-off-by: Johan Hovold Acked-by: Shuah Khan Signed-off-by: Greg Kroah-Hartman --- drivers/usb/usbip/vhci_hcd.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 03eccf29ace0..d6dc165e924b 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -235,14 +235,19 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf) static inline void hub_descriptor(struct usb_hub_descriptor *desc) { + int width; + memset(desc, 0, sizeof(*desc)); desc->bDescriptorType = USB_DT_HUB; - desc->bDescLength = 9; desc->wHubCharacteristics = cpu_to_le16( HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); + desc->bNbrPorts = VHCI_HC_PORTS; - desc->u.hs.DeviceRemovable[0] = 0xff; - desc->u.hs.DeviceRemovable[1] = 0xff; + BUILD_BUG_ON(VHCI_HC_PORTS > USB_MAXCHILDREN); + width = desc->bNbrPorts / 8 + 1; + desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width; + memset(&desc->u.hs.DeviceRemovable[0], 0, width); + memset(&desc->u.hs.DeviceRemovable[width], 0xff, width); } static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, -- GitLab From 374aceef5912f3438a1cc582a2007551f0fbdb15 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 2 Feb 2017 12:53:04 -0200 Subject: [PATCH 285/786] pvrusb2: reduce stack usage pvr2_eeprom_analyze() commit 6830733d53a4517588e56227b9c8538633f0c496 upstream. The driver uses a relatively large data structure on the stack, which showed up on my radar as we get a warning with the "latent entropy" GCC plugin: drivers/media/usb/pvrusb2/pvrusb2-eeprom.c:153:1: error: the frame size of 1376 bytes is larger than 1152 bytes [-Werror=frame-larger-than=] The warning is usually hidden as we raise the warning limit to 2048 when the plugin is enabled, but I'd like to lower that again in the future, and making this function smaller helps to do that without build regressions. Further analysis shows that putting an 'i2c_client' structure on the stack is not really supported, as the embedded 'struct device' is not initialized here, and we are only saved by the fact that the function that is called here does not use the pointer at all. 
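The quoted warning comes from GCC's per-function frame-size check; the same class of warning can be reproduced outside the kernel with the corresponding option (the threshold and structure size here are arbitrary):

    /* big_frame.c - compile with:  gcc -Wframe-larger-than=512 -c big_frame.c */
    struct big { char buf[1024]; };

    int sum_big(void)
    {
        struct big b = { { 0 } };  /* ~1 KiB on the stack trips the warning */
        int i, s = 0;

        for (i = 0; i < (int)sizeof(b.buf); i++)
            s += b.buf[i];
        return s;
    }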
Fixes: d855497edbfb ("V4L/DVB (4228a): pvrusb2 to kernel 2.6.18") Signed-off-by: Arnd Bergmann Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/pvrusb2/pvrusb2-eeprom.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c index e1907cd0c3b7..7613d1fee104 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c @@ -123,15 +123,10 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw) memset(&tvdata,0,sizeof(tvdata)); eeprom = pvr2_eeprom_fetch(hdw); - if (!eeprom) return -EINVAL; - - { - struct i2c_client fake_client; - /* Newer version expects a useless client interface */ - fake_client.addr = hdw->eeprom_addr; - fake_client.adapter = &hdw->i2c_adap; - tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom); - } + if (!eeprom) + return -EINVAL; + + tveeprom_hauppauge_analog(NULL, &tvdata, eeprom); trace_eeprom("eeprom assumed v4l tveeprom module"); trace_eeprom("eeprom direct call results:"); -- GitLab From c8091f0e85493b9c8a3edfc60fa434e70a0949a4 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 10 May 2017 18:18:25 +0200 Subject: [PATCH 286/786] USB: gadget: dummy_hcd: fix hub-descriptor removable fields commit d81182ce30dbd497a1e7047d7fda2af040347790 upstream. Flag the first and only port as removable while also leaving the remaining bits (including the reserved bit zero) unset in accordance with the specifications: "Within a byte, if no port exists for a given location, the bit field representing the port characteristics shall be 0." Also add a comment marking the legacy PortPwrCtrlMask field. Fixes: 1cd8fd2887e1 ("usb: gadget: dummy_hcd: add SuperSpeed support") Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: Tatyana Brokhman Signed-off-by: Johan Hovold Acked-by: Alan Stern Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/dummy_hcd.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 4fa5de2eb501..69226dcf1443 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -2009,7 +2009,7 @@ ss_hub_descriptor(struct usb_hub_descriptor *desc) HUB_CHAR_COMMON_OCPM); desc->bNbrPorts = 1; desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/ - desc->u.ss.DeviceRemovable = 0xffff; + desc->u.ss.DeviceRemovable = 0; } static inline void hub_descriptor(struct usb_hub_descriptor *desc) @@ -2021,8 +2021,8 @@ static inline void hub_descriptor(struct usb_hub_descriptor *desc) HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM); desc->bNbrPorts = 1; - desc->u.hs.DeviceRemovable[0] = 0xff; - desc->u.hs.DeviceRemovable[1] = 0xff; + desc->u.hs.DeviceRemovable[0] = 0; + desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */ } static int dummy_hub_control( -- GitLab From f75f4d196ab58f21746a0cf624bd00f2153273c7 Mon Sep 17 00:00:00 2001 From: Chris Brandt Date: Thu, 27 Apr 2017 12:12:02 -0700 Subject: [PATCH 287/786] usb: r8a66597-hcd: select a different endpoint on timeout commit 1f873d857b6c2fefb4dada952674aa01bcfb92bd upstream. If multiple endpoints on a single device have pending IN URBs and one endpoint times out due to NAKs (perfectly legal), select a different endpoint URB to try. The existing code only checked to see another device address has pending URBs and ignores other IN endpoints on the current device address. 
This leads to endpoints never getting serviced if one endpoint is using NAK as a flow control method. Fixes: 5d3043586db4 ("usb: r8a66597-hcd: host controller driver for R8A6659") Signed-off-by: Chris Brandt Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/r8a66597-hcd.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index bfa7fa3d2eea..9c3413d550bb 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -1785,6 +1785,7 @@ static void r8a66597_td_timer(unsigned long _r8a66597) pipe = td->pipe; pipe_stop(r8a66597, pipe); + /* Select a different address or endpoint */ new_td = td; do { list_move_tail(&new_td->queue, @@ -1794,7 +1795,8 @@ static void r8a66597_td_timer(unsigned long _r8a66597) new_td = td; break; } - } while (td != new_td && td->address == new_td->address); + } while (td != new_td && td->address == new_td->address && + td->pipe->info.epnum == new_td->pipe->info.epnum); start_transfer(r8a66597, new_td); -- GitLab From 07612c1227e8532e840f457f5b95596f7487e0f6 Mon Sep 17 00:00:00 2001 From: Chris Brandt Date: Thu, 27 Apr 2017 12:12:49 -0700 Subject: [PATCH 288/786] usb: r8a66597-hcd: decrease timeout commit dd14a3e9b92ac6f0918054f9e3477438760a4fa6 upstream. The timeout for BULK packets was 300ms which is a long time if other endpoints or devices are waiting for their turn. Changing it to 50ms greatly increased the overall performance for multi-endpoint devices. Fixes: 5d3043586db4 ("usb: r8a66597-hcd: host controller driver for R8A6659") Signed-off-by: Chris Brandt Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/r8a66597-hcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 9c3413d550bb..7bf78be1fd32 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -1269,7 +1269,7 @@ static void set_td_timer(struct r8a66597 *r8a66597, struct r8a66597_td *td) time = 30; break; default: - time = 300; + time = 50; break; } -- GitLab From 02d009e865a830a16d932c4ea2fce158642c0bbe Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Mon, 13 Feb 2017 12:38:39 +0200 Subject: [PATCH 289/786] ath10k: fix napi crash during rmmod when probe firmware fails commit 1427228d5869f5804b03d47acfa4a88122572a78 upstream. This fixes the below crash when ath10k probe firmware fails, NAPI polling tries to access a rx ring resource which was never allocated. An easy way to reproduce this is easy to remove all the firmware files, load ath10k modules and ath10k will crash when calling 'rmmod ath10k_pci'. The fix is to call napi_enable() from ath10k_pci_hif_start() so that it matches with napi_disable() being called from ath10k_pci_hif_stop(). Big thanks to Mohammed Shafi Shajakhan who debugged this and provided first version of the fix. In this patch I just fix the actual problem in pci.c instead of having a workaround in core.c. BUG: unable to handle kernel NULL pointer dereference at (null) IP: __ath10k_htt_rx_ring_fill_n+0x19/0x230 [ath10k_core] __ath10k_htt_rx_ring_fill_n+0x19/0x230 [ath10k_core] Call Trace: [] ath10k_htt_rx_msdu_buff_replenish+0x42/0x90 [ath10k_core] [] ath10k_htt_txrx_compl_task+0x433/0x17d0 [ath10k_core] [] ? __wake_up_common+0x4d/0x80 [] ? cpu_load_update+0xdc/0x150 [] ? 
ath10k_pci_read32+0xd/0x10 [ath10k_pci] [] ath10k_pci_napi_poll+0x47/0x110 [ath10k_pci] [] net_rx_action+0x20f/0x370 Reported-by: Ben Greear Fixes: 3c97f5de1f28 ("ath10k: implement NAPI support") Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/ath/ath10k/pci.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 0457e315d336..6063cf439d3d 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1647,6 +1647,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); + napi_enable(&ar->napi); + ath10k_pci_irq_enable(ar); ath10k_pci_rx_post(ar); @@ -2531,7 +2533,6 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) ath10k_err(ar, "could not wake up target CPU: %d\n", ret); goto err_ce; } - napi_enable(&ar->napi); return 0; -- GitLab From f28ba80c6a3e8bf7ec96e06667dd82e98ae1c672 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 9 Jan 2017 11:20:16 +0300 Subject: [PATCH 290/786] misc: mic: double free on ioctl error path commit 816c9311f1144a03da1fdc4feb2f6b0d3299fca0 upstream. This function only has one caller. Freeing "vdev" here leads to a use after free bug. There are several other error paths in this function but this is the only one which frees "vdev". It looks like the kfree() can be safely removed. Fixes: 61e9c905df78 ("misc: mic: Enable VOP host side functionality") Signed-off-by: Dan Carpenter Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mic/vop/vop_vringh.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/misc/mic/vop/vop_vringh.c b/drivers/misc/mic/vop/vop_vringh.c index 88e45234d527..fed992e2c258 100644 --- a/drivers/misc/mic/vop/vop_vringh.c +++ b/drivers/misc/mic/vop/vop_vringh.c @@ -292,7 +292,6 @@ static int vop_virtio_add_device(struct vop_vdev *vdev, if (ret) { dev_err(vop_dev(vdev), "%s %d err %d\n", __func__, __LINE__, ret); - kfree(vdev); return ret; } -- GitLab From 2abac4084fb91021371a97fc42963f3e01228a3b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 8 May 2017 15:55:17 -0700 Subject: [PATCH 291/786] drivers/misc/c2port/c2port-duramar2150.c: checking for NULL instead of IS_ERR() commit 8128a31eaadbcdfa37774bbd28f3f00bac69996a upstream. c2port_device_register() never returns NULL, it uses error pointers. 
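For readers less familiar with the convention, a kernel-style sketch of the error-pointer check this fix switches to; 'foo_register' and 'foo_probe' are made-up stand-ins, while IS_ERR/PTR_ERR come from <linux/err.h>:

    #include <linux/err.h>

    struct foo_device;

    /* Hypothetical registration call following the kernel convention of
     * returning a valid pointer or ERR_PTR(-E...), never NULL on failure. */
    struct foo_device *foo_register(const char *name);

    static int foo_probe(void)
    {
        struct foo_device *dev = foo_register("uc");

        if (IS_ERR(dev))          /* not: if (!dev) */
            return PTR_ERR(dev);  /* propagate the encoded errno */

        /* ... use dev ... */
        return 0;
    }

c2port_device_register() follows the same convention, so a NULL check can never catch its failures.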
Link: http://lkml.kernel.org/r/20170412083321.GC3250@mwanda Fixes: 65131cd52b9e ("c2port: add c2port support for Eurotech Duramar 2150") Signed-off-by: Dan Carpenter Acked-by: Rodolfo Giometti Cc: Greg Kroah-Hartman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- drivers/misc/c2port/c2port-duramar2150.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c index 5484301d57d9..3dc61ea7dc64 100644 --- a/drivers/misc/c2port/c2port-duramar2150.c +++ b/drivers/misc/c2port/c2port-duramar2150.c @@ -129,8 +129,8 @@ static int __init duramar2150_c2port_init(void) duramar2150_c2port_dev = c2port_device_register("uc", &duramar2150_c2port_ops, NULL); - if (!duramar2150_c2port_dev) { - ret = -ENODEV; + if (IS_ERR(duramar2150_c2port_dev)) { + ret = PTR_ERR(duramar2150_c2port_dev); goto free_region; } -- GitLab From 4581d7dd44f336d82cac7d5e5ae5ce429b52c98a Mon Sep 17 00:00:00 2001 From: YD Tseng Date: Fri, 9 Jun 2017 14:48:40 +0300 Subject: [PATCH 292/786] usb: xhci: Fix USB 3.1 supported protocol parsing commit b72eb8435b25be3a1880264cf32ac91e626ba5ba upstream. xHCI host controllers can have both USB 3.1 and 3.0 extended speed protocol lists. If the USB3.1 speed is parsed first and 3.0 second then the minor revision supported will be overwritten by the 3.0 speeds and the USB3 roothub will only show support for USB 3.0 speeds. This was the case with a xhci controller with the supported protocol capability listed below. In xhci-mem.c, the USB 3.1 speed is parsed first, the min_rev of usb3_rhub is set as 0x10. And then USB 3.0 is parsed. However, the min_rev of usb3_rhub will be changed to 0x00. If USB 3.1 device is connected behind this host controller, the speed of USB 3.1 device just reports 5G speed using lsusb. 00 01 02 03 04 05 06 07 08 09 0A 0B 0C 0D 0E 0F 00 01 08 00 00 00 00 00 40 00 00 00 00 00 00 00 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 20 02 08 10 03 55 53 42 20 01 02 00 00 00 00 00 00 //USB 3.1 30 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 40 02 08 00 03 55 53 42 20 03 06 00 00 00 00 00 00 //USB 3.0 50 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 60 02 08 00 02 55 53 42 20 09 0E 19 00 00 00 00 00 //USB 2.0 70 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 This patch fixes the issue by only owerwriting the minor revision if it is higher than the existing one. 
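A standalone sketch of the resulting selection rule (plain C; the table values mirror the capability dump above, the rest is illustrative):

    #include <stdio.h>

    /* Walk (major, minor) protocol entries and keep, for major 0x03, only
     * the highest minor revision seen, so a later 3.0 entry cannot clobber
     * an earlier 3.1 entry. */
    int main(void)
    {
        static const struct { unsigned char major, minor; } caps[] = {
            { 0x03, 0x10 },  /* USB 3.1 entry */
            { 0x03, 0x00 },  /* USB 3.0 entry */
            { 0x02, 0x00 },  /* USB 2.0 entry */
        };
        unsigned char usb3_min_rev = 0;
        unsigned int i;

        for (i = 0; i < sizeof(caps) / sizeof(caps[0]); i++) {
            if (caps[i].major != 0x03)
                continue;
            if (usb3_min_rev < caps[i].minor)
                usb3_min_rev = caps[i].minor;  /* only ever raised */
        }
        printf("USB3 minor revision: 0x%02x\n", (unsigned int)usb3_min_rev);
        return 0;
    }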
[reword commit message -Mathias] Signed-off-by: YD Tseng Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-mem.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 8c6eafe8966c..b7114c3f52aa 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2123,11 +2123,12 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, { u32 temp, port_offset, port_count; int i; - u8 major_revision; + u8 major_revision, minor_revision; struct xhci_hub *rhub; temp = readl(addr); major_revision = XHCI_EXT_PORT_MAJOR(temp); + minor_revision = XHCI_EXT_PORT_MINOR(temp); if (major_revision == 0x03) { rhub = &xhci->usb3_rhub; @@ -2141,7 +2142,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, return; } rhub->maj_rev = XHCI_EXT_PORT_MAJOR(temp); - rhub->min_rev = XHCI_EXT_PORT_MINOR(temp); + + if (rhub->min_rev < minor_revision) + rhub->min_rev = minor_revision; /* Port offset and count in the third dword, see section 7.2 */ temp = readl(addr + 2); -- GitLab From 06178662474ca54f92664f72e2e3c48d716317f0 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Fri, 9 Jun 2017 14:48:41 +0300 Subject: [PATCH 293/786] usb: xhci: ASMedia ASM1042A chipset need shorts TX quirk commit d2f48f05cd2a2a0a708fbfa45f1a00a87660d937 upstream. When plugging an USB webcam I see the following message: [106385.615559] xhci_hcd 0000:04:00.0: WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk? [106390.583860] handle_tx_event: 913 callbacks suppressed With this patch applied, I get no more printing of this message. Signed-off-by: Corentin Labbe Signed-off-by: Mathias Nyman Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-pci.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index e7d6752eff32..69864ba38698 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -201,6 +201,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1042) xhci->quirks |= XHCI_BROKEN_STREAMS; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == 0x1142) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; -- GitLab From 3ff5f4f6a8a7c4a4f06e0b5f492ecd9a36460fb7 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Thu, 8 Jun 2017 13:55:59 -0400 Subject: [PATCH 294/786] USB: gadget: fix GPF in gadgetfs commit f50b878fed33e360d01dcdc31a8eeb1815d033d5 upstream. 
A NULL-pointer dereference bug in gadgetfs was uncovered by syzkaller: > kasan: GPF could be caused by NULL-ptr deref or user memory access > general protection fault: 0000 [#1] SMP KASAN > Dumping ftrace buffer: > (ftrace buffer empty) > Modules linked in: > CPU: 2 PID: 4820 Comm: syz-executor0 Not tainted 4.12.0-rc4+ #5 > Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 > task: ffff880039542dc0 task.stack: ffff88003bdd0000 > RIP: 0010:__list_del_entry_valid+0x7e/0x170 lib/list_debug.c:51 > RSP: 0018:ffff88003bdd6e50 EFLAGS: 00010246 > RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000010000 > RDX: 0000000000000000 RSI: ffffffff86504948 RDI: ffffffff86504950 > RBP: ffff88003bdd6e68 R08: ffff880039542dc0 R09: ffffffff8778ce00 > R10: ffff88003bdd6e68 R11: dffffc0000000000 R12: 0000000000000000 > R13: dffffc0000000000 R14: 1ffff100077badd2 R15: ffffffff864d2e40 > FS: 0000000000000000(0000) GS:ffff88006dc00000(0000) knlGS:0000000000000000 > CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 > CR2: 000000002014aff9 CR3: 0000000006022000 CR4: 00000000000006e0 > Call Trace: > __list_del_entry include/linux/list.h:116 [inline] > list_del include/linux/list.h:124 [inline] > usb_gadget_unregister_driver+0x166/0x4c0 drivers/usb/gadget/udc/core.c:1387 > dev_release+0x80/0x160 drivers/usb/gadget/legacy/inode.c:1187 > __fput+0x332/0x7f0 fs/file_table.c:209 > ____fput+0x15/0x20 fs/file_table.c:245 > task_work_run+0x19b/0x270 kernel/task_work.c:116 > exit_task_work include/linux/task_work.h:21 [inline] > do_exit+0x18a3/0x2820 kernel/exit.c:878 > do_group_exit+0x149/0x420 kernel/exit.c:982 > get_signal+0x77f/0x1780 kernel/signal.c:2318 > do_signal+0xd2/0x2130 arch/x86/kernel/signal.c:808 > exit_to_usermode_loop+0x1a7/0x240 arch/x86/entry/common.c:157 > prepare_exit_to_usermode arch/x86/entry/common.c:194 [inline] > syscall_return_slowpath+0x3ba/0x410 arch/x86/entry/common.c:263 > entry_SYSCALL_64_fastpath+0xbc/0xbe > RIP: 0033:0x4461f9 > RSP: 002b:00007fdac2b1ecf8 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca > RAX: fffffffffffffe00 RBX: 00000000007080c8 RCX: 00000000004461f9 > RDX: 0000000000000000 RSI: 0000000000000000 RDI: 00000000007080c8 > RBP: 00000000007080a8 R08: 0000000000000000 R09: 0000000000000000 > R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 > R13: 0000000000000000 R14: 00007fdac2b1f9c0 R15: 00007fdac2b1f700 > Code: 00 00 00 00 ad de 49 39 c4 74 6a 48 b8 00 02 00 00 00 00 ad de > 48 89 da 48 39 c3 74 74 48 c1 ea 03 48 b8 00 00 00 00 00 fc ff df <80> > 3c 02 00 0f 85 92 00 00 00 48 8b 13 48 39 f2 75 66 49 8d 7c > RIP: __list_del_entry_valid+0x7e/0x170 lib/list_debug.c:51 RSP: ffff88003bdd6e50 > ---[ end trace 30e94b1eec4831c8 ]--- > Kernel panic - not syncing: Fatal exception The bug was caused by dev_release() failing to turn off its gadget_registered flag after unregistering the gadget driver. As a result, when a later user closed the device file before writing a valid set of descriptors, dev_release() thought the gadget had been registered and tried to unregister it, even though it had not been. This led to the NULL pointer dereference. The fix is simple: turn off the flag when the gadget is unregistered. 
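Reduced to its core, the fix is the usual rule of clearing a state flag at the point where the state it tracks is undone. A self-contained sketch with illustrative names (not the gadgetfs structures):

    #include <stdbool.h>
    #include <stdio.h>

    struct dev_state {
        bool registered;
    };

    static void do_unregister(void)
    {
        puts("unregister");
    }

    static void dev_release(struct dev_state *d)
    {
        if (d->registered) {
            do_unregister();
            d->registered = false;  /* without this, a later release would
                                     * try to unregister a second time */
        }
    }

    int main(void)
    {
        struct dev_state d = { .registered = true };

        dev_release(&d);  /* unregisters once */
        dev_release(&d);  /* now a no-op instead of a double unregister */
        return 0;
    }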
Signed-off-by: Alan Stern Reported-and-tested-by: Andrey Konovalov Signed-off-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/legacy/inode.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 1468d8f085a3..391e4853f4b0 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1183,8 +1183,10 @@ dev_release (struct inode *inode, struct file *fd) /* closing ep0 === shutdown all */ - if (dev->gadget_registered) + if (dev->gadget_registered) { usb_gadget_unregister_driver (&gadgetfs_driver); + dev->gadget_registered = false; + } /* at this point "good" hardware has disconnected the * device from USB; the host won't see it any more. -- GitLab From 0c0d3d8730db10baf92ad82fd29aca53a1936483 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 13 Jun 2017 15:23:42 -0400 Subject: [PATCH 295/786] USB: gadgetfs, dummy-hcd, net2280: fix locking for callbacks commit f16443a034c7aa359ddf6f0f9bc40d01ca31faea upstream. Using the syzkaller kernel fuzzer, Andrey Konovalov generated the following error in gadgetfs: > BUG: KASAN: use-after-free in __lock_acquire+0x3069/0x3690 > kernel/locking/lockdep.c:3246 > Read of size 8 at addr ffff88003a2bdaf8 by task kworker/3:1/903 > > CPU: 3 PID: 903 Comm: kworker/3:1 Not tainted 4.12.0-rc4+ #35 > Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 > Workqueue: usb_hub_wq hub_event > Call Trace: > __dump_stack lib/dump_stack.c:16 [inline] > dump_stack+0x292/0x395 lib/dump_stack.c:52 > print_address_description+0x78/0x280 mm/kasan/report.c:252 > kasan_report_error mm/kasan/report.c:351 [inline] > kasan_report+0x230/0x340 mm/kasan/report.c:408 > __asan_report_load8_noabort+0x19/0x20 mm/kasan/report.c:429 > __lock_acquire+0x3069/0x3690 kernel/locking/lockdep.c:3246 > lock_acquire+0x22d/0x560 kernel/locking/lockdep.c:3855 > __raw_spin_lock include/linux/spinlock_api_smp.h:142 [inline] > _raw_spin_lock+0x2f/0x40 kernel/locking/spinlock.c:151 > spin_lock include/linux/spinlock.h:299 [inline] > gadgetfs_suspend+0x89/0x130 drivers/usb/gadget/legacy/inode.c:1682 > set_link_state+0x88e/0xae0 drivers/usb/gadget/udc/dummy_hcd.c:455 > dummy_hub_control+0xd7e/0x1fb0 drivers/usb/gadget/udc/dummy_hcd.c:2074 > rh_call_control drivers/usb/core/hcd.c:689 [inline] > rh_urb_enqueue drivers/usb/core/hcd.c:846 [inline] > usb_hcd_submit_urb+0x92f/0x20b0 drivers/usb/core/hcd.c:1650 > usb_submit_urb+0x8b2/0x12c0 drivers/usb/core/urb.c:542 > usb_start_wait_urb+0x148/0x5b0 drivers/usb/core/message.c:56 > usb_internal_control_msg drivers/usb/core/message.c:100 [inline] > usb_control_msg+0x341/0x4d0 drivers/usb/core/message.c:151 > usb_clear_port_feature+0x74/0xa0 drivers/usb/core/hub.c:412 > hub_port_disable+0x123/0x510 drivers/usb/core/hub.c:4177 > hub_port_init+0x1ed/0x2940 drivers/usb/core/hub.c:4648 > hub_port_connect drivers/usb/core/hub.c:4826 [inline] > hub_port_connect_change drivers/usb/core/hub.c:4999 [inline] > port_event drivers/usb/core/hub.c:5105 [inline] > hub_event+0x1ae1/0x3d40 drivers/usb/core/hub.c:5185 > process_one_work+0xc08/0x1bd0 kernel/workqueue.c:2097 > process_scheduled_works kernel/workqueue.c:2157 [inline] > worker_thread+0xb2b/0x1860 kernel/workqueue.c:2233 > kthread+0x363/0x440 kernel/kthread.c:231 > ret_from_fork+0x2a/0x40 arch/x86/entry/entry_64.S:424 > > Allocated by task 9958: > save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 > save_stack+0x43/0xd0 mm/kasan/kasan.c:513 > 
set_track mm/kasan/kasan.c:525 [inline] > kasan_kmalloc+0xad/0xe0 mm/kasan/kasan.c:617 > kmem_cache_alloc_trace+0x87/0x280 mm/slub.c:2745 > kmalloc include/linux/slab.h:492 [inline] > kzalloc include/linux/slab.h:665 [inline] > dev_new drivers/usb/gadget/legacy/inode.c:170 [inline] > gadgetfs_fill_super+0x24f/0x540 drivers/usb/gadget/legacy/inode.c:1993 > mount_single+0xf6/0x160 fs/super.c:1192 > gadgetfs_mount+0x31/0x40 drivers/usb/gadget/legacy/inode.c:2019 > mount_fs+0x9c/0x2d0 fs/super.c:1223 > vfs_kern_mount.part.25+0xcb/0x490 fs/namespace.c:976 > vfs_kern_mount fs/namespace.c:2509 [inline] > do_new_mount fs/namespace.c:2512 [inline] > do_mount+0x41b/0x2d90 fs/namespace.c:2834 > SYSC_mount fs/namespace.c:3050 [inline] > SyS_mount+0xb0/0x120 fs/namespace.c:3027 > entry_SYSCALL_64_fastpath+0x1f/0xbe > > Freed by task 9960: > save_stack_trace+0x1b/0x20 arch/x86/kernel/stacktrace.c:59 > save_stack+0x43/0xd0 mm/kasan/kasan.c:513 > set_track mm/kasan/kasan.c:525 [inline] > kasan_slab_free+0x72/0xc0 mm/kasan/kasan.c:590 > slab_free_hook mm/slub.c:1357 [inline] > slab_free_freelist_hook mm/slub.c:1379 [inline] > slab_free mm/slub.c:2961 [inline] > kfree+0xed/0x2b0 mm/slub.c:3882 > put_dev+0x124/0x160 drivers/usb/gadget/legacy/inode.c:163 > gadgetfs_kill_sb+0x33/0x60 drivers/usb/gadget/legacy/inode.c:2027 > deactivate_locked_super+0x8d/0xd0 fs/super.c:309 > deactivate_super+0x21e/0x310 fs/super.c:340 > cleanup_mnt+0xb7/0x150 fs/namespace.c:1112 > __cleanup_mnt+0x1b/0x20 fs/namespace.c:1119 > task_work_run+0x1a0/0x280 kernel/task_work.c:116 > exit_task_work include/linux/task_work.h:21 [inline] > do_exit+0x18a8/0x2820 kernel/exit.c:878 > do_group_exit+0x14e/0x420 kernel/exit.c:982 > get_signal+0x784/0x1780 kernel/signal.c:2318 > do_signal+0xd7/0x2130 arch/x86/kernel/signal.c:808 > exit_to_usermode_loop+0x1ac/0x240 arch/x86/entry/common.c:157 > prepare_exit_to_usermode arch/x86/entry/common.c:194 [inline] > syscall_return_slowpath+0x3ba/0x410 arch/x86/entry/common.c:263 > entry_SYSCALL_64_fastpath+0xbc/0xbe > > The buggy address belongs to the object at ffff88003a2bdae0 > which belongs to the cache kmalloc-1024 of size 1024 > The buggy address is located 24 bytes inside of > 1024-byte region [ffff88003a2bdae0, ffff88003a2bdee0) > The buggy address belongs to the page: > page:ffffea0000e8ae00 count:1 mapcount:0 mapping: (null) > index:0x0 compound_mapcount: 0 > flags: 0x100000000008100(slab|head) > raw: 0100000000008100 0000000000000000 0000000000000000 0000000100170017 > raw: ffffea0000ed3020 ffffea0000f5f820 ffff88003e80efc0 0000000000000000 > page dumped because: kasan: bad access detected > > Memory state around the buggy address: > ffff88003a2bd980: fb fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc > ffff88003a2bda00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc > >ffff88003a2bda80: fc fc fc fc fc fc fc fc fc fc fc fc fb fb fb fb > ^ > ffff88003a2bdb00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb > ffff88003a2bdb80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb > ================================================================== What this means is that the gadgetfs_suspend() routine was trying to access dev->lock after it had been deallocated. The root cause is a race in the dummy_hcd driver; the dummy_udc_stop() routine can race with the rest of the driver because it contains no locking. And even when proper locking is added, it can still race with the set_link_state() function because that function incorrectly drops the private spinlock before invoking any gadget driver callbacks. 
The result of this race, as seen above, is that set_link_state() can invoke a callback in gadgetfs even after gadgetfs has been unbound from dummy_hcd's UDC and its private data structures have been deallocated. include/linux/usb/gadget.h documents that the ->reset, ->disconnect, ->suspend, and ->resume callbacks may be invoked in interrupt context. In general this is necessary, to prevent races with gadget driver removal. This patch fixes dummy_hcd to retain the spinlock across these calls, and it adds a spinlock acquisition to dummy_udc_stop() to prevent the race. The net2280 driver makes the same mistake of dropping the private spinlock for its ->disconnect and ->reset callback invocations. The patch fixes it too. Lastly, since gadgetfs_suspend() may be invoked in interrupt context, it cannot assume that interrupts are enabled when it runs. It must use spin_lock_irqsave() instead of spin_lock_irq(). The patch fixes that bug as well. Signed-off-by: Alan Stern Reported-and-tested-by: Andrey Konovalov Acked-by: Felipe Balbi Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/legacy/inode.c | 5 +++-- drivers/usb/gadget/udc/dummy_hcd.c | 13 ++++--------- drivers/usb/gadget/udc/net2280.c | 9 +-------- 3 files changed, 8 insertions(+), 19 deletions(-) diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 391e4853f4b0..f959c42ecace 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1679,9 +1679,10 @@ static void gadgetfs_suspend (struct usb_gadget *gadget) { struct dev_data *dev = get_gadget_data (gadget); + unsigned long flags; INFO (dev, "suspended from state %d\n", dev->state); - spin_lock (&dev->lock); + spin_lock_irqsave(&dev->lock, flags); switch (dev->state) { case STATE_DEV_SETUP: // VERY odd... host died?? 
case STATE_DEV_CONNECTED: @@ -1692,7 +1693,7 @@ gadgetfs_suspend (struct usb_gadget *gadget) default: break; } - spin_unlock (&dev->lock); + spin_unlock_irqrestore(&dev->lock, flags); } static struct usb_gadget_driver gadgetfs_driver = { diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 69226dcf1443..94c8a9f6cbf1 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -442,23 +442,16 @@ static void set_link_state(struct dummy_hcd *dum_hcd) /* Report reset and disconnect events to the driver */ if (dum->driver && (disconnect || reset)) { stop_activity(dum); - spin_unlock(&dum->lock); if (reset) usb_gadget_udc_reset(&dum->gadget, dum->driver); else dum->driver->disconnect(&dum->gadget); - spin_lock(&dum->lock); } } else if (dum_hcd->active != dum_hcd->old_active) { - if (dum_hcd->old_active && dum->driver->suspend) { - spin_unlock(&dum->lock); + if (dum_hcd->old_active && dum->driver->suspend) dum->driver->suspend(&dum->gadget); - spin_lock(&dum->lock); - } else if (!dum_hcd->old_active && dum->driver->resume) { - spin_unlock(&dum->lock); + else if (!dum_hcd->old_active && dum->driver->resume) dum->driver->resume(&dum->gadget); - spin_lock(&dum->lock); - } } dum_hcd->old_status = dum_hcd->port_status; @@ -983,7 +976,9 @@ static int dummy_udc_stop(struct usb_gadget *g) struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g); struct dummy *dum = dum_hcd->dum; + spin_lock_irq(&dum->lock); dum->driver = NULL; + spin_unlock_irq(&dum->lock); return 0; } diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 61c938c36d88..33f3987218f7 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -2469,11 +2469,8 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver) nuke(&dev->ep[i]); /* report disconnect; the driver is already quiesced */ - if (driver) { - spin_unlock(&dev->lock); + if (driver) driver->disconnect(&dev->gadget); - spin_lock(&dev->lock); - } usb_reinit(dev); } @@ -3347,8 +3344,6 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat) BIT(PCI_RETRY_ABORT_INTERRUPT)) static void handle_stat1_irqs(struct net2280 *dev, u32 stat) -__releases(dev->lock) -__acquires(dev->lock) { struct net2280_ep *ep; u32 tmp, num, mask, scratch; @@ -3389,14 +3384,12 @@ __acquires(dev->lock) if (disconnect || reset) { stop_activity(dev, dev->driver); ep0_start(dev); - spin_unlock(&dev->lock); if (reset) usb_gadget_udc_reset (&dev->gadget, dev->driver); else (dev->driver->disconnect) (&dev->gadget); - spin_lock(&dev->lock); return; } } -- GitLab From 1419b8752153d50f21cb34131cc613ee09662d8a Mon Sep 17 00:00:00 2001 From: James Morse Date: Fri, 16 Jun 2017 14:02:29 -0700 Subject: [PATCH 296/786] mm/memory-failure.c: use compound_head() flags for huge pages commit 7258ae5c5a2ce2f5969e8b18b881be40ab55433d upstream. memory_failure() chooses a recovery action function based on the page flags. For huge pages it uses the tail page flags which don't have anything interesting set, resulting in: > Memory failure: 0x9be3b4: Unknown page state > Memory failure: 0x9be3b4: recovery action for unknown page: Failed Instead, save a copy of the head page's flags if this is a huge page, this means if there are no relevant flags for this tail page, we use the head pages flags instead. 
This results in the me_huge_page() recovery action being called: > Memory failure: 0x9b7969: recovery action for huge page: Delayed For hugepages that have not yet been allocated, this allows the hugepage to be dequeued. Fixes: 524fca1e7356 ("HWPOISON: fix misjudgement of page_action() for errors on mlocked pages") Link: http://lkml.kernel.org/r/20170524130204.21845-1-james.morse@arm.com Signed-off-by: James Morse Tested-by: Punit Agrawal Acked-by: Punit Agrawal Acked-by: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/memory-failure.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 4bd44803e366..ce7d416edab7 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1176,7 +1176,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) * page_remove_rmap() in try_to_unmap_one(). So to determine page status * correctly, we save a copy of the page flags at this time. */ - page_flags = p->flags; + if (PageHuge(p)) + page_flags = hpage->flags; + else + page_flags = p->flags; /* * unpoison always clear PG_hwpoison inside page lock -- GitLab From f7ae7d2229d2f7c372a00b80c22b6eb5a9ac1949 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Fri, 16 Jun 2017 14:02:31 -0700 Subject: [PATCH 297/786] swap: cond_resched in swap_cgroup_prepare() commit ef70762948dde012146926720b70e79736336764 upstream. I saw need_resched() warnings when swapping on large swapfile (TBs) because continuously allocating many pages in swap_cgroup_prepare() took too long. We already cond_resched when freeing page in swap_cgroup_swapoff(). Do the same for the page allocation. Link: http://lkml.kernel.org/r/20170604200109.17606-1-yuzhao@google.com Signed-off-by: Yu Zhao Acked-by: Michal Hocko Acked-by: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/swap_cgroup.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 310ac0b8f974..454d6d7509ed 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -48,6 +48,9 @@ static int swap_cgroup_prepare(int type) if (!page) goto not_enough_page; ctrl->map[idx] = page; + + if (!(idx % SWAP_CLUSTER_MAX)) + cond_resched(); } return 0; not_enough_page: -- GitLab From cf6ac3abb3233b6178279bc2ecaa8c14890f0d77 Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Maneyrol Date: Mon, 29 May 2017 09:59:40 +0000 Subject: [PATCH 298/786] iio: imu: inv_mpu6050: add accel lpf setting for chip >= MPU6500 commit 948588e25b8af5e66962ed3f53e1cae1656fa5af upstream. Starting from MPU6500, accelerometer dlpf is set in a separate register named ACCEL_CONFIG_2. Add this new register in the map and set it for the corresponding chips. 
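A simplified standalone model of that register-map change (plain C; the 0x1a and 0x1d addresses come from the driver's defines, everything else is invented), showing one write for older parts and two writes for parts with the separate accelerometer LPF register:

    #include <stdio.h>

    /* Illustrative register map: older parts share one LPF register, newer
     * parts add a separate accelerometer LPF register. */
    struct reg_map {
        unsigned char lpf;
        unsigned char accel_lpf;  /* 0 = not present on this part */
    };

    static void write_reg(unsigned char reg, unsigned char val)
    {
        printf("write 0x%02x <- 0x%02x\n", (unsigned int)reg, (unsigned int)val);
    }

    static void set_lpf(const struct reg_map *map, unsigned char val)
    {
        write_reg(map->lpf, val);
        if (map->accel_lpf)        /* only chips that have the register */
            write_reg(map->accel_lpf, val);
    }

    int main(void)
    {
        struct reg_map old_chip = { .lpf = 0x1a };
        struct reg_map new_chip = { .lpf = 0x1a, .accel_lpf = 0x1d };

        set_lpf(&old_chip, 0x03);  /* one write  */
        set_lpf(&new_chip, 0x03);  /* two writes */
        return 0;
    }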
Signed-off-by: Jean-Baptiste Maneyrol Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/imu/inv_mpu6050/inv_mpu_core.c | 39 ++++++++++++++++++++-- drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h | 3 ++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index b9fcbf18aa99..5faea370ab57 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785}; static const struct inv_mpu6050_reg_map reg_set_6500 = { .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, .lpf = INV_MPU6050_REG_CONFIG, + .accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2, .user_ctrl = INV_MPU6050_REG_USER_CTRL, .fifo_en = INV_MPU6050_REG_FIFO_EN, .gyro_config = INV_MPU6050_REG_GYRO_CONFIG, @@ -204,6 +205,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on) } EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); +/** + * inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent + * + * MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope + * MPU6500 and above have a dedicated register for accelerometer + */ +static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st, + enum inv_mpu6050_filter_e val) +{ + int result; + + result = regmap_write(st->map, st->reg->lpf, val); + if (result) + return result; + + switch (st->chip_type) { + case INV_MPU6050: + case INV_MPU6000: + case INV_MPU9150: + /* old chips, nothing to do */ + result = 0; + break; + default: + /* set accel lpf */ + result = regmap_write(st->map, st->reg->accel_lpf, val); + break; + } + + return result; +} + /** * inv_mpu6050_init_config() - Initialize hardware, disable FIFO. * @@ -227,8 +259,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev) if (result) return result; - d = INV_MPU6050_FILTER_20HZ; - result = regmap_write(st->map, st->reg->lpf, d); + result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ); if (result) return result; @@ -531,6 +562,8 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev, * would be alising. This function basically search for the * correct low pass parameters based on the fifo rate, e.g, * sampling frequency. + * + * lpf is set automatically when setting sampling rate to avoid any aliases. */ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) { @@ -546,7 +579,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) i++; data = d[i]; - result = regmap_write(st->map, st->reg->lpf, data); + result = inv_mpu6050_set_lpf_regs(st, data); if (result) return result; st->chip_config.lpf = data; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h index f0e8c5dd9fae..d851581bb0b8 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h @@ -28,6 +28,7 @@ * struct inv_mpu6050_reg_map - Notable registers. * @sample_rate_div: Divider applied to gyro output rate. * @lpf: Configures internal low pass filter. + * @accel_lpf: Configures accelerometer low pass filter. * @user_ctrl: Enables/resets the FIFO. * @fifo_en: Determines which data will appear in FIFO. * @gyro_config: gyro config register. 
@@ -47,6 +48,7 @@ struct inv_mpu6050_reg_map { u8 sample_rate_div; u8 lpf; + u8 accel_lpf; u8 user_ctrl; u8 fifo_en; u8 gyro_config; @@ -187,6 +189,7 @@ struct inv_mpu6050_state { #define INV_MPU6050_FIFO_THRESHOLD 500 /* mpu6500 registers */ +#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D #define INV_MPU6500_REG_ACCEL_OFFSET 0x77 /* delay time in milliseconds */ -- GitLab From 8a48b7eace4d9a12363de9583e70e279e5fce536 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Fri, 9 Jun 2017 11:49:15 -0700 Subject: [PATCH 299/786] sched/core: Idle_task_exit() shouldn't use switch_mm_irqs_off() commit 252d2a4117bc181b287eeddf848863788da733ae upstream. idle_task_exit() can be called with IRQs on x86 on and therefore should use switch_mm(), not switch_mm_irqs_off(). This doesn't seem to cause any problems right now, but it will confuse my upcoming TLB flush changes. Nonetheless, I think it should be backported because it's trivial. There won't be any meaningful performance impact because idle_task_exit() is only used when offlining a CPU. Signed-off-by: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: f98db6013c55 ("sched/core: Add switch_mm_irqs_off() and use it in the scheduler") Link: http://lkml.kernel.org/r/ca3d1a9fa93a0b49f5a8ff729eda3640fb6abdf9.1497034141.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 154fd689fe02..692c948ae333 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5469,7 +5469,7 @@ void idle_task_exit(void) BUG_ON(cpu_online(smp_processor_id())); if (mm != &init_mm) { - switch_mm_irqs_off(mm, &init_mm, current); + switch_mm(mm, &init_mm, current); finish_arch_post_lock_switch(); } mmdrop(mm); -- GitLab From 766283254b672293fa39b5cecd6a5f86efd75127 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 11 Jun 2017 00:38:36 +0200 Subject: [PATCH 300/786] genirq: Release resources in __setup_irq() error path commit fa07ab72cbb0d843429e61bf179308aed6cbe0dd upstream. In case __irq_set_trigger() fails the resources requested via irq_request_resources() are not released. Add the missing release call into the error handling path. Fixes: c1bacbae8192 ("genirq: Provide irq_request/release_resources chip callbacks") Signed-off-by: Heiner Kallweit Signed-off-by: Thomas Gleixner Link: http://lkml.kernel.org/r/655538f5-cb20-a892-ff15-fbd2dd1fa4ec@gmail.com Signed-off-by: Greg Kroah-Hartman --- kernel/irq/manage.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 6b669593e7eb..ea41820ab12e 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1308,8 +1308,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) ret = __irq_set_trigger(desc, new->flags & IRQF_TRIGGER_MASK); - if (ret) + if (ret) { + irq_release_resources(desc); goto out_mask; + } } desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ -- GitLab From 8ee7f06f4dcaad729ecd7562b10b9a816f99ae70 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 30 May 2017 23:15:34 +0200 Subject: [PATCH 301/786] alarmtimer: Prevent overflow of relative timers commit f4781e76f90df7aec400635d73ea4c35ee1d4765 upstream. Andrey reported a alartimer related RCU stall while fuzzing the kernel with syzkaller. 
The reason for this is an overflow in ktime_add() which brings the resulting time into negative space and causes immediate expiry of the timer. The following rearm with a small interval does not bring the timer back into positive space due to the same issue. This results in a permanent firing alarmtimer which hogs the CPU. Use ktime_add_safe() instead which detects the overflow and clamps the result to KTIME_SEC_MAX. Reported-by: Andrey Konovalov Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Kostya Serebryany Cc: syzkaller Cc: John Stultz Cc: Dmitry Vyukov Link: http://lkml.kernel.org/r/20170530211655.802921648@linutronix.de Signed-off-by: Greg Kroah-Hartman --- kernel/time/alarmtimer.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 12dd190634ab..7702135e1456 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -354,7 +354,7 @@ void alarm_start_relative(struct alarm *alarm, ktime_t start) { struct alarm_base *base = &alarm_bases[alarm->type]; - start = ktime_add(start, base->gettime()); + start = ktime_add_safe(start, base->gettime()); alarm_start(alarm, start); } EXPORT_SYMBOL_GPL(alarm_start_relative); @@ -440,7 +440,7 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval) overrun++; } - alarm->node.expires = ktime_add(alarm->node.expires, interval); + alarm->node.expires = ktime_add_safe(alarm->node.expires, interval); return overrun; } EXPORT_SYMBOL_GPL(alarm_forward); @@ -630,7 +630,7 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, ktime_t now; now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime(); - exp = ktime_add(now, exp); + exp = ktime_add_safe(now, exp); } alarm_start(&timr->it.alarm.alarmtimer, exp); -- GitLab From f0ee203c864faca0b8b36698a0ffacb246b1412a Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 4 Jan 2017 06:30:16 +0100 Subject: [PATCH 302/786] usb: gadget: composite: Fix function used to free memory commit 990758c53eafe5a220a780ed12e7b4d51b3df032 upstream. 'cdev->os_desc_req' has been allocated with 'usb_ep_alloc_request()' so 'usb_ep_free_request()' should be used to free it. Signed-off-by: Christophe JAILLET Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/composite.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index c3c5b87b35b3..baa7cdcc0ebc 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); if (!cdev->os_desc_req->buf) { ret = -ENOMEM; - kfree(cdev->os_desc_req); + usb_ep_free_request(ep0, cdev->os_desc_req); goto end; } cdev->os_desc_req->context = cdev; -- GitLab From 22921a9e232a1187873c0127d408767b6bdc558e Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Tue, 10 Jan 2017 16:05:28 -0700 Subject: [PATCH 303/786] usb: dwc3: exynos fix axius clock error path to do cleanup commit 8ae584d1951f241efd45499f8774fd7066f22823 upstream. Axius clock error path returns without disabling clock and suspend clock. Fix it to disable them before returning error. 
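For reference, a generic sketch of the goto-unwind idiom the fix applies; clk_a and clk_b are hypothetical names, not symbols from dwc3-exynos:

  #include <linux/clk.h>
  #include <linux/err.h>
  #include <linux/platform_device.h>

  /* Sketch: undo only the setup steps that actually succeeded. */
  static int example_probe(struct platform_device *pdev)
  {
          struct clk *clk_a, *clk_b;
          int ret;

          clk_a = devm_clk_get(&pdev->dev, "clk_a");
          if (IS_ERR(clk_a))
                  return PTR_ERR(clk_a);
          ret = clk_prepare_enable(clk_a);
          if (ret)
                  return ret;

          clk_b = devm_clk_get(&pdev->dev, "clk_b");
          if (IS_ERR(clk_b)) {
                  ret = PTR_ERR(clk_b);
                  goto err_clk_a;         /* clk_a was enabled, undo it */
          }
          ret = clk_prepare_enable(clk_b);
          if (ret)
                  goto err_clk_a;

          return 0;

  err_clk_a:
          clk_disable_unprepare(clk_a);
          return ret;
  }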
Reviewed-by: Javier Martinez Canillas Signed-off-by: Shuah Khan Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc3/dwc3-exynos.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index 2f1fb7e7aa54..9eba51b92f72 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c @@ -148,7 +148,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev) exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk"); if (IS_ERR(exynos->axius_clk)) { dev_err(dev, "no AXI UpScaler clk specified\n"); - return -ENODEV; + ret = -ENODEV; + goto axius_clk_err; } clk_prepare_enable(exynos->axius_clk); } else { @@ -206,6 +207,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev) regulator_disable(exynos->vdd33); err2: clk_disable_unprepare(exynos->axius_clk); +axius_clk_err: clk_disable_unprepare(exynos->susp_clk); clk_disable_unprepare(exynos->clk); return ret; -- GitLab From 6b706cbb16e9e794a2a37e57ae4951b8232270b4 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 2 Jun 2017 11:35:01 -0700 Subject: [PATCH 304/786] MIPS: Fix bnezc/jialc return address calculation commit 1a73d9310e093fc3adffba4d0a67b9fab2ee3f63 upstream. The code handling the pop76 opcode (ie. bnezc & jialc instructions) in __compute_return_epc_for_insn() needs to set the value of $31 in the jialc case, which is encoded with rs = 0. However its check to differentiate bnezc (rs != 0) from jialc (rs = 0) was unfortunately backwards, meaning that if we emulate a bnezc instruction we clobber $31 & if we emulate a jialc instruction it actually behaves like a jic instruction. Fix this by inverting the check of rs to match the way the instructions are actually encoded. Signed-off-by: Paul Burton Fixes: 28d6f93d201d ("MIPS: Emulate the new MIPS R6 BNEZC and JIALC instructions") Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16178/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/branch.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 12c718181e5e..c86b66b57fc6 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, break; } /* Compact branch: BNEZC || JIALC */ - if (insn.i_format.rs) + if (!insn.i_format.rs) { + /* JIALC: set $31/ra */ regs->regs[31] = epc + 4; + } regs->cp0_epc += 8; break; #endif -- GitLab From ecae47331a431b0295905add457a06e766310f71 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 2 Jun 2017 12:02:08 -0700 Subject: [PATCH 305/786] MIPS: .its targets depend on vmlinux commit bcd7c45e0d5a82be9a64b90050f0e09d41a50758 upstream. The .its targets require information about the kernel binary, such as its entry point, which is extracted from the vmlinux ELF. We therefore require that the ELF is built before the .its files are generated. Declare this requirement in the Makefile such that make will ensure this is always the case, otherwise in corner cases we can hit issues as the .its is generated with an incorrect (either invalid or stale) entry point. 
Signed-off-by: Paul Burton Fixes: cf2a5e0bb4c6 ("MIPS: Support generating Flattened Image Trees (.itb)") Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16179/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/boot/Makefile | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 2728a9a9c7c5..145b5ce8eb7e 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS $@ -DADDR_BITS=$(ADDR_BITS) \ -DADDR_CELLS=$(itb_addr_cells) -$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,none,vmlinux.bin) -$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz) -$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2) -$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma) -$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE +$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo) quiet_cmd_itb-image = ITB $@ -- GitLab From 7dfe7ca9ec12b7dbd4fdefa7f3cc60e0ec08ba3b Mon Sep 17 00:00:00 2001 From: "Hon Ching \\(Vicky) Lo" Date: Wed, 15 Mar 2017 01:28:07 -0400 Subject: [PATCH 306/786] vTPM: Fix missing NULL check commit 31574d321c70f6d3b40fe98f9b2eafd9a903fef9 upstream. The current code passes the address of tpm_chip as the argument to dev_get_drvdata() without prior NULL check in tpm_ibmvtpm_get_desired_dma. This resulted an oops during kernel boot when vTPM is enabled in Power partition configured in active memory sharing mode. The vio_driver's get_desired_dma() is called before the probe(), which for vtpm is tpm_ibmvtpm_probe, and it's this latter function that initializes the driver and set data. Attempting to get data before the probe() caused the problem. This patch adds a NULL check to the tpm_ibmvtpm_get_desired_dma. 
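The underlying rule is that drvdata is only meaningful after probe() has attached it, so any callback that can run earlier must tolerate a NULL result. A minimal sketch with hypothetical helpers (DMA_ESTIMATE_DEFAULT and dma_needed_for() are illustrative only, not symbols from the driver):

  /* Sketch: drvdata may legitimately be NULL before probe() completes. */
  static unsigned long example_get_desired_dma(struct vio_dev *vdev)
  {
          struct tpm_chip *chip = dev_get_drvdata(&vdev->dev);

          if (!chip)                      /* probe() has not run yet */
                  return DMA_ESTIMATE_DEFAULT;

          return dma_needed_for(dev_get_drvdata(&chip->dev));
  }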
fixes: 9e0d39d8a6a0 ("tpm: Remove useless priv field in struct tpm_vendor_specific") Signed-off-by: Hon Ching(Vicky) Lo Reviewed-by: Jarkko Sakkine Signed-off-by: Jarkko Sakkinen Signed-off-by: Greg Kroah-Hartman --- drivers/char/tpm/tpm_ibmvtpm.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index 946025a7413b..84eca4f93b82 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -295,6 +295,8 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev) } kfree(ibmvtpm); + /* For tpm_ibmvtpm_get_desired_dma */ + dev_set_drvdata(&vdev->dev, NULL); return 0; } @@ -309,13 +311,16 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev) static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) { struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); - struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); + struct ibmvtpm_dev *ibmvtpm; - /* ibmvtpm initializes at probe time, so the data we are - * asking for may not be set yet. Estimate that 4K required - * for TCE-mapped buffer in addition to CRQ. - */ - if (!ibmvtpm) + /* + * ibmvtpm initializes at probe time, so the data we are + * asking for may not be set yet. Estimate that 4K required + * for TCE-mapped buffer in addition to CRQ. + */ + if (chip) + ibmvtpm = dev_get_drvdata(&chip->dev); + else return CRQ_RES_BUF_SIZE + PAGE_SIZE; return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size; -- GitLab From b355b899c74a11c06e1edd4812d4c8809ec36c5e Mon Sep 17 00:00:00 2001 From: David Miller Date: Fri, 2 Jun 2017 11:28:54 -0400 Subject: [PATCH 307/786] crypto: Work around deallocated stack frame reference gcc bug on sparc. commit d41519a69b35b10af7fda867fb9100df24fdf403 upstream. On sparc, if we have an alloca() like situation, as is the case with SHASH_DESC_ON_STACK(), we can end up referencing deallocated stack memory. The result can be that the value is clobbered if a trap or interrupt arrives at just the right instruction. It only occurs if the function ends returning a value from that alloca() area and that value can be placed into the return value register using a single instruction. For example, in lib/libcrc32c.c:crc32c() we end up with a return sequence like: return %i7+8 lduw [%o5+16], %o0 ! MEM[(u32 *)__shash_desc.1_10 + 16B], %o5 holds the base of the on-stack area allocated for the shash descriptor. But the return released the stack frame and the register window. So if an intererupt arrives between 'return' and 'lduw', then the value read at %o5+16 can be corrupted. Add a data compiler barrier to work around this problem. This is exactly what the gcc fix will end up doing as well, and it absolutely should not change the code generated for other cpus (unless gcc on them has the same bug :-) With crucial insight from Eric Sandeen. Reported-by: Anatoly Pugachev Signed-off-by: David S. 
Miller Signed-off-by: Herbert Xu Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/hash.c | 5 ++++- fs/f2fs/f2fs.h | 5 ++++- lib/libcrc32c.c | 6 ++++-- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c index a97fdc156a03..baacc1866861 100644 --- a/fs/btrfs/hash.c +++ b/fs/btrfs/hash.c @@ -38,6 +38,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length) { SHASH_DESC_ON_STACK(shash, tfm); u32 *ctx = (u32 *)shash_desc_ctx(shash); + u32 retval; int err; shash->tfm = tfm; @@ -47,5 +48,7 @@ u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length) err = crypto_shash_update(shash, address, length); BUG_ON(err); - return *ctx; + retval = *ctx; + barrier_data(ctx); + return retval; } diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index c12f695923b6..88e111ab068b 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -948,6 +948,7 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, { SHASH_DESC_ON_STACK(shash, sbi->s_chksum_driver); u32 *ctx = (u32 *)shash_desc_ctx(shash); + u32 retval; int err; shash->tfm = sbi->s_chksum_driver; @@ -957,7 +958,9 @@ static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, err = crypto_shash_update(shash, address, length); BUG_ON(err); - return *ctx; + retval = *ctx; + barrier_data(ctx); + return retval; } static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc, diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c index 74a54b7f2562..9f79547d1b97 100644 --- a/lib/libcrc32c.c +++ b/lib/libcrc32c.c @@ -43,7 +43,7 @@ static struct crypto_shash *tfm; u32 crc32c(u32 crc, const void *address, unsigned int length) { SHASH_DESC_ON_STACK(shash, tfm); - u32 *ctx = (u32 *)shash_desc_ctx(shash); + u32 ret, *ctx = (u32 *)shash_desc_ctx(shash); int err; shash->tfm = tfm; @@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length) err = crypto_shash_update(shash, address, length); BUG_ON(err); - return *ctx; + ret = *ctx; + barrier_data(ctx); + return ret; } EXPORT_SYMBOL(crc32c); -- GitLab From 04651048c79a789827239d335225a6bd785ac16a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 30 May 2017 23:15:35 +0200 Subject: [PATCH 308/786] alarmtimer: Rate limit periodic intervals commit ff86bf0c65f14346bf2440534f9ba5ac232c39a0 upstream. The alarmtimer code has another source of potentially rearming itself too fast. Interval timers with a very samll interval have a similar CPU hog effect as the previously fixed overflow issue. The reason is that alarmtimers do not implement the normal protection against this kind of problem which the other posix timer use: timer expires -> queue signal -> deliver signal -> rearm timer This scheme brings the rearming under scheduler control and prevents permanently firing timers which hog the CPU. Bringing this scheme to the alarm timer code is a major overhaul because it lacks all the necessary mechanisms completely. So for a quick fix limit the interval to one jiffie. This is not problematic in practice as alarmtimers are usually backed by an RTC for suspend which have 1 second resolution. It could be therefor argued that the resolution of this clock should be set to 1 second in general, but that's outside the scope of this fix. 
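In code terms the hot fix is just a clamp on the requested period, roughly as sketched below (the actual change is in the alarmtimer diff that follows):

  #include <linux/ktime.h>
  #include <linux/jiffies.h>      /* TICK_NSEC */

  /* Sketch: never let a periodic alarm rearm faster than one tick. */
  static ktime_t clamp_alarm_interval(ktime_t interval)
  {
          if (ktime_to_ns(interval) < TICK_NSEC)
                  interval = ktime_set(0, TICK_NSEC);
          return interval;
  }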
Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Kostya Serebryany Cc: syzkaller Cc: John Stultz Cc: Dmitry Vyukov Link: http://lkml.kernel.org/r/20170530211655.896767100@linutronix.de Signed-off-by: Greg Kroah-Hartman --- kernel/time/alarmtimer.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 7702135e1456..9ba04aa740b9 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c @@ -624,6 +624,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags, /* start the timer */ timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); + + /* + * Rate limit to the tick as a hot fix to prevent DOS. Will be + * mopped up later. + */ + if (ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC) + timr->it.alarm.interval = ktime_set(0, TICK_NSEC); + exp = timespec_to_ktime(new_setting->it_value); /* Convert (if necessary) to absolute time */ if (flags != TIMER_ABSTIME) { -- GitLab From cfc0eb403816c5c4f9667d959de5e22789b5421e Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 19 Jun 2017 04:03:24 -0700 Subject: [PATCH 309/786] mm: larger stack guard gap, between vmas commit 1be7107fbe18eed3e319a6c3e83c78254b693acb upstream. Stack guard page is a useful feature to reduce a risk of stack smashing into a different mapping. We have been using a single page gap which is sufficient to prevent having stack adjacent to a different mapping. But this seems to be insufficient in the light of the stack usage in userspace. E.g. glibc uses as large as 64kB alloca() in many commonly used functions. Others use constructs liks gid_t buffer[NGROUPS_MAX] which is 256kB or stack strings with MAX_ARG_STRLEN. This will become especially dangerous for suid binaries and the default no limit for the stack size limit because those applications can be tricked to consume a large portion of the stack and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunatelly. Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size because systems with larger base pages might cap stack allocations in the PAGE_SIZE units) which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix because the problem is somehow inherent, but it should reduce attack space a lot. One could argue that the gap size should be configurable from userspace, but that can be done later when somebody finds that the new 1MB is wrong for some special case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units). Implementation wise, first delete all the old code for stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode. Instead of keeping gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and and the vma tree's subtree_gap support for that. 
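Conceptually, every check that used to compare against vma->vm_start now compares against a gap-adjusted start. A simplified sketch of the helper and its use (the real vm_start_gap()/vm_end_gap() definitions are in the include/linux/mm.h hunk below):

  /* Sketch: lowest address another mapping may approach from below. */
  static unsigned long vm_start_gap_sketch(struct vm_area_struct *vma)
  {
          unsigned long start = vma->vm_start;

          if (vma->vm_flags & VM_GROWSDOWN) {
                  start -= stack_guard_gap;       /* default 256 pages */
                  if (start > vma->vm_start)      /* subtraction wrapped */
                          start = 0;
          }
          return start;
  }

  /* Callers then test: !vma || addr + len <= vm_start_gap(vma) */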
Original-patch-by: Oleg Nesterov Original-patch-by: Michal Hocko Signed-off-by: Hugh Dickins Acked-by: Michal Hocko Tested-by: Helge Deller # parisc Signed-off-by: Linus Torvalds [wt: backport to 4.11: adjust context] [wt: backport to 4.9: adjust context ; kernel doc was not in admin-guide] Signed-off-by: Willy Tarreau Signed-off-by: Greg Kroah-Hartman --- Documentation/kernel-parameters.txt | 7 ++ arch/arc/mm/mmap.c | 2 +- arch/arm/mm/mmap.c | 4 +- arch/frv/mm/elf-fdpic.c | 2 +- arch/mips/mm/mmap.c | 2 +- arch/parisc/kernel/sys_parisc.c | 15 +-- arch/powerpc/mm/hugetlbpage-radix.c | 2 +- arch/powerpc/mm/mmap.c | 4 +- arch/powerpc/mm/slice.c | 2 +- arch/s390/mm/mmap.c | 4 +- arch/sh/mm/mmap.c | 4 +- arch/sparc/kernel/sys_sparc_64.c | 4 +- arch/sparc/mm/hugetlbpage.c | 2 +- arch/tile/mm/hugetlbpage.c | 2 +- arch/x86/kernel/sys_x86_64.c | 4 +- arch/x86/mm/hugetlbpage.c | 2 +- arch/xtensa/kernel/syscall.c | 2 +- fs/hugetlbfs/inode.c | 2 +- fs/proc/task_mmu.c | 4 - include/linux/mm.h | 53 +++++----- mm/gup.c | 5 - mm/memory.c | 38 ------- mm/mmap.c | 149 +++++++++++++++++----------- 23 files changed, 152 insertions(+), 163 deletions(-) diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index a6fadef92d6d..86a6746f6833 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -3932,6 +3932,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted. spia_pedr= spia_peddr= + stack_guard_gap= [MM] + override the default stack gap protection. The value + is in page units and it defines how many pages prior + to (for stacks growing down) resp. after (for stacks + growing up) the main stack are reserved for no other + mapping. Default value is 256 pages. + stacktrace [FTRACE] Enabled the stack tracer on boot up. 
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c index 2e06d56e987b..cf4ae6958240 100644 --- a/arch/arc/mm/mmap.c +++ b/arch/arc/mm/mmap.c @@ -64,7 +64,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 66353caa35b9..641334ebf46d 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -89,7 +89,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -140,7 +140,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c index 836f14707a62..efa59f1f8022 100644 --- a/arch/frv/mm/elf-fdpic.c +++ b/arch/frv/mm/elf-fdpic.c @@ -74,7 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi addr = PAGE_ALIGN(addr); vma = find_vma(current->mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) goto success; } diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index d08ea3ff0f53..a44052c05f93 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -92,7 +92,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 0a393a04e891..1d7691fa8ab2 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; unsigned long task_size = TASK_SIZE; int do_color_align, last_mmap; struct vm_unmapped_area_info info; @@ -115,9 +115,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) goto found_addr; } @@ -141,7 +142,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; int do_color_align, last_mmap; @@ -175,9 +176,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = COLOR_ALIGN(addr, last_mmap, pgoff); else addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= 
vm_end_gap(prev))) goto found_addr; } diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c index 35254a678456..a2b2d97f7eda 100644 --- a/arch/powerpc/mm/hugetlbpage-radix.c +++ b/arch/powerpc/mm/hugetlbpage-radix.c @@ -65,7 +65,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } /* diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 2f1e44362198..5bc2845cddf4 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -106,7 +106,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -142,7 +142,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index 2b27458902ee..c4d5c9c61e0f 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c @@ -105,7 +105,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, if ((mm->task_size - len) < addr) return 0; vma = find_vma(mm, addr); - return (!vma || (addr + len) <= vma->vm_start); + return (!vma || (addr + len) <= vm_start_gap(vma)); } static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index eb9df2822da1..812368f274c9 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -98,7 +98,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -136,7 +136,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c index 6777177807c2..7df7d5944188 100644 --- a/arch/sh/mm/mmap.c +++ b/arch/sh/mm/mmap.c @@ -63,7 +63,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -113,7 +113,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index fe8b8ee8e660..02e05e221b94 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -118,7 +118,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= 
vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -181,7 +181,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 988acc8b1b80..58cde8d9be8a 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -116,7 +116,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, HPAGE_SIZE); vma = find_vma(mm, addr); if (task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c index 77ceaa343fce..67508b249ede 100644 --- a/arch/tile/mm/hugetlbpage.c +++ b/arch/tile/mm/hugetlbpage.c @@ -232,7 +232,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (current->mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index a55ed63b9f91..1119414ab419 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -140,7 +140,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (end - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } @@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 2ae8584b44c7..fe342e8ed529 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -144,7 +144,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c index 83cf49685373..3aaaae18417c 100644 --- a/arch/xtensa/kernel/syscall.c +++ b/arch/xtensa/kernel/syscall.c @@ -87,7 +87,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, /* At this point: (!vmm || addr < vmm->vm_end). 
*/ if (TASK_SIZE - len < addr) return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) + if (!vmm || addr + len <= vm_start_gap(vmm)) return addr; addr = vmm->vm_end; if (flags & MAP_SHARED) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 4fb7b10f3a05..704fa0b17309 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -191,7 +191,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); if (TASK_SIZE - len >= addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma))) return addr; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index b1517b6dcbdd..5138e781737a 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -299,11 +299,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) /* We don't show the stack guard page in /proc/maps */ start = vma->vm_start; - if (stack_guard_page_start(vma, start)) - start += PAGE_SIZE; end = vma->vm_end; - if (stack_guard_page_end(vma, end)) - end -= PAGE_SIZE; seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", diff --git a/include/linux/mm.h b/include/linux/mm.h index 0b5b2e4df14e..6c9e1ad12831 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1356,39 +1356,11 @@ int clear_page_dirty_for_io(struct page *page); int get_cmdline(struct task_struct *task, char *buffer, int buflen); -/* Is the vma a continuation of the stack vma above it? */ -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); -} - static inline bool vma_is_anonymous(struct vm_area_struct *vma) { return !vma->vm_ops; } -static inline int stack_guard_page_start(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSDOWN) && - (vma->vm_start == addr) && - !vma_growsdown(vma->vm_prev, addr); -} - -/* Is the vma a continuation of the stack vma below it? 
*/ -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr) -{ - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP); -} - -static inline int stack_guard_page_end(struct vm_area_struct *vma, - unsigned long addr) -{ - return (vma->vm_flags & VM_GROWSUP) && - (vma->vm_end == addr) && - !vma_growsup(vma->vm_next, addr); -} - int vma_is_stack_for_current(struct vm_area_struct *vma); extern unsigned long move_page_tables(struct vm_area_struct *vma, @@ -2127,6 +2099,7 @@ void page_cache_async_readahead(struct address_space *mapping, pgoff_t offset, unsigned long size); +extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -2155,6 +2128,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m return vma; } +static inline unsigned long vm_start_gap(struct vm_area_struct *vma) +{ + unsigned long vm_start = vma->vm_start; + + if (vma->vm_flags & VM_GROWSDOWN) { + vm_start -= stack_guard_gap; + if (vm_start > vma->vm_start) + vm_start = 0; + } + return vm_start; +} + +static inline unsigned long vm_end_gap(struct vm_area_struct *vma) +{ + unsigned long vm_end = vma->vm_end; + + if (vma->vm_flags & VM_GROWSUP) { + vm_end += stack_guard_gap; + if (vm_end < vma->vm_end) + vm_end = -PAGE_SIZE; + } + return vm_end; +} + static inline unsigned long vma_pages(struct vm_area_struct *vma) { return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; diff --git a/mm/gup.c b/mm/gup.c index ec4f82704b6f..c63a0341ae38 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -370,11 +370,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; - /* For mm_populate(), just skip the stack guard page. */ - if ((*flags & FOLL_POPULATE) && - (stack_guard_page_start(vma, address) || - stack_guard_page_end(vma, address + PAGE_SIZE))) - return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) diff --git a/mm/memory.c b/mm/memory.c index cbb1e5e5f791..e6a5a1f20492 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2698,40 +2698,6 @@ int do_swap_page(struct fault_env *fe, pte_t orig_pte) return ret; } -/* - * This is like a special single-page "expand_{down|up}wards()", - * except we must first make sure that 'address{-|+}PAGE_SIZE' - * doesn't hit another vma. - */ -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) -{ - address &= PAGE_MASK; - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { - struct vm_area_struct *prev = vma->vm_prev; - - /* - * Is there a mapping abutting this one below? - * - * That's only ok if it's the same stack mapping - * that has gotten split.. - */ - if (prev && prev->vm_end == address) - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; - - return expand_downwards(vma, address - PAGE_SIZE); - } - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { - struct vm_area_struct *next = vma->vm_next; - - /* As VM_GROWSDOWN but s/below/above/ */ - if (next && next->vm_start == address + PAGE_SIZE) - return next->vm_flags & VM_GROWSUP ? 
0 : -ENOMEM; - - return expand_upwards(vma, address + PAGE_SIZE); - } - return 0; -} - /* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. @@ -2748,10 +2714,6 @@ static int do_anonymous_page(struct fault_env *fe) if (vma->vm_flags & VM_SHARED) return VM_FAULT_SIGBUS; - /* Check if we need to add a guard page to the stack */ - if (check_stack_guard_page(vma, fe->address) < 0) - return VM_FAULT_SIGSEGV; - /* * Use pte_alloc() instead of pte_alloc_map(). We can't run * pte_offset_map() on pmds where a huge pmd might be created diff --git a/mm/mmap.c b/mm/mmap.c index 1af87c14183d..26542b346229 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) unsigned long retval; unsigned long newbrk, oldbrk; struct mm_struct *mm = current->mm; + struct vm_area_struct *next; unsigned long min_brk; bool populate; @@ -228,7 +229,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) } /* Check against existing mmap mappings. */ - if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE)) + next = find_vma(mm, oldbrk); + if (next && newbrk + PAGE_SIZE > vm_start_gap(next)) goto out; /* Ok, looks good - let it rip. */ @@ -251,10 +253,22 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) static long vma_compute_subtree_gap(struct vm_area_struct *vma) { - unsigned long max, subtree_gap; - max = vma->vm_start; - if (vma->vm_prev) - max -= vma->vm_prev->vm_end; + unsigned long max, prev_end, subtree_gap; + + /* + * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we + * allow two stack_guard_gaps between them here, and when choosing + * an unmapped area; whereas when expanding we only require one. + * That's a little inconsistent, but keeps the code here simpler. + */ + max = vm_start_gap(vma); + if (vma->vm_prev) { + prev_end = vm_end_gap(vma->vm_prev); + if (max > prev_end) + max -= prev_end; + else + max = 0; + } if (vma->vm_rb.rb_left) { subtree_gap = rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb)->rb_subtree_gap; @@ -350,7 +364,7 @@ static void validate_mm(struct mm_struct *mm) anon_vma_unlock_read(anon_vma); } - highest_address = vma->vm_end; + highest_address = vm_end_gap(vma); vma = vma->vm_next; i++; } @@ -539,7 +553,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = vma->vm_end; + mm->highest_vm_end = vm_end_gap(vma); /* * vma->vm_prev wasn't known when we followed the rbtree to find the @@ -854,7 +868,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, vma_gap_update(vma); if (end_changed) { if (!next) - mm->highest_vm_end = end; + mm->highest_vm_end = vm_end_gap(vma); else if (!adjust_next) vma_gap_update(next); } @@ -939,7 +953,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * mm->highest_vm_end doesn't need any update * in remove_next == 1 case. */ - VM_WARN_ON(mm->highest_vm_end != end); + VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); } } if (insert && file) @@ -1783,7 +1797,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) while (true) { /* Visit left subtree if it looks promising */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end >= low_limit && vma->vm_rb.rb_left) { struct vm_area_struct *left = rb_entry(vma->vm_rb.rb_left, @@ -1794,7 +1808,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) } } - gap_start = vma->vm_prev ? 
vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; check_current: /* Check if current node has a suitable gap */ if (gap_start > high_limit) @@ -1821,8 +1835,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_left) { - gap_start = vma->vm_prev->vm_end; - gap_end = vma->vm_start; + gap_start = vm_end_gap(vma->vm_prev); + gap_end = vm_start_gap(vma); goto check_current; } } @@ -1886,7 +1900,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) while (true) { /* Visit right subtree if it looks promising */ - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0; + gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; if (gap_start <= high_limit && vma->vm_rb.rb_right) { struct vm_area_struct *right = rb_entry(vma->vm_rb.rb_right, @@ -1899,7 +1913,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) check_current: /* Check if current node has a suitable gap */ - gap_end = vma->vm_start; + gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; if (gap_start <= high_limit && gap_end - gap_start >= length) @@ -1925,7 +1939,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) struct vm_area_struct, vm_rb); if (prev == vma->vm_rb.rb_right) { gap_start = vma->vm_prev ? - vma->vm_prev->vm_end : 0; + vm_end_gap(vma->vm_prev) : 0; goto check_current; } } @@ -1963,7 +1977,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct vm_unmapped_area_info info; if (len > TASK_SIZE - mmap_min_addr) @@ -1974,9 +1988,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -1999,7 +2014,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, const unsigned long len, const unsigned long pgoff, const unsigned long flags) { - struct vm_area_struct *vma; + struct vm_area_struct *vma, *prev; struct mm_struct *mm = current->mm; unsigned long addr = addr0; struct vm_unmapped_area_info info; @@ -2014,9 +2029,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); + vma = find_vma_prev(mm, addr, &prev); if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && - (!vma || addr + len <= vma->vm_start)) + (!vma || addr + len <= vm_start_gap(vma)) && + (!prev || addr >= vm_end_gap(prev))) return addr; } @@ -2151,21 +2167,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, * update accounting. This is shared with both the * grow-up and grow-down cases. 
*/ -static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) +static int acct_stack_growth(struct vm_area_struct *vma, + unsigned long size, unsigned long grow) { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; - unsigned long new_start, actual_size; + unsigned long new_start; /* address space limit tests */ if (!may_expand_vm(mm, vma->vm_flags, grow)) return -ENOMEM; /* Stack limit test */ - actual_size = size; - if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) - actual_size -= PAGE_SIZE; - if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) return -ENOMEM; /* mlock limit tests */ @@ -2203,17 +2217,30 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *next; + unsigned long gap_addr; int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; /* Guard against wrapping around to address 0. */ - if (address < PAGE_ALIGN(address+4)) - address = PAGE_ALIGN(address+4); - else + address &= PAGE_MASK; + address += PAGE_SIZE; + if (!address) return -ENOMEM; + /* Enforce stack_guard_gap */ + gap_addr = address + stack_guard_gap; + if (gap_addr < address) + return -ENOMEM; + next = vma->vm_next; + if (next && next->vm_start < gap_addr) { + if (!(next->vm_flags & VM_GROWSUP)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; @@ -2257,7 +2284,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (vma->vm_next) vma_gap_update(vma->vm_next); else - mm->highest_vm_end = address; + mm->highest_vm_end = vm_end_gap(vma); spin_unlock(&mm->page_table_lock); perf_event_mmap(vma); @@ -2278,6 +2305,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *prev; + unsigned long gap_addr; int error; address &= PAGE_MASK; @@ -2285,6 +2314,17 @@ int expand_downwards(struct vm_area_struct *vma, if (error) return error; + /* Enforce stack_guard_gap */ + gap_addr = address - stack_guard_gap; + if (gap_addr > address) + return -ENOMEM; + prev = vma->vm_prev; + if (prev && prev->vm_end > gap_addr) { + if (!(prev->vm_flags & VM_GROWSDOWN)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; @@ -2339,28 +2379,25 @@ int expand_downwards(struct vm_area_struct *vma, return error; } -/* - * Note how expand_stack() refuses to expand the stack all the way to - * abut the next virtual mapping, *unless* that mapping itself is also - * a stack mapping. We want to leave room for a guard page, after all - * (the guard page itself is not added here, that is done by the - * actual page faulting logic) - * - * This matches the behavior of the guard page logic (see mm/memory.c: - * check_stack_guard_page()), which only allows the guard page to be - * removed under these circumstances. - */ +/* enforced gap between the expanding stack and other mappings. 
*/ +unsigned long stack_guard_gap = 256UL< Debugged-by: Linus Torvalds Signed-off-by: Hugh Dickins Acked-by: Michal Hocko Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/mmap.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index d71a61e71d58..145d3d5253e8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1813,7 +1813,8 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info) /* Check if current node has a suitable gap */ if (gap_start > high_limit) return -ENOMEM; - if (gap_end >= low_limit && gap_end - gap_start >= length) + if (gap_end >= low_limit && + gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit right subtree if it looks promising */ @@ -1916,7 +1917,8 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) gap_end = vm_start_gap(vma); if (gap_end < low_limit) return -ENOMEM; - if (gap_start <= high_limit && gap_end - gap_start >= length) + if (gap_start <= high_limit && + gap_end > gap_start && gap_end - gap_start >= length) goto found; /* Visit left subtree if it looks promising */ -- GitLab From 493ecd5cd73ed41e319fe39816c6d3638ef080ff Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 24 Jun 2017 07:14:26 +0200 Subject: [PATCH 312/786] Linux 4.9.34 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8470d81d5cc2..a40b373eba3a 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 9 -SUBLEVEL = 33 +SUBLEVEL = 34 EXTRAVERSION = NAME = Roaring Lionus -- GitLab From 212eb9fda7667e5a452af03675f198154a277336 Mon Sep 17 00:00:00 2001 From: Amir Levy Date: Wed, 21 Jun 2017 13:44:15 +0300 Subject: [PATCH 313/786] msm: ipa3: remove delay from AP if QMI fails In case of failure to send QMI message to modem remove the delay from AP since modem is probably down (SSR\reboot). Change-Id: Iae4d5162d39cd05f5c50d75087ec90dfe04a6c43 Signed-off-by: Amir Levy --- drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 564397aa17ce..2d0876749660 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -1058,8 +1058,21 @@ static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id, if (should_force_clear) { result = ipa3_enable_force_clear(qmi_req_id, false, source_pipe_bitmask); - if (result) - goto exit; + if (result) { + struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 }; + + /* + * assuming here modem SSR\shutdown, AP can remove + * the delay in this case + */ + IPAERR( + "failed to force clear %d, remove delay from SCND reg\n" + , result); + ep_ctrl_scnd.endp_delay = false; + ipahal_write_reg_n_fields( + IPA_ENDP_INIT_CTRL_SCND_n, clnt_hdl, + &ep_ctrl_scnd); + } } /* with force clear, wait for emptiness */ for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) { -- GitLab From 97955bf5ad6c670fae82c0e4200305a3eb4951f1 Mon Sep 17 00:00:00 2001 From: Vikram Mulukutla Date: Sun, 25 Jun 2017 01:10:12 -0700 Subject: [PATCH 314/786] kthread: Ensure task isn't preempted before dequeue in kthread_parkme kthread_park waits for the target thread to park itself with kthread_parkme using a completion variable. kthread_parkme - which is invoked by the target thread - sets the completion variable before calling schedule to get itself off of the runqueue. 
This causes an interesting race in the hotplug path. takedown_cpu invoked for CPU X attempts to park the cpuhp/X thread before running the stopper thread on CPU X. There is a guarantee that the task state of cpuhp/X is set to TASK_PARKED, but there is no guarantee that it's actually off of the runqueue when kthread_park returns. takedown_cpu proceeds to run the stopper thread on CPUX which promptly migrates off the still-on-rq cpuhp/X thread to another cpu CPUY. All of this is actually OK - cpuhp/X may finally get itself off of CPU_Y's runqueue at some later point. However, let's assume CPU_Y has a rather long running RT task, and cpuhp/X doesn't actually get to run. Now for whatever reason CPU_X is brought online again, and an attempt is made to unpark cpuhp/X in cpuhp_online_idle with preemption disabled. kthread_unpark calls kthread_bind_mask, which finds that the task still active, leading to a schedule() call in wait_task_inactive, causing a "scheduling while atomic" BUG. Now we can force the hotplug thread to actually wait for smpboot threads to get off of the runqeue - but this sort of defeats the lightweight nature of parking for everyone else. Let's simply ensure that the setting of the completion variable and the schedule() is atomic. This completely fixes the hotplug versus kthread_parkme race. Change-Id: Ia624b07119462911a9d4d367100408f4426cb6f6 Signed-off-by: Vikram Mulukutla --- kernel/kthread.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/kthread.c b/kernel/kthread.c index b65854cdc0bb..80bf7bafc98b 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -160,9 +160,11 @@ static void __kthread_parkme(struct kthread *self) { __set_current_state(TASK_PARKED); while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { + preempt_disable(); if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) complete(&self->parked); - schedule(); + schedule_preempt_disabled(); + preempt_enable(); __set_current_state(TASK_PARKED); } clear_bit(KTHREAD_IS_PARKED, &self->flags); -- GitLab From a4fc49b651bb9ed74aae411b3e03b304c05786e1 Mon Sep 17 00:00:00 2001 From: Ghanim Fodi Date: Tue, 20 Jun 2017 10:35:20 +0300 Subject: [PATCH 315/786] msm: ipa3: Ring IPA MHI event ring doorbell on channel start Ringing IPA MHI event ring doorbell is done at MHI device during MHI channel start. This is done after the rings are allocated. The ring write pointer updated by the host is used as the doorbell value. Doorbell ringing is required in order to supply event credits to GSI H/W. 
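From the channel-start path the sequence is: verify that the host advanced the event ring write pointer, then ring the event ring doorbell with that value so GSI has credits. Roughly, simplified from the ipa_mhi.c hunk below with logging trimmed:

  /*
   * Sketch: a write pointer still at the ring base means the host never
   * posted credits, so channel start must fail; otherwise the host wp
   * is used as the doorbell value.
   */
  if (params->ev_ctx_host->wp == params->ev_ctx_host->rbase)
          goto fail_alloc_ch;             /* no event credits posted */

  res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl,
                             params->ev_ctx_host->wp);
  if (res)
          goto fail_alloc_ch;             /* GSI rejected the doorbell */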
Change-Id: I2db110b4f99c8ab6c6878d426b3ebb37149b0b76 Signed-off-by: Ghanim Fodi --- drivers/platform/msm/gsi/gsi.c | 29 +++++++++++++++++++++++ drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 18 ++++++++++++++ include/linux/msm_gsi.h | 18 ++++++++++++++ 3 files changed, 65 insertions(+) diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c index 7fca7aae467a..9c133a8733ee 100644 --- a/drivers/platform/msm/gsi/gsi.c +++ b/drivers/platform/msm/gsi/gsi.c @@ -1353,6 +1353,35 @@ int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, } EXPORT_SYMBOL(gsi_query_evt_ring_db_addr); +int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value) +{ + struct gsi_evt_ctx *ctx; + + if (!gsi_ctx) { + pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__); + return -GSI_STATUS_NODEV; + } + + if (evt_ring_hdl >= gsi_ctx->max_ev) { + GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl); + return -GSI_STATUS_INVALID_PARAMS; + } + + ctx = &gsi_ctx->evtr[evt_ring_hdl]; + + if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) { + GSIERR("bad state %d\n", + gsi_ctx->evtr[evt_ring_hdl].state); + return -GSI_STATUS_UNSUPPORTED_OP; + } + + ctx->ring.wp_local = value; + gsi_ring_evt_doorbell(ctx); + + return GSI_STATUS_SUCCESS; +} +EXPORT_SYMBOL(gsi_ring_evt_ring_db); + int gsi_reset_evt_ring(unsigned long evt_ring_hdl) { uint32_t val; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c index f66e3a32c489..0dd86fa0fb44 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c @@ -255,6 +255,24 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl; } + if (params->ev_ctx_host->wp == params->ev_ctx_host->rbase) { + IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n", + params->ev_ctx_host->wp); + goto fail_alloc_ch; + return res; + } + + IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n", + ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp); + res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl, + params->ev_ctx_host->wp); + if (res) { + IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n", + res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp); + goto fail_alloc_ch; + return res; + } + memset(&ch_props, 0, sizeof(ch_props)); ch_props.prot = GSI_CHAN_PROT_MHI; ch_props.dir = IPA_CLIENT_IS_PROD(client) ? 
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h index 0c460a0b304b..ebca44694898 100644 --- a/include/linux/msm_gsi.h +++ b/include/linux/msm_gsi.h @@ -751,6 +751,18 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl); int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb); +/** + * gsi_ring_evt_ring_db - Peripheral should call this function for + * ringing the event ring doorbell with given value + * + * @evt_ring_hdl: Client handle previously obtained from + * gsi_alloc_evt_ring + * @value: The value to be used for ringing the doorbell + * + * @Return gsi_status + */ +int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value); + /** * gsi_reset_evt_ring - Peripheral should call this function to * reset an event ring to recover from error state @@ -1142,6 +1154,12 @@ static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl, return -GSI_STATUS_UNSUPPORTED_OP; } +static inline int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, + uint64_t value) +{ + return -GSI_STATUS_UNSUPPORTED_OP; +} + static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl) { return -GSI_STATUS_UNSUPPORTED_OP; -- GitLab From d1e3cc9e3e594970a3b6b83f2d27f8c3c12b3058 Mon Sep 17 00:00:00 2001 From: Ghanim Fodi Date: Sun, 11 Jun 2017 12:35:41 +0300 Subject: [PATCH 316/786] msm: ipa3: Assert on GSI MHI event ring fatal error GSI MHI event ring error is a fatal error where there is no recovery possible. Data path will be stalled. This change will assert on this case. Change-Id: I9c94e44b2f2d5e1b0b8d059b871d1bd9ad2d3fcf Signed-off-by: Ghanim Fodi --- drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c index a02247d3e938..5aa39b699bd6 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1059,6 +1059,7 @@ static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify) IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id); } IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc); + ipa_assert(); } static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify) @@ -1090,6 +1091,7 @@ static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify) IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id); } IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc); + ipa_assert(); } -- GitLab From 168922d3572e4538545df9a64e950298b9d75e4e Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Fri, 9 Jun 2017 09:47:52 -0600 Subject: [PATCH 317/786] drivers: cpuidle: lpm-levels: Support for different CPUs in a cluster The newer FCM microarchitecture allows for CPUs with different performance and power characteristics to be part of the same cluster. The CPUs have different power performances, like latency and residency for different low power mode. Adding support to differentiate between CPUs within a single cluster. 
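As a rough sketch of the resulting data model, and assuming the driver-internal lpm-levels.h definitions this series modifies, a cluster now carries a list of lpm_cpu entries, each with its own cpumask and level table, instead of a single cpu pointer. The traversal below is illustrative only; the helper name is made up.

    #include <linux/list.h>
    #include <linux/cpumask.h>
    #include <linux/printk.h>
    #include "lpm-levels.h"

    /* Illustrative walk over every lpm_cpu group attached to a cluster and
     * every CPU that each group covers.
     */
    static void example_walk_cluster_cpus(struct lpm_cluster *cluster)
    {
            struct lpm_cpu *lpm_cpu;
            int cpu;

            list_for_each_entry(lpm_cpu, &cluster->cpu, list)
                    for_each_cpu(cpu, &lpm_cpu->related_cpus)
                            pr_debug("cpu%d: %d low power levels\n",
                                     cpu, lpm_cpu->nlevels);
    }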
Change-Id: I61fbe6d1b9e6963b5db269a079254be82a7b9d3c Signed-off-by: Mahesh Sivasubramanian --- .../bindings/arm/msm/lpm-levels.txt | 17 +- arch/arm64/boot/dts/qcom/sdm845-pm.dtsi | 56 ++++- drivers/cpuidle/lpm-levels-of.c | 229 ++++++++++-------- drivers/cpuidle/lpm-levels.c | 186 +++++++------- drivers/cpuidle/lpm-levels.h | 6 +- 5 files changed, 286 insertions(+), 208 deletions(-) diff --git a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt index ae476d07466e..797dbcc5cec4 100644 --- a/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt +++ b/Documentation/devicetree/bindings/arm/msm/lpm-levels.txt @@ -28,9 +28,6 @@ Required properties: - qcom,default-level: The default low power level that a cluster is programmed. The SPM of the corresponding device is configured at this low power mode by default. - - qcom,cpu: List of CPU phandles to identify the CPUs associated with - this cluster. This property is required if and only if the cluster - node contains a qcom,pm-cpu node. qcom,pm-cluster contains qcom,pm-cluster-level nodes which identify the various low power modes that the cluster can enter. The @@ -103,9 +100,13 @@ Required properties: power collapse (PC) [Node bindings for qcom,pm-cpu] -qcom,pm-cpu contains the low power modes that a cpu could enter. Currently it -doesn't have any required properties and is a container for -qcom,pm-cpu-levels. +qcom,pm-cpu contains the low power modes that a cpu could enter and the CPUs +that share the parameters.It contains the following properties. + - qcom,cpu: List of CPU phandles to identify the CPUs associated with + this cluster. + - qcom,pm-cpu-levels: The different low power modes that a CPU could + enter. The following section explains the required properties of this + node. 
[Node bindings for qcom,pm-cpu-levels] Required properties: @@ -184,7 +185,6 @@ qcom,lpm-levels { label = "a53"; qcom,spm-device-names = "l2"; qcom,default-level=<0>; - qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>; qcom,pm-cluster-level@0{ reg = <0>; @@ -210,6 +210,7 @@ qcom,lpm-levels { qcom,pm-cpu { #address-cells = <1>; #size-cells = <0>; + qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>; qcom,pm-cpu-level@0 { reg = <0>; qcom,spm-cpu-mode = "wfi"; @@ -255,7 +256,6 @@ qcom,lpm-levels { label = "a57"; qcom,spm-device-names = "l2"; qcom,default-level=<0>; - qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>; qcom,pm-cluster-level@0{ reg = <0>; @@ -281,6 +281,7 @@ qcom,lpm-levels { qcom,pm-cpu { #address-cells = <1>; #size-cells = <0>; + qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>; qcom,pm-cpu-level@0 { reg = <0>; qcom,spm-cpu-mode = "wfi"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi index 6806145be9c8..6215771b69ae 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-pm.dtsi @@ -23,8 +23,6 @@ #size-cells = <0>; label = "L3"; qcom,spm-device-names = "L3"; - qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 &CPU5 &CPU6 - &CPU7>; qcom,psci-mode-shift = <4>; qcom,psci-mode-mask = <0xfff>; @@ -86,12 +84,64 @@ qcom,is-reset; qcom,notify-rpm; }; + qcom,pm-cpu@0 { + #address-cells = <1>; + #size-cells = <0>; + qcom,psci-mode-shift = <0>; + qcom,psci-mode-mask = <0xf>; + qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>; + + qcom,pm-cpu-level@0 { /* C1 */ + reg = <0>; + qcom,spm-cpu-mode = "wfi"; + qcom,psci-cpu-mode = <0x1>; + qcom,latency-us = <43>; + qcom,ss-power = <454>; + qcom,energy-overhead = <38639>; + qcom,time-overhead = <83>; + }; + + qcom,pm-cpu-level@1 { /* C2D */ + reg = <1>; + qcom,psci-cpu-mode = <0x2>; + qcom,spm-cpu-mode = "ret"; + qcom,latency-us = <86>; + qcom,ss-power = <449>; + qcom,energy-overhead = <78456>; + qcom,time-overhead = <167>; + }; + + qcom,pm-cpu-level@2 { /* C3 */ + reg = <2>; + qcom,spm-cpu-mode = "pc"; + qcom,psci-cpu-mode = <0x3>; + qcom,latency-us = <612>; + qcom,ss-power = <436>; + qcom,energy-overhead = <418225>; + qcom,time-overhead = <885>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + + qcom,pm-cpu-level@3 { /* C4 */ + reg = <3>; + qcom,spm-cpu-mode = "rail-pc"; + qcom,psci-cpu-mode = <0x4>; + qcom,latency-us = <700>; + qcom,ss-power = <400>; + qcom,energy-overhead = <428225>; + qcom,time-overhead = <1000>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + }; - qcom,pm-cpu { + qcom,pm-cpu@1 { #address-cells = <1>; #size-cells = <0>; qcom,psci-mode-shift = <0>; qcom,psci-mode-mask = <0xf>; + qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>; qcom,pm-cpu-level@0 { /* C1 */ reg = <0>; diff --git a/drivers/cpuidle/lpm-levels-of.c b/drivers/cpuidle/lpm-levels-of.c index 2201196c89a9..39e0484077b0 100644 --- a/drivers/cpuidle/lpm-levels-of.c +++ b/drivers/cpuidle/lpm-levels-of.c @@ -305,6 +305,7 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent) struct lpm_level_avail *level_list = NULL; char cpu_name[20] = {0}; int ret = 0; + struct list_head *pos; cpu_kobj = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu_kobj) * cpumask_weight(&p->child_cpus), GFP_KERNEL); @@ -312,38 +313,45 @@ static int create_cpu_lvl_nodes(struct lpm_cluster *p, struct kobject *parent) return -ENOMEM; cpu_idx = 0; - for_each_cpu(cpu, &p->child_cpus) { - snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu); - cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, parent); - if (!cpu_kobj[cpu_idx]) { - ret = -ENOMEM; - goto release_kobj; - } + 
list_for_each(pos, &p->cpu) { + struct lpm_cpu *lpm_cpu = list_entry(pos, struct lpm_cpu, list); + + for_each_cpu(cpu, &lpm_cpu->related_cpus) { + snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu); + cpu_kobj[cpu_idx] = kobject_create_and_add(cpu_name, + parent); + if (!cpu_kobj[cpu_idx]) { + ret = -ENOMEM; + goto release_kobj; + } - level_list = devm_kzalloc(&lpm_pdev->dev, - p->cpu->nlevels * sizeof(*level_list), - GFP_KERNEL); - if (!level_list) { - ret = -ENOMEM; - goto release_kobj; - } + level_list = devm_kzalloc(&lpm_pdev->dev, + lpm_cpu->nlevels * sizeof(*level_list), + GFP_KERNEL); + if (!level_list) { + ret = -ENOMEM; + goto release_kobj; + } - /* - * Skip enable/disable for WFI. cpuidle expects WFI to be - * available at all times. - */ - for (i = 1; i < p->cpu->nlevels; i++) { + /* + * Skip enable/disable for WFI. cpuidle expects WFI to + * be available at all times. + */ + for (i = 1; i < lpm_cpu->nlevels; i++) { + level_list[i].latency_us = + p->levels[i].pwr.latency_us; + ret = create_lvl_avail_nodes( + lpm_cpu->levels[i].name, + cpu_kobj[cpu_idx], + &level_list[i], + (void *)lpm_cpu, cpu, true); + if (ret) + goto release_kobj; + } - level_list[i].latency_us = p->levels[i].pwr.latency_us; - ret = create_lvl_avail_nodes(p->cpu->levels[i].name, - cpu_kobj[cpu_idx], &level_list[i], - (void *)p->cpu, cpu, true); - if (ret) - goto release_kobj; + cpu_level_available[cpu] = level_list; + cpu_idx++; } - - cpu_level_available[cpu] = level_list; - cpu_idx++; } return ret; @@ -384,7 +392,7 @@ int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj) return ret; } - if (p->cpu) { + if (!list_empty(&p->cpu)) { ret = create_cpu_lvl_nodes(p, cluster_kobj); if (ret) return ret; @@ -619,49 +627,26 @@ static int calculate_residency(struct power_params *base_pwr, next_pwr->time_overhead_us : residency; } -static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) +static int parse_cpu(struct device_node *node, struct lpm_cpu *cpu) { - struct device_node *n; - int ret = -ENOMEM; - int i, j; - char *key; - - c->cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*c->cpu), GFP_KERNEL); - if (!c->cpu) - return ret; - c->cpu->parent = c; - - key = "qcom,psci-mode-shift"; - - ret = of_property_read_u32(node, key, &c->cpu->psci_mode_shift); - if (ret) { - pr_err("Failed reading %s on device %s\n", key, - node->name); - return ret; - } - key = "qcom,psci-mode-mask"; - - ret = of_property_read_u32(node, key, &c->cpu->psci_mode_mask); - if (ret) { - pr_err("Failed reading %s on device %s\n", key, - node->name); - return ret; - } + struct device_node *n; + int ret, i, j; + const char *key; for_each_child_of_node(node, n) { - struct lpm_cpu_level *l = &c->cpu->levels[c->cpu->nlevels]; + struct lpm_cpu_level *l = &cpu->levels[cpu->nlevels]; - c->cpu->nlevels++; + cpu->nlevels++; ret = parse_cpu_mode(n, l); if (ret < 0) { pr_info("Failed %s\n", l->name); - goto failed; + return ret; } ret = parse_power_params(n, &l->pwr); if (ret) - goto failed; + return ret; key = "qcom,use-broadcast-timer"; l->use_bc_timer = of_property_read_bool(n, key); @@ -676,32 +661,83 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) if (ret == -EINVAL) l->reset_level = LPM_RESET_LVL_NONE; else if (ret) - goto failed; + return ret; } - for (i = 0; i < c->cpu->nlevels; i++) { - for (j = 0; j < c->cpu->nlevels; j++) { + for (i = 0; i < cpu->nlevels; i++) { + for (j = 0; j < cpu->nlevels; j++) { if (i >= j) { - c->cpu->levels[i].pwr.residencies[j] = 0; + 
cpu->levels[i].pwr.residencies[j] = 0; continue; } - c->cpu->levels[i].pwr.residencies[j] = - calculate_residency(&c->cpu->levels[i].pwr, - &c->cpu->levels[j].pwr); + cpu->levels[i].pwr.residencies[j] = + calculate_residency(&cpu->levels[i].pwr, + &cpu->levels[j].pwr); pr_err("%s: idx %d %u\n", __func__, j, - c->cpu->levels[i].pwr.residencies[j]); + cpu->levels[i].pwr.residencies[j]); } } + for_each_cpu(i, &cpu->related_cpus) { + per_cpu(max_residency, i) = devm_kzalloc(&lpm_pdev->dev, + sizeof(uint32_t) * cpu->nlevels, + GFP_KERNEL); + if (!per_cpu(max_residency, i)) + return -ENOMEM; + per_cpu(min_residency, i) = devm_kzalloc( + &lpm_pdev->dev, + sizeof(uint32_t) * cpu->nlevels, + GFP_KERNEL); + if (!per_cpu(min_residency, i)) + return -ENOMEM; + set_optimum_cpu_residency(cpu, i, true); + } + + return 0; +} + +static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) +{ + int ret = -ENOMEM, i; + char *key; + struct lpm_cpu *cpu; + + cpu = devm_kzalloc(&lpm_pdev->dev, sizeof(*cpu), GFP_KERNEL); + if (!cpu) + return ret; + + if (get_cpumask_for_node(node, &cpu->related_cpus)) + return -EINVAL; + + cpu->parent = c; + + key = "qcom,psci-mode-shift"; + ret = of_property_read_u32(node, key, &cpu->psci_mode_shift); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + node->name); + return ret; + } + key = "qcom,psci-mode-mask"; + ret = of_property_read_u32(node, key, &cpu->psci_mode_mask); + if (ret) { + pr_err("Failed reading %s on device %s\n", key, + node->name); + return ret; + } + + if (parse_cpu(node, cpu)) + goto failed; + cpumask_or(&c->child_cpus, &c->child_cpus, &cpu->related_cpus); + list_add(&cpu->list, &c->cpu); return 0; failed: - for (i = 0; i < c->cpu->nlevels; i++) { - kfree(c->cpu->levels[i].name); - c->cpu->levels[i].name = NULL; + for (i = 0; i < cpu->nlevels; i++) { + kfree(cpu->levels[i].name); + cpu->levels[i].name = NULL; } - kfree(c->cpu); - c->cpu = NULL; + kfree(cpu); pr_err("%s(): Failed with error code:%d\n", __func__, ret); return ret; } @@ -709,6 +745,7 @@ static int parse_cpu_levels(struct device_node *node, struct lpm_cluster *c) void free_cluster_node(struct lpm_cluster *cluster) { struct list_head *list; + struct lpm_cpu *cpu, *n; int i; list_for_each(list, &cluster->child) { @@ -719,19 +756,20 @@ void free_cluster_node(struct lpm_cluster *cluster) free_cluster_node(n); }; - if (cluster->cpu) { - for (i = 0; i < cluster->cpu->nlevels; i++) { - kfree(cluster->cpu->levels[i].name); - cluster->cpu->levels[i].name = NULL; + list_for_each_entry_safe(cpu, n, &cluster->cpu, list) { + struct lpm_cpu *cpu = list_entry(list, typeof(*cpu), list); + + for (i = 0; i < cpu->nlevels; i++) { + kfree(cpu->levels[i].name); + cpu->levels[i].name = NULL; } + list_del(list); } for (i = 0; i < cluster->nlevels; i++) { kfree(cluster->levels[i].mode); cluster->levels[i].mode = NULL; } - kfree(cluster->cpu); kfree(cluster->name); - cluster->cpu = NULL; cluster->name = NULL; cluster->ndevices = 0; } @@ -761,6 +799,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node, goto failed_parse_params; INIT_LIST_HEAD(&c->child); + INIT_LIST_HEAD(&c->cpu); c->parent = parent; spin_lock_init(&c->sync_lock); c->min_child_level = NR_LPM_LEVELS; @@ -793,34 +832,11 @@ struct lpm_cluster *parse_cluster(struct device_node *node, key = "qcom,pm-cpu"; if (!of_node_cmp(n->name, key)) { - /* - * Parse the the cpu node only if a pm-cpu node - * is available, though the mask is defined @ the - * cluster level - */ - if (get_cpumask_for_node(node, &c->child_cpus)) - 
goto failed_parse_cluster; - if (parse_cpu_levels(n, c)) goto failed_parse_cluster; c->aff_level = 1; - for_each_cpu(i, &c->child_cpus) { - per_cpu(max_residency, i) = devm_kzalloc( - &lpm_pdev->dev, - sizeof(uint32_t) * c->cpu->nlevels, - GFP_KERNEL); - if (!per_cpu(max_residency, i)) - return ERR_PTR(-ENOMEM); - per_cpu(min_residency, i) = devm_kzalloc( - &lpm_pdev->dev, - sizeof(uint32_t) * c->cpu->nlevels, - GFP_KERNEL); - if (!per_cpu(min_residency, i)) - return ERR_PTR(-ENOMEM); - set_optimum_cpu_residency(c->cpu, i, true); - } } } @@ -870,6 +886,7 @@ struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev) void cluster_dt_walkthrough(struct lpm_cluster *cluster) { struct list_head *list; + struct lpm_cpu *cpu; int i, j; static int id; char str[10] = {0}; @@ -890,12 +907,12 @@ void cluster_dt_walkthrough(struct lpm_cluster *cluster) &cluster->name[j], &l->mode[i]); } - if (cluster->cpu) { + list_for_each_entry(cpu, &cluster->cpu, list) { pr_info("%d\n", __LINE__); - for (j = 0; j < cluster->cpu->nlevels; j++) + for (j = 0; j < cpu->nlevels; j++) pr_info("%s\tCPU mode: %s id:%d\n", str, - cluster->cpu->levels[j].name, - cluster->cpu->levels[j].mode); + cpu->levels[j].name, + cpu->levels[j].mode); } id++; diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 1a01e3fd30c8..79f6c52708e1 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -104,7 +104,7 @@ struct lpm_history { static DEFINE_PER_CPU(struct lpm_history, hist); -static DEFINE_PER_CPU(struct lpm_cluster*, cpu_cluster); +static DEFINE_PER_CPU(struct lpm_cpu*, cpu_lpm); static bool suspend_in_progress; static struct hrtimer lpm_hrtimer; static struct hrtimer histtimer; @@ -207,7 +207,7 @@ static uint32_t least_cpu_latency(struct list_head *child, struct power_params *pwr_params; struct lpm_cpu *cpu; struct lpm_cluster *n; - uint32_t latency = 0; + uint32_t lat = 0; int i; list_for_each(list, child) { @@ -216,19 +216,21 @@ static uint32_t least_cpu_latency(struct list_head *child, if (strcmp(lat_level->level_name, n->cluster_name)) continue; } - cpu = n->cpu; - for (i = 0; i < cpu->nlevels; i++) { - level = &cpu->levels[i]; - pwr_params = &level->pwr; - if (lat_level->reset_level == level->reset_level) { - if ((latency > pwr_params->latency_us) - || (!latency)) - latency = pwr_params->latency_us; - break; + list_for_each_entry(cpu, &n->cpu, list) { + for (i = 0; i < cpu->nlevels; i++) { + level = &cpu->levels[i]; + pwr_params = &level->pwr; + if (lat_level->reset_level + == level->reset_level) { + if ((lat > pwr_params->latency_us) + || (!lat)) + lat = pwr_params->latency_us; + break; + } } } } - return latency; + return lat; } static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster, @@ -237,9 +239,9 @@ static struct lpm_cluster *cluster_aff_match(struct lpm_cluster *cluster, struct lpm_cluster *n; if ((cluster->aff_level == affinity_level) - || ((cluster->cpu) && (affinity_level == 0))) + || ((!list_empty(&cluster->cpu)) && (affinity_level == 0))) return cluster; - else if (!cluster->cpu) { + else if (list_empty(&cluster->cpu)) { n = list_entry(cluster->child.next, typeof(*n), list); return cluster_aff_match(n, affinity_level); } else @@ -314,7 +316,7 @@ static void update_debug_pc_event(enum debug_event event, uint32_t arg1, static int lpm_dying_cpu(unsigned int cpu) { - struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent; cluster_prepare(cluster, get_cpu_mask(cpu), 
NR_LPM_LEVELS, false, 0); return 0; @@ -322,7 +324,7 @@ static int lpm_dying_cpu(unsigned int cpu) static int lpm_starting_cpu(unsigned int cpu) { - struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent; cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0); return 0; @@ -376,7 +378,7 @@ static void cluster_timer_init(struct lpm_cluster *cluster) static void clusttimer_cancel(void) { int cpu = raw_smp_processor_id(); - struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); + struct lpm_cluster *cluster = per_cpu(cpu_lpm, cpu)->parent; hrtimer_try_to_cancel(&cluster->histtimer); @@ -1169,12 +1171,11 @@ static void cluster_unprepare(struct lpm_cluster *cluster, spin_unlock(&cluster->sync_lock); } -static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index, +static inline void cpu_prepare(struct lpm_cpu *cpu, int cpu_index, bool from_idle) { - struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index]; - bool jtag_save_restore = - cluster->cpu->levels[cpu_index].jtag_save_restore; + struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index]; + bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore; /* Use broadcast timer for aggregating sleep mode within a cluster. * A broadcast timer could be used in the following scenarios @@ -1202,12 +1203,11 @@ static inline void cpu_prepare(struct lpm_cluster *cluster, int cpu_index, msm_jtag_save_state(); } -static inline void cpu_unprepare(struct lpm_cluster *cluster, int cpu_index, +static inline void cpu_unprepare(struct lpm_cpu *cpu, int cpu_index, bool from_idle) { - struct lpm_cpu_level *cpu_level = &cluster->cpu->levels[cpu_index]; - bool jtag_save_restore = - cluster->cpu->levels[cpu_index].jtag_save_restore; + struct lpm_cpu_level *cpu_level = &cpu->levels[cpu_index]; + bool jtag_save_restore = cpu->levels[cpu_index].jtag_save_restore; if (from_idle && cpu_level->use_bc_timer) tick_broadcast_exit(); @@ -1253,13 +1253,12 @@ int get_cluster_id(struct lpm_cluster *cluster, int *aff_lvl) return state_id; } -static bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, - bool from_idle) +static bool psci_enter_sleep(struct lpm_cpu *cpu, int idx, bool from_idle) { int affinity_level = 0; - int state_id = get_cluster_id(cluster, &affinity_level); + int state_id = get_cluster_id(cpu->parent, &affinity_level); int power_state = - PSCI_POWER_STATE(cluster->cpu->levels[idx].is_reset); + PSCI_POWER_STATE(cpu->levels[idx].is_reset); bool success = false; /* * idx = 0 is the default LPM state @@ -1273,7 +1272,7 @@ static bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, affinity_level = PSCI_AFFINITY_LEVEL(affinity_level); state_id |= (power_state | affinity_level - | cluster->cpu->levels[idx].psci_id); + | cpu->levels[idx].psci_id); update_debug_pc_event(CPU_ENTER, state_id, 0xdeaffeed, 0xdeaffeed, true); @@ -1288,13 +1287,13 @@ static bool psci_enter_sleep(struct lpm_cluster *cluster, int idx, static int lpm_cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) { - struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu); + struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu); int idx; - if (!cluster) + if (!cpu) return 0; - idx = cpu_power_select(dev, cluster->cpu); + idx = cpu_power_select(dev, cpu); if (idx < 0) return 0; @@ -1338,18 +1337,18 @@ static void update_history(struct cpuidle_device *dev, int idx) static int lpm_cpuidle_enter(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { - 
struct lpm_cluster *cluster = per_cpu(cpu_cluster, dev->cpu); + struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu); bool success = true; const struct cpumask *cpumask = get_cpu_mask(dev->cpu); int64_t start_time = ktime_to_ns(ktime_get()), end_time; struct power_params *pwr_params; - pwr_params = &cluster->cpu->levels[idx].pwr; + pwr_params = &cpu->levels[idx].pwr; - pwr_params = &cluster->cpu->levels[idx].pwr; + pwr_params = &cpu->levels[idx].pwr; - cpu_prepare(cluster, idx, true); - cluster_prepare(cluster, cpumask, idx, true, ktime_to_ns(ktime_get())); + cpu_prepare(cpu, idx, true); + cluster_prepare(cpu->parent, cpumask, idx, true, start_time); trace_cpu_idle_enter(idx); lpm_stats_cpu_enter(idx, start_time); @@ -1357,14 +1356,14 @@ static int lpm_cpuidle_enter(struct cpuidle_device *dev, if (need_resched() || (idx < 0)) goto exit; - success = psci_enter_sleep(cluster, idx, true); + success = psci_enter_sleep(cpu, idx, true); exit: end_time = ktime_to_ns(ktime_get()); lpm_stats_cpu_exit(idx, end_time, success); - cluster_unprepare(cluster, cpumask, idx, true, end_time); - cpu_unprepare(cluster, idx, true); + cluster_unprepare(cpu->parent, cpumask, idx, true, end_time); + cpu_unprepare(cpu, idx, true); sched_set_cpu_cstate(smp_processor_id(), 0, 0, 0); end_time = ktime_to_ns(ktime_get()) - start_time; do_div(end_time, 1000); @@ -1434,8 +1433,9 @@ static int cluster_cpuidle_register(struct lpm_cluster *cl) int i = 0, ret = 0; unsigned int cpu; struct lpm_cluster *p = NULL; + struct lpm_cpu *lpm_cpu; - if (!cl->cpu) { + if (list_empty(&cl->cpu)) { struct lpm_cluster *n; list_for_each_entry(n, &cl->child, list) { @@ -1446,51 +1446,56 @@ static int cluster_cpuidle_register(struct lpm_cluster *cl) return ret; } - cl->drv = kcalloc(1, sizeof(*cl->drv), GFP_KERNEL); - if (!cl->drv) - return -ENOMEM; + list_for_each_entry(lpm_cpu, &cl->cpu, list) { + lpm_cpu->drv = kcalloc(1, sizeof(*lpm_cpu->drv), GFP_KERNEL); + if (!lpm_cpu->drv) + return -ENOMEM; - cl->drv->name = "msm_idle"; + lpm_cpu->drv->name = "msm_idle"; - for (i = 0; i < cl->cpu->nlevels; i++) { - struct cpuidle_state *st = &cl->drv->states[i]; - struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i]; + for (i = 0; i < lpm_cpu->nlevels; i++) { + struct cpuidle_state *st = &lpm_cpu->drv->states[i]; + struct lpm_cpu_level *cpu_level = &lpm_cpu->levels[i]; - snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i); - snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name); - st->flags = 0; - st->exit_latency = cpu_level->pwr.latency_us; - st->power_usage = cpu_level->pwr.ss_power; - st->target_residency = 0; - st->enter = lpm_cpuidle_enter; - } + snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i); + snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name); + st->flags = 0; + st->exit_latency = cpu_level->pwr.latency_us; + st->power_usage = cpu_level->pwr.ss_power; + st->target_residency = 0; + st->enter = lpm_cpuidle_enter; + } - cl->drv->state_count = cl->cpu->nlevels; - cl->drv->safe_state_index = 0; - for_each_cpu(cpu, &cl->child_cpus) - per_cpu(cpu_cluster, cpu) = cl; + lpm_cpu->drv->state_count = lpm_cpu->nlevels; + lpm_cpu->drv->safe_state_index = 0; + for_each_cpu(cpu, &lpm_cpu->related_cpus) + per_cpu(cpu_lpm, cpu) = lpm_cpu; - for_each_possible_cpu(cpu) { - if (cpu_online(cpu)) - continue; - p = per_cpu(cpu_cluster, cpu); - while (p) { - int j; - - spin_lock(&p->sync_lock); - cpumask_set_cpu(cpu, &p->num_children_in_sync); - for (j = 0; j < p->nlevels; j++) - cpumask_copy(&p->levels[j].num_cpu_votes, + for_each_possible_cpu(cpu) { + if 
(cpu_online(cpu)) + continue; + if (per_cpu(cpu_lpm, cpu)) + p = per_cpu(cpu_lpm, cpu)->parent; + while (p) { + int j; + + spin_lock(&p->sync_lock); + cpumask_set_cpu(cpu, &p->num_children_in_sync); + for (j = 0; j < p->nlevels; j++) + cpumask_copy( + &p->levels[j].num_cpu_votes, &p->num_children_in_sync); - spin_unlock(&p->sync_lock); - p = p->parent; + spin_unlock(&p->sync_lock); + p = p->parent; + } } - } - ret = cpuidle_register_cpu(cl->drv, &cl->child_cpus); + ret = cpuidle_register_cpu(lpm_cpu->drv, + &lpm_cpu->related_cpus); - if (ret) { - kfree(cl->drv); - return -ENOMEM; + if (ret) { + kfree(lpm_cpu->drv); + return -ENOMEM; + } } return 0; } @@ -1520,7 +1525,7 @@ static void register_cpu_lpm_stats(struct lpm_cpu *cpu, level_name[i] = cpu->levels[i].name; lpm_stats_config_level("cpu", level_name, cpu->nlevels, - parent->stats, &parent->child_cpus); + parent->stats, &cpu->related_cpus); kfree(level_name); } @@ -1529,8 +1534,9 @@ static void register_cluster_lpm_stats(struct lpm_cluster *cl, struct lpm_cluster *parent) { const char **level_name; - int i; struct lpm_cluster *child; + struct lpm_cpu *cpu; + int i; if (!cl) return; @@ -1548,10 +1554,12 @@ static void register_cluster_lpm_stats(struct lpm_cluster *cl, kfree(level_name); - if (cl->cpu) { - register_cpu_lpm_stats(cl->cpu, cl); - return; + list_for_each_entry(cpu, &cl->cpu, list) { + pr_err("%s()\n", __func__); + register_cpu_lpm_stats(cpu, cl); } + if (!list_empty(&cl->cpu)) + return; list_for_each_entry(child, &cl->child, list) register_cluster_lpm_stats(child, cl); @@ -1574,8 +1582,8 @@ static void lpm_suspend_wake(void) static int lpm_suspend_enter(suspend_state_t state) { int cpu = raw_smp_processor_id(); - struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu); - struct lpm_cpu *lpm_cpu = cluster->cpu; + struct lpm_cpu *lpm_cpu = per_cpu(cpu_lpm, cpu); + struct lpm_cluster *cluster = lpm_cpu->parent; const struct cpumask *cpumask = get_cpu_mask(cpu); int idx; @@ -1588,7 +1596,7 @@ static int lpm_suspend_enter(suspend_state_t state) pr_err("Failed suspend\n"); return 0; } - cpu_prepare(cluster, idx, false); + cpu_prepare(lpm_cpu, idx, false); cluster_prepare(cluster, cpumask, idx, false, 0); if (idx > 0) update_debug_pc_event(CPU_ENTER, idx, 0xdeaffeed, @@ -1601,14 +1609,14 @@ static int lpm_suspend_enter(suspend_state_t state) * LPMs(XO and Vmin). 
*/ - psci_enter_sleep(cluster, idx, true); + psci_enter_sleep(lpm_cpu, idx, true); if (idx > 0) update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed, false); cluster_unprepare(cluster, cpumask, idx, false, 0); - cpu_unprepare(cluster, idx, false); + cpu_unprepare(lpm_cpu, idx, false); return 0; } diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h index 9875edd0ef54..c9f272e4b458 100644 --- a/drivers/cpuidle/lpm-levels.h +++ b/drivers/cpuidle/lpm-levels.h @@ -45,10 +45,13 @@ struct lpm_cpu_level { }; struct lpm_cpu { + struct list_head list; + struct cpumask related_cpus; struct lpm_cpu_level levels[NR_LPM_LEVELS]; int nlevels; unsigned int psci_mode_shift; unsigned int psci_mode_mask; + struct cpuidle_driver *drv; struct lpm_cluster *parent; }; @@ -104,8 +107,7 @@ struct lpm_cluster { int min_child_level; int default_level; int last_level; - struct lpm_cpu *cpu; - struct cpuidle_driver *drv; + struct list_head cpu; spinlock_t sync_lock; struct cpumask child_cpus; struct cpumask num_children_in_sync; -- GitLab From 2f2b4b69b883898f69260278e0f59d9eccccc373 Mon Sep 17 00:00:00 2001 From: Mahesh Sivasubramanian Date: Mon, 26 Jun 2017 08:46:49 -0600 Subject: [PATCH 318/786] drivers: cpuidle: lpm-levels: Remove conversion to SCLK cycles The system_pm driver takes sleep time in microseconds. The programming in SCLK was required by earlier SOCs that expected sleep time in SCLK ticks. On newer, chipsets this division results in a shorter sleep duration being programmed at the System timer. Remove conversion from microseconds to SCLK cycles. Change-Id: I8fc0449f051de9650831af49a2315fb158660c0c Signed-off-by: Mahesh Sivasubramanian --- drivers/cpuidle/lpm-levels.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 8b59beee4b4a..e091fd83d61d 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -1045,7 +1045,6 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx, clear_predict_history(); clear_cl_predict_history(); - do_div(us, USEC_PER_SEC/SCLK_HZ); system_sleep_enter(us); } /* Notify cluster enter event after successfully config completion */ -- GitLab From b17d98c5b5f3ad20baffc1be7c1074ca81abdef7 Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Fri, 23 Jun 2017 10:41:23 -0600 Subject: [PATCH 319/786] drivers: thermal: Use deferrable work and power efficient workqueue Thermal core uses work events to poll for sensor driver temperature crossing a threshold. Since it is not using a deferrable workqueue, it might wake-up the device from sleep. Use a deferrable work event and post the work in the power efficient workqueue for estimating virtual sensor temperature. 
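A minimal sketch of the pattern this patch switches to, namely a deferrable delayed work item queued on the power-efficient workqueue, is shown below. The work name and the 100 ms interval are made up for illustration; the workqueue interfaces themselves are standard kernel APIs.

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void example_poll_fn(struct work_struct *work);
    static DECLARE_DEFERRABLE_WORK(example_poll_work, example_poll_fn);

    /* Deferrable work does not wake an idle CPU just to run the poll, and the
     * power-efficient workqueue lets the scheduler place it on a busy CPU.
     */
    static void example_poll_fn(struct work_struct *work)
    {
            /* sample the sensor here, then re-arm the poll */
            queue_delayed_work(system_freezable_power_efficient_wq,
                               &example_poll_work, msecs_to_jiffies(100));
    }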
Change-Id: I9dd21d8fc4e5ca96e06db9ecb57a628618494a01 Signed-off-by: Ram Chandrasekar --- drivers/thermal/thermal_core.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 4c1ccee68c12..68d9feb0520d 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -418,8 +418,9 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz) thermal_zone_device_set_polling(thermal_passive_wq, tz, tz->passive_delay); else if (tz->polling_delay) - thermal_zone_device_set_polling(system_freezable_wq, - tz, tz->polling_delay); + thermal_zone_device_set_polling( + system_freezable_power_efficient_wq, + tz, tz->polling_delay); else thermal_zone_device_set_polling(NULL, tz, 0); @@ -2134,7 +2135,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, /* Bind cooling devices for this zone */ bind_tz(tz); - INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check); + INIT_DEFERRABLE_WORK(&(tz->poll_queue), thermal_zone_device_check); thermal_zone_device_reset(tz); /* Update the new thermal zone and mark it as already updated. */ -- GitLab From debcd41d0a4f50e0e64002daf9d63107f2b52630 Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Fri, 23 Jun 2017 13:47:38 -0600 Subject: [PATCH 320/786] drivers: thermal: virtual-sensor: update the virtual sensor name Update the virtual sensor names from silver-virt-max-usr and gold-virt-max-usr to silv-virt-max-step and gold-virt-max-step respectively. The name change reflects that the virtual sensors will be using the step_wise governor instead of userspace governor. Change-Id: I57de42315f2638c3f4b78ca79d9597b568797d77 Signed-off-by: Ram Chandrasekar --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 16 ++++++++-------- drivers/thermal/qcom/qti_virtual_sensor.c | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 1eaeb59d9785..2f718bb83cf2 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -3261,10 +3261,10 @@ }; }; - silver-virt-max-usr { - polling-delay-passive = <100>; - polling-delay = <100>; - thermal-governor = "user_space"; + silv-virt-max-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; trips { silver-trip { temperature = <120000>; @@ -3274,10 +3274,10 @@ }; }; - gold-virt-max-usr { - polling-delay-passive = <100>; - polling-delay = <100>; - thermal-governor = "user_space"; + gold-virt-max-step { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "step_wise"; trips { gold-trip { temperature = <120000>; diff --git a/drivers/thermal/qcom/qti_virtual_sensor.c b/drivers/thermal/qcom/qti_virtual_sensor.c index 3064c74894ad..923680abea4f 100644 --- a/drivers/thermal/qcom/qti_virtual_sensor.c +++ b/drivers/thermal/qcom/qti_virtual_sensor.c @@ -29,7 +29,7 @@ static const struct virtual_sensor_data qti_virtual_sensors[] = { .logic = VIRT_MAXIMUM, }, { - .virt_zone_name = "silver-virt-max-usr", + .virt_zone_name = "silv-virt-max-step", .num_sensors = 4, .sensor_names = {"cpu0-silver-usr", "cpu1-silver-usr", @@ -38,7 +38,7 @@ static const struct virtual_sensor_data qti_virtual_sensors[] = { .logic = VIRT_MAXIMUM, }, { - .virt_zone_name = "gold-virt-max-usr", + .virt_zone_name = "gold-virt-max-step", .num_sensors = 4, .sensor_names = {"cpu0-gold-usr", "cpu1-gold-usr", -- GitLab From 
0f5ac9591f265b87f5d9e6c0f04ee8f003f8b388 Mon Sep 17 00:00:00 2001 From: "Bao D. Nguyen" Date: Wed, 14 Jun 2017 12:42:41 -0700 Subject: [PATCH 321/786] mmc: sdhci-msm: Corrected the '&' operator with '&&' in the 'if' statement Without this fix, the MMC driver may incorrectly set the CAPS for the SD card. Change-Id: Ia4be2e78453663818cfd18deb9a590ec80423bca Signed-off-by: Bao D. Nguyen --- drivers/mmc/host/sdhci-msm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 1d9cf344f463..fea297143c11 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -1824,7 +1824,7 @@ struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev, } pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags); - if (gpio_is_valid(pdata->status_gpio) & !(flags & OF_GPIO_ACTIVE_LOW)) + if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW)) pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; of_property_read_u32(np, "qcom,bus-width", &bus_width); -- GitLab From c1fdb2567f05e41f5c4c4a51580aad0819a5fc04 Mon Sep 17 00:00:00 2001 From: Caesar Wang Date: Tue, 23 Aug 2016 11:47:02 +0100 Subject: [PATCH 322/786] sched/fair: remove printk while schedule is in progress Calling printk while schedule is in progress can cause a deadlock and an endless while(1) loop. The blocked state looks like this: cpu0 (holds the console sem): printk->console_unlock->up_sem->spin_lock(&sem->lock)->wake_up_process(cpu1) ->try_to_wake_up(cpu1)->while(p->on_cpu). cpu1 (requests the console sem): console_lock->down_sem->schedule->idle_balance->update_cpu_capacity-> printk->console_trylock->spin_lock(&sem->lock). p->on_cpu stays 1 because the task is still running on cpu1, so cpu0 is stuck in while(p->on_cpu); cpu1 in turn cannot take spin_lock(&sem->lock), so it is blocked too, and the task keeps running on cpu1 forever. Change-Id: I61f5633b5e8dfa4fb6bac801dc1732a5494a0a88 Signed-off-by: Caesar Wang Git-commit: df232437710122fcb4e4a0484a1eded5aec29a6a Git-repo: https://android.googlesource.com/kernel/msm Signed-off-by: Pavankumar Kondeti --- kernel/sched/fair.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 45f404b0a3c5..4d7c0546e36a 100755 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8430,7 +8430,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) mcc->cpu = cpu; #ifdef CONFIG_SCHED_DEBUG raw_spin_unlock_irqrestore(&mcc->lock, flags); - pr_info("CPU%d: update max cpu_capacity %lu\n", cpu, capacity); + printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n", + cpu, capacity); goto skip_unlock; #endif } -- GitLab From c2fb8ead235c683a6477a68a56249454235c302b Mon Sep 17 00:00:00 2001 From: William Wu Date: Tue, 25 Apr 2017 17:45:48 +0800 Subject: [PATCH 323/786] UPSTREAM: usb: gadget: f_fs: avoid out of bounds access on comp_desc The Companion descriptor is only used for SuperSpeed endpoints; if the endpoints are HighSpeed or FullSpeed, the Companion descriptor will not be allocated, so we can only access it if the gadget is SuperSpeed. I can reproduce this issue on a Rockchip rk3368 SoC platform, which supports USB 2.0 and uses functionfs for ADB.
Kernel build with CONFIG_KASAN=y and CONFIG_SLUB_DEBUG=y report the following BUG: ================================================================== BUG: KASAN: slab-out-of-bounds in ffs_func_set_alt+0x224/0x3a0 at addr ffffffc0601f6509 Read of size 1 by task swapper/0/0 ============================================================================ BUG kmalloc-256 (Not tainted): kasan: bad access detected ---------------------------------------------------------------------------- Disabling lock debugging due to kernel taint INFO: Allocated in ffs_func_bind+0x52c/0x99c age=1275 cpu=0 pid=1 alloc_debug_processing+0x128/0x17c ___slab_alloc.constprop.58+0x50c/0x610 __slab_alloc.isra.55.constprop.57+0x24/0x34 __kmalloc+0xe0/0x250 ffs_func_bind+0x52c/0x99c usb_add_function+0xd8/0x1d4 configfs_composite_bind+0x48c/0x570 udc_bind_to_driver+0x6c/0x170 usb_udc_attach_driver+0xa4/0xd0 gadget_dev_desc_UDC_store+0xcc/0x118 configfs_write_file+0x1a0/0x1f8 __vfs_write+0x64/0x174 vfs_write+0xe4/0x200 SyS_write+0x68/0xc8 el0_svc_naked+0x24/0x28 INFO: Freed in inode_doinit_with_dentry+0x3f0/0x7c4 age=1275 cpu=7 pid=247 ... Call trace: [] dump_backtrace+0x0/0x230 [] show_stack+0x14/0x1c [] dump_stack+0xa0/0xc8 [] print_trailer+0x188/0x198 [] object_err+0x3c/0x4c [] kasan_report+0x324/0x4dc [] __asan_load1+0x24/0x50 [] ffs_func_set_alt+0x224/0x3a0 [] composite_setup+0xdcc/0x1ac8 [] android_setup+0x124/0x1a0 [] _setup+0x54/0x74 [] handle_ep0+0x3288/0x4390 [] dwc_otg_pcd_handle_out_ep_intr+0x14dc/0x2ae4 [] dwc_otg_pcd_handle_intr+0x1ec/0x298 [] dwc_otg_pcd_irq+0x10/0x20 [] handle_irq_event_percpu+0x124/0x3ac [] handle_irq_event+0x60/0xa0 [] handle_fasteoi_irq+0x10c/0x1d4 [] generic_handle_irq+0x30/0x40 [] __handle_domain_irq+0xac/0xdc [] gic_handle_irq+0x64/0xa4 ... Memory state around the buggy address: ffffffc0601f6400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ffffffc0601f6480: 00 00 00 00 00 00 00 00 00 00 06 fc fc fc fc fc >ffffffc0601f6500: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ^ ffffffc0601f6580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffffffc0601f6600: fc fc fc fc fc fc fc fc 00 00 00 00 00 00 00 00 ================================================================== Signed-off-by: William Wu Signed-off-by: Felipe Balbi (cherry picked from commit b7f73850bb4fac1e2209a4dd5e636d39be92f42c) Signed-off-by: Jerry Zhang --- drivers/usb/gadget/function/f_fs.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 8a788d69e4d5..f9c99803a43d 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1858,12 +1858,12 @@ static int ffs_func_eps_enable(struct ffs_function *func) ep->ep->driver_data = ep; ep->ep->desc = ds; - comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + - USB_DT_ENDPOINT_SIZE); - ep->ep->maxburst = comp_desc->bMaxBurst + 1; - - if (needs_comp_desc) + if (needs_comp_desc) { + comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + + USB_DT_ENDPOINT_SIZE); + ep->ep->maxburst = comp_desc->bMaxBurst + 1; ep->ep->comp_desc = comp_desc; + } ret = usb_ep_enable(ep->ep); if (likely(!ret)) { -- GitLab From a9a212a939617d0bcdbb2033e44c6d95215d480a Mon Sep 17 00:00:00 2001 From: Satyajit Desai Date: Wed, 10 May 2017 11:58:50 -0700 Subject: [PATCH 324/786] coresight: tmc: Add usb support for coresight Add support to enable USB mode for coresight devices. Set memory as the default mode on ETR. 
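For orientation, a condensed and simplified view of the enable-path branching this patch introduces is sketched below, with locking and sysfs plumbing omitted. All names are taken from the diff that follows, and the sketch assumes the driver-internal coresight-tmc.h declarations.

    /* Memory mode keeps the existing DDR buffer flow; USB mode instead opens
     * a QDSS channel and lets the USB notifier drive the hardware setup.
     */
    static int example_etr_enable(struct tmc_drvdata *drvdata)
    {
            int ret = 0;

            if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
                    ret = tmc_etr_alloc_mem(drvdata);
                    if (!ret)
                            tmc_etr_enable_hw(drvdata);
            } else {
                    drvdata->usbch = usb_qdss_open("qdss", drvdata,
                                                   usb_notifier);
                    if (IS_ERR_OR_NULL(drvdata->usbch))
                            ret = PTR_ERR(drvdata->usbch);
            }

            return ret;
    }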
Change-Id: Id804fb819ad19b589a1b978ea0cd65b0265a9268 Signed-off-by: Satyajit Desai --- .../hwtracing/coresight/coresight-tmc-etr.c | 81 +++++++++++++------ drivers/hwtracing/coresight/coresight-tmc.c | 1 + 2 files changed, 57 insertions(+), 25 deletions(-) diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 966a9885c9e0..d0ae889c46b7 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -771,34 +771,48 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode) mutex_lock(&drvdata->mem_lock); - /* - * ETR DDR memory is not allocated until user enables - * tmc at least once. If user specifies different ETR - * DDR size than the default size or switches between - * contiguous or scatter-gather memory type after - * enabling tmc; the new selection will be honored from - * next tmc enable session. - */ - if (drvdata->size != drvdata->mem_size || - drvdata->memtype != drvdata->mem_type) { - tmc_etr_free_mem(drvdata); - drvdata->size = drvdata->mem_size; - drvdata->memtype = drvdata->mem_type; - } - ret = tmc_etr_alloc_mem(drvdata); - if (ret) { - pm_runtime_put(drvdata->dev); + spin_lock_irqsave(&drvdata->spinlock, flags); + if (drvdata->reading) { + ret = -EBUSY; + spin_unlock_irqrestore(&drvdata->spinlock, flags); mutex_unlock(&drvdata->mem_lock); return ret; } - mutex_unlock(&drvdata->mem_lock); + spin_unlock_irqrestore(&drvdata->spinlock, flags); - spin_lock_irqsave(&drvdata->spinlock, flags); - if (drvdata->reading) { - ret = -EBUSY; - goto out; + if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) { + /* + * ETR DDR memory is not allocated until user enables + * tmc at least once. If user specifies different ETR + * DDR size than the default size or switches between + * contiguous or scatter-gather memory type after + * enabling tmc; the new selection will be honored from + * next tmc enable session. + */ + if (drvdata->size != drvdata->mem_size || + drvdata->memtype != drvdata->mem_type) { + tmc_etr_free_mem(drvdata); + drvdata->size = drvdata->mem_size; + drvdata->memtype = drvdata->mem_type; + } + ret = tmc_etr_alloc_mem(drvdata); + if (ret) { + mutex_unlock(&drvdata->mem_lock); + return ret; + } + } else { + drvdata->usbch = usb_qdss_open("qdss", drvdata, + usb_notifier); + if (IS_ERR_OR_NULL(drvdata->usbch)) { + dev_err(drvdata->dev, "usb_qdss_open failed\n"); + ret = PTR_ERR(drvdata->usbch); + mutex_unlock(&drvdata->mem_lock); + return ret; + } } + spin_lock_irqsave(&drvdata->spinlock, flags); + val = local_xchg(&drvdata->mode, mode); /* * In sysFS mode we can have multiple writers per sink. 
Since this @@ -808,9 +822,14 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode) if (val == CS_MODE_SYSFS) goto out; - tmc_etr_enable_hw(drvdata); + if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) + tmc_etr_enable_hw(drvdata); + + drvdata->enable = true; + drvdata->sticky_enable = true; out: spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); if (!ret) dev_info(drvdata->dev, "TMC-ETR enabled\n"); @@ -880,8 +899,15 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev) val = local_xchg(&drvdata->mode, CS_MODE_DISABLED); /* Disable the TMC only if it needs to */ - if (val != CS_MODE_DISABLED) - tmc_etr_disable_hw(drvdata); + if (val != CS_MODE_DISABLED) { + if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) { + __tmc_etr_disable_to_bam(drvdata); + tmc_etr_bam_disable(drvdata); + usb_qdss_close(drvdata->usbch); + } else { + tmc_etr_disable_hw(drvdata); + } + } spin_unlock_irqrestore(&drvdata->spinlock, flags); @@ -913,6 +939,11 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) goto out; } + if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) { + ret = -EINVAL; + goto out; + } + val = local_read(&drvdata->mode); /* Don't interfere if operated from Perf */ if (val == CS_MODE_PERF) { diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 077cb451b5bb..012c56e4be45 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -540,6 +540,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) drvdata->memtype = TMC_ETR_MEM_TYPE_CONTIG; drvdata->mem_size = drvdata->size; drvdata->mem_type = drvdata->memtype; + drvdata->out_mode = TMC_ETR_OUT_MODE_MEM; } else { drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4; } -- GitLab From d0fedd05511b15cb25aaa1ea81219b1637cc2849 Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Wed, 1 Mar 2017 13:25:40 -0500 Subject: [PATCH 325/786] drm/msm/sde: support command mode autorefresh Add support for the autorefresh feature in which hardware automatically kicks off the previously sent frame to the command mode panel, similar to a video mode panel. 
Change-Id: I1fa339784330c19241433a44acab5dabf9910ce8 Signed-off-by: Lloyd Atkinson --- drivers/gpu/drm/msm/sde/sde_encoder.c | 6 + drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 17 + .../gpu/drm/msm/sde/sde_encoder_phys_cmd.c | 364 +++++++++++++++++- 3 files changed, 381 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 09882cda4650..2ff8c38fc805 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -1847,6 +1847,12 @@ static void sde_encoder_frame_done_callback( struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc); unsigned int i; + if (!sde_enc->frame_busy_mask[0]) { + /* suppress frame_done without waiter, likely autorefresh */ + SDE_EVT32(DRMID(drm_enc), event, ready_phys->intf_idx); + return; + } + /* One of the physical encoders has become idle */ for (i = 0; i < sde_enc->num_phys_encs; i++) if (sde_enc->phys_encs[i] == ready_phys) { diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index 6e6960a3e951..b173876c875b 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -172,6 +172,8 @@ struct sde_encoder_phys_ops { * @INTR_IDX_PINGPONG: Pingpong done unterrupt for cmd mode panel * @INTR_IDX_UNDERRUN: Underrun unterrupt for video and cmd mode panel * @INTR_IDX_RDPTR: Readpointer done unterrupt for cmd mode panel + * @INTR_IDX_AUTOREFRESH_DONE: Autorefresh done for cmd mode panel meaning + * autorefresh has triggered a double buffer flip */ enum sde_intr_idx { INTR_IDX_VSYNC, @@ -179,6 +181,7 @@ enum sde_intr_idx { INTR_IDX_UNDERRUN, INTR_IDX_CTL_START, INTR_IDX_RDPTR, + INTR_IDX_AUTOREFRESH_DONE, INTR_IDX_MAX, }; @@ -283,6 +286,18 @@ struct sde_encoder_phys_vid { u64 rot_prefill_line; }; +/** + * struct sde_encoder_phys_cmd_autorefresh - autorefresh state tracking + * @cfg: current active autorefresh configuration + * @kickoff_cnt: atomic count tracking autorefresh done irq kickoffs pending + * @kickoff_wq: wait queue for waiting on autorefresh done irq + */ +struct sde_encoder_phys_cmd_autorefresh { + struct sde_hw_autorefresh cfg; + atomic_t kickoff_cnt; + wait_queue_head_t kickoff_wq; +}; + /** * struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command * mode specific operations @@ -292,12 +307,14 @@ struct sde_encoder_phys_vid { * @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt * after ctl_start instead of before next frame kickoff * @pp_timeout_report_cnt: number of pingpong done irq timeout errors + * @autorefresh: autorefresh feature state */ struct sde_encoder_phys_cmd { struct sde_encoder_phys base; int stream_sel; bool serialize_wait4pp; int pp_timeout_report_cnt; + struct sde_encoder_phys_cmd_autorefresh autorefresh; }; /** diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index 447fdcc9e89c..9880ab1ab74b 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -44,6 +44,16 @@ #define DEFAULT_TEARCHECK_SYNC_THRESH_START 4 #define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4 +#define SDE_ENC_WR_PTR_START_TIMEOUT_US 20000 + +static inline int _sde_encoder_phys_cmd_get_idle_timeout( + struct sde_encoder_phys_cmd *cmd_enc) +{ + return cmd_enc->autorefresh.cfg.frame_count ? 
+ cmd_enc->autorefresh.cfg.frame_count * + KICKOFF_TIMEOUT_MS : KICKOFF_TIMEOUT_MS; +} + static inline bool sde_encoder_phys_cmd_is_master( struct sde_encoder_phys *phys_enc) { @@ -60,6 +70,52 @@ static bool sde_encoder_phys_cmd_mode_fixup( return true; } +static uint64_t _sde_encoder_phys_cmd_get_autorefresh_property( + struct sde_encoder_phys *phys_enc) +{ + struct drm_connector *conn = phys_enc->connector; + + if (!conn || !conn->state) + return 0; + + return sde_connector_get_property(conn->state, + CONNECTOR_PROP_AUTOREFRESH); +} + +static void _sde_encoder_phys_cmd_config_autorefresh( + struct sde_encoder_phys *phys_enc, + u32 new_frame_count) +{ + struct sde_encoder_phys_cmd *cmd_enc = + to_sde_encoder_phys_cmd(phys_enc); + struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp; + struct drm_connector *conn = phys_enc->connector; + struct sde_hw_autorefresh *cfg_cur, cfg_nxt; + + if (!conn || !conn->state || !hw_pp) + return; + + cfg_cur = &cmd_enc->autorefresh.cfg; + + /* autorefresh property value should be validated already */ + memset(&cfg_nxt, 0, sizeof(cfg_nxt)); + cfg_nxt.frame_count = new_frame_count; + cfg_nxt.enable = (cfg_nxt.frame_count != 0); + + SDE_DEBUG_CMDENC(cmd_enc, "autorefresh state %d->%d framecount %d\n", + cfg_cur->enable, cfg_nxt.enable, cfg_nxt.frame_count); + SDE_EVT32(DRMID(phys_enc->parent), hw_pp->idx, cfg_cur->enable, + cfg_nxt.enable, cfg_nxt.frame_count); + + /* only proceed on state changes */ + if (cfg_nxt.enable == cfg_cur->enable) + return; + + memcpy(cfg_cur, &cfg_nxt, sizeof(*cfg_cur)); + if (hw_pp->ops.setup_autorefresh) + hw_pp->ops.setup_autorefresh(hw_pp, cfg_cur); +} + static void _sde_encoder_phys_cmd_update_flush_mask( struct sde_encoder_phys *phys_enc) { @@ -124,6 +180,29 @@ static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx) wake_up_all(&phys_enc->pending_kickoff_wq); } +static void sde_encoder_phys_cmd_autorefresh_done_irq(void *arg, int irq_idx) +{ + struct sde_encoder_phys *phys_enc = arg; + struct sde_encoder_phys_cmd *cmd_enc = + to_sde_encoder_phys_cmd(phys_enc); + unsigned long lock_flags; + int new_cnt; + + if (!cmd_enc) + return; + + phys_enc = &cmd_enc->base; + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + new_cnt = atomic_add_unless(&cmd_enc->autorefresh.kickoff_cnt, -1, 0); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + + SDE_EVT32_IRQ(DRMID(phys_enc->parent), + phys_enc->hw_pp->idx - PINGPONG_0, new_cnt); + + /* Signal any waiting atomic commit thread */ + wake_up_all(&cmd_enc->autorefresh.kickoff_wq); +} + static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx) { struct sde_encoder_phys *phys_enc = arg; @@ -190,6 +269,10 @@ static void _sde_encoder_phys_cmd_setup_irq_hw_idx( irq = &phys_enc->irq[INTR_IDX_UNDERRUN]; irq->hw_idx = phys_enc->intf_idx; irq->irq_idx = -EINVAL; + + irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE]; + irq->hw_idx = phys_enc->hw_pp->idx; + irq->irq_idx = -EINVAL; } static void sde_encoder_phys_cmd_mode_set( @@ -302,6 +385,74 @@ static bool _sde_encoder_phys_is_ppsplit_slave( phys_enc->split_role == ENC_ROLE_SLAVE; } +static int _sde_encoder_phys_cmd_poll_write_pointer_started( + struct sde_encoder_phys *phys_enc) +{ + struct sde_encoder_phys_cmd *cmd_enc = + to_sde_encoder_phys_cmd(phys_enc); + struct sde_hw_pingpong *hw_pp = phys_enc->hw_pp; + struct sde_hw_pp_vsync_info info; + u32 timeout_us = SDE_ENC_WR_PTR_START_TIMEOUT_US; + int ret; + + if (!hw_pp || !hw_pp->ops.get_vsync_info || + !hw_pp->ops.poll_timeout_wr_ptr) + return 0; + + ret = 
hw_pp->ops.get_vsync_info(hw_pp, &info); + if (ret) + return ret; + + SDE_DEBUG_CMDENC(cmd_enc, + "pp:%d rd_ptr %d wr_ptr %d\n", + phys_enc->hw_pp->idx - PINGPONG_0, + info.rd_ptr_line_count, + info.wr_ptr_line_count); + SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0, + info.wr_ptr_line_count); + + ret = hw_pp->ops.poll_timeout_wr_ptr(hw_pp, timeout_us); + if (ret) { + SDE_EVT32(DRMID(phys_enc->parent), + phys_enc->hw_pp->idx - PINGPONG_0, + timeout_us, + ret); + SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl", + "dsi1_phy", "vbif_rt", "dbg_bus", + "vbif_dbg_bus", "panic"); + } + + return ret; +} + +static bool _sde_encoder_phys_cmd_is_ongoing_pptx( + struct sde_encoder_phys *phys_enc) +{ + struct sde_hw_pingpong *hw_pp; + struct sde_hw_pp_vsync_info info; + + if (!phys_enc) + return false; + + hw_pp = phys_enc->hw_pp; + if (!hw_pp || !hw_pp->ops.get_vsync_info) + return false; + + hw_pp->ops.get_vsync_info(hw_pp, &info); + + SDE_EVT32(DRMID(phys_enc->parent), + phys_enc->hw_pp->idx - PINGPONG_0, + atomic_read(&phys_enc->pending_kickoff_cnt), + info.wr_ptr_line_count, + phys_enc->cached_mode.vdisplay); + + if (info.wr_ptr_line_count > 0 && info.wr_ptr_line_count < + phys_enc->cached_mode.vdisplay) + return true; + + return false; +} + static int _sde_encoder_phys_cmd_wait_for_idle( struct sde_encoder_phys *phys_enc) { @@ -333,6 +484,42 @@ static int _sde_encoder_phys_cmd_wait_for_idle( return ret; } +static int _sde_encoder_phys_cmd_wait_for_autorefresh_done( + struct sde_encoder_phys *phys_enc) +{ + struct sde_encoder_phys_cmd *cmd_enc = + to_sde_encoder_phys_cmd(phys_enc); + struct sde_encoder_wait_info wait_info; + int ret = 0; + + if (!phys_enc) { + SDE_ERROR("invalid encoder\n"); + return -EINVAL; + } + + /* only master deals with autorefresh */ + if (!sde_encoder_phys_cmd_is_master(phys_enc)) + return 0; + + wait_info.wq = &cmd_enc->autorefresh.kickoff_wq; + wait_info.atomic_cnt = &cmd_enc->autorefresh.kickoff_cnt; + wait_info.timeout_ms = _sde_encoder_phys_cmd_get_idle_timeout(cmd_enc); + + /* wait for autorefresh kickoff to start */ + ret = sde_encoder_helper_wait_for_irq(phys_enc, + INTR_IDX_AUTOREFRESH_DONE, &wait_info); + + /* double check that kickoff has started by reading write ptr reg */ + if (!ret) + ret = _sde_encoder_phys_cmd_poll_write_pointer_started( + phys_enc); + else + sde_encoder_helper_report_irq_timeout(phys_enc, + INTR_IDX_AUTOREFRESH_DONE); + + return ret; +} + static int sde_encoder_phys_cmd_control_vblank_irq( struct sde_encoder_phys *phys_enc, bool enable) @@ -387,14 +574,20 @@ void sde_encoder_phys_cmd_irq_control(struct sde_encoder_phys *phys_enc, sde_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN); sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true); - if (sde_encoder_phys_cmd_is_master(phys_enc)) + if (sde_encoder_phys_cmd_is_master(phys_enc)) { sde_encoder_helper_register_irq(phys_enc, INTR_IDX_CTL_START); - } else { + sde_encoder_helper_register_irq(phys_enc, + INTR_IDX_AUTOREFRESH_DONE); + } - if (sde_encoder_phys_cmd_is_master(phys_enc)) + } else { + if (sde_encoder_phys_cmd_is_master(phys_enc)) { sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_CTL_START); + sde_encoder_helper_unregister_irq(phys_enc, + INTR_IDX_AUTOREFRESH_DONE); + } sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN); sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false); @@ -445,7 +638,9 @@ static void sde_encoder_phys_cmd_tearcheck_config( } tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh); - 
tc_cfg.hw_vsync_mode = 1; + + /* enable external TE after kickoff to avoid premature autorefresh */ + tc_cfg.hw_vsync_mode = 0; /* * By setting sync_cfg_height to near max register value, we essentially @@ -561,6 +756,41 @@ static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc) phys_enc->enable_state = SDE_ENC_ENABLED; } +static bool _sde_encoder_phys_cmd_is_autorefresh_enabled( + struct sde_encoder_phys *phys_enc) +{ + struct sde_hw_pingpong *hw_pp; + struct sde_hw_autorefresh cfg; + int ret; + + if (!phys_enc || !phys_enc->hw_pp) + return 0; + + if (!sde_encoder_phys_cmd_is_master(phys_enc)) + return 0; + + hw_pp = phys_enc->hw_pp; + if (!hw_pp->ops.get_autorefresh) + return 0; + + ret = hw_pp->ops.get_autorefresh(hw_pp, &cfg); + if (ret) + return 0; + + return cfg.enable; +} + +static void _sde_encoder_phys_cmd_connect_te( + struct sde_encoder_phys *phys_enc, bool enable) +{ + if (!phys_enc || !phys_enc->hw_pp || + !phys_enc->hw_pp->ops.connect_external_te) + return; + + SDE_EVT32(DRMID(phys_enc->parent), enable); + phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable); +} + static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc) { struct sde_encoder_phys_cmd *cmd_enc = @@ -638,7 +868,10 @@ static void sde_encoder_phys_cmd_prepare_for_kickoff( return; } SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0); - SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0); + + SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0, + atomic_read(&phys_enc->pending_kickoff_cnt), + atomic_read(&cmd_enc->autorefresh.kickoff_cnt)); /* * Mark kickoff request as outstanding. If there are more than one, @@ -652,6 +885,10 @@ static void sde_encoder_phys_cmd_prepare_for_kickoff( phys_enc->hw_pp->idx - PINGPONG_0); SDE_ERROR("failed wait_for_idle: %d\n", ret); } + + SDE_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n", + phys_enc->hw_pp->idx - PINGPONG_0, + atomic_read(&phys_enc->pending_kickoff_cnt)); } static int _sde_encoder_phys_cmd_wait_for_ctl_start( @@ -722,6 +959,10 @@ static int sde_encoder_phys_cmd_wait_for_commit_done( if (sde_encoder_phys_cmd_is_master(phys_enc)) rc = _sde_encoder_phys_cmd_wait_for_ctl_start(phys_enc); + if (!rc && sde_encoder_phys_cmd_is_master(phys_enc) && + cmd_enc->autorefresh.cfg.enable) + rc = _sde_encoder_phys_cmd_wait_for_autorefresh_done(phys_enc); + /* required for both controllers */ if (!rc && cmd_enc->serialize_wait4pp) sde_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL); @@ -765,6 +1006,108 @@ static void sde_encoder_phys_cmd_update_split_role( static void sde_encoder_phys_cmd_prepare_commit( struct sde_encoder_phys *phys_enc) { + struct sde_encoder_phys_cmd *cmd_enc = + to_sde_encoder_phys_cmd(phys_enc); + + if (!phys_enc) + return; + + if (sde_encoder_phys_cmd_is_master(phys_enc)) { + unsigned long lock_flags; + + + SDE_EVT32(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0, + cmd_enc->autorefresh.cfg.enable); + + if (!_sde_encoder_phys_cmd_is_autorefresh_enabled(phys_enc)) + return; + + /** + * Autorefresh must be disabled carefully: + * - Must disable while there is no ongoing transmission + * - Receiving a TE will trigger the next Autorefresh TX + * - Only safe to disable Autorefresh between PPDone and TE + * - However, that is a small time window + * - Disabling External TE gives large safe window, assuming + * internally generated TE is set to a large counter value + * + * If Autorefresh is active: + * 1. 
Disable external TE + * - TE will run on an SDE counter set to large value (~200ms) + * + * 2. Check for ongoing TX + * - If ongoing TX, set pending_kickoff_cnt if not set already + * - We don't want to wait for a ppdone that will never + * arrive, so verify ongoing TX + * + * 3. Wait for TX to Complete + * - Wait for PPDone pending count to reach 0 + * + * 4. Leave Autorefresh Disabled + * - Assume disable of Autorefresh since it is now safe + * - Can now safely Disable Encoder, do debug printing, etc. + * without worrying that Autorefresh will kickoff + */ + + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + + /* disable external TE to prevent next autorefresh */ + _sde_encoder_phys_cmd_connect_te(phys_enc, false); + + /* verify that we disabled TE during outstanding TX */ + if (_sde_encoder_phys_cmd_is_ongoing_pptx(phys_enc)) + atomic_add_unless(&phys_enc->pending_kickoff_cnt, 1, 1); + + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + + /* wait for ppdone if necessary due to catching ongoing TX */ + if (_sde_encoder_phys_cmd_wait_for_idle(phys_enc)) + SDE_ERROR_CMDENC(cmd_enc, + "pp:%d kickoff timed out\n", + phys_enc->hw_pp->idx - PINGPONG_0); + + /* + * not strictly necessary for kickoff, but simplifies disable + * callflow since our disable is split across multiple phys_encs + */ + _sde_encoder_phys_cmd_config_autorefresh(phys_enc, 0); + + SDE_DEBUG_CMDENC(cmd_enc, "disabled autorefresh & ext TE\n"); + + } +} + +static void sde_encoder_phys_cmd_handle_post_kickoff( + struct sde_encoder_phys *phys_enc) +{ + if (!phys_enc) + return; + + /** + * re-enable external TE, either for the first time after enabling + * or if disabled for Autorefresh + */ + _sde_encoder_phys_cmd_connect_te(phys_enc, true); +} + +static void sde_encoder_phys_cmd_trigger_start( + struct sde_encoder_phys *phys_enc) +{ + struct sde_encoder_phys_cmd *cmd_enc = + to_sde_encoder_phys_cmd(phys_enc); + u32 frame_cnt; + + if (!phys_enc) + return; + + /* we don't issue CTL_START when using autorefresh */ + frame_cnt = _sde_encoder_phys_cmd_get_autorefresh_property(phys_enc); + if (frame_cnt) { + _sde_encoder_phys_cmd_config_autorefresh(phys_enc, frame_cnt); + atomic_inc(&cmd_enc->autorefresh.kickoff_cnt); + } else { + sde_encoder_helper_trigger_start(phys_enc); + } } static void sde_encoder_phys_cmd_init_ops( @@ -782,7 +1125,8 @@ static void sde_encoder_phys_cmd_init_ops( ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done; ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff; ops->wait_for_tx_complete = sde_encoder_phys_cmd_wait_for_tx_complete; - ops->trigger_start = sde_encoder_helper_trigger_start; + ops->handle_post_kickoff = sde_encoder_phys_cmd_handle_post_kickoff; + ops->trigger_start = sde_encoder_phys_cmd_trigger_start; ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush; ops->hw_reset = sde_encoder_helper_hw_reset; ops->irq_control = sde_encoder_phys_cmd_irq_control; @@ -860,10 +1204,18 @@ struct sde_encoder_phys *sde_encoder_phys_cmd_init( irq->intr_idx = INTR_IDX_UNDERRUN; irq->cb.func = sde_encoder_phys_cmd_underrun_irq; + irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE]; + irq->name = "autorefresh_done"; + irq->intr_type = SDE_IRQ_TYPE_PING_PONG_AUTO_REF; + irq->intr_idx = INTR_IDX_AUTOREFRESH_DONE; + irq->cb.func = sde_encoder_phys_cmd_autorefresh_done_irq; + atomic_set(&phys_enc->vblank_refcount, 0); atomic_set(&phys_enc->pending_kickoff_cnt, 0); atomic_set(&phys_enc->pending_ctlstart_cnt, 0); 
init_waitqueue_head(&phys_enc->pending_kickoff_wq); + atomic_set(&cmd_enc->autorefresh.kickoff_cnt, 0); + init_waitqueue_head(&cmd_enc->autorefresh.kickoff_wq); SDE_DEBUG_CMDENC(cmd_enc, "created\n"); -- GitLab From 8a9b38a32f2efd450b485490a62d8706dfa85969 Mon Sep 17 00:00:00 2001 From: Alan Kwong Date: Thu, 22 Jun 2017 11:30:52 -0400 Subject: [PATCH 326/786] drm/msm: add affected planes during idle power restore During idle power restore, software plane states are not modified so they are not added to the new atomic state during commit. This causes drm framework to bypass plane atomic check & update, and results in pipe buffer offset to be in reset state. This causes smmu fault subsequently. Add affected planes to new atomic state during first atomic check after power restore so hardware pipe buffer offset will be updated. CRs-Fixed: 2064858 Change-Id: Icd8978cfb24a1c19980fab99f607102e95a16b85 Signed-off-by: Alan Kwong --- drivers/gpu/drm/msm/msm_drv.c | 11 ++++++++ drivers/gpu/drm/msm/msm_kms.h | 3 +++ drivers/gpu/drm/msm/sde/sde_crtc.c | 25 ++++++++++++++++- drivers/gpu/drm/msm/sde/sde_crtc.h | 5 ++++ drivers/gpu/drm/msm/sde/sde_kms.c | 43 ++++++++++++++++++++++++++++++ 5 files changed, 86 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f7d5d02d473c..d606e4d9f7b3 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -126,10 +126,21 @@ static void msm_fb_output_poll_changed(struct drm_device *dev) int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { + struct msm_drm_private *priv; + + if (!dev) + return -EINVAL; + if (msm_is_suspend_blocked(dev)) { DRM_DEBUG("rejecting commit during suspend\n"); return -EBUSY; } + + priv = dev->dev_private; + if (priv && priv->kms && priv->kms->funcs && + priv->kms->funcs->atomic_check) + return priv->kms->funcs->atomic_check(priv->kms, state); + return drm_atomic_helper_check(dev, state); } diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index d8ac40758eb9..eed0f1b6ae02 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -75,6 +75,9 @@ struct msm_kms_funcs { const struct msm_format *msm_fmt, const struct drm_mode_fb_cmd2 *cmd, struct drm_gem_object **bos); + /* perform complete atomic check of given atomic state */ + int (*atomic_check)(struct msm_kms *kms, + struct drm_atomic_state *state); /* misc: */ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 30bb72bf0367..f2c7a505614a 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -2357,6 +2357,8 @@ static struct drm_crtc_state *sde_crtc_duplicate_state(struct drm_crtc *crtc) _sde_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp); + cstate->idle_pc = sde_crtc->idle_pc; + return &cstate->base; } @@ -2457,6 +2459,24 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg) sde_encoder_virt_restore(encoder); } + } else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) { + /* + * Serialize h/w idle state update with crtc atomic check. + * Grab the modeset lock to ensure that there is no on-going + * atomic check, then increment the idle_pc counter. The next + * atomic check will detect a new idle_pc since the counter + * has advanced between the old_state and new_state, and + * therefore properly reprogram all relevant drm objects' + * hardware. 
+ */ + drm_modeset_lock_crtc(crtc, NULL); + + sde_crtc->idle_pc++; + + SDE_DEBUG("crtc%d idle_pc:%d\n", crtc->base.id, + sde_crtc->idle_pc); + SDE_EVT32(DRMID(crtc), sde_crtc->idle_pc); + } else if (event_type == SDE_POWER_EVENT_POST_DISABLE) { struct drm_plane *plane; @@ -2466,6 +2486,8 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg) */ drm_atomic_crtc_for_each_plane(plane, crtc) sde_plane_set_revalidate(plane, true); + + drm_modeset_unlock_crtc(crtc); } mutex_unlock(&sde_crtc->crtc_lock); @@ -2594,7 +2616,8 @@ static void sde_crtc_enable(struct drm_crtc *crtc) sde_crtc->power_event = sde_power_handle_register_event( &priv->phandle, - SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE, + SDE_POWER_EVENT_POST_ENABLE | SDE_POWER_EVENT_POST_DISABLE | + SDE_POWER_EVENT_PRE_DISABLE, sde_crtc_handle_power_event, crtc, sde_crtc->name); } diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h index 0d72ff153420..f021477e65eb 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.h +++ b/drivers/gpu/drm/msm/sde/sde_crtc.h @@ -125,6 +125,7 @@ struct sde_crtc_event { * @vblank_cb_time : ktime at vblank count reset * @vblank_refcount : reference count for vblank enable request * @suspend : whether or not a suspend operation is in progress + * @idle_pc : count of current idle power collapse request * @feature_list : list of color processing features supported on a crtc * @active_list : list of color processing features are active * @dirty_list : list of color processing features are dirty @@ -173,6 +174,7 @@ struct sde_crtc { ktime_t vblank_cb_time; atomic_t vblank_refcount; bool suspend; + u32 idle_pc; struct list_head feature_list; struct list_head active_list; @@ -278,6 +280,7 @@ struct sde_crtc_respool { * @sbuf_cfg: stream buffer configuration * @sbuf_prefill_line: number of line for inline rotator prefetch * @sbuf_flush_mask: flush mask for inline rotator + * @idle_pc: count of idle power collapse request when state is duplicated */ struct sde_crtc_state { struct drm_crtc_state base; @@ -307,6 +310,8 @@ struct sde_crtc_state { u32 sbuf_prefill_line; u32 sbuf_flush_mask; + u32 idle_pc; + struct sde_crtc_respool rp; }; diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 26125d8d9e5d..4c820c64db6d 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1370,6 +1370,48 @@ static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file) sde_crtc_cancel_pending_flip(priv->crtcs[i], file); } +static int sde_kms_atomic_check(struct msm_kms *kms, + struct drm_atomic_state *state) +{ + struct sde_kms *sde_kms = to_sde_kms(kms); + struct drm_device *dev = sde_kms->dev; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int rc, i; + + if (!kms || !state) + return -EINVAL; + + /* + * Add planes (and other affected DRM objects, if any) to new state + * if idle power collapse occurred since previous commit. + * Since atomic state is a delta from the last, if the user-space + * did not request any changes on a plane/connector, that object + * will not be included in the new atomic state. Idle power collapse + * is driver-autonomous, so the driver needs to ensure that all + * hardware is reprogrammed as the power comes back on by forcing + * the drm objects attached to the CRTC into the new atomic state. 
+ */ + for_each_crtc_in_state(state, crtc, crtc_state, i) { + struct sde_crtc_state *cstate = to_sde_crtc_state(crtc_state); + struct sde_crtc_state *old_cstate = + to_sde_crtc_state(crtc->state); + + if (cstate->idle_pc != old_cstate->idle_pc) { + SDE_DEBUG("crtc%d idle_pc:%d/%d\n", + crtc->base.id, cstate->idle_pc, + old_cstate->idle_pc); + SDE_EVT32(DRMID(crtc), cstate->idle_pc, + old_cstate->idle_pc); + rc = drm_atomic_add_affected_planes(state, crtc); + if (rc) + return rc; + } + } + + return drm_atomic_helper_check(dev, state); +} + static const struct msm_kms_funcs kms_funcs = { .hw_init = sde_kms_hw_init, .postinit = sde_kms_postinit, @@ -1387,6 +1429,7 @@ static const struct msm_kms_funcs kms_funcs = { .enable_vblank = sde_kms_enable_vblank, .disable_vblank = sde_kms_disable_vblank, .check_modified_format = sde_format_check_modified_format, + .atomic_check = sde_kms_atomic_check, .get_format = sde_get_msm_format, .round_pixclk = sde_kms_round_pixclk, .destroy = sde_kms_destroy, -- GitLab From a51b2c4bb6f3ec31c86cef7413c6310b5f7cbfdf Mon Sep 17 00:00:00 2001 From: Praneeth Paladugu Date: Fri, 23 Jun 2017 12:48:06 -0700 Subject: [PATCH 327/786] msm: vidc: Add support for decoder UBWC CR Stats This change adds support for UBWC CR stats extradata, so that HW can write UBWC stats into Extradata buffer. This information is exchanged to consumers through extradata. This information is useful for optimal run-time bus BW voting. CRs-Fixed: 2012520 Change-Id: I62c53bd43da0a94cfb7fff6b02f5d449c0f216a6 Signed-off-by: Praneeth Paladugu --- .../media/platform/msm/vidc/hfi_packetization.c | 3 +++ drivers/media/platform/msm/vidc/msm_vdec.c | 6 ++++-- .../media/platform/msm/vidc/msm_vidc_common.c | 4 ++++ drivers/media/platform/msm/vidc/vidc_hfi.h | 6 ++++++ drivers/media/platform/msm/vidc/vidc_hfi_api.h | 1 + include/uapi/linux/v4l2-controls.h | 3 +++ include/uapi/media/msm_vidc.h | 16 ++++++++++++++++ 7 files changed, 37 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c index 40c306d50359..5d769f431a9e 100644 --- a/drivers/media/platform/msm/vidc/hfi_packetization.c +++ b/drivers/media/platform/msm/vidc/hfi_packetization.c @@ -616,6 +616,9 @@ static int get_hfi_extradata_index(enum hal_extradata_id index) case HAL_EXTRADATA_VPX_COLORSPACE: ret = HFI_PROPERTY_PARAM_VDEC_VPX_COLORSPACE_EXTRADATA; break; + case HAL_EXTRADATA_UBWC_CR_STATS_INFO: + ret = HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA; + break; default: dprintk(VIDC_WARN, "Extradata index not found: %d\n", index); break; diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c index 554e89a19fa3..77bde43c5b32 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.c +++ b/drivers/media/platform/msm/vidc/msm_vdec.c @@ -152,7 +152,7 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = { .name = "Extradata Type", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE, - .maximum = V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE, + .maximum = V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO, .default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE, .menu_skip_mask = ~( (1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) | @@ -179,7 +179,8 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = { (1 << V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI) | (1 << V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY) | - (1 << V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE) + (1 << V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE) | + (1 << 
V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO) ), .qmenu = mpeg_video_vidc_extradata, }, @@ -877,6 +878,7 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) case V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI: case V4L2_MPEG_VIDC_EXTRADATA_VUI_DISPLAY: case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE: + case V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO: inst->bufq[CAPTURE_PORT].num_planes = 2; inst->bufq[CAPTURE_PORT].plane_sizes[EXTRADATA_IDX(2)] = VENUS_EXTRADATA_SIZE( diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index ac69ab855920..3c990e439c26 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -70,6 +70,7 @@ const char *const mpeg_video_vidc_extradata[] = { "Extradata PQ Info", "Extradata display VUI", "Extradata vpx color space", + "Extradata UBWC CR stats info", }; struct getprop_buf { @@ -4893,6 +4894,9 @@ enum hal_extradata_id msm_comm_get_hal_extradata_index( case V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE: ret = HAL_EXTRADATA_VPX_COLORSPACE; break; + case V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO: + ret = HAL_EXTRADATA_UBWC_CR_STATS_INFO; + break; default: dprintk(VIDC_WARN, "Extradata not found: %d\n", index); break; diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h index 5601f1bef46c..8e9e51f87622 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi.h @@ -188,6 +188,12 @@ struct hfi_extradata_header { (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001E) #define HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA \ (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x001F) +#define HFI_PROPERTY_PARAM_VDEC_COLOUR_REMAPPING_INFO_SEI_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0020) +#define HFI_PROPERTY_PARAM_VDEC_DOWN_SCALAR \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0021) +#define HFI_PROPERTY_PARAM_VDEC_UBWC_CR_STAT_INFO_EXTRADATA \ + (HFI_PROPERTY_PARAM_VDEC_OX_START + 0x0022) #define HFI_PROPERTY_CONFIG_VDEC_OX_START \ (HFI_DOMAIN_BASE_VDEC + HFI_ARCH_OX_OFFSET + 0x4000) diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 47ce0ba3f0f1..927e7447c130 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -125,6 +125,7 @@ enum hal_extradata_id { HAL_EXTRADATA_PQ_INFO, HAL_EXTRADATA_VUI_DISPLAY_INFO, HAL_EXTRADATA_VPX_COLORSPACE, + HAL_EXTRADATA_UBWC_CR_STATS_INFO, }; enum hal_property { diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index cf96ac188aef..e5c4ddf29697 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -779,6 +779,9 @@ enum v4l2_mpeg_vidc_extradata { #define V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE \ V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE V4L2_MPEG_VIDC_EXTRADATA_VPX_COLORSPACE = 30, +#define V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO \ + V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO + V4L2_MPEG_VIDC_EXTRADATA_UBWC_CR_STATS_INFO = 31, }; #define V4L2_CID_MPEG_VIDEO_MULTI_SLICE_DELIVERY_MODE \ diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h index 038dd48b4813..4fe325dcd5c9 100644 --- a/include/uapi/media/msm_vidc.h +++ b/include/uapi/media/msm_vidc.h @@ -170,6 +170,16 @@ struct msm_vidc_vqzip_sei_payload { unsigned int data[1]; }; +struct msm_vidc_ubwc_cr_stats_info { + unsigned int stats_tile_32; 
+ unsigned int stats_tile_64; + unsigned int stats_tile_96; + unsigned int stats_tile_128; + unsigned int stats_tile_160; + unsigned int stats_tile_192; + unsigned int stats_tile_256; +}; + struct msm_vidc_yuv_stats_payload { unsigned int frame_qp; unsigned int texture; @@ -250,6 +260,12 @@ enum msm_vidc_extradata_type { #define MSM_VIDC_EXTRADATA_PQ_INFO \ MSM_VIDC_EXTRADATA_PQ_INFO MSM_VIDC_EXTRADATA_PQ_INFO = 0x00000017, +#define MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI \ + MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI + MSM_VIDC_EXTRADATA_COLOUR_REMAPPING_INFO_SEI = 0x00000018, +#define MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO \ + MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO + MSM_VIDC_EXTRADATA_UBWC_CR_STAT_INFO = 0x00000019, MSM_VIDC_EXTRADATA_INPUT_CROP = 0x0700000E, #define MSM_VIDC_EXTRADATA_OUTPUT_CROP \ MSM_VIDC_EXTRADATA_OUTPUT_CROP -- GitLab From 19263c9b9c0ef60648d0b1bbb46a6c6fa41ca6f2 Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Mon, 26 Jun 2017 12:56:21 -0600 Subject: [PATCH 328/786] drivers: thermal: step-wise: Update the mitigation clear logic Step-wise algorithm will lower mitigation if the temperature is above the trip and the trend is decreasing. This ends up in a case where the temperature is bouncing up and down above the trip and the observed max temperature is increasing steadily. This will eventually lead to device reset for reaching the critical temperature. To avoid this, update the step-wise algorithm to not reduce the mitigation if the temperature is above the trip and the trend is decreasing. The algorithm will apply the previous mitigation value in the above case. Change-Id: Ia734c19af5e732948a3f755c719ed9dedbf7ce3b Signed-off-by: Ram Chandrasekar --- drivers/thermal/step_wise.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index 6b05b7bc07c5..f6f30a0eb7c1 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -102,7 +102,8 @@ static unsigned long get_target_state(struct thermal_instance *instance, if (!throttle) next_target = THERMAL_NO_TARGET; } else { - next_target = cur_state - 1; + if (!throttle) + next_target = cur_state - 1; if (next_target > instance->upper) next_target = instance->upper; } -- GitLab From 577caa319c04a4dfd64c9bd4c11318acf2787754 Mon Sep 17 00:00:00 2001 From: Kishor PK Date: Thu, 30 Mar 2017 14:23:37 +0530 Subject: [PATCH 329/786] soc: qcom: pil: Avoid possible buffer overflow during Modem boot Buffer overflow can occur if MBA firmware size exceeds 1MB. So validate size before copying the firmware. 
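The check described here is an instance of a simple bounded-copy pattern: reject the blob if it is larger than the fixed destination region before any memcpy is issued. As a generic sketch only (placeholder names, not the driver's actual symbols), it amounts to:

#include <linux/errno.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/types.h>

/*
 * Illustrative helper, not driver code: refuse an oversized image instead
 * of overflowing the destination. SZ_1M mirrors the 1MB MBA region size
 * mentioned above.
 */
static int copy_mba_bounded(void *dst, const void *fw_data, size_t fw_size)
{
	if (fw_size > SZ_1M)
		return -EINVAL;

	memcpy(dst, fw_data, fw_size);
	return 0;
}
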
CRs-Fixed: 2001803 Change-Id: I070ddf85fbc47df072e7258369272366262ebf46 Signed-off-by: Kishor PK --- drivers/soc/qcom/pil-msa.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c index 4a586ac29fa1..20b9769195d9 100644 --- a/drivers/soc/qcom/pil-msa.c +++ b/drivers/soc/qcom/pil-msa.c @@ -677,7 +677,15 @@ int pil_mss_reset_load_mba(struct pil_desc *pil) /* Load the MBA image into memory */ count = fw->size; - memcpy(mba_dp_virt, data, count); + if (count <= SZ_1M) { + /* Ensures memcpy is done for max 1MB fw size */ + memcpy(mba_dp_virt, data, count); + } else { + dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n", + __func__); + ret = -EINVAL; + goto err_mba_data; + } /* Ensure memcpy of the MBA memory is done before loading the DP */ wmb(); -- GitLab From e5b72e59df0475014083aa67d985bf65a9ba7091 Mon Sep 17 00:00:00 2001 From: Rama Aparna Mallavarapu Date: Wed, 14 Jun 2017 10:52:31 -0700 Subject: [PATCH 330/786] ARM: dts: msm: Add property to identify TPDMs that need MSR fix for sdm845 Add device-tree property to indentify all TPDMs that require a different enable sequence for their functionality on SDM845. The property will be used to configure MSR to DSB after setting the enable bit. Change-Id: I7370f44a9df70cc54296e871811089b7f9a56679 Signed-off-by: Rama Aparna Mallavarapu --- arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi index 04a332ec1872..d2189a7962e3 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi @@ -245,6 +245,8 @@ clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; + qcom,msr-fix-req; + port { tpdm_swao1_out_tpda_swao: endpoint { remote-endpoint = <&tpda_swao_in_tpdm_swao1>; @@ -819,6 +821,8 @@ clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; + qcom,msr-fix-req; + port { tpdm_lpass_out_funnel_lpass: endpoint { remote-endpoint = <&funnel_lpass_in_tpdm_lpass>; @@ -837,6 +841,8 @@ clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; + qcom,msr-fix-req; + port { tpdm_center_out_tpda: endpoint { remote-endpoint = <&tpda_in_tpdm_center>; @@ -855,6 +861,8 @@ clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; + qcom,msr-fix-req; + port { tpdm_north_out_tpda: endpoint { remote-endpoint = <&tpda_in_tpdm_north>; @@ -1090,6 +1098,8 @@ clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; + qcom,msr-fix-req; + port { tpdm_mm_out_funnel_dl_mm: endpoint { remote-endpoint = <&funnel_dl_mm_in_tpdm_mm>; @@ -1181,6 +1191,8 @@ clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; + qcom,msr-fix-req; + port { tpdm_turing_out_funnel_turing: endpoint { remote-endpoint = @@ -1235,6 +1247,8 @@ clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; + qcom,msr-fix-req; + port { tpdm_ddr_out_funnel_ddr_0: endpoint { remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>; @@ -1376,7 +1390,6 @@ clocks = <&clock_aop QDSS_CLK>; clock-names = "apb_pclk"; - qcom,msr-fix-req; port{ tpdm_spss_out_tpda_spss: endpoint { -- GitLab From 0048ce9b00449762b58a8850803b7ded8bfb1ae9 Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Thu, 22 Jun 2017 19:57:57 -0700 Subject: [PATCH 331/786] msm: ipa: fix mhi suspend logic Add a missing return statement in IPA MHI suspend routine. 
Change-Id: I1dfbcff21ba3ca72ff985e76e851caf6a9da1ae4 CRs-Fixed: 2066866 Acked-by: Ady Abraham Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c index 5aa39b699bd6..9b3b53dcba68 100644 --- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c +++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c @@ -2046,6 +2046,8 @@ static int ipa_mhi_suspend_dl(bool force) if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) ipa_mhi_update_host_ch_state(true); + return 0; + fail_stop_event_update_dl_channel: ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->dl_channels); -- GitLab From ec11a0a30cf871979bbb7d50529125b9f7b2bfb8 Mon Sep 17 00:00:00 2001 From: Narendra Muppalla Date: Thu, 15 Jun 2017 15:35:17 -0700 Subject: [PATCH 332/786] drm/msm/sde: fix null parameter checks in drm driver This change adds null parameter checks prior to dereferencing them in drm driver. Change-Id: I0354924948bead8a4f7fa8e1201f01cecda510cb Signed-off-by: Narendra Muppalla --- drivers/gpu/drm/msm/msm_smmu.c | 10 ++++++++++ drivers/gpu/drm/msm/sde/sde_crtc.c | 16 ++++++++++++++++ drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c | 2 ++ drivers/gpu/drm/msm/sde/sde_kms.c | 4 +++- drivers/gpu/drm/msm/sde/sde_plane.c | 4 ++-- 5 files changed, 33 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c index 7fbcff435d56..26d29533f0b3 100644 --- a/drivers/gpu/drm/msm/msm_smmu.c +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -229,6 +229,11 @@ static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt, unsigned long attrs = 0x0; int ret; + if (!sgt || !client) { + DRM_ERROR("sg table is invalid\n"); + return -ENOMEM; + } + if (flags & MSM_BO_KEEPATTRS) attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT; @@ -256,6 +261,11 @@ static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt, struct msm_smmu *smmu = to_msm_smmu(mmu); struct msm_smmu_client *client = msm_smmu_to_client(smmu); + if (!sgt || !client) { + DRM_ERROR("sg table is invalid\n"); + return; + } + if (sgt && sgt->sgl) { DRM_DEBUG("%pad/0x%x/0x%x\n", &sgt->sgl->dma_address, sgt->sgl->dma_length, dir); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 30bb72bf0367..e942e2d46a84 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -1202,6 +1202,11 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc, state->fb ? 
state->fb->base.id : -1); format = to_sde_format(msm_framebuffer_format(pstate->base.fb)); + if (!format) { + SDE_ERROR("invalid format\n"); + return; + } + if (pstate->stage == SDE_STAGE_BASE && format->alpha_enable) bg_alpha_enable = true; @@ -2149,6 +2154,12 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc) dev = crtc->dev; sde_crtc = to_sde_crtc(crtc); sde_kms = _sde_crtc_get_kms(crtc); + + if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) { + SDE_ERROR("invalid argument\n"); + return; + } + priv = sde_kms->dev->dev_private; cstate = to_sde_crtc_state(crtc->state); @@ -3013,6 +3024,11 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, dev = crtc->dev; sde_kms = _sde_crtc_get_kms(crtc); + if (!sde_kms) { + SDE_ERROR("invalid argument\n"); + return; + } + info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL); if (!info) { SDE_ERROR("failed to allocate info memory\n"); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c index 678c84a1a4b0..0a5346ed52c4 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c @@ -447,6 +447,7 @@ static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg) u32 cmd1; struct sde_hw_blk_reg_map hw; + memset(&hw, 0, sizeof(hw)); cmd1 = (cfg->op == REG_DMA_READ) ? (dspp_read_sel[cfg->block_select] << 30) : 0; cmd1 |= (cfg->last_command) ? BIT(24) : 0; @@ -547,6 +548,7 @@ int reset_v1(struct sde_hw_ctl *ctl) return -EINVAL; } + memset(&hw, 0, sizeof(hw)); index = ctl->idx - CTL_0; SET_UP_REG_DMA_REG(hw, reg_dma); SDE_REG_WRITE(&hw, REG_DMA_OP_MODE_OFF, BIT(0)); diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 26125d8d9e5d..dd8b31e2372f 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -459,7 +459,7 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms, struct drm_crtc *crtc) { struct drm_encoder *encoder; - struct drm_device *dev = crtc->dev; + struct drm_device *dev; int ret; if (!kms || !crtc || !crtc->state) { @@ -467,6 +467,8 @@ static void sde_kms_wait_for_commit_done(struct msm_kms *kms, return; } + dev = crtc->dev; + if (!crtc->state->enable) { SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id); return; diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 2a98af45d2f6..246e401c1baf 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -367,7 +367,7 @@ static void _sde_plane_set_qos_lut(struct drm_plane *plane, total_fl = _sde_plane_calc_fill_level(plane, fmt, psde->pipe_cfg.src_rect.w); - if (SDE_FORMAT_IS_LINEAR(fmt)) + if (fmt && SDE_FORMAT_IS_LINEAR(fmt)) lut_usage = SDE_QOS_LUT_USAGE_LINEAR; else lut_usage = SDE_QOS_LUT_USAGE_MACROTILE; @@ -428,7 +428,7 @@ static void _sde_plane_set_danger_lut(struct drm_plane *plane, fb->modifier, drm_format_num_planes(fb->pixel_format)); - if (SDE_FORMAT_IS_LINEAR(fmt)) { + if (fmt && SDE_FORMAT_IS_LINEAR(fmt)) { danger_lut = psde->catalog->perf.danger_lut_tbl [SDE_QOS_LUT_USAGE_LINEAR]; safe_lut = psde->catalog->perf.safe_lut_tbl -- GitLab From 02f8f85ab2a9ccfede56ef61af206088ef4e0a8a Mon Sep 17 00:00:00 2001 From: Chinmay Sawarkar Date: Mon, 26 Jun 2017 12:05:38 -0700 Subject: [PATCH 333/786] msm: vidc: Fix buffer requirement negotiations Call buffer requirements before setting the min buffer size to FW. Otherwise, in the case of event Sufficient, FW will use outdated buffer size and error out. 
While at it, additional buffer requirement call is added to get Output2 buffer requirement in 10-bit case. CRs-Fixed: 2066658 Change-Id: Iaa61aa550faa926f337c4a3f9e353c6396fe80c7 Signed-off-by: Chinmay Sawarkar --- drivers/media/platform/msm/vidc/msm_vdec.c | 7 +++++++ drivers/media/platform/msm/vidc/msm_vidc.c | 4 ++-- include/media/msm_vidc.h | 2 +- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c index 554e89a19fa3..b71681cd5cfe 100644 --- a/drivers/media/platform/msm/vidc/msm_vdec.c +++ b/drivers/media/platform/msm/vidc/msm_vdec.c @@ -1115,6 +1115,13 @@ int msm_vdec_s_ext_ctrl(struct msm_vidc_inst *inst, __func__, rc); break; } + rc = msm_comm_try_get_bufreqs(inst); + if (rc) { + dprintk(VIDC_ERR, + "%s Failed to get buffer requirements : %d\n", + __func__, rc); + break; + } } inst->clk_data.dpb_fourcc = fourcc; break; diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 2ca3e8d513f0..c3a06854aa8e 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -837,13 +837,13 @@ static inline int start_streaming(struct msm_vidc_inst *inst) b.buffer_type = HAL_BUFFER_OUTPUT; } + rc = msm_comm_try_get_bufreqs(inst); + b.buffer_size = inst->bufq[CAPTURE_PORT].plane_sizes[0]; rc = call_hfi_op(hdev, session_set_property, inst->session, HAL_PARAM_BUFFER_SIZE_MINIMUM, &b); - rc = msm_comm_try_get_bufreqs(inst); - /* Verify if buffer counts are correct */ rc = msm_vidc_verify_buffer_counts(inst); if (rc) { diff --git a/include/media/msm_vidc.h b/include/media/msm_vidc.h index bb5a21cb682d..623b6f0a5e05 100644 --- a/include/media/msm_vidc.h +++ b/include/media/msm_vidc.h @@ -20,7 +20,7 @@ #include #include -#define HAL_BUFFER_MAX 0xb +#define HAL_BUFFER_MAX 0xd enum smem_type { SMEM_ION, -- GitLab From 169a372f22e33b48f11b71fd6f4d3a23fe2fa6d5 Mon Sep 17 00:00:00 2001 From: George Shen Date: Fri, 23 Jun 2017 10:58:18 -0700 Subject: [PATCH 334/786] msm: kgsl: Fixed a warning when turning off GMU clock Before turning off GMU clock, GMU driver voted a wrong clock frequency. CRs-Fixed: 2062271 Change-Id: I356fd1df223ea0ee402dd10bd52f4da16258c7dd Signed-off-by: George Shen --- drivers/gpu/msm/kgsl_gmu.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c index 2a6e7ddee5c0..832b2b6db79d 100644 --- a/drivers/gpu/msm/kgsl_gmu.c +++ b/drivers/gpu/msm/kgsl_gmu.c @@ -1202,14 +1202,16 @@ static int gmu_enable_clks(struct gmu_device *gmu) static int gmu_disable_clks(struct gmu_device *gmu) { int ret, j = 0; + unsigned int gmu_freq; if (IS_ERR_OR_NULL(gmu->clks[0])) return 0; - ret = clk_set_rate(gmu->clks[0], gmu->gmu_freqs[0]); + gmu_freq = gmu->gmu_freqs[gmu->num_gmupwrlevels - 1]; + ret = clk_set_rate(gmu->clks[0], gmu_freq); if (ret) { dev_err(&gmu->pdev->dev, "fail to reset GMU clk freq %d\n", - gmu->gmu_freqs[0]); + gmu_freq); return ret; } -- GitLab From 68553202b9a0b972e5b3467dcb4c5a5a567b5a21 Mon Sep 17 00:00:00 2001 From: Praneeth Paladugu Date: Mon, 26 Jun 2017 16:08:56 -0700 Subject: [PATCH 335/786] msm: vidc: Misc bug fixes in platform specific data This change has following bug fixes - Fix typo for Never Unload FW key string. - Reduce HW response time than power collapse time. 
CRs-Fixed: 2062045 Change-Id: I354962b15ff7e0faaf42b14d665cbe7e58af15e3 Signed-off-by: Praneeth Paladugu --- drivers/media/platform/msm/vidc/msm_vidc_platform.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c index 25f22c7c5898..8a701cba1ae9 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c @@ -53,14 +53,14 @@ static struct msm_vidc_codec_data sdm845_codec_data[] = { static struct msm_vidc_common_data default_common_data[] = { { - .key = "qcon,never-unload-fw", + .key = "qcom,never-unload-fw", .value = 1, }, }; static struct msm_vidc_common_data sdm845_common_data[] = { { - .key = "qcon,never-unload-fw", + .key = "qcom,never-unload-fw", .value = 1, }, { @@ -97,7 +97,7 @@ static struct msm_vidc_common_data sdm845_common_data[] = { }, { .key = "qcom,hw-resp-timeout", - .value = 2000, + .value = 250, }, }; -- GitLab From cde17edb330aee3d61d1b2b3d84e6dd74fdad5cc Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Wed, 21 Jun 2017 16:51:26 -0700 Subject: [PATCH 336/786] msm: ipa: prevent string buffer overflows On rmnet_ipa_set_data_quota() API, add the string terminator to prevent vulnerability of string buffer overflows on debug prints. Change-Id: Ie669f6606f76b9006bce4edd0c6d04aef9cfb600 Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 3 +++ drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index 29766fb84dc8..11eeb2f452fa 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -2616,6 +2616,9 @@ static int rmnet_ipa_set_data_quota_modem(struct wan_ioctl_set_data_quota *data) if (!data->set_quota) ipa_qmi_stop_data_qouta(); + /* prevent string buffer overflows */ + data->interface_name[IFNAMSIZ-1] = '\0'; + index = find_vchannel_name_index(data->interface_name); IPAWANERR("iface name %s, quota %lu\n", data->interface_name, diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index fcaabe3fb48f..cea4dc90782c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -2725,6 +2725,9 @@ static int rmnet_ipa3_set_data_quota_modem( if (!data->set_quota) ipa3_qmi_stop_data_qouta(); + /* prevent string buffer overflows */ + data->interface_name[IFNAMSIZ-1] = '\0'; + index = find_vchannel_name_index(data->interface_name); IPAWANERR("iface name %s, quota %lu\n", data->interface_name, -- GitLab From 27cfd4edee7dcc5f645fd2818f387c6f16a5b8b5 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Thu, 1 Jun 2017 13:43:25 -0600 Subject: [PATCH 337/786] net: add a per-cpu counter for the number of frames coalesced in GRO A low cost method of determining GRO statistics is required. This change introduces a new counter which tracks whenever GRO coalesces ingress packets. The counter is per-CPU and exposed in /proc/net/softnet_stat as the last column of data. No user space impact is expected as a result of this change. However, this change should be reverted if legacy tools have problems with the new column in softnet_stat. 
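Because the counter only shows up as an extra trailing column, existing consumers need no new interface; a reader simply takes one more field per row. A hypothetical user-space reader is sketched below (not part of this patch; it assumes the GRO-coalesced count is the last hex field on each per-CPU row, which is where this change places it, and that position may differ on other kernels):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	FILE *fp = fopen("/proc/net/softnet_stat", "r");
	char line[512];
	int cpu = 0;

	if (!fp)
		return 1;

	while (fgets(line, sizeof(line), fp)) {
		char *last = NULL;
		char *tok = strtok(line, " \n");

		/* remember the final hex field on this per-CPU row */
		while (tok) {
			last = tok;
			tok = strtok(NULL, " \n");
		}
		if (last)
			printf("cpu%d gro_coalesced=%lu\n",
			       cpu++, strtoul(last, NULL, 16));
	}
	fclose(fp);
	return 0;
}
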
CRs-Fixed: 2062245 Change-Id: I05965c0cb150947935d5977884cc4d583b37131d Signed-off-by: Subash Abhinov Kasiviswanathan --- include/linux/netdevice.h | 2 ++ net/core/dev.c | 1 + net/core/net-procfs.c | 12 ++++++------ 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a47c29e6d052..a9dcd27f83c7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2831,6 +2831,8 @@ struct softnet_data { unsigned int processed; unsigned int time_squeeze; unsigned int received_rps; + unsigned int gro_coalesced; + #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; #endif diff --git a/net/core/dev.c b/net/core/dev.c index dff801241dd1..2b126034dca8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4400,6 +4400,7 @@ static int napi_gro_complete(struct sk_buff *skb) } out: + __this_cpu_add(softnet_data.gro_coalesced, NAPI_GRO_CB(skb)->count > 1); return netif_receive_skb_internal(skb); } diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index 14d09345f00d..699c4e70da62 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -158,12 +158,12 @@ static int softnet_seq_show(struct seq_file *seq, void *v) rcu_read_unlock(); #endif - seq_printf(seq, - "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", - sd->processed, sd->dropped, sd->time_squeeze, 0, - 0, 0, 0, 0, /* was fastroute */ - 0, /* was cpu_collision */ - sd->received_rps, flow_limit_count); + seq_printf + (seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", + sd->processed, sd->dropped, sd->time_squeeze, 0, + 0, 0, 0, 0, /* was fastroute */ + 0, /* was cpu_collision */ + sd->received_rps, flow_limit_count, sd->gro_coalesced); return 0; } -- GitLab From 24f80a003235c6fadf34294b9dd3334813b9736c Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Thu, 1 Jun 2017 13:56:05 -0600 Subject: [PATCH 338/786] tun: Set CHECKSUM_UNNECESSARY if userspace passes this indication Userspace may already know that the checksum validation is already completed prior to being passed to the TUN interface. As a result, computing checksum again in network stack may be a redundant operation. Add support to read this information from the TUN header flags to skip checksum validation for these packets only. This is useful in cases where the packet checksum was computed by hardware for IPv4 / IPv6 TCP / UDP packets. Since the packet intergrity was already verified for packets over the wire, subsequent validation within the network stack is redundant work. 
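On the user-space side, the producer that already validated the checksum has to pass that hint through the packet-information header. A hypothetical writer is sketched below; it assumes the device was opened with IFF_TUN and without IFF_NO_PI, and that the writer mirrors the kernel's CHECKSUM_UNNECESSARY value (1) in tun_pi.flags, since that is what the check added here tests for; this is not a documented mainline TUN flag.

#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t tun_write_checksummed(int tun_fd, const void *pkt, size_t len)
{
	struct tun_pi pi = {
		.flags = htons(1),		/* CHECKSUM_UNNECESSARY (assumed value) */
		.proto = htons(ETH_P_IP),
	};
	struct iovec iov[2] = {
		{ .iov_base = &pi,          .iov_len = sizeof(pi) },
		{ .iov_base = (void *)pkt,  .iov_len = len },
	};

	/* pi header first, then the already-verified packet payload */
	return writev(tun_fd, iov, 2);
}
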
CRs-Fixed: 2062245 Change-Id: I18ee3408c05910207b205d6205f282e6f3599156 Signed-off-by: Subash Abhinov Kasiviswanathan --- drivers/net/tun.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 4b7a363448a2..35aa28b74e63 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1276,6 +1276,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return -EINVAL; } + if (!(tun->flags & IFF_NO_PI)) + if (pi.flags & htons(CHECKSUM_UNNECESSARY)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + switch (tun->flags & TUN_TYPE_MASK) { case IFF_TUN: if (tun->flags & IFF_NO_PI) { -- GitLab From c6595493908866f8b431e9e5d1d8e452a324dfb5 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Mon, 26 Jun 2017 19:04:30 -0600 Subject: [PATCH 339/786] net: Reset NAPI bit if IPI failed During hotplug if an RPS CPU goes offline, then there is a possibility that the IPI delivery to the RPS core might fail, this happens in the cases when unruly drivers use netif_rx API in the wrong context. This happens due to two reasons a) Firstly using netif_rx API in non preemptive context leads to enough latencies that the IPI delivery might fail to an RPS core. This is because the softIRQ trigger will become unpredictable. b) by using netif_rx it becomes an architectural issue where we are trying to do two things in two different contexts. We set the NAPI bit in context and sent the IPI in other context. Now since the context switch is allowed, the remote CPU is allowed to go finish its hotplug. If there was no context switch in the first place, which typically happens by either using the correct version of netif_rx or switching to NAPI framework, then the remote CPU is not allowed to go to CPU DOWN state. This is by design since hotplug framework causes the remote dying CPU to wait until atleast one context switch happens on all other CPUS. If preemption is disabled then the dying CPU has to wait until preemption is enabled and a context switch happens. This patch catches these unruly drivers and handles IPI misses by clearing NAPI sate on remote RPS CPUs Please refere here for more documentation on hotplug and preemption cases https://lwn.net/Articles/569686/ CRs-Fixed: 2062245 Change-Id: I072f91bdb4d7e444e3624e8e010ef1b66a67b1ed Signed-off-by: Subash Abhinov Kasiviswanathan --- net/core/dev.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index dff801241dd1..806c2eb01ccd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4836,9 +4836,15 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) while (remsd) { struct softnet_data *next = remsd->rps_ipi_next; - if (cpu_online(remsd->cpu)) + if (cpu_online(remsd->cpu)) { smp_call_function_single_async(remsd->cpu, &remsd->csd); + } else { + pr_err("%s() cpu offline\n", __func__); + rps_lock(remsd); + remsd->backlog.state = 0; + rps_unlock(remsd); + } remsd = next; } } else -- GitLab From 36371a16e69f6054267573d22dba6fbe1eb6c317 Mon Sep 17 00:00:00 2001 From: Veerabhadrarao Badiganti Date: Tue, 4 Apr 2017 12:38:24 +0530 Subject: [PATCH 340/786] mmc: core: Increase the runtime PM reference count in try_claim_host Runtime PM reference count is being increased in mmc_claim_host() and is decreased in mmc_release_host(). This reference count is kept during the complete cycle of a claim -> release host. Same need to be done even in mmc_try_claim_host() as well. 
Increase the runtime PM reference count by invoking pm_runtime_get_sync() from mmc_try_claim_host() upon first successful claim. Without this change the runtime PM reference count goes for a toss since count is not getting incremented in mmc_try_claim_host() but is getting decremented in mmc_release_host(). Change-Id: I77836875b4700a4bf3dbde2bf1abdf2ad36c4cac Signed-off-by: Veerabhadrarao Badiganti --- drivers/mmc/core/core.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 787779c6699b..c19aa0ca4cdf 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2252,6 +2252,7 @@ int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms) int claimed_host = 0; unsigned long flags; int retry_cnt = delay_ms/10; + bool pm = false; do { spin_lock_irqsave(&host->lock, flags); @@ -2260,11 +2261,17 @@ int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms) host->claimer = current; host->claim_cnt += 1; claimed_host = 1; + if (host->claim_cnt == 1) + pm = true; } spin_unlock_irqrestore(&host->lock, flags); if (!claimed_host) mmc_delay(10); } while (!claimed_host && retry_cnt--); + + if (pm) + pm_runtime_get_sync(mmc_dev(host)); + if (host->ops->enable && claimed_host && host->claim_cnt == 1) host->ops->enable(host); return claimed_host; -- GitLab From 2e52d49d65cb12d5f9d3b88a1d169c2764b5f75b Mon Sep 17 00:00:00 2001 From: Sudarshan Rajagopalan Date: Mon, 26 Jun 2017 23:08:59 -0700 Subject: [PATCH 341/786] defconfig: sdm845: enable iommu debug tracking Enable CONFIG_IOMMU_DEBUG_TRACKING into perf-defconfig. This config is needed for the iommu page-table parsing to proceed successfully in the lrdp tool. Change-Id: Iea8020dfb93ba0cc37c0626a2e7d16fc1069e5fc Signed-off-by: Sudarshan Rajagopalan --- arch/arm64/configs/sdm845-perf_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index 8a5b17d3b9fe..b6d210667b8f 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -463,6 +463,7 @@ CONFIG_IOMMU_IO_PGTABLE_FAST=y CONFIG_ARM_SMMU=y CONFIG_QCOM_LAZY_MAPPING=y CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y CONFIG_IOMMU_TESTS=y CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_LLCC=y -- GitLab From 7a592aea6a44eb9b19c33b1c3380d6694bea7a05 Mon Sep 17 00:00:00 2001 From: Hemant Gupta Date: Fri, 3 Mar 2017 20:01:00 +0530 Subject: [PATCH 342/786] HID: Remove playstation4 as special driver Playstation4 needs to be removed as having special hid driver and needs to be used as generic hid driver because special hid driver support is not working well. 
CRs-Fixed: 2014640 Change-Id: Ibf053717adf043274ad3c4f889e5a88abf17e387 Signed-off-by: Hemant Gupta --- drivers/hid/hid-core.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index a5dd7e63ada3..cda2bc571734 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2056,7 +2056,6 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) }, - { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, -- GitLab From 8fbf35806500a2755331cbb804517b8a6996ea4b Mon Sep 17 00:00:00 2001 From: Ashay Jaiswal Date: Mon, 27 Feb 2017 12:33:17 +0530 Subject: [PATCH 343/786] spmi-pmic-arb: add support to dispatch interrupt based on IRQ status Current implementation of SPMI arbiter dispatches interrupt based on the Arbiter's accumulator status, in some cases the accumulator status may remain zero and the interrupt remains un-handled. Add logic to dispatch interrupts based Arbiter's IRQ status if the accumulator status is zero. CRs-Fixed: 2934741 Change-Id: I068f5c7d33758063878721d7cce1308fa803e3bd Signed-off-by: Ashay Jaiswal --- drivers/spmi/spmi-pmic-arb.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 9cc85eea8cef..bfd4b7a23292 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -576,10 +576,16 @@ static void __pmic_arb_chained_irq(struct spmi_pmic_arb *pa, bool show) int last = pa->max_apid >> 5; u32 status, enable; int i, id, apid; + /* status based dispatch */ + bool acc_valid = false; + u32 irq_status = 0; for (i = first; i <= last; ++i) { status = readl_relaxed(pa->acc_status + pa->ver_ops->owner_acc_status(pa->ee, i)); + if (status) + acc_valid = true; + while (status) { id = ffs(status) - 1; status &= ~BIT(id); @@ -595,6 +601,28 @@ static void __pmic_arb_chained_irq(struct spmi_pmic_arb *pa, bool show) periph_interrupt(pa, apid, show); } } + + /* ACC_STATUS is empty but IRQ fired check IRQ_STATUS */ + if (!acc_valid) { + for (i = pa->min_apid; i <= pa->max_apid; i++) { + /* skip if APPS is not irq owner */ + if (pa->apid_data[i].irq_owner != pa->ee) + continue; + + irq_status = readl_relaxed(pa->intr + + pa->ver_ops->irq_status(i)); + if (irq_status) { + enable = readl_relaxed(pa->intr + + pa->ver_ops->acc_enable(i)); + if (enable & SPMI_PIC_ACC_ENABLE_BIT) { + dev_dbg(&pa->spmic->dev, + "Dispatching IRQ for apid=%d status=%x\n", + i, irq_status); + periph_interrupt(pa, i, show); + } + } + } + } } static void pmic_arb_chained_irq(struct irq_desc *desc) -- GitLab From e0fab6165bea7294fbfd1b451cc291aed185bc41 Mon Sep 17 00:00:00 2001 From: Hemant Gupta Date: Fri, 23 Jun 2017 20:51:47 +0530 Subject: [PATCH 344/786] defconfig: sdm845: Add support for BT uhid drivers Add support for BT uhid drivers CRs-Fixed: 553571 Signed-off-by: Hemant Gupta Change-Id: If56742a9c6b6c4ef774da6e83d57aee56bf28842 --- arch/arm64/configs/sdm845-perf_defconfig | 3 +++ arch/arm64/configs/sdm845_defconfig | 3 +++ 2 files changed, 6 insertions(+) diff --git 
a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index 8a5b17d3b9fe..5a096d65f54c 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -368,7 +368,10 @@ CONFIG_SND_SOC=y CONFIG_SND_SOC_SDM845=y CONFIG_UHID=y CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y CONFIG_HID_PLANTRONICS=y CONFIG_USB=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index e70963ae464c..65b6bd800d68 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -375,7 +375,10 @@ CONFIG_SND_SOC=y CONFIG_SND_SOC_SDM845=y CONFIG_UHID=y CONFIG_HID_APPLE=y +CONFIG_HID_ELECOM=y +CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=y +CONFIG_HID_MULTITOUCH=y CONFIG_HID_PLANTRONICS=y CONFIG_USB=y CONFIG_USB_XHCI_HCD=y -- GitLab From 26f9e8b2f5fc3ebea8b9004ddc307f251fd7303d Mon Sep 17 00:00:00 2001 From: Hemant Gupta Date: Wed, 28 Sep 2016 11:19:03 +0530 Subject: [PATCH 345/786] Bluetooth: HID: Add Bus type for specific HID Keyboard Add Bluetooth bus type for product id = 0x05ac and vendor id = 0x0220, so that it can be handled by specific vendor driver to parse specific key events which cannot be handled by generic USB HID driver(s) of kernel. CRs-Fixed: 1072093 Change-Id: I68b585db1b350c7ffd8ea662cab550aaa7a0727d Signed-off-by: Hemant Gupta --- drivers/hid/hid-apple.c | 3 +++ drivers/hid/hid-core.c | 2 ++ 2 files changed, 5 insertions(+) diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 2e046082210f..cb2e85c69d7d 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -440,6 +440,9 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI), .driver_data = APPLE_HAS_FN }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_ALU_ANSI), + .driver_data = APPLE_HAS_FN }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO), .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS), diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index a5dd7e63ada3..9e82e5e14709 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1791,6 +1791,8 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_MINI_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ANSI) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_ALU_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, -- GitLab From dd670e09a4eb4edddf7452e4a0e7271d38272170 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 27 Jun 2017 10:30:33 +0200 Subject: [PATCH 346/786] Revert "ANDROID: kernel/watchdog: fix unused variable warning" This reverts commit b5ea92ffa88e9d4d6ba8fc2da32be942ed87ce88. We need to do this for some merge issues with 4.9.32, the commit will be put back later... 
Cc: Brian Norris Signed-off-by: Greg Kroah-Hartman --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index ea6271bed784..1970037b6029 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -120,7 +120,7 @@ static unsigned long soft_lockup_nmi_warn; #ifdef CONFIG_HARDLOCKUP_DETECTOR unsigned int __read_mostly hardlockup_panic = CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; -static unsigned long __maybe_unused hardlockup_allcpu_dumped; +static unsigned long hardlockup_allcpu_dumped; /* * We may not want to enable hard lockup detection by default in all cases, * for example when running the kernel as a guest on a hypervisor. In these -- GitLab From a26cd8eba6b580b15f27193637519b33b08e05ea Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 27 Jun 2017 10:31:16 +0200 Subject: [PATCH 347/786] Revert "ANDROID: hardlockup: detect hard lockups without NMIs using secondary cpus" This reverts commit aaf78ec6c898b72f756bdb7d9ea43c5c15b8ae18. We need to do this to handle a merge conflict with 4.9.32. It will be forward ported after the merge is complete. Cc: Brian Norris Signed-off-by: Greg Kroah-Hartman --- include/linux/nmi.h | 5 +- kernel/watchdog.c | 123 ++------------------------------------------ lib/Kconfig.debug | 14 +---- 3 files changed, 7 insertions(+), 135 deletions(-) diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 780949dff0ed..a78c35cff1ae 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -14,11 +14,8 @@ * may be used to reset the timeout - for code which intentionally * disables interrupts for a long time. This call is stateless. */ -#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI) -#include -#endif - #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) +#include extern void touch_nmi_watchdog(void); #else static inline void touch_nmi_watchdog(void) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 1970037b6029..6d1020c03d41 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -104,11 +104,6 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved); static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); -#endif -#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU -static cpumask_t __read_mostly watchdog_cpus; -#endif -#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); #endif static unsigned long soft_lockup_nmi_warn; @@ -292,7 +287,7 @@ void touch_softlockup_watchdog_sync(void) __this_cpu_write(watchdog_touch_ts, 0); } -#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI +#ifdef CONFIG_HARDLOCKUP_DETECTOR /* watchdog detector functions */ static bool is_hardlockup(void) { @@ -306,76 +301,6 @@ static bool is_hardlockup(void) } #endif -#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU -static unsigned int watchdog_next_cpu(unsigned int cpu) -{ - cpumask_t cpus = watchdog_cpus; - unsigned int next_cpu; - - next_cpu = cpumask_next(cpu, &cpus); - if (next_cpu >= nr_cpu_ids) - next_cpu = cpumask_first(&cpus); - - if (next_cpu == cpu) - return nr_cpu_ids; - - return next_cpu; -} - -static int is_hardlockup_other_cpu(unsigned int cpu) -{ - unsigned long hrint = per_cpu(hrtimer_interrupts, cpu); - - if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint) - return 1; - - per_cpu(hrtimer_interrupts_saved, cpu) = hrint; - return 0; -} - -static void watchdog_check_hardlockup_other_cpu(void) 
-{ - unsigned int next_cpu; - - /* - * Test for hardlockups every 3 samples. The sample period is - * watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over - * watchdog_thresh (over by 20%). - */ - if (__this_cpu_read(hrtimer_interrupts) % 3 != 0) - return; - - /* check for a hardlockup on the next cpu */ - next_cpu = watchdog_next_cpu(smp_processor_id()); - if (next_cpu >= nr_cpu_ids) - return; - - smp_rmb(); - - if (per_cpu(watchdog_nmi_touch, next_cpu) == true) { - per_cpu(watchdog_nmi_touch, next_cpu) = false; - return; - } - - if (is_hardlockup_other_cpu(next_cpu)) { - /* only warn once */ - if (per_cpu(hard_watchdog_warn, next_cpu) == true) - return; - - if (hardlockup_panic) - panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu); - else - WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu); - - per_cpu(hard_watchdog_warn, next_cpu) = true; - } else { - per_cpu(hard_watchdog_warn, next_cpu) = false; - } -} -#else -static inline void watchdog_check_hardlockup_other_cpu(void) { return; } -#endif - static int is_softlockup(unsigned long touch_ts) { unsigned long now = get_timestamp(); @@ -388,7 +313,7 @@ static int is_softlockup(unsigned long touch_ts) return 0; } -#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI +#ifdef CONFIG_HARDLOCKUP_DETECTOR static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, @@ -450,7 +375,7 @@ static void watchdog_overflow_callback(struct perf_event *event, __this_cpu_write(hard_watchdog_warn, false); return; } -#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */ +#endif /* CONFIG_HARDLOCKUP_DETECTOR */ static void watchdog_interrupt_count(void) { @@ -474,9 +399,6 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) /* kick the hardlockup detector */ watchdog_interrupt_count(); - /* test for hardlockups on the next cpu */ - watchdog_check_hardlockup_other_cpu(); - /* kick the softlockup detector */ wake_up_process(__this_cpu_read(softlockup_watchdog)); @@ -654,7 +576,7 @@ static void watchdog(unsigned int cpu) watchdog_nmi_disable(cpu); } -#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI +#ifdef CONFIG_HARDLOCKUP_DETECTOR /* * People like the simple clean cpu node info on boot. * Reduce the watchdog noise by only printing messages @@ -752,45 +674,10 @@ static void watchdog_nmi_disable(unsigned int cpu) } } -#else -#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU -static int watchdog_nmi_enable(unsigned int cpu) -{ - /* - * The new cpu will be marked online before the first hrtimer interrupt - * runs on it. If another cpu tests for a hardlockup on the new cpu - * before it has run its first hrtimer, it will get a false positive. - * Touch the watchdog on the new cpu to delay the first check for at - * least 3 sampling periods to guarantee one hrtimer has run on the new - * cpu. - */ - per_cpu(watchdog_nmi_touch, cpu) = true; - smp_wmb(); - cpumask_set_cpu(cpu, &watchdog_cpus); - return 0; -} - -static void watchdog_nmi_disable(unsigned int cpu) -{ - unsigned int next_cpu = watchdog_next_cpu(cpu); - - /* - * Offlining this cpu will cause the cpu before this one to start - * checking the one after this one. If this cpu just finished checking - * the next cpu and updating hrtimer_interrupts_saved, and then the - * previous cpu checks it within one sample period, it will trigger a - * false positive. Touch the watchdog on the next cpu to prevent it. 
- */ - if (next_cpu < nr_cpu_ids) - per_cpu(watchdog_nmi_touch, next_cpu) = true; - smp_wmb(); - cpumask_clear_cpu(cpu, &watchdog_cpus); -} #else static int watchdog_nmi_enable(unsigned int cpu) { return 0; } static void watchdog_nmi_disable(unsigned int cpu) { return; } -#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */ -#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */ +#endif /* CONFIG_HARDLOCKUP_DETECTOR */ static struct smp_hotplug_thread watchdog_threads = { .store = &softlockup_watchdog, diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 8afce47c31a1..9109b279e20e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -764,27 +764,15 @@ config LOCKUP_DETECTOR The overhead should be minimal. A periodic hrtimer runs to generate interrupts and kick the watchdog task every 4 seconds. An NMI is generated every 10 seconds or so to check for hardlockups. - If NMIs are not available on the platform, every 12 seconds the - hrtimer interrupt on one cpu will be used to check for hardlockups - on the next cpu. The frequency of hrtimer and NMI events and the soft and hard lockup thresholds can be controlled through the sysctl watchdog_thresh. -config HARDLOCKUP_DETECTOR_NMI +config HARDLOCKUP_DETECTOR def_bool y depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI -config HARDLOCKUP_DETECTOR_OTHER_CPU - def_bool y - depends on LOCKUP_DETECTOR && SMP - depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG - -config HARDLOCKUP_DETECTOR - def_bool y - depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU - config BOOTPARAM_HARDLOCKUP_PANIC bool "Panic (Reboot) On Hard Lockups" depends on HARDLOCKUP_DETECTOR -- GitLab From fea56da3eaa68e0d66e7f7ab622e7547c105d1ae Mon Sep 17 00:00:00 2001 From: Ajay Agarwal Date: Tue, 30 May 2017 10:27:23 +0530 Subject: [PATCH 348/786] usb: gadget: Bind android devices for all UDC gadgets For targets with multiple UDCs having DRD feature, two UDCs may be in peripheral mode. In that case, create and bind android device corresponding to each gadget as and when mkdir gadget is run. Keep global structure android_device as android0 (or g1's device) for ease of use by f_midi and f_audio_source. 
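Illustrative sketch only (not part of this patch): the per-gadget device name is
derived from the order in which gadget directories are created, so the first
configfs gadget is bound to android0, the next to android1, and so on, while the
legacy android_device pointer keeps tracking the first gadget's device for
f_midi and f_audio_source. A minimal stand-alone userspace sketch of that
assumed naming scheme (hypothetical main(), not driver code):

	#include <stdio.h>

	int main(void)
	{
		int gadget_index;
		char name[10];

		/* gadget_index counts created gadget directories from 1;
		 * the bound android device is named with an off-by-one,
		 * so g1 -> android0, g2 -> android1, ...
		 */
		for (gadget_index = 1; gadget_index <= 3; gadget_index++) {
			snprintf(name, sizeof(name), "android%d",
				 gadget_index - 1);
			printf("gadget %d -> %s\n", gadget_index, name);
		}
		return 0;
	}
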
Change-Id: Idae6f6d0d8811f27e836f5f6399395a15fbf3c2f Signed-off-by: Ajay Agarwal --- drivers/usb/gadget/configfs.c | 46 +++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 16 deletions(-) diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 6b2c1379923b..53965576adc1 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c @@ -22,6 +22,7 @@ void acc_disconnect(void); static struct class *android_class; static struct device *android_device; static int index; +static int gadget_index; struct device *create_function_device(char *name) { @@ -1425,21 +1426,21 @@ static void android_work(struct work_struct *data) spin_unlock_irqrestore(&cdev->lock, flags); if (status[0]) { - kobject_uevent_env(&android_device->kobj, + kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, connected); pr_info("%s: sent uevent %s\n", __func__, connected[0]); uevent_sent = true; } if (status[1]) { - kobject_uevent_env(&android_device->kobj, + kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, configured); pr_info("%s: sent uevent %s\n", __func__, configured[0]); uevent_sent = true; } if (status[2]) { - kobject_uevent_env(&android_device->kobj, + kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, disconnected); pr_info("%s: sent uevent %s\n", __func__, disconnected[0]); uevent_sent = true; @@ -1600,23 +1601,28 @@ static int android_device_create(struct gadget_info *gi) { struct device_attribute **attrs; struct device_attribute *attr; + char str[10]; INIT_WORK(&gi->work, android_work); - android_device = device_create(android_class, NULL, - MKDEV(0, 0), NULL, "android0"); - if (IS_ERR(android_device)) - return PTR_ERR(android_device); + snprintf(str, sizeof(str), "android%d", gadget_index - 1); + pr_debug("Creating android device %s\n", str); + gi->dev = device_create(android_class, NULL, + MKDEV(0, 0), NULL, str); + if (IS_ERR(gi->dev)) + return PTR_ERR(gi->dev); - dev_set_drvdata(android_device, gi); + dev_set_drvdata(gi->dev, gi); + if (gadget_index == 1) + android_device = gi->dev; attrs = android_usb_attributes; while ((attr = *attrs++)) { int err; - err = device_create_file(android_device, attr); + err = device_create_file(gi->dev, attr); if (err) { - device_destroy(android_device->class, - android_device->devt); + device_destroy(gi->dev->class, + gi->dev->devt); return err; } } @@ -1624,15 +1630,15 @@ static int android_device_create(struct gadget_info *gi) return 0; } -static void android_device_destroy(void) +static void android_device_destroy(struct device *dev) { struct device_attribute **attrs; struct device_attribute *attr; attrs = android_usb_attributes; while ((attr = *attrs++)) - device_remove_file(android_device, attr); - device_destroy(android_device->class, android_device->devt); + device_remove_file(dev, attr); + device_destroy(dev->class, dev->devt); } #else static inline int android_device_create(struct gadget_info *gi) @@ -1640,7 +1646,7 @@ static inline int android_device_create(struct gadget_info *gi) return 0; } -static inline void android_device_destroy(void) +static inline void android_device_destroy(struct device *dev) { } #endif @@ -1696,6 +1702,8 @@ static struct config_group *gadgets_make( if (!gi->composite.gadget_driver.function) goto err; + gadget_index++; + pr_debug("Creating gadget index %d\n", gadget_index); if (android_device_create(gi) < 0) goto err; @@ -1708,8 +1716,14 @@ static struct config_group *gadgets_make( static void gadgets_drop(struct config_group *group, struct config_item *item) { + struct gadget_info *gi; + + 
gi = container_of(to_config_group(item), struct gadget_info, group); config_item_put(item); - android_device_destroy(); + if (gi->dev) { + android_device_destroy(gi->dev); + gi->dev = NULL; + } } static struct configfs_group_operations gadgets_ops = { -- GitLab From 9481c3ebf4b0b7bf2cec1f39be30140a792238bf Mon Sep 17 00:00:00 2001 From: Olav Haugan Date: Fri, 23 Jun 2017 11:04:21 -0700 Subject: [PATCH 349/786] defconfig: sdm845: Enable stack protector strong Change build from using configuration option stack protector regular to stack protector strong. With stack protector strong more functions will be protected from stack overflow. Change-Id: I7358570557f7543a5b6ebc51736ccc2f860e260a Signed-off-by: Olav Haugan --- arch/arm64/configs/sdm845-perf_defconfig | 2 +- arch/arm64/configs/sdm845_defconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index 8a5b17d3b9fe..ac7eb50c407a 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -39,7 +39,7 @@ CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index e70963ae464c..1c01513be91c 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -42,7 +42,7 @@ CONFIG_BPF_SYSCALL=y CONFIG_EMBEDDED=y # CONFIG_COMPAT_BRK is not set CONFIG_PROFILING=y -CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_CC_STACKPROTECTOR_STRONG=y CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y -- GitLab From 326dbb8273bab613412f1581c86bf80bf78ae188 Mon Sep 17 00:00:00 2001 From: Michael Adisumarta Date: Tue, 27 Jun 2017 11:29:41 -0700 Subject: [PATCH 350/786] msm: ipa3: Fix fast_replenish race condition. Add spinlocks to protect rx_packet from being contested by the current ctx and the work_queue ctx which causes a race condition that crashes the APPS. Change-Id: I8fb9a894f8523ce39411842eb4963a59b61939ae CRs-Fixed: 2059082 Signed-off-by: Michael Adisumarta --- drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c index 915f2b8de82a..4fb4da8688c9 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c @@ -1836,6 +1836,8 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys) struct gsi_xfer_elem gsi_xfer_elem_one; u32 curr; + spin_lock_bh(&sys->spinlock); + rx_len_cached = sys->len; curr = atomic_read(&sys->repl.head_idx); @@ -1878,6 +1880,7 @@ static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys) mb(); atomic_set(&sys->repl.head_idx, curr); } + spin_unlock_bh(&sys->spinlock); queue_work(sys->repl_wq, &sys->repl_work); -- GitLab From 083533b1a8b29532e268c3ed6d7a8cee12f9f3d5 Mon Sep 17 00:00:00 2001 From: Banajit Goswami Date: Sat, 13 May 2017 01:32:10 -0700 Subject: [PATCH 351/786] ASoC: wcd: remove unused wcd9330 codec driver WCD9330 codec driver is not used anymore on newer MSM targets. Remove these unused drivers. 
Change-Id: If96cacbf7100933dd06ee3b98e42c08f36524bbe Signed-off-by: Banajit Goswami --- sound/soc/codecs/Kconfig | 15 +- sound/soc/codecs/Makefile | 5 - sound/soc/codecs/wcd9330-tables.c | 1675 ------ sound/soc/codecs/wcd9330.c | 9113 ----------------------------- sound/soc/codecs/wcd9330.h | 128 - sound/soc/codecs/wcd9xxx-common.c | 1480 ----- sound/soc/codecs/wcd9xxx-common.h | 286 - sound/soc/codecs/wcd9xxx-mbhc.c | 5671 ------------------ sound/soc/codecs/wcd9xxx-mbhc.h | 492 -- sound/soc/codecs/wcd9xxx-resmgr.c | 1099 ---- sound/soc/codecs/wcd9xxx-resmgr.h | 280 - 11 files changed, 2 insertions(+), 20242 deletions(-) delete mode 100644 sound/soc/codecs/wcd9330-tables.c delete mode 100644 sound/soc/codecs/wcd9330.c delete mode 100644 sound/soc/codecs/wcd9330.h delete mode 100644 sound/soc/codecs/wcd9xxx-common.c delete mode 100644 sound/soc/codecs/wcd9xxx-common.h delete mode 100644 sound/soc/codecs/wcd9xxx-mbhc.c delete mode 100644 sound/soc/codecs/wcd9xxx-mbhc.h delete mode 100644 sound/soc/codecs/wcd9xxx-resmgr.c delete mode 100644 sound/soc/codecs/wcd9xxx-resmgr.h diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 17224de33037..158b1ee2bab8 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -878,13 +878,6 @@ config SND_SOC_UDA1380 config SND_SOC_WCD934X_DSD tristate -config SND_SOC_WCD9320 - tristate - -config SND_SOC_WCD9330 - tristate - depends on WCD9330_CODEC - config SND_SOC_WCD9335 tristate depends on WCD9335_CODEC @@ -916,21 +909,17 @@ config SND_SOC_WSA881X_ANALOG tristate select REGMAP_I2C -config SND_SOC_WCD9XXX - tristate - default y if SND_SOC_WCD9320=y || SND_SOC_WCD9330=y || SND_SOC_WCD9335=y - config SND_SOC_WCD9XXX_V2 tristate default y if SND_SOC_WCD9335=y config SND_SOC_WCD_CPE tristate - default y if SND_SOC_WCD9330=y || SND_SOC_WCD9335=y + default y if SND_SOC_WCD9335=y config AUDIO_EXT_CLK tristate - default y if SND_SOC_WCD9335=y || SND_SOC_WCD9330=y || SND_SOC_SDM660_CDC=y + default y if SND_SOC_WCD9335=y || SND_SOC_SDM660_CDC=y config SND_SOC_WCD_MBHC tristate diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 8c844605d6ef..96ebd536fc35 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -158,11 +158,8 @@ snd-soc-twl4030-objs := twl4030.o snd-soc-twl6040-objs := twl6040.o snd-soc-uda134x-objs := uda134x.o snd-soc-uda1380-objs := uda1380.o -snd-soc-wcd9320-objs := wcd9320.o wcd9320-tables.o -snd-soc-wcd9330-objs := wcd9330.o wcd9330-tables.o snd-soc-wcd9335-objs := wcd9335.o snd-soc-wcd934x-objs := wcd934x.o -snd-soc-wcd9xxx-objs := wcd9xxx-resmgr.o wcd9xxx-mbhc.o wcd9xxx-common.o wcdcal-hwdep.o snd-soc-wcd9xxx-v2-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o wcdcal-hwdep.o ifeq ($(CONFIG_COMMON_CLK_MSM), y) audio-ext-clock-objs := audio-ext-clk.o @@ -407,7 +404,6 @@ obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o obj-$(CONFIG_SND_SOC_UDA134X) += snd-soc-uda134x.o obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o obj-$(CONFIG_SND_SOC_WCD9320) += snd-soc-wcd9320.o -obj-$(CONFIG_SND_SOC_WCD9330) += snd-soc-wcd9330.o obj-$(CONFIG_SND_SOC_WCD9335) += snd-soc-wcd9335.o obj-$(CONFIG_SND_SOC_WCD934X) += wcd934x/ ifeq ($(CONFIG_COMMON_CLK_MSM), y) @@ -416,7 +412,6 @@ endif ifeq ($(CONFIG_COMMON_CLK_QCOM), y) obj-$(CONFIG_AUDIO_EXT_CLK) += audio-ext-clock-up.o endif -obj-$(CONFIG_SND_SOC_WCD9XXX) += snd-soc-wcd9xxx.o obj-$(CONFIG_SND_SOC_WCD9XXX_V2) += snd-soc-wcd9xxx-v2.o obj-$(CONFIG_SND_SOC_WCD_CPE) += snd-soc-wcd-cpe.o obj-$(CONFIG_SND_SOC_WCD_MBHC) += snd-soc-wcd-mbhc.o 
diff --git a/sound/soc/codecs/wcd9330-tables.c b/sound/soc/codecs/wcd9330-tables.c deleted file mode 100644 index 1866fb3cf27e..000000000000 --- a/sound/soc/codecs/wcd9330-tables.c +++ /dev/null @@ -1,1675 +0,0 @@ -/* Copyright (c) 2014, 2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include "wcd9330.h" - -const u8 tomtom_reg_readable[WCD9330_MAX_REGISTER + 1] = { - [TOMTOM_A_CHIP_CTL] = 1, - [TOMTOM_A_CHIP_STATUS] = 1, - [TOMTOM_A_CHIP_ID_BYTE_0] = 1, - [TOMTOM_A_CHIP_ID_BYTE_1] = 1, - [TOMTOM_A_CHIP_ID_BYTE_2] = 1, - [TOMTOM_A_CHIP_ID_BYTE_3] = 1, - [TOMTOM_A_CHIP_I2C_SLAVE_ID] = 1, - [TOMTOM_A_SLAVE_ID_1] = 1, - [TOMTOM_A_SLAVE_ID_2] = 1, - [TOMTOM_A_SLAVE_ID_3] = 1, - [TOMTOM_A_PIN_CTL_OE0] = 1, - [TOMTOM_A_PIN_CTL_OE1] = 1, - [TOMTOM_A_PIN_CTL_OE2] = 1, - [TOMTOM_A_PIN_CTL_DATA0] = 1, - [TOMTOM_A_PIN_CTL_DATA1] = 1, - [TOMTOM_A_PIN_CTL_DATA2] = 1, - [TOMTOM_A_HDRIVE_GENERIC] = 1, - [TOMTOM_A_HDRIVE_OVERRIDE] = 1, - [TOMTOM_A_ANA_CSR_WAIT_STATE] = 1, - [TOMTOM_A_PROCESS_MONITOR_CTL0] = 1, - [TOMTOM_A_PROCESS_MONITOR_CTL1] = 1, - [TOMTOM_A_PROCESS_MONITOR_CTL2] = 1, - [TOMTOM_A_PROCESS_MONITOR_CTL3] = 1, - [TOMTOM_A_QFUSE_CTL] = 1, - [TOMTOM_A_QFUSE_STATUS] = 1, - [TOMTOM_A_QFUSE_DATA_OUT0] = 1, - [TOMTOM_A_QFUSE_DATA_OUT1] = 1, - [TOMTOM_A_QFUSE_DATA_OUT2] = 1, - [TOMTOM_A_QFUSE_DATA_OUT3] = 1, - [TOMTOM_A_QFUSE_DATA_OUT4] = 1, - [TOMTOM_A_QFUSE_DATA_OUT5] = 1, - [TOMTOM_A_QFUSE_DATA_OUT6] = 1, - [TOMTOM_A_QFUSE_DATA_OUT7] = 1, - [TOMTOM_A_CDC_CTL] = 1, - [TOMTOM_A_LEAKAGE_CTL] = 1, - [TOMTOM_A_SVASS_MEM_PTR0] = 1, - [TOMTOM_A_SVASS_MEM_PTR1] = 1, - [TOMTOM_A_SVASS_MEM_PTR2] = 1, - [TOMTOM_A_SVASS_MEM_CTL] = 1, - [TOMTOM_A_SVASS_MEM_BANK] = 1, - [TOMTOM_A_DMIC_B1_CTL] = 1, - [TOMTOM_A_DMIC_B2_CTL] = 1, - [TOMTOM_A_SVASS_CLKRST_CTL] = 1, - [TOMTOM_A_SVASS_CPAR_CFG] = 1, - [TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD] = 1, - [TOMTOM_A_SVASS_CPAR_WDOG_CFG] = 1, - [TOMTOM_A_SVASS_CFG] = 1, - [TOMTOM_A_SVASS_SPE_CFG] = 1, - [TOMTOM_A_SVASS_STATUS] = 1, - [TOMTOM_A_SVASS_INT_MASK] = 1, - [TOMTOM_A_SVASS_INT_STATUS] = 1, - [TOMTOM_A_SVASS_INT_CLR] = 0, - [TOMTOM_A_SVASS_DEBUG] = 1, - [TOMTOM_A_SVASS_SPE_BKUP_INT] = 0, - [TOMTOM_A_SVASS_MEM_ACC] = 1, - [TOMTOM_A_MEM_LEAKAGE_CTL] = 1, - [TOMTOM_A_SVASS_SPE_INBOX_TRG] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_0] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_1] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_2] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_3] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_4] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_5] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_6] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_7] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_8] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_9] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_10] = 0, - [TOMTOM_A_SVASS_SPE_INBOX_11] = 0, - [TOMTOM_A_SVASS_SPE_OUTBOX_0] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_1] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_2] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_3] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_4] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_5] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_6] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_7] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_8] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_9] = 1, - 
[TOMTOM_A_SVASS_SPE_OUTBOX_10] = 1, - [TOMTOM_A_SVASS_SPE_OUTBOX_11] = 1, - [TOMTOM_A_INTR_MODE] = 1, - [TOMTOM_A_INTR1_MASK0] = 1, - [TOMTOM_A_INTR1_MASK1] = 1, - [TOMTOM_A_INTR1_MASK2] = 1, - [TOMTOM_A_INTR1_MASK3] = 1, - [TOMTOM_A_INTR1_STATUS0] = 1, - [TOMTOM_A_INTR1_STATUS1] = 1, - [TOMTOM_A_INTR1_STATUS2] = 1, - [TOMTOM_A_INTR1_STATUS3] = 1, - [TOMTOM_A_INTR1_CLEAR0] = 0, - [TOMTOM_A_INTR1_CLEAR1] = 0, - [TOMTOM_A_INTR1_CLEAR2] = 0, - [TOMTOM_A_INTR1_CLEAR3] = 0, - [TOMTOM_A_INTR1_LEVEL0] = 1, - [TOMTOM_A_INTR1_LEVEL1] = 1, - [TOMTOM_A_INTR1_LEVEL2] = 1, - [TOMTOM_A_INTR1_LEVEL3] = 1, - [TOMTOM_A_INTR1_TEST0] = 1, - [TOMTOM_A_INTR1_TEST1] = 1, - [TOMTOM_A_INTR1_TEST2] = 1, - [TOMTOM_A_INTR1_TEST3] = 1, - [TOMTOM_A_INTR1_SET0] = 1, - [TOMTOM_A_INTR1_SET1] = 1, - [TOMTOM_A_INTR1_SET2] = 1, - [TOMTOM_A_INTR1_SET3] = 1, - [TOMTOM_A_INTR2_MASK0] = 1, - [TOMTOM_A_INTR2_STATUS0] = 1, - [TOMTOM_A_INTR2_CLEAR0] = 0, - [TOMTOM_A_INTR2_LEVEL0] = 1, - [TOMTOM_A_INTR2_TEST0] = 1, - [TOMTOM_A_INTR2_SET0] = 1, - [TOMTOM_A_CDC_TX_I2S_SCK_MODE] = 1, - [TOMTOM_A_CDC_TX_I2S_WS_MODE] = 1, - [TOMTOM_A_CDC_DMIC_DATA0_MODE] = 1, - [TOMTOM_A_CDC_DMIC_CLK0_MODE] = 1, - [TOMTOM_A_CDC_DMIC_DATA1_MODE] = 1, - [TOMTOM_A_CDC_DMIC_CLK1_MODE] = 1, - [TOMTOM_A_CDC_RX_I2S_SCK_MODE] = 1, - [TOMTOM_A_CDC_RX_I2S_WS_MODE] = 1, - [TOMTOM_A_CDC_DMIC_DATA2_MODE] = 1, - [TOMTOM_A_CDC_DMIC_CLK2_MODE] = 1, - [TOMTOM_A_CDC_INTR1_MODE] = 1, - [TOMTOM_A_CDC_SB_NRZ_SEL_MODE] = 1, - [TOMTOM_A_CDC_INTR2_MODE] = 1, - [TOMTOM_A_CDC_RF_PA_ON_MODE] = 1, - [TOMTOM_A_CDC_BOOST_MODE] = 1, - [TOMTOM_A_CDC_JTCK_MODE] = 1, - [TOMTOM_A_CDC_JTDI_MODE] = 1, - [TOMTOM_A_CDC_JTMS_MODE] = 1, - [TOMTOM_A_CDC_JTDO_MODE] = 1, - [TOMTOM_A_CDC_JTRST_MODE] = 1, - [TOMTOM_A_CDC_BIST_MODE_MODE] = 1, - [TOMTOM_A_CDC_MAD_MAIN_CTL_1] = 1, - [TOMTOM_A_CDC_MAD_MAIN_CTL_2] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_1] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_2] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_3] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_4] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_5] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_6] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_7] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_8] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR] = 1, - [TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL] = 1, - [TOMTOM_A_CDC_MAD_ULTR_CTL_1] = 1, - [TOMTOM_A_CDC_MAD_ULTR_CTL_2] = 1, - [TOMTOM_A_CDC_MAD_ULTR_CTL_3] = 1, - [TOMTOM_A_CDC_MAD_ULTR_CTL_4] = 1, - [TOMTOM_A_CDC_MAD_ULTR_CTL_5] = 1, - [TOMTOM_A_CDC_MAD_ULTR_CTL_6] = 1, - [TOMTOM_A_CDC_MAD_ULTR_CTL_7] = 1, - [TOMTOM_A_CDC_MAD_BEACON_CTL_1] = 1, - [TOMTOM_A_CDC_MAD_BEACON_CTL_2] = 1, - [TOMTOM_A_CDC_MAD_BEACON_CTL_3] = 1, - [TOMTOM_A_CDC_MAD_BEACON_CTL_4] = 1, - [TOMTOM_A_CDC_MAD_BEACON_CTL_5] = 1, - [TOMTOM_A_CDC_MAD_BEACON_CTL_6] = 1, - [TOMTOM_A_CDC_MAD_BEACON_CTL_7] = 1, - [TOMTOM_A_CDC_MAD_BEACON_CTL_8] = 1, - [TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR] = 1, - [TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL] = 1, - [TOMTOM_A_CDC_MAD_INP_SEL] = 1, - [TOMTOM_A_BIAS_REF_CTL] = 1, - [TOMTOM_A_BIAS_CENTRAL_BG_CTL] = 1, - [TOMTOM_A_BIAS_PRECHRG_CTL] = 1, - [TOMTOM_A_BIAS_CURR_CTL_1] = 1, - [TOMTOM_A_BIAS_CURR_CTL_2] = 1, - [TOMTOM_A_BIAS_OSC_BG_CTL] = 1, - [TOMTOM_A_CLK_BUFF_EN1] = 1, - [TOMTOM_A_CLK_BUFF_EN2] = 1, - [TOMTOM_A_LDO_L_MODE_1] = 1, - [TOMTOM_A_LDO_L_MODE_2] = 1, - [TOMTOM_A_LDO_L_CTRL_1] = 1, - [TOMTOM_A_LDO_L_CTRL_2] = 1, - [TOMTOM_A_LDO_L_CTRL_3] = 1, - [TOMTOM_A_LDO_L_CTRL_4] = 1, - [TOMTOM_A_LDO_H_MODE_1] = 1, - [TOMTOM_A_LDO_H_MODE_2] = 1, - [TOMTOM_A_LDO_H_LOOP_CTL] = 1, - [TOMTOM_A_LDO_H_COMP_1] = 1, - [TOMTOM_A_LDO_H_COMP_2] = 1, - 
[TOMTOM_A_LDO_H_BIAS_1] = 1, - [TOMTOM_A_LDO_H_BIAS_2] = 1, - [TOMTOM_A_LDO_H_BIAS_3] = 1, - [TOMTOM_A_VBAT_CLK] = 1, - [TOMTOM_A_VBAT_LOOP] = 1, - [TOMTOM_A_VBAT_REF] = 1, - [TOMTOM_A_VBAT_ADC_TEST] = 1, - [TOMTOM_A_VBAT_FE] = 1, - [TOMTOM_A_VBAT_BIAS_1] = 1, - [TOMTOM_A_VBAT_BIAS_2] = 1, - [TOMTOM_A_VBAT_ADC_DATA_MSB] = 1, - [TOMTOM_A_VBAT_ADC_DATA_LSB] = 1, - [TOMTOM_A_FLL_NREF] = 1, - [TOMTOM_A_FLL_KDCO_TUNE] = 1, - [TOMTOM_A_FLL_LOCK_THRESH] = 1, - [TOMTOM_A_FLL_LOCK_DET_COUNT] = 1, - [TOMTOM_A_FLL_DAC_THRESHOLD] = 1, - [TOMTOM_A_FLL_TEST_DCO_FREERUN] = 1, - [TOMTOM_A_FLL_TEST_ENABLE] = 1, - [TOMTOM_A_MICB_CFILT_1_CTL] = 1, - [TOMTOM_A_MICB_CFILT_1_VAL] = 1, - [TOMTOM_A_MICB_CFILT_1_PRECHRG] = 1, - [TOMTOM_A_MICB_1_CTL] = 1, - [TOMTOM_A_MICB_1_INT_RBIAS] = 1, - [TOMTOM_A_MICB_1_MBHC] = 1, - [TOMTOM_A_MICB_CFILT_2_CTL] = 1, - [TOMTOM_A_MICB_CFILT_2_VAL] = 1, - [TOMTOM_A_MICB_CFILT_2_PRECHRG] = 1, - [TOMTOM_A_MICB_2_CTL] = 1, - [TOMTOM_A_MICB_2_INT_RBIAS] = 1, - [TOMTOM_A_MICB_2_MBHC] = 1, - [TOMTOM_A_MICB_CFILT_3_CTL] = 1, - [TOMTOM_A_MICB_CFILT_3_VAL] = 1, - [TOMTOM_A_MICB_CFILT_3_PRECHRG] = 1, - [TOMTOM_A_MICB_3_CTL] = 1, - [TOMTOM_A_MICB_3_INT_RBIAS] = 1, - [TOMTOM_A_MICB_3_MBHC] = 1, - [TOMTOM_A_MICB_4_CTL] = 1, - [TOMTOM_A_MICB_4_INT_RBIAS] = 1, - [TOMTOM_A_MICB_4_MBHC] = 1, - [TOMTOM_A_SPKR_DRV2_EN] = 1, - [TOMTOM_A_SPKR_DRV2_GAIN] = 1, - [TOMTOM_A_SPKR_DRV2_DAC_CTL] = 1, - [TOMTOM_A_SPKR_DRV2_OCP_CTL] = 1, - [TOMTOM_A_SPKR_DRV2_CLIP_DET] = 1, - [TOMTOM_A_SPKR_DRV2_DBG_DAC] = 1, - [TOMTOM_A_SPKR_DRV2_DBG_PA] = 1, - [TOMTOM_A_SPKR_DRV2_DBG_PWRSTG] = 1, - [TOMTOM_A_SPKR_DRV2_BIAS_LDO] = 1, - [TOMTOM_A_SPKR_DRV2_BIAS_INT] = 1, - [TOMTOM_A_SPKR_DRV2_BIAS_PA] = 1, - [TOMTOM_A_SPKR_DRV2_STATUS_OCP] = 1, - [TOMTOM_A_SPKR_DRV2_STATUS_PA] = 1, - [TOMTOM_A_MBHC_INSERT_DETECT] = 1, - [TOMTOM_A_MBHC_INSERT_DET_STATUS] = 1, - [TOMTOM_A_TX_COM_BIAS] = 1, - [TOMTOM_A_MBHC_INSERT_DETECT2] = 1, - [TOMTOM_A_MBHC_SCALING_MUX_1] = 1, - [TOMTOM_A_MBHC_SCALING_MUX_2] = 1, - [TOMTOM_A_MAD_ANA_CTRL] = 1, - [TOMTOM_A_TX_SUP_SWITCH_CTRL_1] = 1, - [TOMTOM_A_TX_SUP_SWITCH_CTRL_2] = 1, - [TOMTOM_A_TX_1_GAIN] = 1, - [TOMTOM_A_TX_1_2_TEST_EN] = 1, - [TOMTOM_A_TX_2_GAIN] = 1, - [TOMTOM_A_TX_1_2_ADC_IB] = 1, - [TOMTOM_A_TX_1_2_ATEST_REFCTRL] = 1, - [TOMTOM_A_TX_1_2_TEST_CTL] = 1, - [TOMTOM_A_TX_1_2_TEST_BLOCK_EN] = 1, - [TOMTOM_A_TX_1_2_TXFE_CLKDIV] = 1, - [TOMTOM_A_TX_1_2_SAR_ERR_CH1] = 1, - [TOMTOM_A_TX_1_2_SAR_ERR_CH2] = 1, - [TOMTOM_A_TX_3_GAIN] = 1, - [TOMTOM_A_TX_3_4_TEST_EN] = 1, - [TOMTOM_A_TX_4_GAIN] = 1, - [TOMTOM_A_TX_3_4_ADC_IB] = 1, - [TOMTOM_A_TX_3_4_ATEST_REFCTRL] = 1, - [TOMTOM_A_TX_3_4_TEST_CTL] = 1, - [TOMTOM_A_TX_3_4_TEST_BLOCK_EN] = 1, - [TOMTOM_A_TX_3_4_TXFE_CKDIV] = 1, - [TOMTOM_A_TX_3_4_SAR_ERR_CH3] = 1, - [TOMTOM_A_TX_3_4_SAR_ERR_CH4] = 1, - [TOMTOM_A_TX_5_GAIN] = 1, - [TOMTOM_A_TX_5_6_TEST_EN] = 1, - [TOMTOM_A_TX_6_GAIN] = 1, - [TOMTOM_A_TX_5_6_ADC_IB] = 1, - [TOMTOM_A_TX_5_6_ATEST_REFCTRL] = 1, - [TOMTOM_A_TX_5_6_TEST_CTL] = 1, - [TOMTOM_A_TX_5_6_TEST_BLOCK_EN] = 1, - [TOMTOM_A_TX_5_6_TXFE_CKDIV] = 1, - [TOMTOM_A_TX_5_6_SAR_ERR_CH5] = 1, - [TOMTOM_A_TX_5_6_SAR_ERR_CH6] = 1, - [TOMTOM_A_TX_7_MBHC_EN] = 1, - [TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL] = 1, - [TOMTOM_A_TX_7_MBHC_ADC] = 1, - [TOMTOM_A_TX_7_MBHC_TEST_CTL] = 1, - [TOMTOM_A_TX_7_MBHC_SAR_ERR] = 1, - [TOMTOM_A_TX_7_TXFE_CLKDIV] = 1, - [TOMTOM_A_RCO_CTRL] = 1, - [TOMTOM_A_RCO_CALIBRATION_CTRL1] = 1, - [TOMTOM_A_RCO_CALIBRATION_CTRL2] = 1, - [TOMTOM_A_RCO_CALIBRATION_CTRL3] = 1, - [TOMTOM_A_RCO_TEST_CTRL] = 1, - 
[TOMTOM_A_RCO_CALIBRATION_RESULT1] = 1, - [TOMTOM_A_RCO_CALIBRATION_RESULT2] = 1, - [TOMTOM_A_BUCK_MODE_1] = 1, - [TOMTOM_A_BUCK_MODE_2] = 1, - [TOMTOM_A_BUCK_MODE_3] = 1, - [TOMTOM_A_BUCK_MODE_4] = 1, - [TOMTOM_A_BUCK_MODE_5] = 1, - [TOMTOM_A_BUCK_CTRL_VCL_1] = 1, - [TOMTOM_A_BUCK_CTRL_VCL_2] = 1, - [TOMTOM_A_BUCK_CTRL_VCL_3] = 1, - [TOMTOM_A_BUCK_CTRL_CCL_1] = 1, - [TOMTOM_A_BUCK_CTRL_CCL_2] = 1, - [TOMTOM_A_BUCK_CTRL_CCL_3] = 1, - [TOMTOM_A_BUCK_CTRL_CCL_4] = 1, - [TOMTOM_A_BUCK_CTRL_PWM_DRVR_1] = 1, - [TOMTOM_A_BUCK_CTRL_PWM_DRVR_2] = 1, - [TOMTOM_A_BUCK_CTRL_PWM_DRVR_3] = 1, - [TOMTOM_A_BUCK_TMUX_A_D] = 1, - [TOMTOM_A_NCP_BUCKREF] = 1, - [TOMTOM_A_NCP_EN] = 1, - [TOMTOM_A_NCP_CLK] = 1, - [TOMTOM_A_NCP_STATIC] = 1, - [TOMTOM_A_NCP_VTH_LOW] = 1, - [TOMTOM_A_NCP_VTH_HIGH] = 1, - [TOMTOM_A_NCP_ATEST] = 1, - [TOMTOM_A_NCP_DTEST] = 1, - [TOMTOM_A_NCP_DLY1] = 1, - [TOMTOM_A_NCP_DLY2] = 1, - [TOMTOM_A_RX_AUX_SW_CTL] = 1, - [TOMTOM_A_RX_PA_AUX_IN_CONN] = 1, - [TOMTOM_A_RX_COM_TIMER_DIV] = 1, - [TOMTOM_A_RX_COM_OCP_CTL] = 1, - [TOMTOM_A_RX_COM_OCP_COUNT] = 1, - [TOMTOM_A_RX_COM_DAC_CTL] = 1, - [TOMTOM_A_RX_COM_BIAS] = 1, - [TOMTOM_A_RX_HPH_AUTO_CHOP] = 1, - [TOMTOM_A_RX_HPH_CHOP_CTL] = 1, - [TOMTOM_A_RX_HPH_BIAS_PA] = 1, - [TOMTOM_A_RX_HPH_BIAS_LDO] = 1, - [TOMTOM_A_RX_HPH_BIAS_CNP] = 1, - [TOMTOM_A_RX_HPH_BIAS_WG_OCP] = 1, - [TOMTOM_A_RX_HPH_OCP_CTL] = 1, - [TOMTOM_A_RX_HPH_CNP_EN] = 1, - [TOMTOM_A_RX_HPH_CNP_WG_CTL] = 1, - [TOMTOM_A_RX_HPH_CNP_WG_TIME] = 1, - [TOMTOM_A_RX_HPH_L_GAIN] = 1, - [TOMTOM_A_RX_HPH_L_TEST] = 1, - [TOMTOM_A_RX_HPH_L_PA_CTL] = 1, - [TOMTOM_A_RX_HPH_L_DAC_CTL] = 1, - [TOMTOM_A_RX_HPH_L_ATEST] = 1, - [TOMTOM_A_RX_HPH_L_STATUS] = 1, - [TOMTOM_A_RX_HPH_R_GAIN] = 1, - [TOMTOM_A_RX_HPH_R_TEST] = 1, - [TOMTOM_A_RX_HPH_R_PA_CTL] = 1, - [TOMTOM_A_RX_HPH_R_DAC_CTL] = 1, - [TOMTOM_A_RX_HPH_R_ATEST] = 1, - [TOMTOM_A_RX_HPH_R_STATUS] = 1, - [TOMTOM_A_RX_EAR_BIAS_PA] = 1, - [TOMTOM_A_RX_EAR_BIAS_CMBUFF] = 1, - [TOMTOM_A_RX_EAR_EN] = 1, - [TOMTOM_A_RX_EAR_GAIN] = 1, - [TOMTOM_A_RX_EAR_CMBUFF] = 1, - [TOMTOM_A_RX_EAR_ICTL] = 1, - [TOMTOM_A_RX_EAR_CCOMP] = 1, - [TOMTOM_A_RX_EAR_VCM] = 1, - [TOMTOM_A_RX_EAR_CNP] = 1, - [TOMTOM_A_RX_EAR_DAC_CTL_ATEST] = 1, - [TOMTOM_A_RX_EAR_STATUS] = 1, - [TOMTOM_A_RX_LINE_BIAS_PA] = 1, - [TOMTOM_A_RX_BUCK_BIAS1] = 1, - [TOMTOM_A_RX_BUCK_BIAS2] = 1, - [TOMTOM_A_RX_LINE_COM] = 1, - [TOMTOM_A_RX_LINE_CNP_EN] = 1, - [TOMTOM_A_RX_LINE_CNP_WG_CTL] = 1, - [TOMTOM_A_RX_LINE_CNP_WG_TIME] = 1, - [TOMTOM_A_RX_LINE_1_GAIN] = 1, - [TOMTOM_A_RX_LINE_1_TEST] = 1, - [TOMTOM_A_RX_LINE_1_DAC_CTL] = 1, - [TOMTOM_A_RX_LINE_1_STATUS] = 1, - [TOMTOM_A_RX_LINE_2_GAIN] = 1, - [TOMTOM_A_RX_LINE_2_TEST] = 1, - [TOMTOM_A_RX_LINE_2_DAC_CTL] = 1, - [TOMTOM_A_RX_LINE_2_STATUS] = 1, - [TOMTOM_A_RX_LINE_3_GAIN] = 1, - [TOMTOM_A_RX_LINE_3_TEST] = 1, - [TOMTOM_A_RX_LINE_3_DAC_CTL] = 1, - [TOMTOM_A_RX_LINE_3_STATUS] = 1, - [TOMTOM_A_RX_LINE_4_GAIN] = 1, - [TOMTOM_A_RX_LINE_4_TEST] = 1, - [TOMTOM_A_RX_LINE_4_DAC_CTL] = 1, - [TOMTOM_A_RX_LINE_4_STATUS] = 1, - [TOMTOM_A_RX_LINE_CNP_DBG] = 1, - [TOMTOM_A_SPKR_DRV1_EN] = 1, - [TOMTOM_A_SPKR_DRV1_GAIN] = 1, - [TOMTOM_A_SPKR_DRV1_DAC_CTL] = 1, - [TOMTOM_A_SPKR_DRV1_OCP_CTL] = 1, - [TOMTOM_A_SPKR_DRV1_CLIP_DET] = 1, - [TOMTOM_A_SPKR_DRV1_IEC] = 1, - [TOMTOM_A_SPKR_DRV1_DBG_DAC] = 1, - [TOMTOM_A_SPKR_DRV1_DBG_PA] = 1, - [TOMTOM_A_SPKR_DRV1_DBG_PWRSTG] = 1, - [TOMTOM_A_SPKR_DRV1_BIAS_LDO] = 1, - [TOMTOM_A_SPKR_DRV1_BIAS_INT] = 1, - [TOMTOM_A_SPKR_DRV1_BIAS_PA] = 1, - [TOMTOM_A_SPKR_DRV1_STATUS_OCP] = 1, - [TOMTOM_A_SPKR_DRV1_STATUS_PA] = 1, - 
[TOMTOM_A_SPKR1_PROT_EN] = 1, - [TOMTOM_A_SPKR1_PROT_ADC_TEST_EN] = 1, - [TOMTOM_A_SPKR1_PROT_ATEST] = 1, - [TOMTOM_A_SPKR1_PROT_LDO_CTRL] = 1, - [TOMTOM_A_SPKR1_PROT_ISENSE_CTRL] = 1, - [TOMTOM_A_SPKR1_PROT_VSENSE_CTRL] = 1, - [TOMTOM_A_SPKR2_PROT_EN] = 1, - [TOMTOM_A_SPKR2_PROT_ADC_TEST_EN] = 1, - [TOMTOM_A_SPKR2_PROT_ATEST] = 1, - [TOMTOM_A_SPKR2_PROT_LDO_CTRL] = 1, - [TOMTOM_A_SPKR2_PROT_ISENSE_CTRL] = 1, - [TOMTOM_A_SPKR2_PROT_VSENSE_CTRL] = 1, - [TOMTOM_A_MBHC_HPH] = 1, - [TOMTOM_A_CDC_ANC1_B1_CTL] = 1, - [TOMTOM_A_CDC_ANC2_B1_CTL] = 1, - [TOMTOM_A_CDC_ANC1_SHIFT] = 1, - [TOMTOM_A_CDC_ANC2_SHIFT] = 1, - [TOMTOM_A_CDC_ANC1_IIR_B1_CTL] = 1, - [TOMTOM_A_CDC_ANC2_IIR_B1_CTL] = 1, - [TOMTOM_A_CDC_ANC1_IIR_B2_CTL] = 1, - [TOMTOM_A_CDC_ANC2_IIR_B2_CTL] = 1, - [TOMTOM_A_CDC_ANC1_IIR_B3_CTL] = 1, - [TOMTOM_A_CDC_ANC2_IIR_B3_CTL] = 1, - [TOMTOM_A_CDC_ANC1_LPF_B1_CTL] = 1, - [TOMTOM_A_CDC_ANC2_LPF_B1_CTL] = 1, - [TOMTOM_A_CDC_ANC1_LPF_B2_CTL] = 1, - [TOMTOM_A_CDC_ANC2_LPF_B2_CTL] = 1, - [TOMTOM_A_CDC_ANC1_SPARE] = 1, - [TOMTOM_A_CDC_ANC2_SPARE] = 1, - [TOMTOM_A_CDC_ANC1_SMLPF_CTL] = 1, - [TOMTOM_A_CDC_ANC2_SMLPF_CTL] = 1, - [TOMTOM_A_CDC_ANC1_DCFLT_CTL] = 1, - [TOMTOM_A_CDC_ANC2_DCFLT_CTL] = 1, - [TOMTOM_A_CDC_ANC1_GAIN_CTL] = 1, - [TOMTOM_A_CDC_ANC2_GAIN_CTL] = 1, - [TOMTOM_A_CDC_ANC1_B2_CTL] = 1, - [TOMTOM_A_CDC_ANC2_B2_CTL] = 1, - [TOMTOM_A_CDC_TX1_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX2_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX3_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX4_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX5_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX6_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX7_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX8_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX9_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX10_VOL_CTL_TIMER] = 1, - [TOMTOM_A_CDC_TX1_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX2_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX3_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX4_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX5_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX6_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX7_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX8_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX9_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX10_VOL_CTL_GAIN] = 1, - [TOMTOM_A_CDC_TX1_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX2_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX3_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX4_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX5_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX6_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX7_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX8_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX9_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX10_VOL_CTL_CFG] = 1, - [TOMTOM_A_CDC_TX1_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX2_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX3_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX4_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX5_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX6_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX7_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX8_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX9_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX10_MUX_CTL] = 1, - [TOMTOM_A_CDC_TX1_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX2_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX3_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX4_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX5_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX6_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX7_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX8_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX9_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX10_CLK_FS_CTL] = 1, - [TOMTOM_A_CDC_TX1_DMIC_CTL] = 1, - [TOMTOM_A_CDC_TX2_DMIC_CTL] = 1, - [TOMTOM_A_CDC_TX3_DMIC_CTL] = 1, - [TOMTOM_A_CDC_TX4_DMIC_CTL] = 1, - [TOMTOM_A_CDC_TX5_DMIC_CTL] = 1, - [TOMTOM_A_CDC_TX6_DMIC_CTL] = 1, - [TOMTOM_A_CDC_TX7_DMIC_CTL] = 1, - [TOMTOM_A_CDC_TX8_DMIC_CTL] = 1, - [TOMTOM_A_CDC_TX9_DMIC_CTL] = 1, - 
[TOMTOM_A_CDC_TX10_DMIC_CTL] = 1, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL0] = 1, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL1] = 1, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL2] = 1, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL3] = 1, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL4] = 1, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL5] = 1, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL6] = 1, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL7] = 1, - [TOMTOM_A_CDC_DEBUG_B1_CTL] = 1, - [TOMTOM_A_CDC_DEBUG_B2_CTL] = 1, - [TOMTOM_A_CDC_DEBUG_B3_CTL] = 1, - [TOMTOM_A_CDC_DEBUG_B4_CTL] = 1, - [TOMTOM_A_CDC_DEBUG_B5_CTL] = 1, - [TOMTOM_A_CDC_DEBUG_B6_CTL] = 1, - [TOMTOM_A_CDC_DEBUG_B7_CTL] = 1, - [TOMTOM_A_CDC_SRC1_PDA_CFG] = 1, - [TOMTOM_A_CDC_SRC2_PDA_CFG] = 1, - [TOMTOM_A_CDC_SRC1_FS_CTL] = 1, - [TOMTOM_A_CDC_SRC2_FS_CTL] = 1, - [TOMTOM_A_CDC_RX1_B1_CTL] = 1, - [TOMTOM_A_CDC_RX2_B1_CTL] = 1, - [TOMTOM_A_CDC_RX3_B1_CTL] = 1, - [TOMTOM_A_CDC_RX4_B1_CTL] = 1, - [TOMTOM_A_CDC_RX5_B1_CTL] = 1, - [TOMTOM_A_CDC_RX6_B1_CTL] = 1, - [TOMTOM_A_CDC_RX7_B1_CTL] = 1, - [TOMTOM_A_CDC_RX1_B2_CTL] = 1, - [TOMTOM_A_CDC_RX2_B2_CTL] = 1, - [TOMTOM_A_CDC_RX3_B2_CTL] = 1, - [TOMTOM_A_CDC_RX4_B2_CTL] = 1, - [TOMTOM_A_CDC_RX5_B2_CTL] = 1, - [TOMTOM_A_CDC_RX6_B2_CTL] = 1, - [TOMTOM_A_CDC_RX7_B2_CTL] = 1, - [TOMTOM_A_CDC_RX1_B3_CTL] = 1, - [TOMTOM_A_CDC_RX2_B3_CTL] = 1, - [TOMTOM_A_CDC_RX3_B3_CTL] = 1, - [TOMTOM_A_CDC_RX4_B3_CTL] = 1, - [TOMTOM_A_CDC_RX5_B3_CTL] = 1, - [TOMTOM_A_CDC_RX6_B3_CTL] = 1, - [TOMTOM_A_CDC_RX7_B3_CTL] = 1, - [TOMTOM_A_CDC_RX1_B4_CTL] = 1, - [TOMTOM_A_CDC_RX2_B4_CTL] = 1, - [TOMTOM_A_CDC_RX3_B4_CTL] = 1, - [TOMTOM_A_CDC_RX4_B4_CTL] = 1, - [TOMTOM_A_CDC_RX5_B4_CTL] = 1, - [TOMTOM_A_CDC_RX6_B4_CTL] = 1, - [TOMTOM_A_CDC_RX7_B4_CTL] = 1, - [TOMTOM_A_CDC_RX1_B5_CTL] = 1, - [TOMTOM_A_CDC_RX2_B5_CTL] = 1, - [TOMTOM_A_CDC_RX3_B5_CTL] = 1, - [TOMTOM_A_CDC_RX4_B5_CTL] = 1, - [TOMTOM_A_CDC_RX5_B5_CTL] = 1, - [TOMTOM_A_CDC_RX6_B5_CTL] = 1, - [TOMTOM_A_CDC_RX7_B5_CTL] = 1, - [TOMTOM_A_CDC_RX1_B6_CTL] = 1, - [TOMTOM_A_CDC_RX2_B6_CTL] = 1, - [TOMTOM_A_CDC_RX3_B6_CTL] = 1, - [TOMTOM_A_CDC_RX4_B6_CTL] = 1, - [TOMTOM_A_CDC_RX5_B6_CTL] = 1, - [TOMTOM_A_CDC_RX6_B6_CTL] = 1, - [TOMTOM_A_CDC_RX7_B6_CTL] = 1, - [TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL] = 1, - [TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL] = 1, - [TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL] = 1, - [TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL] = 1, - [TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL] = 1, - [TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL] = 1, - [TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL] = 1, - [TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL] = 1, - [TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL] = 1, - [TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL] = 1, - [TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL] = 1, - [TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL] = 1, - [TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL] = 1, - [TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL] = 1, - [TOMTOM_A_CDC_VBAT_CFG] = 1, - [TOMTOM_A_CDC_VBAT_ADC_CAL1] = 1, - [TOMTOM_A_CDC_VBAT_ADC_CAL2] = 1, - [TOMTOM_A_CDC_VBAT_ADC_CAL3] = 1, - [TOMTOM_A_CDC_VBAT_PK_EST1] = 1, - [TOMTOM_A_CDC_VBAT_PK_EST2] = 1, - [TOMTOM_A_CDC_VBAT_PK_EST3] = 1, - [TOMTOM_A_CDC_VBAT_RF_PROC1] = 1, - [TOMTOM_A_CDC_VBAT_RF_PROC2] = 1, - [TOMTOM_A_CDC_VBAT_TAC1] = 1, - [TOMTOM_A_CDC_VBAT_TAC2] = 1, - [TOMTOM_A_CDC_VBAT_TAC3] = 1, - [TOMTOM_A_CDC_VBAT_TAC4] = 1, - [TOMTOM_A_CDC_VBAT_GAIN_UPD1] = 1, - [TOMTOM_A_CDC_VBAT_GAIN_UPD2] = 1, - [TOMTOM_A_CDC_VBAT_GAIN_UPD3] = 1, - [TOMTOM_A_CDC_VBAT_GAIN_UPD4] = 1, - [TOMTOM_A_CDC_VBAT_DEBUG1] = 1, - [TOMTOM_A_CDC_VBAT_GAIN_UPD_MON] = 0, - [TOMTOM_A_CDC_VBAT_GAIN_MON_VAL] = 1, - [TOMTOM_A_CDC_CLK_ANC_RESET_CTL] = 1, - [TOMTOM_A_CDC_CLK_RX_RESET_CTL] = 1, - [TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL] = 1, - 
[TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL] = 1, - [TOMTOM_A_CDC_CLK_RX_I2S_CTL] = 1, - [TOMTOM_A_CDC_CLK_TX_I2S_CTL] = 1, - [TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL] = 1, - [TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL] = 1, - [TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL] = 1, - [TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL] = 1, - [TOMTOM_A_CDC_CLK_OTHR_CTL] = 1, - [TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL] = 1, - [TOMTOM_A_CDC_CLK_RX_B1_CTL] = 1, - [TOMTOM_A_CDC_CLK_RX_B2_CTL] = 1, - [TOMTOM_A_CDC_CLK_MCLK_CTL] = 1, - [TOMTOM_A_CDC_CLK_PDM_CTL] = 1, - [TOMTOM_A_CDC_CLK_SD_CTL] = 1, - [TOMTOM_A_CDC_CLSH_B1_CTL] = 1, - [TOMTOM_A_CDC_CLSH_B2_CTL] = 1, - [TOMTOM_A_CDC_CLSH_B3_CTL] = 1, - [TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS] = 1, - [TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD] = 1, - [TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD] = 1, - [TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD] = 1, - [TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD] = 1, - [TOMTOM_A_CDC_CLSH_K_ADDR] = 1, - [TOMTOM_A_CDC_CLSH_K_DATA] = 1, - [TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L] = 1, - [TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U] = 1, - [TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L] = 1, - [TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U] = 1, - [TOMTOM_A_CDC_CLSH_V_PA_HD_EAR] = 1, - [TOMTOM_A_CDC_CLSH_V_PA_HD_HPH] = 1, - [TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR] = 1, - [TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH] = 1, - [TOMTOM_A_CDC_IIR1_GAIN_B1_CTL] = 1, - [TOMTOM_A_CDC_IIR2_GAIN_B1_CTL] = 1, - [TOMTOM_A_CDC_IIR1_GAIN_B2_CTL] = 1, - [TOMTOM_A_CDC_IIR2_GAIN_B2_CTL] = 1, - [TOMTOM_A_CDC_IIR1_GAIN_B3_CTL] = 1, - [TOMTOM_A_CDC_IIR2_GAIN_B3_CTL] = 1, - [TOMTOM_A_CDC_IIR1_GAIN_B4_CTL] = 1, - [TOMTOM_A_CDC_IIR2_GAIN_B4_CTL] = 1, - [TOMTOM_A_CDC_IIR1_GAIN_B5_CTL] = 1, - [TOMTOM_A_CDC_IIR2_GAIN_B5_CTL] = 1, - [TOMTOM_A_CDC_IIR1_GAIN_B6_CTL] = 1, - [TOMTOM_A_CDC_IIR2_GAIN_B6_CTL] = 1, - [TOMTOM_A_CDC_IIR1_GAIN_B7_CTL] = 1, - [TOMTOM_A_CDC_IIR2_GAIN_B7_CTL] = 1, - [TOMTOM_A_CDC_IIR1_GAIN_B8_CTL] = 1, - [TOMTOM_A_CDC_IIR2_GAIN_B8_CTL] = 1, - [TOMTOM_A_CDC_IIR1_CTL] = 1, - [TOMTOM_A_CDC_IIR2_CTL] = 1, - [TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL] = 1, - [TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL] = 1, - [TOMTOM_A_CDC_IIR1_COEF_B1_CTL] = 1, - [TOMTOM_A_CDC_IIR2_COEF_B1_CTL] = 1, - [TOMTOM_A_CDC_IIR1_COEF_B2_CTL] = 1, - [TOMTOM_A_CDC_IIR2_COEF_B2_CTL] = 1, - [TOMTOM_A_CDC_TOP_GAIN_UPDATE] = 1, - [TOMTOM_A_CDC_PA_RAMP_B1_CTL] = 1, - [TOMTOM_A_CDC_PA_RAMP_B2_CTL] = 1, - [TOMTOM_A_CDC_PA_RAMP_B3_CTL] = 1, - [TOMTOM_A_CDC_PA_RAMP_B4_CTL] = 1, - [TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL] = 1, - [TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL] = 1, - [TOMTOM_A_CDC_COMP0_B1_CTL] = 1, - [TOMTOM_A_CDC_COMP1_B1_CTL] = 1, - [TOMTOM_A_CDC_COMP2_B1_CTL] = 1, - [TOMTOM_A_CDC_COMP0_B2_CTL] = 1, - [TOMTOM_A_CDC_COMP1_B2_CTL] = 1, - [TOMTOM_A_CDC_COMP2_B2_CTL] = 1, - [TOMTOM_A_CDC_COMP0_B3_CTL] = 1, - [TOMTOM_A_CDC_COMP1_B3_CTL] = 1, - [TOMTOM_A_CDC_COMP2_B3_CTL] = 1, - [TOMTOM_A_CDC_COMP0_B4_CTL] = 1, - [TOMTOM_A_CDC_COMP1_B4_CTL] = 1, - [TOMTOM_A_CDC_COMP2_B4_CTL] = 1, - [TOMTOM_A_CDC_COMP0_B5_CTL] = 1, - [TOMTOM_A_CDC_COMP1_B5_CTL] = 1, - [TOMTOM_A_CDC_COMP2_B5_CTL] = 1, - [TOMTOM_A_CDC_COMP0_B6_CTL] = 1, - [TOMTOM_A_CDC_COMP1_B6_CTL] = 1, - [TOMTOM_A_CDC_COMP2_B6_CTL] = 1, - [TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS] = 1, - [TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS] = 1, - [TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS] = 1, - [TOMTOM_A_CDC_COMP0_FS_CFG] = 1, - [TOMTOM_A_CDC_COMP1_FS_CFG] = 1, - [TOMTOM_A_CDC_COMP2_FS_CFG] = 1, - [TOMTOM_A_CDC_CONN_RX1_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX1_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX1_B3_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX2_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX2_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX2_B3_CTL] = 1, - 
[TOMTOM_A_CDC_CONN_RX3_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX3_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX4_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX4_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX5_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX5_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX6_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX6_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX7_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX7_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX7_B3_CTL] = 1, - [TOMTOM_A_CDC_CONN_ANC_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_ANC_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_B3_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_B4_CTL] = 1, - [TOMTOM_A_CDC_CONN_EQ1_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_EQ1_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_EQ1_B3_CTL] = 1, - [TOMTOM_A_CDC_CONN_EQ1_B4_CTL] = 1, - [TOMTOM_A_CDC_CONN_EQ2_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_EQ2_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_EQ2_B3_CTL] = 1, - [TOMTOM_A_CDC_CONN_EQ2_B4_CTL] = 1, - [TOMTOM_A_CDC_CONN_SRC1_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_SRC1_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_SRC2_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_SRC2_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B3_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B4_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B5_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B6_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B7_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B8_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B9_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B10_CTL] = 1, - [TOMTOM_A_CDC_CONN_TX_SB_B11_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX_SB_B1_CTL] = 1, - [TOMTOM_A_CDC_CONN_RX_SB_B2_CTL] = 1, - [TOMTOM_A_CDC_CONN_CLSH_CTL] = 1, - [TOMTOM_A_CDC_CONN_MISC] = 1, - [TOMTOM_A_CDC_CONN_RX8_B1_CTL] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK] = 1, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING] = 1, - [TOMTOM_A_CDC_MBHC_EN_CTL] = 1, - [TOMTOM_A_CDC_MBHC_FIR_B1_CFG] = 1, - [TOMTOM_A_CDC_MBHC_FIR_B2_CFG] = 1, - [TOMTOM_A_CDC_MBHC_TIMER_B1_CTL] = 1, - [TOMTOM_A_CDC_MBHC_TIMER_B2_CTL] = 1, - [TOMTOM_A_CDC_MBHC_TIMER_B3_CTL] = 1, - [TOMTOM_A_CDC_MBHC_TIMER_B4_CTL] = 1, - [TOMTOM_A_CDC_MBHC_TIMER_B5_CTL] = 1, - [TOMTOM_A_CDC_MBHC_TIMER_B6_CTL] = 1, - [TOMTOM_A_CDC_MBHC_B1_STATUS] = 1, - [TOMTOM_A_CDC_MBHC_B2_STATUS] = 1, - [TOMTOM_A_CDC_MBHC_B3_STATUS] = 1, - [TOMTOM_A_CDC_MBHC_B4_STATUS] = 1, - [TOMTOM_A_CDC_MBHC_B5_STATUS] = 1, - [TOMTOM_A_CDC_MBHC_B1_CTL] = 1, - [TOMTOM_A_CDC_MBHC_B2_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B1_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B2_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B3_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B4_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B5_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B6_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B7_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B8_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B9_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B10_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B11_CTL] = 1, - [TOMTOM_A_CDC_MBHC_VOLT_B12_CTL] = 1, - [TOMTOM_A_CDC_MBHC_CLK_CTL] = 1, - [TOMTOM_A_CDC_MBHC_INT_CTL] = 1, - [TOMTOM_A_CDC_MBHC_DEBUG_CTL] = 1, - [TOMTOM_A_CDC_MBHC_SPARE] = 1, - 
[TOMTOM_A_CDC_RX8_B1_CTL] = 1, - [TOMTOM_A_CDC_RX8_B2_CTL] = 1, - [TOMTOM_A_CDC_RX8_B3_CTL] = 1, - [TOMTOM_A_CDC_RX8_B4_CTL] = 1, - [TOMTOM_A_CDC_RX8_B5_CTL] = 1, - [TOMTOM_A_CDC_RX8_B6_CTL] = 1, - [TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL] = 1, - [TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL] = 1, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0] = 1, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1] = 1, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2] = 1, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3] = 1, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4] = 1, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5] = 1, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6] = 1, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7] = 1, - [TOMTOM_A_CDC_BOOST_MODE_CTL] = 1, - [TOMTOM_A_CDC_BOOST_THRESHOLD] = 1, - [TOMTOM_A_CDC_BOOST_TAP_SEL] = 1, - [TOMTOM_A_CDC_BOOST_HOLD_TIME] = 1, - [TOMTOM_A_CDC_BOOST_TRGR_EN] = 1, -}; - -const u8 tomtom_reset_reg_defaults[TOMTOM_CACHE_SIZE] = { - [TOMTOM_A_CHIP_CTL] = TOMTOM_A_CHIP_CTL__POR, - [TOMTOM_A_CHIP_STATUS] = TOMTOM_A_CHIP_STATUS__POR, - [TOMTOM_A_CHIP_ID_BYTE_0] = TOMTOM_A_CHIP_ID_BYTE_0__POR, - [TOMTOM_A_CHIP_ID_BYTE_1] = TOMTOM_A_CHIP_ID_BYTE_1__POR, - [TOMTOM_A_CHIP_ID_BYTE_2] = TOMTOM_A_CHIP_ID_BYTE_2__POR, - [TOMTOM_A_CHIP_ID_BYTE_3] = TOMTOM_A_CHIP_ID_BYTE_3__POR, - [TOMTOM_A_CHIP_I2C_SLAVE_ID] = TOMTOM_A_CHIP_I2C_SLAVE_ID__POR, - [TOMTOM_A_SLAVE_ID_1] = TOMTOM_A_SLAVE_ID_1__POR, - [TOMTOM_A_SLAVE_ID_2] = TOMTOM_A_SLAVE_ID_2__POR, - [TOMTOM_A_SLAVE_ID_3] = TOMTOM_A_SLAVE_ID_3__POR, - [TOMTOM_A_PIN_CTL_OE0] = TOMTOM_A_PIN_CTL_OE0__POR, - [TOMTOM_A_PIN_CTL_OE1] = TOMTOM_A_PIN_CTL_OE1__POR, - [TOMTOM_A_PIN_CTL_OE2] = TOMTOM_A_PIN_CTL_OE2__POR, - [TOMTOM_A_PIN_CTL_DATA0] = TOMTOM_A_PIN_CTL_DATA0__POR, - [TOMTOM_A_PIN_CTL_DATA1] = TOMTOM_A_PIN_CTL_DATA1__POR, - [TOMTOM_A_PIN_CTL_DATA2] = TOMTOM_A_PIN_CTL_DATA2__POR, - [TOMTOM_A_HDRIVE_GENERIC] = TOMTOM_A_HDRIVE_GENERIC__POR, - [TOMTOM_A_HDRIVE_OVERRIDE] = TOMTOM_A_HDRIVE_OVERRIDE__POR, - [TOMTOM_A_ANA_CSR_WAIT_STATE] = TOMTOM_A_ANA_CSR_WAIT_STATE__POR, - [TOMTOM_A_PROCESS_MONITOR_CTL0] = TOMTOM_A_PROCESS_MONITOR_CTL0__POR, - [TOMTOM_A_PROCESS_MONITOR_CTL1] = TOMTOM_A_PROCESS_MONITOR_CTL1__POR, - [TOMTOM_A_PROCESS_MONITOR_CTL2] = TOMTOM_A_PROCESS_MONITOR_CTL2__POR, - [TOMTOM_A_PROCESS_MONITOR_CTL3] = TOMTOM_A_PROCESS_MONITOR_CTL3__POR, - [TOMTOM_A_QFUSE_CTL] = TOMTOM_A_QFUSE_CTL__POR, - [TOMTOM_A_QFUSE_STATUS] = TOMTOM_A_QFUSE_STATUS__POR, - [TOMTOM_A_QFUSE_DATA_OUT0] = TOMTOM_A_QFUSE_DATA_OUT0__POR, - [TOMTOM_A_QFUSE_DATA_OUT1] = TOMTOM_A_QFUSE_DATA_OUT1__POR, - [TOMTOM_A_QFUSE_DATA_OUT2] = TOMTOM_A_QFUSE_DATA_OUT2__POR, - [TOMTOM_A_QFUSE_DATA_OUT3] = TOMTOM_A_QFUSE_DATA_OUT3__POR, - [TOMTOM_A_QFUSE_DATA_OUT4] = TOMTOM_A_QFUSE_DATA_OUT4__POR, - [TOMTOM_A_QFUSE_DATA_OUT5] = TOMTOM_A_QFUSE_DATA_OUT5__POR, - [TOMTOM_A_QFUSE_DATA_OUT6] = TOMTOM_A_QFUSE_DATA_OUT6__POR, - [TOMTOM_A_QFUSE_DATA_OUT7] = TOMTOM_A_QFUSE_DATA_OUT7__POR, - [TOMTOM_A_CDC_CTL] = TOMTOM_A_CDC_CTL__POR, - [TOMTOM_A_LEAKAGE_CTL] = TOMTOM_A_LEAKAGE_CTL__POR, - [TOMTOM_A_SVASS_MEM_PTR0] = TOMTOM_A_SVASS_MEM_PTR0__POR, - [TOMTOM_A_SVASS_MEM_PTR1] = TOMTOM_A_SVASS_MEM_PTR1__POR, - [TOMTOM_A_SVASS_MEM_PTR2] = TOMTOM_A_SVASS_MEM_PTR2__POR, - [TOMTOM_A_SVASS_MEM_CTL] = TOMTOM_A_SVASS_MEM_CTL__POR, - [TOMTOM_A_SVASS_MEM_BANK] = TOMTOM_A_SVASS_MEM_BANK__POR, - [TOMTOM_A_DMIC_B1_CTL] = TOMTOM_A_DMIC_B1_CTL__POR, - [TOMTOM_A_DMIC_B2_CTL] = TOMTOM_A_DMIC_B2_CTL__POR, - [TOMTOM_A_SVASS_CLKRST_CTL] = TOMTOM_A_SVASS_CLKRST_CTL__POR, - [TOMTOM_A_SVASS_CPAR_CFG] = TOMTOM_A_SVASS_CPAR_CFG__POR, - [TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD] = - TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR, - 
[TOMTOM_A_SVASS_CPAR_WDOG_CFG] = TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR, - [TOMTOM_A_SVASS_CFG] = TOMTOM_A_SVASS_CFG__POR, - [TOMTOM_A_SVASS_SPE_CFG] = TOMTOM_A_SVASS_SPE_CFG__POR, - [TOMTOM_A_SVASS_STATUS] = TOMTOM_A_SVASS_STATUS__POR, - [TOMTOM_A_SVASS_INT_MASK] = TOMTOM_A_SVASS_INT_MASK__POR, - [TOMTOM_A_SVASS_INT_STATUS] = TOMTOM_A_SVASS_INT_STATUS__POR, - [TOMTOM_A_SVASS_INT_CLR] = TOMTOM_A_SVASS_INT_CLR__POR, - [TOMTOM_A_SVASS_DEBUG] = TOMTOM_A_SVASS_DEBUG__POR, - [TOMTOM_A_SVASS_SPE_BKUP_INT] = TOMTOM_A_SVASS_SPE_BKUP_INT__POR, - [TOMTOM_A_SVASS_MEM_ACC] = TOMTOM_A_SVASS_MEM_ACC__POR, - [TOMTOM_A_MEM_LEAKAGE_CTL] = TOMTOM_A_MEM_LEAKAGE_CTL__POR, - [TOMTOM_A_SVASS_SPE_INBOX_TRG] = TOMTOM_A_SVASS_SPE_INBOX_TRG__POR, - [TOMTOM_A_SVASS_SPE_INBOX_0] = TOMTOM_A_SVASS_SPE_INBOX_0__POR, - [TOMTOM_A_SVASS_SPE_INBOX_1] = TOMTOM_A_SVASS_SPE_INBOX_1__POR, - [TOMTOM_A_SVASS_SPE_INBOX_2] = TOMTOM_A_SVASS_SPE_INBOX_2__POR, - [TOMTOM_A_SVASS_SPE_INBOX_3] = TOMTOM_A_SVASS_SPE_INBOX_3__POR, - [TOMTOM_A_SVASS_SPE_INBOX_4] = TOMTOM_A_SVASS_SPE_INBOX_4__POR, - [TOMTOM_A_SVASS_SPE_INBOX_5] = TOMTOM_A_SVASS_SPE_INBOX_5__POR, - [TOMTOM_A_SVASS_SPE_INBOX_6] = TOMTOM_A_SVASS_SPE_INBOX_6__POR, - [TOMTOM_A_SVASS_SPE_INBOX_7] = TOMTOM_A_SVASS_SPE_INBOX_7__POR, - [TOMTOM_A_SVASS_SPE_INBOX_8] = TOMTOM_A_SVASS_SPE_INBOX_8__POR, - [TOMTOM_A_SVASS_SPE_INBOX_9] = TOMTOM_A_SVASS_SPE_INBOX_9__POR, - [TOMTOM_A_SVASS_SPE_INBOX_10] = TOMTOM_A_SVASS_SPE_INBOX_10__POR, - [TOMTOM_A_SVASS_SPE_INBOX_11] = TOMTOM_A_SVASS_SPE_INBOX_11__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_0] = TOMTOM_A_SVASS_SPE_OUTBOX_0__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_1] = TOMTOM_A_SVASS_SPE_OUTBOX_1__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_2] = TOMTOM_A_SVASS_SPE_OUTBOX_2__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_3] = TOMTOM_A_SVASS_SPE_OUTBOX_3__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_4] = TOMTOM_A_SVASS_SPE_OUTBOX_4__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_5] = TOMTOM_A_SVASS_SPE_OUTBOX_5__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_6] = TOMTOM_A_SVASS_SPE_OUTBOX_6__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_7] = TOMTOM_A_SVASS_SPE_OUTBOX_7__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_8] = TOMTOM_A_SVASS_SPE_OUTBOX_8__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_9] = TOMTOM_A_SVASS_SPE_OUTBOX_9__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_10] = TOMTOM_A_SVASS_SPE_OUTBOX_10__POR, - [TOMTOM_A_SVASS_SPE_OUTBOX_11] = TOMTOM_A_SVASS_SPE_OUTBOX_11__POR, - [TOMTOM_A_INTR_MODE] = TOMTOM_A_INTR_MODE__POR, - [TOMTOM_A_INTR1_MASK0] = TOMTOM_A_INTR1_MASK0__POR, - [TOMTOM_A_INTR1_MASK1] = TOMTOM_A_INTR1_MASK1__POR, - [TOMTOM_A_INTR1_MASK2] = TOMTOM_A_INTR1_MASK2__POR, - [TOMTOM_A_INTR1_MASK3] = TOMTOM_A_INTR1_MASK3__POR, - [TOMTOM_A_INTR1_STATUS0] = TOMTOM_A_INTR1_STATUS0__POR, - [TOMTOM_A_INTR1_STATUS1] = TOMTOM_A_INTR1_STATUS1__POR, - [TOMTOM_A_INTR1_STATUS2] = TOMTOM_A_INTR1_STATUS2__POR, - [TOMTOM_A_INTR1_STATUS3] = TOMTOM_A_INTR1_STATUS3__POR, - [TOMTOM_A_INTR1_CLEAR0] = TOMTOM_A_INTR1_CLEAR0__POR, - [TOMTOM_A_INTR1_CLEAR1] = TOMTOM_A_INTR1_CLEAR1__POR, - [TOMTOM_A_INTR1_CLEAR2] = TOMTOM_A_INTR1_CLEAR2__POR, - [TOMTOM_A_INTR1_CLEAR3] = TOMTOM_A_INTR1_CLEAR3__POR, - [TOMTOM_A_INTR1_LEVEL0] = TOMTOM_A_INTR1_LEVEL0__POR, - [TOMTOM_A_INTR1_LEVEL1] = TOMTOM_A_INTR1_LEVEL1__POR, - [TOMTOM_A_INTR1_LEVEL2] = TOMTOM_A_INTR1_LEVEL2__POR, - [TOMTOM_A_INTR1_LEVEL3] = TOMTOM_A_INTR1_LEVEL3__POR, - [TOMTOM_A_INTR1_TEST0] = TOMTOM_A_INTR1_TEST0__POR, - [TOMTOM_A_INTR1_TEST1] = TOMTOM_A_INTR1_TEST1__POR, - [TOMTOM_A_INTR1_TEST2] = TOMTOM_A_INTR1_TEST2__POR, - [TOMTOM_A_INTR1_TEST3] = TOMTOM_A_INTR1_TEST3__POR, - [TOMTOM_A_INTR1_SET0] = 
TOMTOM_A_INTR1_SET0__POR, - [TOMTOM_A_INTR1_SET1] = TOMTOM_A_INTR1_SET1__POR, - [TOMTOM_A_INTR1_SET2] = TOMTOM_A_INTR1_SET2__POR, - [TOMTOM_A_INTR1_SET3] = TOMTOM_A_INTR1_SET3__POR, - [TOMTOM_A_INTR2_MASK0] = TOMTOM_A_INTR2_MASK0__POR, - [TOMTOM_A_INTR2_STATUS0] = TOMTOM_A_INTR2_STATUS0__POR, - [TOMTOM_A_INTR2_CLEAR0] = TOMTOM_A_INTR2_CLEAR0__POR, - [TOMTOM_A_INTR2_LEVEL0] = TOMTOM_A_INTR2_LEVEL0__POR, - [TOMTOM_A_INTR2_TEST0] = TOMTOM_A_INTR2_TEST0__POR, - [TOMTOM_A_INTR2_SET0] = TOMTOM_A_INTR2_SET0__POR, - [TOMTOM_A_CDC_TX_I2S_SCK_MODE] = TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR, - [TOMTOM_A_CDC_TX_I2S_WS_MODE] = TOMTOM_A_CDC_TX_I2S_WS_MODE__POR, - [TOMTOM_A_CDC_DMIC_DATA0_MODE] = TOMTOM_A_CDC_DMIC_DATA0_MODE__POR, - [TOMTOM_A_CDC_DMIC_CLK0_MODE] = TOMTOM_A_CDC_DMIC_CLK0_MODE__POR, - [TOMTOM_A_CDC_DMIC_DATA1_MODE] = TOMTOM_A_CDC_DMIC_DATA1_MODE__POR, - [TOMTOM_A_CDC_DMIC_CLK1_MODE] = TOMTOM_A_CDC_DMIC_CLK1_MODE__POR, - [TOMTOM_A_CDC_RX_I2S_SCK_MODE] = TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR, - [TOMTOM_A_CDC_RX_I2S_WS_MODE] = TOMTOM_A_CDC_RX_I2S_WS_MODE__POR, - [TOMTOM_A_CDC_DMIC_DATA2_MODE] = TOMTOM_A_CDC_DMIC_DATA2_MODE__POR, - [TOMTOM_A_CDC_DMIC_CLK2_MODE] = TOMTOM_A_CDC_DMIC_CLK2_MODE__POR, - [TOMTOM_A_CDC_INTR1_MODE] = TOMTOM_A_CDC_INTR1_MODE__POR, - [TOMTOM_A_CDC_SB_NRZ_SEL_MODE] = TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR, - [TOMTOM_A_CDC_INTR2_MODE] = TOMTOM_A_CDC_INTR2_MODE__POR, - [TOMTOM_A_CDC_RF_PA_ON_MODE] = TOMTOM_A_CDC_RF_PA_ON_MODE__POR, - [TOMTOM_A_CDC_BOOST_MODE] = TOMTOM_A_CDC_BOOST_MODE__POR, - [TOMTOM_A_CDC_JTCK_MODE] = TOMTOM_A_CDC_JTCK_MODE__POR, - [TOMTOM_A_CDC_JTDI_MODE] = TOMTOM_A_CDC_JTDI_MODE__POR, - [TOMTOM_A_CDC_JTMS_MODE] = TOMTOM_A_CDC_JTMS_MODE__POR, - [TOMTOM_A_CDC_JTDO_MODE] = TOMTOM_A_CDC_JTDO_MODE__POR, - [TOMTOM_A_CDC_JTRST_MODE] = TOMTOM_A_CDC_JTRST_MODE__POR, - [TOMTOM_A_CDC_BIST_MODE_MODE] = TOMTOM_A_CDC_BIST_MODE_MODE__POR, - [TOMTOM_A_CDC_MAD_MAIN_CTL_1] = TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR, - [TOMTOM_A_CDC_MAD_MAIN_CTL_2] = TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_1] = TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_2] = TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_3] = TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_4] = TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_5] = TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_6] = TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_7] = TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR, - [TOMTOM_A_CDC_MAD_AUDIO_CTL_8] = TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR, - [TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR] = - TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR, - [TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL] = - TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR, - [TOMTOM_A_CDC_MAD_ULTR_CTL_1] = TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR, - [TOMTOM_A_CDC_MAD_ULTR_CTL_2] = TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR, - [TOMTOM_A_CDC_MAD_ULTR_CTL_3] = TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR, - [TOMTOM_A_CDC_MAD_ULTR_CTL_4] = TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR, - [TOMTOM_A_CDC_MAD_ULTR_CTL_5] = TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR, - [TOMTOM_A_CDC_MAD_ULTR_CTL_6] = TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR, - [TOMTOM_A_CDC_MAD_ULTR_CTL_7] = TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR, - [TOMTOM_A_CDC_MAD_BEACON_CTL_1] = TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR, - [TOMTOM_A_CDC_MAD_BEACON_CTL_2] = TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR, - [TOMTOM_A_CDC_MAD_BEACON_CTL_3] = TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR, - [TOMTOM_A_CDC_MAD_BEACON_CTL_4] = TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR, - [TOMTOM_A_CDC_MAD_BEACON_CTL_5] = 
TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR, - [TOMTOM_A_CDC_MAD_BEACON_CTL_6] = TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR, - [TOMTOM_A_CDC_MAD_BEACON_CTL_7] = TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR, - [TOMTOM_A_CDC_MAD_BEACON_CTL_8] = TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR, - [TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR] = - TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR, - [TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL] = - TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR, - [TOMTOM_A_CDC_MAD_INP_SEL] = TOMTOM_A_CDC_MAD_INP_SEL__POR, - [TOMTOM_A_BIAS_REF_CTL] = TOMTOM_A_BIAS_REF_CTL__POR, - [TOMTOM_A_BIAS_CENTRAL_BG_CTL] = TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR, - [TOMTOM_A_BIAS_PRECHRG_CTL] = TOMTOM_A_BIAS_PRECHRG_CTL__POR, - [TOMTOM_A_BIAS_CURR_CTL_1] = TOMTOM_A_BIAS_CURR_CTL_1__POR, - [TOMTOM_A_BIAS_CURR_CTL_2] = TOMTOM_A_BIAS_CURR_CTL_2__POR, - [TOMTOM_A_BIAS_OSC_BG_CTL] = TOMTOM_A_BIAS_OSC_BG_CTL__POR, - [TOMTOM_A_CLK_BUFF_EN1] = TOMTOM_A_CLK_BUFF_EN1__POR, - [TOMTOM_A_CLK_BUFF_EN2] = TOMTOM_A_CLK_BUFF_EN2__POR, - [TOMTOM_A_LDO_L_MODE_1] = TOMTOM_A_LDO_L_MODE_1__POR, - [TOMTOM_A_LDO_L_MODE_2] = TOMTOM_A_LDO_L_MODE_2__POR, - [TOMTOM_A_LDO_L_CTRL_1] = TOMTOM_A_LDO_L_CTRL_1__POR, - [TOMTOM_A_LDO_L_CTRL_2] = TOMTOM_A_LDO_L_CTRL_2__POR, - [TOMTOM_A_LDO_L_CTRL_3] = TOMTOM_A_LDO_L_CTRL_3__POR, - [TOMTOM_A_LDO_L_CTRL_4] = TOMTOM_A_LDO_L_CTRL_4__POR, - [TOMTOM_A_LDO_H_MODE_1] = TOMTOM_A_LDO_H_MODE_1__POR, - [TOMTOM_A_LDO_H_MODE_2] = TOMTOM_A_LDO_H_MODE_2__POR, - [TOMTOM_A_LDO_H_LOOP_CTL] = TOMTOM_A_LDO_H_LOOP_CTL__POR, - [TOMTOM_A_LDO_H_COMP_1] = TOMTOM_A_LDO_H_COMP_1__POR, - [TOMTOM_A_LDO_H_COMP_2] = TOMTOM_A_LDO_H_COMP_2__POR, - [TOMTOM_A_LDO_H_BIAS_1] = TOMTOM_A_LDO_H_BIAS_1__POR, - [TOMTOM_A_LDO_H_BIAS_2] = TOMTOM_A_LDO_H_BIAS_2__POR, - [TOMTOM_A_LDO_H_BIAS_3] = TOMTOM_A_LDO_H_BIAS_3__POR, - [TOMTOM_A_VBAT_CLK] = TOMTOM_A_VBAT_CLK__POR, - [TOMTOM_A_VBAT_LOOP] = TOMTOM_A_VBAT_LOOP__POR, - [TOMTOM_A_VBAT_REF] = TOMTOM_A_VBAT_REF__POR, - [TOMTOM_A_VBAT_ADC_TEST] = TOMTOM_A_VBAT_ADC_TEST__POR, - [TOMTOM_A_VBAT_FE] = TOMTOM_A_VBAT_FE__POR, - [TOMTOM_A_VBAT_BIAS_1] = TOMTOM_A_VBAT_BIAS_1__POR, - [TOMTOM_A_VBAT_BIAS_2] = TOMTOM_A_VBAT_BIAS_2__POR, - [TOMTOM_A_VBAT_ADC_DATA_MSB] = TOMTOM_A_VBAT_ADC_DATA_MSB__POR, - [TOMTOM_A_VBAT_ADC_DATA_LSB] = TOMTOM_A_VBAT_ADC_DATA_LSB__POR, - [TOMTOM_A_FLL_NREF] = TOMTOM_A_FLL_NREF__POR, - [TOMTOM_A_FLL_KDCO_TUNE] = TOMTOM_A_FLL_KDCO_TUNE__POR, - [TOMTOM_A_FLL_LOCK_THRESH] = TOMTOM_A_FLL_LOCK_THRESH__POR, - [TOMTOM_A_FLL_LOCK_DET_COUNT] = TOMTOM_A_FLL_LOCK_DET_COUNT__POR, - [TOMTOM_A_FLL_DAC_THRESHOLD] = TOMTOM_A_FLL_DAC_THRESHOLD__POR, - [TOMTOM_A_FLL_TEST_DCO_FREERUN] = TOMTOM_A_FLL_TEST_DCO_FREERUN__POR, - [TOMTOM_A_FLL_TEST_ENABLE] = TOMTOM_A_FLL_TEST_ENABLE__POR, - [TOMTOM_A_MICB_CFILT_1_CTL] = TOMTOM_A_MICB_CFILT_1_CTL__POR, - [TOMTOM_A_MICB_CFILT_1_VAL] = TOMTOM_A_MICB_CFILT_1_VAL__POR, - [TOMTOM_A_MICB_CFILT_1_PRECHRG] = TOMTOM_A_MICB_CFILT_1_PRECHRG__POR, - [TOMTOM_A_MICB_1_CTL] = TOMTOM_A_MICB_1_CTL__POR, - [TOMTOM_A_MICB_1_INT_RBIAS] = TOMTOM_A_MICB_1_INT_RBIAS__POR, - [TOMTOM_A_MICB_1_MBHC] = TOMTOM_A_MICB_1_MBHC__POR, - [TOMTOM_A_MICB_CFILT_2_CTL] = TOMTOM_A_MICB_CFILT_2_CTL__POR, - [TOMTOM_A_MICB_CFILT_2_VAL] = TOMTOM_A_MICB_CFILT_2_VAL__POR, - [TOMTOM_A_MICB_CFILT_2_PRECHRG] = TOMTOM_A_MICB_CFILT_2_PRECHRG__POR, - [TOMTOM_A_MICB_2_CTL] = TOMTOM_A_MICB_2_CTL__POR, - [TOMTOM_A_MICB_2_INT_RBIAS] = TOMTOM_A_MICB_2_INT_RBIAS__POR, - [TOMTOM_A_MICB_2_MBHC] = TOMTOM_A_MICB_2_MBHC__POR, - [TOMTOM_A_MICB_CFILT_3_CTL] = TOMTOM_A_MICB_CFILT_3_CTL__POR, - [TOMTOM_A_MICB_CFILT_3_VAL] = 
TOMTOM_A_MICB_CFILT_3_VAL__POR, - [TOMTOM_A_MICB_CFILT_3_PRECHRG] = TOMTOM_A_MICB_CFILT_3_PRECHRG__POR, - [TOMTOM_A_MICB_3_CTL] = TOMTOM_A_MICB_3_CTL__POR, - [TOMTOM_A_MICB_3_INT_RBIAS] = TOMTOM_A_MICB_3_INT_RBIAS__POR, - [TOMTOM_A_MICB_3_MBHC] = TOMTOM_A_MICB_3_MBHC__POR, - [TOMTOM_A_MICB_4_CTL] = TOMTOM_A_MICB_4_CTL__POR, - [TOMTOM_A_MICB_4_INT_RBIAS] = TOMTOM_A_MICB_4_INT_RBIAS__POR, - [TOMTOM_A_MICB_4_MBHC] = TOMTOM_A_MICB_4_MBHC__POR, - [TOMTOM_A_SPKR_DRV2_EN] = TOMTOM_A_SPKR_DRV2_EN__POR, - [TOMTOM_A_SPKR_DRV2_GAIN] = TOMTOM_A_SPKR_DRV2_GAIN__POR, - [TOMTOM_A_SPKR_DRV2_DAC_CTL] = TOMTOM_A_SPKR_DRV2_DAC_CTL__POR, - [TOMTOM_A_SPKR_DRV2_OCP_CTL] = TOMTOM_A_SPKR_DRV2_OCP_CTL__POR, - [TOMTOM_A_SPKR_DRV2_CLIP_DET] = TOMTOM_A_SPKR_DRV2_CLIP_DET__POR, - [TOMTOM_A_SPKR_DRV2_DBG_DAC] = TOMTOM_A_SPKR_DRV2_DBG_DAC__POR, - [TOMTOM_A_SPKR_DRV2_DBG_PA] = TOMTOM_A_SPKR_DRV2_DBG_PA__POR, - [TOMTOM_A_SPKR_DRV2_DBG_PWRSTG] = TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR, - [TOMTOM_A_SPKR_DRV2_BIAS_LDO] = TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR, - [TOMTOM_A_SPKR_DRV2_BIAS_INT] = TOMTOM_A_SPKR_DRV2_BIAS_INT__POR, - [TOMTOM_A_SPKR_DRV2_BIAS_PA] = TOMTOM_A_SPKR_DRV2_BIAS_PA__POR, - [TOMTOM_A_SPKR_DRV2_STATUS_OCP] = TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR, - [TOMTOM_A_SPKR_DRV2_STATUS_PA] = TOMTOM_A_SPKR_DRV2_STATUS_PA__POR, - [TOMTOM_A_MBHC_INSERT_DETECT] = TOMTOM_A_MBHC_INSERT_DETECT__POR, - [TOMTOM_A_MBHC_INSERT_DET_STATUS] = - TOMTOM_A_MBHC_INSERT_DET_STATUS__POR, - [TOMTOM_A_TX_COM_BIAS] = TOMTOM_A_TX_COM_BIAS__POR, - [TOMTOM_A_MBHC_INSERT_DETECT2] = TOMTOM_A_MBHC_INSERT_DETECT2__POR, - [TOMTOM_A_MBHC_SCALING_MUX_1] = TOMTOM_A_MBHC_SCALING_MUX_1__POR, - [TOMTOM_A_MBHC_SCALING_MUX_2] = TOMTOM_A_MBHC_SCALING_MUX_2__POR, - [TOMTOM_A_MAD_ANA_CTRL] = TOMTOM_A_MAD_ANA_CTRL__POR, - [TOMTOM_A_TX_SUP_SWITCH_CTRL_1] = TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR, - [TOMTOM_A_TX_SUP_SWITCH_CTRL_2] = TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR, - [TOMTOM_A_TX_1_GAIN] = TOMTOM_A_TX_1_GAIN__POR, - [TOMTOM_A_TX_1_2_TEST_EN] = TOMTOM_A_TX_1_2_TEST_EN__POR, - [TOMTOM_A_TX_2_GAIN] = TOMTOM_A_TX_2_GAIN__POR, - [TOMTOM_A_TX_1_2_ADC_IB] = TOMTOM_A_TX_1_2_ADC_IB__POR, - [TOMTOM_A_TX_1_2_ATEST_REFCTRL] = TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR, - [TOMTOM_A_TX_1_2_TEST_CTL] = TOMTOM_A_TX_1_2_TEST_CTL__POR, - [TOMTOM_A_TX_1_2_TEST_BLOCK_EN] = TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR, - [TOMTOM_A_TX_1_2_TXFE_CLKDIV] = TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR, - [TOMTOM_A_TX_1_2_SAR_ERR_CH1] = TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR, - [TOMTOM_A_TX_1_2_SAR_ERR_CH2] = TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR, - [TOMTOM_A_TX_3_GAIN] = TOMTOM_A_TX_3_GAIN__POR, - [TOMTOM_A_TX_3_4_TEST_EN] = TOMTOM_A_TX_3_4_TEST_EN__POR, - [TOMTOM_A_TX_4_GAIN] = TOMTOM_A_TX_4_GAIN__POR, - [TOMTOM_A_TX_3_4_ADC_IB] = TOMTOM_A_TX_3_4_ADC_IB__POR, - [TOMTOM_A_TX_3_4_ATEST_REFCTRL] = TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR, - [TOMTOM_A_TX_3_4_TEST_CTL] = TOMTOM_A_TX_3_4_TEST_CTL__POR, - [TOMTOM_A_TX_3_4_TEST_BLOCK_EN] = TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR, - [TOMTOM_A_TX_3_4_TXFE_CKDIV] = TOMTOM_A_TX_3_4_TXFE_CKDIV__POR, - [TOMTOM_A_TX_3_4_SAR_ERR_CH3] = TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR, - [TOMTOM_A_TX_3_4_SAR_ERR_CH4] = TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR, - [TOMTOM_A_TX_5_GAIN] = TOMTOM_A_TX_5_GAIN__POR, - [TOMTOM_A_TX_5_6_TEST_EN] = TOMTOM_A_TX_5_6_TEST_EN__POR, - [TOMTOM_A_TX_6_GAIN] = TOMTOM_A_TX_6_GAIN__POR, - [TOMTOM_A_TX_5_6_ADC_IB] = TOMTOM_A_TX_5_6_ADC_IB__POR, - [TOMTOM_A_TX_5_6_ATEST_REFCTRL] = TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR, - [TOMTOM_A_TX_5_6_TEST_CTL] = TOMTOM_A_TX_5_6_TEST_CTL__POR, - [TOMTOM_A_TX_5_6_TEST_BLOCK_EN] = 
TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR, - [TOMTOM_A_TX_5_6_TXFE_CKDIV] = TOMTOM_A_TX_5_6_TXFE_CKDIV__POR, - [TOMTOM_A_TX_5_6_SAR_ERR_CH5] = TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR, - [TOMTOM_A_TX_5_6_SAR_ERR_CH6] = TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR, - [TOMTOM_A_TX_7_MBHC_EN] = TOMTOM_A_TX_7_MBHC_EN__POR, - [TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL] = - TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR, - [TOMTOM_A_TX_7_MBHC_ADC] = TOMTOM_A_TX_7_MBHC_ADC__POR, - [TOMTOM_A_TX_7_MBHC_TEST_CTL] = TOMTOM_A_TX_7_MBHC_TEST_CTL__POR, - [TOMTOM_A_TX_7_MBHC_SAR_ERR] = TOMTOM_A_TX_7_MBHC_SAR_ERR__POR, - [TOMTOM_A_TX_7_TXFE_CLKDIV] = TOMTOM_A_TX_7_TXFE_CLKDIV__POR, - [TOMTOM_A_RCO_CTRL] = TOMTOM_A_RCO_CTRL__POR, - [TOMTOM_A_RCO_CALIBRATION_CTRL1] = TOMTOM_A_RCO_CALIBRATION_CTRL1__POR, - [TOMTOM_A_RCO_CALIBRATION_CTRL2] = TOMTOM_A_RCO_CALIBRATION_CTRL2__POR, - [TOMTOM_A_RCO_CALIBRATION_CTRL3] = TOMTOM_A_RCO_CALIBRATION_CTRL3__POR, - [TOMTOM_A_RCO_TEST_CTRL] = TOMTOM_A_RCO_TEST_CTRL__POR, - [TOMTOM_A_RCO_CALIBRATION_RESULT1] = - TOMTOM_A_RCO_CALIBRATION_RESULT1__POR, - [TOMTOM_A_RCO_CALIBRATION_RESULT2] = - TOMTOM_A_RCO_CALIBRATION_RESULT2__POR, - [TOMTOM_A_BUCK_MODE_1] = TOMTOM_A_BUCK_MODE_1__POR, - [TOMTOM_A_BUCK_MODE_2] = TOMTOM_A_BUCK_MODE_2__POR, - [TOMTOM_A_BUCK_MODE_3] = TOMTOM_A_BUCK_MODE_3__POR, - [TOMTOM_A_BUCK_MODE_4] = TOMTOM_A_BUCK_MODE_4__POR, - [TOMTOM_A_BUCK_MODE_5] = TOMTOM_A_BUCK_MODE_5__POR, - [TOMTOM_A_BUCK_CTRL_VCL_1] = TOMTOM_A_BUCK_CTRL_VCL_1__POR, - [TOMTOM_A_BUCK_CTRL_VCL_2] = TOMTOM_A_BUCK_CTRL_VCL_2__POR, - [TOMTOM_A_BUCK_CTRL_VCL_3] = TOMTOM_A_BUCK_CTRL_VCL_3__POR, - [TOMTOM_A_BUCK_CTRL_CCL_1] = TOMTOM_A_BUCK_CTRL_CCL_1__POR, - [TOMTOM_A_BUCK_CTRL_CCL_2] = TOMTOM_A_BUCK_CTRL_CCL_2__POR, - [TOMTOM_A_BUCK_CTRL_CCL_3] = TOMTOM_A_BUCK_CTRL_CCL_3__POR, - [TOMTOM_A_BUCK_CTRL_CCL_4] = TOMTOM_A_BUCK_CTRL_CCL_4__POR, - [TOMTOM_A_BUCK_CTRL_PWM_DRVR_1] = TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR, - [TOMTOM_A_BUCK_CTRL_PWM_DRVR_2] = TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR, - [TOMTOM_A_BUCK_CTRL_PWM_DRVR_3] = TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR, - [TOMTOM_A_BUCK_TMUX_A_D] = TOMTOM_A_BUCK_TMUX_A_D__POR, - [TOMTOM_A_NCP_BUCKREF] = TOMTOM_A_NCP_BUCKREF__POR, - [TOMTOM_A_NCP_EN] = TOMTOM_A_NCP_EN__POR, - [TOMTOM_A_NCP_CLK] = TOMTOM_A_NCP_CLK__POR, - [TOMTOM_A_NCP_STATIC] = TOMTOM_A_NCP_STATIC__POR, - [TOMTOM_A_NCP_VTH_LOW] = TOMTOM_A_NCP_VTH_LOW__POR, - [TOMTOM_A_NCP_VTH_HIGH] = TOMTOM_A_NCP_VTH_HIGH__POR, - [TOMTOM_A_NCP_ATEST] = TOMTOM_A_NCP_ATEST__POR, - [TOMTOM_A_NCP_DTEST] = TOMTOM_A_NCP_DTEST__POR, - [TOMTOM_A_NCP_DLY1] = TOMTOM_A_NCP_DLY1__POR, - [TOMTOM_A_NCP_DLY2] = TOMTOM_A_NCP_DLY2__POR, - [TOMTOM_A_RX_AUX_SW_CTL] = TOMTOM_A_RX_AUX_SW_CTL__POR, - [TOMTOM_A_RX_PA_AUX_IN_CONN] = TOMTOM_A_RX_PA_AUX_IN_CONN__POR, - [TOMTOM_A_RX_COM_TIMER_DIV] = TOMTOM_A_RX_COM_TIMER_DIV__POR, - [TOMTOM_A_RX_COM_OCP_CTL] = TOMTOM_A_RX_COM_OCP_CTL__POR, - [TOMTOM_A_RX_COM_OCP_COUNT] = TOMTOM_A_RX_COM_OCP_COUNT__POR, - [TOMTOM_A_RX_COM_DAC_CTL] = TOMTOM_A_RX_COM_DAC_CTL__POR, - [TOMTOM_A_RX_COM_BIAS] = TOMTOM_A_RX_COM_BIAS__POR, - [TOMTOM_A_RX_HPH_AUTO_CHOP] = TOMTOM_A_RX_HPH_AUTO_CHOP__POR, - [TOMTOM_A_RX_HPH_CHOP_CTL] = TOMTOM_A_RX_HPH_CHOP_CTL__POR, - [TOMTOM_A_RX_HPH_BIAS_PA] = TOMTOM_A_RX_HPH_BIAS_PA__POR, - [TOMTOM_A_RX_HPH_BIAS_LDO] = TOMTOM_A_RX_HPH_BIAS_LDO__POR, - [TOMTOM_A_RX_HPH_BIAS_CNP] = TOMTOM_A_RX_HPH_BIAS_CNP__POR, - [TOMTOM_A_RX_HPH_BIAS_WG_OCP] = TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR, - [TOMTOM_A_RX_HPH_OCP_CTL] = TOMTOM_A_RX_HPH_OCP_CTL__POR, - [TOMTOM_A_RX_HPH_CNP_EN] = TOMTOM_A_RX_HPH_CNP_EN__POR, - [TOMTOM_A_RX_HPH_CNP_WG_CTL] = 
TOMTOM_A_RX_HPH_CNP_WG_CTL__POR, - [TOMTOM_A_RX_HPH_CNP_WG_TIME] = TOMTOM_A_RX_HPH_CNP_WG_TIME__POR, - [TOMTOM_A_RX_HPH_L_GAIN] = TOMTOM_A_RX_HPH_L_GAIN__POR, - [TOMTOM_A_RX_HPH_L_TEST] = TOMTOM_A_RX_HPH_L_TEST__POR, - [TOMTOM_A_RX_HPH_L_PA_CTL] = TOMTOM_A_RX_HPH_L_PA_CTL__POR, - [TOMTOM_A_RX_HPH_L_DAC_CTL] = TOMTOM_A_RX_HPH_L_DAC_CTL__POR, - [TOMTOM_A_RX_HPH_L_ATEST] = TOMTOM_A_RX_HPH_L_ATEST__POR, - [TOMTOM_A_RX_HPH_L_STATUS] = TOMTOM_A_RX_HPH_L_STATUS__POR, - [TOMTOM_A_RX_HPH_R_GAIN] = TOMTOM_A_RX_HPH_R_GAIN__POR, - [TOMTOM_A_RX_HPH_R_TEST] = TOMTOM_A_RX_HPH_R_TEST__POR, - [TOMTOM_A_RX_HPH_R_PA_CTL] = TOMTOM_A_RX_HPH_R_PA_CTL__POR, - [TOMTOM_A_RX_HPH_R_DAC_CTL] = TOMTOM_A_RX_HPH_R_DAC_CTL__POR, - [TOMTOM_A_RX_HPH_R_ATEST] = TOMTOM_A_RX_HPH_R_ATEST__POR, - [TOMTOM_A_RX_HPH_R_STATUS] = TOMTOM_A_RX_HPH_R_STATUS__POR, - [TOMTOM_A_RX_EAR_BIAS_PA] = TOMTOM_A_RX_EAR_BIAS_PA__POR, - [TOMTOM_A_RX_EAR_BIAS_CMBUFF] = TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR, - [TOMTOM_A_RX_EAR_EN] = TOMTOM_A_RX_EAR_EN__POR, - [TOMTOM_A_RX_EAR_GAIN] = TOMTOM_A_RX_EAR_GAIN__POR, - [TOMTOM_A_RX_EAR_CMBUFF] = TOMTOM_A_RX_EAR_CMBUFF__POR, - [TOMTOM_A_RX_EAR_ICTL] = TOMTOM_A_RX_EAR_ICTL__POR, - [TOMTOM_A_RX_EAR_CCOMP] = TOMTOM_A_RX_EAR_CCOMP__POR, - [TOMTOM_A_RX_EAR_VCM] = TOMTOM_A_RX_EAR_VCM__POR, - [TOMTOM_A_RX_EAR_CNP] = TOMTOM_A_RX_EAR_CNP__POR, - [TOMTOM_A_RX_EAR_DAC_CTL_ATEST] = TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR, - [TOMTOM_A_RX_EAR_STATUS] = TOMTOM_A_RX_EAR_STATUS__POR, - [TOMTOM_A_RX_LINE_BIAS_PA] = TOMTOM_A_RX_LINE_BIAS_PA__POR, - [TOMTOM_A_RX_BUCK_BIAS1] = TOMTOM_A_RX_BUCK_BIAS1__POR, - [TOMTOM_A_RX_BUCK_BIAS2] = TOMTOM_A_RX_BUCK_BIAS2__POR, - [TOMTOM_A_RX_LINE_COM] = TOMTOM_A_RX_LINE_COM__POR, - [TOMTOM_A_RX_LINE_CNP_EN] = TOMTOM_A_RX_LINE_CNP_EN__POR, - [TOMTOM_A_RX_LINE_CNP_WG_CTL] = TOMTOM_A_RX_LINE_CNP_WG_CTL__POR, - [TOMTOM_A_RX_LINE_CNP_WG_TIME] = TOMTOM_A_RX_LINE_CNP_WG_TIME__POR, - [TOMTOM_A_RX_LINE_1_GAIN] = TOMTOM_A_RX_LINE_1_GAIN__POR, - [TOMTOM_A_RX_LINE_1_TEST] = TOMTOM_A_RX_LINE_1_TEST__POR, - [TOMTOM_A_RX_LINE_1_DAC_CTL] = TOMTOM_A_RX_LINE_1_DAC_CTL__POR, - [TOMTOM_A_RX_LINE_1_STATUS] = TOMTOM_A_RX_LINE_1_STATUS__POR, - [TOMTOM_A_RX_LINE_2_GAIN] = TOMTOM_A_RX_LINE_2_GAIN__POR, - [TOMTOM_A_RX_LINE_2_TEST] = TOMTOM_A_RX_LINE_2_TEST__POR, - [TOMTOM_A_RX_LINE_2_DAC_CTL] = TOMTOM_A_RX_LINE_2_DAC_CTL__POR, - [TOMTOM_A_RX_LINE_2_STATUS] = TOMTOM_A_RX_LINE_2_STATUS__POR, - [TOMTOM_A_RX_LINE_3_GAIN] = TOMTOM_A_RX_LINE_3_GAIN__POR, - [TOMTOM_A_RX_LINE_3_TEST] = TOMTOM_A_RX_LINE_3_TEST__POR, - [TOMTOM_A_RX_LINE_3_DAC_CTL] = TOMTOM_A_RX_LINE_3_DAC_CTL__POR, - [TOMTOM_A_RX_LINE_3_STATUS] = TOMTOM_A_RX_LINE_3_STATUS__POR, - [TOMTOM_A_RX_LINE_4_GAIN] = TOMTOM_A_RX_LINE_4_GAIN__POR, - [TOMTOM_A_RX_LINE_4_TEST] = TOMTOM_A_RX_LINE_4_TEST__POR, - [TOMTOM_A_RX_LINE_4_DAC_CTL] = TOMTOM_A_RX_LINE_4_DAC_CTL__POR, - [TOMTOM_A_RX_LINE_4_STATUS] = TOMTOM_A_RX_LINE_4_STATUS__POR, - [TOMTOM_A_RX_LINE_CNP_DBG] = TOMTOM_A_RX_LINE_CNP_DBG__POR, - [TOMTOM_A_SPKR_DRV1_EN] = TOMTOM_A_SPKR_DRV1_EN__POR, - [TOMTOM_A_SPKR_DRV1_GAIN] = TOMTOM_A_SPKR_DRV1_GAIN__POR, - [TOMTOM_A_SPKR_DRV1_DAC_CTL] = TOMTOM_A_SPKR_DRV1_DAC_CTL__POR, - [TOMTOM_A_SPKR_DRV1_OCP_CTL] = TOMTOM_A_SPKR_DRV1_OCP_CTL__POR, - [TOMTOM_A_SPKR_DRV1_CLIP_DET] = TOMTOM_A_SPKR_DRV1_CLIP_DET__POR, - [TOMTOM_A_SPKR_DRV1_IEC] = TOMTOM_A_SPKR_DRV1_IEC__POR, - [TOMTOM_A_SPKR_DRV1_DBG_DAC] = TOMTOM_A_SPKR_DRV1_DBG_DAC__POR, - [TOMTOM_A_SPKR_DRV1_DBG_PA] = TOMTOM_A_SPKR_DRV1_DBG_PA__POR, - [TOMTOM_A_SPKR_DRV1_DBG_PWRSTG] = TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR, - 
[TOMTOM_A_SPKR_DRV1_BIAS_LDO] = TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR, - [TOMTOM_A_SPKR_DRV1_BIAS_INT] = TOMTOM_A_SPKR_DRV1_BIAS_INT__POR, - [TOMTOM_A_SPKR_DRV1_BIAS_PA] = TOMTOM_A_SPKR_DRV1_BIAS_PA__POR, - [TOMTOM_A_SPKR_DRV1_STATUS_OCP] = TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR, - [TOMTOM_A_SPKR_DRV1_STATUS_PA] = TOMTOM_A_SPKR_DRV1_STATUS_PA__POR, - [TOMTOM_A_SPKR1_PROT_EN] = TOMTOM_A_SPKR1_PROT_EN__POR, - [TOMTOM_A_SPKR1_PROT_ADC_TEST_EN] = - TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR, - [TOMTOM_A_SPKR1_PROT_ATEST] = TOMTOM_A_SPKR1_PROT_ATEST__POR, - [TOMTOM_A_SPKR1_PROT_LDO_CTRL] = TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR, - [TOMTOM_A_SPKR1_PROT_ISENSE_CTRL] = - TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR, - [TOMTOM_A_SPKR1_PROT_VSENSE_CTRL] = - TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR, - [TOMTOM_A_SPKR2_PROT_EN] = TOMTOM_A_SPKR2_PROT_EN__POR, - [TOMTOM_A_SPKR2_PROT_ADC_TEST_EN] = - TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR, - [TOMTOM_A_SPKR2_PROT_ATEST] = TOMTOM_A_SPKR2_PROT_ATEST__POR, - [TOMTOM_A_SPKR2_PROT_LDO_CTRL] = TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR, - [TOMTOM_A_SPKR2_PROT_ISENSE_CTRL] = - TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR, - [TOMTOM_A_SPKR2_PROT_VSENSE_CTRL] = - TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR, - [TOMTOM_A_MBHC_HPH] = TOMTOM_A_MBHC_HPH__POR, - [TOMTOM_A_CDC_ANC1_B1_CTL] = TOMTOM_A_CDC_ANC1_B1_CTL__POR, - [TOMTOM_A_CDC_ANC2_B1_CTL] = TOMTOM_A_CDC_ANC2_B1_CTL__POR, - [TOMTOM_A_CDC_ANC1_SHIFT] = TOMTOM_A_CDC_ANC1_SHIFT__POR, - [TOMTOM_A_CDC_ANC2_SHIFT] = TOMTOM_A_CDC_ANC2_SHIFT__POR, - [TOMTOM_A_CDC_ANC1_IIR_B1_CTL] = TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR, - [TOMTOM_A_CDC_ANC2_IIR_B1_CTL] = TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR, - [TOMTOM_A_CDC_ANC1_IIR_B2_CTL] = TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR, - [TOMTOM_A_CDC_ANC2_IIR_B2_CTL] = TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR, - [TOMTOM_A_CDC_ANC1_IIR_B3_CTL] = TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR, - [TOMTOM_A_CDC_ANC2_IIR_B3_CTL] = TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR, - [TOMTOM_A_CDC_ANC1_LPF_B1_CTL] = TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR, - [TOMTOM_A_CDC_ANC2_LPF_B1_CTL] = TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR, - [TOMTOM_A_CDC_ANC1_LPF_B2_CTL] = TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR, - [TOMTOM_A_CDC_ANC2_LPF_B2_CTL] = TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR, - [TOMTOM_A_CDC_ANC1_SPARE] = TOMTOM_A_CDC_ANC1_SPARE__POR, - [TOMTOM_A_CDC_ANC2_SPARE] = TOMTOM_A_CDC_ANC2_SPARE__POR, - [TOMTOM_A_CDC_ANC1_SMLPF_CTL] = TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR, - [TOMTOM_A_CDC_ANC2_SMLPF_CTL] = TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR, - [TOMTOM_A_CDC_ANC1_DCFLT_CTL] = TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR, - [TOMTOM_A_CDC_ANC2_DCFLT_CTL] = TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR, - [TOMTOM_A_CDC_ANC1_GAIN_CTL] = TOMTOM_A_CDC_ANC1_GAIN_CTL__POR, - [TOMTOM_A_CDC_ANC2_GAIN_CTL] = TOMTOM_A_CDC_ANC2_GAIN_CTL__POR, - [TOMTOM_A_CDC_ANC1_B2_CTL] = TOMTOM_A_CDC_ANC1_B2_CTL__POR, - [TOMTOM_A_CDC_ANC2_B2_CTL] = TOMTOM_A_CDC_ANC2_B2_CTL__POR, - [TOMTOM_A_CDC_TX1_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX2_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX3_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX4_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX5_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX6_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX7_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX8_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX9_VOL_CTL_TIMER] = TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX10_VOL_CTL_TIMER] = - 
TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR, - [TOMTOM_A_CDC_TX1_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX2_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX3_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX4_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX5_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX6_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX7_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX8_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX9_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX10_VOL_CTL_GAIN] = TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR, - [TOMTOM_A_CDC_TX1_VOL_CTL_CFG] = TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX2_VOL_CTL_CFG] = TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX3_VOL_CTL_CFG] = TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX4_VOL_CTL_CFG] = TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX5_VOL_CTL_CFG] = TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX6_VOL_CTL_CFG] = TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX7_VOL_CTL_CFG] = TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX8_VOL_CTL_CFG] = TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX9_VOL_CTL_CFG] = TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX10_VOL_CTL_CFG] = TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR, - [TOMTOM_A_CDC_TX1_MUX_CTL] = TOMTOM_A_CDC_TX1_MUX_CTL__POR, - [TOMTOM_A_CDC_TX2_MUX_CTL] = TOMTOM_A_CDC_TX2_MUX_CTL__POR, - [TOMTOM_A_CDC_TX3_MUX_CTL] = TOMTOM_A_CDC_TX3_MUX_CTL__POR, - [TOMTOM_A_CDC_TX4_MUX_CTL] = TOMTOM_A_CDC_TX4_MUX_CTL__POR, - [TOMTOM_A_CDC_TX5_MUX_CTL] = TOMTOM_A_CDC_TX5_MUX_CTL__POR, - [TOMTOM_A_CDC_TX6_MUX_CTL] = TOMTOM_A_CDC_TX6_MUX_CTL__POR, - [TOMTOM_A_CDC_TX7_MUX_CTL] = TOMTOM_A_CDC_TX7_MUX_CTL__POR, - [TOMTOM_A_CDC_TX8_MUX_CTL] = TOMTOM_A_CDC_TX8_MUX_CTL__POR, - [TOMTOM_A_CDC_TX9_MUX_CTL] = TOMTOM_A_CDC_TX9_MUX_CTL__POR, - [TOMTOM_A_CDC_TX10_MUX_CTL] = TOMTOM_A_CDC_TX10_MUX_CTL__POR, - [TOMTOM_A_CDC_TX1_CLK_FS_CTL] = TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX2_CLK_FS_CTL] = TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX3_CLK_FS_CTL] = TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX4_CLK_FS_CTL] = TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX5_CLK_FS_CTL] = TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX6_CLK_FS_CTL] = TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX7_CLK_FS_CTL] = TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX8_CLK_FS_CTL] = TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX9_CLK_FS_CTL] = TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX10_CLK_FS_CTL] = TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR, - [TOMTOM_A_CDC_TX1_DMIC_CTL] = TOMTOM_A_CDC_TX1_DMIC_CTL__POR, - [TOMTOM_A_CDC_TX2_DMIC_CTL] = TOMTOM_A_CDC_TX2_DMIC_CTL__POR, - [TOMTOM_A_CDC_TX3_DMIC_CTL] = TOMTOM_A_CDC_TX3_DMIC_CTL__POR, - [TOMTOM_A_CDC_TX4_DMIC_CTL] = TOMTOM_A_CDC_TX4_DMIC_CTL__POR, - [TOMTOM_A_CDC_TX5_DMIC_CTL] = TOMTOM_A_CDC_TX5_DMIC_CTL__POR, - [TOMTOM_A_CDC_TX6_DMIC_CTL] = TOMTOM_A_CDC_TX6_DMIC_CTL__POR, - [TOMTOM_A_CDC_TX7_DMIC_CTL] = TOMTOM_A_CDC_TX7_DMIC_CTL__POR, - [TOMTOM_A_CDC_TX8_DMIC_CTL] = TOMTOM_A_CDC_TX8_DMIC_CTL__POR, - [TOMTOM_A_CDC_TX9_DMIC_CTL] = TOMTOM_A_CDC_TX9_DMIC_CTL__POR, - [TOMTOM_A_CDC_TX10_DMIC_CTL] = TOMTOM_A_CDC_TX10_DMIC_CTL__POR, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL0] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL1] = 
TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL2] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL3] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL4] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL5] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL6] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR, - [TOMTOM_A_CDC_SPKR_CLIPDET_VAL7] = TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR, - [TOMTOM_A_CDC_DEBUG_B1_CTL] = TOMTOM_A_CDC_DEBUG_B1_CTL__POR, - [TOMTOM_A_CDC_DEBUG_B2_CTL] = TOMTOM_A_CDC_DEBUG_B2_CTL__POR, - [TOMTOM_A_CDC_DEBUG_B3_CTL] = TOMTOM_A_CDC_DEBUG_B3_CTL__POR, - [TOMTOM_A_CDC_DEBUG_B4_CTL] = TOMTOM_A_CDC_DEBUG_B4_CTL__POR, - [TOMTOM_A_CDC_DEBUG_B5_CTL] = TOMTOM_A_CDC_DEBUG_B5_CTL__POR, - [TOMTOM_A_CDC_DEBUG_B6_CTL] = TOMTOM_A_CDC_DEBUG_B6_CTL__POR, - [TOMTOM_A_CDC_DEBUG_B7_CTL] = TOMTOM_A_CDC_DEBUG_B7_CTL__POR, - [TOMTOM_A_CDC_SRC1_PDA_CFG] = TOMTOM_A_CDC_SRC1_PDA_CFG__POR, - [TOMTOM_A_CDC_SRC2_PDA_CFG] = TOMTOM_A_CDC_SRC2_PDA_CFG__POR, - [TOMTOM_A_CDC_SRC1_FS_CTL] = TOMTOM_A_CDC_SRC1_FS_CTL__POR, - [TOMTOM_A_CDC_SRC2_FS_CTL] = TOMTOM_A_CDC_SRC2_FS_CTL__POR, - [TOMTOM_A_CDC_RX1_B1_CTL] = TOMTOM_A_CDC_RX1_B1_CTL__POR, - [TOMTOM_A_CDC_RX2_B1_CTL] = TOMTOM_A_CDC_RX2_B1_CTL__POR, - [TOMTOM_A_CDC_RX3_B1_CTL] = TOMTOM_A_CDC_RX3_B1_CTL__POR, - [TOMTOM_A_CDC_RX4_B1_CTL] = TOMTOM_A_CDC_RX4_B1_CTL__POR, - [TOMTOM_A_CDC_RX5_B1_CTL] = TOMTOM_A_CDC_RX5_B1_CTL__POR, - [TOMTOM_A_CDC_RX6_B1_CTL] = TOMTOM_A_CDC_RX6_B1_CTL__POR, - [TOMTOM_A_CDC_RX7_B1_CTL] = TOMTOM_A_CDC_RX7_B1_CTL__POR, - [TOMTOM_A_CDC_RX1_B2_CTL] = TOMTOM_A_CDC_RX1_B2_CTL__POR, - [TOMTOM_A_CDC_RX2_B2_CTL] = TOMTOM_A_CDC_RX2_B2_CTL__POR, - [TOMTOM_A_CDC_RX3_B2_CTL] = TOMTOM_A_CDC_RX3_B2_CTL__POR, - [TOMTOM_A_CDC_RX4_B2_CTL] = TOMTOM_A_CDC_RX4_B2_CTL__POR, - [TOMTOM_A_CDC_RX5_B2_CTL] = TOMTOM_A_CDC_RX5_B2_CTL__POR, - [TOMTOM_A_CDC_RX6_B2_CTL] = TOMTOM_A_CDC_RX6_B2_CTL__POR, - [TOMTOM_A_CDC_RX7_B2_CTL] = TOMTOM_A_CDC_RX7_B2_CTL__POR, - [TOMTOM_A_CDC_RX1_B3_CTL] = TOMTOM_A_CDC_RX1_B3_CTL__POR, - [TOMTOM_A_CDC_RX2_B3_CTL] = TOMTOM_A_CDC_RX2_B3_CTL__POR, - [TOMTOM_A_CDC_RX3_B3_CTL] = TOMTOM_A_CDC_RX3_B3_CTL__POR, - [TOMTOM_A_CDC_RX4_B3_CTL] = TOMTOM_A_CDC_RX4_B3_CTL__POR, - [TOMTOM_A_CDC_RX5_B3_CTL] = TOMTOM_A_CDC_RX5_B3_CTL__POR, - [TOMTOM_A_CDC_RX6_B3_CTL] = TOMTOM_A_CDC_RX6_B3_CTL__POR, - [TOMTOM_A_CDC_RX7_B3_CTL] = TOMTOM_A_CDC_RX7_B3_CTL__POR, - [TOMTOM_A_CDC_RX1_B4_CTL] = TOMTOM_A_CDC_RX1_B4_CTL__POR, - [TOMTOM_A_CDC_RX2_B4_CTL] = TOMTOM_A_CDC_RX2_B4_CTL__POR, - [TOMTOM_A_CDC_RX3_B4_CTL] = TOMTOM_A_CDC_RX3_B4_CTL__POR, - [TOMTOM_A_CDC_RX4_B4_CTL] = TOMTOM_A_CDC_RX4_B4_CTL__POR, - [TOMTOM_A_CDC_RX5_B4_CTL] = TOMTOM_A_CDC_RX5_B4_CTL__POR, - [TOMTOM_A_CDC_RX6_B4_CTL] = TOMTOM_A_CDC_RX6_B4_CTL__POR, - [TOMTOM_A_CDC_RX7_B4_CTL] = TOMTOM_A_CDC_RX7_B4_CTL__POR, - [TOMTOM_A_CDC_RX1_B5_CTL] = TOMTOM_A_CDC_RX1_B5_CTL__POR, - [TOMTOM_A_CDC_RX2_B5_CTL] = TOMTOM_A_CDC_RX2_B5_CTL__POR, - [TOMTOM_A_CDC_RX3_B5_CTL] = TOMTOM_A_CDC_RX3_B5_CTL__POR, - [TOMTOM_A_CDC_RX4_B5_CTL] = TOMTOM_A_CDC_RX4_B5_CTL__POR, - [TOMTOM_A_CDC_RX5_B5_CTL] = TOMTOM_A_CDC_RX5_B5_CTL__POR, - [TOMTOM_A_CDC_RX6_B5_CTL] = TOMTOM_A_CDC_RX6_B5_CTL__POR, - [TOMTOM_A_CDC_RX7_B5_CTL] = TOMTOM_A_CDC_RX7_B5_CTL__POR, - [TOMTOM_A_CDC_RX1_B6_CTL] = TOMTOM_A_CDC_RX1_B6_CTL__POR, - [TOMTOM_A_CDC_RX2_B6_CTL] = TOMTOM_A_CDC_RX2_B6_CTL__POR, - [TOMTOM_A_CDC_RX3_B6_CTL] = TOMTOM_A_CDC_RX3_B6_CTL__POR, - [TOMTOM_A_CDC_RX4_B6_CTL] = TOMTOM_A_CDC_RX4_B6_CTL__POR, - [TOMTOM_A_CDC_RX5_B6_CTL] = 
TOMTOM_A_CDC_RX5_B6_CTL__POR, - [TOMTOM_A_CDC_RX6_B6_CTL] = TOMTOM_A_CDC_RX6_B6_CTL__POR, - [TOMTOM_A_CDC_RX7_B6_CTL] = TOMTOM_A_CDC_RX7_B6_CTL__POR, - [TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL] = - TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR, - [TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL] = - TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR, - [TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL] = - TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR, - [TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL] = - TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR, - [TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL] = - TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR, - [TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL] = - TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR, - [TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL] = - TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR, - [TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL] = - TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR, - [TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL] = - TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR, - [TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL] = - TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR, - [TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL] = - TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR, - [TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL] = - TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR, - [TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL] = - TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR, - [TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL] = - TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR, - [TOMTOM_A_CDC_VBAT_CFG] = TOMTOM_A_CDC_VBAT_CFG__POR, - [TOMTOM_A_CDC_VBAT_ADC_CAL1] = TOMTOM_A_CDC_VBAT_ADC_CAL1__POR, - [TOMTOM_A_CDC_VBAT_ADC_CAL2] = TOMTOM_A_CDC_VBAT_ADC_CAL2__POR, - [TOMTOM_A_CDC_VBAT_ADC_CAL3] = TOMTOM_A_CDC_VBAT_ADC_CAL3__POR, - [TOMTOM_A_CDC_VBAT_PK_EST1] = TOMTOM_A_CDC_VBAT_PK_EST1__POR, - [TOMTOM_A_CDC_VBAT_PK_EST2] = TOMTOM_A_CDC_VBAT_PK_EST2__POR, - [TOMTOM_A_CDC_VBAT_PK_EST3] = TOMTOM_A_CDC_VBAT_PK_EST3__POR, - [TOMTOM_A_CDC_VBAT_RF_PROC1] = TOMTOM_A_CDC_VBAT_RF_PROC1__POR, - [TOMTOM_A_CDC_VBAT_RF_PROC2] = TOMTOM_A_CDC_VBAT_RF_PROC2__POR, - [TOMTOM_A_CDC_VBAT_TAC1] = TOMTOM_A_CDC_VBAT_TAC1__POR, - [TOMTOM_A_CDC_VBAT_TAC2] = TOMTOM_A_CDC_VBAT_TAC2__POR, - [TOMTOM_A_CDC_VBAT_TAC3] = TOMTOM_A_CDC_VBAT_TAC3__POR, - [TOMTOM_A_CDC_VBAT_TAC4] = TOMTOM_A_CDC_VBAT_TAC4__POR, - [TOMTOM_A_CDC_VBAT_GAIN_UPD1] = TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR, - [TOMTOM_A_CDC_VBAT_GAIN_UPD2] = TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR, - [TOMTOM_A_CDC_VBAT_GAIN_UPD3] = TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR, - [TOMTOM_A_CDC_VBAT_GAIN_UPD4] = TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR, - [TOMTOM_A_CDC_VBAT_DEBUG1] = TOMTOM_A_CDC_VBAT_DEBUG1__POR, - [TOMTOM_A_CDC_VBAT_GAIN_UPD_MON] = TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR, - [TOMTOM_A_CDC_VBAT_GAIN_MON_VAL] = TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR, - [TOMTOM_A_CDC_CLK_ANC_RESET_CTL] = TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR, - [TOMTOM_A_CDC_CLK_RX_RESET_CTL] = TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR, - [TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL] = - TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR, - [TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL] = - TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR, - [TOMTOM_A_CDC_CLK_RX_I2S_CTL] = TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR, - [TOMTOM_A_CDC_CLK_TX_I2S_CTL] = TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR, - [TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL] = - TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR, - [TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL] = - TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR, - [TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL] = - TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR, - [TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL] = - TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR, - [TOMTOM_A_CDC_CLK_OTHR_CTL] = TOMTOM_A_CDC_CLK_OTHR_CTL__POR, - [TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL] = - TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR, - [TOMTOM_A_CDC_CLK_RX_B1_CTL] = TOMTOM_A_CDC_CLK_RX_B1_CTL__POR, - [TOMTOM_A_CDC_CLK_RX_B2_CTL] = 
TOMTOM_A_CDC_CLK_RX_B2_CTL__POR, - [TOMTOM_A_CDC_CLK_MCLK_CTL] = TOMTOM_A_CDC_CLK_MCLK_CTL__POR, - [TOMTOM_A_CDC_CLK_PDM_CTL] = TOMTOM_A_CDC_CLK_PDM_CTL__POR, - [TOMTOM_A_CDC_CLK_SD_CTL] = TOMTOM_A_CDC_CLK_SD_CTL__POR, - [TOMTOM_A_CDC_CLSH_B1_CTL] = TOMTOM_A_CDC_CLSH_B1_CTL__POR, - [TOMTOM_A_CDC_CLSH_B2_CTL] = TOMTOM_A_CDC_CLSH_B2_CTL__POR, - [TOMTOM_A_CDC_CLSH_B3_CTL] = TOMTOM_A_CDC_CLSH_B3_CTL__POR, - [TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS] = - TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR, - [TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD] = - TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR, - [TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD] = - TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR, - [TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD] = - TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR, - [TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD] = - TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR, - [TOMTOM_A_CDC_CLSH_K_ADDR] = TOMTOM_A_CDC_CLSH_K_ADDR__POR, - [TOMTOM_A_CDC_CLSH_K_DATA] = TOMTOM_A_CDC_CLSH_K_DATA__POR, - [TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L] = - TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR, - [TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U] = - TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR, - [TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L] = - TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR, - [TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U] = - TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR, - [TOMTOM_A_CDC_CLSH_V_PA_HD_EAR] = TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR, - [TOMTOM_A_CDC_CLSH_V_PA_HD_HPH] = TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR, - [TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR] = TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR, - [TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH] = TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR, - [TOMTOM_A_CDC_IIR1_GAIN_B1_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR, - [TOMTOM_A_CDC_IIR2_GAIN_B1_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR, - [TOMTOM_A_CDC_IIR1_GAIN_B2_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR, - [TOMTOM_A_CDC_IIR2_GAIN_B2_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR, - [TOMTOM_A_CDC_IIR1_GAIN_B3_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR, - [TOMTOM_A_CDC_IIR2_GAIN_B3_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR, - [TOMTOM_A_CDC_IIR1_GAIN_B4_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR, - [TOMTOM_A_CDC_IIR2_GAIN_B4_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR, - [TOMTOM_A_CDC_IIR1_GAIN_B5_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR, - [TOMTOM_A_CDC_IIR2_GAIN_B5_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR, - [TOMTOM_A_CDC_IIR1_GAIN_B6_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR, - [TOMTOM_A_CDC_IIR2_GAIN_B6_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR, - [TOMTOM_A_CDC_IIR1_GAIN_B7_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR, - [TOMTOM_A_CDC_IIR2_GAIN_B7_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR, - [TOMTOM_A_CDC_IIR1_GAIN_B8_CTL] = TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR, - [TOMTOM_A_CDC_IIR2_GAIN_B8_CTL] = TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR, - [TOMTOM_A_CDC_IIR1_CTL] = TOMTOM_A_CDC_IIR1_CTL__POR, - [TOMTOM_A_CDC_IIR2_CTL] = TOMTOM_A_CDC_IIR2_CTL__POR, - [TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL] = - TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR, - [TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL] = - TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR, - [TOMTOM_A_CDC_IIR1_COEF_B1_CTL] = TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR, - [TOMTOM_A_CDC_IIR2_COEF_B1_CTL] = TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR, - [TOMTOM_A_CDC_IIR1_COEF_B2_CTL] = TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR, - [TOMTOM_A_CDC_IIR2_COEF_B2_CTL] = TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR, - [TOMTOM_A_CDC_TOP_GAIN_UPDATE] = TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR, - [TOMTOM_A_CDC_PA_RAMP_B1_CTL] = TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR, - [TOMTOM_A_CDC_PA_RAMP_B2_CTL] = TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR, - [TOMTOM_A_CDC_PA_RAMP_B3_CTL] = TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR, - 
[TOMTOM_A_CDC_PA_RAMP_B4_CTL] = TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR, - [TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL] = - TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR, - [TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL] = - TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR, - [TOMTOM_A_CDC_COMP0_B1_CTL] = TOMTOM_A_CDC_COMP0_B1_CTL__POR, - [TOMTOM_A_CDC_COMP1_B1_CTL] = TOMTOM_A_CDC_COMP1_B1_CTL__POR, - [TOMTOM_A_CDC_COMP2_B1_CTL] = TOMTOM_A_CDC_COMP2_B1_CTL__POR, - [TOMTOM_A_CDC_COMP0_B2_CTL] = TOMTOM_A_CDC_COMP0_B2_CTL__POR, - [TOMTOM_A_CDC_COMP1_B2_CTL] = TOMTOM_A_CDC_COMP1_B2_CTL__POR, - [TOMTOM_A_CDC_COMP2_B2_CTL] = TOMTOM_A_CDC_COMP2_B2_CTL__POR, - [TOMTOM_A_CDC_COMP0_B3_CTL] = TOMTOM_A_CDC_COMP0_B3_CTL__POR, - [TOMTOM_A_CDC_COMP1_B3_CTL] = TOMTOM_A_CDC_COMP1_B3_CTL__POR, - [TOMTOM_A_CDC_COMP2_B3_CTL] = TOMTOM_A_CDC_COMP2_B3_CTL__POR, - [TOMTOM_A_CDC_COMP0_B4_CTL] = TOMTOM_A_CDC_COMP0_B4_CTL__POR, - [TOMTOM_A_CDC_COMP1_B4_CTL] = TOMTOM_A_CDC_COMP1_B4_CTL__POR, - [TOMTOM_A_CDC_COMP2_B4_CTL] = TOMTOM_A_CDC_COMP2_B4_CTL__POR, - [TOMTOM_A_CDC_COMP0_B5_CTL] = TOMTOM_A_CDC_COMP0_B5_CTL__POR, - [TOMTOM_A_CDC_COMP1_B5_CTL] = TOMTOM_A_CDC_COMP1_B5_CTL__POR, - [TOMTOM_A_CDC_COMP2_B5_CTL] = TOMTOM_A_CDC_COMP2_B5_CTL__POR, - [TOMTOM_A_CDC_COMP0_B6_CTL] = TOMTOM_A_CDC_COMP0_B6_CTL__POR, - [TOMTOM_A_CDC_COMP1_B6_CTL] = TOMTOM_A_CDC_COMP1_B6_CTL__POR, - [TOMTOM_A_CDC_COMP2_B6_CTL] = TOMTOM_A_CDC_COMP2_B6_CTL__POR, - [TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS] = - TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR, - [TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS] = - TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR, - [TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS] = - TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR, - [TOMTOM_A_CDC_COMP0_FS_CFG] = TOMTOM_A_CDC_COMP0_FS_CFG__POR, - [TOMTOM_A_CDC_COMP1_FS_CFG] = TOMTOM_A_CDC_COMP1_FS_CFG__POR, - [TOMTOM_A_CDC_COMP2_FS_CFG] = TOMTOM_A_CDC_COMP2_FS_CFG__POR, - [TOMTOM_A_CDC_CONN_RX1_B1_CTL] = TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_RX1_B2_CTL] = TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_RX1_B3_CTL] = TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR, - [TOMTOM_A_CDC_CONN_RX2_B1_CTL] = TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_RX2_B2_CTL] = TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_RX2_B3_CTL] = TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR, - [TOMTOM_A_CDC_CONN_RX3_B1_CTL] = TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_RX3_B2_CTL] = TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_RX4_B1_CTL] = TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_RX4_B2_CTL] = TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_RX5_B1_CTL] = TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_RX5_B2_CTL] = TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_RX6_B1_CTL] = TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_RX6_B2_CTL] = TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_RX7_B1_CTL] = TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_RX7_B2_CTL] = TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_RX7_B3_CTL] = TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR, - [TOMTOM_A_CDC_CONN_ANC_B1_CTL] = TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_ANC_B2_CTL] = TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_B1_CTL] = TOMTOM_A_CDC_CONN_TX_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_B2_CTL] = TOMTOM_A_CDC_CONN_TX_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_B3_CTL] = TOMTOM_A_CDC_CONN_TX_B3_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_B4_CTL] = TOMTOM_A_CDC_CONN_TX_B4_CTL__POR, - [TOMTOM_A_CDC_CONN_EQ1_B1_CTL] = TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_EQ1_B2_CTL] = 
TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_EQ1_B3_CTL] = TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR, - [TOMTOM_A_CDC_CONN_EQ1_B4_CTL] = TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR, - [TOMTOM_A_CDC_CONN_EQ2_B1_CTL] = TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_EQ2_B2_CTL] = TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_EQ2_B3_CTL] = TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR, - [TOMTOM_A_CDC_CONN_EQ2_B4_CTL] = TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR, - [TOMTOM_A_CDC_CONN_SRC1_B1_CTL] = TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_SRC1_B2_CTL] = TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_SRC2_B1_CTL] = TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_SRC2_B2_CTL] = TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B1_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B2_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B3_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B4_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B5_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B6_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B7_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B8_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B9_CTL] = TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B10_CTL] = - TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR, - [TOMTOM_A_CDC_CONN_TX_SB_B11_CTL] = - TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR, - [TOMTOM_A_CDC_CONN_RX_SB_B1_CTL] = TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR, - [TOMTOM_A_CDC_CONN_RX_SB_B2_CTL] = TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR, - [TOMTOM_A_CDC_CONN_CLSH_CTL] = TOMTOM_A_CDC_CONN_CLSH_CTL__POR, - [TOMTOM_A_CDC_CONN_MISC] = TOMTOM_A_CDC_CONN_MISC__POR, - [TOMTOM_A_CDC_CONN_RX8_B1_CTL] = TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR, - [TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING] = - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR, - [TOMTOM_A_CDC_MBHC_EN_CTL] = TOMTOM_A_CDC_MBHC_EN_CTL__POR, - [TOMTOM_A_CDC_MBHC_FIR_B1_CFG] = TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR, - [TOMTOM_A_CDC_MBHC_FIR_B2_CFG] = TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR, - [TOMTOM_A_CDC_MBHC_TIMER_B1_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR, - [TOMTOM_A_CDC_MBHC_TIMER_B2_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR, - [TOMTOM_A_CDC_MBHC_TIMER_B3_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR, - [TOMTOM_A_CDC_MBHC_TIMER_B4_CTL] = 
TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR, - [TOMTOM_A_CDC_MBHC_TIMER_B5_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR, - [TOMTOM_A_CDC_MBHC_TIMER_B6_CTL] = TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR, - [TOMTOM_A_CDC_MBHC_B1_STATUS] = TOMTOM_A_CDC_MBHC_B1_STATUS__POR, - [TOMTOM_A_CDC_MBHC_B2_STATUS] = TOMTOM_A_CDC_MBHC_B2_STATUS__POR, - [TOMTOM_A_CDC_MBHC_B3_STATUS] = TOMTOM_A_CDC_MBHC_B3_STATUS__POR, - [TOMTOM_A_CDC_MBHC_B4_STATUS] = TOMTOM_A_CDC_MBHC_B4_STATUS__POR, - [TOMTOM_A_CDC_MBHC_B5_STATUS] = TOMTOM_A_CDC_MBHC_B5_STATUS__POR, - [TOMTOM_A_CDC_MBHC_B1_CTL] = TOMTOM_A_CDC_MBHC_B1_CTL__POR, - [TOMTOM_A_CDC_MBHC_B2_CTL] = TOMTOM_A_CDC_MBHC_B2_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B1_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B2_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B3_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B4_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B5_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B6_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B7_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B8_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B9_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B10_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B11_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR, - [TOMTOM_A_CDC_MBHC_VOLT_B12_CTL] = TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR, - [TOMTOM_A_CDC_MBHC_CLK_CTL] = TOMTOM_A_CDC_MBHC_CLK_CTL__POR, - [TOMTOM_A_CDC_MBHC_INT_CTL] = TOMTOM_A_CDC_MBHC_INT_CTL__POR, - [TOMTOM_A_CDC_MBHC_DEBUG_CTL] = TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR, - [TOMTOM_A_CDC_MBHC_SPARE] = TOMTOM_A_CDC_MBHC_SPARE__POR, - [TOMTOM_A_CDC_RX8_B1_CTL] = TOMTOM_A_CDC_RX8_B1_CTL__POR, - [TOMTOM_A_CDC_RX8_B2_CTL] = TOMTOM_A_CDC_RX8_B2_CTL__POR, - [TOMTOM_A_CDC_RX8_B3_CTL] = TOMTOM_A_CDC_RX8_B3_CTL__POR, - [TOMTOM_A_CDC_RX8_B4_CTL] = TOMTOM_A_CDC_RX8_B4_CTL__POR, - [TOMTOM_A_CDC_RX8_B5_CTL] = TOMTOM_A_CDC_RX8_B5_CTL__POR, - [TOMTOM_A_CDC_RX8_B6_CTL] = TOMTOM_A_CDC_RX8_B6_CTL__POR, - [TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL] = - TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR, - [TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL] = - TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0] = - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1] = - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2] = - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3] = - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4] = - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5] = - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6] = - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR, - [TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7] = - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR, - [TOMTOM_A_CDC_BOOST_MODE_CTL] = TOMTOM_A_CDC_BOOST_MODE_CTL__POR, - [TOMTOM_A_CDC_BOOST_THRESHOLD] = TOMTOM_A_CDC_BOOST_THRESHOLD__POR, - [TOMTOM_A_CDC_BOOST_TAP_SEL] = TOMTOM_A_CDC_BOOST_TAP_SEL__POR, - [TOMTOM_A_CDC_BOOST_HOLD_TIME] = TOMTOM_A_CDC_BOOST_HOLD_TIME__POR, - [TOMTOM_A_CDC_BOOST_TRGR_EN] = TOMTOM_A_CDC_BOOST_TRGR_EN__POR, -}; diff --git a/sound/soc/codecs/wcd9330.c b/sound/soc/codecs/wcd9330.c deleted file mode 100644 index 4278e36ad174..000000000000 --- a/sound/soc/codecs/wcd9330.c +++ /dev/null @@ -1,9113 +0,0 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "wcd9330.h" -#include "wcd9xxx-resmgr.h" -#include "wcd9xxx-common.h" -#include "wcdcal-hwdep.h" -#include "wcd_cpe_core.h" - -enum { - BUS_DOWN, - ADC1_TXFE, - ADC2_TXFE, - ADC3_TXFE, - ADC4_TXFE, - ADC5_TXFE, - ADC6_TXFE, - HPH_DELAY, -}; - -#define TOMTOM_MAD_SLIMBUS_TX_PORT 12 -#define TOMTOM_MAD_AUDIO_FIRMWARE_PATH "wcd9320/wcd9320_mad_audio.bin" -#define TOMTOM_VALIDATE_RX_SBPORT_RANGE(port) ((port >= 16) && (port <= 23)) -#define TOMTOM_VALIDATE_TX_SBPORT_RANGE(port) ((port >= 0) && (port <= 15)) -#define TOMTOM_CONVERT_RX_SBPORT_ID(port) (port - 16) /* RX1 port ID = 0 */ -#define TOMTOM_BIT_ADJ_SHIFT_PORT1_6 4 -#define TOMTOM_BIT_ADJ_SHIFT_PORT7_10 5 - -#define TOMTOM_HPH_PA_SETTLE_COMP_ON 10000 -#define TOMTOM_HPH_PA_SETTLE_COMP_OFF 13000 -#define TOMTOM_HPH_PA_RAMP_DELAY 30000 - -#define TOMTOM_SVASS_INT_STATUS_RCO_WDOG 0x20 -#define TOMTOM_SVASS_INT_STATUS_WDOG_BITE 0x02 - -/* Add any SVA IRQs that are to be treated as FATAL */ -#define TOMTOM_CPE_FATAL_IRQS \ - (TOMTOM_SVASS_INT_STATUS_RCO_WDOG | \ - TOMTOM_SVASS_INT_STATUS_WDOG_BITE) - -#define DAPM_MICBIAS2_EXTERNAL_STANDALONE "MIC BIAS2 External Standalone" - -/* RX_HPH_CNP_WG_TIME increases by 0.24ms */ -#define TOMTOM_WG_TIME_FACTOR_US 240 - -#define RX8_PATH 8 -#define HPH_PA_ENABLE true -#define HPH_PA_DISABLE false - -#define SLIM_BW_CLK_GEAR_9 6200000 -#define SLIM_BW_UNVOTE 0 - -static int cpe_debug_mode; -module_param(cpe_debug_mode, int, 0664); -MODULE_PARM_DESC(cpe_debug_mode, "boot cpe in debug mode"); - -static atomic_t kp_tomtom_priv; - -static int high_perf_mode; -module_param(high_perf_mode, int, 0664); -MODULE_PARM_DESC(high_perf_mode, "enable/disable class AB config for hph"); - -static struct afe_param_slimbus_slave_port_cfg tomtom_slimbus_slave_port_cfg = { - .minor_version = 1, - .slimbus_dev_id = AFE_SLIMBUS_DEVICE_1, - .slave_dev_pgd_la = 0, - .slave_dev_intfdev_la = 0, - .bit_width = 16, - .data_format = 0, - .num_channels = 1 -}; - -static struct afe_param_cdc_reg_cfg audio_reg_cfg[] = { - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_MAD_MAIN_CTL_1), - HW_MAD_AUDIO_ENABLE, 0x1, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_MAD_AUDIO_CTL_3), - HW_MAD_AUDIO_SLEEP_TIME, 0xF, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_MAD_AUDIO_CTL_4), - HW_MAD_TX_AUDIO_SWITCH_OFF, 0x1, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR_MODE), - MAD_AUDIO_INT_DEST_SELECT_REG, 0x4, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_MASK0), - MAD_AUDIO_INT_MASK_REG, 0x2, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_STATUS0), - MAD_AUDIO_INT_STATUS_REG, 0x2, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_CLEAR0), - MAD_AUDIO_INT_CLEAR_REG, 0x2, 8, 0 - 
}, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_TX_BASE), - SB_PGD_PORT_TX_WATERMARK_N, 0x1E, 8, 0x1 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_TX_BASE), - SB_PGD_PORT_TX_ENABLE_N, 0x1, 8, 0x1 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_RX_BASE), - SB_PGD_PORT_RX_WATERMARK_N, 0x1E, 8, 0x1 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_SB_PGD_PORT_RX_BASE), - SB_PGD_PORT_RX_ENABLE_N, 0x1, 8, 0x1 - }, - { 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_ANC1_IIR_B1_CTL), - AANC_FF_GAIN_ADAPTIVE, 0x4, 8, 0 - }, - { 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_ANC1_IIR_B1_CTL), - AANC_FFGAIN_ADAPTIVE_EN, 0x8, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_CDC_ANC1_GAIN_CTL), - AANC_GAIN_CONTROL, 0xFF, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_MASK0), - MAD_CLIP_INT_MASK_REG, 0x10, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_MASK0), - MAD2_CLIP_INT_MASK_REG, 0x20, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_STATUS0), - MAD_CLIP_INT_STATUS_REG, 0x10, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_STATUS0), - MAD2_CLIP_INT_STATUS_REG, 0x20, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_CLEAR0), - MAD_CLIP_INT_CLEAR_REG, 0x10, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + TOMTOM_A_INTR2_CLEAR0), - MAD2_CLIP_INT_CLEAR_REG, 0x20, 8, 0 - }, -}; - -static struct afe_param_cdc_reg_cfg clip_reg_cfg[] = { - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL), - SPKR_CLIP_PIPE_BANK_SEL, 0x3, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR_CLIPDET_VAL0), - SPKR_CLIPDET_VAL0, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR_CLIPDET_VAL1), - SPKR_CLIPDET_VAL1, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR_CLIPDET_VAL2), - SPKR_CLIPDET_VAL2, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR_CLIPDET_VAL3), - SPKR_CLIPDET_VAL3, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR_CLIPDET_VAL4), - SPKR_CLIPDET_VAL4, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR_CLIPDET_VAL5), - SPKR_CLIPDET_VAL5, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR_CLIPDET_VAL6), - SPKR_CLIPDET_VAL6, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR_CLIPDET_VAL7), - SPKR_CLIPDET_VAL7, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL), - SPKR2_CLIP_PIPE_BANK_SEL, 0x3, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0), - SPKR2_CLIPDET_VAL0, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1), - SPKR2_CLIPDET_VAL1, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2), - SPKR2_CLIPDET_VAL2, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3), - SPKR2_CLIPDET_VAL3, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4), - SPKR2_CLIPDET_VAL4, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5), - SPKR2_CLIPDET_VAL5, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET + - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6), - SPKR2_CLIPDET_VAL6, 0xff, 8, 0 - }, - { - 1, - (TOMTOM_REGISTER_START_OFFSET 
+ - TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7), - SPKR2_CLIPDET_VAL7, 0xff, 8, 0 - }, -}; - -static struct afe_param_cdc_reg_cfg_data tomtom_audio_reg_cfg = { - .num_registers = ARRAY_SIZE(audio_reg_cfg), - .reg_data = audio_reg_cfg, -}; - -static struct afe_param_cdc_reg_cfg_data tomtom_clip_reg_cfg = { - .num_registers = ARRAY_SIZE(clip_reg_cfg), - .reg_data = clip_reg_cfg, -}; - -static struct afe_param_id_cdc_aanc_version tomtom_cdc_aanc_version = { - .cdc_aanc_minor_version = AFE_API_VERSION_CDC_AANC_VERSION, - .aanc_hw_version = AANC_HW_BLOCK_VERSION_2, -}; - -static struct afe_param_id_clip_bank_sel clip_bank_sel = { - .minor_version = AFE_API_VERSION_CLIP_BANK_SEL_CFG, - .num_banks = AFE_CLIP_MAX_BANKS, - .bank_map = {0, 1, 2, 3}, -}; - -#define WCD9330_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\ - SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000 |\ - SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000) - -#define NUM_DECIMATORS 10 -#define NUM_INTERPOLATORS 8 -#define BITS_PER_REG 8 -#define TOMTOM_TX_PORT_NUMBER 16 -#define TOMTOM_RX_PORT_START_NUMBER 16 - -#define TOMTOM_I2S_MASTER_MODE_MASK 0x08 - -#define TOMTOM_SLIM_CLOSE_TIMEOUT 1000 -#define TOMTOM_SLIM_IRQ_OVERFLOW (1 << 0) -#define TOMTOM_SLIM_IRQ_UNDERFLOW (1 << 1) -#define TOMTOM_SLIM_IRQ_PORT_CLOSED (1 << 2) -#define TOMTOM_MCLK_CLK_12P288MHZ 12288000 -#define TOMTOM_MCLK_CLK_9P6MHZ 9600000 - -#define TOMTOM_FORMATS_S16_S24_LE (SNDRV_PCM_FMTBIT_S16_LE | \ - SNDRV_PCM_FORMAT_S24_LE) - -#define TOMTOM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE) - -#define TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 (TOMTOM_SLIM_PGD_PORT_INT_EN0 + 2) -#define TOMTOM_ZDET_BOX_CAR_AVG_LOOP_COUNT 1 -#define TOMTOM_ZDET_MUL_FACTOR_1X 7218 -#define TOMTOM_ZDET_MUL_FACTOR_10X (TOMTOM_ZDET_MUL_FACTOR_1X * 10) -#define TOMTOM_ZDET_MUL_FACTOR_100X (TOMTOM_ZDET_MUL_FACTOR_1X * 100) -#define TOMTOM_ZDET_ERROR_APPROX_MUL_FACTOR 655 -#define TOMTOM_ZDET_ERROR_APPROX_SHIFT 16 -#define TOMTOM_ZDET_ZONE_3_DEFAULT_VAL 1000000 - -enum { - AIF1_PB = 0, - AIF1_CAP, - AIF2_PB, - AIF2_CAP, - AIF3_PB, - AIF3_CAP, - AIF4_VIFEED, - AIF4_MAD_TX, - NUM_CODEC_DAIS, -}; - -enum { - RX_MIX1_INP_SEL_ZERO = 0, - RX_MIX1_INP_SEL_SRC1, - RX_MIX1_INP_SEL_SRC2, - RX_MIX1_INP_SEL_IIR1, - RX_MIX1_INP_SEL_IIR2, - RX_MIX1_INP_SEL_RX1, - RX_MIX1_INP_SEL_RX2, - RX_MIX1_INP_SEL_RX3, - RX_MIX1_INP_SEL_RX4, - RX_MIX1_INP_SEL_RX5, - RX_MIX1_INP_SEL_RX6, - RX_MIX1_INP_SEL_RX7, - RX_MIX1_INP_SEL_AUXRX, -}; -enum { - RX8_MIX1_INP_SEL_ZERO = 0, - RX8_MIX1_INP_SEL_IIR1, - RX8_MIX1_INP_SEL_IIR2, - RX8_MIX1_INP_SEL_RX1, - RX8_MIX1_INP_SEL_RX2, - RX8_MIX1_INP_SEL_RX3, - RX8_MIX1_INP_SEL_RX4, - RX8_MIX1_INP_SEL_RX5, - RX8_MIX1_INP_SEL_RX6, - RX8_MIX1_INP_SEL_RX7, - RX8_MIX1_INP_SEL_RX8, -}; - -#define TOMTOM_COMP_DIGITAL_GAIN_OFFSET 3 - -static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0); -static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1); -static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1); -static struct snd_soc_dai_driver tomtom_dai[]; -static const DECLARE_TLV_DB_SCALE(aux_pga_gain, 0, 2, 0); - -/* Codec supports 2 IIR filters */ -enum { - IIR1 = 0, - IIR2, - IIR_MAX, -}; -/* Codec supports 5 bands */ -enum { - BAND1 = 0, - BAND2, - BAND3, - BAND4, - BAND5, - BAND_MAX, -}; - -enum { - COMPANDER_0, - COMPANDER_1, - COMPANDER_2, - COMPANDER_MAX, -}; - -enum { - COMPANDER_FS_8KHZ = 0, - COMPANDER_FS_16KHZ, - COMPANDER_FS_32KHZ, - COMPANDER_FS_48KHZ, - COMPANDER_FS_96KHZ, - COMPANDER_FS_192KHZ, - COMPANDER_FS_MAX, -}; - -struct comp_sample_dependent_params { - u32 peak_det_timeout; - u32 rms_meter_div_fact; - 
u32 rms_meter_resamp_fact; -}; - -struct hpf_work { - struct tomtom_priv *tomtom; - u32 decimator; - u8 tx_hpf_cut_of_freq; - bool tx_hpf_bypass; - struct delayed_work dwork; -}; - -static struct hpf_work tx_hpf_work[NUM_DECIMATORS]; - -static const struct wcd9xxx_ch tomtom_rx_chs[TOMTOM_RX_MAX] = { - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER, 0), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 1, 1), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 2, 2), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 3, 3), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 4, 4), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 5, 5), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 6, 6), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 7, 7), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 8, 8), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 9, 9), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 10, 10), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 11, 11), - WCD9XXX_CH(TOMTOM_RX_PORT_START_NUMBER + 12, 12), -}; - -static const struct wcd9xxx_ch tomtom_tx_chs[TOMTOM_TX_MAX] = { - WCD9XXX_CH(0, 0), - WCD9XXX_CH(1, 1), - WCD9XXX_CH(2, 2), - WCD9XXX_CH(3, 3), - WCD9XXX_CH(4, 4), - WCD9XXX_CH(5, 5), - WCD9XXX_CH(6, 6), - WCD9XXX_CH(7, 7), - WCD9XXX_CH(8, 8), - WCD9XXX_CH(9, 9), - WCD9XXX_CH(10, 10), - WCD9XXX_CH(11, 11), - WCD9XXX_CH(12, 12), - WCD9XXX_CH(13, 13), - WCD9XXX_CH(14, 14), - WCD9XXX_CH(15, 15), -}; - -static const u32 vport_check_table[NUM_CODEC_DAIS] = { - 0, /* AIF1_PB */ - (1 << AIF2_CAP) | (1 << AIF3_CAP), /* AIF1_CAP */ - 0, /* AIF2_PB */ - (1 << AIF1_CAP) | (1 << AIF3_CAP), /* AIF2_CAP */ - 0, /* AIF3_PB */ - (1 << AIF1_CAP) | (1 << AIF2_CAP), /* AIF3_CAP */ -}; - -static const u32 vport_i2s_check_table[NUM_CODEC_DAIS] = { - 0, /* AIF1_PB */ - 0, /* AIF1_CAP */ - 0, /* AIF2_PB */ - 0, /* AIF2_CAP */ -}; - -/* - * Interrupt table for v3 corresponds to newer version - * codecs (wcd9330) - */ -static const struct intr_data wcd9330_intr_tbl[] = { - {WCD9XXX_IRQ_SLIMBUS, false}, - {WCD9XXX_IRQ_MBHC_INSERTION, true}, - {WCD9XXX_IRQ_MBHC_POTENTIAL, true}, - {WCD9XXX_IRQ_MBHC_RELEASE, true}, - {WCD9XXX_IRQ_MBHC_PRESS, true}, - {WCD9XXX_IRQ_MBHC_SHORT_TERM, true}, - {WCD9XXX_IRQ_MBHC_REMOVAL, true}, - {WCD9330_IRQ_MBHC_JACK_SWITCH, true}, - {WCD9XXX_IRQ_BG_PRECHARGE, false}, - {WCD9XXX_IRQ_PA1_STARTUP, false}, - {WCD9XXX_IRQ_PA2_STARTUP, false}, - {WCD9XXX_IRQ_PA3_STARTUP, false}, - {WCD9XXX_IRQ_PA4_STARTUP, false}, - {WCD9XXX_IRQ_PA5_STARTUP, false}, - {WCD9XXX_IRQ_MICBIAS1_PRECHARGE, false}, - {WCD9XXX_IRQ_MICBIAS2_PRECHARGE, false}, - {WCD9XXX_IRQ_MICBIAS3_PRECHARGE, false}, - {WCD9XXX_IRQ_HPH_PA_OCPL_FAULT, false}, - {WCD9XXX_IRQ_HPH_PA_OCPR_FAULT, false}, - {WCD9XXX_IRQ_EAR_PA_OCPL_FAULT, false}, - {WCD9XXX_IRQ_HPH_L_PA_STARTUP, false}, - {WCD9XXX_IRQ_HPH_R_PA_STARTUP, false}, - {WCD9320_IRQ_EAR_PA_STARTUP, false}, - {WCD9330_IRQ_SVASS_ERR_EXCEPTION, false}, - {WCD9330_IRQ_SVASS_ENGINE, true}, - {WCD9330_IRQ_MAD_AUDIO, false}, - {WCD9330_IRQ_MAD_BEACON, false}, - {WCD9330_IRQ_MAD_ULTRASOUND, false}, - {WCD9330_IRQ_SPEAKER1_CLIPPING, false}, - {WCD9330_IRQ_SPEAKER2_CLIPPING, false}, - {WCD9330_IRQ_VBAT_MONITOR_ATTACK, false}, - {WCD9330_IRQ_VBAT_MONITOR_RELEASE, false}, -}; - -struct tomtom_priv { - struct snd_soc_codec *codec; - u32 adc_count; - u32 rx_bias_count; - s32 dmic_1_2_clk_cnt; - s32 dmic_3_4_clk_cnt; - s32 dmic_5_6_clk_cnt; - s32 ldo_h_users; - s32 micb_2_users; - - u32 anc_slot; - bool anc_func; - - /* cal info for codec */ - struct fw_info *fw_data; - - /*track tomtom interface type*/ - u8 intf_type; - - /* num of slim ports required 
*/ - struct wcd9xxx_codec_dai_data dai[NUM_CODEC_DAIS]; - - /*compander*/ - int comp_enabled[COMPANDER_MAX]; - u32 comp_fs[COMPANDER_MAX]; - - /* Maintain the status of AUX PGA */ - int aux_pga_cnt; - u8 aux_l_gain; - u8 aux_r_gain; - - bool spkr_pa_widget_on; - struct regulator *spkdrv_reg; - struct regulator *spkdrv2_reg; - - bool mbhc_started; - - struct afe_param_cdc_slimbus_slave_cfg slimbus_slave_cfg; - - /* resmgr module */ - struct wcd9xxx_resmgr resmgr; - /* mbhc module */ - struct wcd9xxx_mbhc mbhc; - - /* class h specific data */ - struct wcd9xxx_clsh_cdc_data clsh_d; - - int (*machine_codec_event_cb)(struct snd_soc_codec *codec, - enum wcd9xxx_codec_event); - int (*codec_ext_clk_en_cb)(struct snd_soc_codec *codec, - int enable, bool dapm); - int (*codec_get_ext_clk_cnt)(void); - /* - * list used to save/restore registers at start and - * end of impedance measurement - */ - struct list_head reg_save_restore; - - /* handle to cpe core */ - struct wcd_cpe_core *cpe_core; - - /* UHQA (class AB) mode */ - u8 uhqa_mode; - - /* Multiplication factor used for impedance detection */ - int zdet_gain_mul_fact; - - /* to track the status */ - unsigned long status_mask; - - int ext_clk_users; - struct clk *wcd_ext_clk; - - /* Port values for Rx and Tx codec_dai */ - unsigned int rx_port_value; - unsigned int tx_port_value; - - struct mutex codec_mutex; -}; - -static const u32 comp_shift[] = { - 4, /* Compander 0's clock source is on interpolator 7 */ - 0, - 2, -}; - -static const int comp_rx_path[] = { - COMPANDER_1, - COMPANDER_1, - COMPANDER_2, - COMPANDER_2, - COMPANDER_2, - COMPANDER_2, - COMPANDER_0, - COMPANDER_0, - COMPANDER_MAX, -}; - -static const struct comp_sample_dependent_params comp_samp_params[] = { - { - /* 8 Khz */ - .peak_det_timeout = 0x06, - .rms_meter_div_fact = 0x09, - .rms_meter_resamp_fact = 0x06, - }, - { - /* 16 Khz */ - .peak_det_timeout = 0x07, - .rms_meter_div_fact = 0x0A, - .rms_meter_resamp_fact = 0x0C, - }, - { - /* 32 Khz */ - .peak_det_timeout = 0x08, - .rms_meter_div_fact = 0x0B, - .rms_meter_resamp_fact = 0x1E, - }, - { - /* 48 Khz */ - .peak_det_timeout = 0x09, - .rms_meter_div_fact = 0x0B, - .rms_meter_resamp_fact = 0x28, - }, - { - /* 96 Khz */ - .peak_det_timeout = 0x0A, - .rms_meter_div_fact = 0x0C, - .rms_meter_resamp_fact = 0x50, - }, - { - /* 192 Khz */ - .peak_det_timeout = 0x0B, - .rms_meter_div_fact = 0xC, - .rms_meter_resamp_fact = 0xA0, - }, -}; - -static unsigned short rx_digital_gain_reg[] = { - TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL, -}; - - -static unsigned short tx_digital_gain_reg[] = { - TOMTOM_A_CDC_TX1_VOL_CTL_GAIN, - TOMTOM_A_CDC_TX2_VOL_CTL_GAIN, - TOMTOM_A_CDC_TX3_VOL_CTL_GAIN, - TOMTOM_A_CDC_TX4_VOL_CTL_GAIN, - TOMTOM_A_CDC_TX5_VOL_CTL_GAIN, - TOMTOM_A_CDC_TX6_VOL_CTL_GAIN, - TOMTOM_A_CDC_TX7_VOL_CTL_GAIN, - TOMTOM_A_CDC_TX8_VOL_CTL_GAIN, - TOMTOM_A_CDC_TX9_VOL_CTL_GAIN, - TOMTOM_A_CDC_TX10_VOL_CTL_GAIN, -}; - -/* - * wcd9330_get_codec_info: Get codec specific information - * - * @wcd9xxx: pointer to wcd9xxx structure - * @wcd_type: pointer to wcd9xxx_codec_type structure - * - * Returns 0 for success or negative error code for failure - */ -int wcd9330_get_codec_info(struct wcd9xxx *wcd9xxx, - struct wcd9xxx_codec_type *wcd_type) -{ - u16 id_minor, id_major; - struct regmap *wcd_regmap; - int 
rc, val, version = 0; - - if (!wcd9xxx || !wcd_type) - return -EINVAL; - - if (!wcd9xxx->regmap) { - dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n", - __func__); - return -EINVAL; - } - wcd_regmap = wcd9xxx->regmap; - rc = regmap_bulk_read(wcd_regmap, TOMTOM_A_CHIP_ID_BYTE_0, - (u8 *)&id_minor, sizeof(u16)); - if (rc) - return -EINVAL; - - rc = regmap_bulk_read(wcd_regmap, TOMTOM_A_CHIP_ID_BYTE_2, - (u8 *)&id_major, sizeof(u16)); - if (rc) - return -EINVAL; - - dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n", - __func__, id_major, id_minor); - - if (id_minor == cpu_to_le16(0x1)) - version = 2; - else if (id_minor == cpu_to_le16(0x0)) - version = 1; - else - dev_err(wcd9xxx->dev, "%s: wcd9330 version unknown (major 0x%x, minor 0x%x)\n", - __func__, id_major, id_minor); - - /* Fill codec type info */ - wcd_type->id_major = id_major; - wcd_type->id_minor = id_minor; - wcd_type->num_irqs = WCD9330_NUM_IRQS; - wcd_type->version = version; - wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1; - wcd_type->i2c_chip_status = 0x01; - wcd_type->intr_tbl = wcd9330_intr_tbl; - wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9330_intr_tbl); - - wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] = - TOMTOM_A_INTR1_STATUS0; - wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] = - TOMTOM_A_INTR1_CLEAR0; - wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] = - TOMTOM_A_INTR1_MASK0; - wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] = - TOMTOM_A_INTR1_LEVEL0; - wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] = - TOMTOM_A_INTR_MODE; - - return rc; -} -EXPORT_SYMBOL(wcd9330_get_codec_info); - -/* - * wcd9330_bringdown: Bringdown WCD Codec - * - * @wcd9xxx: Pointer to wcd9xxx structure - * - * Returns 0 for success or negative error code for failure - */ -int wcd9330_bringdown(struct wcd9xxx *wcd9xxx) -{ - if (!wcd9xxx || !wcd9xxx->regmap) - return -EINVAL; - - regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x7); - regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x6); - regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0xe); - regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x8); - - return 0; -} -EXPORT_SYMBOL(wcd9330_bringdown); - -/* - * wcd9330_bringup: Bring up WCD Codec - * - * @wcd9xxx: Pointer to wcd9xxx structure - * - * Returns 0 for success or negative error code for failure - */ -int wcd9330_bringup(struct wcd9xxx *wcd9xxx) -{ - if (!wcd9xxx || !wcd9xxx->regmap) - return -EINVAL; - - regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x4); - regmap_write(wcd9xxx->regmap, TOMTOM_A_CDC_CTL, 0x0); - /* wait for 5ms after codec reset for it to complete */ - usleep_range(5000, 5100); - regmap_write(wcd9xxx->regmap, TOMTOM_A_CDC_CTL, 0x1); - regmap_write(wcd9xxx->regmap, TOMTOM_A_LEAKAGE_CTL, 0x3); - regmap_write(wcd9xxx->regmap, TOMTOM_A_CDC_CTL, 0x3); - - return 0; -} -EXPORT_SYMBOL(wcd9330_bringup); - -int tomtom_enable_qfuse_sensing(struct snd_soc_codec *codec) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - if (tomtom->wcd_ext_clk) - tomtom_codec_mclk_enable(codec, true, false); - - snd_soc_write(codec, TOMTOM_A_QFUSE_CTL, 0x03); - /* - * 5ms sleep required after enabling qfuse control - * before checking the status. 
- */ - usleep_range(5000, 5500); - if ((snd_soc_read(codec, TOMTOM_A_QFUSE_STATUS) & (0x03)) != 0x03) - WARN(1, "%s: Qfuse sense is not complete\n", __func__); - - if (tomtom->wcd_ext_clk) - tomtom_codec_mclk_enable(codec, false, false); - return 0; -} -EXPORT_SYMBOL(tomtom_enable_qfuse_sensing); - -static int tomtom_get_sample_rate(struct snd_soc_codec *codec, int path) -{ - if (path == RX8_PATH) - return snd_soc_read(codec, TOMTOM_A_CDC_RX8_B5_CTL); - else - return snd_soc_read(codec, - (TOMTOM_A_CDC_RX1_B5_CTL + 8 * (path - 1))); -} - -static int tomtom_compare_bit_format(struct snd_soc_codec *codec, - int bit_format) -{ - int i = 0; - int ret = 0; - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - - for (i = 0; i < NUM_CODEC_DAIS; i++) { - if (tomtom_p->dai[i].bit_width == bit_format) { - ret = 1; - break; - } - } - return ret; -} - -static int tomtom_update_uhqa_mode(struct snd_soc_codec *codec, int path) -{ - int ret = 0; - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - - /* UHQA path has fs=192KHz & bit=24 bit */ - if (((tomtom_get_sample_rate(codec, path) & 0xE0) == 0xA0) && - (tomtom_compare_bit_format(codec, 24))) { - tomtom_p->uhqa_mode = 1; - } else { - tomtom_p->uhqa_mode = 0; - } - dev_dbg(codec->dev, "%s: uhqa_mode=%d", __func__, tomtom_p->uhqa_mode); - return ret; -} - -static int tomtom_get_anc_slot(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - ucontrol->value.integer.value[0] = tomtom->anc_slot; - return 0; -} - -static int tomtom_put_anc_slot(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - tomtom->anc_slot = ucontrol->value.integer.value[0]; - return 0; -} - -static int tomtom_get_anc_func(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - ucontrol->value.integer.value[0] = (tomtom->anc_func == true ? 1 : 0); - return 0; -} - -static int tomtom_put_anc_func(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - struct snd_soc_dapm_context *dapm = - snd_soc_codec_get_dapm(codec); - - mutex_lock(&tomtom->codec_mutex); - tomtom->anc_func = (!ucontrol->value.integer.value[0] ? 
false : true); - - dev_dbg(codec->dev, "%s: anc_func %x", __func__, tomtom->anc_func); - - if (tomtom->anc_func == true) { - snd_soc_dapm_enable_pin(dapm, "ANC HPHR"); - snd_soc_dapm_enable_pin(dapm, "ANC HPHL"); - snd_soc_dapm_enable_pin(dapm, "ANC HEADPHONE"); - snd_soc_dapm_enable_pin(dapm, "ANC EAR PA"); - snd_soc_dapm_enable_pin(dapm, "ANC EAR"); - snd_soc_dapm_disable_pin(dapm, "HPHR"); - snd_soc_dapm_disable_pin(dapm, "HPHL"); - snd_soc_dapm_disable_pin(dapm, "HEADPHONE"); - snd_soc_dapm_disable_pin(dapm, "EAR PA"); - snd_soc_dapm_disable_pin(dapm, "EAR"); - } else { - snd_soc_dapm_disable_pin(dapm, "ANC HPHR"); - snd_soc_dapm_disable_pin(dapm, "ANC HPHL"); - snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE"); - snd_soc_dapm_disable_pin(dapm, "ANC EAR PA"); - snd_soc_dapm_disable_pin(dapm, "ANC EAR"); - snd_soc_dapm_enable_pin(dapm, "HPHR"); - snd_soc_dapm_enable_pin(dapm, "HPHL"); - snd_soc_dapm_enable_pin(dapm, "HEADPHONE"); - snd_soc_dapm_enable_pin(dapm, "EAR PA"); - snd_soc_dapm_enable_pin(dapm, "EAR"); - } - mutex_unlock(&tomtom->codec_mutex); - snd_soc_dapm_sync(dapm); - return 0; -} - -static int tomtom_get_iir_enable_audio_mixer( - struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - int iir_idx = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->reg; - int band_idx = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->shift; - - ucontrol->value.integer.value[0] = - (snd_soc_read(codec, (TOMTOM_A_CDC_IIR1_CTL + 16 * iir_idx)) & - (1 << band_idx)) != 0; - - pr_debug("%s: IIR #%d band #%d enable %d\n", __func__, - iir_idx, band_idx, - (uint32_t)ucontrol->value.integer.value[0]); - return 0; -} - -static int tomtom_put_iir_enable_audio_mixer( - struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - int iir_idx = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->reg; - int band_idx = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->shift; - int value = ucontrol->value.integer.value[0]; - - /* Mask first 5 bits, 6-8 are reserved */ - snd_soc_update_bits(codec, (TOMTOM_A_CDC_IIR1_CTL + 16 * iir_idx), - (1 << band_idx), (value << band_idx)); - - pr_debug("%s: IIR #%d band #%d enable %d\n", __func__, - iir_idx, band_idx, - ((snd_soc_read(codec, (TOMTOM_A_CDC_IIR1_CTL + 16 * iir_idx)) & - (1 << band_idx)) != 0)); - return 0; -} -static uint32_t get_iir_band_coeff(struct snd_soc_codec *codec, - int iir_idx, int band_idx, - int coeff_idx) -{ - uint32_t value = 0; - - /* Address does not automatically update if reading */ - snd_soc_write(codec, - (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx), - ((band_idx * BAND_MAX + coeff_idx) - * sizeof(uint32_t)) & 0x7F); - - value |= snd_soc_read(codec, - (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)); - - snd_soc_write(codec, - (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx), - ((band_idx * BAND_MAX + coeff_idx) - * sizeof(uint32_t) + 1) & 0x7F); - - value |= (snd_soc_read(codec, - (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)) << 8); - - snd_soc_write(codec, - (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx), - ((band_idx * BAND_MAX + coeff_idx) - * sizeof(uint32_t) + 2) & 0x7F); - - value |= (snd_soc_read(codec, - (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)) << 16); - - snd_soc_write(codec, - (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx), - ((band_idx * BAND_MAX + coeff_idx) - * sizeof(uint32_t) + 3) & 0x7F); - - /* 
Mask bits top 2 bits since they are reserved */ - value |= ((snd_soc_read(codec, - (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx)) & 0x3F) << 24); - - return value; -} - -static int tomtom_get_iir_band_audio_mixer( - struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - int iir_idx = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->reg; - int band_idx = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->shift; - - ucontrol->value.integer.value[0] = - get_iir_band_coeff(codec, iir_idx, band_idx, 0); - ucontrol->value.integer.value[1] = - get_iir_band_coeff(codec, iir_idx, band_idx, 1); - ucontrol->value.integer.value[2] = - get_iir_band_coeff(codec, iir_idx, band_idx, 2); - ucontrol->value.integer.value[3] = - get_iir_band_coeff(codec, iir_idx, band_idx, 3); - ucontrol->value.integer.value[4] = - get_iir_band_coeff(codec, iir_idx, band_idx, 4); - - pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n" - "%s: IIR #%d band #%d b1 = 0x%x\n" - "%s: IIR #%d band #%d b2 = 0x%x\n" - "%s: IIR #%d band #%d a1 = 0x%x\n" - "%s: IIR #%d band #%d a2 = 0x%x\n", - __func__, iir_idx, band_idx, - (uint32_t)ucontrol->value.integer.value[0], - __func__, iir_idx, band_idx, - (uint32_t)ucontrol->value.integer.value[1], - __func__, iir_idx, band_idx, - (uint32_t)ucontrol->value.integer.value[2], - __func__, iir_idx, band_idx, - (uint32_t)ucontrol->value.integer.value[3], - __func__, iir_idx, band_idx, - (uint32_t)ucontrol->value.integer.value[4]); - return 0; -} - -static void set_iir_band_coeff(struct snd_soc_codec *codec, - int iir_idx, int band_idx, - uint32_t value) -{ - snd_soc_write(codec, - (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx), - (value & 0xFF)); - - snd_soc_write(codec, - (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx), - (value >> 8) & 0xFF); - - snd_soc_write(codec, - (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx), - (value >> 16) & 0xFF); - - /* Mask top 2 bits, 7-8 are reserved */ - snd_soc_write(codec, - (TOMTOM_A_CDC_IIR1_COEF_B2_CTL + 16 * iir_idx), - (value >> 24) & 0x3F); -} - -static int tomtom_put_iir_band_audio_mixer( - struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - int iir_idx = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->reg; - int band_idx = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->shift; - - /* Mask top bit it is reserved */ - /* Updates addr automatically for each B2 write */ - snd_soc_write(codec, - (TOMTOM_A_CDC_IIR1_COEF_B1_CTL + 16 * iir_idx), - (band_idx * BAND_MAX * sizeof(uint32_t)) & 0x7F); - - set_iir_band_coeff(codec, iir_idx, band_idx, - ucontrol->value.integer.value[0]); - set_iir_band_coeff(codec, iir_idx, band_idx, - ucontrol->value.integer.value[1]); - set_iir_band_coeff(codec, iir_idx, band_idx, - ucontrol->value.integer.value[2]); - set_iir_band_coeff(codec, iir_idx, band_idx, - ucontrol->value.integer.value[3]); - set_iir_band_coeff(codec, iir_idx, band_idx, - ucontrol->value.integer.value[4]); - - pr_debug("%s: IIR #%d band #%d b0 = 0x%x\n" - "%s: IIR #%d band #%d b1 = 0x%x\n" - "%s: IIR #%d band #%d b2 = 0x%x\n" - "%s: IIR #%d band #%d a1 = 0x%x\n" - "%s: IIR #%d band #%d a2 = 0x%x\n", - __func__, iir_idx, band_idx, - get_iir_band_coeff(codec, iir_idx, band_idx, 0), - __func__, iir_idx, band_idx, - get_iir_band_coeff(codec, iir_idx, band_idx, 1), - __func__, iir_idx, band_idx, - get_iir_band_coeff(codec, iir_idx, 
band_idx, 2), - __func__, iir_idx, band_idx, - get_iir_band_coeff(codec, iir_idx, band_idx, 3), - __func__, iir_idx, band_idx, - get_iir_band_coeff(codec, iir_idx, band_idx, 4)); - return 0; -} - -static int tomtom_get_compander(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - int comp = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->shift; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - ucontrol->value.integer.value[0] = tomtom->comp_enabled[comp]; - return 0; -} - -static int tomtom_set_compander(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - int comp = ((struct soc_multi_mixer_control *) - kcontrol->private_value)->shift; - int value = ucontrol->value.integer.value[0]; - - pr_debug("%s: Compander %d enable current %d, new %d\n", - __func__, comp, tomtom->comp_enabled[comp], value); - tomtom->comp_enabled[comp] = value; - - if (comp == COMPANDER_1 && - tomtom->comp_enabled[comp] == 1) { - /* Wavegen to 5 msec */ - snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xDB); - snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_TIME, 0x2A); - snd_soc_write(codec, TOMTOM_A_RX_HPH_BIAS_WG_OCP, 0x2A); - - /* Enable Chopper */ - snd_soc_update_bits(codec, - TOMTOM_A_RX_HPH_CHOP_CTL, 0x80, 0x80); - - snd_soc_write(codec, TOMTOM_A_NCP_DTEST, 0x20); - pr_debug("%s: Enabled Chopper and set wavegen to 5 msec\n", - __func__); - } else if (comp == COMPANDER_1 && - tomtom->comp_enabled[comp] == 0) { - /* Wavegen to 20 msec */ - snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xDB); - snd_soc_write(codec, TOMTOM_A_RX_HPH_CNP_WG_TIME, 0x58); - snd_soc_write(codec, TOMTOM_A_RX_HPH_BIAS_WG_OCP, 0x1A); - - /* Disable CHOPPER block */ - snd_soc_update_bits(codec, - TOMTOM_A_RX_HPH_CHOP_CTL, 0x80, 0x00); - - snd_soc_write(codec, TOMTOM_A_NCP_DTEST, 0x10); - pr_debug("%s: Disabled Chopper and set wavegen to 20 msec\n", - __func__); - } - return 0; -} - -static int tomtom_config_gain_compander(struct snd_soc_codec *codec, - int comp, bool enable) -{ - int ret = 0; - - switch (comp) { - case COMPANDER_0: - snd_soc_update_bits(codec, TOMTOM_A_SPKR_DRV1_GAIN, - 1 << 2, !enable << 2); - snd_soc_update_bits(codec, TOMTOM_A_SPKR_DRV2_GAIN, - 1 << 2, !enable << 2); - break; - case COMPANDER_1: - snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_L_GAIN, - 1 << 5, !enable << 5); - snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_R_GAIN, - 1 << 5, !enable << 5); - break; - case COMPANDER_2: - snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_1_GAIN, - 1 << 5, !enable << 5); - snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_3_GAIN, - 1 << 5, !enable << 5); - snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_2_GAIN, - 1 << 5, !enable << 5); - snd_soc_update_bits(codec, TOMTOM_A_RX_LINE_4_GAIN, - 1 << 5, !enable << 5); - break; - default: - WARN_ON(1); - ret = -EINVAL; - } - - return ret; -} - -static void tomtom_discharge_comp(struct snd_soc_codec *codec, int comp) -{ - /* Level meter DIV Factor to 5*/ - snd_soc_update_bits(codec, TOMTOM_A_CDC_COMP0_B2_CTL + (comp * 8), 0xF0, - 0x05 << 4); - /* RMS meter Sampling to 0x01 */ - snd_soc_write(codec, TOMTOM_A_CDC_COMP0_B3_CTL + (comp * 8), 0x01); - - /* Worst case timeout for compander CnP sleep timeout */ - usleep_range(3000, 3100); -} - -static enum wcd9xxx_buck_volt tomtom_codec_get_buck_mv( - struct snd_soc_codec *codec) -{ - 
int buck_volt = WCD9XXX_CDC_BUCK_UNSUPPORTED; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx_pdata *pdata = tomtom->resmgr.pdata; - int i; - - for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) { - if (!strcmp(pdata->regulator[i].name, - WCD9XXX_SUPPLY_BUCK_NAME)) { - if ((pdata->regulator[i].min_uV == - WCD9XXX_CDC_BUCK_MV_1P8) || - (pdata->regulator[i].min_uV == - WCD9XXX_CDC_BUCK_MV_2P15)) - buck_volt = pdata->regulator[i].min_uV; - break; - } - } - return buck_volt; -} - -static int tomtom_config_compander(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - int mask, enable_mask; - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - const int comp = w->shift; - const u32 rate = tomtom->comp_fs[comp]; - const struct comp_sample_dependent_params *comp_params = - &comp_samp_params[rate]; - enum wcd9xxx_buck_volt buck_mv; - - pr_debug("%s: %s event %d compander %d, enabled %d", __func__, - w->name, event, comp, tomtom->comp_enabled[comp]); - - if (!tomtom->comp_enabled[comp]) - return 0; - - /* Compander 0 has two channels */ - mask = enable_mask = 0x03; - buck_mv = tomtom_codec_get_buck_mv(codec); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - /* Set compander Sample rate */ - snd_soc_update_bits(codec, - TOMTOM_A_CDC_COMP0_FS_CFG + (comp * 8), - 0x07, rate); - /* Set the static gain offset for HPH Path */ - if (comp == COMPANDER_1) { - if (buck_mv == WCD9XXX_CDC_BUCK_MV_2P15) { - snd_soc_update_bits(codec, - TOMTOM_A_CDC_COMP0_B4_CTL + (comp * 8), - 0x80, 0x00); - } else { - snd_soc_update_bits(codec, - TOMTOM_A_CDC_COMP0_B4_CTL + (comp * 8), - 0x80, 0x80); - } - } - /* Enable RX interpolation path compander clocks */ - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_B2_CTL, - mask << comp_shift[comp], - mask << comp_shift[comp]); - /* Toggle compander reset bits */ - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL, - mask << comp_shift[comp], - mask << comp_shift[comp]); - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL, - mask << comp_shift[comp], 0); - - /* Set gain source to compander */ - tomtom_config_gain_compander(codec, comp, true); - - /* Compander enable */ - snd_soc_update_bits(codec, TOMTOM_A_CDC_COMP0_B1_CTL + - (comp * 8), enable_mask, enable_mask); - - tomtom_discharge_comp(codec, comp); - - /* Set sample rate dependent parameter */ - snd_soc_write(codec, TOMTOM_A_CDC_COMP0_B3_CTL + (comp * 8), - comp_params->rms_meter_resamp_fact); - snd_soc_update_bits(codec, - TOMTOM_A_CDC_COMP0_B2_CTL + (comp * 8), - 0xF0, comp_params->rms_meter_div_fact << 4); - snd_soc_update_bits(codec, - TOMTOM_A_CDC_COMP0_B2_CTL + (comp * 8), - 0x0F, comp_params->peak_det_timeout); - break; - case SND_SOC_DAPM_PRE_PMD: - /* Disable compander */ - snd_soc_update_bits(codec, - TOMTOM_A_CDC_COMP0_B1_CTL + (comp * 8), - enable_mask, 0x00); - - /* Toggle compander reset bits */ - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL, - mask << comp_shift[comp], - mask << comp_shift[comp]); - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL, - mask << comp_shift[comp], 0); - - /* Turn off the clock for compander in pair */ - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_B2_CTL, - mask << comp_shift[comp], 0); - - /* Set gain source to register */ - tomtom_config_gain_compander(codec, comp, false); - break; - } - return 0; -} - - - -static const char *const tomtom_anc_func_text[] = {"OFF", "ON"}; -static const 
struct soc_enum tomtom_anc_func_enum = - SOC_ENUM_SINGLE_EXT(2, tomtom_anc_func_text); - -static const char *const tabla_ear_pa_gain_text[] = {"POS_6_DB", "POS_2_DB"}; -static const struct soc_enum tabla_ear_pa_gain_enum[] = { - SOC_ENUM_SINGLE_EXT(2, tabla_ear_pa_gain_text), -}; - -/*cut of frequency for high pass filter*/ -static const char * const cf_text[] = { - "MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz" -}; - -static const char * const rx_cf_text[] = { - "MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz", - "MIN_3DB_0P48Hz" -}; - -static const struct soc_enum cf_dec1_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX1_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_dec2_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX2_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_dec3_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX3_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_dec4_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX4_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_dec5_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX5_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_dec6_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX6_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_dec7_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX7_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_dec8_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX8_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_dec9_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX9_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_dec10_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_TX10_MUX_CTL, 4, 3, cf_text); - -static const struct soc_enum cf_rxmix1_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX1_B4_CTL, 0, 4, rx_cf_text); - -static const struct soc_enum cf_rxmix2_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX2_B4_CTL, 0, 4, rx_cf_text); - -static const struct soc_enum cf_rxmix3_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX3_B4_CTL, 0, 4, rx_cf_text); - -static const struct soc_enum cf_rxmix4_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX4_B4_CTL, 0, 4, rx_cf_text); - -static const struct soc_enum cf_rxmix5_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX5_B4_CTL, 0, 4, rx_cf_text) -; -static const struct soc_enum cf_rxmix6_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX6_B4_CTL, 0, 4, rx_cf_text); - -static const struct soc_enum cf_rxmix7_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX7_B4_CTL, 0, 4, rx_cf_text); - -static const struct soc_enum cf_rxmix8_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_RX8_B4_CTL, 0, 4, rx_cf_text); - -static const char * const class_h_dsm_text[] = { - "ZERO", "DSM_HPHL_RX1", "DSM_SPKR_RX7" -}; - -static const struct soc_enum class_h_dsm_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_CLSH_CTL, 4, 3, class_h_dsm_text); - -static const struct snd_kcontrol_new class_h_dsm_mux = - SOC_DAPM_ENUM("CLASS_H_DSM MUX Mux", class_h_dsm_enum); - -static const char * const rx1_interp_text[] = { - "ZERO", "RX1 MIX2" -}; - -static const struct soc_enum rx1_interp_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CLK_RX_B1_CTL, 0, 2, rx1_interp_text); - -static const struct snd_kcontrol_new rx1_interp_mux = - SOC_DAPM_ENUM("RX1 INTERP MUX Mux", rx1_interp_enum); - -static const char * const rx2_interp_text[] = { - "ZERO", "RX2 MIX2" -}; - -static const struct soc_enum rx2_interp_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CLK_RX_B1_CTL, 1, 2, rx2_interp_text); - -static const struct snd_kcontrol_new rx2_interp_mux = - SOC_DAPM_ENUM("RX2 INTERP MUX Mux", rx2_interp_enum); - -static const char *const tomtom_conn_mad_text[] = { - "ADC_MB", "ADC1", "ADC2", "ADC3", "ADC4", 
"ADC5", "ADC6", "NOTUSED1", - "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6", "NOTUSED2", - "NOTUSED3"}; - -static const struct soc_enum tomtom_conn_mad_enum = - SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tomtom_conn_mad_text), - tomtom_conn_mad_text); - - -static int tomtom_mad_input_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - u8 tomtom_mad_input; - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - - tomtom_mad_input = snd_soc_read(codec, TOMTOM_A_CDC_MAD_INP_SEL); - - tomtom_mad_input = tomtom_mad_input & 0x0F; - - ucontrol->value.integer.value[0] = tomtom_mad_input; - - pr_debug("%s: tomtom_mad_input = %s\n", __func__, - tomtom_conn_mad_text[tomtom_mad_input]); - - return 0; -} - -static int tomtom_mad_input_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - u8 tomtom_mad_input; - u16 micb_int_reg, micb_4_int_reg; - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - struct snd_soc_card *card = codec->component.card; - char mad_amic_input_widget[6]; - u32 adc; - const char *mad_input_widget; - const char *source_widget = NULL; - u32 mic_bias_found = 0; - u32 i; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - int ret = 0; - char *mad_input; - - tomtom_mad_input = ucontrol->value.integer.value[0]; - micb_4_int_reg = tomtom->resmgr.reg_addr->micb_4_int_rbias; - - if (tomtom_mad_input >= ARRAY_SIZE(tomtom_conn_mad_text)) { - dev_err(codec->dev, - "%s: tomtom_mad_input = %d out of bounds\n", - __func__, tomtom_mad_input); - return -EINVAL; - } - - pr_debug("%s: tomtom_mad_input = %s\n", __func__, - tomtom_conn_mad_text[tomtom_mad_input]); - - if (!strcmp(tomtom_conn_mad_text[tomtom_mad_input], "NOTUSED1") || - !strcmp(tomtom_conn_mad_text[tomtom_mad_input], "NOTUSED2") || - !strcmp(tomtom_conn_mad_text[tomtom_mad_input], "NOTUSED3") || - !strcmp(tomtom_conn_mad_text[tomtom_mad_input], "ADC_MB")) { - pr_info("%s: tomtom mad input is set to unsupported input = %s\n", - __func__, tomtom_conn_mad_text[tomtom_mad_input]); - return -EINVAL; - } - - if (strnstr(tomtom_conn_mad_text[tomtom_mad_input], - "ADC", sizeof("ADC"))) { - mad_input = strpbrk(tomtom_conn_mad_text[tomtom_mad_input], - "123456"); - if (!mad_input) { - dev_err(codec->dev, "%s: Invalid MAD input %s\n", - __func__, tomtom_conn_mad_text[tomtom_mad_input]); - return -EINVAL; - } - ret = kstrtouint(mad_input, 10, &adc); - if ((ret < 0) || (adc > 6)) { - pr_err("%s: Invalid ADC = %s\n", __func__, - tomtom_conn_mad_text[tomtom_mad_input]); - ret = -EINVAL; - } - - snprintf(mad_amic_input_widget, 6, "%s%u", "AMIC", adc); - - mad_input_widget = mad_amic_input_widget; - pr_debug("%s: tomtom amic input widget = %s\n", __func__, - mad_amic_input_widget); - } else { - /* DMIC type input widget*/ - mad_input_widget = tomtom_conn_mad_text[tomtom_mad_input]; - } - - pr_debug("%s: tomtom input widget = %s\n", __func__, mad_input_widget); - - for (i = 0; i < card->num_dapm_routes; i++) { - - if (!strcmp(card->dapm_routes[i].sink, mad_input_widget)) { - - source_widget = card->dapm_routes[i].source; - if (!source_widget) { - dev_err(codec->dev, - "%s: invalid source widget\n", - __func__); - return -EINVAL; - } - - if (strnstr(source_widget, - "MIC BIAS1", sizeof("MIC BIAS1"))) { - mic_bias_found = 1; - micb_int_reg = TOMTOM_A_MICB_1_INT_RBIAS; - break; - } else if (strnstr(source_widget, - "MIC BIAS2", sizeof("MIC BIAS2"))) { - mic_bias_found = 2; - micb_int_reg = TOMTOM_A_MICB_2_INT_RBIAS; - break; - } else if (strnstr(source_widget, - "MIC 
BIAS3", sizeof("MIC BIAS3"))) { - mic_bias_found = 3; - micb_int_reg = TOMTOM_A_MICB_3_INT_RBIAS; - break; - } else if (strnstr(source_widget, - "MIC BIAS4", sizeof("MIC BIAS4"))) { - mic_bias_found = 4; - micb_int_reg = micb_4_int_reg; - break; - } - } - } - - if (mic_bias_found) { - pr_debug("%s: source mic bias = %s. sink = %s\n", __func__, - card->dapm_routes[i].source, - card->dapm_routes[i].sink); - - snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_INP_SEL, - 0x0F, tomtom_mad_input); - snd_soc_update_bits(codec, TOMTOM_A_MAD_ANA_CTRL, - 0x07, mic_bias_found); - - /* Setup internal micbias */ - - if (strnstr(source_widget, "Internal1", strlen(source_widget))) - snd_soc_update_bits(codec, - micb_int_reg, - 0xE0, 0xE0); - else if (strnstr(source_widget, "Internal2", - strlen(source_widget))) - snd_soc_update_bits(codec, - micb_int_reg, - 0x1C, 0x1C); - else if (strnstr(source_widget, "Internal3", - strlen(source_widget))) - snd_soc_update_bits(codec, - micb_int_reg, - 0x3, 0x3); - else - /* - * If not internal, make sure to write the - * register to default value - */ - snd_soc_write(codec, micb_int_reg, 0x24); - return 0; - } - - pr_err("%s: mic bias source not found for input = %s\n", - __func__, mad_input_widget); - return -EINVAL; -} - -static int tomtom_tx_hpf_bypass_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - u32 tx_index; - - tx_index = (u32)kcontrol->private_value; - - if (tx_index > NUM_DECIMATORS) { - pr_err("%s: Invalid TX decimator %d\n", __func__, - tx_index); - return -EINVAL; - } - - ucontrol->value.integer.value[0] = - tx_hpf_work[tx_index-1].tx_hpf_bypass; - - return 0; -} - -static int tomtom_tx_hpf_bypass_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - bool tx_hpf_bypass_cfg; - u32 tx_index; - - tx_hpf_bypass_cfg = (bool)ucontrol->value.integer.value[0]; - - pr_debug("%s: tx_hpf_bypass = %d\n", __func__, - tx_hpf_bypass_cfg); - - tx_index = (u32)kcontrol->private_value; - - if (tx_index > NUM_DECIMATORS) { - pr_err("%s: Invalid TX decimator %d\n", __func__, - tx_index); - return -EINVAL; - } - if (tx_hpf_work[tx_index-1].tx_hpf_bypass != tx_hpf_bypass_cfg) - tx_hpf_work[tx_index-1].tx_hpf_bypass = tx_hpf_bypass_cfg; - - pr_debug("%s: Set TX%d HPF bypass configuration %d", - __func__, tx_index, - tx_hpf_work[tx_index-1].tx_hpf_bypass); - - return 0; -} - -static const struct snd_kcontrol_new tomtom_snd_controls[] = { - - SOC_SINGLE_SX_TLV("RX1 Digital Volume", TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX2 Digital Volume", TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX3 Digital Volume", TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX4 Digital Volume", TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX5 Digital Volume", TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX6 Digital Volume", TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX7 Digital Volume", TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL, - 0, -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("RX8 Digital Volume", TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL, - 0, -84, 40, digital_gain), - - SOC_SINGLE_SX_TLV("DEC1 Volume", TOMTOM_A_CDC_TX1_VOL_CTL_GAIN, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("DEC2 Volume", TOMTOM_A_CDC_TX2_VOL_CTL_GAIN, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("DEC3 Volume", TOMTOM_A_CDC_TX3_VOL_CTL_GAIN, 0, - -84, 40, 
digital_gain), - SOC_SINGLE_SX_TLV("DEC4 Volume", TOMTOM_A_CDC_TX4_VOL_CTL_GAIN, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("DEC5 Volume", TOMTOM_A_CDC_TX5_VOL_CTL_GAIN, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("DEC6 Volume", TOMTOM_A_CDC_TX6_VOL_CTL_GAIN, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("DEC7 Volume", TOMTOM_A_CDC_TX7_VOL_CTL_GAIN, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("DEC8 Volume", TOMTOM_A_CDC_TX8_VOL_CTL_GAIN, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("DEC9 Volume", TOMTOM_A_CDC_TX9_VOL_CTL_GAIN, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("DEC10 Volume", TOMTOM_A_CDC_TX10_VOL_CTL_GAIN, 0, - -84, 40, digital_gain), - - SOC_SINGLE_SX_TLV("IIR1 INP1 Volume", TOMTOM_A_CDC_IIR1_GAIN_B1_CTL, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR1 INP2 Volume", TOMTOM_A_CDC_IIR1_GAIN_B2_CTL, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR1 INP3 Volume", TOMTOM_A_CDC_IIR1_GAIN_B3_CTL, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR1 INP4 Volume", TOMTOM_A_CDC_IIR1_GAIN_B4_CTL, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR2 INP1 Volume", TOMTOM_A_CDC_IIR2_GAIN_B1_CTL, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR2 INP2 Volume", TOMTOM_A_CDC_IIR2_GAIN_B2_CTL, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR2 INP3 Volume", TOMTOM_A_CDC_IIR2_GAIN_B3_CTL, 0, - -84, 40, digital_gain), - SOC_SINGLE_SX_TLV("IIR2 INP4 Volume", TOMTOM_A_CDC_IIR2_GAIN_B4_CTL, 0, - -84, 40, digital_gain), - - SOC_SINGLE_EXT("ANC Slot", SND_SOC_NOPM, 0, 100, 0, tomtom_get_anc_slot, - tomtom_put_anc_slot), - SOC_ENUM_EXT("ANC Function", tomtom_anc_func_enum, tomtom_get_anc_func, - tomtom_put_anc_func), - - SOC_ENUM("TX1 HPF cut off", cf_dec1_enum), - SOC_ENUM("TX2 HPF cut off", cf_dec2_enum), - SOC_ENUM("TX3 HPF cut off", cf_dec3_enum), - SOC_ENUM("TX4 HPF cut off", cf_dec4_enum), - SOC_ENUM("TX5 HPF cut off", cf_dec5_enum), - SOC_ENUM("TX6 HPF cut off", cf_dec6_enum), - SOC_ENUM("TX7 HPF cut off", cf_dec7_enum), - SOC_ENUM("TX8 HPF cut off", cf_dec8_enum), - SOC_ENUM("TX9 HPF cut off", cf_dec9_enum), - SOC_ENUM("TX10 HPF cut off", cf_dec10_enum), - - SOC_SINGLE_BOOL_EXT("TX1 HPF Switch", 1, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - SOC_SINGLE_BOOL_EXT("TX2 HPF Switch", 2, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - SOC_SINGLE_BOOL_EXT("TX3 HPF Switch", 3, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - SOC_SINGLE_BOOL_EXT("TX4 HPF Switch", 4, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - SOC_SINGLE_BOOL_EXT("TX5 HPF Switch", 5, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - SOC_SINGLE_BOOL_EXT("TX6 HPF Switch", 6, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - SOC_SINGLE_BOOL_EXT("TX7 HPF Switch", 7, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - SOC_SINGLE_BOOL_EXT("TX8 HPF Switch", 8, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - SOC_SINGLE_BOOL_EXT("TX9 HPF Switch", 9, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - SOC_SINGLE_BOOL_EXT("TX10 HPF Switch", 10, - tomtom_tx_hpf_bypass_get, - tomtom_tx_hpf_bypass_put), - - SOC_SINGLE("RX1 HPF Switch", TOMTOM_A_CDC_RX1_B5_CTL, 2, 1, 0), - SOC_SINGLE("RX2 HPF Switch", TOMTOM_A_CDC_RX2_B5_CTL, 2, 1, 0), - SOC_SINGLE("RX3 HPF Switch", TOMTOM_A_CDC_RX3_B5_CTL, 2, 1, 0), - SOC_SINGLE("RX4 HPF Switch", TOMTOM_A_CDC_RX4_B5_CTL, 2, 1, 0), - SOC_SINGLE("RX5 HPF Switch", TOMTOM_A_CDC_RX5_B5_CTL, 2, 1, 0), - SOC_SINGLE("RX6 HPF Switch", TOMTOM_A_CDC_RX6_B5_CTL, 2, 1, 0), - 
SOC_SINGLE("RX7 HPF Switch", TOMTOM_A_CDC_RX7_B5_CTL, 2, 1, 0), - SOC_SINGLE("RX8 HPF Switch", TOMTOM_A_CDC_RX8_B5_CTL, 2, 1, 0), - - SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum), - SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum), - SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum), - SOC_ENUM("RX4 HPF cut off", cf_rxmix4_enum), - SOC_ENUM("RX5 HPF cut off", cf_rxmix5_enum), - SOC_ENUM("RX6 HPF cut off", cf_rxmix6_enum), - SOC_ENUM("RX7 HPF cut off", cf_rxmix7_enum), - SOC_ENUM("RX8 HPF cut off", cf_rxmix8_enum), - - SOC_SINGLE_EXT("IIR1 Enable Band1", IIR1, BAND1, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - SOC_SINGLE_EXT("IIR1 Enable Band2", IIR1, BAND2, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - SOC_SINGLE_EXT("IIR1 Enable Band3", IIR1, BAND3, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - SOC_SINGLE_EXT("IIR1 Enable Band4", IIR1, BAND4, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - SOC_SINGLE_EXT("IIR1 Enable Band5", IIR1, BAND5, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - SOC_SINGLE_EXT("IIR2 Enable Band1", IIR2, BAND1, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - SOC_SINGLE_EXT("IIR2 Enable Band2", IIR2, BAND2, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - SOC_SINGLE_EXT("IIR2 Enable Band3", IIR2, BAND3, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - SOC_SINGLE_EXT("IIR2 Enable Band4", IIR2, BAND4, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - SOC_SINGLE_EXT("IIR2 Enable Band5", IIR2, BAND5, 1, 0, - tomtom_get_iir_enable_audio_mixer, tomtom_put_iir_enable_audio_mixer), - - SOC_SINGLE_MULTI_EXT("IIR1 Band1", IIR1, BAND1, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - SOC_SINGLE_MULTI_EXT("IIR1 Band2", IIR1, BAND2, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - SOC_SINGLE_MULTI_EXT("IIR1 Band3", IIR1, BAND3, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - SOC_SINGLE_MULTI_EXT("IIR1 Band4", IIR1, BAND4, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - SOC_SINGLE_MULTI_EXT("IIR1 Band5", IIR1, BAND5, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - SOC_SINGLE_MULTI_EXT("IIR2 Band1", IIR2, BAND1, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - SOC_SINGLE_MULTI_EXT("IIR2 Band2", IIR2, BAND2, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - SOC_SINGLE_MULTI_EXT("IIR2 Band3", IIR2, BAND3, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - SOC_SINGLE_MULTI_EXT("IIR2 Band4", IIR2, BAND4, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - SOC_SINGLE_MULTI_EXT("IIR2 Band5", IIR2, BAND5, 255, 0, 5, - tomtom_get_iir_band_audio_mixer, tomtom_put_iir_band_audio_mixer), - - SOC_SINGLE_EXT("COMP0 Switch", SND_SOC_NOPM, COMPANDER_0, 1, 0, - tomtom_get_compander, tomtom_set_compander), - SOC_SINGLE_EXT("COMP1 Switch", SND_SOC_NOPM, COMPANDER_1, 1, 0, - tomtom_get_compander, tomtom_set_compander), - SOC_SINGLE_EXT("COMP2 Switch", SND_SOC_NOPM, COMPANDER_2, 1, 0, - tomtom_get_compander, tomtom_set_compander), - - SOC_ENUM_EXT("MAD Input", tomtom_conn_mad_enum, - tomtom_mad_input_get, tomtom_mad_input_put), - 
-}; - -static int tomtom_pa_gain_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - u8 ear_pa_gain; - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - - ear_pa_gain = snd_soc_read(codec, TOMTOM_A_RX_EAR_GAIN); - - ear_pa_gain = ear_pa_gain >> 5; - - ucontrol->value.integer.value[0] = ear_pa_gain; - - pr_debug("%s: ear_pa_gain = 0x%x\n", __func__, ear_pa_gain); - - return 0; -} - -static int tomtom_pa_gain_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - u8 ear_pa_gain; - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - - pr_debug("%s: ucontrol->value.integer.value[0] = %ld\n", __func__, - ucontrol->value.integer.value[0]); - - ear_pa_gain = ucontrol->value.integer.value[0] << 5; - - snd_soc_update_bits(codec, TOMTOM_A_RX_EAR_GAIN, 0xE0, ear_pa_gain); - return 0; -} - -static const char * const tomtom_1_x_ear_pa_gain_text[] = { - "POS_6_DB", "POS_4P5_DB", "POS_3_DB", "POS_1P5_DB", - "POS_0_DB", "NEG_2P5_DB", "UNDEFINED", "NEG_12_DB" -}; - -static const struct soc_enum tomtom_1_x_ear_pa_gain_enum = - SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(tomtom_1_x_ear_pa_gain_text), - tomtom_1_x_ear_pa_gain_text); - -static const struct snd_kcontrol_new tomtom_1_x_analog_gain_controls[] = { - - SOC_ENUM_EXT("EAR PA Gain", tomtom_1_x_ear_pa_gain_enum, - tomtom_pa_gain_get, tomtom_pa_gain_put), - - SOC_SINGLE_TLV("HPHL Volume", TOMTOM_A_RX_HPH_L_GAIN, 0, 20, 1, - line_gain), - SOC_SINGLE_TLV("HPHR Volume", TOMTOM_A_RX_HPH_R_GAIN, 0, 20, 1, - line_gain), - - SOC_SINGLE_TLV("LINEOUT1 Volume", TOMTOM_A_RX_LINE_1_GAIN, 0, 20, 1, - line_gain), - SOC_SINGLE_TLV("LINEOUT2 Volume", TOMTOM_A_RX_LINE_2_GAIN, 0, 20, 1, - line_gain), - SOC_SINGLE_TLV("LINEOUT3 Volume", TOMTOM_A_RX_LINE_3_GAIN, 0, 20, 1, - line_gain), - SOC_SINGLE_TLV("LINEOUT4 Volume", TOMTOM_A_RX_LINE_4_GAIN, 0, 20, 1, - line_gain), - - SOC_SINGLE_TLV("SPK DRV Volume", TOMTOM_A_SPKR_DRV1_GAIN, 3, 8, 1, - line_gain), - SOC_SINGLE_TLV("SPK DRV2 Volume", TOMTOM_A_SPKR_DRV2_GAIN, 3, 8, 1, - line_gain), - - SOC_SINGLE_TLV("ADC1 Volume", TOMTOM_A_TX_1_GAIN, 2, 19, 0, - analog_gain), - SOC_SINGLE_TLV("ADC2 Volume", TOMTOM_A_TX_2_GAIN, 2, 19, 0, - analog_gain), - SOC_SINGLE_TLV("ADC3 Volume", TOMTOM_A_TX_3_GAIN, 2, 19, 0, - analog_gain), - SOC_SINGLE_TLV("ADC4 Volume", TOMTOM_A_TX_4_GAIN, 2, 19, 0, - analog_gain), - SOC_SINGLE_TLV("ADC5 Volume", TOMTOM_A_TX_5_GAIN, 2, 19, 0, - analog_gain), - SOC_SINGLE_TLV("ADC6 Volume", TOMTOM_A_TX_6_GAIN, 2, 19, 0, - analog_gain), -}; - -static int tomtom_hph_impedance_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - uint32_t zl, zr; - bool hphr; - struct soc_multi_mixer_control *mc; - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - - mc = (struct soc_multi_mixer_control *)(kcontrol->private_value); - - hphr = mc->shift; - wcd9xxx_mbhc_get_impedance(&priv->mbhc, &zl, &zr); - pr_debug("%s: zl %u, zr %u\n", __func__, zl, zr); - ucontrol->value.integer.value[0] = hphr ? 
zr : zl; - - return 0; -} - -static const struct snd_kcontrol_new impedance_detect_controls[] = { - SOC_SINGLE_EXT("HPHL Impedance", 0, 0, UINT_MAX, 0, - tomtom_hph_impedance_get, NULL), - SOC_SINGLE_EXT("HPHR Impedance", 0, 1, UINT_MAX, 0, - tomtom_hph_impedance_get, NULL), -}; - -static int tomtom_get_hph_type(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx_mbhc *mbhc; - - if (!priv) { - pr_debug("%s: wcd9330 private data is NULL\n", __func__); - return 0; - } - - mbhc = &priv->mbhc; - if (!mbhc) { - pr_debug("%s: mbhc not initialized\n", __func__); - return 0; - } - - ucontrol->value.integer.value[0] = (u32) mbhc->hph_type; - pr_debug("%s: hph_type = %u\n", __func__, mbhc->hph_type); - - return 0; -} - -static const struct snd_kcontrol_new hph_type_detect_controls[] = { - SOC_SINGLE_EXT("HPH Type", 0, 0, UINT_MAX, 0, - tomtom_get_hph_type, NULL), -}; - -static const char * const rx_mix1_text[] = { - "ZERO", "SRC1", "SRC2", "IIR1", "IIR2", "RX1", "RX2", "RX3", "RX4", - "RX5", "RX6", "RX7" -}; - -static const char * const rx8_mix1_text[] = { - "ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3", "RX4", - "RX5", "RX6", "RX7", "RX8" -}; - -static const char * const rx_mix2_text[] = { - "ZERO", "SRC1", "SRC2", "IIR1", "IIR2" -}; - -static const char * const rx_rdac5_text[] = { - "DEM4", "DEM3_INV" -}; - -static const char * const rx_rdac7_text[] = { - "DEM6", "DEM5_INV" -}; - -static const char * const mad_sel_text[] = { - "SPE", "MSM" -}; - -static const char * const sb_tx1_mux_text[] = { - "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7", - "DEC1", "RMIX8" -}; - -static const char * const sb_tx2_mux_text[] = { - "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7", - "DEC2", "RMIX8" -}; - -static const char * const sb_tx3_mux_text[] = { - "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7", - "DEC3", "RMIX8" -}; - -static const char * const sb_tx4_mux_text[] = { - "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7", - "DEC4", "RMIX8" -}; - -static const char * const sb_tx5_mux_text[] = { - "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7", - "DEC5", "RMIX8" -}; - -static const char * const sb_tx6_mux_text[] = { - "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7", - "DEC6", "RMIX8" -}; - -static const char * const sb_tx7_to_tx10_mux_text[] = { - "ZERO", "RMIX1", "RMIX2", "RMIX3", "RMIX4", "RMIX5", "RMIX6", "RMIX7", - "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8", - "DEC9", "DEC10" -}; - -static const char * const dec1_mux_text[] = { - "ZERO", "DMIC1", "ADC6", -}; - -static const char * const dec2_mux_text[] = { - "ZERO", "DMIC2", "ADC5", -}; - -static const char * const dec3_mux_text[] = { - "ZERO", "DMIC3", "ADC4", -}; - -static const char * const dec4_mux_text[] = { - "ZERO", "DMIC4", "ADC3", -}; - -static const char * const dec5_mux_text[] = { - "ZERO", "DMIC5", "ADC2", -}; - -static const char * const dec6_mux_text[] = { - "ZERO", "DMIC6", "ADC1", -}; - -static const char * const dec7_mux_text[] = { - "ZERO", "DMIC1", "DMIC6", "ADC1", "ADC6", "ANC1_FB", "ANC2_FB", -}; - -static const char * const dec8_mux_text[] = { - "ZERO", "DMIC2", "DMIC5", "ADC2", "ADC5", "ANC1_FB", "ANC2_FB", -}; - -static const char * const dec9_mux_text[] = { - "ZERO", "DMIC4", "DMIC5", "ADC2", "ADC3", "ADCMB", "ANC1_FB", 
"ANC2_FB", -}; - -static const char * const dec10_mux_text[] = { - "ZERO", "DMIC3", "DMIC6", "ADC1", "ADC4", "ADCMB", "ANC1_FB", "ANC2_FB", -}; - -static const char * const anc_mux_text[] = { - "ZERO", "ADC1", "ADC2", "ADC3", "ADC4", "ADC5", "ADC6", "ADC_MB", - "RSVD_1", "DMIC1", "DMIC2", "DMIC3", "DMIC4", "DMIC5", "DMIC6" -}; - -static const char * const anc1_fb_mux_text[] = { - "ZERO", "EAR_HPH_L", "EAR_LINE_1", -}; - -static const char * const iir_inp1_text[] = { - "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8", - "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7" -}; - -static const char * const iir_inp2_text[] = { - "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8", - "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7" -}; - -static const char * const iir_inp3_text[] = { - "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8", - "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7" -}; - -static const char * const iir_inp4_text[] = { - "ZERO", "DEC1", "DEC2", "DEC3", "DEC4", "DEC5", "DEC6", "DEC7", "DEC8", - "DEC9", "DEC10", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7" -}; - -static const struct soc_enum rx_mix1_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B1_CTL, 0, 12, rx_mix1_text); - -static const struct soc_enum rx_mix1_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B1_CTL, 4, 12, rx_mix1_text); - -static const struct soc_enum rx_mix1_inp3_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B2_CTL, 0, 12, rx_mix1_text); - -static const struct soc_enum rx2_mix1_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B1_CTL, 0, 12, rx_mix1_text); - -static const struct soc_enum rx2_mix1_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B1_CTL, 4, 12, rx_mix1_text); - -static const struct soc_enum rx3_mix1_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX3_B1_CTL, 0, 12, rx_mix1_text); - -static const struct soc_enum rx3_mix1_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX3_B1_CTL, 4, 12, rx_mix1_text); - -static const struct soc_enum rx4_mix1_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX4_B1_CTL, 0, 12, rx_mix1_text); - -static const struct soc_enum rx4_mix1_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX4_B1_CTL, 4, 12, rx_mix1_text); - -static const struct soc_enum rx5_mix1_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX5_B1_CTL, 0, 12, rx_mix1_text); - -static const struct soc_enum rx5_mix1_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX5_B1_CTL, 4, 12, rx_mix1_text); - -static const struct soc_enum rx6_mix1_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX6_B1_CTL, 0, 12, rx_mix1_text); - -static const struct soc_enum rx6_mix1_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX6_B1_CTL, 4, 12, rx_mix1_text); - -static const struct soc_enum rx7_mix1_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B1_CTL, 0, 12, rx_mix1_text); - -static const struct soc_enum rx7_mix1_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B1_CTL, 4, 12, rx_mix1_text); - -static const struct soc_enum rx8_mix1_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX8_B1_CTL, 0, 11, rx8_mix1_text); - -static const struct soc_enum rx8_mix1_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX8_B1_CTL, 4, 11, rx8_mix1_text); - -static const struct soc_enum rx1_mix2_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B3_CTL, 0, 5, rx_mix2_text); - -static const struct soc_enum 
rx1_mix2_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX1_B3_CTL, 3, 5, rx_mix2_text); - -static const struct soc_enum rx2_mix2_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B3_CTL, 0, 5, rx_mix2_text); - -static const struct soc_enum rx2_mix2_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX2_B3_CTL, 3, 5, rx_mix2_text); - -static const struct soc_enum rx7_mix2_inp1_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B3_CTL, 0, 5, rx_mix2_text); - -static const struct soc_enum rx7_mix2_inp2_chain_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_RX7_B3_CTL, 3, 5, rx_mix2_text); - -static const struct soc_enum rx_rdac5_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_MISC, 2, 2, rx_rdac5_text); - -static const struct soc_enum rx_rdac7_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_MISC, 1, 2, rx_rdac7_text); - -static const struct soc_enum mad_sel_enum = - SOC_ENUM_SINGLE(TOMTOM_A_SVASS_CFG, 0, 2, mad_sel_text); - -static const struct soc_enum sb_tx1_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B1_CTL, 0, 10, sb_tx1_mux_text); - -static const struct soc_enum sb_tx2_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B2_CTL, 0, 10, sb_tx2_mux_text); - -static const struct soc_enum sb_tx3_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B3_CTL, 0, 10, sb_tx3_mux_text); - -static const struct soc_enum sb_tx4_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B4_CTL, 0, 10, sb_tx4_mux_text); - -static const struct soc_enum sb_tx5_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B5_CTL, 0, 10, sb_tx5_mux_text); - -static const struct soc_enum sb_tx6_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B6_CTL, 0, 10, sb_tx6_mux_text); - -static const struct soc_enum sb_tx7_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B7_CTL, 0, 18, - sb_tx7_to_tx10_mux_text); - -static const struct soc_enum sb_tx8_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B8_CTL, 0, 18, - sb_tx7_to_tx10_mux_text); - -static const struct soc_enum sb_tx9_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B9_CTL, 0, 18, - sb_tx7_to_tx10_mux_text); - -static const struct soc_enum sb_tx10_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_SB_B10_CTL, 0, 18, - sb_tx7_to_tx10_mux_text); - -static const struct soc_enum dec1_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 0, 3, dec1_mux_text); - -static const struct soc_enum dec2_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 2, 3, dec2_mux_text); - -static const struct soc_enum dec3_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 4, 3, dec3_mux_text); - -static const struct soc_enum dec4_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B1_CTL, 6, 3, dec4_mux_text); - -static const struct soc_enum dec5_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B2_CTL, 0, 3, dec5_mux_text); - -static const struct soc_enum dec6_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B2_CTL, 2, 3, dec6_mux_text); - -static const struct soc_enum dec7_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B2_CTL, 4, 7, dec7_mux_text); - -static const struct soc_enum dec8_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B3_CTL, 0, 7, dec8_mux_text); - -static const struct soc_enum dec9_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B3_CTL, 3, 8, dec9_mux_text); - -static const struct soc_enum dec10_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_TX_B4_CTL, 0, 8, dec10_mux_text); - -static const struct soc_enum anc1_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_ANC_B1_CTL, 0, 15, anc_mux_text); - -static const struct soc_enum 
anc2_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_ANC_B1_CTL, 4, 15, anc_mux_text); - -static const struct soc_enum anc1_fb_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_ANC_B2_CTL, 0, 3, anc1_fb_mux_text); - -static const struct soc_enum iir1_inp1_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B1_CTL, 0, 18, iir_inp1_text); - -static const struct soc_enum iir2_inp1_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B1_CTL, 0, 18, iir_inp1_text); - -static const struct soc_enum iir1_inp2_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B2_CTL, 0, 18, iir_inp2_text); - -static const struct soc_enum iir2_inp2_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B2_CTL, 0, 18, iir_inp2_text); - -static const struct soc_enum iir1_inp3_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B3_CTL, 0, 18, iir_inp3_text); - -static const struct soc_enum iir2_inp3_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B3_CTL, 0, 18, iir_inp3_text); - -static const struct soc_enum iir1_inp4_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ1_B4_CTL, 0, 18, iir_inp4_text); - -static const struct soc_enum iir2_inp4_mux_enum = - SOC_ENUM_SINGLE(TOMTOM_A_CDC_CONN_EQ2_B4_CTL, 0, 18, iir_inp4_text); - -static const struct snd_kcontrol_new rx_mix1_inp1_mux = - SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum); - -static const struct snd_kcontrol_new rx_mix1_inp2_mux = - SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum); - -static const struct snd_kcontrol_new rx_mix1_inp3_mux = - SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum); - -static const struct snd_kcontrol_new rx2_mix1_inp1_mux = - SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum); - -static const struct snd_kcontrol_new rx2_mix1_inp2_mux = - SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum); - -static const struct snd_kcontrol_new rx3_mix1_inp1_mux = - SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum); - -static const struct snd_kcontrol_new rx3_mix1_inp2_mux = - SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum); - -static const struct snd_kcontrol_new rx4_mix1_inp1_mux = - SOC_DAPM_ENUM("RX4 MIX1 INP1 Mux", rx4_mix1_inp1_chain_enum); - -static const struct snd_kcontrol_new rx4_mix1_inp2_mux = - SOC_DAPM_ENUM("RX4 MIX1 INP2 Mux", rx4_mix1_inp2_chain_enum); - -static const struct snd_kcontrol_new rx5_mix1_inp1_mux = - SOC_DAPM_ENUM("RX5 MIX1 INP1 Mux", rx5_mix1_inp1_chain_enum); - -static const struct snd_kcontrol_new rx5_mix1_inp2_mux = - SOC_DAPM_ENUM("RX5 MIX1 INP2 Mux", rx5_mix1_inp2_chain_enum); - -static const struct snd_kcontrol_new rx6_mix1_inp1_mux = - SOC_DAPM_ENUM("RX6 MIX1 INP1 Mux", rx6_mix1_inp1_chain_enum); - -static const struct snd_kcontrol_new rx6_mix1_inp2_mux = - SOC_DAPM_ENUM("RX6 MIX1 INP2 Mux", rx6_mix1_inp2_chain_enum); - -static const struct snd_kcontrol_new rx7_mix1_inp1_mux = - SOC_DAPM_ENUM("RX7 MIX1 INP1 Mux", rx7_mix1_inp1_chain_enum); - -static const struct snd_kcontrol_new rx7_mix1_inp2_mux = - SOC_DAPM_ENUM("RX7 MIX1 INP2 Mux", rx7_mix1_inp2_chain_enum); - -static const struct snd_kcontrol_new rx8_mix1_inp1_mux = - SOC_DAPM_ENUM("RX8 MIX1 INP1 Mux", rx8_mix1_inp1_chain_enum); - -static const struct snd_kcontrol_new rx8_mix1_inp2_mux = - SOC_DAPM_ENUM("RX8 MIX1 INP2 Mux", rx8_mix1_inp2_chain_enum); - -static const struct snd_kcontrol_new rx1_mix2_inp1_mux = - SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx1_mix2_inp1_chain_enum); - -static const struct snd_kcontrol_new rx1_mix2_inp2_mux = - SOC_DAPM_ENUM("RX1 MIX2 INP2 Mux", rx1_mix2_inp2_chain_enum); - 
-static const struct snd_kcontrol_new rx2_mix2_inp1_mux = - SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum); - -static const struct snd_kcontrol_new rx2_mix2_inp2_mux = - SOC_DAPM_ENUM("RX2 MIX2 INP2 Mux", rx2_mix2_inp2_chain_enum); - -static const struct snd_kcontrol_new rx7_mix2_inp1_mux = - SOC_DAPM_ENUM("RX7 MIX2 INP1 Mux", rx7_mix2_inp1_chain_enum); - -static const struct snd_kcontrol_new rx7_mix2_inp2_mux = - SOC_DAPM_ENUM("RX7 MIX2 INP2 Mux", rx7_mix2_inp2_chain_enum); - -static const struct snd_kcontrol_new rx_dac5_mux = - SOC_DAPM_ENUM("RDAC5 MUX Mux", rx_rdac5_enum); - -static const struct snd_kcontrol_new rx_dac7_mux = - SOC_DAPM_ENUM("RDAC7 MUX Mux", rx_rdac7_enum); - -static const struct snd_kcontrol_new mad_sel_mux = - SOC_DAPM_ENUM("MAD_SEL MUX Mux", mad_sel_enum); - -static const struct snd_kcontrol_new sb_tx1_mux = - SOC_DAPM_ENUM("SLIM TX1 MUX Mux", sb_tx1_mux_enum); - -static const struct snd_kcontrol_new sb_tx2_mux = - SOC_DAPM_ENUM("SLIM TX2 MUX Mux", sb_tx2_mux_enum); - -static const struct snd_kcontrol_new sb_tx3_mux = - SOC_DAPM_ENUM("SLIM TX3 MUX Mux", sb_tx3_mux_enum); - -static const struct snd_kcontrol_new sb_tx4_mux = - SOC_DAPM_ENUM("SLIM TX4 MUX Mux", sb_tx4_mux_enum); - -static const struct snd_kcontrol_new sb_tx5_mux = - SOC_DAPM_ENUM("SLIM TX5 MUX Mux", sb_tx5_mux_enum); - -static const struct snd_kcontrol_new sb_tx6_mux = - SOC_DAPM_ENUM("SLIM TX6 MUX Mux", sb_tx6_mux_enum); - -static const struct snd_kcontrol_new sb_tx7_mux = - SOC_DAPM_ENUM("SLIM TX7 MUX Mux", sb_tx7_mux_enum); - -static const struct snd_kcontrol_new sb_tx8_mux = - SOC_DAPM_ENUM("SLIM TX8 MUX Mux", sb_tx8_mux_enum); - -static const struct snd_kcontrol_new sb_tx9_mux = - SOC_DAPM_ENUM("SLIM TX9 MUX Mux", sb_tx9_mux_enum); - -static const struct snd_kcontrol_new sb_tx10_mux = - SOC_DAPM_ENUM("SLIM TX10 MUX Mux", sb_tx10_mux_enum); - - -static int wcd9330_put_dec_enum(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_dapm_widget_list *wlist = - dapm_kcontrol_get_wlist(kcontrol); - struct snd_soc_dapm_widget *w = wlist->widgets[0]; - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; - unsigned int dec_mux, decimator; - char *dec_name = NULL; - char *widget_name = NULL; - char *temp; - u16 tx_mux_ctl_reg; - u8 adc_dmic_sel = 0x0; - int ret = 0; - char *dec; - - if (ucontrol->value.enumerated.item[0] >= e->items) - return -EINVAL; - - dec_mux = ucontrol->value.enumerated.item[0]; - - widget_name = kstrndup(w->name, 15, GFP_KERNEL); - if (!widget_name) - return -ENOMEM; - temp = widget_name; - - dec_name = strsep(&widget_name, " "); - widget_name = temp; - if (!dec_name) { - pr_err("%s: Invalid decimator = %s\n", __func__, w->name); - ret = -EINVAL; - goto out; - } - dec = strpbrk(dec_name, "123456789"); - if (!dec) { - dev_err(w->dapm->dev, "%s: decimator index not found\n", - __func__); - ret = -EINVAL; - goto out; - } - ret = kstrtouint(dec, 10, &decimator); - if (ret < 0) { - pr_err("%s: Invalid decimator = %s\n", __func__, dec_name); - ret = -EINVAL; - goto out; - } - - dev_dbg(w->dapm->dev, "%s(): widget = %s decimator = %u dec_mux = %u\n" - , __func__, w->name, decimator, dec_mux); - - - switch (decimator) { - case 1: - case 2: - case 3: - case 4: - case 5: - case 6: - if (dec_mux == 1) - adc_dmic_sel = 0x1; - else - adc_dmic_sel = 0x0; - break; - case 7: - case 8: - case 9: - case 10: - if ((dec_mux == 1) || (dec_mux == 2)) - adc_dmic_sel = 0x1; - else - 
adc_dmic_sel = 0x0; - break; - default: - pr_err("%s: Invalid Decimator = %u\n", __func__, decimator); - ret = -EINVAL; - goto out; - } - - tx_mux_ctl_reg = TOMTOM_A_CDC_TX1_MUX_CTL + 8 * (decimator - 1); - - snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x1, adc_dmic_sel); - - ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol); - -out: - kfree(widget_name); - return ret; -} - -#define WCD9330_DEC_ENUM(xname, xenum) \ -{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ - .info = snd_soc_info_enum_double, \ - .get = snd_soc_dapm_get_enum_double, \ - .put = wcd9330_put_dec_enum, \ - .private_value = (unsigned long)&xenum } - -static const struct snd_kcontrol_new dec1_mux = - WCD9330_DEC_ENUM("DEC1 MUX Mux", dec1_mux_enum); - -static const struct snd_kcontrol_new dec2_mux = - WCD9330_DEC_ENUM("DEC2 MUX Mux", dec2_mux_enum); - -static const struct snd_kcontrol_new dec3_mux = - WCD9330_DEC_ENUM("DEC3 MUX Mux", dec3_mux_enum); - -static const struct snd_kcontrol_new dec4_mux = - WCD9330_DEC_ENUM("DEC4 MUX Mux", dec4_mux_enum); - -static const struct snd_kcontrol_new dec5_mux = - WCD9330_DEC_ENUM("DEC5 MUX Mux", dec5_mux_enum); - -static const struct snd_kcontrol_new dec6_mux = - WCD9330_DEC_ENUM("DEC6 MUX Mux", dec6_mux_enum); - -static const struct snd_kcontrol_new dec7_mux = - WCD9330_DEC_ENUM("DEC7 MUX Mux", dec7_mux_enum); - -static const struct snd_kcontrol_new dec8_mux = - WCD9330_DEC_ENUM("DEC8 MUX Mux", dec8_mux_enum); - -static const struct snd_kcontrol_new dec9_mux = - WCD9330_DEC_ENUM("DEC9 MUX Mux", dec9_mux_enum); - -static const struct snd_kcontrol_new dec10_mux = - WCD9330_DEC_ENUM("DEC10 MUX Mux", dec10_mux_enum); - -static const struct snd_kcontrol_new iir1_inp1_mux = - SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum); - -static const struct snd_kcontrol_new iir2_inp1_mux = - SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum); - -static const struct snd_kcontrol_new iir1_inp2_mux = - SOC_DAPM_ENUM("IIR1 INP2 Mux", iir1_inp2_mux_enum); - -static const struct snd_kcontrol_new iir2_inp2_mux = - SOC_DAPM_ENUM("IIR2 INP2 Mux", iir2_inp2_mux_enum); - -static const struct snd_kcontrol_new iir1_inp3_mux = - SOC_DAPM_ENUM("IIR1 INP3 Mux", iir1_inp3_mux_enum); - -static const struct snd_kcontrol_new iir2_inp3_mux = - SOC_DAPM_ENUM("IIR2 INP3 Mux", iir2_inp3_mux_enum); - -static const struct snd_kcontrol_new iir1_inp4_mux = - SOC_DAPM_ENUM("IIR1 INP4 Mux", iir1_inp4_mux_enum); - -static const struct snd_kcontrol_new iir2_inp4_mux = - SOC_DAPM_ENUM("IIR2 INP4 Mux", iir2_inp4_mux_enum); - -static const struct snd_kcontrol_new anc1_mux = - SOC_DAPM_ENUM("ANC1 MUX Mux", anc1_mux_enum); - -static const struct snd_kcontrol_new anc2_mux = - SOC_DAPM_ENUM("ANC2 MUX Mux", anc2_mux_enum); - -static const struct snd_kcontrol_new anc1_fb_mux = - SOC_DAPM_ENUM("ANC1 FB MUX Mux", anc1_fb_mux_enum); - -static const struct snd_kcontrol_new dac1_switch[] = { - SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_EAR_EN, 5, 1, 0) -}; -static const struct snd_kcontrol_new hphl_switch[] = { - SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_HPH_L_DAC_CTL, 6, 1, 0) -}; - -static const struct snd_kcontrol_new hphl_pa_mix[] = { - SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN, - 7, 1, 0), -}; - -static const struct snd_kcontrol_new hphr_pa_mix[] = { - SOC_DAPM_SINGLE("AUX_PGA_R Switch", TOMTOM_A_RX_PA_AUX_IN_CONN, - 6, 1, 0), -}; - -static const struct snd_kcontrol_new ear_pa_mix[] = { - SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN, - 5, 1, 0), -}; -static const struct snd_kcontrol_new 
lineout1_pa_mix[] = { - SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN, - 4, 1, 0), -}; - -static const struct snd_kcontrol_new lineout2_pa_mix[] = { - SOC_DAPM_SINGLE("AUX_PGA_R Switch", TOMTOM_A_RX_PA_AUX_IN_CONN, - 3, 1, 0), -}; - -static const struct snd_kcontrol_new lineout3_pa_mix[] = { - SOC_DAPM_SINGLE("AUX_PGA_L Switch", TOMTOM_A_RX_PA_AUX_IN_CONN, - 2, 1, 0), -}; - -static const struct snd_kcontrol_new lineout4_pa_mix[] = { - SOC_DAPM_SINGLE("AUX_PGA_R Switch", TOMTOM_A_RX_PA_AUX_IN_CONN, - 1, 1, 0), -}; - -static const struct snd_kcontrol_new lineout3_ground_switch = - SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_LINE_3_DAC_CTL, 6, 1, 0); - -static const struct snd_kcontrol_new lineout4_ground_switch = - SOC_DAPM_SINGLE("Switch", TOMTOM_A_RX_LINE_4_DAC_CTL, 6, 1, 0); - -static const struct snd_kcontrol_new aif4_mad_switch = - SOC_DAPM_SINGLE("Switch", TOMTOM_A_SVASS_CLKRST_CTL, 0, 1, 0); - -static const struct snd_kcontrol_new aif4_vi_switch = - SOC_DAPM_SINGLE("Switch", TOMTOM_A_SPKR1_PROT_EN, 3, 1, 0); - -/* virtual port entries */ -static int slim_tx_mixer_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_dapm_widget_list *wlist = - dapm_kcontrol_get_wlist(kcontrol); - struct snd_soc_dapm_widget *widget = wlist->widgets[0]; - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - - ucontrol->value.integer.value[0] = tomtom_p->tx_port_value; - return 0; -} - -static int slim_tx_mixer_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_dapm_widget_list *wlist = - dapm_kcontrol_get_wlist(kcontrol); - struct snd_soc_dapm_widget *widget = wlist->widgets[0]; - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent); - struct snd_soc_dapm_update *update = NULL; - struct soc_multi_mixer_control *mixer = - ((struct soc_multi_mixer_control *)kcontrol->private_value); - u32 dai_id = widget->shift; - u32 port_id = mixer->shift; - u32 enable = ucontrol->value.integer.value[0]; - u32 vtable = vport_check_table[dai_id]; - - - pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__, - widget->name, ucontrol->id.name, tomtom_p->tx_port_value, - widget->shift, ucontrol->value.integer.value[0]); - - mutex_lock(&tomtom_p->codec_mutex); - - if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) { - if (dai_id != AIF1_CAP) { - dev_err(codec->dev, "%s: invalid AIF for I2C mode\n", - __func__); - mutex_unlock(&tomtom_p->codec_mutex); - return -EINVAL; - } - } - switch (dai_id) { - case AIF1_CAP: - case AIF2_CAP: - case AIF3_CAP: - /* only add to the list if value not set - */ - if (enable && !(tomtom_p->tx_port_value & 1 << port_id)) { - - if (tomtom_p->intf_type == - WCD9XXX_INTERFACE_TYPE_SLIMBUS) - vtable = vport_check_table[dai_id]; - if (tomtom_p->intf_type == - WCD9XXX_INTERFACE_TYPE_I2C) - vtable = vport_i2s_check_table[dai_id]; - - if (wcd9xxx_tx_vport_validation( - vtable, - port_id, - tomtom_p->dai, NUM_CODEC_DAIS)) { - dev_dbg(codec->dev, "%s: TX%u is used by other virtual port\n", - __func__, port_id + 1); - mutex_unlock(&tomtom_p->codec_mutex); - return 0; - } - tomtom_p->tx_port_value |= 1 << port_id; - list_add_tail(&core->tx_chs[port_id].list, - &tomtom_p->dai[dai_id].wcd9xxx_ch_list - ); - } else if (!enable && (tomtom_p->tx_port_value & - 1 << port_id)) 
{ - tomtom_p->tx_port_value &= ~(1 << port_id); - list_del_init(&core->tx_chs[port_id].list); - } else { - if (enable) - dev_dbg(codec->dev, "%s: TX%u port is used by\n" - "this virtual port\n", - __func__, port_id + 1); - else - dev_dbg(codec->dev, "%s: TX%u port is not used by\n" - "this virtual port\n", - __func__, port_id + 1); - /* avoid update power function */ - mutex_unlock(&tomtom_p->codec_mutex); - return 0; - } - break; - default: - pr_err("Unknown AIF %d\n", dai_id); - mutex_unlock(&tomtom_p->codec_mutex); - return -EINVAL; - } - pr_debug("%s: name %s sname %s updated value %u shift %d\n", __func__, - widget->name, widget->sname, tomtom_p->tx_port_value, - widget->shift); - - mutex_unlock(&tomtom_p->codec_mutex); - snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, enable, update); - - return 0; -} - -static int slim_rx_mux_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_dapm_widget_list *wlist = - dapm_kcontrol_get_wlist(kcontrol); - struct snd_soc_dapm_widget *widget = wlist->widgets[0]; - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - - ucontrol->value.enumerated.item[0] = tomtom_p->rx_port_value; - return 0; -} - -static const char *const slim_rx_mux_text[] = { - "ZERO", "AIF1_PB", "AIF2_PB", "AIF3_PB" -}; - -static int slim_rx_mux_put(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - struct snd_soc_dapm_widget_list *wlist = - dapm_kcontrol_get_wlist(kcontrol); - struct snd_soc_dapm_widget *widget = wlist->widgets[0]; - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(widget->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx *core = dev_get_drvdata(codec->dev->parent); - struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; - struct snd_soc_dapm_update *update = NULL; - u32 port_id = widget->shift; - - pr_debug("%s: wname %s cname %s value %u shift %d item %ld\n", __func__, - widget->name, ucontrol->id.name, tomtom_p->rx_port_value, - widget->shift, ucontrol->value.integer.value[0]); - - tomtom_p->rx_port_value = ucontrol->value.enumerated.item[0]; - - mutex_lock(&tomtom_p->codec_mutex); - - if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) { - if (tomtom_p->rx_port_value > 2) { - dev_err(codec->dev, "%s: invalid AIF for I2C mode\n", - __func__); - goto err; - } - } - /* value need to match the Virtual port and AIF number - */ - switch (tomtom_p->rx_port_value) { - case 0: - list_del_init(&core->rx_chs[port_id].list); - break; - case 1: - if (wcd9xxx_rx_vport_validation(port_id + - TOMTOM_RX_PORT_START_NUMBER, - &tomtom_p->dai[AIF1_PB].wcd9xxx_ch_list)) { - dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n", - __func__, port_id + 1); - goto rtn; - } - list_add_tail(&core->rx_chs[port_id].list, - &tomtom_p->dai[AIF1_PB].wcd9xxx_ch_list); - break; - case 2: - if (wcd9xxx_rx_vport_validation(port_id + - TOMTOM_RX_PORT_START_NUMBER, - &tomtom_p->dai[AIF2_PB].wcd9xxx_ch_list)) { - dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n", - __func__, port_id + 1); - goto rtn; - } - list_add_tail(&core->rx_chs[port_id].list, - &tomtom_p->dai[AIF2_PB].wcd9xxx_ch_list); - break; - case 3: - if (wcd9xxx_rx_vport_validation(port_id + - TOMTOM_RX_PORT_START_NUMBER, - &tomtom_p->dai[AIF3_PB].wcd9xxx_ch_list)) { - dev_dbg(codec->dev, "%s: RX%u is used by current requesting AIF_PB itself\n", - __func__, port_id + 1); - 
goto rtn; - } - list_add_tail(&core->rx_chs[port_id].list, - &tomtom_p->dai[AIF3_PB].wcd9xxx_ch_list); - break; - default: - pr_err("Unknown AIF %d\n", tomtom_p->rx_port_value); - goto err; - } -rtn: - mutex_unlock(&tomtom_p->codec_mutex); - snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, - tomtom_p->rx_port_value, e, update); - - return 0; -err: - mutex_unlock(&tomtom_p->codec_mutex); - return -EINVAL; -} - -static const struct soc_enum slim_rx_mux_enum = - SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(slim_rx_mux_text), slim_rx_mux_text); - -static const struct snd_kcontrol_new slim_rx_mux[TOMTOM_RX_MAX] = { - SOC_DAPM_ENUM_EXT("SLIM RX1 Mux", slim_rx_mux_enum, - slim_rx_mux_get, slim_rx_mux_put), - SOC_DAPM_ENUM_EXT("SLIM RX2 Mux", slim_rx_mux_enum, - slim_rx_mux_get, slim_rx_mux_put), - SOC_DAPM_ENUM_EXT("SLIM RX3 Mux", slim_rx_mux_enum, - slim_rx_mux_get, slim_rx_mux_put), - SOC_DAPM_ENUM_EXT("SLIM RX4 Mux", slim_rx_mux_enum, - slim_rx_mux_get, slim_rx_mux_put), - SOC_DAPM_ENUM_EXT("SLIM RX5 Mux", slim_rx_mux_enum, - slim_rx_mux_get, slim_rx_mux_put), - SOC_DAPM_ENUM_EXT("SLIM RX6 Mux", slim_rx_mux_enum, - slim_rx_mux_get, slim_rx_mux_put), - SOC_DAPM_ENUM_EXT("SLIM RX7 Mux", slim_rx_mux_enum, - slim_rx_mux_get, slim_rx_mux_put), - SOC_DAPM_ENUM_EXT("SLIM RX8 Mux", slim_rx_mux_enum, - slim_rx_mux_get, slim_rx_mux_put), -}; - -static const struct snd_kcontrol_new aif1_cap_mixer[] = { - SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TOMTOM_TX1, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TOMTOM_TX2, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TOMTOM_TX3, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TOMTOM_TX4, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TOMTOM_TX5, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TOMTOM_TX6, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TOMTOM_TX7, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TOMTOM_TX8, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TOMTOM_TX9, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TOMTOM_TX10, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), -}; - -static const struct snd_kcontrol_new aif2_cap_mixer[] = { - SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TOMTOM_TX1, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TOMTOM_TX2, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TOMTOM_TX3, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TOMTOM_TX4, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TOMTOM_TX5, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TOMTOM_TX6, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TOMTOM_TX7, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TOMTOM_TX8, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TOMTOM_TX9, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TOMTOM_TX10, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), -}; - -static const struct snd_kcontrol_new 
aif3_cap_mixer[] = { - SOC_SINGLE_EXT("SLIM TX1", SND_SOC_NOPM, TOMTOM_TX1, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX2", SND_SOC_NOPM, TOMTOM_TX2, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX3", SND_SOC_NOPM, TOMTOM_TX3, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX4", SND_SOC_NOPM, TOMTOM_TX4, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX5", SND_SOC_NOPM, TOMTOM_TX5, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX6", SND_SOC_NOPM, TOMTOM_TX6, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX7", SND_SOC_NOPM, TOMTOM_TX7, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX8", SND_SOC_NOPM, TOMTOM_TX8, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX9", SND_SOC_NOPM, TOMTOM_TX9, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), - SOC_SINGLE_EXT("SLIM TX10", SND_SOC_NOPM, TOMTOM_TX10, 1, 0, - slim_tx_mixer_get, slim_tx_mixer_put), -}; - -static void tomtom_codec_enable_adc_block(struct snd_soc_codec *codec, - int enable) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s %d\n", __func__, enable); - - if (enable) { - tomtom->adc_count++; - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL, - 0x2, 0x2); - } else { - tomtom->adc_count--; - if (!tomtom->adc_count) - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL, - 0x2, 0x0); - } -} - -static int tomtom_codec_enable_adc(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - u16 adc_reg; - u16 tx_fe_clkdiv_reg; - u8 tx_fe_clkdiv_mask; - u8 init_bit_shift; - u8 bit_pos; - - pr_debug("%s %d\n", __func__, event); - - switch (w->reg) { - case TOMTOM_A_TX_1_GAIN: - adc_reg = TOMTOM_A_TX_1_2_TEST_CTL; - tx_fe_clkdiv_reg = TOMTOM_A_TX_1_2_TXFE_CLKDIV; - tx_fe_clkdiv_mask = 0x0F; - init_bit_shift = 7; - bit_pos = ADC1_TXFE; - break; - case TOMTOM_A_TX_2_GAIN: - adc_reg = TOMTOM_A_TX_1_2_TEST_CTL; - tx_fe_clkdiv_reg = TOMTOM_A_TX_1_2_TXFE_CLKDIV; - tx_fe_clkdiv_mask = 0xF0; - init_bit_shift = 6; - bit_pos = ADC2_TXFE; - break; - case TOMTOM_A_TX_3_GAIN: - adc_reg = TOMTOM_A_TX_3_4_TEST_CTL; - init_bit_shift = 7; - tx_fe_clkdiv_reg = TOMTOM_A_TX_3_4_TXFE_CKDIV; - tx_fe_clkdiv_mask = 0x0F; - bit_pos = ADC3_TXFE; - break; - case TOMTOM_A_TX_4_GAIN: - adc_reg = TOMTOM_A_TX_3_4_TEST_CTL; - init_bit_shift = 6; - tx_fe_clkdiv_reg = TOMTOM_A_TX_3_4_TXFE_CKDIV; - tx_fe_clkdiv_mask = 0xF0; - bit_pos = ADC4_TXFE; - break; - case TOMTOM_A_TX_5_GAIN: - adc_reg = TOMTOM_A_TX_5_6_TEST_CTL; - init_bit_shift = 7; - tx_fe_clkdiv_reg = TOMTOM_A_TX_5_6_TXFE_CKDIV; - tx_fe_clkdiv_mask = 0x0F; - bit_pos = ADC5_TXFE; - break; - case TOMTOM_A_TX_6_GAIN: - adc_reg = TOMTOM_A_TX_5_6_TEST_CTL; - init_bit_shift = 6; - tx_fe_clkdiv_reg = TOMTOM_A_TX_5_6_TXFE_CKDIV; - tx_fe_clkdiv_mask = 0xF0; - bit_pos = ADC6_TXFE; - break; - default: - pr_err("%s: Error, invalid adc register\n", __func__); - return -EINVAL; - } - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - snd_soc_update_bits(codec, tx_fe_clkdiv_reg, tx_fe_clkdiv_mask, - 0x0); - set_bit(bit_pos, &priv->status_mask); - tomtom_codec_enable_adc_block(codec, 1); - snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, - 1 << init_bit_shift); - break; - case SND_SOC_DAPM_POST_PMU: - snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 
0x00); - break; - case SND_SOC_DAPM_POST_PMD: - tomtom_codec_enable_adc_block(codec, 0); - break; - } - return 0; -} - -static int tomtom_codec_ext_clk_en(struct snd_soc_codec *codec, - int enable, bool dapm) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - if (!tomtom->codec_ext_clk_en_cb) { - dev_err(codec->dev, - "%s: Invalid ext_clk_callback\n", - __func__); - return -EINVAL; - } - - return tomtom->codec_ext_clk_en_cb(codec, enable, dapm); -} - -static int __tomtom_mclk_enable(struct tomtom_priv *tomtom, int mclk_enable) -{ - int ret = 0; - - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - if (mclk_enable) { - tomtom->ext_clk_users++; - if (tomtom->ext_clk_users > 1) - goto bg_clk_unlock; - ret = clk_prepare_enable(tomtom->wcd_ext_clk); - if (ret) { - pr_err("%s: ext clk enable failed\n", - __func__); - tomtom->ext_clk_users--; - goto bg_clk_unlock; - } - wcd9xxx_resmgr_get_bandgap(&tomtom->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr, WCD9XXX_CLK_MCLK); - } else { - tomtom->ext_clk_users--; - if (tomtom->ext_clk_users == 0) { - /* Put clock and BG */ - wcd9xxx_resmgr_put_clk_block(&tomtom->resmgr, - WCD9XXX_CLK_MCLK); - wcd9xxx_resmgr_put_bandgap(&tomtom->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - clk_disable_unprepare(tomtom->wcd_ext_clk); - } - } -bg_clk_unlock: - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - - return ret; -} - -int tomtom_codec_mclk_enable(struct snd_soc_codec *codec, - int enable, bool dapm) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - if (tomtom->wcd_ext_clk) { - dev_dbg(codec->dev, "%s: mclk_enable = %u, dapm = %d\n", - __func__, enable, dapm); - return __tomtom_mclk_enable(tomtom, enable); - } else if (tomtom->codec_ext_clk_en_cb) - return tomtom_codec_ext_clk_en(codec, enable, dapm); - else { - dev_err(codec->dev, - "%s: Cannot turn on MCLK\n", - __func__); - return -EINVAL; - } -} -EXPORT_SYMBOL(tomtom_codec_mclk_enable); - -static int tomtom_codec_get_ext_clk_users(struct tomtom_priv *tomtom) -{ - if (tomtom->wcd_ext_clk) - return tomtom->ext_clk_users; - else if (tomtom->codec_get_ext_clk_cnt) - return tomtom->codec_get_ext_clk_cnt(); - else - return 0; -} - -/* tomtom_codec_internal_rco_ctrl( ) - * Make sure that BG_CLK_LOCK is not acquired. 
Exit if acquired to avoid - * potential deadlock as ext_clk_en_cb() also tries to acquire the same - * lock to enable MCLK for RCO calibration - */ -static int tomtom_codec_internal_rco_ctrl(struct snd_soc_codec *codec, - bool enable) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - int ret = 0; - - if (enable) { - if (wcd9xxx_resmgr_get_clk_type(&tomtom->resmgr) == - WCD9XXX_CLK_RCO) { - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr, - WCD9XXX_CLK_RCO); - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - } else { - tomtom_codec_mclk_enable(codec, true, false); - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - tomtom->resmgr.ext_clk_users = - tomtom_codec_get_ext_clk_users(tomtom); - wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr, - WCD9XXX_CLK_RCO); - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - tomtom_codec_mclk_enable(codec, false, false); - } - - } else { - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - wcd9xxx_resmgr_put_clk_block(&tomtom->resmgr, - WCD9XXX_CLK_RCO); - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - } - - return ret; -} - -static int tomtom_codec_enable_aux_pga(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s: %d\n", __func__, event); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - wcd9xxx_resmgr_get_bandgap(&tomtom->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - /* AUX PGA requires RCO or MCLK */ - tomtom_codec_internal_rco_ctrl(codec, true); - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 1); - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - break; - - case SND_SOC_DAPM_POST_PMD: - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 0); - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - tomtom_codec_internal_rco_ctrl(codec, false); - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - wcd9xxx_resmgr_put_bandgap(&tomtom->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - break; - } - return 0; -} - -static int tomtom_codec_enable_lineout(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - u16 lineout_gain_reg; - - pr_debug("%s %d %s\n", __func__, event, w->name); - - switch (w->shift) { - case 0: - lineout_gain_reg = TOMTOM_A_RX_LINE_1_GAIN; - break; - case 1: - lineout_gain_reg = TOMTOM_A_RX_LINE_2_GAIN; - break; - case 2: - lineout_gain_reg = TOMTOM_A_RX_LINE_3_GAIN; - break; - case 3: - lineout_gain_reg = TOMTOM_A_RX_LINE_4_GAIN; - break; - default: - pr_err("%s: Error, incorrect lineout register value\n", - __func__); - return -EINVAL; - } - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x40); - break; - case SND_SOC_DAPM_POST_PMU: - wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d, - WCD9XXX_CLSH_STATE_LO, - WCD9XXX_CLSH_REQ_ENABLE, - WCD9XXX_CLSH_EVENT_POST_PA); - pr_debug("%s: sleeping 5 ms after %s PA turn on\n", - __func__, w->name); - /* Wait for CnP time after PA enable */ - usleep_range(5000, 5100); - break; - case SND_SOC_DAPM_POST_PMD: - snd_soc_update_bits(codec, lineout_gain_reg, 0x40, 0x00); - pr_debug("%s: sleeping 5 ms after %s PA turn off\n", - __func__, w->name); - /* Wait for CnP time 
after PA disable */ - usleep_range(5000, 5100); - break; - } - return 0; -} - -static int tomtom_codec_enable_spk_pa(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - u16 spk_drv_reg; - - pr_debug("%s: %d %s\n", __func__, event, w->name); - if (strnstr(w->name, "SPK2 PA", sizeof("SPK2 PA"))) - spk_drv_reg = TOMTOM_A_SPKR_DRV2_EN; - else - spk_drv_reg = TOMTOM_A_SPKR_DRV1_EN; - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - tomtom->spkr_pa_widget_on = true; - snd_soc_update_bits(codec, spk_drv_reg, 0x80, 0x80); - break; - case SND_SOC_DAPM_POST_PMD: - tomtom->spkr_pa_widget_on = false; - snd_soc_update_bits(codec, spk_drv_reg, 0x80, 0x00); - break; - } - return 0; -} - -static u8 tomtom_get_dmic_clk_val(struct snd_soc_codec *codec, - u32 mclk_rate, u32 dmic_clk_rate) -{ - u32 div_factor; - u8 dmic_ctl_val; - - dev_dbg(codec->dev, - "%s: mclk_rate = %d, dmic_sample_rate = %d\n", - __func__, mclk_rate, dmic_clk_rate); - - /* Default value to return in case of error */ - if (mclk_rate == TOMTOM_MCLK_CLK_9P6MHZ) - dmic_ctl_val = WCD9330_DMIC_CLK_DIV_2; - else - dmic_ctl_val = WCD9330_DMIC_CLK_DIV_3; - - if (dmic_clk_rate == 0) { - dev_err(codec->dev, - "%s: dmic_sample_rate cannot be 0\n", - __func__); - goto done; - } - - div_factor = mclk_rate / dmic_clk_rate; - switch (div_factor) { - case 2: - dmic_ctl_val = WCD9330_DMIC_CLK_DIV_2; - break; - case 3: - dmic_ctl_val = WCD9330_DMIC_CLK_DIV_3; - break; - case 4: - dmic_ctl_val = WCD9330_DMIC_CLK_DIV_4; - break; - case 6: - dmic_ctl_val = WCD9330_DMIC_CLK_DIV_6; - break; - case 16: - dmic_ctl_val = WCD9330_DMIC_CLK_DIV_16; - break; - default: - dev_err(codec->dev, - "%s: Invalid div_factor %u, clk_rate(%u), dmic_rate(%u)\n", - __func__, div_factor, mclk_rate, dmic_clk_rate); - break; - } - -done: - return dmic_ctl_val; -} - -static int tomtom_codec_enable_dmic(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx_pdata *pdata = tomtom->resmgr.pdata; - u8 dmic_clk_en; - u16 dmic_clk_reg; - s32 *dmic_clk_cnt; - u8 dmic_rate_val, dmic_rate_shift; - unsigned int dmic; - int ret; - char *wname; - - wname = strpbrk(w->name, "123456"); - if (!wname) { - dev_err(codec->dev, "%s: widget not found\n", __func__); - return -EINVAL; - } - - ret = kstrtouint(wname, 10, &dmic); - if (ret < 0) { - pr_err("%s: Invalid DMIC line on the codec\n", __func__); - return -EINVAL; - } - - switch (dmic) { - case 1: - case 2: - dmic_clk_en = 0x01; - dmic_clk_cnt = &(tomtom->dmic_1_2_clk_cnt); - dmic_clk_reg = TOMTOM_A_DMIC_B1_CTL; - dmic_rate_shift = 5; - pr_debug("%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n", - __func__, event, dmic, *dmic_clk_cnt); - - break; - - case 3: - case 4: - dmic_clk_en = 0x02; - dmic_clk_cnt = &(tomtom->dmic_3_4_clk_cnt); - dmic_clk_reg = TOMTOM_A_DMIC_B2_CTL; - dmic_rate_shift = 1; - pr_debug("%s() event %d DMIC%d dmic_3_4_clk_cnt %d\n", - __func__, event, dmic, *dmic_clk_cnt); - break; - - case 5: - case 6: - dmic_clk_en = 0x04; - dmic_clk_cnt = &(tomtom->dmic_5_6_clk_cnt); - dmic_clk_reg = TOMTOM_A_DMIC_B2_CTL; - dmic_rate_shift = 4; - pr_debug("%s() event %d DMIC%d dmic_5_6_clk_cnt %d\n", - __func__, event, dmic, *dmic_clk_cnt); - - break; - - default: - pr_err("%s: Invalid DMIC Selection\n", __func__); - return 
-EINVAL; - } - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - - dmic_rate_val = - tomtom_get_dmic_clk_val(codec, - pdata->mclk_rate, - pdata->dmic_sample_rate); - - (*dmic_clk_cnt)++; - if (*dmic_clk_cnt == 1) { - snd_soc_update_bits(codec, dmic_clk_reg, - 0x07 << dmic_rate_shift, - dmic_rate_val << dmic_rate_shift); - snd_soc_update_bits(codec, TOMTOM_A_DMIC_B1_CTL, - dmic_clk_en, dmic_clk_en); - } - - break; - case SND_SOC_DAPM_POST_PMD: - - dmic_rate_val = - tomtom_get_dmic_clk_val(codec, - pdata->mclk_rate, - pdata->mad_dmic_sample_rate); - (*dmic_clk_cnt)--; - if (*dmic_clk_cnt == 0) { - snd_soc_update_bits(codec, TOMTOM_A_DMIC_B1_CTL, - dmic_clk_en, 0); - snd_soc_update_bits(codec, dmic_clk_reg, - 0x07 << dmic_rate_shift, - dmic_rate_val << dmic_rate_shift); - } - break; - } - return 0; -} - -static int tomtom_codec_config_mad(struct snd_soc_codec *codec) -{ - int ret = 0; - const struct firmware *fw; - struct firmware_cal *hwdep_cal = NULL; - struct mad_audio_cal *mad_cal; - const void *data; - const char *filename = TOMTOM_MAD_AUDIO_FIRMWARE_PATH; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - size_t cal_size; - int idx; - - pr_debug("%s: enter\n", __func__); - - if (!tomtom->fw_data) { - dev_err(codec->dev, "%s: invalid cal data\n", - __func__); - return -ENODEV; - } - - hwdep_cal = wcdcal_get_fw_cal(tomtom->fw_data, WCD9XXX_MAD_CAL); - if (hwdep_cal) { - data = hwdep_cal->data; - cal_size = hwdep_cal->size; - dev_dbg(codec->dev, "%s: using hwdep calibration\n", - __func__); - } else { - ret = request_firmware(&fw, filename, codec->dev); - if (ret != 0) { - pr_err("Failed to acquire MAD firwmare data %s: %d\n", - filename, ret); - return -ENODEV; - } - if (!fw) { - dev_err(codec->dev, "failed to get mad fw"); - return -ENODEV; - } - data = fw->data; - cal_size = fw->size; - dev_dbg(codec->dev, "%s: using request_firmware calibration\n", - __func__); - } - if (cal_size < sizeof(struct mad_audio_cal)) { - pr_err("%s: incorrect hwdep cal size %zu\n", - __func__, cal_size); - ret = -ENOMEM; - goto err; - } - - mad_cal = (struct mad_audio_cal *)(data); - if (!mad_cal) { - dev_err(codec->dev, "%s: Invalid calibration data\n", - __func__); - ret = -EINVAL; - goto err; - } - - snd_soc_write(codec, TOMTOM_A_CDC_MAD_MAIN_CTL_2, - mad_cal->microphone_info.cycle_time); - snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_MAIN_CTL_1, 0xFF << 3, - ((uint16_t)mad_cal->microphone_info.settle_time) - << 3); - - /* Audio */ - snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_8, - mad_cal->audio_info.rms_omit_samples); - snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_1, - 0x07 << 4, mad_cal->audio_info.rms_comp_time << 4); - snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_2, 0x03 << 2, - mad_cal->audio_info.detection_mechanism << 2); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_7, - mad_cal->audio_info.rms_diff_threshold & 0x3F); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_5, - mad_cal->audio_info.rms_threshold_lsb); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_CTL_6, - mad_cal->audio_info.rms_threshold_msb); - - for (idx = 0; idx < ARRAY_SIZE(mad_cal->audio_info.iir_coefficients); - idx++) { - snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR, - 0x3F, idx); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL, - mad_cal->audio_info.iir_coefficients[idx]); - dev_dbg(codec->dev, "%s:MAD Audio IIR Coef[%d] = 0X%x", - __func__, idx, - mad_cal->audio_info.iir_coefficients[idx]); - } - - /* Beacon */ - snd_soc_write(codec, 
TOMTOM_A_CDC_MAD_BEACON_CTL_8, - mad_cal->beacon_info.rms_omit_samples); - snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_1, - 0x07 << 4, mad_cal->beacon_info.rms_comp_time); - snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_2, 0x03 << 2, - mad_cal->beacon_info.detection_mechanism << 2); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_7, - mad_cal->beacon_info.rms_diff_threshold & 0x1F); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_5, - mad_cal->beacon_info.rms_threshold_lsb); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_6, - mad_cal->beacon_info.rms_threshold_msb); - - /* Ultrasound */ - snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_BEACON_CTL_1, - 0x07 << 4, mad_cal->beacon_info.rms_comp_time); - snd_soc_update_bits(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_2, 0x03 << 2, - mad_cal->ultrasound_info.detection_mechanism); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_7, - mad_cal->ultrasound_info.rms_diff_threshold & 0x1F); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_5, - mad_cal->ultrasound_info.rms_threshold_lsb); - snd_soc_write(codec, TOMTOM_A_CDC_MAD_ULTR_CTL_6, - mad_cal->ultrasound_info.rms_threshold_msb); - - /* Set MAD intr time to 20 msec */ - snd_soc_update_bits(codec, 0x4E, 0x01F, 0x13); - - pr_debug("%s: leave ret %d\n", __func__, ret); -err: - if (!hwdep_cal) - release_firmware(fw); - return ret; -} - -static int tomtom_codec_enable_mad(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - int ret = 0; - u8 mad_micb, mad_cfilt; - u16 mad_cfilt_reg; - - mad_micb = snd_soc_read(codec, TOMTOM_A_MAD_ANA_CTRL) & 0x07; - switch (mad_micb) { - case 1: - mad_cfilt = tomtom->resmgr.pdata->micbias.bias1_cfilt_sel; - break; - case 2: - mad_cfilt = tomtom->resmgr.pdata->micbias.bias2_cfilt_sel; - break; - case 3: - mad_cfilt = tomtom->resmgr.pdata->micbias.bias3_cfilt_sel; - break; - case 4: - mad_cfilt = tomtom->resmgr.pdata->micbias.bias4_cfilt_sel; - break; - default: - dev_err(codec->dev, - "%s: Invalid micbias selection 0x%x\n", - __func__, mad_micb); - return -EINVAL; - } - - switch (mad_cfilt) { - case WCD9XXX_CFILT1_SEL: - mad_cfilt_reg = TOMTOM_A_MICB_CFILT_1_VAL; - break; - case WCD9XXX_CFILT2_SEL: - mad_cfilt_reg = TOMTOM_A_MICB_CFILT_2_VAL; - break; - case WCD9XXX_CFILT3_SEL: - mad_cfilt_reg = TOMTOM_A_MICB_CFILT_3_VAL; - break; - default: - dev_err(codec->dev, - "%s: invalid cfilt 0x%x for micb 0x%x\n", - __func__, mad_cfilt, mad_micb); - return -EINVAL; - } - - dev_dbg(codec->dev, - "%s event = %d, mad_cfilt_reg = 0x%x\n", - __func__, event, mad_cfilt_reg); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - /* Undo reset for MAD */ - snd_soc_update_bits(codec, TOMTOM_A_SVASS_CLKRST_CTL, - 0x02, 0x00); - - ret = tomtom_codec_config_mad(codec); - if (ret) { - pr_err("%s: Failed to config MAD\n", __func__); - break; - } - - /* setup MAD micbias to VDDIO */ - snd_soc_update_bits(codec, mad_cfilt_reg, - 0x02, 0x02); - break; - case SND_SOC_DAPM_POST_PMD: - /* Reset the MAD block */ - snd_soc_update_bits(codec, TOMTOM_A_SVASS_CLKRST_CTL, - 0x02, 0x02); - - /* Undo setup of MAD micbias to VDDIO */ - snd_soc_update_bits(codec, mad_cfilt_reg, - 0x02, 0x00); - } - return ret; -} - -static int tomtom_codec_enable_micbias(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = 
snd_soc_codec_get_drvdata(codec); - u16 micb_int_reg = 0, micb_ctl_reg = 0; - u8 cfilt_sel_val = 0; - char *internal1_text = "Internal1"; - char *internal2_text = "Internal2"; - char *internal3_text = "Internal3"; - enum wcd9xxx_notify_event e_post_off, e_pre_on, e_post_on; - - pr_debug("%s: w->name %s event %d\n", __func__, w->name, event); - if (strnstr(w->name, "MIC BIAS1", sizeof("MIC BIAS1"))) { - micb_ctl_reg = TOMTOM_A_MICB_1_CTL; - micb_int_reg = TOMTOM_A_MICB_1_INT_RBIAS; - cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias1_cfilt_sel; - e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_1_ON; - e_post_on = WCD9XXX_EVENT_POST_MICBIAS_1_ON; - e_post_off = WCD9XXX_EVENT_POST_MICBIAS_1_OFF; - } else if (strnstr(w->name, "MIC BIAS2", sizeof("MIC BIAS2"))) { - micb_ctl_reg = TOMTOM_A_MICB_2_CTL; - micb_int_reg = TOMTOM_A_MICB_2_INT_RBIAS; - cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias2_cfilt_sel; - e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_2_ON; - e_post_on = WCD9XXX_EVENT_POST_MICBIAS_2_ON; - e_post_off = WCD9XXX_EVENT_POST_MICBIAS_2_OFF; - } else if (strnstr(w->name, "MIC BIAS3", sizeof("MIC BIAS3"))) { - micb_ctl_reg = TOMTOM_A_MICB_3_CTL; - micb_int_reg = TOMTOM_A_MICB_3_INT_RBIAS; - cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias3_cfilt_sel; - e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_3_ON; - e_post_on = WCD9XXX_EVENT_POST_MICBIAS_3_ON; - e_post_off = WCD9XXX_EVENT_POST_MICBIAS_3_OFF; - } else if (strnstr(w->name, "MIC BIAS4", sizeof("MIC BIAS4"))) { - micb_ctl_reg = TOMTOM_A_MICB_4_CTL; - micb_int_reg = tomtom->resmgr.reg_addr->micb_4_int_rbias; - cfilt_sel_val = tomtom->resmgr.pdata->micbias.bias4_cfilt_sel; - e_pre_on = WCD9XXX_EVENT_PRE_MICBIAS_4_ON; - e_post_on = WCD9XXX_EVENT_POST_MICBIAS_4_ON; - e_post_off = WCD9XXX_EVENT_POST_MICBIAS_4_OFF; - } else { - pr_err("%s: Error, invalid micbias %s\n", __func__, w->name); - return -EINVAL; - } - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - /* Let MBHC module know so micbias switch to be off */ - wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_pre_on); - - /* Get cfilt */ - wcd9xxx_resmgr_cfilt_get(&tomtom->resmgr, cfilt_sel_val); - - if (strnstr(w->name, internal1_text, 30)) - snd_soc_update_bits(codec, micb_int_reg, 0xE0, 0xE0); - else if (strnstr(w->name, internal2_text, 30)) - snd_soc_update_bits(codec, micb_int_reg, 0x1C, 0x1C); - else if (strnstr(w->name, internal3_text, 30)) - snd_soc_update_bits(codec, micb_int_reg, 0x3, 0x3); - else - /* - * If not internal, make sure to write the - * register to default value - */ - snd_soc_write(codec, micb_int_reg, 0x24); - if (tomtom->mbhc_started && micb_ctl_reg == - TOMTOM_A_MICB_2_CTL) { - if (++tomtom->micb_2_users == 1) { - if (tomtom->resmgr.pdata-> - micbias.bias2_is_headset_only) - wcd9xxx_resmgr_add_cond_update_bits( - &tomtom->resmgr, - WCD9XXX_COND_HPH_MIC, - micb_ctl_reg, w->shift, - false); - else - snd_soc_update_bits(codec, micb_ctl_reg, - 1 << w->shift, - 1 << w->shift); - } - pr_debug("%s: micb_2_users %d\n", __func__, - tomtom->micb_2_users); - } else { - snd_soc_update_bits(codec, micb_ctl_reg, 1 << w->shift, - 1 << w->shift); - } - break; - case SND_SOC_DAPM_POST_PMU: - usleep_range(5000, 5100); - /* Let MBHC module know so micbias is on */ - wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_post_on); - break; - case SND_SOC_DAPM_POST_PMD: - if (tomtom->mbhc_started && micb_ctl_reg == - TOMTOM_A_MICB_2_CTL) { - if (--tomtom->micb_2_users == 0) { - if (tomtom->resmgr.pdata-> - micbias.bias2_is_headset_only) - wcd9xxx_resmgr_rm_cond_update_bits( - &tomtom->resmgr, - 
WCD9XXX_COND_HPH_MIC, - micb_ctl_reg, 7, false); - else - snd_soc_update_bits(codec, micb_ctl_reg, - 1 << w->shift, 0); - } - pr_debug("%s: micb_2_users %d\n", __func__, - tomtom->micb_2_users); - WARN(tomtom->micb_2_users < 0, - "Unexpected micbias users %d\n", - tomtom->micb_2_users); - } else { - snd_soc_update_bits(codec, micb_ctl_reg, 1 << w->shift, - 0); - } - - /* Let MBHC module know so micbias switch to be off */ - wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_post_off); - - if (strnstr(w->name, internal1_text, 30)) - snd_soc_update_bits(codec, micb_int_reg, 0x80, 0x00); - else if (strnstr(w->name, internal2_text, 30)) - snd_soc_update_bits(codec, micb_int_reg, 0x10, 0x00); - else if (strnstr(w->name, internal3_text, 30)) - snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x0); - - /* Put cfilt */ - wcd9xxx_resmgr_cfilt_put(&tomtom->resmgr, cfilt_sel_val); - break; - } - - return 0; -} - -/* called under codec_resource_lock acquisition */ -static int tomtom_enable_mbhc_micbias(struct snd_soc_codec *codec, bool enable, - enum wcd9xxx_micbias_num micb_num) -{ - int rc; - - if (micb_num != MBHC_MICBIAS2) { - dev_err(codec->dev, "%s: Unsupported micbias, micb_num=%d\n", - __func__, micb_num); - return -EINVAL; - } - - if (enable) - rc = snd_soc_dapm_force_enable_pin( - snd_soc_codec_get_dapm(codec), - DAPM_MICBIAS2_EXTERNAL_STANDALONE); - else - rc = snd_soc_dapm_disable_pin(snd_soc_codec_get_dapm(codec), - DAPM_MICBIAS2_EXTERNAL_STANDALONE); - if (!rc) - snd_soc_dapm_sync(snd_soc_codec_get_dapm(codec)); - pr_debug("%s: leave ret %d\n", __func__, rc); - return rc; -} - -static void txfe_clkdiv_update(struct snd_soc_codec *codec) -{ - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - - if (test_bit(ADC1_TXFE, &priv->status_mask)) { - snd_soc_update_bits(codec, TOMTOM_A_TX_1_2_TXFE_CLKDIV, - 0x0F, 0x05); - clear_bit(ADC1_TXFE, &priv->status_mask); - } - if (test_bit(ADC2_TXFE, &priv->status_mask)) { - snd_soc_update_bits(codec, TOMTOM_A_TX_1_2_TXFE_CLKDIV, - 0xF0, 0x50); - clear_bit(ADC2_TXFE, &priv->status_mask); - } - if (test_bit(ADC3_TXFE, &priv->status_mask)) { - snd_soc_update_bits(codec, TOMTOM_A_TX_3_4_TXFE_CKDIV, - 0x0F, 0x05); - clear_bit(ADC3_TXFE, &priv->status_mask); - } - if (test_bit(ADC4_TXFE, &priv->status_mask)) { - snd_soc_update_bits(codec, TOMTOM_A_TX_3_4_TXFE_CKDIV, - 0xF0, 0x50); - clear_bit(ADC4_TXFE, &priv->status_mask); - } - if (test_bit(ADC5_TXFE, &priv->status_mask)) { - snd_soc_update_bits(codec, TOMTOM_A_TX_5_6_TXFE_CKDIV, - 0x0F, 0x05); - clear_bit(ADC5_TXFE, &priv->status_mask); - } - if (test_bit(ADC6_TXFE, &priv->status_mask)) { - snd_soc_update_bits(codec, TOMTOM_A_TX_5_6_TXFE_CKDIV, - 0xF0, 0x50); - clear_bit(ADC6_TXFE, &priv->status_mask); - } -} - -static void tx_hpf_corner_freq_callback(struct work_struct *work) -{ - struct delayed_work *hpf_delayed_work; - struct hpf_work *hpf_work; - struct tomtom_priv *tomtom; - struct snd_soc_codec *codec; - u16 tx_mux_ctl_reg; - u8 hpf_cut_of_freq; - - hpf_delayed_work = to_delayed_work(work); - hpf_work = container_of(hpf_delayed_work, struct hpf_work, dwork); - tomtom = hpf_work->tomtom; - codec = hpf_work->tomtom->codec; - hpf_cut_of_freq = hpf_work->tx_hpf_cut_of_freq; - - tx_mux_ctl_reg = TOMTOM_A_CDC_TX1_MUX_CTL + - (hpf_work->decimator - 1) * 8; - - pr_debug("%s(): decimator %u hpf_cut_of_freq 0x%x\n", __func__, - hpf_work->decimator, (unsigned int)hpf_cut_of_freq); - - /* - * Restore TXFE ClkDiv registers to default. 
- * If any of these registers are modified during analog - * front-end enablement, they will be restored back to the - * default - */ - txfe_clkdiv_update(codec); - - snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30, hpf_cut_of_freq << 4); -} - -#define TX_MUX_CTL_CUT_OFF_FREQ_MASK 0x30 -#define CF_MIN_3DB_4HZ 0x0 -#define CF_MIN_3DB_75HZ 0x1 -#define CF_MIN_3DB_150HZ 0x2 - -static int tomtom_codec_enable_dec(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - unsigned int decimator; - char *dec_name = NULL; - char *widget_name = NULL; - char *temp; - int ret = 0; - u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg; - u8 dec_hpf_cut_of_freq; - int offset; - char *dec; - - pr_debug("%s %d\n", __func__, event); - - widget_name = kstrndup(w->name, 15, GFP_KERNEL); - if (!widget_name) - return -ENOMEM; - temp = widget_name; - - dec_name = strsep(&widget_name, " "); - widget_name = temp; - if (!dec_name) { - pr_err("%s: Invalid decimator = %s\n", __func__, w->name); - ret = -EINVAL; - goto out; - } - - dec = strpbrk(dec_name, "123456789"); - if (!dec) { - dev_err(codec->dev, "%s: decimator index not found\n", - __func__); - ret = -EINVAL; - goto out; - } - - ret = kstrtouint(dec, 10, &decimator); - if (ret < 0) { - pr_err("%s: Invalid decimator = %s\n", __func__, dec_name); - ret = -EINVAL; - goto out; - } - - pr_debug("%s(): widget = %s dec_name = %s decimator = %u\n", __func__, - w->name, dec_name, decimator); - - if (w->reg == TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL) { - dec_reset_reg = TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL; - offset = 0; - } else if (w->reg == TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL) { - dec_reset_reg = TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL; - offset = 8; - } else { - pr_err("%s: Error, incorrect dec\n", __func__); - return -EINVAL; - } - - tx_vol_ctl_reg = TOMTOM_A_CDC_TX1_VOL_CTL_CFG + 8 * (decimator - 1); - tx_mux_ctl_reg = TOMTOM_A_CDC_TX1_MUX_CTL + 8 * (decimator - 1); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - - /* Enableable TX digital mute */ - snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01); - - snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, - 1 << w->shift); - snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0); - - pr_debug("%s: decimator = %u, bypass = %d\n", __func__, - decimator, tx_hpf_work[decimator - 1].tx_hpf_bypass); - if (tx_hpf_work[decimator - 1].tx_hpf_bypass != true) { - dec_hpf_cut_of_freq = snd_soc_read(codec, - tx_mux_ctl_reg); - - dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq & 0x30) >> 4; - - tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq = - dec_hpf_cut_of_freq; - - if (dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ) { - - /* set cut of freq to CF_MIN_3DB_150HZ (0x1); */ - snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30, - CF_MIN_3DB_150HZ << 4); - } - - /* enable HPF */ - snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x00); - } else - /* bypass HPF */ - snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08); - - break; - - case SND_SOC_DAPM_POST_PMU: - - /* Disable TX digital mute */ - snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00); - - if ((tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq != - CF_MIN_3DB_150HZ) && - (tx_hpf_work[decimator - 1].tx_hpf_bypass != true)) { - - schedule_delayed_work(&tx_hpf_work[decimator - 1].dwork, - msecs_to_jiffies(300)); - } - /* apply the digital gain after the decimator is enabled*/ - if ((w->shift + offset) < ARRAY_SIZE(tx_digital_gain_reg)) - snd_soc_write(codec, - tx_digital_gain_reg[w->shift + offset], 
- snd_soc_read(codec, - tx_digital_gain_reg[w->shift + offset]) - ); - - break; - - case SND_SOC_DAPM_PRE_PMD: - - snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01); - cancel_delayed_work_sync(&tx_hpf_work[decimator - 1].dwork); - break; - - case SND_SOC_DAPM_POST_PMD: - - snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08); - snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30, - (tx_hpf_work[decimator - 1].tx_hpf_cut_of_freq) << 4); - - break; - } -out: - kfree(widget_name); - return ret; -} - -static int tomtom_codec_enable_vdd_spkr(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - int ret = 0; - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s: %d %s\n", __func__, event, w->name); - - WARN_ONCE(!priv->spkdrv_reg, "SPKDRV supply %s isn't defined\n", - WCD9XXX_VDD_SPKDRV_NAME); - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - if (priv->spkdrv_reg) { - ret = regulator_enable(priv->spkdrv_reg); - if (ret) - pr_err("%s: Failed to enable spkdrv_reg %s\n", - __func__, WCD9XXX_VDD_SPKDRV_NAME); - } - break; - case SND_SOC_DAPM_POST_PMD: - if (priv->spkdrv_reg) { - ret = regulator_disable(priv->spkdrv_reg); - if (ret) - pr_err("%s: Failed to disable spkdrv_reg %s\n", - __func__, WCD9XXX_VDD_SPKDRV_NAME); - } - break; - } - - return ret; -} - -static int tomtom_codec_enable_vdd_spkr2(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - int ret = 0; - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s: %d %s\n", __func__, event, w->name); - - /* - * If on-demand voltage regulators of spkr1 and spkr2 has been derived - * from same power rail then same on-demand voltage regulator can be - * used by both spkr1 and spkr2, if a separate device tree entry has - * not been defined for on-demand voltage regulator for spkr2. 
- */ - if (!priv->spkdrv2_reg) { - if (priv->spkdrv_reg) { - priv->spkdrv2_reg = priv->spkdrv_reg; - } else { - WARN_ONCE(!priv->spkdrv2_reg, - "SPKDRV2 supply %s isn't defined\n", - WCD9XXX_VDD_SPKDRV2_NAME); - return 0; - } - } - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - if (priv->spkdrv2_reg) { - ret = regulator_enable(priv->spkdrv2_reg); - if (ret) - pr_err("%s: Failed to enable spkdrv2_reg %s ret:%d\n", - __func__, WCD9XXX_VDD_SPKDRV2_NAME, ret); - } - break; - case SND_SOC_DAPM_POST_PMD: - if (priv->spkdrv2_reg) { - ret = regulator_disable(priv->spkdrv2_reg); - if (ret) - pr_err("%s: Failed to disable spkdrv2_reg %s ret:%d\n", - __func__, WCD9XXX_VDD_SPKDRV2_NAME, ret); - } - break; - } - - return ret; -} - -static int tomtom_codec_enable_interpolator(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - - pr_debug("%s %d %s\n", __func__, event, w->name); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_RESET_CTL, - 1 << w->shift, 1 << w->shift); - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_RESET_CTL, - 1 << w->shift, 0x0); - break; - case SND_SOC_DAPM_POST_PMU: - /* apply the digital gain after the interpolator is enabled*/ - if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg)) - snd_soc_write(codec, - rx_digital_gain_reg[w->shift], - snd_soc_read(codec, - rx_digital_gain_reg[w->shift]) - ); - /* Check for Rx1 and Rx2 paths for uhqa mode update */ - if (w->shift == 0 || w->shift == 1) - tomtom_update_uhqa_mode(codec, (1 << w->shift)); - - break; - } - return 0; -} - -/* called under codec_resource_lock acquisition */ -static int __tomtom_codec_enable_ldo_h(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s: enter\n", __func__); - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - /* - * ldo_h_users is protected by tomtom->codec_mutex, don't need - * additional mutex - */ - if (++priv->ldo_h_users == 1) { - WCD9XXX_BG_CLK_LOCK(&priv->resmgr); - wcd9xxx_resmgr_get_bandgap(&priv->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr); - tomtom_codec_internal_rco_ctrl(codec, true); - snd_soc_update_bits(codec, TOMTOM_A_LDO_H_MODE_1, - 1 << 7, 1 << 7); - tomtom_codec_internal_rco_ctrl(codec, false); - pr_debug("%s: ldo_h_users %d\n", __func__, - priv->ldo_h_users); - /* LDO enable requires 1ms to settle down */ - usleep_range(1000, 1100); - } - break; - case SND_SOC_DAPM_POST_PMD: - if (--priv->ldo_h_users == 0) { - tomtom_codec_internal_rco_ctrl(codec, true); - snd_soc_update_bits(codec, TOMTOM_A_LDO_H_MODE_1, - 1 << 7, 0); - tomtom_codec_internal_rco_ctrl(codec, false); - WCD9XXX_BG_CLK_LOCK(&priv->resmgr); - wcd9xxx_resmgr_put_bandgap(&priv->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - WCD9XXX_BG_CLK_UNLOCK(&priv->resmgr); - pr_debug("%s: ldo_h_users %d\n", __func__, - priv->ldo_h_users); - } - WARN(priv->ldo_h_users < 0, "Unexpected ldo_h users %d\n", - priv->ldo_h_users); - break; - } - pr_debug("%s: leave\n", __func__); - return 0; -} - -static int tomtom_codec_enable_ldo_h(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - int rc; - - rc = __tomtom_codec_enable_ldo_h(w, kcontrol, event); - return rc; -} - -static int tomtom_codec_enable_rx_bias(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - 
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s %d\n", __func__, event); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 1); - break; - case SND_SOC_DAPM_POST_PMD: - wcd9xxx_resmgr_enable_rx_bias(&tomtom->resmgr, 0); - break; - } - return 0; -} - -static int tomtom_codec_enable_anc(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - const char *filename; - const struct firmware *fw; - int i; - int ret = 0; - int num_anc_slots; - struct wcd9xxx_anc_header *anc_head; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - struct firmware_cal *hwdep_cal = NULL; - u32 anc_writes_size = 0; - u32 anc_cal_size = 0; - int anc_size_remaining; - u32 *anc_ptr; - u16 reg; - u8 mask, val, old_val; - size_t cal_size; - const void *data; - - if (tomtom->anc_func == 0) - return 0; - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - filename = "wcd9320/wcd9320_anc.bin"; - - hwdep_cal = wcdcal_get_fw_cal(tomtom->fw_data, WCD9XXX_ANC_CAL); - if (hwdep_cal) { - data = hwdep_cal->data; - cal_size = hwdep_cal->size; - dev_dbg(codec->dev, "%s: using hwdep calibration\n", - __func__); - } else { - ret = request_firmware(&fw, filename, codec->dev); - if (ret != 0) { - dev_err(codec->dev, "Failed to acquire ANC data: %d\n", - ret); - return -ENODEV; - } - if (!fw) { - dev_err(codec->dev, "failed to get anc fw"); - return -ENODEV; - } - data = fw->data; - cal_size = fw->size; - dev_dbg(codec->dev, "%s: using request_firmware calibration\n", - __func__); - } - if (cal_size < sizeof(struct wcd9xxx_anc_header)) { - dev_err(codec->dev, "Not enough data\n"); - ret = -ENOMEM; - goto err; - } - /* First number is the number of register writes */ - anc_head = (struct wcd9xxx_anc_header *)(data); - anc_ptr = (u32 *)(data + - sizeof(struct wcd9xxx_anc_header)); - anc_size_remaining = cal_size - - sizeof(struct wcd9xxx_anc_header); - num_anc_slots = anc_head->num_anc_slots; - - if (tomtom->anc_slot >= num_anc_slots) { - dev_err(codec->dev, "Invalid ANC slot selected\n"); - ret = -EINVAL; - goto err; - } - for (i = 0; i < num_anc_slots; i++) { - if (anc_size_remaining < TOMTOM_PACKED_REG_SIZE) { - dev_err(codec->dev, "Invalid register format\n"); - ret = -EINVAL; - goto err; - } - anc_writes_size = (u32)(*anc_ptr); - anc_size_remaining -= sizeof(u32); - anc_ptr += 1; - - if (anc_writes_size * TOMTOM_PACKED_REG_SIZE - > anc_size_remaining) { - dev_err(codec->dev, "Invalid register format\n"); - ret = -EINVAL; - goto err; - } - - if (tomtom->anc_slot == i) - break; - - anc_size_remaining -= (anc_writes_size * - TOMTOM_PACKED_REG_SIZE); - anc_ptr += anc_writes_size; - } - if (i == num_anc_slots) { - dev_err(codec->dev, "Selected ANC slot not present\n"); - ret = -EINVAL; - goto err; - } - - i = 0; - anc_cal_size = anc_writes_size; - if (w->reg == TOMTOM_A_RX_HPH_L_DAC_CTL) { - snd_soc_update_bits(codec, - TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x03, 0x03); - anc_writes_size = (anc_cal_size/2); - } - - if (w->reg == TOMTOM_A_RX_HPH_R_DAC_CTL) { - snd_soc_update_bits(codec, - TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x0C, 0x0C); - i = (anc_cal_size/2); - anc_writes_size = anc_cal_size; - } - - for (; i < anc_writes_size; i++) { - TOMTOM_CODEC_UNPACK_ENTRY(anc_ptr[i], reg, - mask, val); - /* - * ANC Soft reset register is ignored from ACDB - * because ANC left soft reset bits will be called - * while 
enabling ANC HPH Right DAC. - */ - if ((reg == TOMTOM_A_CDC_CLK_ANC_RESET_CTL) && - ((w->reg == TOMTOM_A_RX_HPH_L_DAC_CTL) || - (w->reg == TOMTOM_A_RX_HPH_R_DAC_CTL))) { - continue; - } - old_val = snd_soc_read(codec, reg); - snd_soc_write(codec, reg, (old_val & ~mask) | - (val & mask)); - } - if (w->reg == TOMTOM_A_RX_HPH_L_DAC_CTL) - snd_soc_update_bits(codec, - TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x03, 0x00); - - if (w->reg == TOMTOM_A_RX_HPH_R_DAC_CTL) - snd_soc_update_bits(codec, - TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x0C, 0x00); - if (!hwdep_cal) - release_firmware(fw); - txfe_clkdiv_update(codec); - break; - case SND_SOC_DAPM_PRE_PMD: - msleep(40); - snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC1_B1_CTL, 0x01, - 0x00); - snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC2_B1_CTL, 0x02, - 0x00); - msleep(20); - snd_soc_write(codec, TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x0F); - snd_soc_write(codec, TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL, 0); - snd_soc_write(codec, TOMTOM_A_CDC_CLK_ANC_RESET_CTL, 0x00); - break; - } - return 0; -err: - if (!hwdep_cal) - release_firmware(fw); - return ret; -} - -static int tomtom_hphl_dac_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - uint32_t impedl, impedr; - int ret = 0; - - pr_debug("%s %s %d\n", __func__, w->name, event); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - if (tomtom_p->anc_func) { - tomtom_codec_enable_anc(w, kcontrol, event); - msleep(50); - } - - if (!high_perf_mode && !tomtom_p->uhqa_mode) { - wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d, - WCD9XXX_CLSH_STATE_HPHL, - WCD9XXX_CLSH_REQ_ENABLE, - WCD9XXX_CLSH_EVENT_PRE_DAC); - } else { - wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d, - tomtom_p->uhqa_mode, - WCD9XXX_CLSAB_STATE_HPHL, - WCD9XXX_CLSAB_REQ_ENABLE); - } - ret = wcd9xxx_mbhc_get_impedance(&tomtom_p->mbhc, - &impedl, &impedr); - if (!ret) - wcd9xxx_clsh_imped_config(codec, impedl); - else - dev_dbg(codec->dev, "%s: Failed to get mbhc impedance %d\n", - __func__, ret); - break; - case SND_SOC_DAPM_POST_PMU: - snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B3_CTL, 0xBC, 0x94); - snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B4_CTL, 0x30, 0x10); - break; - case SND_SOC_DAPM_PRE_PMD: - snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B3_CTL, 0xBC, 0x00); - snd_soc_update_bits(codec, TOMTOM_A_CDC_RX1_B4_CTL, 0x30, 0x00); - break; - case SND_SOC_DAPM_POST_PMD: - if (!high_perf_mode && !tomtom_p->uhqa_mode) { - wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d, - WCD9XXX_CLSH_STATE_HPHL, - WCD9XXX_CLSH_REQ_DISABLE, - WCD9XXX_CLSH_EVENT_POST_PA); - } else { - wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d, - tomtom_p->uhqa_mode, - WCD9XXX_CLSAB_STATE_HPHL, - WCD9XXX_CLSAB_REQ_DISABLE); - } - break; - } - return 0; -} - -static int tomtom_hphr_dac_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s %s %d\n", __func__, w->name, event); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - if (tomtom_p->anc_func) { - tomtom_codec_enable_anc(w, kcontrol, event); - msleep(50); - } - - snd_soc_update_bits(codec, w->reg, 0x40, 0x40); - if (!high_perf_mode && !tomtom_p->uhqa_mode) { - wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d, - WCD9XXX_CLSH_STATE_HPHR, - WCD9XXX_CLSH_REQ_ENABLE, - WCD9XXX_CLSH_EVENT_PRE_DAC); - } else { - 
wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d, - tomtom_p->uhqa_mode, - WCD9XXX_CLSAB_STATE_HPHR, - WCD9XXX_CLSAB_REQ_ENABLE); - } - break; - case SND_SOC_DAPM_POST_PMU: - snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B3_CTL, 0xBC, 0x94); - snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B4_CTL, 0x30, 0x10); - break; - case SND_SOC_DAPM_PRE_PMD: - snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B3_CTL, 0xBC, 0x00); - snd_soc_update_bits(codec, TOMTOM_A_CDC_RX2_B4_CTL, 0x30, 0x00); - break; - case SND_SOC_DAPM_POST_PMD: - snd_soc_update_bits(codec, w->reg, 0x40, 0x00); - if (!high_perf_mode && !tomtom_p->uhqa_mode) { - wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d, - WCD9XXX_CLSH_STATE_HPHR, - WCD9XXX_CLSH_REQ_DISABLE, - WCD9XXX_CLSH_EVENT_POST_PA); - } else { - wcd9xxx_enable_high_perf_mode(codec, &tomtom_p->clsh_d, - tomtom_p->uhqa_mode, - WCD9XXX_CLSAB_STATE_HPHR, - WCD9XXX_CLSAB_REQ_DISABLE); - } - break; - } - return 0; -} - -static int tomtom_hph_pa_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - enum wcd9xxx_notify_event e_pre_on, e_post_off; - u8 req_clsh_state; - u32 pa_settle_time = TOMTOM_HPH_PA_SETTLE_COMP_OFF; - - pr_debug("%s: %s event = %d\n", __func__, w->name, event); - if (w->shift == 5) { - e_pre_on = WCD9XXX_EVENT_PRE_HPHL_PA_ON; - e_post_off = WCD9XXX_EVENT_POST_HPHL_PA_OFF; - req_clsh_state = WCD9XXX_CLSH_STATE_HPHL; - } else if (w->shift == 4) { - e_pre_on = WCD9XXX_EVENT_PRE_HPHR_PA_ON; - e_post_off = WCD9XXX_EVENT_POST_HPHR_PA_OFF; - req_clsh_state = WCD9XXX_CLSH_STATE_HPHR; - } else { - pr_err("%s: Invalid w->shift %d\n", __func__, w->shift); - return -EINVAL; - } - - if (tomtom->comp_enabled[COMPANDER_1]) - pa_settle_time = TOMTOM_HPH_PA_SETTLE_COMP_ON; - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - set_bit(HPH_DELAY, &tomtom->status_mask); - /* Let MBHC module know PA is turning on */ - wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_pre_on); - break; - - case SND_SOC_DAPM_POST_PMU: - if (test_bit(HPH_DELAY, &tomtom->status_mask)) { - /* - * Make sure to wait 10ms after enabling HPHR_HPHL - * in register 0x1AB - */ - usleep_range(pa_settle_time, pa_settle_time + 1000); - clear_bit(HPH_DELAY, &tomtom->status_mask); - pr_debug("%s: sleep %d us after %s PA enable\n", - __func__, pa_settle_time, w->name); - } - if (!high_perf_mode && !tomtom->uhqa_mode) { - wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d, - req_clsh_state, - WCD9XXX_CLSH_REQ_ENABLE, - WCD9XXX_CLSH_EVENT_POST_PA); - } - break; - - case SND_SOC_DAPM_PRE_PMD: - set_bit(HPH_DELAY, &tomtom->status_mask); - break; - - case SND_SOC_DAPM_POST_PMD: - /* Let MBHC module know PA turned off */ - wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, e_post_off); - if (test_bit(HPH_DELAY, &tomtom->status_mask)) { - /* - * Make sure to wait 10ms after disabling HPHR_HPHL - * in register 0x1AB - */ - usleep_range(pa_settle_time, pa_settle_time + 1000); - clear_bit(HPH_DELAY, &tomtom->status_mask); - pr_debug("%s: sleep %d us after %s PA disable\n", - __func__, pa_settle_time, w->name); - } - - break; - } - return 0; -} - -static int tomtom_codec_enable_anc_hph(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - int ret = 0; - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - ret = tomtom_hph_pa_event(w, kcontrol, event); - break; - case SND_SOC_DAPM_POST_PMU: - if 
((snd_soc_read(codec, TOMTOM_A_RX_HPH_L_DAC_CTL) & 0x80) && - (snd_soc_read(codec, TOMTOM_A_RX_HPH_R_DAC_CTL) - & 0x80)) { - snd_soc_update_bits(codec, - TOMTOM_A_RX_HPH_CNP_EN, 0x30, 0x30); - msleep(30); - } - ret = tomtom_hph_pa_event(w, kcontrol, event); - break; - case SND_SOC_DAPM_PRE_PMD: - if (w->shift == 5) { - snd_soc_update_bits(codec, - TOMTOM_A_RX_HPH_CNP_EN, 0x30, 0x00); - msleep(40); - snd_soc_update_bits(codec, - TOMTOM_A_TX_7_MBHC_EN, 0x80, 00); - ret |= tomtom_codec_enable_anc(w, kcontrol, event); - } - break; - case SND_SOC_DAPM_POST_PMD: - ret = tomtom_hph_pa_event(w, kcontrol, event); - break; - } - return ret; -} - -static const struct snd_soc_dapm_widget tomtom_dapm_i2s_widgets[] = { - SND_SOC_DAPM_SUPPLY("RX_I2S_CLK", TOMTOM_A_CDC_CLK_RX_I2S_CTL, - 4, 0, NULL, 0), - SND_SOC_DAPM_SUPPLY("TX_I2S_CLK", TOMTOM_A_CDC_CLK_TX_I2S_CTL, 4, - 0, NULL, 0), -}; - -static int tomtom_lineout_dac_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s %s %d\n", __func__, w->name, event); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d, - WCD9XXX_CLSH_STATE_LO, - WCD9XXX_CLSH_REQ_ENABLE, - WCD9XXX_CLSH_EVENT_PRE_DAC); - snd_soc_update_bits(codec, w->reg, 0x40, 0x40); - break; - - case SND_SOC_DAPM_POST_PMD: - snd_soc_update_bits(codec, w->reg, 0x40, 0x00); - wcd9xxx_clsh_fsm(codec, &tomtom->clsh_d, - WCD9XXX_CLSH_STATE_LO, - WCD9XXX_CLSH_REQ_DISABLE, - WCD9XXX_CLSH_EVENT_POST_PA); - break; - } - return 0; -} - -static int tomtom_spk_dac_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - - pr_debug("%s %s %d\n", __func__, w->name, event); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL, - 0x80, 0x80); - break; - case SND_SOC_DAPM_POST_PMD: - if ((snd_soc_read(codec, w->reg) & 0x03) == 0) - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL, - 0x80, 0x00); - break; - } - return 0; -} - -static const struct snd_soc_dapm_route audio_i2s_map[] = { - {"SLIM RX1", NULL, "RX_I2S_CLK"}, - {"SLIM RX2", NULL, "RX_I2S_CLK"}, - {"SLIM RX3", NULL, "RX_I2S_CLK"}, - {"SLIM RX4", NULL, "RX_I2S_CLK"}, - - {"SLIM TX7 MUX", NULL, "TX_I2S_CLK"}, - {"SLIM TX8 MUX", NULL, "TX_I2S_CLK"}, - {"SLIM TX9 MUX", NULL, "TX_I2S_CLK"}, - {"SLIM TX10 MUX", NULL, "TX_I2S_CLK"}, - - {"RX_I2S_CLK", NULL, "CDC_I2S_RX_CONN"}, -}; - -static const struct snd_soc_dapm_route audio_map[] = { - /* SLIMBUS Connections */ - {"AIF1 CAP", NULL, "AIF1_CAP Mixer"}, - {"AIF2 CAP", NULL, "AIF2_CAP Mixer"}, - {"AIF3 CAP", NULL, "AIF3_CAP Mixer"}, - - /* VI Feedback */ - {"AIF4 VI", NULL, "VIONOFF"}, - {"VIONOFF", "Switch", "VIINPUT"}, - - /* MAD */ - {"MAD_SEL MUX", "SPE", "MAD_CPE_INPUT"}, - {"MAD_SEL MUX", "MSM", "MADINPUT"}, - {"MADONOFF", "Switch", "MAD_SEL MUX"}, - {"AIF4 MAD", NULL, "MADONOFF"}, - - /* SLIM_MIXER("AIF1_CAP Mixer"),*/ - {"AIF1_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"}, - {"AIF1_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"}, - {"AIF1_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"}, - {"AIF1_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"}, - {"AIF1_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"}, - {"AIF1_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"}, - {"AIF1_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"}, - {"AIF1_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"}, - {"AIF1_CAP Mixer", "SLIM TX9", 
"SLIM TX9 MUX"}, - {"AIF1_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"}, - /* SLIM_MIXER("AIF2_CAP Mixer"),*/ - {"AIF2_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"}, - {"AIF2_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"}, - {"AIF2_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"}, - {"AIF2_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"}, - {"AIF2_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"}, - {"AIF2_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"}, - {"AIF2_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"}, - {"AIF2_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"}, - {"AIF2_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"}, - {"AIF2_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"}, - /* SLIM_MIXER("AIF3_CAP Mixer"),*/ - {"AIF3_CAP Mixer", "SLIM TX1", "SLIM TX1 MUX"}, - {"AIF3_CAP Mixer", "SLIM TX2", "SLIM TX2 MUX"}, - {"AIF3_CAP Mixer", "SLIM TX3", "SLIM TX3 MUX"}, - {"AIF3_CAP Mixer", "SLIM TX4", "SLIM TX4 MUX"}, - {"AIF3_CAP Mixer", "SLIM TX5", "SLIM TX5 MUX"}, - {"AIF3_CAP Mixer", "SLIM TX6", "SLIM TX6 MUX"}, - {"AIF3_CAP Mixer", "SLIM TX7", "SLIM TX7 MUX"}, - {"AIF3_CAP Mixer", "SLIM TX8", "SLIM TX8 MUX"}, - {"AIF3_CAP Mixer", "SLIM TX9", "SLIM TX9 MUX"}, - {"AIF3_CAP Mixer", "SLIM TX10", "SLIM TX10 MUX"}, - - {"SLIM TX1 MUX", "DEC1", "DEC1 MUX"}, - {"SLIM TX1 MUX", "RMIX1", "RX1 MIX1"}, - {"SLIM TX1 MUX", "RMIX2", "RX2 MIX1"}, - {"SLIM TX1 MUX", "RMIX3", "RX3 MIX1"}, - {"SLIM TX1 MUX", "RMIX4", "RX4 MIX1"}, - {"SLIM TX1 MUX", "RMIX5", "RX5 MIX1"}, - {"SLIM TX1 MUX", "RMIX6", "RX6 MIX1"}, - {"SLIM TX1 MUX", "RMIX7", "RX7 MIX1"}, - {"SLIM TX1 MUX", "RMIX8", "RX8 MIX1"}, - - {"SLIM TX2 MUX", "DEC2", "DEC2 MUX"}, - {"SLIM TX2 MUX", "RMIX1", "RX1 MIX1"}, - {"SLIM TX2 MUX", "RMIX2", "RX2 MIX1"}, - {"SLIM TX2 MUX", "RMIX3", "RX3 MIX1"}, - {"SLIM TX2 MUX", "RMIX4", "RX4 MIX1"}, - {"SLIM TX2 MUX", "RMIX5", "RX5 MIX1"}, - {"SLIM TX2 MUX", "RMIX6", "RX6 MIX1"}, - {"SLIM TX2 MUX", "RMIX7", "RX7 MIX1"}, - {"SLIM TX2 MUX", "RMIX8", "RX8 MIX1"}, - - {"SLIM TX3 MUX", "DEC3", "DEC3 MUX"}, - {"SLIM TX3 MUX", "RMIX1", "RX1 MIX1"}, - {"SLIM TX3 MUX", "RMIX2", "RX2 MIX1"}, - {"SLIM TX3 MUX", "RMIX3", "RX3 MIX1"}, - {"SLIM TX3 MUX", "RMIX4", "RX4 MIX1"}, - {"SLIM TX3 MUX", "RMIX5", "RX5 MIX1"}, - {"SLIM TX3 MUX", "RMIX6", "RX6 MIX1"}, - {"SLIM TX3 MUX", "RMIX7", "RX7 MIX1"}, - {"SLIM TX3 MUX", "RMIX8", "RX8 MIX1"}, - - {"SLIM TX4 MUX", "DEC4", "DEC4 MUX"}, - {"SLIM TX4 MUX", "RMIX1", "RX1 MIX1"}, - {"SLIM TX4 MUX", "RMIX2", "RX2 MIX1"}, - {"SLIM TX4 MUX", "RMIX3", "RX3 MIX1"}, - {"SLIM TX4 MUX", "RMIX4", "RX4 MIX1"}, - {"SLIM TX4 MUX", "RMIX5", "RX5 MIX1"}, - {"SLIM TX4 MUX", "RMIX6", "RX6 MIX1"}, - {"SLIM TX4 MUX", "RMIX7", "RX7 MIX1"}, - {"SLIM TX4 MUX", "RMIX8", "RX8 MIX1"}, - - {"SLIM TX5 MUX", "DEC5", "DEC5 MUX"}, - {"SLIM TX5 MUX", "RMIX1", "RX1 MIX1"}, - {"SLIM TX5 MUX", "RMIX2", "RX2 MIX1"}, - {"SLIM TX5 MUX", "RMIX3", "RX3 MIX1"}, - {"SLIM TX5 MUX", "RMIX4", "RX4 MIX1"}, - {"SLIM TX5 MUX", "RMIX5", "RX5 MIX1"}, - {"SLIM TX5 MUX", "RMIX6", "RX6 MIX1"}, - {"SLIM TX5 MUX", "RMIX7", "RX7 MIX1"}, - {"SLIM TX5 MUX", "RMIX8", "RX8 MIX1"}, - - {"SLIM TX6 MUX", "DEC6", "DEC6 MUX"}, - - {"SLIM TX7 MUX", "DEC1", "DEC1 MUX"}, - {"SLIM TX7 MUX", "DEC2", "DEC2 MUX"}, - {"SLIM TX7 MUX", "DEC3", "DEC3 MUX"}, - {"SLIM TX7 MUX", "DEC4", "DEC4 MUX"}, - {"SLIM TX7 MUX", "DEC5", "DEC5 MUX"}, - {"SLIM TX7 MUX", "DEC6", "DEC6 MUX"}, - {"SLIM TX7 MUX", "DEC7", "DEC7 MUX"}, - {"SLIM TX7 MUX", "DEC8", "DEC8 MUX"}, - {"SLIM TX7 MUX", "DEC9", "DEC9 MUX"}, - {"SLIM TX7 MUX", "DEC10", "DEC10 MUX"}, - {"SLIM TX7 MUX", "RMIX1", "RX1 MIX1"}, - {"SLIM TX7 MUX", "RMIX2", "RX2 MIX1"}, - {"SLIM TX7 MUX", "RMIX3", 
"RX3 MIX1"}, - {"SLIM TX7 MUX", "RMIX4", "RX4 MIX1"}, - {"SLIM TX7 MUX", "RMIX5", "RX5 MIX1"}, - {"SLIM TX7 MUX", "RMIX6", "RX6 MIX1"}, - {"SLIM TX7 MUX", "RMIX7", "RX7 MIX1"}, - - {"SLIM TX8 MUX", "DEC1", "DEC1 MUX"}, - {"SLIM TX8 MUX", "DEC2", "DEC2 MUX"}, - {"SLIM TX8 MUX", "DEC3", "DEC3 MUX"}, - {"SLIM TX8 MUX", "DEC4", "DEC4 MUX"}, - {"SLIM TX8 MUX", "DEC5", "DEC5 MUX"}, - {"SLIM TX8 MUX", "DEC6", "DEC6 MUX"}, - {"SLIM TX8 MUX", "DEC7", "DEC7 MUX"}, - {"SLIM TX8 MUX", "DEC8", "DEC8 MUX"}, - {"SLIM TX8 MUX", "DEC9", "DEC9 MUX"}, - {"SLIM TX8 MUX", "DEC10", "DEC10 MUX"}, - - {"SLIM TX9 MUX", "DEC1", "DEC1 MUX"}, - {"SLIM TX9 MUX", "DEC2", "DEC2 MUX"}, - {"SLIM TX9 MUX", "DEC3", "DEC3 MUX"}, - {"SLIM TX9 MUX", "DEC4", "DEC4 MUX"}, - {"SLIM TX9 MUX", "DEC5", "DEC5 MUX"}, - {"SLIM TX9 MUX", "DEC6", "DEC6 MUX"}, - {"SLIM TX9 MUX", "DEC7", "DEC7 MUX"}, - {"SLIM TX9 MUX", "DEC8", "DEC8 MUX"}, - {"SLIM TX9 MUX", "DEC9", "DEC9 MUX"}, - {"SLIM TX9 MUX", "DEC10", "DEC10 MUX"}, - - {"SLIM TX10 MUX", "DEC1", "DEC1 MUX"}, - {"SLIM TX10 MUX", "DEC2", "DEC2 MUX"}, - {"SLIM TX10 MUX", "DEC3", "DEC3 MUX"}, - {"SLIM TX10 MUX", "DEC4", "DEC4 MUX"}, - {"SLIM TX10 MUX", "DEC5", "DEC5 MUX"}, - {"SLIM TX10 MUX", "DEC6", "DEC6 MUX"}, - {"SLIM TX10 MUX", "DEC7", "DEC7 MUX"}, - {"SLIM TX10 MUX", "DEC8", "DEC8 MUX"}, - {"SLIM TX10 MUX", "DEC9", "DEC9 MUX"}, - {"SLIM TX10 MUX", "DEC10", "DEC10 MUX"}, - - /* Earpiece (RX MIX1) */ - {"EAR", NULL, "EAR PA"}, - {"EAR PA", NULL, "EAR_PA_MIXER"}, - {"EAR_PA_MIXER", NULL, "DAC1"}, - {"DAC1", NULL, "RX_BIAS"}, - - {"ANC EAR", NULL, "ANC EAR PA"}, - {"ANC EAR PA", NULL, "EAR_PA_MIXER"}, - {"ANC1 FB MUX", "EAR_HPH_L", "RX1 MIX2"}, - {"ANC1 FB MUX", "EAR_LINE_1", "RX2 MIX2"}, - - /* Headset (RX MIX1 and RX MIX2) */ - {"HEADPHONE", NULL, "HPHL"}, - {"HEADPHONE", NULL, "HPHR"}, - - {"HPHL", NULL, "HPHL_PA_MIXER"}, - {"HPHL_PA_MIXER", NULL, "HPHL DAC"}, - {"HPHL DAC", NULL, "RX_BIAS"}, - - {"HPHR", NULL, "HPHR_PA_MIXER"}, - {"HPHR_PA_MIXER", NULL, "HPHR DAC"}, - {"HPHR DAC", NULL, "RX_BIAS"}, - - {"ANC HEADPHONE", NULL, "ANC HPHL"}, - {"ANC HEADPHONE", NULL, "ANC HPHR"}, - - {"ANC HPHL", NULL, "HPHL_PA_MIXER"}, - {"ANC HPHR", NULL, "HPHR_PA_MIXER"}, - - {"ANC1 MUX", "ADC1", "ADC1"}, - {"ANC1 MUX", "ADC2", "ADC2"}, - {"ANC1 MUX", "ADC3", "ADC3"}, - {"ANC1 MUX", "ADC4", "ADC4"}, - {"ANC1 MUX", "ADC5", "ADC5"}, - {"ANC1 MUX", "ADC6", "ADC6"}, - {"ANC1 MUX", "DMIC1", "DMIC1"}, - {"ANC1 MUX", "DMIC2", "DMIC2"}, - {"ANC1 MUX", "DMIC3", "DMIC3"}, - {"ANC1 MUX", "DMIC4", "DMIC4"}, - {"ANC1 MUX", "DMIC5", "DMIC5"}, - {"ANC1 MUX", "DMIC6", "DMIC6"}, - {"ANC2 MUX", "ADC1", "ADC1"}, - {"ANC2 MUX", "ADC2", "ADC2"}, - {"ANC2 MUX", "ADC3", "ADC3"}, - {"ANC2 MUX", "ADC4", "ADC4"}, - {"ANC2 MUX", "ADC5", "ADC5"}, - {"ANC2 MUX", "ADC6", "ADC6"}, - {"ANC2 MUX", "DMIC1", "DMIC1"}, - {"ANC2 MUX", "DMIC2", "DMIC2"}, - {"ANC2 MUX", "DMIC3", "DMIC3"}, - {"ANC2 MUX", "DMIC4", "DMIC4"}, - {"ANC2 MUX", "DMIC5", "DMIC5"}, - {"ANC2 MUX", "DMIC6", "DMIC6"}, - - {"ANC HPHR", NULL, "CDC_CONN"}, - - {"DAC1", "Switch", "CLASS_H_DSM MUX"}, - {"HPHL DAC", "Switch", "CLASS_H_DSM MUX"}, - {"HPHR DAC", NULL, "RX2 CHAIN"}, - - {"LINEOUT1", NULL, "LINEOUT1 PA"}, - {"LINEOUT2", NULL, "LINEOUT2 PA"}, - {"LINEOUT3", NULL, "LINEOUT3 PA"}, - {"LINEOUT4", NULL, "LINEOUT4 PA"}, - {"SPK_OUT", NULL, "SPK PA"}, - {"SPK_OUT", NULL, "SPK2 PA"}, - - {"LINEOUT1 PA", NULL, "LINEOUT1_PA_MIXER"}, - {"LINEOUT1_PA_MIXER", NULL, "LINEOUT1 DAC"}, - - {"LINEOUT2 PA", NULL, "LINEOUT2_PA_MIXER"}, - {"LINEOUT2_PA_MIXER", NULL, "LINEOUT2 
DAC"}, - - {"LINEOUT3 PA", NULL, "LINEOUT3_PA_MIXER"}, - {"LINEOUT3_PA_MIXER", NULL, "LINEOUT3 DAC"}, - - {"LINEOUT4 PA", NULL, "LINEOUT4_PA_MIXER"}, - {"LINEOUT4_PA_MIXER", NULL, "LINEOUT4 DAC"}, - - {"LINEOUT1 DAC", NULL, "RX3 MIX1"}, - - {"RDAC5 MUX", "DEM3_INV", "RX3 MIX1"}, - {"RDAC5 MUX", "DEM4", "RX4 MIX1"}, - - {"LINEOUT3 DAC", NULL, "RDAC5 MUX"}, - - {"LINEOUT2 DAC", NULL, "RX5 MIX1"}, - - {"RDAC7 MUX", "DEM5_INV", "RX5 MIX1"}, - {"RDAC7 MUX", "DEM6", "RX6 MIX1"}, - - {"LINEOUT4 DAC", NULL, "RDAC7 MUX"}, - - {"SPK PA", NULL, "SPK DAC"}, - {"SPK DAC", NULL, "RX7 MIX2"}, - {"SPK DAC", NULL, "VDD_SPKDRV"}, - - {"SPK2 PA", NULL, "SPK2 DAC"}, - {"SPK2 DAC", NULL, "RX8 MIX1"}, - {"SPK2 DAC", NULL, "VDD_SPKDRV2"}, - - {"CLASS_H_DSM MUX", "DSM_HPHL_RX1", "RX1 CHAIN"}, - - {"RX1 INTERP", NULL, "RX1 MIX2"}, - {"RX1 CHAIN", NULL, "RX1 INTERP"}, - {"RX2 INTERP", NULL, "RX2 MIX2"}, - {"RX2 CHAIN", NULL, "RX2 INTERP"}, - {"RX1 MIX2", NULL, "ANC1 MUX"}, - {"RX2 MIX2", NULL, "ANC2 MUX"}, - - {"LINEOUT1 DAC", NULL, "RX_BIAS"}, - {"LINEOUT2 DAC", NULL, "RX_BIAS"}, - {"LINEOUT3 DAC", NULL, "RX_BIAS"}, - {"LINEOUT4 DAC", NULL, "RX_BIAS"}, - {"SPK DAC", NULL, "RX_BIAS"}, - {"SPK2 DAC", NULL, "RX_BIAS"}, - - {"RX7 MIX1", NULL, "COMP0_CLK"}, - {"RX8 MIX1", NULL, "COMP0_CLK"}, - {"RX1 MIX1", NULL, "COMP1_CLK"}, - {"RX2 MIX1", NULL, "COMP1_CLK"}, - {"RX3 MIX1", NULL, "COMP2_CLK"}, - {"RX5 MIX1", NULL, "COMP2_CLK"}, - - {"RX1 MIX1", NULL, "RX1 MIX1 INP1"}, - {"RX1 MIX1", NULL, "RX1 MIX1 INP2"}, - {"RX1 MIX1", NULL, "RX1 MIX1 INP3"}, - {"RX2 MIX1", NULL, "RX2 MIX1 INP1"}, - {"RX2 MIX1", NULL, "RX2 MIX1 INP2"}, - {"RX3 MIX1", NULL, "RX3 MIX1 INP1"}, - {"RX3 MIX1", NULL, "RX3 MIX1 INP2"}, - {"RX4 MIX1", NULL, "RX4 MIX1 INP1"}, - {"RX4 MIX1", NULL, "RX4 MIX1 INP2"}, - {"RX5 MIX1", NULL, "RX5 MIX1 INP1"}, - {"RX5 MIX1", NULL, "RX5 MIX1 INP2"}, - {"RX6 MIX1", NULL, "RX6 MIX1 INP1"}, - {"RX6 MIX1", NULL, "RX6 MIX1 INP2"}, - {"RX7 MIX1", NULL, "RX7 MIX1 INP1"}, - {"RX7 MIX1", NULL, "RX7 MIX1 INP2"}, - {"RX8 MIX1", NULL, "RX8 MIX1 INP1"}, - {"RX8 MIX1", NULL, "RX8 MIX1 INP2"}, - {"RX1 MIX2", NULL, "RX1 MIX1"}, - {"RX1 MIX2", NULL, "RX1 MIX2 INP1"}, - {"RX1 MIX2", NULL, "RX1 MIX2 INP2"}, - {"RX2 MIX2", NULL, "RX2 MIX1"}, - {"RX2 MIX2", NULL, "RX2 MIX2 INP1"}, - {"RX2 MIX2", NULL, "RX2 MIX2 INP2"}, - {"RX7 MIX2", NULL, "RX7 MIX1"}, - {"RX7 MIX2", NULL, "RX7 MIX2 INP1"}, - {"RX7 MIX2", NULL, "RX7 MIX2 INP2"}, - - /* SLIM_MUX("AIF1_PB", "AIF1 PB"),*/ - {"SLIM RX1 MUX", "AIF1_PB", "AIF1 PB"}, - {"SLIM RX2 MUX", "AIF1_PB", "AIF1 PB"}, - {"SLIM RX3 MUX", "AIF1_PB", "AIF1 PB"}, - {"SLIM RX4 MUX", "AIF1_PB", "AIF1 PB"}, - {"SLIM RX5 MUX", "AIF1_PB", "AIF1 PB"}, - {"SLIM RX6 MUX", "AIF1_PB", "AIF1 PB"}, - {"SLIM RX7 MUX", "AIF1_PB", "AIF1 PB"}, - {"SLIM RX8 MUX", "AIF1_PB", "AIF1 PB"}, - /* SLIM_MUX("AIF2_PB", "AIF2 PB"),*/ - {"SLIM RX1 MUX", "AIF2_PB", "AIF2 PB"}, - {"SLIM RX2 MUX", "AIF2_PB", "AIF2 PB"}, - {"SLIM RX3 MUX", "AIF2_PB", "AIF2 PB"}, - {"SLIM RX4 MUX", "AIF2_PB", "AIF2 PB"}, - {"SLIM RX5 MUX", "AIF2_PB", "AIF2 PB"}, - {"SLIM RX6 MUX", "AIF2_PB", "AIF2 PB"}, - {"SLIM RX7 MUX", "AIF2_PB", "AIF2 PB"}, - {"SLIM RX8 MUX", "AIF2_PB", "AIF2 PB"}, - /* SLIM_MUX("AIF3_PB", "AIF3 PB"),*/ - {"SLIM RX1 MUX", "AIF3_PB", "AIF3 PB"}, - {"SLIM RX2 MUX", "AIF3_PB", "AIF3 PB"}, - {"SLIM RX3 MUX", "AIF3_PB", "AIF3 PB"}, - {"SLIM RX4 MUX", "AIF3_PB", "AIF3 PB"}, - {"SLIM RX5 MUX", "AIF3_PB", "AIF3 PB"}, - {"SLIM RX6 MUX", "AIF3_PB", "AIF3 PB"}, - {"SLIM RX7 MUX", "AIF3_PB", "AIF3 PB"}, - {"SLIM RX8 MUX", "AIF3_PB", "AIF3 PB"}, - - 
{"SLIM RX1", NULL, "SLIM RX1 MUX"}, - {"SLIM RX2", NULL, "SLIM RX2 MUX"}, - {"SLIM RX3", NULL, "SLIM RX3 MUX"}, - {"SLIM RX4", NULL, "SLIM RX4 MUX"}, - {"SLIM RX5", NULL, "SLIM RX5 MUX"}, - {"SLIM RX6", NULL, "SLIM RX6 MUX"}, - {"SLIM RX7", NULL, "SLIM RX7 MUX"}, - {"SLIM RX8", NULL, "SLIM RX8 MUX"}, - - {"RX1 MIX1 INP1", "RX1", "SLIM RX1"}, - {"RX1 MIX1 INP1", "RX2", "SLIM RX2"}, - {"RX1 MIX1 INP1", "RX3", "SLIM RX3"}, - {"RX1 MIX1 INP1", "RX4", "SLIM RX4"}, - {"RX1 MIX1 INP1", "RX5", "SLIM RX5"}, - {"RX1 MIX1 INP1", "RX6", "SLIM RX6"}, - {"RX1 MIX1 INP1", "RX7", "SLIM RX7"}, - {"RX1 MIX1 INP1", "IIR1", "IIR1"}, - {"RX1 MIX1 INP1", "IIR2", "IIR2"}, - {"RX1 MIX1 INP2", "RX1", "SLIM RX1"}, - {"RX1 MIX1 INP2", "RX2", "SLIM RX2"}, - {"RX1 MIX1 INP2", "RX3", "SLIM RX3"}, - {"RX1 MIX1 INP2", "RX4", "SLIM RX4"}, - {"RX1 MIX1 INP2", "RX5", "SLIM RX5"}, - {"RX1 MIX1 INP2", "RX6", "SLIM RX6"}, - {"RX1 MIX1 INP2", "RX7", "SLIM RX7"}, - {"RX1 MIX1 INP2", "IIR1", "IIR1"}, - {"RX1 MIX1 INP2", "IIR2", "IIR2"}, - {"RX1 MIX1 INP3", "RX1", "SLIM RX1"}, - {"RX1 MIX1 INP3", "RX2", "SLIM RX2"}, - {"RX1 MIX1 INP3", "RX3", "SLIM RX3"}, - {"RX1 MIX1 INP3", "RX4", "SLIM RX4"}, - {"RX1 MIX1 INP3", "RX5", "SLIM RX5"}, - {"RX1 MIX1 INP3", "RX6", "SLIM RX6"}, - {"RX1 MIX1 INP3", "RX7", "SLIM RX7"}, - {"RX2 MIX1 INP1", "RX1", "SLIM RX1"}, - {"RX2 MIX1 INP1", "RX2", "SLIM RX2"}, - {"RX2 MIX1 INP1", "RX3", "SLIM RX3"}, - {"RX2 MIX1 INP1", "RX4", "SLIM RX4"}, - {"RX2 MIX1 INP1", "RX5", "SLIM RX5"}, - {"RX2 MIX1 INP1", "RX6", "SLIM RX6"}, - {"RX2 MIX1 INP1", "RX7", "SLIM RX7"}, - {"RX2 MIX1 INP1", "IIR1", "IIR1"}, - {"RX2 MIX1 INP1", "IIR2", "IIR2"}, - {"RX2 MIX1 INP2", "RX1", "SLIM RX1"}, - {"RX2 MIX1 INP2", "RX2", "SLIM RX2"}, - {"RX2 MIX1 INP2", "RX3", "SLIM RX3"}, - {"RX2 MIX1 INP2", "RX4", "SLIM RX4"}, - {"RX2 MIX1 INP2", "RX5", "SLIM RX5"}, - {"RX2 MIX1 INP2", "RX6", "SLIM RX6"}, - {"RX2 MIX1 INP2", "RX7", "SLIM RX7"}, - {"RX2 MIX1 INP2", "IIR1", "IIR1"}, - {"RX2 MIX1 INP2", "IIR2", "IIR2"}, - {"RX3 MIX1 INP1", "RX1", "SLIM RX1"}, - {"RX3 MIX1 INP1", "RX2", "SLIM RX2"}, - {"RX3 MIX1 INP1", "RX3", "SLIM RX3"}, - {"RX3 MIX1 INP1", "RX4", "SLIM RX4"}, - {"RX3 MIX1 INP1", "RX5", "SLIM RX5"}, - {"RX3 MIX1 INP1", "RX6", "SLIM RX6"}, - {"RX3 MIX1 INP1", "RX7", "SLIM RX7"}, - {"RX3 MIX1 INP1", "IIR1", "IIR1"}, - {"RX3 MIX1 INP1", "IIR2", "IIR2"}, - {"RX3 MIX1 INP2", "RX1", "SLIM RX1"}, - {"RX3 MIX1 INP2", "RX2", "SLIM RX2"}, - {"RX3 MIX1 INP2", "RX3", "SLIM RX3"}, - {"RX3 MIX1 INP2", "RX4", "SLIM RX4"}, - {"RX3 MIX1 INP2", "RX5", "SLIM RX5"}, - {"RX3 MIX1 INP2", "RX6", "SLIM RX6"}, - {"RX3 MIX1 INP2", "RX7", "SLIM RX7"}, - {"RX3 MIX1 INP2", "IIR1", "IIR1"}, - {"RX3 MIX1 INP2", "IIR2", "IIR2"}, - {"RX4 MIX1 INP1", "RX1", "SLIM RX1"}, - {"RX4 MIX1 INP1", "RX2", "SLIM RX2"}, - {"RX4 MIX1 INP1", "RX3", "SLIM RX3"}, - {"RX4 MIX1 INP1", "RX4", "SLIM RX4"}, - {"RX4 MIX1 INP1", "RX5", "SLIM RX5"}, - {"RX4 MIX1 INP1", "RX6", "SLIM RX6"}, - {"RX4 MIX1 INP1", "RX7", "SLIM RX7"}, - {"RX4 MIX1 INP1", "IIR1", "IIR1"}, - {"RX4 MIX1 INP1", "IIR2", "IIR2"}, - {"RX4 MIX1 INP2", "RX1", "SLIM RX1"}, - {"RX4 MIX1 INP2", "RX2", "SLIM RX2"}, - {"RX4 MIX1 INP2", "RX3", "SLIM RX3"}, - {"RX4 MIX1 INP2", "RX5", "SLIM RX5"}, - {"RX4 MIX1 INP2", "RX4", "SLIM RX4"}, - {"RX4 MIX1 INP2", "RX6", "SLIM RX6"}, - {"RX4 MIX1 INP2", "RX7", "SLIM RX7"}, - {"RX4 MIX1 INP2", "IIR1", "IIR1"}, - {"RX4 MIX1 INP2", "IIR2", "IIR2"}, - {"RX5 MIX1 INP1", "RX1", "SLIM RX1"}, - {"RX5 MIX1 INP1", "RX2", "SLIM RX2"}, - {"RX5 MIX1 INP1", "RX3", "SLIM RX3"}, - {"RX5 MIX1 
INP1", "RX4", "SLIM RX4"}, - {"RX5 MIX1 INP1", "RX5", "SLIM RX5"}, - {"RX5 MIX1 INP1", "RX6", "SLIM RX6"}, - {"RX5 MIX1 INP1", "RX7", "SLIM RX7"}, - {"RX5 MIX1 INP1", "IIR1", "IIR1"}, - {"RX5 MIX1 INP1", "IIR2", "IIR2"}, - {"RX5 MIX1 INP2", "RX1", "SLIM RX1"}, - {"RX5 MIX1 INP2", "RX2", "SLIM RX2"}, - {"RX5 MIX1 INP2", "RX3", "SLIM RX3"}, - {"RX5 MIX1 INP2", "RX4", "SLIM RX4"}, - {"RX5 MIX1 INP2", "RX5", "SLIM RX5"}, - {"RX5 MIX1 INP2", "RX6", "SLIM RX6"}, - {"RX5 MIX1 INP2", "RX7", "SLIM RX7"}, - {"RX5 MIX1 INP2", "IIR1", "IIR1"}, - {"RX5 MIX1 INP2", "IIR2", "IIR2"}, - {"RX6 MIX1 INP1", "RX1", "SLIM RX1"}, - {"RX6 MIX1 INP1", "RX2", "SLIM RX2"}, - {"RX6 MIX1 INP1", "RX3", "SLIM RX3"}, - {"RX6 MIX1 INP1", "RX4", "SLIM RX4"}, - {"RX6 MIX1 INP1", "RX5", "SLIM RX5"}, - {"RX6 MIX1 INP1", "RX6", "SLIM RX6"}, - {"RX6 MIX1 INP1", "RX7", "SLIM RX7"}, - {"RX6 MIX1 INP1", "IIR1", "IIR1"}, - {"RX6 MIX1 INP1", "IIR2", "IIR2"}, - {"RX6 MIX1 INP2", "RX1", "SLIM RX1"}, - {"RX6 MIX1 INP2", "RX2", "SLIM RX2"}, - {"RX6 MIX1 INP2", "RX3", "SLIM RX3"}, - {"RX6 MIX1 INP2", "RX4", "SLIM RX4"}, - {"RX6 MIX1 INP2", "RX5", "SLIM RX5"}, - {"RX6 MIX1 INP2", "RX6", "SLIM RX6"}, - {"RX6 MIX1 INP2", "RX7", "SLIM RX7"}, - {"RX6 MIX1 INP2", "IIR1", "IIR1"}, - {"RX6 MIX1 INP2", "IIR2", "IIR2"}, - {"RX7 MIX1 INP1", "RX1", "SLIM RX1"}, - {"RX7 MIX1 INP1", "RX2", "SLIM RX2"}, - {"RX7 MIX1 INP1", "RX3", "SLIM RX3"}, - {"RX7 MIX1 INP1", "RX4", "SLIM RX4"}, - {"RX7 MIX1 INP1", "RX5", "SLIM RX5"}, - {"RX7 MIX1 INP1", "RX6", "SLIM RX6"}, - {"RX7 MIX1 INP1", "RX7", "SLIM RX7"}, - {"RX7 MIX1 INP1", "IIR1", "IIR1"}, - {"RX7 MIX1 INP1", "IIR2", "IIR2"}, - {"RX7 MIX1 INP2", "RX1", "SLIM RX1"}, - {"RX7 MIX1 INP2", "RX2", "SLIM RX2"}, - {"RX7 MIX1 INP2", "RX3", "SLIM RX3"}, - {"RX7 MIX1 INP2", "RX4", "SLIM RX4"}, - {"RX7 MIX1 INP2", "RX5", "SLIM RX5"}, - {"RX7 MIX1 INP2", "RX6", "SLIM RX6"}, - {"RX7 MIX1 INP2", "RX7", "SLIM RX7"}, - {"RX7 MIX1 INP2", "IIR1", "IIR1"}, - {"RX7 MIX1 INP2", "IIR2", "IIR2"}, - {"RX8 MIX1 INP1", "RX1", "SLIM RX1"}, - {"RX8 MIX1 INP1", "RX2", "SLIM RX2"}, - {"RX8 MIX1 INP1", "RX3", "SLIM RX3"}, - {"RX8 MIX1 INP1", "RX4", "SLIM RX4"}, - {"RX8 MIX1 INP1", "RX5", "SLIM RX5"}, - {"RX8 MIX1 INP1", "RX6", "SLIM RX6"}, - {"RX8 MIX1 INP1", "RX7", "SLIM RX7"}, - {"RX8 MIX1 INP1", "RX8", "SLIM RX8"}, - {"RX8 MIX1 INP1", "IIR1", "IIR1"}, - {"RX8 MIX1 INP1", "IIR2", "IIR2"}, - {"RX8 MIX1 INP2", "RX1", "SLIM RX1"}, - {"RX8 MIX1 INP2", "RX2", "SLIM RX2"}, - {"RX8 MIX1 INP2", "RX3", "SLIM RX3"}, - {"RX8 MIX1 INP2", "RX4", "SLIM RX4"}, - {"RX8 MIX1 INP2", "RX5", "SLIM RX5"}, - {"RX8 MIX1 INP2", "RX6", "SLIM RX6"}, - {"RX8 MIX1 INP2", "RX7", "SLIM RX7"}, - {"RX8 MIX1 INP2", "RX8", "SLIM RX8"}, - {"RX8 MIX1 INP2", "IIR1", "IIR1"}, - {"RX8 MIX1 INP2", "IIR2", "IIR2"}, - - /* IIR1, IIR2 inputs to Second RX Mixer on RX1, RX2 and RX7 chains. 
*/ - {"RX1 MIX2 INP1", "IIR1", "IIR1"}, - {"RX1 MIX2 INP2", "IIR1", "IIR1"}, - {"RX2 MIX2 INP1", "IIR1", "IIR1"}, - {"RX2 MIX2 INP2", "IIR1", "IIR1"}, - {"RX7 MIX2 INP1", "IIR1", "IIR1"}, - {"RX7 MIX2 INP2", "IIR1", "IIR1"}, - {"RX1 MIX2 INP1", "IIR2", "IIR2"}, - {"RX1 MIX2 INP2", "IIR2", "IIR2"}, - {"RX2 MIX2 INP1", "IIR2", "IIR2"}, - {"RX2 MIX2 INP2", "IIR2", "IIR2"}, - {"RX7 MIX2 INP1", "IIR2", "IIR2"}, - {"RX7 MIX2 INP2", "IIR2", "IIR2"}, - - /* Decimator Inputs */ - {"DEC1 MUX", "DMIC1", "DMIC1"}, - {"DEC1 MUX", "ADC6", "ADC6"}, - {"DEC1 MUX", NULL, "CDC_CONN"}, - {"DEC2 MUX", "DMIC2", "DMIC2"}, - {"DEC2 MUX", "ADC5", "ADC5"}, - {"DEC2 MUX", NULL, "CDC_CONN"}, - {"DEC3 MUX", "DMIC3", "DMIC3"}, - {"DEC3 MUX", "ADC4", "ADC4"}, - {"DEC3 MUX", NULL, "CDC_CONN"}, - {"DEC4 MUX", "DMIC4", "DMIC4"}, - {"DEC4 MUX", "ADC3", "ADC3"}, - {"DEC4 MUX", NULL, "CDC_CONN"}, - {"DEC5 MUX", "DMIC5", "DMIC5"}, - {"DEC5 MUX", "ADC2", "ADC2"}, - {"DEC5 MUX", NULL, "CDC_CONN"}, - {"DEC6 MUX", "DMIC6", "DMIC6"}, - {"DEC6 MUX", "ADC1", "ADC1"}, - {"DEC6 MUX", NULL, "CDC_CONN"}, - {"DEC7 MUX", "DMIC1", "DMIC1"}, - {"DEC7 MUX", "DMIC6", "DMIC6"}, - {"DEC7 MUX", "ADC1", "ADC1"}, - {"DEC7 MUX", "ADC6", "ADC6"}, - {"DEC7 MUX", "ANC1_FB", "ANC1 MUX"}, - {"DEC7 MUX", "ANC2_FB", "ANC2 MUX"}, - {"DEC7 MUX", NULL, "CDC_CONN"}, - {"DEC8 MUX", "DMIC2", "DMIC2"}, - {"DEC8 MUX", "DMIC5", "DMIC5"}, - {"DEC8 MUX", "ADC2", "ADC2"}, - {"DEC8 MUX", "ADC5", "ADC5"}, - {"DEC8 MUX", "ANC1_FB", "ANC1 MUX"}, - {"DEC8 MUX", "ANC2_FB", "ANC2 MUX"}, - {"DEC8 MUX", NULL, "CDC_CONN"}, - {"DEC9 MUX", "DMIC4", "DMIC4"}, - {"DEC9 MUX", "DMIC5", "DMIC5"}, - {"DEC9 MUX", "ADC2", "ADC2"}, - {"DEC9 MUX", "ADC3", "ADC3"}, - {"DEC9 MUX", "ANC1_FB", "ANC1 MUX"}, - {"DEC9 MUX", "ANC2_FB", "ANC2 MUX"}, - {"DEC9 MUX", NULL, "CDC_CONN"}, - {"DEC10 MUX", "DMIC3", "DMIC3"}, - {"DEC10 MUX", "DMIC6", "DMIC6"}, - {"DEC10 MUX", "ADC1", "ADC1"}, - {"DEC10 MUX", "ADC4", "ADC4"}, - {"DEC10 MUX", "ANC1_FB", "ANC1 MUX"}, - {"DEC10 MUX", "ANC2_FB", "ANC2 MUX"}, - {"DEC10 MUX", NULL, "CDC_CONN"}, - - /* ADC Connections */ - {"ADC1", NULL, "AMIC1"}, - {"ADC2", NULL, "AMIC2"}, - {"ADC3", NULL, "AMIC3"}, - {"ADC4", NULL, "AMIC4"}, - {"ADC5", NULL, "AMIC5"}, - {"ADC6", NULL, "AMIC6"}, - - /* AUX PGA Connections */ - {"EAR_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"}, - {"HPHL_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"}, - {"HPHR_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"}, - {"LINEOUT1_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"}, - {"LINEOUT2_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"}, - {"LINEOUT3_PA_MIXER", "AUX_PGA_L Switch", "AUX_PGA_Left"}, - {"LINEOUT4_PA_MIXER", "AUX_PGA_R Switch", "AUX_PGA_Right"}, - {"AUX_PGA_Left", NULL, "AMIC5"}, - {"AUX_PGA_Right", NULL, "AMIC6"}, - - {"IIR1", NULL, "IIR1 INP1 MUX"}, - {"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"}, - {"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"}, - {"IIR1 INP1 MUX", "DEC3", "DEC3 MUX"}, - {"IIR1 INP1 MUX", "DEC4", "DEC4 MUX"}, - {"IIR1 INP1 MUX", "DEC5", "DEC5 MUX"}, - {"IIR1 INP1 MUX", "DEC6", "DEC6 MUX"}, - {"IIR1 INP1 MUX", "DEC7", "DEC7 MUX"}, - {"IIR1 INP1 MUX", "DEC8", "DEC8 MUX"}, - {"IIR1 INP1 MUX", "DEC9", "DEC9 MUX"}, - {"IIR1 INP1 MUX", "DEC10", "DEC10 MUX"}, - {"IIR1 INP1 MUX", "RX1", "SLIM RX1"}, - {"IIR1 INP1 MUX", "RX2", "SLIM RX2"}, - {"IIR1 INP1 MUX", "RX3", "SLIM RX3"}, - {"IIR1 INP1 MUX", "RX4", "SLIM RX4"}, - {"IIR1 INP1 MUX", "RX5", "SLIM RX5"}, - {"IIR1 INP1 MUX", "RX6", "SLIM RX6"}, - {"IIR1 INP1 MUX", "RX7", "SLIM RX7"}, - - {"IIR2", NULL, "IIR2 INP1 MUX"}, - {"IIR2 INP1 MUX", 
"DEC1", "DEC1 MUX"}, - {"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"}, - {"IIR2 INP1 MUX", "DEC3", "DEC3 MUX"}, - {"IIR2 INP1 MUX", "DEC4", "DEC4 MUX"}, - {"IIR2 INP1 MUX", "DEC5", "DEC5 MUX"}, - {"IIR2 INP1 MUX", "DEC6", "DEC6 MUX"}, - {"IIR2 INP1 MUX", "DEC7", "DEC7 MUX"}, - {"IIR2 INP1 MUX", "DEC8", "DEC8 MUX"}, - {"IIR2 INP1 MUX", "DEC9", "DEC9 MUX"}, - {"IIR2 INP1 MUX", "DEC10", "DEC10 MUX"}, - {"IIR2 INP1 MUX", "RX1", "SLIM RX1"}, - {"IIR2 INP1 MUX", "RX2", "SLIM RX2"}, - {"IIR2 INP1 MUX", "RX3", "SLIM RX3"}, - {"IIR2 INP1 MUX", "RX4", "SLIM RX4"}, - {"IIR2 INP1 MUX", "RX5", "SLIM RX5"}, - {"IIR2 INP1 MUX", "RX6", "SLIM RX6"}, - {"IIR2 INP1 MUX", "RX7", "SLIM RX7"}, - - {"IIR1", NULL, "IIR1 INP2 MUX"}, - {"IIR1 INP2 MUX", "DEC1", "DEC1 MUX"}, - {"IIR1 INP2 MUX", "DEC2", "DEC2 MUX"}, - {"IIR1 INP2 MUX", "DEC3", "DEC3 MUX"}, - {"IIR1 INP2 MUX", "DEC4", "DEC4 MUX"}, - {"IIR1 INP2 MUX", "DEC5", "DEC5 MUX"}, - {"IIR1 INP2 MUX", "DEC6", "DEC6 MUX"}, - {"IIR1 INP2 MUX", "DEC7", "DEC7 MUX"}, - {"IIR1 INP2 MUX", "DEC8", "DEC8 MUX"}, - {"IIR1 INP2 MUX", "DEC9", "DEC9 MUX"}, - {"IIR1 INP2 MUX", "DEC10", "DEC10 MUX"}, - {"IIR1 INP2 MUX", "RX1", "SLIM RX1"}, - {"IIR1 INP2 MUX", "RX2", "SLIM RX2"}, - {"IIR1 INP2 MUX", "RX3", "SLIM RX3"}, - {"IIR1 INP2 MUX", "RX4", "SLIM RX4"}, - {"IIR1 INP2 MUX", "RX5", "SLIM RX5"}, - {"IIR1 INP2 MUX", "RX6", "SLIM RX6"}, - {"IIR1 INP2 MUX", "RX7", "SLIM RX7"}, - - {"IIR2", NULL, "IIR2 INP2 MUX"}, - {"IIR2 INP2 MUX", "DEC1", "DEC1 MUX"}, - {"IIR2 INP2 MUX", "DEC2", "DEC2 MUX"}, - {"IIR2 INP2 MUX", "DEC3", "DEC3 MUX"}, - {"IIR2 INP2 MUX", "DEC4", "DEC4 MUX"}, - {"IIR2 INP2 MUX", "DEC5", "DEC5 MUX"}, - {"IIR2 INP2 MUX", "DEC6", "DEC6 MUX"}, - {"IIR2 INP2 MUX", "DEC7", "DEC7 MUX"}, - {"IIR2 INP2 MUX", "DEC8", "DEC8 MUX"}, - {"IIR2 INP2 MUX", "DEC9", "DEC9 MUX"}, - {"IIR2 INP2 MUX", "DEC10", "DEC10 MUX"}, - {"IIR2 INP2 MUX", "RX1", "SLIM RX1"}, - {"IIR2 INP2 MUX", "RX2", "SLIM RX2"}, - {"IIR2 INP2 MUX", "RX3", "SLIM RX3"}, - {"IIR2 INP2 MUX", "RX4", "SLIM RX4"}, - {"IIR2 INP2 MUX", "RX5", "SLIM RX5"}, - {"IIR2 INP2 MUX", "RX6", "SLIM RX6"}, - {"IIR2 INP2 MUX", "RX7", "SLIM RX7"}, - - {"IIR1", NULL, "IIR1 INP3 MUX"}, - {"IIR1 INP3 MUX", "DEC1", "DEC1 MUX"}, - {"IIR1 INP3 MUX", "DEC2", "DEC2 MUX"}, - {"IIR1 INP3 MUX", "DEC3", "DEC3 MUX"}, - {"IIR1 INP3 MUX", "DEC4", "DEC4 MUX"}, - {"IIR1 INP3 MUX", "DEC5", "DEC5 MUX"}, - {"IIR1 INP3 MUX", "DEC6", "DEC6 MUX"}, - {"IIR1 INP3 MUX", "DEC7", "DEC7 MUX"}, - {"IIR1 INP3 MUX", "DEC8", "DEC8 MUX"}, - {"IIR1 INP3 MUX", "DEC9", "DEC9 MUX"}, - {"IIR1 INP3 MUX", "DEC10", "DEC10 MUX"}, - {"IIR1 INP3 MUX", "RX1", "SLIM RX1"}, - {"IIR1 INP3 MUX", "RX2", "SLIM RX2"}, - {"IIR1 INP3 MUX", "RX3", "SLIM RX3"}, - {"IIR1 INP3 MUX", "RX4", "SLIM RX4"}, - {"IIR1 INP3 MUX", "RX5", "SLIM RX5"}, - {"IIR1 INP3 MUX", "RX6", "SLIM RX6"}, - {"IIR1 INP3 MUX", "RX7", "SLIM RX7"}, - - {"IIR2", NULL, "IIR2 INP3 MUX"}, - {"IIR2 INP3 MUX", "DEC1", "DEC1 MUX"}, - {"IIR2 INP3 MUX", "DEC2", "DEC2 MUX"}, - {"IIR2 INP3 MUX", "DEC3", "DEC3 MUX"}, - {"IIR2 INP3 MUX", "DEC4", "DEC4 MUX"}, - {"IIR2 INP3 MUX", "DEC5", "DEC5 MUX"}, - {"IIR2 INP3 MUX", "DEC6", "DEC6 MUX"}, - {"IIR2 INP3 MUX", "DEC7", "DEC7 MUX"}, - {"IIR2 INP3 MUX", "DEC8", "DEC8 MUX"}, - {"IIR2 INP3 MUX", "DEC9", "DEC9 MUX"}, - {"IIR2 INP3 MUX", "DEC10", "DEC10 MUX"}, - {"IIR2 INP3 MUX", "RX1", "SLIM RX1"}, - {"IIR2 INP3 MUX", "RX2", "SLIM RX2"}, - {"IIR2 INP3 MUX", "RX3", "SLIM RX3"}, - {"IIR2 INP3 MUX", "RX4", "SLIM RX4"}, - {"IIR2 INP3 MUX", "RX5", "SLIM RX5"}, - {"IIR2 INP3 MUX", "RX6", "SLIM RX6"}, - 
{"IIR2 INP3 MUX", "RX7", "SLIM RX7"}, - - {"IIR1", NULL, "IIR1 INP4 MUX"}, - {"IIR1 INP4 MUX", "DEC1", "DEC1 MUX"}, - {"IIR1 INP4 MUX", "DEC2", "DEC2 MUX"}, - {"IIR1 INP4 MUX", "DEC3", "DEC3 MUX"}, - {"IIR1 INP4 MUX", "DEC4", "DEC4 MUX"}, - {"IIR1 INP4 MUX", "DEC5", "DEC5 MUX"}, - {"IIR1 INP4 MUX", "DEC6", "DEC6 MUX"}, - {"IIR1 INP4 MUX", "DEC7", "DEC7 MUX"}, - {"IIR1 INP4 MUX", "DEC8", "DEC8 MUX"}, - {"IIR1 INP4 MUX", "DEC9", "DEC9 MUX"}, - {"IIR1 INP4 MUX", "DEC10", "DEC10 MUX"}, - {"IIR1 INP4 MUX", "RX1", "SLIM RX1"}, - {"IIR1 INP4 MUX", "RX2", "SLIM RX2"}, - {"IIR1 INP4 MUX", "RX3", "SLIM RX3"}, - {"IIR1 INP4 MUX", "RX4", "SLIM RX4"}, - {"IIR1 INP4 MUX", "RX5", "SLIM RX5"}, - {"IIR1 INP4 MUX", "RX6", "SLIM RX6"}, - {"IIR1 INP4 MUX", "RX7", "SLIM RX7"}, - - {"IIR2", NULL, "IIR2 INP4 MUX"}, - {"IIR2 INP4 MUX", "DEC1", "DEC1 MUX"}, - {"IIR2 INP4 MUX", "DEC2", "DEC2 MUX"}, - {"IIR2 INP4 MUX", "DEC3", "DEC3 MUX"}, - {"IIR2 INP4 MUX", "DEC4", "DEC4 MUX"}, - {"IIR2 INP4 MUX", "DEC5", "DEC5 MUX"}, - {"IIR2 INP4 MUX", "DEC6", "DEC6 MUX"}, - {"IIR2 INP4 MUX", "DEC7", "DEC7 MUX"}, - {"IIR2 INP4 MUX", "DEC8", "DEC8 MUX"}, - {"IIR2 INP4 MUX", "DEC9", "DEC9 MUX"}, - {"IIR2 INP4 MUX", "DEC10", "DEC10 MUX"}, - {"IIR2 INP4 MUX", "RX1", "SLIM RX1"}, - {"IIR2 INP4 MUX", "RX2", "SLIM RX2"}, - {"IIR2 INP4 MUX", "RX3", "SLIM RX3"}, - {"IIR2 INP4 MUX", "RX4", "SLIM RX4"}, - {"IIR2 INP4 MUX", "RX5", "SLIM RX5"}, - {"IIR2 INP4 MUX", "RX6", "SLIM RX6"}, - {"IIR2 INP4 MUX", "RX7", "SLIM RX7"}, - - {"MIC BIAS1 Internal1", NULL, "LDO_H"}, - {"MIC BIAS1 Internal2", NULL, "LDO_H"}, - {"MIC BIAS1 External", NULL, "LDO_H"}, - {"MIC BIAS2 Internal1", NULL, "LDO_H"}, - {"MIC BIAS2 Internal2", NULL, "LDO_H"}, - {"MIC BIAS2 Internal3", NULL, "LDO_H"}, - {"MIC BIAS2 External", NULL, "LDO_H"}, - {"MIC BIAS3 Internal1", NULL, "LDO_H"}, - {"MIC BIAS3 Internal2", NULL, "LDO_H"}, - {"MIC BIAS3 External", NULL, "LDO_H"}, - {"MIC BIAS4 External", NULL, "LDO_H"}, - {DAPM_MICBIAS2_EXTERNAL_STANDALONE, NULL, "LDO_H Standalone"}, -}; - -static int tomtom_startup(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) -{ - pr_debug("%s(): substream = %s stream = %d\n", __func__, - substream->name, substream->stream); - - return 0; -} - -static void tomtom_shutdown(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) -{ - pr_debug("%s(): substream = %s stream = %d\n", __func__, - substream->name, substream->stream); -} - -int tomtom_mclk_enable(struct snd_soc_codec *codec, int mclk_enable, bool dapm) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s: mclk_enable = %u, dapm = %d\n", __func__, mclk_enable, - dapm); - - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - if (mclk_enable) { - wcd9xxx_resmgr_get_bandgap(&tomtom->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - wcd9xxx_resmgr_get_clk_block(&tomtom->resmgr, WCD9XXX_CLK_MCLK); - } else { - /* Put clock and BG */ - wcd9xxx_resmgr_put_clk_block(&tomtom->resmgr, WCD9XXX_CLK_MCLK); - wcd9xxx_resmgr_put_bandgap(&tomtom->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - } - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - - return 0; -} - -static int tomtom_set_dai_sysclk(struct snd_soc_dai *dai, - int clk_id, unsigned int freq, int dir) -{ - pr_debug("%s\n", __func__); - return 0; -} - -static int tomtom_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) -{ - u8 val = 0; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec); - - pr_debug("%s\n", __func__); - switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { - case SND_SOC_DAIFMT_CBS_CFS: - /* CPU is 
master */ - if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) { - if (dai->id == AIF1_CAP) - snd_soc_update_bits(dai->codec, - TOMTOM_A_CDC_CLK_TX_I2S_CTL, - TOMTOM_I2S_MASTER_MODE_MASK, 0); - else if (dai->id == AIF1_PB) - snd_soc_update_bits(dai->codec, - TOMTOM_A_CDC_CLK_RX_I2S_CTL, - TOMTOM_I2S_MASTER_MODE_MASK, 0); - } - break; - case SND_SOC_DAIFMT_CBM_CFM: - /* CPU is slave */ - if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) { - val = TOMTOM_I2S_MASTER_MODE_MASK; - if (dai->id == AIF1_CAP) - snd_soc_update_bits(dai->codec, - TOMTOM_A_CDC_CLK_TX_I2S_CTL, val, val); - else if (dai->id == AIF1_PB) - snd_soc_update_bits(dai->codec, - TOMTOM_A_CDC_CLK_RX_I2S_CTL, val, val); - } - break; - default: - return -EINVAL; - } - return 0; -} - -static int tomtom_set_channel_map(struct snd_soc_dai *dai, - unsigned int tx_num, unsigned int *tx_slot, - unsigned int rx_num, unsigned int *rx_slot) - -{ - struct wcd9xxx_codec_dai_data *dai_data = NULL; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec); - struct wcd9xxx *core = dev_get_drvdata(dai->codec->dev->parent); - - if (!tx_slot || !rx_slot) { - pr_err("%s: Invalid tx_slot=%pK, rx_slot=%pK\n", - __func__, tx_slot, rx_slot); - return -EINVAL; - } - pr_debug("%s(): dai_name = %s DAI-ID %x tx_ch %d rx_ch %d\n" - "tomtom->intf_type %d\n", - __func__, dai->name, dai->id, tx_num, rx_num, - tomtom->intf_type); - - if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) { - wcd9xxx_init_slimslave(core, core->slim->laddr, - tx_num, tx_slot, rx_num, rx_slot); - /*Reserve tx11 and tx12 for VI feedback path*/ - dai_data = &tomtom->dai[AIF4_VIFEED]; - if (dai_data) { - list_add_tail(&core->tx_chs[TOMTOM_TX11].list, - &dai_data->wcd9xxx_ch_list); - list_add_tail(&core->tx_chs[TOMTOM_TX12].list, - &dai_data->wcd9xxx_ch_list); - } - - /* Reserve TX13 for MAD data channel */ - dai_data = &tomtom->dai[AIF4_MAD_TX]; - if (dai_data) - list_add_tail(&core->tx_chs[TOMTOM_TX13].list, - &dai_data->wcd9xxx_ch_list); - } - - return 0; -} - -static int tomtom_get_channel_map(struct snd_soc_dai *dai, - unsigned int *tx_num, unsigned int *tx_slot, - unsigned int *rx_num, unsigned int *rx_slot) - -{ - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(dai->codec); - u32 i = 0; - struct wcd9xxx_ch *ch; - - switch (dai->id) { - case AIF1_PB: - case AIF2_PB: - case AIF3_PB: - if (!rx_slot || !rx_num) { - pr_err("%s: Invalid rx_slot %pK or rx_num %pK\n", - __func__, rx_slot, rx_num); - return -EINVAL; - } - list_for_each_entry(ch, &tomtom_p->dai[dai->id].wcd9xxx_ch_list, - list) { - pr_debug("%s: slot_num %u ch->ch_num %d\n", - __func__, i, ch->ch_num); - rx_slot[i++] = ch->ch_num; - } - pr_debug("%s: rx_num %d\n", __func__, i); - *rx_num = i; - break; - case AIF1_CAP: - case AIF2_CAP: - case AIF3_CAP: - case AIF4_VIFEED: - case AIF4_MAD_TX: - if (!tx_slot || !tx_num) { - pr_err("%s: Invalid tx_slot %pK or tx_num %pK\n", - __func__, tx_slot, tx_num); - return -EINVAL; - } - list_for_each_entry(ch, &tomtom_p->dai[dai->id].wcd9xxx_ch_list, - list) { - pr_debug("%s: slot_num %u ch->ch_num %d\n", - __func__, i, ch->ch_num); - tx_slot[i++] = ch->ch_num; - } - pr_debug("%s: tx_num %d\n", __func__, i); - *tx_num = i; - break; - - default: - pr_err("%s: Invalid DAI ID %x\n", __func__, dai->id); - break; - } - - return 0; -} - -static int tomtom_set_interpolator_rate(struct snd_soc_dai *dai, - u8 rx_fs_rate_reg_val, u32 compander_fs, u32 sample_rate) -{ - u32 j; - u8 rx_mix1_inp, rx8_mix1_inp; - u16 rx_mix_1_reg_1, rx_mix_1_reg_2; - u16 rx_fs_reg; - u8 
rx_mix_1_reg_1_val, rx_mix_1_reg_2_val; - struct snd_soc_codec *codec = dai->codec; - struct wcd9xxx_ch *ch; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - int port_rx_8 = TOMTOM_RX_PORT_START_NUMBER + NUM_INTERPOLATORS - 1; - - list_for_each_entry(ch, &tomtom->dai[dai->id].wcd9xxx_ch_list, list) { - /* for RX port starting from 16 instead of 10 like tabla */ - rx_mix1_inp = ch->port + RX_MIX1_INP_SEL_RX1 - - TOMTOM_TX_PORT_NUMBER; - rx8_mix1_inp = ch->port + RX8_MIX1_INP_SEL_RX1 - - TOMTOM_RX_PORT_START_NUMBER; - if (((ch->port < port_rx_8) && - ((rx_mix1_inp < RX_MIX1_INP_SEL_RX1) || - (rx_mix1_inp > RX_MIX1_INP_SEL_RX7))) || - ((rx8_mix1_inp < RX8_MIX1_INP_SEL_RX1) || - (rx8_mix1_inp > RX8_MIX1_INP_SEL_RX8))) { - pr_err("%s: Invalid TOMTOM_RX%u port. Dai ID is %d\n", - __func__, rx8_mix1_inp - 2, - dai->id); - return -EINVAL; - } - - rx_mix_1_reg_1 = TOMTOM_A_CDC_CONN_RX1_B1_CTL; - - for (j = 0; j < NUM_INTERPOLATORS - 1; j++) { - rx_mix_1_reg_2 = rx_mix_1_reg_1 + 1; - - rx_mix_1_reg_1_val = snd_soc_read(codec, - rx_mix_1_reg_1); - rx_mix_1_reg_2_val = snd_soc_read(codec, - rx_mix_1_reg_2); - - if (((rx_mix_1_reg_1_val & 0x0F) == rx_mix1_inp) || - (((rx_mix_1_reg_1_val >> 4) & 0x0F) - == rx_mix1_inp) || - ((rx_mix_1_reg_2_val & 0x0F) == rx_mix1_inp)) { - - rx_fs_reg = TOMTOM_A_CDC_RX1_B5_CTL + 8 * j; - - pr_debug("%s: AIF_PB DAI(%d) connected to RX%u\n", - __func__, dai->id, j + 1); - - pr_debug("%s: set RX%u sample rate to %u\n", - __func__, j + 1, sample_rate); - - snd_soc_update_bits(codec, rx_fs_reg, - 0xE0, rx_fs_rate_reg_val); - - if (comp_rx_path[j] < COMPANDER_MAX) - tomtom->comp_fs[comp_rx_path[j]] - = compander_fs; - } - if (j < 2) - rx_mix_1_reg_1 += 3; - else - rx_mix_1_reg_1 += 2; - } - - /* RX8 interpolator path */ - rx_mix_1_reg_1_val = snd_soc_read(codec, - TOMTOM_A_CDC_CONN_RX8_B1_CTL); - if (((rx_mix_1_reg_1_val & 0x0F) == rx8_mix1_inp) || - (((rx_mix_1_reg_1_val >> 4) & 0x0F) == rx8_mix1_inp)) { - snd_soc_update_bits(codec, TOMTOM_A_CDC_RX8_B5_CTL, - 0xE0, rx_fs_rate_reg_val); - pr_debug("%s: AIF_PB DAI(%d) connected to RX%u\n", - __func__, dai->id, NUM_INTERPOLATORS); - - pr_debug("%s: set RX%u sample rate to %u\n", - __func__, NUM_INTERPOLATORS, - sample_rate); - if (comp_rx_path[NUM_INTERPOLATORS - 1] < COMPANDER_MAX) - tomtom->comp_fs[comp_rx_path[j]] = - compander_fs; - } - } - return 0; -} - -static int tomtom_set_decimator_rate(struct snd_soc_dai *dai, - u8 tx_fs_rate_reg_val, u32 sample_rate) -{ - struct snd_soc_codec *codec = dai->codec; - struct wcd9xxx_ch *ch; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - u32 tx_port; - u16 tx_port_reg, tx_fs_reg; - u8 tx_port_reg_val; - s8 decimator; - - list_for_each_entry(ch, &tomtom->dai[dai->id].wcd9xxx_ch_list, list) { - - tx_port = ch->port + 1; - pr_debug("%s: dai->id = %d, tx_port = %d", - __func__, dai->id, tx_port); - - if ((tx_port < 1) || (tx_port > NUM_DECIMATORS)) { - pr_err("%s: Invalid SLIM TX%u port. 
DAI ID is %d\n", - __func__, tx_port, dai->id); - return -EINVAL; - } - - tx_port_reg = TOMTOM_A_CDC_CONN_TX_SB_B1_CTL + (tx_port - 1); - tx_port_reg_val = snd_soc_read(codec, tx_port_reg); - - decimator = 0; - - if ((tx_port >= 1) && (tx_port <= 6)) { - - tx_port_reg_val = tx_port_reg_val & 0x0F; - if (tx_port_reg_val == 0x8) - decimator = tx_port; - - } else if ((tx_port >= 7) && (tx_port <= NUM_DECIMATORS)) { - - tx_port_reg_val = tx_port_reg_val & 0x1F; - - if ((tx_port_reg_val >= 0x8) && - (tx_port_reg_val <= 0x11)) { - - decimator = (tx_port_reg_val - 0x8) + 1; - } - } - - if (decimator) { /* SLIM_TX port has a DEC as input */ - - tx_fs_reg = TOMTOM_A_CDC_TX1_CLK_FS_CTL + - 8 * (decimator - 1); - - pr_debug("%s: set DEC%u (-> SLIM_TX%u) rate to %u\n", - __func__, decimator, tx_port, sample_rate); - - snd_soc_update_bits(codec, tx_fs_reg, 0x07, - tx_fs_rate_reg_val); - - } else { - if ((tx_port_reg_val >= 0x1) && - (tx_port_reg_val <= 0x7)) { - - pr_debug("%s: RMIX%u going to SLIM TX%u\n", - __func__, tx_port_reg_val, tx_port); - - } else if ((tx_port_reg_val >= 0x8) && - (tx_port_reg_val <= 0x11)) { - - pr_err("%s: ERROR: Should not be here\n", - __func__); - pr_err("%s: ERROR: DEC connected to SLIM TX%u\n", - __func__, tx_port); - return -EINVAL; - - } else if (tx_port_reg_val == 0) { - pr_debug("%s: no signal to SLIM TX%u\n", - __func__, tx_port); - } else { - pr_err("%s: ERROR: wrong signal to SLIM TX%u\n", - __func__, tx_port); - pr_err("%s: ERROR: wrong signal = %u\n", - __func__, tx_port_reg_val); - return -EINVAL; - } - } - } - return 0; -} - -static void tomtom_set_rxsb_port_format(struct snd_pcm_hw_params *params, - struct snd_soc_dai *dai) -{ - struct snd_soc_codec *codec = dai->codec; - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx_codec_dai_data *cdc_dai; - struct wcd9xxx_ch *ch; - int port; - u8 bit_sel; - u16 sb_ctl_reg, field_shift; - - switch (params_width(params)) { - case 16: - bit_sel = 0x2; - tomtom_p->dai[dai->id].bit_width = 16; - break; - case 24: - bit_sel = 0x0; - tomtom_p->dai[dai->id].bit_width = 24; - break; - default: - dev_err(codec->dev, "Invalid format\n"); - return; - } - - cdc_dai = &tomtom_p->dai[dai->id]; - - list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) { - port = wcd9xxx_get_slave_port(ch->ch_num); - if (port < 0 || - !TOMTOM_VALIDATE_RX_SBPORT_RANGE(port)) { - dev_warn(codec->dev, - "%s: invalid port ID %d returned for RX DAI\n", - __func__, port); - return; - } - - port = TOMTOM_CONVERT_RX_SBPORT_ID(port); - - if (port <= 3) { - sb_ctl_reg = TOMTOM_A_CDC_CONN_RX_SB_B1_CTL; - field_shift = port << 1; - } else if (port <= 7) { - sb_ctl_reg = TOMTOM_A_CDC_CONN_RX_SB_B2_CTL; - field_shift = (port - 4) << 1; - } else { /* should not happen */ - dev_warn(codec->dev, - "%s: bad port ID %d\n", __func__, port); - return; - } - - dev_dbg(codec->dev, "%s: sb_ctl_reg %x field_shift %x\n", - __func__, sb_ctl_reg, field_shift); - snd_soc_update_bits(codec, sb_ctl_reg, 0x3 << field_shift, - bit_sel << field_shift); - } -} - -static void tomtom_set_tx_sb_port_format(struct snd_pcm_hw_params *params, - struct snd_soc_dai *dai) -{ - struct snd_soc_codec *codec = dai->codec; - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx_codec_dai_data *cdc_dai; - struct wcd9xxx_ch *ch; - int port; - u8 bit_sel, bit_shift; - u16 sb_ctl_reg; - - switch (params_width(params)) { - case 16: - bit_sel = 0x2; - tomtom_p->dai[dai->id].bit_width = 16; - break; - case 24: - bit_sel = 0x0; - 
tomtom_p->dai[dai->id].bit_width = 24; - break; - default: - dev_err(codec->dev, "%s: Invalid format %d\n", __func__, - params_width(params)); - return; - } - - cdc_dai = &tomtom_p->dai[dai->id]; - - list_for_each_entry(ch, &cdc_dai->wcd9xxx_ch_list, list) { - port = wcd9xxx_get_slave_port(ch->ch_num); - if (port < 0 || - !TOMTOM_VALIDATE_TX_SBPORT_RANGE(port)) { - dev_warn(codec->dev, - "%s: invalid port ID %d returned for TX DAI\n", - __func__, port); - return; - } - - if (port < 6) /* 6 = SLIMBUS TX7 */ - bit_shift = TOMTOM_BIT_ADJ_SHIFT_PORT1_6; - else if (port < 10) - bit_shift = TOMTOM_BIT_ADJ_SHIFT_PORT7_10; - else { - dev_warn(codec->dev, - "%s: port ID %d bitwidth is fixed\n", - __func__, port); - return; - } - - sb_ctl_reg = (TOMTOM_A_CDC_CONN_TX_SB_B1_CTL + port); - - dev_dbg(codec->dev, "%s: reg %x bit_sel %x bit_shift %x\n", - __func__, sb_ctl_reg, bit_sel, bit_shift); - snd_soc_update_bits(codec, sb_ctl_reg, 0x3 << - bit_shift, bit_sel << bit_shift); - } -} - -static int tomtom_hw_params(struct snd_pcm_substream *substream, - struct snd_pcm_hw_params *params, - struct snd_soc_dai *dai) -{ - struct snd_soc_codec *codec = dai->codec; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(dai->codec); - u8 tx_fs_rate, rx_fs_rate, i2s_bit_mode; - u32 compander_fs; - int ret; - - pr_debug("%s: dai_name = %s DAI-ID %x rate %d num_ch %d\n", __func__, - dai->name, dai->id, params_rate(params), - params_channels(params)); - - switch (params_rate(params)) { - case 8000: - tx_fs_rate = 0x00; - rx_fs_rate = 0x00; - compander_fs = COMPANDER_FS_8KHZ; - break; - case 16000: - tx_fs_rate = 0x01; - rx_fs_rate = 0x20; - compander_fs = COMPANDER_FS_16KHZ; - break; - case 32000: - tx_fs_rate = 0x02; - rx_fs_rate = 0x40; - compander_fs = COMPANDER_FS_32KHZ; - break; - case 48000: - tx_fs_rate = 0x03; - rx_fs_rate = 0x60; - compander_fs = COMPANDER_FS_48KHZ; - break; - case 96000: - tx_fs_rate = 0x04; - rx_fs_rate = 0x80; - compander_fs = COMPANDER_FS_96KHZ; - break; - case 192000: - tx_fs_rate = 0x05; - rx_fs_rate = 0xA0; - compander_fs = COMPANDER_FS_192KHZ; - break; - default: - pr_err("%s: Invalid sampling rate %d\n", __func__, - params_rate(params)); - return -EINVAL; - } - - switch (substream->stream) { - case SNDRV_PCM_STREAM_CAPTURE: - if (dai->id != AIF4_VIFEED && - dai->id != AIF4_MAD_TX) { - ret = tomtom_set_decimator_rate(dai, tx_fs_rate, - params_rate(params)); - if (ret < 0) { - pr_err("%s: set decimator rate failed %d\n", - __func__, ret); - return ret; - } - } - - tomtom->dai[dai->id].rate = params_rate(params); - - switch (params_format(params)) { - case SNDRV_PCM_FORMAT_S16_LE: - i2s_bit_mode = 0x01; - tomtom->dai[dai->id].bit_width = 16; - break; - case SNDRV_PCM_FORMAT_S24_LE: - tomtom->dai[dai->id].bit_width = 24; - i2s_bit_mode = 0x00; - break; - case SNDRV_PCM_FORMAT_S32_LE: - tomtom->dai[dai->id].bit_width = 32; - i2s_bit_mode = 0x00; - break; - default: - dev_err(codec->dev, - "%s: Invalid format 0x%x\n", - __func__, params_format(params)); - return -EINVAL; - } - - if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) { - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_TX_I2S_CTL, - 0x20, i2s_bit_mode << 5); - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_TX_I2S_CTL, - 0x07, tx_fs_rate); - } else { - /* only generic ports can have sample bit adjustment */ - if (dai->id != AIF4_VIFEED && - dai->id != AIF4_MAD_TX) - tomtom_set_tx_sb_port_format(params, dai); - } - - break; - - case SNDRV_PCM_STREAM_PLAYBACK: - ret = tomtom_set_interpolator_rate(dai, rx_fs_rate, - compander_fs, - 
params_rate(params)); - if (ret < 0) { - pr_err("%s: set decimator rate failed %d\n", __func__, - ret); - return ret; - } - if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) { - switch (params_format(params)) { - case SNDRV_PCM_FORMAT_S16_LE: - snd_soc_update_bits(codec, - TOMTOM_A_CDC_CLK_RX_I2S_CTL, - 0x20, 0x20); - break; - case SNDRV_PCM_FORMAT_S32_LE: - snd_soc_update_bits(codec, - TOMTOM_A_CDC_CLK_RX_I2S_CTL, - 0x20, 0x00); - break; - default: - pr_err("invalid format\n"); - break; - } - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_RX_I2S_CTL, - 0x03, (rx_fs_rate >> 0x05)); - } else { - tomtom_set_rxsb_port_format(params, dai); - tomtom->dai[dai->id].rate = params_rate(params); - } - break; - default: - pr_err("%s: Invalid stream type %d\n", __func__, - substream->stream); - return -EINVAL; - } - - return 0; -} - -static struct snd_soc_dai_ops tomtom_dai_ops = { - .startup = tomtom_startup, - .shutdown = tomtom_shutdown, - .hw_params = tomtom_hw_params, - .set_sysclk = tomtom_set_dai_sysclk, - .set_fmt = tomtom_set_dai_fmt, - .set_channel_map = tomtom_set_channel_map, - .get_channel_map = tomtom_get_channel_map, -}; - -static struct snd_soc_dai_driver tomtom_dai[] = { - { - .name = "tomtom_rx1", - .id = AIF1_PB, - .playback = { - .stream_name = "AIF1 Playback", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS_S16_S24_LE, - .rate_max = 192000, - .rate_min = 8000, - .channels_min = 1, - .channels_max = 2, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_tx1", - .id = AIF1_CAP, - .capture = { - .stream_name = "AIF1 Capture", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS, - .rate_max = 192000, - .rate_min = 8000, - .channels_min = 1, - .channels_max = 4, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_rx2", - .id = AIF2_PB, - .playback = { - .stream_name = "AIF2 Playback", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS_S16_S24_LE, - .rate_min = 8000, - .rate_max = 192000, - .channels_min = 1, - .channels_max = 2, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_tx2", - .id = AIF2_CAP, - .capture = { - .stream_name = "AIF2 Capture", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS, - .rate_max = 192000, - .rate_min = 8000, - .channels_min = 1, - .channels_max = 8, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_rx3", - .id = AIF3_PB, - .playback = { - .stream_name = "AIF3 Playback", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS_S16_S24_LE, - .rate_min = 8000, - .rate_max = 192000, - .channels_min = 1, - .channels_max = 2, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_tx3", - .id = AIF3_CAP, - .capture = { - .stream_name = "AIF3 Capture", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS, - .rate_max = 48000, - .rate_min = 8000, - .channels_min = 1, - .channels_max = 2, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_vifeedback", - .id = AIF4_VIFEED, - .capture = { - .stream_name = "VIfeed", - .rates = SNDRV_PCM_RATE_48000, - .formats = TOMTOM_FORMATS, - .rate_max = 48000, - .rate_min = 48000, - .channels_min = 2, - .channels_max = 2, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_mad1", - .id = AIF4_MAD_TX, - .capture = { - .stream_name = "AIF4 MAD TX", - .rates = SNDRV_PCM_RATE_16000, - .formats = TOMTOM_FORMATS_S16_S24_LE, - .rate_min = 16000, - .rate_max = 16000, - .channels_min = 1, - .channels_max = 1, - }, - .ops = &tomtom_dai_ops, - }, -}; - -static struct snd_soc_dai_driver tomtom_i2s_dai[] = { - { - .name = "tomtom_i2s_rx1", - .id = AIF1_PB, - .playback = { - .stream_name 
= "AIF1 Playback", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS, - .rate_max = 192000, - .rate_min = 8000, - .channels_min = 1, - .channels_max = 4, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_i2s_tx1", - .id = AIF1_CAP, - .capture = { - .stream_name = "AIF1 Capture", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS, - .rate_max = 192000, - .rate_min = 8000, - .channels_min = 1, - .channels_max = 4, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_i2s_rx2", - .id = AIF1_PB, - .playback = { - .stream_name = "AIF2 Playback", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS, - .rate_max = 192000, - .rate_min = 8000, - .channels_min = 1, - .channels_max = 4, - }, - .ops = &tomtom_dai_ops, - }, - { - .name = "tomtom_i2s_tx2", - .id = AIF1_CAP, - .capture = { - .stream_name = "AIF2 Capture", - .rates = WCD9330_RATES, - .formats = TOMTOM_FORMATS, - .rate_max = 192000, - .rate_min = 8000, - .channels_min = 1, - .channels_max = 4, - }, - .ops = &tomtom_dai_ops, - }, -}; - -static int tomtom_codec_enable_slim_chmask(struct wcd9xxx_codec_dai_data *dai, - bool up) -{ - int ret = 0; - struct wcd9xxx_ch *ch; - - if (up) { - list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) { - ret = wcd9xxx_get_slave_port(ch->ch_num); - if (ret < 0) { - pr_err("%s: Invalid slave port ID: %d\n", - __func__, ret); - ret = -EINVAL; - } else { - set_bit(ret, &dai->ch_mask); - } - } - } else { - ret = wait_event_timeout(dai->dai_wait, (dai->ch_mask == 0), - msecs_to_jiffies( - TOMTOM_SLIM_CLOSE_TIMEOUT)); - if (!ret) { - pr_err("%s: Slim close tx/rx wait timeout\n", __func__); - ret = -ETIMEDOUT; - } else { - ret = 0; - } - } - return ret; -} - -static void tomtom_codec_enable_int_port(struct wcd9xxx_codec_dai_data *dai, - struct snd_soc_codec *codec) -{ - struct wcd9xxx_ch *ch; - struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent); - int port_num = 0; - unsigned short reg = 0; - u8 val = 0; - - if (!dai || !codec) { - pr_err("%s: Invalid params\n", __func__); - return; - } - list_for_each_entry(ch, &dai->wcd9xxx_ch_list, list) { - if (ch->port >= TOMTOM_RX_PORT_START_NUMBER) { - port_num = ch->port - TOMTOM_RX_PORT_START_NUMBER; - reg = TOMTOM_SLIM_PGD_PORT_INT_EN0 + (port_num / 8); - val = wcd9xxx_interface_reg_read(wcd9xxx, - reg); - if (!(val & (1 << (port_num % 8)))) { - val |= (1 << (port_num % 8)); - wcd9xxx_interface_reg_write( - wcd9xxx, reg, val); - val = wcd9xxx_interface_reg_read( - wcd9xxx, reg); - } - } else { - port_num = ch->port; - reg = TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 + (port_num / 8); - val = wcd9xxx_interface_reg_read(wcd9xxx, - reg); - if (!(val & (1 << (port_num % 8)))) { - val |= (1 << (port_num % 8)); - wcd9xxx_interface_reg_write(wcd9xxx, - reg, val); - val = wcd9xxx_interface_reg_read( - wcd9xxx, reg); - } - } - } -} - -static int tomtom_codec_enable_slimrx(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, - int event) -{ - struct wcd9xxx *core; - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - int ret = 0; - struct wcd9xxx_codec_dai_data *dai; - - core = dev_get_drvdata(codec->dev->parent); - - pr_debug("%s: event called! 
codec name %s num_dai %d\n" - "stream name %s event %d\n", - __func__, codec->component.name, - codec->component.num_dai, w->sname, event); - - /* Execute the callback only if interface type is slimbus */ - if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) - return 0; - - dai = &tomtom_p->dai[w->shift]; - pr_debug("%s: w->name %s w->shift %d event %d\n", - __func__, w->name, w->shift, event); - - switch (event) { - case SND_SOC_DAPM_POST_PMU: - dai->bus_down_in_recovery = false; - tomtom_codec_enable_int_port(dai, codec); - (void) tomtom_codec_enable_slim_chmask(dai, true); - ret = wcd9xxx_cfg_slim_sch_rx(core, &dai->wcd9xxx_ch_list, - dai->rate, dai->bit_width, - &dai->grph); - break; - case SND_SOC_DAPM_POST_PMD: - ret = wcd9xxx_close_slim_sch_rx(core, &dai->wcd9xxx_ch_list, - dai->grph); - if (!dai->bus_down_in_recovery) - ret = tomtom_codec_enable_slim_chmask(dai, false); - else - pr_debug("%s: bus in recovery skip enable slim_chmask", - __func__); - if (ret < 0) { - ret = wcd9xxx_disconnect_port(core, - &dai->wcd9xxx_ch_list, - dai->grph); - pr_debug("%s: Disconnect RX port, ret = %d\n", - __func__, ret); - } - break; - } - return ret; -} - -static int tomtom_codec_enable_slimvi_feedback(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, - int event) -{ - struct wcd9xxx *core = NULL; - struct snd_soc_codec *codec = NULL; - struct tomtom_priv *tomtom_p = NULL; - u32 ret = 0; - struct wcd9xxx_codec_dai_data *dai = NULL; - - if (!w) { - pr_err("%s invalid params\n", __func__); - return -EINVAL; - } - codec = snd_soc_dapm_to_codec(w->dapm); - tomtom_p = snd_soc_codec_get_drvdata(codec); - core = dev_get_drvdata(codec->dev->parent); - - pr_debug("%s: event called! codec name %s num_dai %d stream name %s\n", - __func__, codec->component.name, - codec->component.num_dai, w->sname); - - /* Execute the callback only if interface type is slimbus */ - if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) { - pr_err("%s Interface is not correct", __func__); - return 0; - } - - pr_debug("%s(): w->name %s event %d w->shift %d\n", - __func__, w->name, event, w->shift); - if (w->shift != AIF4_VIFEED) { - pr_err("%s Error in enabling the tx path\n", __func__); - ret = -EINVAL; - goto out_vi; - } - dai = &tomtom_p->dai[w->shift]; - switch (event) { - case SND_SOC_DAPM_POST_PMU: - /*Enable V&I sensing*/ - snd_soc_update_bits(codec, TOMTOM_A_SPKR1_PROT_EN, - 0x88, 0x88); - /*Enable spkr VI clocks*/ - snd_soc_update_bits(codec, - TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, 0xC, 0xC); - dai->bus_down_in_recovery = false; - tomtom_codec_enable_int_port(dai, codec); - (void) tomtom_codec_enable_slim_chmask(dai, true); - ret = wcd9xxx_cfg_slim_sch_tx(core, &dai->wcd9xxx_ch_list, - dai->rate, dai->bit_width, - &dai->grph); - break; - case SND_SOC_DAPM_POST_PMD: - ret = wcd9xxx_close_slim_sch_tx(core, &dai->wcd9xxx_ch_list, - dai->grph); - if (ret) - pr_err("%s error in close_slim_sch_tx %d\n", - __func__, ret); - if (!dai->bus_down_in_recovery) - ret = tomtom_codec_enable_slim_chmask(dai, false); - if (ret < 0) { - ret = wcd9xxx_disconnect_port(core, - &dai->wcd9xxx_ch_list, - dai->grph); - pr_debug("%s: Disconnect TX port, ret = %d\n", - __func__, ret); - } - - snd_soc_update_bits(codec, TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, - 0xC, 0x0); - /*Disable V&I sensing*/ - snd_soc_update_bits(codec, TOMTOM_A_SPKR1_PROT_EN, - 0x88, 0x00); - break; - } -out_vi: - return ret; -} - -/* __tomtom_codec_enable_slimtx: Enable the slimbus slave port - * for TX path - * @codec: Handle to the codec for which the 
slave port is to be - * enabled. - * @dai_data: The dai specific data for dai which is enabled. - */ -static int __tomtom_codec_enable_slimtx(struct snd_soc_codec *codec, - int event, struct wcd9xxx_codec_dai_data *dai_data) -{ - struct wcd9xxx *core; - int ret = 0; - - core = dev_get_drvdata(codec->dev->parent); - - switch (event) { - case SND_SOC_DAPM_POST_PMU: - dai_data->bus_down_in_recovery = false; - tomtom_codec_enable_int_port(dai_data, codec); - (void) tomtom_codec_enable_slim_chmask(dai_data, true); - ret = wcd9xxx_cfg_slim_sch_tx(core, &dai_data->wcd9xxx_ch_list, - dai_data->rate, - dai_data->bit_width, - &dai_data->grph); - break; - case SND_SOC_DAPM_POST_PMD: - ret = wcd9xxx_close_slim_sch_tx(core, - &dai_data->wcd9xxx_ch_list, - dai_data->grph); - if (!dai_data->bus_down_in_recovery) - ret = tomtom_codec_enable_slim_chmask(dai_data, false); - if (ret < 0) { - ret = wcd9xxx_disconnect_port(core, - &dai_data->wcd9xxx_ch_list, - dai_data->grph); - dev_dbg(codec->dev, - "%s: Disconnect TX port, ret = %d\n", - __func__, ret); - } - break; - } - - return ret; -} - -/* - * tomtom_codec_enable_slimtx_mad: Callback function that will be invoked - * to setup the slave port for MAD. - * @codec: Handle to the codec - * @event: Indicates whether to enable or disable the slave port - */ -static int tomtom_codec_enable_slimtx_mad(struct snd_soc_codec *codec, - u8 event) -{ - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx_codec_dai_data *dai; - int dapm_event = SND_SOC_DAPM_POST_PMU; - - dai = &tomtom_p->dai[AIF4_MAD_TX]; - - if (event == 0) - dapm_event = SND_SOC_DAPM_POST_PMD; - - dev_dbg(codec->dev, - "%s: mad_channel, event = 0x%x\n", - __func__, event); - return __tomtom_codec_enable_slimtx(codec, dapm_event, dai); -} - -/* - * tomtom_codec_enable_slimtx: DAPM widget allback for TX widgets - * @w: widget for which this callback is invoked - * @kcontrol: kcontrol associated with this widget - * @event: DAPM supplied event indicating enable/disable - */ -static int tomtom_codec_enable_slimtx(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, - int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx_codec_dai_data *dai; - - dev_dbg(codec->dev, "%s: event called! 
codec name %s num_dai %d stream name %s\n", - __func__, codec->component.name, - codec->component.num_dai, w->sname); - - /* Execute the callback only if interface type is slimbus */ - if (tomtom_p->intf_type != WCD9XXX_INTERFACE_TYPE_SLIMBUS) - return 0; - - dev_dbg(codec->dev, - "%s(): w->name %s event %d w->shift %d\n", - __func__, w->name, event, w->shift); - - dai = &tomtom_p->dai[w->shift]; - return __tomtom_codec_enable_slimtx(codec, event, dai); -} - -static int tomtom_codec_enable_ear_pa(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s %s %d\n", __func__, w->name, event); - - switch (event) { - case SND_SOC_DAPM_POST_PMU: - wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d, - WCD9XXX_CLSH_STATE_EAR, - WCD9XXX_CLSH_REQ_ENABLE, - WCD9XXX_CLSH_EVENT_POST_PA); - - usleep_range(5000, 5100); - break; - } - return 0; -} - -static int tomtom_codec_ear_dac_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - struct tomtom_priv *tomtom_p = snd_soc_codec_get_drvdata(codec); - - pr_debug("%s %s %d\n", __func__, w->name, event); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d, - WCD9XXX_CLSH_STATE_EAR, - WCD9XXX_CLSH_REQ_ENABLE, - WCD9XXX_CLSH_EVENT_PRE_DAC); - break; - case SND_SOC_DAPM_POST_PMD: - wcd9xxx_clsh_fsm(codec, &tomtom_p->clsh_d, - WCD9XXX_CLSH_STATE_EAR, - WCD9XXX_CLSH_REQ_DISABLE, - WCD9XXX_CLSH_EVENT_POST_PA); - usleep_range(5000, 5100); - break; - default: - break; - } - return 0; -} - -static int tomtom_codec_set_iir_gain(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - - pr_debug("%s: event = %d\n", __func__, event); - - switch (event) { - case SND_SOC_DAPM_POST_PMU: /* fall through */ - case SND_SOC_DAPM_PRE_PMD: - if (strnstr(w->name, "IIR1", sizeof("IIR1"))) { - snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B1_CTL, - snd_soc_read(codec, - TOMTOM_A_CDC_IIR1_GAIN_B1_CTL)); - snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B2_CTL, - snd_soc_read(codec, - TOMTOM_A_CDC_IIR1_GAIN_B2_CTL)); - snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B3_CTL, - snd_soc_read(codec, - TOMTOM_A_CDC_IIR1_GAIN_B3_CTL)); - snd_soc_write(codec, TOMTOM_A_CDC_IIR1_GAIN_B4_CTL, - snd_soc_read(codec, - TOMTOM_A_CDC_IIR1_GAIN_B4_CTL)); - } else { - snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B1_CTL, - snd_soc_read(codec, - TOMTOM_A_CDC_IIR2_GAIN_B1_CTL)); - snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B2_CTL, - snd_soc_read(codec, - TOMTOM_A_CDC_IIR2_GAIN_B2_CTL)); - snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B3_CTL, - snd_soc_read(codec, - TOMTOM_A_CDC_IIR2_GAIN_B3_CTL)); - snd_soc_write(codec, TOMTOM_A_CDC_IIR2_GAIN_B4_CTL, - snd_soc_read(codec, - TOMTOM_A_CDC_IIR2_GAIN_B4_CTL)); - } - break; - } - return 0; -} - -static int tomtom_codec_dsm_mux_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - u8 reg_val, zoh_mux_val = 0x00; - - pr_debug("%s: event = %d\n", __func__, event); - - switch (event) { - case SND_SOC_DAPM_POST_PMU: - reg_val = snd_soc_read(codec, TOMTOM_A_CDC_CONN_CLSH_CTL); - - if ((reg_val & 0x30) == 0x10) - zoh_mux_val = 0x04; - else if ((reg_val & 0x30) == 0x20) - zoh_mux_val = 0x08; - - if 
(zoh_mux_val != 0x00) - snd_soc_update_bits(codec, - TOMTOM_A_CDC_CONN_CLSH_CTL, - 0x0C, zoh_mux_val); - break; - - case SND_SOC_DAPM_POST_PMD: - snd_soc_update_bits(codec, TOMTOM_A_CDC_CONN_CLSH_CTL, - 0x0C, 0x00); - break; - } - return 0; -} - -static int tomtom_codec_enable_anc_ear(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - int ret = 0; - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - ret = tomtom_codec_enable_anc(w, kcontrol, event); - msleep(50); - snd_soc_update_bits(codec, TOMTOM_A_RX_EAR_EN, 0x10, 0x10); - break; - case SND_SOC_DAPM_POST_PMU: - ret = tomtom_codec_enable_ear_pa(w, kcontrol, event); - break; - case SND_SOC_DAPM_PRE_PMD: - snd_soc_update_bits(codec, TOMTOM_A_RX_EAR_EN, 0x10, 0x00); - msleep(40); - ret |= tomtom_codec_enable_anc(w, kcontrol, event); - break; - case SND_SOC_DAPM_POST_PMD: - ret = tomtom_codec_enable_ear_pa(w, kcontrol, event); - break; - } - return ret; -} - -/* Todo: Have separate dapm widgets for I2S and Slimbus. - * Might Need to have callbacks registered only for slimbus - */ -static const struct snd_soc_dapm_widget tomtom_dapm_widgets[] = { - /*RX stuff */ - SND_SOC_DAPM_OUTPUT("EAR"), - - SND_SOC_DAPM_PGA_E("EAR PA", TOMTOM_A_RX_EAR_EN, 4, 0, NULL, 0, - tomtom_codec_enable_ear_pa, SND_SOC_DAPM_POST_PMU), - - SND_SOC_DAPM_MIXER_E("DAC1", TOMTOM_A_RX_EAR_EN, 6, 0, dac1_switch, - ARRAY_SIZE(dac1_switch), tomtom_codec_ear_dac_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_AIF_IN_E("AIF1 PB", "AIF1 Playback", 0, SND_SOC_NOPM, - AIF1_PB, 0, tomtom_codec_enable_slimrx, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_AIF_IN_E("AIF2 PB", "AIF2 Playback", 0, SND_SOC_NOPM, - AIF2_PB, 0, tomtom_codec_enable_slimrx, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_AIF_IN_E("AIF3 PB", "AIF3 Playback", 0, SND_SOC_NOPM, - AIF3_PB, 0, tomtom_codec_enable_slimrx, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX("SLIM RX1 MUX", SND_SOC_NOPM, TOMTOM_RX1, 0, - &slim_rx_mux[TOMTOM_RX1]), - SND_SOC_DAPM_MUX("SLIM RX2 MUX", SND_SOC_NOPM, TOMTOM_RX2, 0, - &slim_rx_mux[TOMTOM_RX2]), - SND_SOC_DAPM_MUX("SLIM RX3 MUX", SND_SOC_NOPM, TOMTOM_RX3, 0, - &slim_rx_mux[TOMTOM_RX3]), - SND_SOC_DAPM_MUX("SLIM RX4 MUX", SND_SOC_NOPM, TOMTOM_RX4, 0, - &slim_rx_mux[TOMTOM_RX4]), - SND_SOC_DAPM_MUX("SLIM RX5 MUX", SND_SOC_NOPM, TOMTOM_RX5, 0, - &slim_rx_mux[TOMTOM_RX5]), - SND_SOC_DAPM_MUX("SLIM RX6 MUX", SND_SOC_NOPM, TOMTOM_RX6, 0, - &slim_rx_mux[TOMTOM_RX6]), - SND_SOC_DAPM_MUX("SLIM RX7 MUX", SND_SOC_NOPM, TOMTOM_RX7, 0, - &slim_rx_mux[TOMTOM_RX7]), - SND_SOC_DAPM_MUX("SLIM RX8 MUX", SND_SOC_NOPM, TOMTOM_RX8, 0, - &slim_rx_mux[TOMTOM_RX8]), - - SND_SOC_DAPM_MIXER("SLIM RX1", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("SLIM RX2", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("SLIM RX3", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("SLIM RX4", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("SLIM RX5", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("SLIM RX6", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("SLIM RX7", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("SLIM RX8", SND_SOC_NOPM, 0, 0, NULL, 0), - - /* Headphone */ - SND_SOC_DAPM_OUTPUT("HEADPHONE"), - SND_SOC_DAPM_PGA_E("HPHL", TOMTOM_A_RX_HPH_CNP_EN, 5, 0, NULL, 0, - tomtom_hph_pa_event, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | - SND_SOC_DAPM_POST_PMD), - 
SND_SOC_DAPM_MIXER_E("HPHL DAC", TOMTOM_A_RX_HPH_L_DAC_CTL, 7, 0, - hphl_switch, ARRAY_SIZE(hphl_switch), tomtom_hphl_dac_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_PGA_E("HPHR", TOMTOM_A_RX_HPH_CNP_EN, 4, 0, NULL, 0, - tomtom_hph_pa_event, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD | - SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_DAC_E("HPHR DAC", NULL, TOMTOM_A_RX_HPH_R_DAC_CTL, 7, 0, - tomtom_hphr_dac_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - /* Speaker */ - SND_SOC_DAPM_OUTPUT("LINEOUT1"), - SND_SOC_DAPM_OUTPUT("LINEOUT2"), - SND_SOC_DAPM_OUTPUT("LINEOUT3"), - SND_SOC_DAPM_OUTPUT("LINEOUT4"), - SND_SOC_DAPM_OUTPUT("SPK_OUT"), - - SND_SOC_DAPM_PGA_E("LINEOUT1 PA", TOMTOM_A_RX_LINE_CNP_EN, 0, 0, NULL, - 0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("LINEOUT2 PA", TOMTOM_A_RX_LINE_CNP_EN, 1, 0, NULL, - 0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("LINEOUT3 PA", TOMTOM_A_RX_LINE_CNP_EN, 2, 0, NULL, - 0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("LINEOUT4 PA", TOMTOM_A_RX_LINE_CNP_EN, 3, 0, NULL, - 0, tomtom_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("SPK PA", SND_SOC_NOPM, 0, 0, NULL, - 0, tomtom_codec_enable_spk_pa, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_PGA_E("SPK2 PA", SND_SOC_NOPM, 0, 0, NULL, - 0, tomtom_codec_enable_spk_pa, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_DAC_E("LINEOUT1 DAC", NULL, TOMTOM_A_RX_LINE_1_DAC_CTL, 7, - 0, tomtom_lineout_dac_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_DAC_E("LINEOUT2 DAC", NULL, TOMTOM_A_RX_LINE_2_DAC_CTL, 7, - 0, tomtom_lineout_dac_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_DAC_E("LINEOUT3 DAC", NULL, TOMTOM_A_RX_LINE_3_DAC_CTL, 7, - 0, tomtom_lineout_dac_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_SWITCH("LINEOUT3 DAC GROUND", SND_SOC_NOPM, 0, 0, - &lineout3_ground_switch), - SND_SOC_DAPM_DAC_E("LINEOUT4 DAC", NULL, TOMTOM_A_RX_LINE_4_DAC_CTL, 7, - 0, tomtom_lineout_dac_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_SWITCH("LINEOUT4 DAC GROUND", SND_SOC_NOPM, 0, 0, - &lineout4_ground_switch), - - SND_SOC_DAPM_DAC_E("SPK DAC", NULL, TOMTOM_A_CDC_BOOST_TRGR_EN, 0, 0, - tomtom_spk_dac_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_DAC_E("SPK2 DAC", NULL, TOMTOM_A_CDC_BOOST_TRGR_EN, 1, 0, - tomtom_spk_dac_event, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_SUPPLY("VDD_SPKDRV", SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_vdd_spkr, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_SUPPLY("VDD_SPKDRV2", SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_vdd_spkr2, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MIXER("RX1 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("RX2 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("RX7 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0), - - SND_SOC_DAPM_MIXER("RX1 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_MIXER("RX2 MIX2", SND_SOC_NOPM, 0, 0, NULL, 0), - - SND_SOC_DAPM_MIXER_E("RX3 MIX1", 
TOMTOM_A_CDC_CLK_RX_B1_CTL, 2, 0, NULL, - 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_MIXER_E("RX4 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 3, 0, NULL, - 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_MIXER_E("RX5 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 4, 0, NULL, - 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_MIXER_E("RX6 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 5, 0, NULL, - 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_MIXER_E("RX7 MIX2", TOMTOM_A_CDC_CLK_RX_B1_CTL, 6, 0, NULL, - 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_MIXER_E("RX8 MIX1", TOMTOM_A_CDC_CLK_RX_B1_CTL, 7, 0, NULL, - 0, tomtom_codec_enable_interpolator, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU), - - SND_SOC_DAPM_MUX_E("RX1 INTERP", TOMTOM_A_CDC_CLK_RX_B1_CTL, 0, 0, - &rx1_interp_mux, tomtom_codec_enable_interpolator, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_MUX_E("RX2 INTERP", TOMTOM_A_CDC_CLK_RX_B1_CTL, 1, 0, - &rx2_interp_mux, tomtom_codec_enable_interpolator, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU), - - - SND_SOC_DAPM_MIXER("RX1 CHAIN", TOMTOM_A_CDC_RX1_B6_CTL, 5, 0, NULL, 0), - SND_SOC_DAPM_MIXER("RX2 CHAIN", TOMTOM_A_CDC_RX2_B6_CTL, 5, 0, NULL, 0), - - SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0, - &rx_mix1_inp1_mux), - SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0, - &rx_mix1_inp2_mux), - SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0, - &rx_mix1_inp3_mux), - SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0, - &rx2_mix1_inp1_mux), - SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0, - &rx2_mix1_inp2_mux), - SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0, - &rx3_mix1_inp1_mux), - SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0, - &rx3_mix1_inp2_mux), - SND_SOC_DAPM_MUX("RX4 MIX1 INP1", SND_SOC_NOPM, 0, 0, - &rx4_mix1_inp1_mux), - SND_SOC_DAPM_MUX("RX4 MIX1 INP2", SND_SOC_NOPM, 0, 0, - &rx4_mix1_inp2_mux), - SND_SOC_DAPM_MUX("RX5 MIX1 INP1", SND_SOC_NOPM, 0, 0, - &rx5_mix1_inp1_mux), - SND_SOC_DAPM_MUX("RX5 MIX1 INP2", SND_SOC_NOPM, 0, 0, - &rx5_mix1_inp2_mux), - SND_SOC_DAPM_MUX("RX6 MIX1 INP1", SND_SOC_NOPM, 0, 0, - &rx6_mix1_inp1_mux), - SND_SOC_DAPM_MUX("RX6 MIX1 INP2", SND_SOC_NOPM, 0, 0, - &rx6_mix1_inp2_mux), - SND_SOC_DAPM_MUX("RX7 MIX1 INP1", SND_SOC_NOPM, 0, 0, - &rx7_mix1_inp1_mux), - SND_SOC_DAPM_MUX("RX7 MIX1 INP2", SND_SOC_NOPM, 0, 0, - &rx7_mix1_inp2_mux), - SND_SOC_DAPM_MUX("RX8 MIX1 INP1", SND_SOC_NOPM, 0, 0, - &rx8_mix1_inp1_mux), - SND_SOC_DAPM_MUX("RX8 MIX1 INP2", SND_SOC_NOPM, 0, 0, - &rx8_mix1_inp2_mux), - SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0, - &rx1_mix2_inp1_mux), - SND_SOC_DAPM_MUX("RX1 MIX2 INP2", SND_SOC_NOPM, 0, 0, - &rx1_mix2_inp2_mux), - SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0, - &rx2_mix2_inp1_mux), - SND_SOC_DAPM_MUX("RX2 MIX2 INP2", SND_SOC_NOPM, 0, 0, - &rx2_mix2_inp2_mux), - SND_SOC_DAPM_MUX("RX7 MIX2 INP1", SND_SOC_NOPM, 0, 0, - &rx7_mix2_inp1_mux), - SND_SOC_DAPM_MUX("RX7 MIX2 INP2", SND_SOC_NOPM, 0, 0, - &rx7_mix2_inp2_mux), - - SND_SOC_DAPM_MUX("RDAC5 MUX", SND_SOC_NOPM, 0, 0, - &rx_dac5_mux), - SND_SOC_DAPM_MUX("RDAC7 MUX", SND_SOC_NOPM, 0, 0, - &rx_dac7_mux), - - SND_SOC_DAPM_MUX("MAD_SEL MUX", SND_SOC_NOPM, 0, 0, - &mad_sel_mux), - - SND_SOC_DAPM_MUX_E("CLASS_H_DSM MUX", SND_SOC_NOPM, 0, 0, - &class_h_dsm_mux, 
tomtom_codec_dsm_mux_event, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_SUPPLY("CDC_I2S_RX_CONN", WCD9XXX_A_CDC_CLK_OTHR_CTL, 5, 0, - NULL, 0), - - /* TX */ - - SND_SOC_DAPM_SUPPLY("CDC_CONN", WCD9XXX_A_CDC_CLK_OTHR_CTL, 2, 0, NULL, - 0), - - SND_SOC_DAPM_SUPPLY("LDO_H", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_ldo_h, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - /* - * DAPM 'LDO_H Standalone' is to be powered by mbhc driver after - * acquring codec_resource lock. - * So call __tomtom_codec_enable_ldo_h instead and avoid deadlock. - */ - SND_SOC_DAPM_SUPPLY("LDO_H Standalone", SND_SOC_NOPM, 7, 0, - __tomtom_codec_enable_ldo_h, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_SUPPLY("COMP0_CLK", SND_SOC_NOPM, 0, 0, - tomtom_config_compander, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_PRE_PMD), - SND_SOC_DAPM_SUPPLY("COMP1_CLK", SND_SOC_NOPM, 1, 0, - tomtom_config_compander, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_PRE_PMD), - SND_SOC_DAPM_SUPPLY("COMP2_CLK", SND_SOC_NOPM, 2, 0, - tomtom_config_compander, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_PRE_PMD), - - - SND_SOC_DAPM_INPUT("AMIC1"), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 External", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal1", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS1 Internal2", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_INPUT("AMIC3"), - - SND_SOC_DAPM_INPUT("AMIC4"), - - SND_SOC_DAPM_INPUT("AMIC5"), - - SND_SOC_DAPM_INPUT("AMIC6"), - - SND_SOC_DAPM_MUX_E("DEC1 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0, - &dec1_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX_E("DEC2 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0, - &dec2_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX_E("DEC3 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 2, 0, - &dec3_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX_E("DEC4 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 3, 0, - &dec4_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX_E("DEC5 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 4, 0, - &dec5_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX_E("DEC6 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 5, 0, - &dec6_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX_E("DEC7 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 6, 0, - &dec7_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX_E("DEC8 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, 7, 0, - &dec8_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | 
SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX_E("DEC9 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, 0, 0, - &dec9_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX_E("DEC10 MUX", TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, 1, 0, - &dec10_mux, tomtom_codec_enable_dec, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_MUX("ANC1 MUX", SND_SOC_NOPM, 0, 0, &anc1_mux), - SND_SOC_DAPM_MUX("ANC2 MUX", SND_SOC_NOPM, 0, 0, &anc2_mux), - - SND_SOC_DAPM_OUTPUT("ANC HEADPHONE"), - SND_SOC_DAPM_PGA_E("ANC HPHL", SND_SOC_NOPM, 5, 0, NULL, 0, - tomtom_codec_enable_anc_hph, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD | - SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_PGA_E("ANC HPHR", SND_SOC_NOPM, 4, 0, NULL, 0, - tomtom_codec_enable_anc_hph, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD | - SND_SOC_DAPM_POST_PMU), - SND_SOC_DAPM_OUTPUT("ANC EAR"), - SND_SOC_DAPM_PGA_E("ANC EAR PA", SND_SOC_NOPM, 0, 0, NULL, 0, - tomtom_codec_enable_anc_ear, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MUX("ANC1 FB MUX", SND_SOC_NOPM, 0, 0, &anc1_fb_mux), - - SND_SOC_DAPM_INPUT("AMIC2"), - SND_SOC_DAPM_MICBIAS_E(DAPM_MICBIAS2_EXTERNAL_STANDALONE, SND_SOC_NOPM, - 7, 0, tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 External", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal1", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal2", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS2 Internal3", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 External", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal1", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS3 Internal2", SND_SOC_NOPM, 7, 0, - tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_MICBIAS_E("MIC BIAS4 External", SND_SOC_NOPM, 7, - 0, tomtom_codec_enable_micbias, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_AIF_OUT_E("AIF1 CAP", "AIF1 Capture", 0, SND_SOC_NOPM, - AIF1_CAP, 0, tomtom_codec_enable_slimtx, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_AIF_OUT_E("AIF2 CAP", "AIF2 Capture", 0, SND_SOC_NOPM, - AIF2_CAP, 0, tomtom_codec_enable_slimtx, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_AIF_OUT_E("AIF3 CAP", "AIF3 Capture", 0, SND_SOC_NOPM, - AIF3_CAP, 0, tomtom_codec_enable_slimtx, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_AIF_OUT_E("AIF4 VI", "VIfeed", 0, SND_SOC_NOPM, - AIF4_VIFEED, 0, 
tomtom_codec_enable_slimvi_feedback, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_AIF_OUT_E("AIF4 MAD", "AIF4 MAD TX", 0, - SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_mad, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_SWITCH("MADONOFF", SND_SOC_NOPM, 0, 0, - &aif4_mad_switch), - SND_SOC_DAPM_INPUT("MADINPUT"), - SND_SOC_DAPM_INPUT("MAD_CPE_INPUT"), - - SND_SOC_DAPM_MIXER("AIF1_CAP Mixer", SND_SOC_NOPM, AIF1_CAP, 0, - aif1_cap_mixer, ARRAY_SIZE(aif1_cap_mixer)), - - SND_SOC_DAPM_MIXER("AIF2_CAP Mixer", SND_SOC_NOPM, AIF2_CAP, 0, - aif2_cap_mixer, ARRAY_SIZE(aif2_cap_mixer)), - - SND_SOC_DAPM_MIXER("AIF3_CAP Mixer", SND_SOC_NOPM, AIF3_CAP, 0, - aif3_cap_mixer, ARRAY_SIZE(aif3_cap_mixer)), - - SND_SOC_DAPM_MUX("SLIM TX1 MUX", SND_SOC_NOPM, TOMTOM_TX1, 0, - &sb_tx1_mux), - SND_SOC_DAPM_MUX("SLIM TX2 MUX", SND_SOC_NOPM, TOMTOM_TX2, 0, - &sb_tx2_mux), - SND_SOC_DAPM_MUX("SLIM TX3 MUX", SND_SOC_NOPM, TOMTOM_TX3, 0, - &sb_tx3_mux), - SND_SOC_DAPM_MUX("SLIM TX4 MUX", SND_SOC_NOPM, TOMTOM_TX4, 0, - &sb_tx4_mux), - SND_SOC_DAPM_MUX("SLIM TX5 MUX", SND_SOC_NOPM, TOMTOM_TX5, 0, - &sb_tx5_mux), - SND_SOC_DAPM_MUX("SLIM TX6 MUX", SND_SOC_NOPM, TOMTOM_TX6, 0, - &sb_tx6_mux), - SND_SOC_DAPM_MUX("SLIM TX7 MUX", SND_SOC_NOPM, TOMTOM_TX7, 0, - &sb_tx7_mux), - SND_SOC_DAPM_MUX("SLIM TX8 MUX", SND_SOC_NOPM, TOMTOM_TX8, 0, - &sb_tx8_mux), - SND_SOC_DAPM_MUX("SLIM TX9 MUX", SND_SOC_NOPM, TOMTOM_TX9, 0, - &sb_tx9_mux), - SND_SOC_DAPM_MUX("SLIM TX10 MUX", SND_SOC_NOPM, TOMTOM_TX10, 0, - &sb_tx10_mux), - - /* Digital Mic Inputs */ - SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_ADC_E("DMIC3", NULL, SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_ADC_E("DMIC4", NULL, SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_ADC_E("DMIC5", NULL, SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_ADC_E("DMIC6", NULL, SND_SOC_NOPM, 0, 0, - tomtom_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), - - /* Sidetone */ - SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux), - - SND_SOC_DAPM_MUX("IIR1 INP2 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp2_mux), - - SND_SOC_DAPM_MUX("IIR1 INP3 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp3_mux), - - SND_SOC_DAPM_MUX("IIR1 INP4 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp4_mux), - - SND_SOC_DAPM_MIXER_E("IIR1", TOMTOM_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0, - tomtom_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD), - - SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux), - - SND_SOC_DAPM_MUX("IIR2 INP2 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp2_mux), - - SND_SOC_DAPM_MUX("IIR2 INP3 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp3_mux), - - SND_SOC_DAPM_MUX("IIR2 INP4 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp4_mux), - - SND_SOC_DAPM_MIXER_E("IIR2", TOMTOM_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0, - tomtom_codec_set_iir_gain, SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_PRE_PMD), - - /* AUX PGA */ - SND_SOC_DAPM_ADC_E("AUX_PGA_Left", NULL, TOMTOM_A_RX_AUX_SW_CTL, 7, 0, - tomtom_codec_enable_aux_pga, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), - - SND_SOC_DAPM_ADC_E("AUX_PGA_Right", NULL, TOMTOM_A_RX_AUX_SW_CTL, 6, 0, - 
tomtom_codec_enable_aux_pga, SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMD), - - /* Lineout, ear and HPH PA Mixers */ - - SND_SOC_DAPM_MIXER("EAR_PA_MIXER", SND_SOC_NOPM, 0, 0, - ear_pa_mix, ARRAY_SIZE(ear_pa_mix)), - - SND_SOC_DAPM_MIXER("HPHL_PA_MIXER", SND_SOC_NOPM, 0, 0, - hphl_pa_mix, ARRAY_SIZE(hphl_pa_mix)), - - SND_SOC_DAPM_MIXER("HPHR_PA_MIXER", SND_SOC_NOPM, 0, 0, - hphr_pa_mix, ARRAY_SIZE(hphr_pa_mix)), - - SND_SOC_DAPM_MIXER("LINEOUT1_PA_MIXER", SND_SOC_NOPM, 0, 0, - lineout1_pa_mix, ARRAY_SIZE(lineout1_pa_mix)), - - SND_SOC_DAPM_MIXER("LINEOUT2_PA_MIXER", SND_SOC_NOPM, 0, 0, - lineout2_pa_mix, ARRAY_SIZE(lineout2_pa_mix)), - - SND_SOC_DAPM_MIXER("LINEOUT3_PA_MIXER", SND_SOC_NOPM, 0, 0, - lineout3_pa_mix, ARRAY_SIZE(lineout3_pa_mix)), - - SND_SOC_DAPM_MIXER("LINEOUT4_PA_MIXER", SND_SOC_NOPM, 0, 0, - lineout4_pa_mix, ARRAY_SIZE(lineout4_pa_mix)), - - SND_SOC_DAPM_SWITCH("VIONOFF", SND_SOC_NOPM, 0, 0, - &aif4_vi_switch), - - SND_SOC_DAPM_INPUT("VIINPUT"), -}; - -static irqreturn_t tomtom_slimbus_irq(int irq, void *data) -{ - struct tomtom_priv *priv = data; - struct snd_soc_codec *codec = priv->codec; - struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent); - unsigned long status = 0; - int i, j, port_id, k; - u32 bit; - u8 val, int_val = 0; - bool tx, cleared; - unsigned short reg = 0; - - for (i = TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_0, j = 0; - i <= TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_1; i++, j++) { - val = wcd9xxx_interface_reg_read(wcd9xxx, i); - status |= ((u32)val << (8 * j)); - } - - for_each_set_bit(j, &status, 32) { - tx = (j >= 16 ? true : false); - port_id = (tx ? j - 16 : j); - val = wcd9xxx_interface_reg_read(wcd9xxx, - TOMTOM_SLIM_PGD_PORT_INT_RX_SOURCE0 + j); - if (val) { - if (!tx) - reg = TOMTOM_SLIM_PGD_PORT_INT_EN0 + - (port_id / 8); - else - reg = TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 + - (port_id / 8); - int_val = wcd9xxx_interface_reg_read( - wcd9xxx, reg); - /* - * Ignore interrupts for ports for which the - * interrupts are not specifically enabled. - */ - if (!(int_val & (1 << (port_id % 8)))) - continue; - } - if (val & TOMTOM_SLIM_IRQ_OVERFLOW) - pr_err_ratelimited( - "%s: overflow error on %s port %d, value %x\n", - __func__, (tx ? "TX" : "RX"), port_id, val); - if (val & TOMTOM_SLIM_IRQ_UNDERFLOW) - pr_err_ratelimited( - "%s: underflow error on %s port %d, value %x\n", - __func__, (tx ? "TX" : "RX"), port_id, val); - if ((val & TOMTOM_SLIM_IRQ_OVERFLOW) || - (val & TOMTOM_SLIM_IRQ_UNDERFLOW)) { - if (!tx) - reg = TOMTOM_SLIM_PGD_PORT_INT_EN0 + - (port_id / 8); - else - reg = TOMTOM_SLIM_PGD_PORT_INT_TX_EN0 + - (port_id / 8); - int_val = wcd9xxx_interface_reg_read(wcd9xxx, reg); - if (int_val & (1 << (port_id % 8))) { - int_val = int_val ^ (1 << (port_id % 8)); - wcd9xxx_interface_reg_write(wcd9xxx, reg, - int_val); - } - } - if (val & TOMTOM_SLIM_IRQ_PORT_CLOSED) { - /* - * INT SOURCE register starts from RX to TX - * but port number in the ch_mask is in opposite way - */ - bit = (tx ? j - 16 : j + 16); - pr_debug("%s: %s port %d closed value %x, bit %u\n", - __func__, (tx ? "TX" : "RX"), port_id, val, - bit); - for (k = 0, cleared = false; k < NUM_CODEC_DAIS; k++) { - pr_debug("%s: priv->dai[%d].ch_mask = 0x%lx\n", - __func__, k, priv->dai[k].ch_mask); - if (test_and_clear_bit(bit, - &priv->dai[k].ch_mask)) { - cleared = true; - if (!priv->dai[k].ch_mask) - wake_up(&priv->dai[k].dai_wait); - /* - * There are cases when multiple DAIs - * might be using the same slimbus - * channel. Hence don't break here. 
- */ - } - } - WARN(!cleared, - "Couldn't find slimbus %s port %d for closing\n", - (tx ? "TX" : "RX"), port_id); - } - wcd9xxx_interface_reg_write(wcd9xxx, - TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_0 + - (j / 8), - 1 << (j % 8)); - } - - return IRQ_HANDLED; -} - -static int tomtom_handle_pdata(struct tomtom_priv *tomtom) -{ - struct snd_soc_codec *codec = tomtom->codec; - struct wcd9xxx_pdata *pdata = tomtom->resmgr.pdata; - int k1, k2, k3, dec, rc = 0; - u8 leg_mode, txfe_bypass, txfe_buff, flag; - u8 i = 0, j = 0; - u8 val_txfe = 0, value = 0; - u8 dmic_ctl_val, mad_dmic_ctl_val; - u8 anc_ctl_value = 0; - u32 def_dmic_rate; - u16 tx_dmic_ctl_reg; - - if (!pdata) { - pr_err("%s: NULL pdata\n", __func__); - rc = -ENODEV; - goto done; - } - - leg_mode = pdata->amic_settings.legacy_mode; - txfe_bypass = pdata->amic_settings.txfe_enable; - txfe_buff = pdata->amic_settings.txfe_buff; - flag = pdata->amic_settings.use_pdata; - - /* Make sure settings are correct */ - if ((pdata->micbias.ldoh_v > WCD9XXX_LDOH_3P0_V) || - (pdata->micbias.bias1_cfilt_sel > WCD9XXX_CFILT3_SEL) || - (pdata->micbias.bias2_cfilt_sel > WCD9XXX_CFILT3_SEL) || - (pdata->micbias.bias3_cfilt_sel > WCD9XXX_CFILT3_SEL) || - (pdata->micbias.bias4_cfilt_sel > WCD9XXX_CFILT3_SEL)) { - rc = -EINVAL; - goto done; - } - /* figure out k value */ - k1 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr, - pdata->micbias.cfilt1_mv); - k2 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr, - pdata->micbias.cfilt2_mv); - k3 = wcd9xxx_resmgr_get_k_val(&tomtom->resmgr, - pdata->micbias.cfilt3_mv); - if (k1 < 0 || k2 < 0 || k3 < 0) { - rc = -EINVAL; - goto done; - } - /* Set voltage level and always use LDO */ - snd_soc_update_bits(codec, TOMTOM_A_LDO_H_MODE_1, 0x0C, - (pdata->micbias.ldoh_v << 2)); - - snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_1_VAL, 0xFC, (k1 << 2)); - snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_2_VAL, 0xFC, (k2 << 2)); - snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_3_VAL, 0xFC, (k3 << 2)); - - snd_soc_update_bits(codec, TOMTOM_A_MICB_1_CTL, 0x60, - (pdata->micbias.bias1_cfilt_sel << 5)); - snd_soc_update_bits(codec, TOMTOM_A_MICB_2_CTL, 0x60, - (pdata->micbias.bias2_cfilt_sel << 5)); - snd_soc_update_bits(codec, TOMTOM_A_MICB_3_CTL, 0x60, - (pdata->micbias.bias3_cfilt_sel << 5)); - snd_soc_update_bits(codec, tomtom->resmgr.reg_addr->micb_4_ctl, 0x60, - (pdata->micbias.bias4_cfilt_sel << 5)); - - for (i = 0; i < 6; j++, i += 2) { - if (flag & (0x01 << i)) { - val_txfe = (txfe_bypass & (0x01 << i)) ? 0x20 : 0x00; - val_txfe = val_txfe | - ((txfe_buff & (0x01 << i)) ? 0x10 : 0x00); - snd_soc_update_bits(codec, - TOMTOM_A_TX_1_2_TEST_EN + j * 10, - 0x30, val_txfe); - } - if (flag & (0x01 << (i + 1))) { - val_txfe = (txfe_bypass & - (0x01 << (i + 1))) ? 0x02 : 0x00; - val_txfe |= (txfe_buff & - (0x01 << (i + 1))) ? 0x01 : 0x00; - snd_soc_update_bits(codec, - TOMTOM_A_TX_1_2_TEST_EN + j * 10, - 0x03, val_txfe); - } - } - if (flag & 0x40) { - value = (leg_mode & 0x40) ? 0x10 : 0x00; - value = value | ((txfe_bypass & 0x40) ? 0x02 : 0x00); - value = value | ((txfe_buff & 0x40) ? 
0x01 : 0x00); - snd_soc_update_bits(codec, TOMTOM_A_TX_7_MBHC_EN, - 0x13, value); - } - - if (pdata->ocp.use_pdata) { - /* not defined in CODEC specification */ - if (pdata->ocp.hph_ocp_limit == 1 || - pdata->ocp.hph_ocp_limit == 5) { - rc = -EINVAL; - goto done; - } - snd_soc_update_bits(codec, TOMTOM_A_RX_COM_OCP_CTL, - 0x0F, pdata->ocp.num_attempts); - snd_soc_write(codec, TOMTOM_A_RX_COM_OCP_COUNT, - ((pdata->ocp.run_time << 4) | pdata->ocp.wait_time)); - snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_OCP_CTL, - 0xE0, (pdata->ocp.hph_ocp_limit << 5)); - } - - for (i = 0; i < ARRAY_SIZE(pdata->regulator); i++) { - if (pdata->regulator[i].name && - !strcmp(pdata->regulator[i].name, "CDC_VDDA_RX")) { - if (pdata->regulator[i].min_uV == 1800000 && - pdata->regulator[i].max_uV == 1800000) { - snd_soc_write(codec, TOMTOM_A_BIAS_REF_CTL, - 0x1C); - } else if (pdata->regulator[i].min_uV == 2200000 && - pdata->regulator[i].max_uV == 2200000) { - snd_soc_write(codec, TOMTOM_A_BIAS_REF_CTL, - 0x1E); - } else { - pr_err("%s: unsupported CDC_VDDA_RX voltage\n" - "min %d, max %d\n", __func__, - pdata->regulator[i].min_uV, - pdata->regulator[i].max_uV); - rc = -EINVAL; - } - break; - } - } - - /* Set micbias capless mode with tail current */ - value = (pdata->micbias.bias1_cap_mode == MICBIAS_EXT_BYP_CAP ? - 0x00 : 0x16); - snd_soc_update_bits(codec, TOMTOM_A_MICB_1_CTL, 0x1E, value); - value = (pdata->micbias.bias2_cap_mode == MICBIAS_EXT_BYP_CAP ? - 0x00 : 0x16); - snd_soc_update_bits(codec, TOMTOM_A_MICB_2_CTL, 0x1E, value); - value = (pdata->micbias.bias3_cap_mode == MICBIAS_EXT_BYP_CAP ? - 0x00 : 0x16); - snd_soc_update_bits(codec, TOMTOM_A_MICB_3_CTL, 0x1E, value); - value = (pdata->micbias.bias4_cap_mode == MICBIAS_EXT_BYP_CAP ? - 0x00 : 0x16); - snd_soc_update_bits(codec, TOMTOM_A_MICB_4_CTL, 0x1E, value); - - /* Set the DMIC sample rate */ - switch (pdata->mclk_rate) { - case TOMTOM_MCLK_CLK_9P6MHZ: - def_dmic_rate = - WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ; - break; - case TOMTOM_MCLK_CLK_12P288MHZ: - def_dmic_rate = - WCD9XXX_DMIC_SAMPLE_RATE_4P096MHZ; - break; - default: - /* should never happen */ - pr_err("%s: Invalid mclk_rate %d\n", - __func__, pdata->mclk_rate); - rc = -EINVAL; - goto done; - } - - if (pdata->dmic_sample_rate == - WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) { - pr_info("%s: dmic_rate invalid default = %d\n", - __func__, def_dmic_rate); - pdata->dmic_sample_rate = def_dmic_rate; - } - - if (pdata->mad_dmic_sample_rate == - WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) { - pr_info("%s: mad_dmic_rate invalid default = %d\n", - __func__, def_dmic_rate); - /* - * use dmic_sample_rate as the default for MAD - * if mad dmic sample rate is undefined - */ - pdata->mad_dmic_sample_rate = pdata->dmic_sample_rate; - } - - /* - * Default the DMIC clk rates to mad_dmic_sample_rate, - * whereas, the anc/txfe dmic rates to dmic_sample_rate - * since the anc/txfe are independent of mad block. 
- */ - mad_dmic_ctl_val = tomtom_get_dmic_clk_val(tomtom->codec, - pdata->mclk_rate, - pdata->mad_dmic_sample_rate); - snd_soc_update_bits(codec, TOMTOM_A_DMIC_B1_CTL, - 0xE0, mad_dmic_ctl_val << 5); - snd_soc_update_bits(codec, TOMTOM_A_DMIC_B2_CTL, - 0x70, mad_dmic_ctl_val << 4); - snd_soc_update_bits(codec, TOMTOM_A_DMIC_B2_CTL, - 0x0E, mad_dmic_ctl_val << 1); - - dmic_ctl_val = tomtom_get_dmic_clk_val(tomtom->codec, - pdata->mclk_rate, - pdata->dmic_sample_rate); - - if (dmic_ctl_val == WCD9330_DMIC_CLK_DIV_2) - anc_ctl_value = WCD9XXX_ANC_DMIC_X2_ON; - else - anc_ctl_value = WCD9XXX_ANC_DMIC_X2_OFF; - - for (dec = 0; dec < NUM_DECIMATORS; dec++) { - tx_dmic_ctl_reg = - TOMTOM_A_CDC_TX1_DMIC_CTL + (8 * dec); - snd_soc_update_bits(codec, tx_dmic_ctl_reg, - 0x07, dmic_ctl_val); - } - snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC1_B2_CTL, - 0x1, anc_ctl_value); - snd_soc_update_bits(codec, TOMTOM_A_CDC_ANC2_B2_CTL, - 0x1, anc_ctl_value); -done: - return rc; -} - -static const struct wcd9xxx_reg_mask_val tomtom_reg_defaults[] = { - - /* set MCLk to 9.6 */ - TOMTOM_REG_VAL(TOMTOM_A_CHIP_CTL, 0x02), - - /* EAR PA deafults */ - TOMTOM_REG_VAL(TOMTOM_A_RX_EAR_CMBUFF, 0x05), - - /* RX deafults */ - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX1_B5_CTL, 0x79), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX2_B5_CTL, 0x79), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX3_B5_CTL, 0x79), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX4_B5_CTL, 0x79), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX5_B5_CTL, 0x79), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX6_B5_CTL, 0x79), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX7_B5_CTL, 0x79), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX8_B5_CTL, 0x79), - - /* RX1 and RX2 defaults */ - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX1_B6_CTL, 0xA0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX2_B6_CTL, 0xA0), - - /* RX3 to RX7 defaults */ - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX3_B6_CTL, 0x80), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX4_B6_CTL, 0x80), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX5_B6_CTL, 0x80), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX6_B6_CTL, 0x80), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX7_B6_CTL, 0x80), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX8_B6_CTL, 0x80), - - /* MAD registers */ - TOMTOM_REG_VAL(TOMTOM_A_MAD_ANA_CTRL, 0xF1), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_MAIN_CTL_1, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_MAIN_CTL_2, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_1, 0x00), - /* Set SAMPLE_TX_EN bit */ - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_2, 0x03), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_3, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_4, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_5, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_6, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_7, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_CTL_8, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL, 0x40), - TOMTOM_REG_VAL(TOMTOM_A_CDC_DEBUG_B7_CTL, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_CLK_OTHR_CTL, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_INP_SEL, 0x01), - - /* Set HPH Path to low power mode */ - TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_BIAS_PA, 0x57), - - /* BUCK default */ - TOMTOM_REG_VAL(TOMTOM_A_BUCK_CTRL_CCL_4, 0x51), - TOMTOM_REG_VAL(TOMTOM_A_BUCK_CTRL_CCL_1, 0x5B), -}; - -/* - * Don't update TOMTOM_A_CHIP_CTL, TOMTOM_A_BUCK_CTRL_CCL_1 and - * TOMTOM_A_RX_EAR_CMBUFF as those are updated in tomtom_reg_defaults - */ -static const struct wcd9xxx_reg_mask_val tomtom_1_0_reg_defaults[] = { - TOMTOM_REG_VAL(TOMTOM_A_TX_1_GAIN, 0x2), - TOMTOM_REG_VAL(TOMTOM_A_TX_2_GAIN, 0x2), - 
TOMTOM_REG_VAL(TOMTOM_A_TX_1_2_ADC_IB, 0x44), - TOMTOM_REG_VAL(TOMTOM_A_TX_3_GAIN, 0x2), - TOMTOM_REG_VAL(TOMTOM_A_TX_4_GAIN, 0x2), - TOMTOM_REG_VAL(TOMTOM_A_TX_3_4_ADC_IB, 0x44), - TOMTOM_REG_VAL(TOMTOM_A_TX_5_GAIN, 0x2), - TOMTOM_REG_VAL(TOMTOM_A_TX_6_GAIN, 0x2), - TOMTOM_REG_VAL(TOMTOM_A_TX_5_6_ADC_IB, 0x44), - TOMTOM_REG_VAL(WCD9XXX_A_BUCK_MODE_3, 0xCE), - TOMTOM_REG_VAL(WCD9XXX_A_BUCK_CTRL_VCL_1, 0x8), - TOMTOM_REG_VAL(TOMTOM_A_BUCK_CTRL_CCL_4, 0x51), - TOMTOM_REG_VAL(TOMTOM_A_NCP_DTEST, 0x10), - TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_CHOP_CTL, 0xA4), - TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_OCP_CTL, 0x69), - TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xDA), - TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_CNP_WG_TIME, 0x15), - TOMTOM_REG_VAL(TOMTOM_A_RX_EAR_BIAS_PA, 0x76), - TOMTOM_REG_VAL(TOMTOM_A_RX_EAR_CNP, 0xC0), - TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_BIAS_PA, 0x78), - TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_1_TEST, 0x2), - TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_2_TEST, 0x2), - TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_3_TEST, 0x2), - TOMTOM_REG_VAL(TOMTOM_A_RX_LINE_4_TEST, 0x2), - TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV1_OCP_CTL, 0x97), - TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV1_CLIP_DET, 0x1), - TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV1_IEC, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV2_OCP_CTL, 0x97), - TOMTOM_REG_VAL(TOMTOM_A_SPKR_DRV2_CLIP_DET, 0x1), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX1_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX2_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX3_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX4_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX5_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX6_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX7_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX8_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX9_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX10_MUX_CTL, 0x4A), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX1_B4_CTL, 0xB), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX2_B4_CTL, 0xB), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX3_B4_CTL, 0xB), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX4_B4_CTL, 0xB), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX5_B4_CTL, 0xB), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX6_B4_CTL, 0xB), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX7_B4_CTL, 0xB), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX8_B4_CTL, 0xB), - TOMTOM_REG_VAL(TOMTOM_A_CDC_VBAT_GAIN_UPD_MON, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B1_CTL, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B2_CTL, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B3_CTL, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_PA_RAMP_B4_CTL, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_COMP0_B4_CTL, 0x37), - TOMTOM_REG_VAL(TOMTOM_A_CDC_COMP0_B5_CTL, 0x7f), - TOMTOM_REG_VAL(TOMTOM_A_CDC_COMP0_B5_CTL, 0x7f), -}; - -static const struct wcd9xxx_reg_mask_val tomtom_2_0_reg_defaults[] = { - TOMTOM_REG_VAL(TOMTOM_A_CDC_MAD_MAIN_CTL_2, 0x32), - TOMTOM_REG_VAL(TOMTOM_A_RCO_CTRL, 0x10), - TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_L_TEST, 0x0A), - TOMTOM_REG_VAL(TOMTOM_A_RX_HPH_R_TEST, 0x0A), - TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE0, 0xC3), - TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_DATA0, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_SCK_MODE, 0x04), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_WS_MODE, 0x04), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_SCK_MODE, 0x04), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_WS_MODE, 0x04), - TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE1, 0xE0), - TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE2, 0x03), - TOMTOM_REG_VAL(TOMTOM_A_CDC_JTCK_MODE, 0x04), - TOMTOM_REG_VAL(TOMTOM_A_CDC_JTDI_MODE, 0x04), - TOMTOM_REG_VAL(TOMTOM_A_CDC_JTMS_MODE, 0x04), - 
TOMTOM_REG_VAL(TOMTOM_A_CDC_JTDO_MODE, 0x04), - TOMTOM_REG_VAL(TOMTOM_A_CDC_JTRST_MODE, 0x04), -}; - -static const struct wcd9xxx_reg_mask_val tomtom_2_0_reg_i2c_defaults[] = { - TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE0, 0x00), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_SCK_MODE, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_TX_I2S_WS_MODE, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_SCK_MODE, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_CDC_RX_I2S_WS_MODE, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE1, 0x0), - TOMTOM_REG_VAL(TOMTOM_A_PIN_CTL_OE2, 0x0), -}; - -static void tomtom_update_reg_defaults(struct snd_soc_codec *codec) -{ - u32 i; - struct wcd9xxx *tomtom_core = dev_get_drvdata(codec->dev->parent); - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - for (i = 0; i < ARRAY_SIZE(tomtom_reg_defaults); i++) - snd_soc_write(codec, tomtom_reg_defaults[i].reg, - tomtom_reg_defaults[i].val); - - for (i = 0; i < ARRAY_SIZE(tomtom_1_0_reg_defaults); i++) - snd_soc_write(codec, tomtom_1_0_reg_defaults[i].reg, - tomtom_1_0_reg_defaults[i].val); - - if (!TOMTOM_IS_1_0(tomtom_core->version)) { - for (i = 0; i < ARRAY_SIZE(tomtom_2_0_reg_defaults); i++) - snd_soc_write(codec, tomtom_2_0_reg_defaults[i].reg, - tomtom_2_0_reg_defaults[i].val); - - if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) { - for (i = 0; i < ARRAY_SIZE(tomtom_2_0_reg_i2c_defaults); - i++) - snd_soc_write(codec, - tomtom_2_0_reg_i2c_defaults[i].reg, - tomtom_2_0_reg_i2c_defaults[i].val); - } - } -} - -static const struct wcd9xxx_reg_mask_val tomtom_codec_reg_init_val[] = { - /* Initialize current threshold to 350MA - * number of wait and run cycles to 4096 - */ - {TOMTOM_A_RX_HPH_OCP_CTL, 0xE1, 0x61}, - {TOMTOM_A_RX_COM_OCP_COUNT, 0xFF, 0xFF}, - {TOMTOM_A_RX_HPH_L_TEST, 0x01, 0x01}, - {TOMTOM_A_RX_HPH_R_TEST, 0x01, 0x01}, - - /* Initialize gain registers to use register gain */ - {TOMTOM_A_RX_HPH_L_GAIN, 0x20, 0x20}, - {TOMTOM_A_RX_HPH_R_GAIN, 0x20, 0x20}, - {TOMTOM_A_RX_LINE_1_GAIN, 0x20, 0x20}, - {TOMTOM_A_RX_LINE_2_GAIN, 0x20, 0x20}, - {TOMTOM_A_RX_LINE_3_GAIN, 0x20, 0x20}, - {TOMTOM_A_RX_LINE_4_GAIN, 0x20, 0x20}, - {TOMTOM_A_SPKR_DRV1_GAIN, 0x04, 0x04}, - {TOMTOM_A_SPKR_DRV2_GAIN, 0x04, 0x04}, - - /* Use 16 bit sample size for TX1 to TX6 */ - {TOMTOM_A_CDC_CONN_TX_SB_B1_CTL, 0x30, 0x20}, - {TOMTOM_A_CDC_CONN_TX_SB_B2_CTL, 0x30, 0x20}, - {TOMTOM_A_CDC_CONN_TX_SB_B3_CTL, 0x30, 0x20}, - {TOMTOM_A_CDC_CONN_TX_SB_B4_CTL, 0x30, 0x20}, - {TOMTOM_A_CDC_CONN_TX_SB_B5_CTL, 0x30, 0x20}, - {TOMTOM_A_CDC_CONN_TX_SB_B6_CTL, 0x30, 0x20}, - - /* Use 16 bit sample size for TX7 to TX10 */ - {TOMTOM_A_CDC_CONN_TX_SB_B7_CTL, 0x60, 0x40}, - {TOMTOM_A_CDC_CONN_TX_SB_B8_CTL, 0x60, 0x40}, - {TOMTOM_A_CDC_CONN_TX_SB_B9_CTL, 0x60, 0x40}, - {TOMTOM_A_CDC_CONN_TX_SB_B10_CTL, 0x60, 0x40}, - - /*enable HPF filter for TX paths */ - {TOMTOM_A_CDC_TX1_MUX_CTL, 0x8, 0x0}, - {TOMTOM_A_CDC_TX2_MUX_CTL, 0x8, 0x0}, - {TOMTOM_A_CDC_TX3_MUX_CTL, 0x8, 0x0}, - {TOMTOM_A_CDC_TX4_MUX_CTL, 0x8, 0x0}, - {TOMTOM_A_CDC_TX5_MUX_CTL, 0x8, 0x0}, - {TOMTOM_A_CDC_TX6_MUX_CTL, 0x8, 0x0}, - {TOMTOM_A_CDC_TX7_MUX_CTL, 0x8, 0x0}, - {TOMTOM_A_CDC_TX8_MUX_CTL, 0x8, 0x0}, - {TOMTOM_A_CDC_TX9_MUX_CTL, 0x8, 0x0}, - {TOMTOM_A_CDC_TX10_MUX_CTL, 0x8, 0x0}, - - /* Compander zone selection */ - {TOMTOM_A_CDC_COMP0_B4_CTL, 0x3F, 0x37}, - {TOMTOM_A_CDC_COMP1_B4_CTL, 0x3F, 0x37}, - {TOMTOM_A_CDC_COMP2_B4_CTL, 0x3F, 0x37}, - {TOMTOM_A_CDC_COMP0_B5_CTL, 0x7F, 0x7F}, - {TOMTOM_A_CDC_COMP1_B5_CTL, 0x7F, 0x7F}, - {TOMTOM_A_CDC_COMP2_B5_CTL, 0x7F, 0x7F}, - - /* - * Setup wavegen timer to 20msec and disable 
chopper - * as default. This corresponds to Compander OFF - */ - {TOMTOM_A_RX_HPH_CNP_WG_CTL, 0xFF, 0xDB}, - {TOMTOM_A_RX_HPH_CNP_WG_TIME, 0xFF, 0x58}, - {TOMTOM_A_RX_HPH_BIAS_WG_OCP, 0xFF, 0x1A}, - {TOMTOM_A_RX_HPH_CHOP_CTL, 0xFF, 0x24}, - - /* Choose max non-overlap time for NCP */ - {TOMTOM_A_NCP_CLK, 0xFF, 0xFC}, - - /* Program the 0.85 volt VBG_REFERENCE */ - {TOMTOM_A_BIAS_CURR_CTL_2, 0xFF, 0x04}, - - /* set MAD input MIC to DMIC1 */ - {TOMTOM_A_CDC_MAD_INP_SEL, 0x0F, 0x08}, - - {TOMTOM_A_INTR_MODE, 0x04, 0x04}, -}; - -static const struct wcd9xxx_reg_mask_val tomtom_codec_2_0_reg_init_val[] = { - {TOMTOM_A_RX_HPH_L_TEST, 0x08, 0x00}, - {TOMTOM_A_RX_HPH_R_TEST, 0x08, 0x00}, - {TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD, 0xFF, 0x00}, - {TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD, 0xFF, 0x00}, - {TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING, 0x01, 0x01}, - {TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING, 0x01, 0x01}, - {TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL, 0x01, 0x00}, - {TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL, 0x01, 0x00}, -}; - -static void tomtom_codec_init_reg(struct snd_soc_codec *codec) -{ - u32 i; - struct wcd9xxx *tomtom_core = dev_get_drvdata(codec->dev->parent); - - for (i = 0; i < ARRAY_SIZE(tomtom_codec_reg_init_val); i++) - snd_soc_update_bits(codec, tomtom_codec_reg_init_val[i].reg, - tomtom_codec_reg_init_val[i].mask, - tomtom_codec_reg_init_val[i].val); - - if (!TOMTOM_IS_1_0(tomtom_core->version)) { - for (i = 0; i < ARRAY_SIZE(tomtom_codec_2_0_reg_init_val); i++) - snd_soc_update_bits(codec, - tomtom_codec_2_0_reg_init_val[i].reg, - tomtom_codec_2_0_reg_init_val[i].mask, - tomtom_codec_2_0_reg_init_val[i].val); - } - -} - -static void tomtom_slim_interface_init_reg(struct snd_soc_codec *codec) -{ - int i; - struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent); - - for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++) - wcd9xxx_interface_reg_write(wcd9xxx, - TOMTOM_SLIM_PGD_PORT_INT_EN0 + i, - 0xFF); -} - -static int tomtom_setup_irqs(struct tomtom_priv *tomtom) -{ - int ret = 0; - struct snd_soc_codec *codec = tomtom->codec; - struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent); - struct wcd9xxx_core_resource *core_res = - &wcd9xxx->core_res; - - ret = wcd9xxx_request_irq(core_res, WCD9XXX_IRQ_SLIMBUS, - tomtom_slimbus_irq, "SLIMBUS Slave", tomtom); - if (ret) - pr_err("%s: Failed to request irq %d\n", __func__, - WCD9XXX_IRQ_SLIMBUS); - else - tomtom_slim_interface_init_reg(codec); - - return ret; -} - -static void tomtom_cleanup_irqs(struct tomtom_priv *tomtom) -{ - struct snd_soc_codec *codec = tomtom->codec; - struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent); - struct wcd9xxx_core_resource *core_res = - &wcd9xxx->core_res; - - wcd9xxx_free_irq(core_res, WCD9XXX_IRQ_SLIMBUS, tomtom); -} - -static -struct firmware_cal *tomtom_get_hwdep_fw_cal(struct snd_soc_codec *codec, - enum wcd_cal_type type) -{ - struct tomtom_priv *tomtom; - struct firmware_cal *hwdep_cal; - - if (!codec) { - pr_err("%s: NULL codec pointer\n", __func__); - return NULL; - } - tomtom = snd_soc_codec_get_drvdata(codec); - hwdep_cal = wcdcal_get_fw_cal(tomtom->fw_data, type); - if (!hwdep_cal) { - dev_err(codec->dev, "%s: cal not sent by %d\n", - __func__, type); - return NULL; - } else { - return hwdep_cal; - } -} - -int tomtom_hs_detect(struct snd_soc_codec *codec, - struct wcd9xxx_mbhc_config *mbhc_cfg) -{ - int rc; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - if (mbhc_cfg->insert_detect) { - rc = wcd9xxx_mbhc_start(&tomtom->mbhc, mbhc_cfg); - if (!rc) - 
tomtom->mbhc_started = true; - } else { - /* MBHC is disabled, so disable Auto pulldown */ - snd_soc_update_bits(codec, TOMTOM_A_MBHC_INSERT_DETECT2, 0xC0, - 0x00); - snd_soc_update_bits(codec, TOMTOM_A_MICB_CFILT_2_CTL, 0x01, - 0x00); - tomtom->mbhc.mbhc_cfg = NULL; - rc = 0; - } - return rc; -} -EXPORT_SYMBOL(tomtom_hs_detect); - -void tomtom_hs_detect_exit(struct snd_soc_codec *codec) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - wcd9xxx_mbhc_stop(&tomtom->mbhc); - tomtom->mbhc_started = false; -} -EXPORT_SYMBOL(tomtom_hs_detect_exit); - -void tomtom_event_register( - int (*machine_event_cb)(struct snd_soc_codec *codec, - enum wcd9xxx_codec_event), - struct snd_soc_codec *codec) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - tomtom->machine_codec_event_cb = machine_event_cb; -} -EXPORT_SYMBOL(tomtom_event_register); - -void tomtom_register_ext_clk_cb( - int (*codec_ext_clk_en)(struct snd_soc_codec *codec, - int enable, bool dapm), - int (*get_ext_clk_cnt)(void), - struct snd_soc_codec *codec) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - - tomtom->codec_ext_clk_en_cb = codec_ext_clk_en; - tomtom->codec_get_ext_clk_cnt = get_ext_clk_cnt; -} -EXPORT_SYMBOL(tomtom_register_ext_clk_cb); - -static void tomtom_init_slim_slave_cfg(struct snd_soc_codec *codec) -{ - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - struct afe_param_cdc_slimbus_slave_cfg *cfg; - struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent); - uint64_t eaddr = 0; - - cfg = &priv->slimbus_slave_cfg; - cfg->minor_version = 1; - cfg->tx_slave_port_offset = 0; - cfg->rx_slave_port_offset = 16; - - memcpy(&eaddr, &wcd9xxx->slim->e_addr, sizeof(wcd9xxx->slim->e_addr)); - WARN_ON(sizeof(wcd9xxx->slim->e_addr) != 6); - cfg->device_enum_addr_lsw = eaddr & 0xFFFFFFFF; - cfg->device_enum_addr_msw = eaddr >> 32; - - pr_debug("%s: slimbus logical address 0x%llx\n", __func__, eaddr); -} - -static int tomtom_device_down(struct wcd9xxx *wcd9xxx) -{ - int count; - struct snd_soc_codec *codec; - struct tomtom_priv *priv; - - codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv); - priv = snd_soc_codec_get_drvdata(codec); - wcd_cpe_ssr_event(priv->cpe_core, WCD_CPE_BUS_DOWN_EVENT); - snd_soc_card_change_online_state(codec->component.card, 0); - set_bit(BUS_DOWN, &priv->status_mask); - - for (count = 0; count < NUM_CODEC_DAIS; count++) - priv->dai[count].bus_down_in_recovery = true; - return 0; -} - -static int wcd9xxx_prepare_static_pa(struct wcd9xxx_mbhc *mbhc, - struct list_head *lh) -{ - int i; - struct snd_soc_codec *codec = mbhc->codec; - u32 delay; - - const struct wcd9xxx_reg_mask_val reg_set_paon[] = { - {TOMTOM_A_TX_COM_BIAS, 0xff, 0xF0}, - {WCD9XXX_A_CDC_RX1_B6_CTL, 0xff, 0x81}, - {WCD9XXX_A_CDC_CLK_RX_B1_CTL, 0x01, 0x01}, - {WCD9XXX_A_BUCK_MODE_2, 0xff, 0xEF}, - {WCD9XXX_A_BUCK_MODE_2, 0xff, 0xEE}, - {TOMTOM_A_NCP_DTEST, 0xff, 0x20}, - {WCD9XXX_A_CDC_CLK_OTHR_CTL, 0xff, 0x21}, - {WCD9XXX_A_CDC_RX2_B6_CTL, 0xff, 0x81}, - {WCD9XXX_A_CDC_CLK_RX_B1_CTL, 0x02, 0x02}, - - {WCD9XXX_A_BUCK_MODE_2, 0xff, 0xAE}, - {WCD9XXX_A_BUCK_MODE_2, 0xff, 0xAA}, - {WCD9XXX_A_NCP_CLK, 0xff, 0x9C}, - {WCD9XXX_A_NCP_CLK, 0xff, 0xFC}, - {WCD9XXX_A_RX_COM_BIAS, 0xff, 0xA0}, - {WCD9XXX_A_BUCK_MODE_3, 0xff, 0xC6}, - {WCD9XXX_A_BUCK_MODE_4, 0xff, 0xE6}, - {WCD9XXX_A_BUCK_MODE_5, 0xff, 0x02}, - {WCD9XXX_A_BUCK_MODE_1, 0xff, 0xA1}, - /* Add a delay of 1ms after this reg write */ - - {WCD9XXX_A_NCP_STATIC, 0xff, 0x28}, - {WCD9XXX_A_NCP_EN, 0xff, 0xFF}, - /* Add a delay 
of 1ms after this reg write */ - - /* set HPHL */ - {WCD9XXX_A_RX_HPH_L_TEST, 0xff, 0x00}, - {TOMTOM_A_RX_HPH_L_PA_CTL, 0xff, 0x42}, - {TOMTOM_A_RX_HPH_BIAS_LDO, 0xff, 0x8C}, - {TOMTOM_A_RX_HPH_CHOP_CTL, 0xff, 0xA4}, - {WCD9XXX_A_RX_HPH_L_GAIN, 0xff, 0xE0}, - {WCD9XXX_A_RX_HPH_L_GAIN, 0xff, 0xEC}, - - /* set HPHR */ - {WCD9XXX_A_RX_HPH_R_TEST, 0xff, 0x00}, - {TOMTOM_A_RX_HPH_R_PA_CTL, 0xff, 0x42}, - {WCD9XXX_A_RX_HPH_R_GAIN, 0xff, 0x20}, - {WCD9XXX_A_RX_HPH_R_GAIN, 0xff, 0x2C}, - - /* set HPH PAs */ - {WCD9XXX_A_RX_HPH_BIAS_WG_OCP, 0xff, 0x2A}, - {WCD9XXX_A_RX_HPH_CNP_WG_CTL, 0xff, 0xDA}, - {WCD9XXX_A_RX_HPH_CNP_WG_TIME, 0xff, 0x15}, - {WCD9XXX_A_CDC_CLSH_B1_CTL, 0xff, 0xE6}, - {WCD9XXX_A_RX_HPH_L_DAC_CTL, 0xff, 0x40}, - {WCD9XXX_A_RX_HPH_L_DAC_CTL, 0xff, 0xC0}, - {WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xff, 0x40}, - {WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xff, 0xC0}, - - {TOMTOM_A_RX_HPH_L_ATEST, 0xff, 0x00}, - {TOMTOM_A_RX_HPH_R_ATEST, 0xff, 0x00}, - }; - - for (i = 0; i < ARRAY_SIZE(reg_set_paon); i++) { - /* - * Some of the codec registers like BUCK_MODE_1 - * and NCP_EN requires 1ms wait time for them - * to take effect. Other register writes for - * PA configuration do not require any wait time. - */ - if (reg_set_paon[i].reg == WCD9XXX_A_BUCK_MODE_1 || - reg_set_paon[i].reg == WCD9XXX_A_NCP_EN) - delay = 1000; - else - delay = 0; - wcd9xxx_soc_update_bits_push(codec, lh, - reg_set_paon[i].reg, - reg_set_paon[i].mask, - reg_set_paon[i].val, delay); - } - pr_debug("%s: PAs are prepared\n", __func__); - - return 0; -} - -static int wcd9xxx_enable_static_pa(struct wcd9xxx_mbhc *mbhc, bool enable, - u8 hph_pa) -{ - struct snd_soc_codec *codec = mbhc->codec; - const int wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME) * - TOMTOM_WG_TIME_FACTOR_US; - u8 mask = (hph_pa << 4); - u8 pa_en = enable ? mask : ~mask; - - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, mask, pa_en); - /* Wait for wave gen time to avoid pop noise */ - usleep_range(wg_time, wg_time + WCD9XXX_USLEEP_RANGE_MARGIN_US); - pr_debug("%s: PAs are %s as static mode (wg_time %d)\n", __func__, - enable ? 
"enabled" : "disabled", wg_time); - return 0; -} - -static int tomtom_setup_zdet(struct wcd9xxx_mbhc *mbhc, - enum mbhc_impedance_detect_stages stage) -{ - int ret = 0; - struct snd_soc_codec *codec = mbhc->codec; - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - -#define __wr(reg, mask, value) \ - do { \ - ret = wcd9xxx_soc_update_bits_push(codec, \ - &tomtom->reg_save_restore, \ - reg, mask, value, 0); \ - if (ret < 0) \ - return ret; \ - } while (0) - - switch (stage) { - - case MBHC_ZDET_PRE_MEASURE: - INIT_LIST_HEAD(&tomtom->reg_save_restore); - wcd9xxx_prepare_static_pa(mbhc, &tomtom->reg_save_restore); - /* Set HPH_MBHC for zdet */ - __wr(WCD9XXX_A_MBHC_HPH, 0xff, 0xC4); - usleep_range(10, 10 + WCD9XXX_USLEEP_RANGE_MARGIN_US); - wcd9xxx_enable_static_pa(mbhc, HPH_PA_ENABLE, HPH_PA_L_R); - - /* save old value of registers and write the new value */ - __wr(WCD9XXX_A_RX_HPH_OCP_CTL, 0xff, 0x69); - __wr(WCD9XXX_A_CDC_RX1_B6_CTL, 0xff, 0x80); - __wr(WCD9XXX_A_CDC_RX2_B6_CTL, 0xff, 0x80); - /* Enable MBHC MUX, Set MUX current to 37.5uA and ADC7 */ - __wr(WCD9XXX_A_MBHC_SCALING_MUX_1, 0xff, 0xC0); - __wr(WCD9XXX_A_MBHC_SCALING_MUX_2, 0xff, 0xF0); - __wr(TOMTOM_A_TX_7_TXFE_CLKDIV, 0xff, 0x8B); - __wr(WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0xff, 0x78); - __wr(WCD9XXX_A_TX_7_MBHC_EN, 0xff, 0x8C); - __wr(WCD9XXX_A_CDC_MBHC_B1_CTL, 0xff, 0xDC); - /* Reset MBHC and set it up for STA */ - __wr(WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xff, 0x0A); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x00); - __wr(WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xff, 0x02); - __wr(WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL, 0xff, 0x80); - __wr(WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0xff, 0x25); - /* Wait for ~50us to let MBHC hardware settle down */ - usleep_range(50, 50 + WCD9XXX_USLEEP_RANGE_MARGIN_US); - break; - case MBHC_ZDET_POST_MEASURE: - /* 0x69 for 105 number of samples for PA RAMP */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69); - /* Program the PA Ramp to FS_16K, L shift 1 */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL, - 0x1 << 4 | 0x6); - /* Reset the PA Ramp */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x1C); - /* - * Connect the PA Ramp to PA chain and release reset with - * keep it connected. 
- */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x1F); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03); - - /* Start the PA ramp on HPH L and R */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x05); - /* Ramp generator takes ~30ms */ - usleep_range(TOMTOM_HPH_PA_RAMP_DELAY, - TOMTOM_HPH_PA_RAMP_DELAY + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - - /* - * Set the multiplication factor for zdet calculation - * based on the Ramp voltage and Gain used - */ - tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_1X; - break; - case MBHC_ZDET_GAIN_0: - /* Set Gain at 1x */ - snd_soc_write(codec, TOMTOM_A_RX_HPH_L_ATEST, 0x00); - snd_soc_write(codec, TOMTOM_A_RX_HPH_R_ATEST, 0x00); - snd_soc_write(codec, TOMTOM_A_RX_HPH_L_PA_CTL, 0x42); - /* Allow 100us for gain registers to settle */ - usleep_range(100, - 100 + WCD9XXX_USLEEP_RANGE_MARGIN_US); - break; - case MBHC_ZDET_GAIN_UPDATE_1X: - /* - * Set the multiplication factor for zdet calculation - * based on the Gain value used - */ - tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_1X; - break; - case MBHC_ZDET_GAIN_1: - /* Set Gain at 10x */ - snd_soc_write(codec, TOMTOM_A_RX_HPH_L_ATEST, 0x10); - snd_soc_write(codec, TOMTOM_A_RX_HPH_R_ATEST, 0x00); - snd_soc_write(codec, TOMTOM_A_RX_HPH_L_PA_CTL, 0x42); - /* Allow 100us for gain registers to settle */ - usleep_range(100, - 100 + WCD9XXX_USLEEP_RANGE_MARGIN_US); - - /* - * Set the multiplication factor for zdet calculation - * based on the Gain value used - */ - tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_10X; - break; - case MBHC_ZDET_GAIN_2: - /* Set Gain at 100x */ - snd_soc_write(codec, TOMTOM_A_RX_HPH_L_ATEST, 0x00); - snd_soc_write(codec, TOMTOM_A_RX_HPH_R_ATEST, 0x10); - snd_soc_write(codec, TOMTOM_A_RX_HPH_L_PA_CTL, 0x43); - /* Allow 100us for gain registers to settle */ - usleep_range(100, - 100 + WCD9XXX_USLEEP_RANGE_MARGIN_US); - - /* - * Set the multiplication factor for zdet calculation - * based on the Gain value used - */ - tomtom->zdet_gain_mul_fact = TOMTOM_ZDET_MUL_FACTOR_100X; - break; - case MBHC_ZDET_RAMP_DISABLE: - /* Ramp HPH L & R back to Zero */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00); - /* 0x69 for 105 number of samples for PA RAMP */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69); - /* Program the PA Ramp to FS_16K, L shift 1 */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL, - 0x1 << 4 | 0x6); - /* Reset the PA Ramp */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x17); - /* - * Connect the PA Ramp to PA chain and release reset with - * keep it connected. - */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03); - /* Start the PA ramp on HPH L and R */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x0A); - /* Ramp generator takes ~30ms to settle down */ - usleep_range(TOMTOM_HPH_PA_RAMP_DELAY, - TOMTOM_HPH_PA_RAMP_DELAY + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - break; - case MBHC_ZDET_HPHR_RAMP_DISABLE: - /* Ramp HPHR back to Zero */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL, - 0x1 << 4 | 0x6); - /* Reset the PA Ramp */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x17); - /* - * Connect the PA Ramp to PA chain and release reset with - * keep it connected. 
- */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x08); - /* Ramp generator takes ~30ms to settle down */ - usleep_range(TOMTOM_HPH_PA_RAMP_DELAY, - TOMTOM_HPH_PA_RAMP_DELAY + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - break; - case MBHC_ZDET_HPHL_RAMP_DISABLE: - /* Ramp back to Zero */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x69); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL, - 0x1 << 4 | 0x6); - /* Reset the PA Ramp */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x17); - /* - * Connect the PA Ramp to PA chain and release reset with - * keep it connected. - */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x03); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x02); - /* Ramp generator takes ~30ms to settle down */ - usleep_range(TOMTOM_HPH_PA_RAMP_DELAY, - TOMTOM_HPH_PA_RAMP_DELAY + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - break; - case MBHC_ZDET_HPHR_PA_DISABLE: - /* Disable PA */ - wcd9xxx_enable_static_pa(mbhc, HPH_PA_DISABLE, HPH_PA_R); - break; - case MBHC_ZDET_PA_DISABLE: - /* Disable PA */ - if (!mbhc->hph_pa_dac_state && - (!(test_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state) || - test_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state)))) - wcd9xxx_enable_static_pa(mbhc, HPH_PA_DISABLE, - HPH_PA_L_R); - else if (!(snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_EN) & 0x10)) - wcd9xxx_enable_static_pa(mbhc, HPH_PA_ENABLE, HPH_PA_R); - - /* Turn off PA ramp generator */ - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B1_CTL, 0x00); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B2_CTL, 0x00); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B3_CTL, 0x00); - snd_soc_write(codec, WCD9XXX_A_CDC_PA_RAMP_B4_CTL, 0x00); - - /* Restore registers */ - wcd9xxx_restore_registers(codec, &tomtom->reg_save_restore); - break; - } -#undef __wr - - return ret; -} - -/* Calculate final impedance values for HPH left and right based on formulae */ -static void tomtom_compute_impedance(struct wcd9xxx_mbhc *mbhc, s16 *l, s16 *r, - uint32_t *zl, uint32_t *zr) -{ - s64 zln, zrn; - int zld, zrd; - s64 rl = 0, rr = 0; - struct snd_soc_codec *codec; - struct tomtom_priv *tomtom; - - if (!mbhc) { - pr_err("%s: Invalid parameters mbhc = %pK\n", - __func__, mbhc); - return; - } - codec = mbhc->codec; - tomtom = snd_soc_codec_get_drvdata(codec); - - if (l && zl) { - zln = (s64) (l[1] - l[0]) * tomtom->zdet_gain_mul_fact; - zld = (l[2] - l[0]); - if (zld) - rl = div_s64(zln, zld); - else - /* If L0 and L2 are same, Z has to be on Zone 3. - * Assign a default value so that atleast the value - * is read again with Ramp-up - */ - rl = TOMTOM_ZDET_ZONE_3_DEFAULT_VAL; - - /* 32-bit LSBs are enough to hold Impedance values */ - *zl = (u32) rl; - } - if (r && zr) { - zrn = (s64) (r[1] - r[0]) * tomtom->zdet_gain_mul_fact; - zrd = (r[2] - r[0]); - if (zrd) - rr = div_s64(zrn, zrd); - else - /* If R0 and R2 are same, Z has to be on Zone 3. 
- * Assign a default value so that atleast the value - * is read again with Ramp-up - */ - rr = TOMTOM_ZDET_ZONE_3_DEFAULT_VAL; - - /* 32-bit LSBs are enough to hold Impedance values */ - *zr = (u32) rr; - } -} - -/* - * Calculate error approximation of impedance values for HPH left - * and HPH right based on QFuse values - */ -static void tomtom_zdet_error_approx(struct wcd9xxx_mbhc *mbhc, uint32_t *zl, - uint32_t *zr) -{ - struct snd_soc_codec *codec; - struct tomtom_priv *tomtom; - s8 q1_t, q2_t; - s8 q1_m, q2_m; - s8 q1, q2; - u8 div_shift; - int rl_alpha = 0, rr_alpha = 0; - int rl_beta = 0, rr_beta = 0; - u64 rl = 0, rr = 0; - const int mult_factor = TOMTOM_ZDET_ERROR_APPROX_MUL_FACTOR; - const int shift = TOMTOM_ZDET_ERROR_APPROX_SHIFT; - - if (!zl || !zr || !mbhc) { - pr_err("%s: Invalid parameters zl = %pK zr = %pK, mbhc = %pK\n", - __func__, zl, zr, mbhc); - return; - } - codec = mbhc->codec; - tomtom = snd_soc_codec_get_drvdata(codec); - - if ((tomtom->zdet_gain_mul_fact == TOMTOM_ZDET_MUL_FACTOR_1X) || - (tomtom->zdet_gain_mul_fact == TOMTOM_ZDET_MUL_FACTOR_10X)) { - q1_t = ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT0) & - 0x3) << 0x5); - q1_t |= ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT1) & - 0xF8) >> 0x3); - q2_t = ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT1) & - 0x7) << 0x4); - q2_t |= ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT2) & - 0xF0) >> 0x4); - /* Take out the numeric part of the Qfuse value */ - q1_m = q1_t & 0x3F; - q2_m = q2_t & 0x3F; - /* Check the sign part of the Qfuse and adjust value */ - q1 = (q1_t & 0x40) ? -q1_m : q1_m; - q2 = (q2_t & 0x40) ? -q2_m : q2_m; - div_shift = 1; - } else { - q1_t = ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT2) & - 0xF) << 0x2); - q1_t |= ((snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT3) & - 0xC0) >> 0x6); - q2_t = (snd_soc_read(codec, TOMTOM_A_QFUSE_DATA_OUT3) & 0x3F); - /* Take out the numeric part of the Qfuse value */ - q1_m = q1_t & 0x1F; - q2_m = q2_t & 0x1F; - /* Check the sign part of the Qfuse and adjust value */ - q1 = (q1_t & 0x20) ? -q1_m : q1_m; - q2 = (q2_t & 0x20) ? -q2_m : q2_m; - div_shift = 0; - } - - dev_dbg(codec->dev, "%s: qfuse1 = %d, qfuse2 = %d\n", - __func__, q1, q2); - if (!q1 && !q2) { - dev_dbg(codec->dev, "%s: qfuse1 and qfuse2 are 0. 
Exiting\n", - __func__); - return; - } - - /* - * Use multiplication and shift to avoid floating point math - * The Z value is calculated with the below formulae using - * the Qfuse value- - * zl = zl * [1 - {(Q1 / div) / 100}] (Include sign for Q1) - * zr = zr * [1 - {(Q2 / div) / 100}] (Include sign for Q2) - * We multiply by 65536 and shift 16 times to get the approx result - * div = 4 for 1x gain, div = 2 for 10x/100x gain - */ - /* Q1/4 */ - rl_alpha = q1 >> div_shift; - rl_alpha = 100 - rl_alpha; - /* {rl_alpha/100} * 65536 */ - rl_beta = rl_alpha * mult_factor; - rl = (u64) *zl * rl_beta; - /* rl/65536 */ - rl = (u64) rl >> shift; - - rr_alpha = q2 >> div_shift; - rr_alpha = 100 - rr_alpha; - rr_beta = rr_alpha * mult_factor; - rr = (u64) *zr * rr_beta; - rr = (u64) rr >> shift; - - dev_dbg(codec->dev, "%s: rl = 0x%llx (%lld) \t rr = 0x%llx (%lld)\n", - __func__, rl, rl, rr, rr); - - *zl = (u32) rl; - *zr = (u32) rr; -} - -static enum wcd9xxx_cdc_type tomtom_get_cdc_type(void) -{ - return WCD9XXX_CDC_TYPE_TOMTOM; -} - -static bool tomtom_mbhc_ins_rem_status(struct snd_soc_codec *codec) -{ - return !(snd_soc_read(codec, WCD9XXX_A_MBHC_INSERT_DET_STATUS) & - (1 << 4)); -} - -static void tomtom_mbhc_micb_pulldown_ctrl(struct wcd9xxx_mbhc *mbhc, - bool enable) -{ - struct snd_soc_codec *codec = mbhc->codec; - - if (!enable) { - /* Remove automatic pulldown on micbias */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl, - 0x01, 0x00); - } else { - /* Enable automatic pulldown on micbias */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl, - 0x01, 0x01); - } -} - -static void tomtom_codec_hph_auto_pull_down(struct snd_soc_codec *codec, - bool enable) -{ - struct wcd9xxx *tomtom_core = dev_get_drvdata(codec->dev->parent); - - if (TOMTOM_IS_1_0(tomtom_core->version)) - return; - - dev_dbg(codec->dev, "%s: %s auto pull down\n", __func__, - enable ? 
"enable" : "disable"); - if (enable) { - snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_L_TEST, 0x08, 0x08); - snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_R_TEST, 0x08, 0x08); - } else { - snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_L_TEST, 0x08, 0x00); - snd_soc_update_bits(codec, TOMTOM_A_RX_HPH_R_TEST, 0x08, 0x00); - } -} - -static const struct wcd9xxx_mbhc_cb mbhc_cb = { - .get_cdc_type = tomtom_get_cdc_type, - .setup_zdet = tomtom_setup_zdet, - .compute_impedance = tomtom_compute_impedance, - .zdet_error_approx = tomtom_zdet_error_approx, - .insert_rem_status = tomtom_mbhc_ins_rem_status, - .micbias_pulldown_ctrl = tomtom_mbhc_micb_pulldown_ctrl, - .codec_rco_ctrl = tomtom_codec_internal_rco_ctrl, - .hph_auto_pulldown_ctrl = tomtom_codec_hph_auto_pull_down, - .get_hwdep_fw_cal = tomtom_get_hwdep_fw_cal, -}; - -static const struct wcd9xxx_mbhc_intr cdc_intr_ids = { - .poll_plug_rem = WCD9XXX_IRQ_MBHC_REMOVAL, - .shortavg_complete = WCD9XXX_IRQ_MBHC_SHORT_TERM, - .potential_button_press = WCD9XXX_IRQ_MBHC_PRESS, - .button_release = WCD9XXX_IRQ_MBHC_RELEASE, - .dce_est_complete = WCD9XXX_IRQ_MBHC_POTENTIAL, - .insertion = WCD9XXX_IRQ_MBHC_INSERTION, - .hph_left_ocp = WCD9XXX_IRQ_HPH_PA_OCPL_FAULT, - .hph_right_ocp = WCD9XXX_IRQ_HPH_PA_OCPR_FAULT, - .hs_jack_switch = WCD9330_IRQ_MBHC_JACK_SWITCH, -}; - -static int tomtom_post_reset_cb(struct wcd9xxx *wcd9xxx) -{ - int ret = 0; - struct snd_soc_codec *codec; - struct tomtom_priv *tomtom; - int rco_clk_rate; - - codec = (struct snd_soc_codec *)(wcd9xxx->ssr_priv); - tomtom = snd_soc_codec_get_drvdata(codec); - - snd_soc_card_change_online_state(codec->component.card, 1); - clear_bit(BUS_DOWN, &tomtom->status_mask); - - mutex_lock(&tomtom->codec_mutex); - - tomtom_update_reg_defaults(codec); - if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ) - snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x0); - else if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_9P6MHZ) - snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x2); - tomtom_codec_init_reg(codec); - - snd_soc_cache_sync(codec); - - ret = tomtom_handle_pdata(tomtom); - if (ret < 0) - pr_err("%s: bad pdata\n", __func__); - - tomtom_init_slim_slave_cfg(codec); - tomtom_slim_interface_init_reg(codec); - wcd_cpe_ssr_event(tomtom->cpe_core, WCD_CPE_BUS_UP_EVENT); - wcd9xxx_resmgr_post_ssr(&tomtom->resmgr); - - if (tomtom->mbhc_started) { - wcd9xxx_mbhc_deinit(&tomtom->mbhc); - tomtom->mbhc_started = false; - - if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ) - rco_clk_rate = TOMTOM_MCLK_CLK_12P288MHZ; - else - rco_clk_rate = TOMTOM_MCLK_CLK_9P6MHZ; - - ret = wcd9xxx_mbhc_init(&tomtom->mbhc, &tomtom->resmgr, codec, - tomtom_enable_mbhc_micbias, - &mbhc_cb, &cdc_intr_ids, - rco_clk_rate, TOMTOM_ZDET_SUPPORTED); - if (ret) - pr_err("%s: mbhc init failed %d\n", __func__, ret); - else - tomtom_hs_detect(codec, tomtom->mbhc.mbhc_cfg); - } - - if (tomtom->machine_codec_event_cb) - tomtom->machine_codec_event_cb(codec, - WCD9XXX_CODEC_EVENT_CODEC_UP); - - tomtom_cleanup_irqs(tomtom); - ret = tomtom_setup_irqs(tomtom); - if (ret) - pr_err("%s: Failed to setup irq: %d\n", __func__, ret); - - /* - * After SSR, the qfuse sensing is lost. - * Perform qfuse sensing again after SSR - * handling is finished. 
- */ - tomtom_enable_qfuse_sensing(codec); - mutex_unlock(&tomtom->codec_mutex); - return ret; -} - -void *tomtom_get_afe_config(struct snd_soc_codec *codec, - enum afe_config_type config_type) -{ - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - - switch (config_type) { - case AFE_SLIMBUS_SLAVE_CONFIG: - return &priv->slimbus_slave_cfg; - case AFE_CDC_REGISTERS_CONFIG: - return &tomtom_audio_reg_cfg; - case AFE_SLIMBUS_SLAVE_PORT_CONFIG: - return &tomtom_slimbus_slave_port_cfg; - case AFE_AANC_VERSION: - return &tomtom_cdc_aanc_version; - case AFE_CLIP_BANK_SEL: - return &clip_bank_sel; - case AFE_CDC_CLIP_REGISTERS_CONFIG: - return &tomtom_clip_reg_cfg; - default: - pr_err("%s: Unknown config_type 0x%x\n", __func__, config_type); - return NULL; - } -} - -static struct wcd9xxx_reg_address tomtom_reg_address = { - .micb_4_mbhc = TOMTOM_A_MICB_4_MBHC, - .micb_4_int_rbias = TOMTOM_A_MICB_4_INT_RBIAS, - .micb_4_ctl = TOMTOM_A_MICB_4_CTL, -}; - -static int wcd9xxx_ssr_register(struct wcd9xxx *control, - int (*device_down_cb)(struct wcd9xxx *wcd9xxx), - int (*device_up_cb)(struct wcd9xxx *wcd9xxx), - void *priv) -{ - control->dev_down = device_down_cb; - control->post_reset = device_up_cb; - control->ssr_priv = priv; - return 0; -} - -static const struct snd_soc_dapm_widget tomtom_1_dapm_widgets[] = { - SND_SOC_DAPM_ADC_E("ADC1", NULL, TOMTOM_A_TX_1_GAIN, 7, 0, - tomtom_codec_enable_adc, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_ADC_E("ADC2", NULL, TOMTOM_A_TX_2_GAIN, 7, 0, - tomtom_codec_enable_adc, - SND_SOC_DAPM_PRE_PMU | - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_ADC_E("ADC3", NULL, TOMTOM_A_TX_3_GAIN, 7, 0, - tomtom_codec_enable_adc, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_ADC_E("ADC4", NULL, TOMTOM_A_TX_4_GAIN, 7, 0, - tomtom_codec_enable_adc, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_ADC_E("ADC5", NULL, TOMTOM_A_TX_5_GAIN, 7, 0, - tomtom_codec_enable_adc, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), - SND_SOC_DAPM_ADC_E("ADC6", NULL, TOMTOM_A_TX_6_GAIN, 7, 0, - tomtom_codec_enable_adc, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU | - SND_SOC_DAPM_POST_PMD), -}; - -static struct regulator *tomtom_codec_find_regulator(struct snd_soc_codec *cdc, - const char *name) -{ - int i; - struct wcd9xxx *core = dev_get_drvdata(cdc->dev->parent); - - for (i = 0; i < core->num_of_supplies; i++) { - if (core->supplies[i].supply && - !strcmp(core->supplies[i].supply, name)) - return core->supplies[i].consumer; - } - - return NULL; -} - -static struct wcd_cpe_core *tomtom_codec_get_cpe_core( - struct snd_soc_codec *codec) -{ - struct tomtom_priv *priv = snd_soc_codec_get_drvdata(codec); - - return priv->cpe_core; -} - -static int tomtom_codec_fll_enable(struct snd_soc_codec *codec, - bool enable) -{ - struct wcd9xxx *wcd9xxx; - - if (!codec || !codec->control_data) { - pr_err("%s: Invalid codec handle, %pK\n", - __func__, codec); - return -EINVAL; - } - - wcd9xxx = codec->control_data; - - dev_dbg(codec->dev, "%s: %s, mclk_rate = %d\n", - __func__, (enable ? 
"enable" : "disable"), - wcd9xxx->mclk_rate); - - switch (wcd9xxx->mclk_rate) { - case TOMTOM_MCLK_CLK_9P6MHZ: - snd_soc_update_bits(codec, TOMTOM_A_FLL_NREF, - 0x1F, 0x15); - snd_soc_update_bits(codec, TOMTOM_A_FLL_KDCO_TUNE, - 0x07, 0x06); - snd_soc_write(codec, TOMTOM_A_FLL_LOCK_THRESH, 0xD1); - snd_soc_write(codec, TOMTOM_A_FLL_LOCK_DET_COUNT, - 0x40); - break; - case TOMTOM_MCLK_CLK_12P288MHZ: - snd_soc_update_bits(codec, TOMTOM_A_FLL_NREF, - 0x1F, 0x11); - snd_soc_update_bits(codec, TOMTOM_A_FLL_KDCO_TUNE, - 0x07, 0x05); - snd_soc_write(codec, TOMTOM_A_FLL_LOCK_THRESH, 0xB1); - snd_soc_write(codec, TOMTOM_A_FLL_LOCK_DET_COUNT, - 0x40); - break; - } - - return 0; -} - -static int tomtom_codec_slim_reserve_bw(struct snd_soc_codec *codec, - u32 bw_ops, bool commit) -{ - struct wcd9xxx *wcd9xxx; - - if (!codec) { - pr_err("%s: Invalid handle to codec\n", - __func__); - return -EINVAL; - } - - wcd9xxx = dev_get_drvdata(codec->dev->parent); - - if (!wcd9xxx) { - dev_err(codec->dev, "%s: Invalid parent drv_data\n", - __func__); - return -EINVAL; - } - - return wcd9xxx_slim_reserve_bw(wcd9xxx, bw_ops, commit); -} - -static int tomtom_codec_vote_max_bw(struct snd_soc_codec *codec, - bool vote) -{ - u32 bw_ops; - - if (vote) - bw_ops = SLIM_BW_CLK_GEAR_9; - else - bw_ops = SLIM_BW_UNVOTE; - - return tomtom_codec_slim_reserve_bw(codec, - bw_ops, true); -} - -static const struct wcd9xxx_resmgr_cb resmgr_cb = { - .cdc_rco_ctrl = tomtom_codec_internal_rco_ctrl, -}; - -static int tomtom_cpe_err_irq_control(struct snd_soc_codec *codec, - enum cpe_err_irq_cntl_type cntl_type, u8 *status) -{ - switch (cntl_type) { - case CPE_ERR_IRQ_MASK: - snd_soc_update_bits(codec, - TOMTOM_A_SVASS_INT_MASK, - 0x3F, 0x3F); - break; - case CPE_ERR_IRQ_UNMASK: - snd_soc_update_bits(codec, - TOMTOM_A_SVASS_INT_MASK, - 0x3F, 0x0C); - break; - case CPE_ERR_IRQ_CLEAR: - snd_soc_update_bits(codec, - TOMTOM_A_SVASS_INT_CLR, - 0x3F, 0x3F); - break; - case CPE_ERR_IRQ_STATUS: - if (!status) - return -EINVAL; - *status = snd_soc_read(codec, - TOMTOM_A_SVASS_INT_STATUS); - break; - } - - return 0; -} - -static const struct wcd_cpe_cdc_cb cpe_cb = { - .cdc_clk_en = tomtom_codec_internal_rco_ctrl, - .cpe_clk_en = tomtom_codec_fll_enable, - .lab_cdc_ch_ctl = tomtom_codec_enable_slimtx_mad, - .cdc_ext_clk = tomtom_codec_ext_clk_en, - .bus_vote_bw = tomtom_codec_vote_max_bw, - .cpe_err_irq_control = tomtom_cpe_err_irq_control, -}; - -static struct cpe_svc_init_param cpe_svc_params = { - .version = 0, - .query_freq_plans_cb = NULL, - .change_freq_plan_cb = NULL, -}; - -static int tomtom_cpe_initialize(struct snd_soc_codec *codec) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - struct wcd_cpe_params cpe_params; - - memset(&cpe_params, 0, - sizeof(struct wcd_cpe_params)); - cpe_params.codec = codec; - cpe_params.get_cpe_core = tomtom_codec_get_cpe_core; - cpe_params.cdc_cb = &cpe_cb; - cpe_params.dbg_mode = cpe_debug_mode; - cpe_params.cdc_major_ver = CPE_SVC_CODEC_TOMTOM; - cpe_params.cdc_minor_ver = CPE_SVC_CODEC_V1P0; - cpe_params.cdc_id = CPE_SVC_CODEC_TOMTOM; - - cpe_params.cdc_irq_info.cpe_engine_irq = - WCD9330_IRQ_SVASS_ENGINE; - cpe_params.cdc_irq_info.cpe_err_irq = - WCD9330_IRQ_SVASS_ERR_EXCEPTION; - cpe_params.cdc_irq_info.cpe_fatal_irqs = - TOMTOM_CPE_FATAL_IRQS; - - cpe_svc_params.context = codec; - cpe_params.cpe_svc_params = &cpe_svc_params; - - tomtom->cpe_core = wcd_cpe_init("cpe", codec, - &cpe_params); - if (IS_ERR_OR_NULL(tomtom->cpe_core)) { - dev_err(codec->dev, - "%s: Failed to enable 
CPE\n", - __func__); - return -EINVAL; - } - - return 0; -} - -static int tomtom_codec_probe(struct snd_soc_codec *codec) -{ - struct wcd9xxx *control; - struct tomtom_priv *tomtom; - struct wcd9xxx_pdata *pdata; - struct wcd9xxx *wcd9xxx; - struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec); - int ret = 0; - int i, rco_clk_rate; - void *ptr = NULL; - struct wcd9xxx_core_resource *core_res; - struct clk *wcd_ext_clk = NULL; - - dev_info(codec->dev, "%s()\n", __func__); - - control = dev_get_drvdata(codec->dev->parent); - - tomtom = snd_soc_codec_get_drvdata(codec); - - wcd9xxx_ssr_register(control, tomtom_device_down, - tomtom_post_reset_cb, (void *)codec); - - for (i = 0; i < NUM_DECIMATORS; i++) { - tx_hpf_work[i].tomtom = tomtom; - tx_hpf_work[i].decimator = i + 1; - tx_hpf_work[i].tx_hpf_bypass = false; - INIT_DELAYED_WORK(&tx_hpf_work[i].dwork, - tx_hpf_corner_freq_callback); - } - - wcd9xxx = control; - if (!of_find_property(wcd9xxx->dev->of_node, "clock-names", NULL)) { - dev_dbg(wcd9xxx->dev, "%s: codec not using audio-ext-clk driver\n", - __func__); - } else { - wcd_ext_clk = clk_get(wcd9xxx->dev, "wcd_clk"); - if (IS_ERR(wcd_ext_clk)) { - dev_err(codec->dev, "%s: clk get %s failed\n", - __func__, "wcd_ext_clk"); - goto err_nomem_slimch; - } - } - tomtom->wcd_ext_clk = wcd_ext_clk; - core_res = &wcd9xxx->core_res; - pdata = dev_get_platdata(codec->dev->parent); - /* codec resmgr module init */ - ret = wcd9xxx_resmgr_init(&tomtom->resmgr, codec, core_res, pdata, - &pdata->micbias, &tomtom_reg_address, - &resmgr_cb, WCD9XXX_CDC_TYPE_TOMTOM); - if (ret) { - pr_err("%s: wcd9xxx init failed %d\n", __func__, ret); - goto err_nomem_slimch; - } - - tomtom->clsh_d.buck_mv = tomtom_codec_get_buck_mv(codec); - /* TomTom does not support dynamic switching of vdd_cp */ - tomtom->clsh_d.is_dynamic_vdd_cp = false; - wcd9xxx_clsh_init(&tomtom->clsh_d, &tomtom->resmgr); - - if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ) - rco_clk_rate = TOMTOM_MCLK_CLK_12P288MHZ; - else - rco_clk_rate = TOMTOM_MCLK_CLK_9P6MHZ; - - tomtom->fw_data = kzalloc(sizeof(*(tomtom->fw_data)), GFP_KERNEL); - if (!tomtom->fw_data) - goto err_nomem_slimch; - set_bit(WCD9XXX_ANC_CAL, tomtom->fw_data->cal_bit); - set_bit(WCD9XXX_MAD_CAL, tomtom->fw_data->cal_bit); - set_bit(WCD9XXX_MBHC_CAL, tomtom->fw_data->cal_bit); - ret = wcd_cal_create_hwdep(tomtom->fw_data, - WCD9XXX_CODEC_HWDEP_NODE, codec); - if (ret < 0) { - dev_err(codec->dev, "%s hwdep failed %d\n", __func__, ret); - goto err_hwdep; - } - - /* init and start mbhc */ - ret = wcd9xxx_mbhc_init(&tomtom->mbhc, &tomtom->resmgr, codec, - tomtom_enable_mbhc_micbias, - &mbhc_cb, &cdc_intr_ids, - rco_clk_rate, TOMTOM_ZDET_SUPPORTED); - if (ret) { - pr_err("%s: mbhc init failed %d\n", __func__, ret); - goto err_hwdep; - } - - tomtom->codec = codec; - for (i = 0; i < COMPANDER_MAX; i++) { - tomtom->comp_enabled[i] = 0; - tomtom->comp_fs[i] = COMPANDER_FS_48KHZ; - } - tomtom->intf_type = wcd9xxx_get_intf_type(); - tomtom->aux_pga_cnt = 0; - tomtom->aux_l_gain = 0x1F; - tomtom->aux_r_gain = 0x1F; - tomtom->ldo_h_users = 0; - tomtom->micb_2_users = 0; - tomtom_update_reg_defaults(codec); - pr_debug("%s: MCLK Rate = %x\n", __func__, wcd9xxx->mclk_rate); - if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_12P288MHZ) - snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x0); - else if (wcd9xxx->mclk_rate == TOMTOM_MCLK_CLK_9P6MHZ) - snd_soc_update_bits(codec, TOMTOM_A_CHIP_CTL, 0x06, 0x2); - tomtom_codec_init_reg(codec); - - ret = tomtom_handle_pdata(tomtom); - if (ret 
< 0) { - pr_err("%s: bad pdata\n", __func__); - goto err_hwdep; - } - - tomtom->spkdrv_reg = tomtom_codec_find_regulator(codec, - WCD9XXX_VDD_SPKDRV_NAME); - tomtom->spkdrv2_reg = tomtom_codec_find_regulator(codec, - WCD9XXX_VDD_SPKDRV2_NAME); - - ptr = kmalloc((sizeof(tomtom_rx_chs) + - sizeof(tomtom_tx_chs)), GFP_KERNEL); - if (!ptr) { - ret = -ENOMEM; - goto err_hwdep; - } - - if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_I2C) { - snd_soc_dapm_new_controls(dapm, tomtom_dapm_i2s_widgets, - ARRAY_SIZE(tomtom_dapm_i2s_widgets)); - snd_soc_dapm_add_routes(dapm, audio_i2s_map, - ARRAY_SIZE(audio_i2s_map)); - for (i = 0; i < ARRAY_SIZE(tomtom_i2s_dai); i++) - INIT_LIST_HEAD(&tomtom->dai[i].wcd9xxx_ch_list); - } else if (tomtom->intf_type == WCD9XXX_INTERFACE_TYPE_SLIMBUS) { - for (i = 0; i < NUM_CODEC_DAIS; i++) { - INIT_LIST_HEAD(&tomtom->dai[i].wcd9xxx_ch_list); - init_waitqueue_head(&tomtom->dai[i].dai_wait); - } - tomtom_slimbus_slave_port_cfg.slave_dev_intfdev_la = - control->slim_slave->laddr; - tomtom_slimbus_slave_port_cfg.slave_dev_pgd_la = - control->slim->laddr; - tomtom_slimbus_slave_port_cfg.slave_port_mapping[0] = - TOMTOM_MAD_SLIMBUS_TX_PORT; - - tomtom_init_slim_slave_cfg(codec); - } - - snd_soc_dapm_new_controls(dapm, tomtom_1_dapm_widgets, - ARRAY_SIZE(tomtom_1_dapm_widgets)); - snd_soc_add_codec_controls(codec, - tomtom_1_x_analog_gain_controls, - ARRAY_SIZE(tomtom_1_x_analog_gain_controls)); - - snd_soc_add_codec_controls(codec, impedance_detect_controls, - ARRAY_SIZE(impedance_detect_controls)); - snd_soc_add_codec_controls(codec, hph_type_detect_controls, - ARRAY_SIZE(hph_type_detect_controls)); - - control->num_rx_port = TOMTOM_RX_MAX; - control->rx_chs = ptr; - memcpy(control->rx_chs, tomtom_rx_chs, sizeof(tomtom_rx_chs)); - control->num_tx_port = TOMTOM_TX_MAX; - control->tx_chs = ptr + sizeof(tomtom_rx_chs); - memcpy(control->tx_chs, tomtom_tx_chs, sizeof(tomtom_tx_chs)); - - snd_soc_dapm_sync(dapm); - - ret = tomtom_setup_irqs(tomtom); - if (ret) { - pr_err("%s: tomtom irq setup failed %d\n", __func__, ret); - goto err_pdata; - } - - atomic_set(&kp_tomtom_priv, (unsigned long)tomtom); - mutex_lock(&tomtom->codec_mutex); - snd_soc_dapm_disable_pin(dapm, "ANC HPHL"); - snd_soc_dapm_disable_pin(dapm, "ANC HPHR"); - snd_soc_dapm_disable_pin(dapm, "ANC HEADPHONE"); - snd_soc_dapm_disable_pin(dapm, "ANC EAR PA"); - snd_soc_dapm_disable_pin(dapm, "ANC EAR"); - mutex_unlock(&tomtom->codec_mutex); - snd_soc_dapm_sync(dapm); - - codec->component.ignore_pmdown_time = 1; - ret = tomtom_cpe_initialize(codec); - if (ret) { - dev_info(codec->dev, - "%s: cpe initialization failed, ret = %d\n", - __func__, ret); - /* Do not fail probe if CPE failed */ - ret = 0; - } - return ret; - -err_pdata: - kfree(ptr); - control->rx_chs = NULL; - control->tx_chs = NULL; -err_hwdep: - kfree(tomtom->fw_data); - tomtom->fw_data = NULL; -err_nomem_slimch: - devm_kfree(codec->dev, tomtom); - return ret; -} -static int tomtom_codec_remove(struct snd_soc_codec *codec) -{ - struct tomtom_priv *tomtom = snd_soc_codec_get_drvdata(codec); - struct wcd9xxx *control; - - WCD9XXX_BG_CLK_LOCK(&tomtom->resmgr); - atomic_set(&kp_tomtom_priv, 0); - - WCD9XXX_BG_CLK_UNLOCK(&tomtom->resmgr); - - control = dev_get_drvdata(codec->dev->parent); - control->rx_chs = NULL; - control->tx_chs = NULL; - - if (tomtom->wcd_ext_clk) - clk_put(tomtom->wcd_ext_clk); - tomtom_cleanup_irqs(tomtom); - - /* cleanup MBHC */ - wcd9xxx_mbhc_deinit(&tomtom->mbhc); - /* cleanup resmgr */ - wcd9xxx_resmgr_deinit(&tomtom->resmgr); - - 
tomtom->spkdrv_reg = NULL; - tomtom->spkdrv2_reg = NULL; - - devm_kfree(codec->dev, tomtom); - return 0; -} - -static struct regmap *tomtom_get_regmap(struct device *dev) -{ - struct wcd9xxx *control = dev_get_drvdata(dev->parent); - - return control->regmap; -} - -static struct snd_soc_codec_driver soc_codec_dev_tomtom = { - .probe = tomtom_codec_probe, - .remove = tomtom_codec_remove, - .get_regmap = tomtom_get_regmap, - .component_driver = { - .controls = tomtom_snd_controls, - .num_controls = ARRAY_SIZE(tomtom_snd_controls), - .dapm_widgets = tomtom_dapm_widgets, - .num_dapm_widgets = ARRAY_SIZE(tomtom_dapm_widgets), - .dapm_routes = audio_map, - .num_dapm_routes = ARRAY_SIZE(audio_map), - }, -}; - -#ifdef CONFIG_PM -static int tomtom_suspend(struct device *dev) -{ - dev_dbg(dev, "%s: system suspend\n", __func__); - return 0; -} - -static int tomtom_resume(struct device *dev) -{ - struct platform_device *pdev = to_platform_device(dev); - struct tomtom_priv *tomtom = platform_get_drvdata(pdev); - - if (!tomtom) { - dev_err(dev, "%s: tomtom private data is NULL\n", __func__); - return -EINVAL; - } - dev_dbg(dev, "%s: system resume\n", __func__); - /* Notify */ - wcd9xxx_resmgr_notifier_call(&tomtom->resmgr, - WCD9XXX_EVENT_POST_RESUME); - return 0; -} - -static const struct dev_pm_ops tomtom_pm_ops = { - .suspend = tomtom_suspend, - .resume = tomtom_resume, -}; -#endif - -static int tomtom_probe(struct platform_device *pdev) -{ - int ret = 0; - struct tomtom_priv *tomtom; - - tomtom = devm_kzalloc(&pdev->dev, sizeof(struct tomtom_priv), - GFP_KERNEL); - if (!tomtom) - return -ENOMEM; - - platform_set_drvdata(pdev, tomtom); - - if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_SLIMBUS) - ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tomtom, - tomtom_dai, ARRAY_SIZE(tomtom_dai)); - else if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C) - ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_tomtom, - tomtom_i2s_dai, ARRAY_SIZE(tomtom_i2s_dai)); - mutex_init(&tomtom->codec_mutex); - return ret; -} -static int tomtom_remove(struct platform_device *pdev) -{ - struct tomtom_priv *tomtom = platform_get_drvdata(pdev); - - mutex_destroy(&tomtom->codec_mutex); - snd_soc_unregister_codec(&pdev->dev); - return 0; -} -static struct platform_driver tomtom_codec_driver = { - .probe = tomtom_probe, - .remove = tomtom_remove, - .driver = { - .name = "tomtom_codec", - .owner = THIS_MODULE, -#ifdef CONFIG_PM - .pm = &tomtom_pm_ops, -#endif - }, -}; - -static int __init tomtom_codec_init(void) -{ - return platform_driver_register(&tomtom_codec_driver); -} - -static void __exit tomtom_codec_exit(void) -{ - platform_driver_unregister(&tomtom_codec_driver); -} - -module_init(tomtom_codec_init); -module_exit(tomtom_codec_exit); - -MODULE_DESCRIPTION("TomTom codec driver"); -MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/wcd9330.h b/sound/soc/codecs/wcd9330.h deleted file mode 100644 index 8679d013729a..000000000000 --- a/sound/soc/codecs/wcd9330.h +++ /dev/null @@ -1,128 +0,0 @@ -/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - */ -#ifndef WCD9330_H -#define WCD9330_H - -#include -#include -#include -#include -#include "wcd9xxx-mbhc.h" -#include "wcd9xxx-resmgr.h" -#include "wcd9xxx-common.h" - -#define TOMTOM_NUM_REGISTERS 0x400 -#define TOMTOM_MAX_REGISTER (TOMTOM_NUM_REGISTERS-1) -#define TOMTOM_CACHE_SIZE TOMTOM_NUM_REGISTERS - -#define TOMTOM_REG_VAL(reg, val) {reg, 0, val} -#define TOMTOM_MCLK_ID 0 - -#define TOMTOM_REGISTER_START_OFFSET 0x800 -#define TOMTOM_SB_PGD_PORT_RX_BASE 0x40 -#define TOMTOM_SB_PGD_PORT_TX_BASE 0x50 - -#define WCD9330_DMIC_CLK_DIV_2 0x00 -#define WCD9330_DMIC_CLK_DIV_3 0x01 -#define WCD9330_DMIC_CLK_DIV_4 0x02 -#define WCD9330_DMIC_CLK_DIV_6 0x03 -#define WCD9330_DMIC_CLK_DIV_16 0x04 - -#define TOMTOM_ZDET_SUPPORTED true - -extern const u8 tomtom_reset_reg_defaults[TOMTOM_CACHE_SIZE]; -struct tomtom_codec_dai_data { - u32 rate; - u32 *ch_num; - u32 ch_act; - u32 ch_tot; -}; - -enum tomtom_pid_current { - TOMTOM_PID_MIC_2P5_UA, - TOMTOM_PID_MIC_5_UA, - TOMTOM_PID_MIC_10_UA, - TOMTOM_PID_MIC_20_UA, -}; - -enum tomtom_mbhc_analog_pwr_cfg { - TOMTOM_ANALOG_PWR_COLLAPSED = 0, - TOMTOM_ANALOG_PWR_ON, - TOMTOM_NUM_ANALOG_PWR_CONFIGS, -}; - -enum { - HPH_PA_NONE = 0, - HPH_PA_R, - HPH_PA_L, - HPH_PA_L_R, -}; - -/* Number of input and output Slimbus port */ -enum { - TOMTOM_RX1 = 0, - TOMTOM_RX2, - TOMTOM_RX3, - TOMTOM_RX4, - TOMTOM_RX5, - TOMTOM_RX6, - TOMTOM_RX7, - TOMTOM_RX8, - TOMTOM_RX9, - TOMTOM_RX10, - TOMTOM_RX11, - TOMTOM_RX12, - TOMTOM_RX13, - TOMTOM_RX_MAX, -}; - -enum { - TOMTOM_TX1 = 0, - TOMTOM_TX2, - TOMTOM_TX3, - TOMTOM_TX4, - TOMTOM_TX5, - TOMTOM_TX6, - TOMTOM_TX7, - TOMTOM_TX8, - TOMTOM_TX9, - TOMTOM_TX10, - TOMTOM_TX11, - TOMTOM_TX12, - TOMTOM_TX13, - TOMTOM_TX14, - TOMTOM_TX15, - TOMTOM_TX16, - TOMTOM_TX_MAX, -}; - -extern int tomtom_mclk_enable(struct snd_soc_codec *codec, int mclk_enable, - bool dapm); -extern int tomtom_codec_mclk_enable(struct snd_soc_codec *codec, - int mclk_enable, bool dapm); -extern int tomtom_hs_detect(struct snd_soc_codec *codec, - struct wcd9xxx_mbhc_config *mbhc_cfg); -extern void tomtom_hs_detect_exit(struct snd_soc_codec *codec); -extern void *tomtom_get_afe_config(struct snd_soc_codec *codec, - enum afe_config_type config_type); - -extern void tomtom_event_register( - int (*machine_event_cb)(struct snd_soc_codec *codec, - enum wcd9xxx_codec_event), - struct snd_soc_codec *codec); -extern void tomtom_register_ext_clk_cb( - int (*codec_ext_clk_en)(struct snd_soc_codec *codec, - int enable, bool dapm), - int (*get_ext_clk_cnt)(void), - struct snd_soc_codec *codec); -extern int tomtom_enable_qfuse_sensing(struct snd_soc_codec *codec); -#endif diff --git a/sound/soc/codecs/wcd9xxx-common.c b/sound/soc/codecs/wcd9xxx-common.c deleted file mode 100644 index 7b2e68a211b0..000000000000 --- a/sound/soc/codecs/wcd9xxx-common.c +++ /dev/null @@ -1,1480 +0,0 @@ -/* Copyright (c) 2013-2015, 2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#include -#include -#include -#include -#include -#include -#include "wcd9xxx-common.h" - -#define CLSH_COMPUTE_EAR 0x01 -#define CLSH_COMPUTE_HPH_L 0x02 -#define CLSH_COMPUTE_HPH_R 0x03 - -#define BUCK_VREF_0P494V 0x3F -#define BUCK_VREF_2V 0xFF -#define BUCK_VREF_0P494V 0x3F -#define BUCK_VREF_1P8V 0xE6 - -#define BUCK_SETTLE_TIME_US 50 -#define NCP_SETTLE_TIME_US 50 - -#define MAX_IMPED_PARAMS 13 - -#define USLEEP_RANGE_MARGIN_US 100 - -struct wcd9xxx_imped_val { - u32 imped_val; - u8 index; -}; - -static const struct wcd9xxx_reg_mask_val imped_table[][MAX_IMPED_PARAMS] = { - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x46}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9B}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x15}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x04}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0C}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x47}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9B}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x15}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x05}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0C}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x49}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x12}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x35}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x4E}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x06}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0E}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x49}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x16}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x17}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x5F}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xCF}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x06}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x0F}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x59}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x15}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xCE}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xBD}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x07}, - 
{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x10}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x66}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x9A}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x02}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xBD}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA6}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x07}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x11}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x79}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x37}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA6}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAD}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x08}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x12}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x76}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x04}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x11}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x4E}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAD}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x09}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x12}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x78}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x12}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xD0}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0A}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x13}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x7A}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xB7}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0B}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x14}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x60}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x09}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA4}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1F}, - 
{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0C}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x14}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x79}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x17}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1F}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0D}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x15}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x78}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x16}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x0E}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x89}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x40}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x10}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x97}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xD0}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x12}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x17}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8A}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xB7}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x10}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x13}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x17}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8A}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xA4}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24}, - 
{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x15}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x18}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x9A}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x08}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x27}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x18}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x8B}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x18}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAC}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x20}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1A}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x9A}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x17}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x13}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1B}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2E}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2D}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1D}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x1A}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xA9}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x14}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2D}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x1F}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x19}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xB9}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x06}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x10}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x23}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x18}, - }, - { - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0xA9}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x07}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1D}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x27}, - 
{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x35}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0xff, 0x26}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0xff, 0x16}, - }, -}; - -static const struct wcd9xxx_imped_val imped_index[] = { - {4000, 0}, - {4500, 1}, - {5000, 2}, - {5500, 3}, - {6000, 4}, - {6500, 5}, - {7000, 6}, - {7700, 7}, - {8470, 8}, - {9317, 9}, - {10248, 10}, - {11273, 11}, - {12400, 12}, - {13641, 13}, - {15005, 14}, - {16505, 15}, - {18156, 16}, - {19971, 17}, - {21969, 18}, - {24165, 19}, - {26582, 20}, - {29240, 21}, - {32164, 22}, -}; - -static inline void -wcd9xxx_enable_clsh_block(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, bool enable) -{ - if ((enable && ++clsh_d->clsh_users == 1) || - (!enable && --clsh_d->clsh_users == 0)) - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL, - 0x01, enable ? 0x01 : 0x00); - dev_dbg(codec->dev, "%s: clsh_users %d, enable %d", __func__, - clsh_d->clsh_users, enable); -} - -static inline void wcd9xxx_enable_anc_delay( - struct snd_soc_codec *codec, - bool on) -{ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL, - 0x02, on ? 0x02 : 0x00); -} - -static inline void -wcd9xxx_enable_buck(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, bool enable) -{ - if ((enable && ++clsh_d->buck_users == 1) || - (!enable && --clsh_d->buck_users == 0)) - snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1, - 0x80, enable ? 0x80 : 0x00); - dev_dbg(codec->dev, "%s: buck_users %d, enable %d", __func__, - clsh_d->buck_users, enable); -} - -static void (*clsh_state_fp[NUM_CLSH_STATES])(struct snd_soc_codec *, - struct wcd9xxx_clsh_cdc_data *, - u8 req_state, bool req_type); - -static const char *state_to_str(u8 state, char *buf, size_t buflen) -{ - int i; - int cnt = 0; - /* - * This array of strings should match with enum wcd9xxx_clsh_state_bit. - */ - static const char *const states[] = { - "STATE_EAR", - "STATE_HPH_L", - "STATE_HPH_R", - "STATE_LO", - }; - - if (state == WCD9XXX_CLSH_STATE_IDLE) { - snprintf(buf, buflen, "[STATE_IDLE]"); - goto done; - } - - buf[0] = '\0'; - for (i = 0; i < ARRAY_SIZE(states); i++) { - if (!(state & (1 << i))) - continue; - cnt = snprintf(buf, buflen - cnt - 1, "%s%s%s", buf, - buf[0] == '\0' ? 
"[" : "|", - states[i]); - } - if (cnt > 0) - strlcat(buf + cnt, "]", buflen); - -done: - if (buf[0] == '\0') - snprintf(buf, buflen, "[STATE_UNKNOWN]"); - return buf; -} - -static void wcd9xxx_cfg_clsh_param_common( - struct snd_soc_codec *codec) -{ - int i; - const struct wcd9xxx_reg_mask_val reg_set[] = { - {WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 0, 0}, - {WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 2, 1 << 2}, - {WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, (0x1 << 4), 0}, - {WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 0), 0x01}, - {WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 2), (0x01 << 2)}, - {WCD9XXX_A_CDC_CLSH_B2_CTL, (0xf << 4), (0x03 << 4)}, - {WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 4), (0x03 << 4)}, - {WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 0), (0x0B)}, - {WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 5), (0x01 << 5)}, - {WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 1), (0x01 << 1)}, - }; - - for (i = 0; i < ARRAY_SIZE(reg_set); i++) - snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask, - reg_set[i].val); - - dev_dbg(codec->dev, "%s: Programmed class H controller common parameters", - __func__); -} - -static void wcd9xxx_chargepump_request(struct snd_soc_codec *codec, bool on) -{ - static int cp_count; - - if (on && (++cp_count == 1)) { - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL, - 0x01, 0x01); - dev_dbg(codec->dev, "%s: Charge Pump enabled, count = %d\n", - __func__, cp_count); - } else if (!on) { - if (--cp_count < 0) { - dev_dbg(codec->dev, - "%s: Unbalanced disable for charge pump\n", - __func__); - if (snd_soc_read(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL) & - 0x01) { - dev_dbg(codec->dev, - "%s: Actual chargepump is ON\n", - __func__); - } - cp_count = 0; - WARN_ON(1); - } - - if (cp_count == 0) { - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL, - 0x01, 0x00); - dev_dbg(codec->dev, - "%s: Charge pump disabled, count = %d\n", - __func__, cp_count); - } - } -} - -void wcd9xxx_enable_high_perf_mode(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 uhqa_mode, u8 req_state, bool req_type) -{ - dev_dbg(codec->dev, "%s: users fclk8 %d, fclk5 %d", __func__, - clsh_d->ncp_users[NCP_FCLK_LEVEL_8], - clsh_d->ncp_users[NCP_FCLK_LEVEL_5]); - - if (req_type == WCD9XXX_CLSAB_REQ_ENABLE) { - clsh_d->ncp_users[NCP_FCLK_LEVEL_8]++; - snd_soc_write(codec, WCD9XXX_A_RX_HPH_BIAS_PA, - WCD9XXX_A_RX_HPH_BIAS_PA__POR); - snd_soc_write(codec, WCD9XXX_A_RX_HPH_L_PA_CTL, 0x48); - snd_soc_write(codec, WCD9XXX_A_RX_HPH_R_PA_CTL, 0x48); - if (uhqa_mode) - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CHOP_CTL, - 0x20, 0x00); - wcd9xxx_chargepump_request(codec, true); - wcd9xxx_enable_anc_delay(codec, true); - wcd9xxx_enable_buck(codec, clsh_d, false); - if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] > 0) - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, - 0x0F, 0x08); - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x30, 0x30); - - /* Enable NCP and wait until settles down */ - if (snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x01)) - usleep_range(NCP_SETTLE_TIME_US, NCP_SETTLE_TIME_US+10); - } else { - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CHOP_CTL, - 0x20, 0x20); - snd_soc_write(codec, WCD9XXX_A_RX_HPH_L_PA_CTL, - WCD9XXX_A_RX_HPH_L_PA_CTL__POR); - snd_soc_write(codec, WCD9XXX_A_RX_HPH_R_PA_CTL, - WCD9XXX_A_RX_HPH_R_PA_CTL__POR); - snd_soc_write(codec, WCD9XXX_A_RX_HPH_BIAS_PA, 0x57); - wcd9xxx_enable_buck(codec, clsh_d, true); - wcd9xxx_chargepump_request(codec, false); - wcd9xxx_enable_anc_delay(codec, false); - clsh_d->ncp_users[NCP_FCLK_LEVEL_8]--; - if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0 && - 
clsh_d->ncp_users[NCP_FCLK_LEVEL_5] == 0) - snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, - 0x01, 0x00); - else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0) - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, - 0x0F, 0x05); - } - dev_dbg(codec->dev, "%s: leave\n", __func__); -} -EXPORT_SYMBOL(wcd9xxx_enable_high_perf_mode); - -static int get_impedance_index(u32 imped) -{ - int i = 0; - - if (imped < imped_index[i].imped_val) { - pr_debug("%s, detected impedance is less than 4 Ohm\n", - __func__); - goto ret; - } - if (imped >= imped_index[ARRAY_SIZE(imped_index) - 1].imped_val) { - pr_debug("%s, detected impedance is greater than 32164 Ohm\n", - __func__); - i = ARRAY_SIZE(imped_index) - 1; - goto ret; - } - for (i = 0; i < ARRAY_SIZE(imped_index) - 1; i++) { - if (imped >= imped_index[i].imped_val && - imped < imped_index[i + 1].imped_val) - break; - } -ret: - pr_debug("%s: selected impedance index = %d\n", - __func__, imped_index[i].index); - return imped_index[i].index; -} - -void wcd9xxx_clsh_imped_config(struct snd_soc_codec *codec, - int imped) -{ - int i = 0; - int index = 0; - - index = get_impedance_index(imped); - if (index >= ARRAY_SIZE(imped_index)) { - pr_err("%s, invalid imped = %d\n", __func__, imped); - return; - } - for (i = 0; i < MAX_IMPED_PARAMS; i++) - snd_soc_write(codec, imped_table[index][i].reg, - imped_table[index][i].val); -} - -static void wcd9xxx_clsh_comp_req(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - int compute_pa, bool on) -{ - u8 shift; - - if (compute_pa == CLSH_COMPUTE_EAR) { - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL, 0x10, - (on ? 0x10 : 0)); - } else { - if (compute_pa == CLSH_COMPUTE_HPH_L) { - shift = 3; - } else if (compute_pa == CLSH_COMPUTE_HPH_R) { - shift = 2; - } else { - dev_dbg(codec->dev, - "%s: classh computation request is incorrect\n", - __func__); - return; - } - - if (on) - wcd9xxx_resmgr_add_cond_update_bits(clsh_d->resmgr, - WCD9XXX_COND_HPH, - WCD9XXX_A_CDC_CLSH_B1_CTL, - shift, false); - else - wcd9xxx_resmgr_rm_cond_update_bits(clsh_d->resmgr, - WCD9XXX_COND_HPH, - WCD9XXX_A_CDC_CLSH_B1_CTL, - shift, false); - } -} - -int wcd9xxx_soc_update_bits_push(struct snd_soc_codec *codec, - struct list_head *list, - uint16_t reg, uint8_t mask, - uint8_t value, int delay) -{ - int rc; - struct wcd9xxx_register_save_node *node; - - node = kmalloc(sizeof(*node), GFP_KERNEL); - if (unlikely(!node)) { - pr_err("%s: Not enough memory\n", __func__); - return -ENOMEM; - } - node->reg = reg; - node->value = snd_soc_read(codec, reg); - list_add(&node->lh, list); - if (mask == 0xFF) - rc = snd_soc_write(codec, reg, value); - else - rc = snd_soc_update_bits(codec, reg, mask, value); - if (delay) - usleep_range(delay, delay + USLEEP_RANGE_MARGIN_US); - return rc; -} -EXPORT_SYMBOL(wcd9xxx_soc_update_bits_push); - -void wcd9xxx_restore_registers(struct snd_soc_codec *codec, - struct list_head *lh) -{ - struct wcd9xxx_register_save_node *node, *nodetmp; - - list_for_each_entry_safe(node, nodetmp, lh, lh) { - snd_soc_write(codec, node->reg, node->value); - list_del(&node->lh); - kfree(node); - } -} -EXPORT_SYMBOL(wcd9xxx_restore_registers); - -static void wcd9xxx_dynamic_bypass_buck_ctrl_lo(struct snd_soc_codec *cdc, - bool enable) -{ - int i; - const struct wcd9xxx_reg_mask_val reg_set[] = { - {WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)}, - {WCD9XXX_A_BUCK_MODE_5, enable ? 
0xFF : 0x02, 0x02}, - {WCD9XXX_A_BUCK_MODE_5, 0x1, 0x01} - }; - - if (!enable) { - snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1, - (0x1 << 3), 0x00); - snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_4, - 0xFF, BUCK_VREF_2V); - } - for (i = 0; i < ARRAY_SIZE(reg_set); i++) - snd_soc_update_bits(cdc, reg_set[i].reg, reg_set[i].mask, - reg_set[i].val); - - /* 50us sleep is reqd. as per the class H HW design sequence */ - usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10); -} - -static void wcd9xxx_dynamic_bypass_buck_ctrl(struct snd_soc_codec *cdc, - bool enable) -{ - int i; - const struct wcd9xxx_reg_mask_val reg_set[] = { - {WCD9XXX_A_BUCK_MODE_3, (0x1 << 3), (enable << 3)}, - {WCD9XXX_A_BUCK_MODE_5, (0x1 << 1), ((!enable) << 1)}, - {WCD9XXX_A_BUCK_MODE_5, 0x1, !enable} - }; - if (!enable) { - snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_1, - (0x1 << 3), 0x00); - snd_soc_update_bits(cdc, WCD9XXX_A_BUCK_MODE_4, - 0xFF, BUCK_VREF_2V); - } - for (i = 0; i < ARRAY_SIZE(reg_set); i++) - snd_soc_update_bits(cdc, reg_set[i].reg, reg_set[i].mask, - reg_set[i].val); - - /* 50us sleep is reqd. as per the class H HW design sequence */ - usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10); -} - -static void wcd9xxx_set_buck_mode(struct snd_soc_codec *codec, u8 buck_vref) -{ - int i; - const struct wcd9xxx_reg_mask_val reg_set[] = { - {WCD9XXX_A_BUCK_MODE_5, 0x02, 0x02}, - {WCD9XXX_A_BUCK_MODE_4, 0xFF, buck_vref}, - {WCD9XXX_A_BUCK_MODE_1, 0x04, 0x04}, - {WCD9XXX_A_BUCK_MODE_3, 0x04, 0x00}, - {WCD9XXX_A_BUCK_MODE_3, 0x08, 0x00}, - }; - - for (i = 0; i < ARRAY_SIZE(reg_set); i++) - snd_soc_update_bits(codec, reg_set[i].reg, - reg_set[i].mask, reg_set[i].val); - - dev_dbg(codec->dev, "%s: Done\n", __func__); - usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US + 10); -} - - -/* This will be called for all states except Lineout */ -static void wcd9xxx_clsh_enable_post_pa(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *cdc_clsh_d) -{ - int i; - const struct wcd9xxx_reg_mask_val reg_set[] = { - {WCD9XXX_A_BUCK_MODE_5, 0x02, 0x00}, - {WCD9XXX_A_NCP_STATIC, 0x20, 0x00}, - {WCD9XXX_A_BUCK_MODE_3, 0x04, 0x04}, - }; - - for (i = 0; i < ARRAY_SIZE(reg_set); i++) - snd_soc_update_bits(codec, reg_set[i].reg, - reg_set[i].mask, reg_set[i].val); - - if (!cdc_clsh_d->is_dynamic_vdd_cp) - snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_3, - 0x08, 0x08); - - dev_dbg(codec->dev, "%s: completed clsh mode settings after PA enable\n", - __func__); - -} - -static void wcd9xxx_set_fclk_get_ncp(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - enum ncp_fclk_level fclk_level) -{ - clsh_d->ncp_users[fclk_level]++; - - pr_debug("%s: enter ncp type %d users fclk8 %d, fclk5 %d\n", __func__, - fclk_level, clsh_d->ncp_users[NCP_FCLK_LEVEL_8], - clsh_d->ncp_users[NCP_FCLK_LEVEL_5]); - - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x10, 0x00); - /* fclk level 8 dominates level 5 */ - if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] > 0) - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x08); - else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_5] > 0) - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x05); - else - WARN_ONCE(1, "Unexpected users %d,%d\n", - clsh_d->ncp_users[NCP_FCLK_LEVEL_8], - clsh_d->ncp_users[NCP_FCLK_LEVEL_5]); - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x20, 0x20); - - /* enable NCP and wait until settles down */ - if (snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x01)) - usleep_range(NCP_SETTLE_TIME_US, NCP_SETTLE_TIME_US + 50); - pr_debug("%s: 
leave\n", __func__); -} - -static void wcd9xxx_set_fclk_put_ncp(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - enum ncp_fclk_level fclk_level) -{ - clsh_d->ncp_users[fclk_level]--; - - pr_debug("%s: enter ncp type %d users fclk8 %d, fclk5 %d\n", __func__, - fclk_level, clsh_d->ncp_users[NCP_FCLK_LEVEL_8], - clsh_d->ncp_users[NCP_FCLK_LEVEL_5]); - - if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0 && - clsh_d->ncp_users[NCP_FCLK_LEVEL_5] == 0) - snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN, 0x01, 0x00); - else if (clsh_d->ncp_users[NCP_FCLK_LEVEL_8] == 0) - /* if dominating level 8 has gone, switch to 5 */ - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, 0x0F, 0x05); - pr_debug("%s: leave\n", __func__); -} - -static void wcd9xxx_cfg_clsh_param_ear(struct snd_soc_codec *codec) -{ - int i; - const struct wcd9xxx_reg_mask_val reg_set[] = { - {WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 7), 0}, - {WCD9XXX_A_CDC_CLSH_V_PA_HD_EAR, (0x3f << 0), 0x0D}, - {WCD9XXX_A_CDC_CLSH_V_PA_MIN_EAR, (0x3f << 0), 0x3A}, - - /* Under assumption that EAR load is 10.7ohm */ - {WCD9XXX_A_CDC_CLSH_IDLE_EAR_THSD, (0x3f << 0), 0x26}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_EAR_THSD, (0x3f << 0), 0x2C}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_L, 0xff, 0xA9}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_U, 0xff, 0x07}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, (0xf << 0), 0x08}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1b}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2d}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x36}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x37}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - }; - - for (i = 0; i < ARRAY_SIZE(reg_set); i++) - snd_soc_update_bits(codec, reg_set[i].reg, - reg_set[i].mask, reg_set[i].val); - - dev_dbg(codec->dev, "%s: Programmed Class H controller EAR specific params\n", - __func__); -} - -static void wcd9xxx_cfg_clsh_param_hph(struct snd_soc_codec *codec) -{ - int i; - const struct wcd9xxx_reg_mask_val reg_set[] = { - {WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 6), 0}, - {WCD9XXX_A_CDC_CLSH_V_PA_HD_HPH, 0x3f, 0x0D}, - {WCD9XXX_A_CDC_CLSH_V_PA_MIN_HPH, 0x3f, 0x1D}, - - /* Under assumption that HPH load is 16ohm per channel */ - {WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0x3f, 0x13}, - {WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0x1f, 0x19}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x97}, - {WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0}, - {WCD9XXX_A_CDC_CLSH_K_ADDR, 0x0f, 0}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25}, - {WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00}, - }; - - for (i = 0; i < ARRAY_SIZE(reg_set); i++) - snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask, - reg_set[i].val); - dev_dbg(codec->dev, "%s: Programmed Class H controller HPH specific params\n", - __func__); -} - -static void wcd9xxx_ncp_bypass_enable(struct snd_soc_codec *cdc, bool enable) -{ - snd_soc_update_bits(cdc, WCD9XXX_A_NCP_STATIC, 0x10, (enable << 4)); - /* 50us sleep is reqd. 
as per the class H HW design sequence */ - usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US+10); -} - -static void wcd9xxx_clsh_set_Iest(struct snd_soc_codec *codec, - u8 value) -{ - snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5, - 0x01, (0x01 & 0x03)); - snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5, - 0xFC, (value << 2)); -} - -static void wcd9xxx_clsh_state_hph_ear(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - int compute_pa = 0; - - dev_dbg(codec->dev, "%s: enter %s\n", __func__, - is_enable ? "enable" : "disable"); - - if (is_enable) { - /* - * The below check condition is required to make sure - * functions inside if condition will execute only once. - */ - if ((clsh_d->state == WCD9XXX_CLSH_STATE_EAR) || - (req_state == WCD9XXX_CLSH_STATE_EAR)) { - wcd9xxx_dynamic_bypass_buck_ctrl(codec, false); - wcd9xxx_ncp_bypass_enable(codec, true); - } - switch (req_state) { - case WCD9XXX_CLSH_STATE_HPHL: - compute_pa = CLSH_COMPUTE_HPH_L; - break; - case WCD9XXX_CLSH_STATE_HPHR: - compute_pa = CLSH_COMPUTE_HPH_R; - break; - case WCD9XXX_CLSH_STATE_EAR: - compute_pa = CLSH_COMPUTE_EAR; - break; - default: - dev_dbg(codec->dev, - "%s:Invalid state:0x%x,enable:0x%x\n", - __func__, req_state, is_enable); - break; - } - wcd9xxx_clsh_comp_req(codec, clsh_d, compute_pa, true); - - dev_dbg(codec->dev, "%s: Enabled hph+ear mode clsh\n", - __func__); - } else { - switch (req_state) { - case WCD9XXX_CLSH_STATE_HPHL: - compute_pa = CLSH_COMPUTE_HPH_L; - break; - case WCD9XXX_CLSH_STATE_HPHR: - compute_pa = CLSH_COMPUTE_HPH_R; - break; - case WCD9XXX_CLSH_STATE_EAR: - compute_pa = CLSH_COMPUTE_EAR; - break; - default: - dev_dbg(codec->dev, - "%s:Invalid state:0x%x,enable:0x%x\n", - __func__, req_state, is_enable); - break; - } - wcd9xxx_clsh_comp_req(codec, clsh_d, compute_pa, false); - - if (((clsh_d->state & (~req_state)) == - WCD9XXX_CLSH_STATE_EAR) || - (req_state == WCD9XXX_CLSH_STATE_EAR)) { - wcd9xxx_ncp_bypass_enable(codec, false); - wcd9xxx_dynamic_bypass_buck_ctrl(codec, true); - } - } -} - -static void wcd9xxx_clsh_state_hph_lo(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - - dev_dbg(codec->dev, "%s: enter %s\n", __func__, - is_enable ? 
"enable" : "disable"); - if (is_enable) { - if ((clsh_d->state == WCD9XXX_CLSH_STATE_LO) || - (req_state == WCD9XXX_CLSH_STATE_LO)) { - wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, false); - wcd9xxx_enable_buck(codec, clsh_d, true); - wcd9xxx_ncp_bypass_enable(codec, true); - if (req_state & WCD9XXX_CLSH_STATE_HPH_ST) { - wcd9xxx_set_fclk_get_ncp(codec, clsh_d, - NCP_FCLK_LEVEL_8); - wcd9xxx_set_fclk_put_ncp(codec, clsh_d, - NCP_FCLK_LEVEL_5); - wcd9xxx_enable_clsh_block(codec, clsh_d, true); - wcd9xxx_chargepump_request(codec, true); - wcd9xxx_enable_anc_delay(codec, true); - } - } - if (req_state == WCD9XXX_CLSH_STATE_HPHL) - wcd9xxx_clsh_comp_req(codec, clsh_d, - CLSH_COMPUTE_HPH_L, true); - if (req_state == WCD9XXX_CLSH_STATE_HPHR) - wcd9xxx_clsh_comp_req(codec, clsh_d, - CLSH_COMPUTE_HPH_R, true); - } else { - switch (req_state) { - case WCD9XXX_CLSH_STATE_LO: - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, - 0x20, 0x00); - wcd9xxx_dynamic_bypass_buck_ctrl_lo(codec, true); - break; - case WCD9XXX_CLSH_STATE_HPHL: - wcd9xxx_clsh_comp_req(codec, clsh_d, - CLSH_COMPUTE_HPH_L, false); - break; - case WCD9XXX_CLSH_STATE_HPHR: - wcd9xxx_clsh_comp_req(codec, clsh_d, - CLSH_COMPUTE_HPH_R, false); - break; - default: - dev_dbg(codec->dev, - "%s:Invalid state:0x%x,enable:0x%x\n", - __func__, req_state, is_enable); - break; - } - if ((req_state == WCD9XXX_CLSH_STATE_LO) || - ((clsh_d->state & (~req_state)) == WCD9XXX_CLSH_STATE_LO)) { - wcd9xxx_ncp_bypass_enable(codec, false); - - if ((clsh_d->state & (~req_state)) == - WCD9XXX_CLSH_STATE_LO) { - wcd9xxx_set_fclk_get_ncp(codec, clsh_d, - NCP_FCLK_LEVEL_5); - wcd9xxx_set_fclk_put_ncp(codec, clsh_d, - NCP_FCLK_LEVEL_8); - } - - if (req_state & WCD9XXX_CLSH_STATE_HPH_ST) { - usleep_range(BUCK_SETTLE_TIME_US, - BUCK_SETTLE_TIME_US + 10); - if (clsh_d->buck_mv == - WCD9XXX_CDC_BUCK_MV_1P8) { - wcd9xxx_enable_buck(codec, clsh_d, - false); - wcd9xxx_ncp_bypass_enable(codec, true); - } else { - /* - *NCP settle time recommended by codec - *specification - */ - usleep_range(NCP_SETTLE_TIME_US, - NCP_SETTLE_TIME_US + 10); - wcd9xxx_clsh_set_Iest(codec, 0x02); - } - snd_soc_update_bits(codec, - WCD9XXX_A_BUCK_MODE_1, - 0x04, 0x00); - snd_soc_update_bits(codec, - WCD9XXX_A_BUCK_MODE_4, - 0xFF, BUCK_VREF_1P8V); - } - } - } -} - -static void wcd9xxx_clsh_state_ear_lo(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - - dev_dbg(codec->dev, "%s: enter %s\n", __func__, - is_enable ? 
"enable" : "disable"); - if (is_enable) { - wcd9xxx_dynamic_bypass_buck_ctrl(codec, false); - wcd9xxx_enable_buck(codec, clsh_d, true); - wcd9xxx_ncp_bypass_enable(codec, true); - if (req_state & WCD9XXX_CLSH_STATE_EAR) { - wcd9xxx_set_fclk_get_ncp(codec, clsh_d, - NCP_FCLK_LEVEL_8); - wcd9xxx_set_fclk_put_ncp(codec, clsh_d, - NCP_FCLK_LEVEL_5); - wcd9xxx_enable_clsh_block(codec, clsh_d, true); - wcd9xxx_chargepump_request(codec, true); - wcd9xxx_enable_anc_delay(codec, true); - wcd9xxx_clsh_comp_req(codec, clsh_d, - CLSH_COMPUTE_EAR, true); - } - } else { - wcd9xxx_ncp_bypass_enable(codec, false); - - if ((clsh_d->state & (~req_state)) == WCD9XXX_CLSH_STATE_LO) { - wcd9xxx_set_fclk_get_ncp(codec, clsh_d, - NCP_FCLK_LEVEL_5); - wcd9xxx_set_fclk_put_ncp(codec, clsh_d, - NCP_FCLK_LEVEL_8); - } - - if (req_state & WCD9XXX_CLSH_STATE_LO) { - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, - 0x20, 0x00); - wcd9xxx_dynamic_bypass_buck_ctrl(codec, true); - } else if (req_state & WCD9XXX_CLSH_STATE_EAR) { - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, - false); - /*sleep 5ms*/ - if (clsh_d->buck_mv == WCD9XXX_CDC_BUCK_MV_1P8) { - wcd9xxx_enable_buck(codec, clsh_d, false); - wcd9xxx_ncp_bypass_enable(codec, true); - } else { - /* NCP settle time recommended by codec spec */ - usleep_range(NCP_SETTLE_TIME_US, - NCP_SETTLE_TIME_US + 10); - wcd9xxx_clsh_set_Iest(codec, 0x02); - } - snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1, - 0x04, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_4, - 0xFF, BUCK_VREF_1P8V); - } - } -} - -static void wcd9xxx_clsh_state_hph_ear_lo(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - dev_dbg(codec->dev, "%s: enter %s\n", __func__, - is_enable ? "enable" : "disable"); - - if (req_state & WCD9XXX_CLSH_STATE_HPHL) - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, - is_enable); - - if (req_state & WCD9XXX_CLSH_STATE_HPHR) - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, - is_enable); - - if (req_state & WCD9XXX_CLSH_STATE_EAR) - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, - is_enable); -} - -static void wcd9xxx_clsh_state_ear(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable"); - if (is_enable) { - wcd9xxx_cfg_clsh_param_common(codec); - wcd9xxx_cfg_clsh_param_ear(codec); - wcd9xxx_enable_clsh_block(codec, clsh_d, true); - wcd9xxx_chargepump_request(codec, true); - wcd9xxx_enable_anc_delay(codec, true); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, true); - wcd9xxx_set_buck_mode(codec, BUCK_VREF_2V); - wcd9xxx_enable_buck(codec, clsh_d, true); - wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8); - - dev_dbg(codec->dev, "%s: Enabled ear mode class h\n", __func__); - } else { - dev_dbg(codec->dev, "%s: stub fallback to ear\n", __func__); - wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8); - wcd9xxx_enable_buck(codec, clsh_d, false); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_EAR, false); - wcd9xxx_chargepump_request(codec, false); - wcd9xxx_enable_clsh_block(codec, clsh_d, false); - } -} - -static void wcd9xxx_clsh_state_hph_l(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - pr_debug("%s: enter %s\n", __func__, is_enable ? 
"enable" : "disable"); - - if (is_enable) { - wcd9xxx_cfg_clsh_param_common(codec); - wcd9xxx_cfg_clsh_param_hph(codec); - wcd9xxx_enable_clsh_block(codec, clsh_d, true); - wcd9xxx_chargepump_request(codec, true); - wcd9xxx_enable_anc_delay(codec, true); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, true); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, true); - wcd9xxx_set_buck_mode(codec, BUCK_VREF_0P494V); - wcd9xxx_enable_buck(codec, clsh_d, true); - wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8); - - dev_dbg(codec->dev, "%s: Done\n", __func__); - } else { - wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8); - wcd9xxx_enable_buck(codec, clsh_d, false); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, false); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, false); - wcd9xxx_enable_clsh_block(codec, clsh_d, false); - wcd9xxx_chargepump_request(codec, false); - } -} - -static void wcd9xxx_clsh_state_hph_r(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable"); - - if (is_enable) { - wcd9xxx_cfg_clsh_param_common(codec); - wcd9xxx_cfg_clsh_param_hph(codec); - wcd9xxx_enable_clsh_block(codec, clsh_d, true); - wcd9xxx_chargepump_request(codec, true); - wcd9xxx_enable_anc_delay(codec, true); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, true); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, true); - wcd9xxx_set_buck_mode(codec, BUCK_VREF_0P494V); - wcd9xxx_enable_buck(codec, clsh_d, true); - wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8); - - dev_dbg(codec->dev, "%s: Done\n", __func__); - } else { - wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_8); - wcd9xxx_enable_buck(codec, clsh_d, false); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_L, false); - wcd9xxx_clsh_comp_req(codec, clsh_d, CLSH_COMPUTE_HPH_R, false); - wcd9xxx_enable_clsh_block(codec, clsh_d, false); - wcd9xxx_chargepump_request(codec, false); - } -} - -static void wcd9xxx_clsh_state_hph_st(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - pr_debug("%s: enter %s\n", __func__, is_enable ? "enable" : "disable"); - - if (is_enable) - dev_dbg(codec->dev, "%s: stub fallback to hph_st\n", __func__); - else - dev_dbg(codec->dev, "%s: stub fallback to hph_st\n", __func__); -} - -static void wcd9xxx_clsh_state_lo(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - pr_debug("%s: enter %s, buck_mv %d\n", __func__, - is_enable ? 
"enable" : "disable", clsh_d->buck_mv); - - if (is_enable) { - wcd9xxx_set_buck_mode(codec, BUCK_VREF_1P8V); - wcd9xxx_enable_buck(codec, clsh_d, true); - wcd9xxx_set_fclk_get_ncp(codec, clsh_d, NCP_FCLK_LEVEL_5); - - if (clsh_d->buck_mv == WCD9XXX_CDC_BUCK_MV_1P8) { - wcd9xxx_enable_buck(codec, clsh_d, false); - snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC, - 1 << 4, 1 << 4); - /* NCP settle time recommended by codec specification */ - usleep_range(NCP_SETTLE_TIME_US, - NCP_SETTLE_TIME_US + 10); - } else { - /* NCP settle time recommended by codec specification */ - usleep_range(NCP_SETTLE_TIME_US, - NCP_SETTLE_TIME_US + 10); - snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5, - 0x01, (0x01 & 0x03)); - snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5, - 0xFC, (0xFC & 0xB)); - } - snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1, 0x04, 0x00); - } else { - dev_dbg(codec->dev, "%s: stub fallback to lineout\n", __func__); - wcd9xxx_set_fclk_put_ncp(codec, clsh_d, NCP_FCLK_LEVEL_5); - if (clsh_d->buck_mv != WCD9XXX_CDC_BUCK_MV_1P8) - wcd9xxx_enable_buck(codec, clsh_d, false); - } -} - -static void wcd9xxx_clsh_state_err(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 req_state, bool is_enable) -{ - char msg[128]; - - dev_dbg(codec->dev, - "%s Wrong request for class H state machine requested to %s %s", - __func__, is_enable ? "enable" : "disable", - state_to_str(req_state, msg, sizeof(msg))); - WARN_ON(1); -} - -/* - * Function: wcd9xxx_clsh_is_state_valid - * Params: state - * Description: - * Provides information on valid states of Class H configuration - */ -static int wcd9xxx_clsh_is_state_valid(u8 state) -{ - switch (state) { - case WCD9XXX_CLSH_STATE_IDLE: - case WCD9XXX_CLSH_STATE_EAR: - case WCD9XXX_CLSH_STATE_HPHL: - case WCD9XXX_CLSH_STATE_HPHR: - case WCD9XXX_CLSH_STATE_HPH_ST: - case WCD9XXX_CLSH_STATE_LO: - case WCD9XXX_CLSH_STATE_HPHL_EAR: - case WCD9XXX_CLSH_STATE_HPHR_EAR: - case WCD9XXX_CLSH_STATE_HPH_ST_EAR: - case WCD9XXX_CLSH_STATE_HPHL_LO: - case WCD9XXX_CLSH_STATE_HPHR_LO: - case WCD9XXX_CLSH_STATE_HPH_ST_LO: - case WCD9XXX_CLSH_STATE_EAR_LO: - case WCD9XXX_CLSH_STATE_HPHL_EAR_LO: - case WCD9XXX_CLSH_STATE_HPHR_EAR_LO: - case WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO: - return 1; - default: - break; - } - return 0; -} - -/* - * Function: wcd9xxx_clsh_fsm - * Params: codec, cdc_clsh_d, req_state, req_type, clsh_event - * Description: - * This function handles PRE DAC and POST DAC conditions of different devices - * and updates class H configuration of different combination of devices - * based on validity of their states. 
cdc_clsh_d will contain current - * class h state information - */ -void wcd9xxx_clsh_fsm(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *cdc_clsh_d, - u8 req_state, bool req_type, u8 clsh_event) -{ - u8 old_state, new_state; - char msg0[128], msg1[128]; - - switch (clsh_event) { - case WCD9XXX_CLSH_EVENT_PRE_DAC: - /* PRE_DAC event should be used only for Enable */ - BUG_ON(req_type != WCD9XXX_CLSH_REQ_ENABLE); - - old_state = cdc_clsh_d->state; - new_state = old_state | req_state; - - if (!wcd9xxx_clsh_is_state_valid(new_state)) { - dev_dbg(codec->dev, - "%s: classH not a valid new state: %s\n", - __func__, - state_to_str(new_state, msg0, sizeof(msg0))); - return; - } - if (new_state == old_state) { - dev_dbg(codec->dev, - "%s: classH already in requested state: %s\n", - __func__, - state_to_str(new_state, msg0, sizeof(msg0))); - return; - } - (*clsh_state_fp[new_state]) (codec, cdc_clsh_d, req_state, - req_type); - cdc_clsh_d->state = new_state; - dev_dbg(codec->dev, - "%s: ClassH state transition from %s to %s\n", - __func__, state_to_str(old_state, msg0, sizeof(msg0)), - state_to_str(cdc_clsh_d->state, msg1, sizeof(msg1))); - - break; - case WCD9XXX_CLSH_EVENT_POST_PA: - if (req_type == WCD9XXX_CLSH_REQ_DISABLE) { - old_state = cdc_clsh_d->state; - new_state = old_state & (~req_state); - - if (new_state < NUM_CLSH_STATES) { - if (!wcd9xxx_clsh_is_state_valid(old_state)) { - dev_dbg(codec->dev, - "%s:Invalid old state:%s\n", - __func__, - state_to_str(old_state, msg0, - sizeof(msg0))); - return; - } - if (new_state == old_state) { - dev_dbg(codec->dev, - "%s: clsH already in old state: %s\n", - __func__, - state_to_str(new_state, msg0, - sizeof(msg0))); - return; - } - (*clsh_state_fp[old_state]) (codec, cdc_clsh_d, - req_state, - req_type); - cdc_clsh_d->state = new_state; - dev_dbg(codec->dev, "%s: ClassH state transition from %s to %s\n", - __func__, state_to_str(old_state, msg0, - sizeof(msg0)), - state_to_str(cdc_clsh_d->state, msg1, - sizeof(msg1))); - - } else { - dev_dbg(codec->dev, "%s:wrong new state=0x%x\n", - __func__, new_state); - } - } else if (!(cdc_clsh_d->state & WCD9XXX_CLSH_STATE_LO)) { - wcd9xxx_clsh_enable_post_pa(codec, cdc_clsh_d); - } - - break; - } - -} -EXPORT_SYMBOL(wcd9xxx_clsh_fsm); - -void wcd9xxx_clsh_init(struct wcd9xxx_clsh_cdc_data *clsh, - struct wcd9xxx_resmgr *resmgr) -{ - int i; - - clsh->state = WCD9XXX_CLSH_STATE_IDLE; - clsh->resmgr = resmgr; - - for (i = 0; i < NUM_CLSH_STATES; i++) - clsh_state_fp[i] = wcd9xxx_clsh_state_err; - - clsh_state_fp[WCD9XXX_CLSH_STATE_EAR] = wcd9xxx_clsh_state_ear; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL] = - wcd9xxx_clsh_state_hph_l; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR] = - wcd9xxx_clsh_state_hph_r; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST] = - wcd9xxx_clsh_state_hph_st; - clsh_state_fp[WCD9XXX_CLSH_STATE_LO] = wcd9xxx_clsh_state_lo; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_EAR] = - wcd9xxx_clsh_state_hph_ear; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_EAR] = - wcd9xxx_clsh_state_hph_ear; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_EAR] = - wcd9xxx_clsh_state_hph_ear; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_LO] = wcd9xxx_clsh_state_hph_lo; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_LO] = wcd9xxx_clsh_state_hph_lo; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_LO] = - wcd9xxx_clsh_state_hph_lo; - clsh_state_fp[WCD9XXX_CLSH_STATE_EAR_LO] = wcd9xxx_clsh_state_ear_lo; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL_EAR_LO] = - wcd9xxx_clsh_state_hph_ear_lo; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR_EAR_LO] = - 
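
wcd9xxx_clsh_fsm() above treats the set of active outputs as a bitmask: PRE_DAC ORs the requested device into the state, POST_PA masks it out, and a function-pointer table indexed by the bitmask (every slot defaulting to the error handler) selects the configuration routine, using the new state on enable and the old state on disable. Below is a minimal standalone sketch of that dispatch scheme, with the validity checks elided and purely illustrative handlers.

#include <stdint.h>
#include <stdio.h>

/* Device bits combine into a state index, mirroring the bitmask scheme above. */
#define ST_EAR   (1u << 0)
#define ST_HPHL  (1u << 1)
#define ST_HPHR  (1u << 2)
#define ST_LO    (1u << 3)
#define NUM_STATES (1u << 4)

typedef void (*state_fn)(uint8_t req_state, int enable);

static void state_err(uint8_t req, int en)
{
    printf("invalid combination: req=0x%x enable=%d\n", req, en);
}

static void state_hph(uint8_t req, int en)
{
    printf("configure HPH path: req=0x%x enable=%d\n", req, en);
}

static state_fn handlers[NUM_STATES];
static uint8_t cur_state;

static void fsm(uint8_t req_state, int enable)
{
    /* Enable ORs the requested device in; disable masks it out.  The handler
     * is picked by the new state on enable and by the old state on disable. */
    uint8_t old_state = cur_state;
    uint8_t new_state = enable ? (old_state | req_state)
                               : (old_state & ~req_state);

    if (new_state == old_state)
        return;                 /* already in the requested state */
    handlers[enable ? new_state : old_state](req_state, enable);
    cur_state = new_state;
}

int main(void)
{
    unsigned int i;

    for (i = 0; i < NUM_STATES; i++)
        handlers[i] = state_err;        /* unknown combinations hit the stub */
    handlers[ST_HPHL]           = state_hph;
    handlers[ST_HPHR]           = state_hph;
    handlers[ST_HPHL | ST_HPHR] = state_hph;

    fsm(ST_HPHL, 1);                    /* 0x0 -> 0x2 */
    fsm(ST_HPHR, 1);                    /* 0x2 -> 0x6 */
    fsm(ST_HPHR, 0);                    /* 0x6 -> 0x2, dispatched on old state */
    printf("state = 0x%x\n", cur_state);
    return 0;
}
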
wcd9xxx_clsh_state_hph_ear_lo; - clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO] = - wcd9xxx_clsh_state_hph_ear_lo; - -} -EXPORT_SYMBOL(wcd9xxx_clsh_init); - -MODULE_DESCRIPTION("WCD9XXX Common"); -MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/wcd9xxx-common.h b/sound/soc/codecs/wcd9xxx-common.h deleted file mode 100644 index 5c0c4a98f3fc..000000000000 --- a/sound/soc/codecs/wcd9xxx-common.h +++ /dev/null @@ -1,286 +0,0 @@ -/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef WCD9XXX_CODEC_COMMON - -#define WCD9XXX_CODEC_COMMON - -#include "wcd9xxx-resmgr.h" - -#define WCD9XXX_CLSH_REQ_ENABLE true -#define WCD9XXX_CLSH_REQ_DISABLE false - -#define WCD9XXX_CLSH_EVENT_PRE_DAC 0x01 -#define WCD9XXX_CLSH_EVENT_POST_PA 0x02 - -/* Basic states for Class H state machine. - * represented as a bit mask within a u8 data type - * bit 0: EAR mode - * bit 1: HPH Left mode - * bit 2: HPH Right mode - * bit 3: Lineout mode - * bit 4: Ultrasound mode - */ -#define WCD9XXX_CLSH_STATE_IDLE 0x00 -#define WCD9XXX_CLSH_STATE_EAR (0x01 << 0) -#define WCD9XXX_CLSH_STATE_HPHL (0x01 << 1) -#define WCD9XXX_CLSH_STATE_HPHR (0x01 << 2) -#define WCD9XXX_CLSH_STATE_LO (0x01 << 3) -#define NUM_CLSH_STATES (0x01 << 4) - -#define WCD9XXX_CLSAB_STATE_IDLE 0x00 -#define WCD9XXX_CLSAB_STATE_HPHL (0x01 << 1) -#define WCD9XXX_CLSAB_STATE_HPHR (0x01 << 2) - -#define WCD9XXX_CLSAB_REQ_ENABLE true -#define WCD9XXX_CLSAB_REQ_DISABLE false - -#define WCD9XXX_NON_UHQA_MODE 0 - -#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_2 0x0 -#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_3 0x1 -#define WCD9XXX_DMIC_SAMPLE_RATE_DIV_4 0x2 - -#define WCD9XXX_DMIC_B1_CTL_DIV_2 0x00 -#define WCD9XXX_DMIC_B1_CTL_DIV_3 0x22 -#define WCD9XXX_DMIC_B1_CTL_DIV_4 0x44 - -#define WCD9XXX_DMIC_B2_CTL_DIV_2 0x00 -#define WCD9XXX_DMIC_B2_CTL_DIV_3 0x02 -#define WCD9XXX_DMIC_B2_CTL_DIV_4 0x04 - -#define WCD9XXX_ANC_DMIC_X2_ON 0x1 -#define WCD9XXX_ANC_DMIC_X2_OFF 0x0 - -/* Derived State: Bits 1 and 2 should be set for Headphone stereo */ -#define WCD9XXX_CLSH_STATE_HPH_ST (WCD9XXX_CLSH_STATE_HPHL | \ - WCD9XXX_CLSH_STATE_HPHR) - -#define WCD9XXX_CLSH_STATE_HPHL_EAR (WCD9XXX_CLSH_STATE_HPHL | \ - WCD9XXX_CLSH_STATE_EAR) -#define WCD9XXX_CLSH_STATE_HPHR_EAR (WCD9XXX_CLSH_STATE_HPHR | \ - WCD9XXX_CLSH_STATE_EAR) - -#define WCD9XXX_CLSH_STATE_HPH_ST_EAR (WCD9XXX_CLSH_STATE_HPH_ST | \ - WCD9XXX_CLSH_STATE_EAR) - -#define WCD9XXX_CLSH_STATE_HPHL_LO (WCD9XXX_CLSH_STATE_HPHL | \ - WCD9XXX_CLSH_STATE_LO) -#define WCD9XXX_CLSH_STATE_HPHR_LO (WCD9XXX_CLSH_STATE_HPHR | \ - WCD9XXX_CLSH_STATE_LO) - -#define WCD9XXX_CLSH_STATE_HPH_ST_LO (WCD9XXX_CLSH_STATE_HPH_ST | \ - WCD9XXX_CLSH_STATE_LO) - -#define WCD9XXX_CLSH_STATE_EAR_LO (WCD9XXX_CLSH_STATE_EAR | \ - WCD9XXX_CLSH_STATE_LO) - -#define WCD9XXX_CLSH_STATE_HPHL_EAR_LO (WCD9XXX_CLSH_STATE_HPHL | \ - WCD9XXX_CLSH_STATE_EAR | \ - WCD9XXX_CLSH_STATE_LO) -#define WCD9XXX_CLSH_STATE_HPHR_EAR_LO (WCD9XXX_CLSH_STATE_HPHR | \ - WCD9XXX_CLSH_STATE_EAR | \ - WCD9XXX_CLSH_STATE_LO) -#define WCD9XXX_CLSH_STATE_HPH_ST_EAR_LO (WCD9XXX_CLSH_STATE_HPH_ST | \ 
- WCD9XXX_CLSH_STATE_EAR | \ - WCD9XXX_CLSH_STATE_LO) - -struct wcd9xxx_reg_mask_val { - u16 reg; - u8 mask; - u8 val; -}; - -enum ncp_fclk_level { - NCP_FCLK_LEVEL_8, - NCP_FCLK_LEVEL_5, - NCP_FCLK_LEVEL_MAX, -}; - -/* Class H data that the codec driver will maintain */ -struct wcd9xxx_clsh_cdc_data { - u8 state; - int buck_mv; - bool is_dynamic_vdd_cp; - int clsh_users; - int buck_users; - int ncp_users[NCP_FCLK_LEVEL_MAX]; - struct wcd9xxx_resmgr *resmgr; -}; - -struct wcd9xxx_anc_header { - u32 reserved[3]; - u32 num_anc_slots; -}; - -enum wcd9xxx_buck_volt { - WCD9XXX_CDC_BUCK_UNSUPPORTED = 0, - WCD9XXX_CDC_BUCK_MV_1P8 = 1800000, - WCD9XXX_CDC_BUCK_MV_2P15 = 2150000, -}; - -struct mad_audio_header { - u32 reserved[3]; - u32 num_reg_cfg; -}; - -struct mad_microphone_info { - uint8_t input_microphone; - uint8_t cycle_time; - uint8_t settle_time; - uint8_t padding; -} __packed; - -struct mad_micbias_info { - uint8_t micbias; - uint8_t k_factor; - uint8_t external_bypass_capacitor; - uint8_t internal_biasing; - uint8_t cfilter; - uint8_t padding[3]; -} __packed; - -struct mad_rms_audio_beacon_info { - uint8_t rms_omit_samples; - uint8_t rms_comp_time; - uint8_t detection_mechanism; - uint8_t rms_diff_threshold; - uint8_t rms_threshold_lsb; - uint8_t rms_threshold_msb; - uint8_t padding[2]; - uint8_t iir_coefficients[36]; -} __packed; - -struct mad_rms_ultrasound_info { - uint8_t rms_comp_time; - uint8_t detection_mechanism; - uint8_t rms_diff_threshold; - uint8_t rms_threshold_lsb; - uint8_t rms_threshold_msb; - uint8_t padding[3]; - uint8_t iir_coefficients[36]; -} __packed; - -struct mad_audio_cal { - uint32_t version; - struct mad_microphone_info microphone_info; - struct mad_micbias_info micbias_info; - struct mad_rms_audio_beacon_info audio_info; - struct mad_rms_audio_beacon_info beacon_info; - struct mad_rms_ultrasound_info ultrasound_info; -} __packed; - -extern void wcd9xxx_clsh_fsm(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *cdc_clsh_d, - u8 req_state, bool req_type, u8 clsh_event); - -extern void wcd9xxx_enable_high_perf_mode(struct snd_soc_codec *codec, - struct wcd9xxx_clsh_cdc_data *clsh_d, - u8 uhqa_mode, u8 req_state, bool req_type); - -extern void wcd9xxx_clsh_init(struct wcd9xxx_clsh_cdc_data *clsh, - struct wcd9xxx_resmgr *resmgr); - -extern void wcd9xxx_clsh_imped_config(struct snd_soc_codec *codec, - int imped); - -enum wcd9xxx_codec_event { - WCD9XXX_CODEC_EVENT_CODEC_UP = 0, -}; - -struct wcd9xxx_register_save_node { - struct list_head lh; - u16 reg; - u16 value; -}; - -extern int wcd9xxx_soc_update_bits_push(struct snd_soc_codec *codec, - struct list_head *lh, - uint16_t reg, uint8_t mask, - uint8_t value, int delay); -extern void wcd9xxx_restore_registers(struct snd_soc_codec *codec, - struct list_head *lh); -enum { - RESERVED = 0, - AANC_LPF_FF_FB = 1, - AANC_LPF_COEFF_MSB, - AANC_LPF_COEFF_LSB, - HW_MAD_AUDIO_ENABLE, - HW_MAD_ULTR_ENABLE, - HW_MAD_BEACON_ENABLE, - HW_MAD_AUDIO_SLEEP_TIME, - HW_MAD_ULTR_SLEEP_TIME, - HW_MAD_BEACON_SLEEP_TIME, - HW_MAD_TX_AUDIO_SWITCH_OFF, - HW_MAD_TX_ULTR_SWITCH_OFF, - HW_MAD_TX_BEACON_SWITCH_OFF, - MAD_AUDIO_INT_DEST_SELECT_REG, - MAD_ULT_INT_DEST_SELECT_REG, - MAD_BEACON_INT_DEST_SELECT_REG, - MAD_CLIP_INT_DEST_SELECT_REG, - MAD_VBAT_INT_DEST_SELECT_REG, - MAD_AUDIO_INT_MASK_REG, - MAD_ULT_INT_MASK_REG, - MAD_BEACON_INT_MASK_REG, - MAD_CLIP_INT_MASK_REG, - MAD_VBAT_INT_MASK_REG, - MAD_AUDIO_INT_STATUS_REG, - MAD_ULT_INT_STATUS_REG, - MAD_BEACON_INT_STATUS_REG, - MAD_CLIP_INT_STATUS_REG, - 
MAD_VBAT_INT_STATUS_REG, - MAD_AUDIO_INT_CLEAR_REG, - MAD_ULT_INT_CLEAR_REG, - MAD_BEACON_INT_CLEAR_REG, - MAD_CLIP_INT_CLEAR_REG, - MAD_VBAT_INT_CLEAR_REG, - SB_PGD_PORT_TX_WATERMARK_N, - SB_PGD_PORT_TX_ENABLE_N, - SB_PGD_PORT_RX_WATERMARK_N, - SB_PGD_PORT_RX_ENABLE_N, - SB_PGD_TX_PORTn_MULTI_CHNL_0, - SB_PGD_TX_PORTn_MULTI_CHNL_1, - SB_PGD_RX_PORTn_MULTI_CHNL_0, - SB_PGD_RX_PORTn_MULTI_CHNL_1, - AANC_FF_GAIN_ADAPTIVE, - AANC_FFGAIN_ADAPTIVE_EN, - AANC_GAIN_CONTROL, - SPKR_CLIP_PIPE_BANK_SEL, - SPKR_CLIPDET_VAL0, - SPKR_CLIPDET_VAL1, - SPKR_CLIPDET_VAL2, - SPKR_CLIPDET_VAL3, - SPKR_CLIPDET_VAL4, - SPKR_CLIPDET_VAL5, - SPKR_CLIPDET_VAL6, - SPKR_CLIPDET_VAL7, - VBAT_RELEASE_INT_DEST_SELECT_REG, - VBAT_RELEASE_INT_MASK_REG, - VBAT_RELEASE_INT_STATUS_REG, - VBAT_RELEASE_INT_CLEAR_REG, - MAD2_CLIP_INT_DEST_SELECT_REG, - MAD2_CLIP_INT_MASK_REG, - MAD2_CLIP_INT_STATUS_REG, - MAD2_CLIP_INT_CLEAR_REG, - SPKR2_CLIP_PIPE_BANK_SEL, - SPKR2_CLIPDET_VAL0, - SPKR2_CLIPDET_VAL1, - SPKR2_CLIPDET_VAL2, - SPKR2_CLIPDET_VAL3, - SPKR2_CLIPDET_VAL4, - SPKR2_CLIPDET_VAL5, - SPKR2_CLIPDET_VAL6, - SPKR2_CLIPDET_VAL7, - MAX_CFG_REGISTERS, -}; - -#endif diff --git a/sound/soc/codecs/wcd9xxx-mbhc.c b/sound/soc/codecs/wcd9xxx-mbhc.c deleted file mode 100644 index 3754b5709e3b..000000000000 --- a/sound/soc/codecs/wcd9xxx-mbhc.c +++ /dev/null @@ -1,5671 +0,0 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "wcd9xxx-mbhc.h" -#include "wcdcal-hwdep.h" -#include "wcd9xxx-resmgr.h" -#include "wcd9xxx-common.h" - -#define WCD9XXX_JACK_MASK (SND_JACK_HEADSET | SND_JACK_OC_HPHL | \ - SND_JACK_OC_HPHR | SND_JACK_LINEOUT | \ - SND_JACK_UNSUPPORTED | SND_JACK_MICROPHONE2 | \ - SND_JACK_MECHANICAL) -#define WCD9XXX_JACK_BUTTON_MASK (SND_JACK_BTN_0 | SND_JACK_BTN_1 | \ - SND_JACK_BTN_2 | SND_JACK_BTN_3 | \ - SND_JACK_BTN_4 | SND_JACK_BTN_5) - -#define NUM_DCE_PLUG_DETECT 3 -#define NUM_DCE_PLUG_INS_DETECT 5 -#define NUM_ATTEMPTS_INSERT_DETECT 25 -#define NUM_ATTEMPTS_TO_REPORT 5 - -#define FAKE_INS_LOW 10 -#define FAKE_INS_HIGH 80 -#define FAKE_INS_HIGH_NO_SWCH 150 -#define FAKE_REMOVAL_MIN_PERIOD_MS 50 -#define FAKE_INS_DELTA_SCALED_MV 300 - -#define BUTTON_MIN 0x8000 -#define STATUS_REL_DETECTION 0x0C - -#define HS_DETECT_PLUG_TIME_MS (5 * 1000) -#define ANC_HPH_DETECT_PLUG_TIME_MS (5 * 1000) -#define HS_DETECT_PLUG_INERVAL_MS 100 -#define SWCH_REL_DEBOUNCE_TIME_MS 50 -#define SWCH_IRQ_DEBOUNCE_TIME_US 5000 -#define BTN_RELEASE_DEBOUNCE_TIME_MS 25 - -#define GND_MIC_SWAP_THRESHOLD 2 -#define OCP_ATTEMPT 1 - -#define FW_READ_ATTEMPTS 15 -#define FW_READ_TIMEOUT 4000000 - -#define BUTTON_POLLING_SUPPORTED true - -#define MCLK_RATE_12288KHZ 12288000 -#define MCLK_RATE_9600KHZ 9600000 - -#define DEFAULT_DCE_STA_WAIT 55 -#define DEFAULT_DCE_WAIT 60000 -#define DEFAULT_STA_WAIT 5000 - -#define VDDIO_MICBIAS_MV 1800 - -#define WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US 5000 - -#define WCD9XXX_HPHL_STATUS_READY_WAIT_US 1000 -#define WCD9XXX_MUX_SWITCH_READY_WAIT_MS 50 -#define WCD9XXX_MEAS_DELTA_MAX_MV 120 -#define WCD9XXX_MEAS_INVALD_RANGE_LOW_MV 20 -#define WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV 80 - -/* Threshold in milliohm used for mono/stereo - * plug classification - */ -#define WCD9XXX_MONO_HS_DIFF_THR 20000000 -#define WCD9XXX_MONO_HS_MIN_THR 2000 - -/* - * Invalid voltage range for the detection - * of plug type with current source - */ -#define WCD9XXX_CS_MEAS_INVALD_RANGE_LOW_MV 160 -#define WCD9XXX_CS_MEAS_INVALD_RANGE_HIGH_MV 265 - -/* - * Threshold used to detect euro headset - * with current source - */ -#define WCD9XXX_CS_GM_SWAP_THRES_MIN_MV 10 -#define WCD9XXX_CS_GM_SWAP_THRES_MAX_MV 40 - -#define WCD9XXX_MBHC_NSC_CS 9 -#define WCD9XXX_GM_SWAP_THRES_MIN_MV 150 -#define WCD9XXX_GM_SWAP_THRES_MAX_MV 650 -#define WCD9XXX_THRESHOLD_MIC_THRESHOLD 200 - -#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100 - -/* RX_HPH_CNP_WG_TIME increases by 0.24ms */ -#define WCD9XXX_WG_TIME_FACTOR_US 240 - -#define WCD9XXX_V_CS_HS_MAX 500 -#define WCD9XXX_V_CS_NO_MIC 5 -#define WCD9XXX_MB_MEAS_DELTA_MAX_MV 80 -#define WCD9XXX_CS_MEAS_DELTA_MAX_MV 12 - -#define WCD9XXX_ZDET_ZONE_1 80000 -#define WCD9XXX_ZDET_ZONE_2 800000 - -#define WCD9XXX_IS_IN_ZDET_ZONE_1(x) (x < WCD9XXX_ZDET_ZONE_1 ? 1 : 0) -#define WCD9XXX_IS_IN_ZDET_ZONE_2(x) ((x > WCD9XXX_ZDET_ZONE_1 && \ - x < WCD9XXX_ZDET_ZONE_2) ? 1 : 0) -#define WCD9XXX_IS_IN_ZDET_ZONE_3(x) (x > WCD9XXX_ZDET_ZONE_2 ? 
1 : 0) -#define WCD9XXX_BOX_CAR_AVRG_MIN 1 -#define WCD9XXX_BOX_CAR_AVRG_MAX 10 - -/* - * Need to report LINEIN if H/L impedance - * is larger than 5K ohm - */ -#define WCD9XXX_LINEIN_THRESHOLD 5000000 - -static int impedance_detect_en; -module_param(impedance_detect_en, int, 0664); -MODULE_PARM_DESC(impedance_detect_en, "enable/disable impedance detect"); -static unsigned int z_det_box_car_avg = 1; -module_param(z_det_box_car_avg, int, 0664); -MODULE_PARM_DESC(z_det_box_car_avg, - "Number of samples for impedance detection"); - -static bool detect_use_vddio_switch; - -struct wcd9xxx_mbhc_detect { - u16 dce; - u16 sta; - u16 hphl_status; - bool swap_gnd; - bool vddio; - bool hwvalue; - bool mic_bias; - /* internal purpose from here */ - bool _above_no_mic; - bool _below_v_hs_max; - s16 _vdces; - enum wcd9xxx_mbhc_plug_type _type; -}; - -enum meas_type { - STA = 0, - DCE, -}; - -enum { - MBHC_USE_HPHL_TRIGGER = 1, - MBHC_USE_MB_TRIGGER = 2 -}; - -/* - * Flags to track of PA and DAC state. - * PA and DAC should be tracked separately as AUXPGA loopback requires - * only PA to be turned on without DAC being on. - */ -enum pa_dac_ack_flags { - WCD9XXX_HPHL_PA_OFF_ACK = 0, - WCD9XXX_HPHR_PA_OFF_ACK, - WCD9XXX_HPHL_DAC_OFF_ACK, - WCD9XXX_HPHR_DAC_OFF_ACK -}; - -enum wcd9xxx_current_v_idx { - WCD9XXX_CURRENT_V_INS_H, - WCD9XXX_CURRENT_V_INS_HU, - WCD9XXX_CURRENT_V_B1_H, - WCD9XXX_CURRENT_V_B1_HU, - WCD9XXX_CURRENT_V_BR_H, -}; - -static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl, - uint32_t *zr); -static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc, - const enum wcd9xxx_current_v_idx idx); -static void wcd9xxx_get_z(struct wcd9xxx_mbhc *mbhc, s16 *dce_z, s16 *sta_z, - struct mbhc_micbias_regs *micb_regs, - bool norel); - -static void wcd9xxx_mbhc_calc_thres(struct wcd9xxx_mbhc *mbhc); - -static u16 wcd9xxx_codec_v_sta_dce(struct wcd9xxx_mbhc *mbhc, - enum meas_type dce, s16 vin_mv, - bool cs_enable); - -static bool wcd9xxx_mbhc_polling(struct wcd9xxx_mbhc *mbhc) -{ - return snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_EN_CTL) & 0x1; -} - -static void wcd9xxx_turn_onoff_override(struct wcd9xxx_mbhc *mbhc, bool on) -{ - struct snd_soc_codec *codec = mbhc->codec; - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, - 0x04, on ? 
0x04 : 0x00); -} - -/* called under codec_resource_lock acquisition */ -static void wcd9xxx_pause_hs_polling(struct wcd9xxx_mbhc *mbhc) -{ - struct snd_soc_codec *codec = mbhc->codec; - - pr_debug("%s: enter\n", __func__); - if (!mbhc->polling_active) { - pr_debug("polling not active, nothing to pause\n"); - return; - } - - /* Soft reset MBHC block */ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8); - pr_debug("%s: leave\n", __func__); -} - -/* called under codec_resource_lock acquisition */ -static void wcd9xxx_start_hs_polling(struct wcd9xxx_mbhc *mbhc) -{ - struct snd_soc_codec *codec = mbhc->codec; - int mbhc_state = mbhc->mbhc_state; - - pr_debug("%s: enter\n", __func__); - if (!mbhc->polling_active) { - pr_debug("Polling is not active, do not start polling\n"); - return; - } - - /* - * setup internal micbias if codec uses internal micbias for - * headset detection - */ - if (mbhc->mbhc_cfg->use_int_rbias) { - if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias) - mbhc->mbhc_cb->setup_int_rbias(codec, true); - else - pr_err("%s: internal bias requested but codec did not provide callback\n", - __func__); - } - - snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04); - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block) - mbhc->mbhc_cb->enable_mux_bias_block(codec); - else - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, - 0x80, 0x80); - - if (!mbhc->no_mic_headset_override && - mbhc_state == MBHC_STATE_POTENTIAL) { - pr_debug("%s recovering MBHC state machine\n", __func__); - mbhc->mbhc_state = MBHC_STATE_POTENTIAL_RECOVERY; - /* set to max button press threshold */ - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL, 0x7F); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL, 0x7F); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, 0xFF); - /* set to max */ - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL, 0x7F); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, 0xFF); - } - - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x0); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x1); - pr_debug("%s: leave\n", __func__); -} - -static int __wcd9xxx_resmgr_get_k_val(struct wcd9xxx_mbhc *mbhc, - unsigned int cfilt_mv) -{ - return wcd9xxx_resmgr_get_k_val(mbhc->resmgr, cfilt_mv); -} - -/* - * called under codec_resource_lock acquisition - * return old status - */ -static bool __wcd9xxx_switch_micbias(struct wcd9xxx_mbhc *mbhc, - int vddio_switch, bool restartpolling, - bool checkpolling) -{ - bool ret; - int cfilt_k_val; - bool override; - struct snd_soc_codec *codec; - struct mbhc_internal_cal_data *d = &mbhc->mbhc_data; - - codec = mbhc->codec; - - if (mbhc->micbias_enable) { - pr_debug("%s: micbias is already on\n", __func__); - ret = mbhc->mbhc_micbias_switched; - return ret; - } - - ret = mbhc->mbhc_micbias_switched; - if (vddio_switch && !mbhc->mbhc_micbias_switched && - (!checkpolling || mbhc->polling_active)) { - if (restartpolling) - wcd9xxx_pause_hs_polling(mbhc); - override = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & - 0x04; - if (!override) - wcd9xxx_turn_onoff_override(mbhc, true); - - snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, - 0x10, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1, - 0x20, 0x00); - /* Adjust threshold if Mic Bias voltage changes */ - if (d->micb_mv != VDDIO_MICBIAS_MV) { - cfilt_k_val = __wcd9xxx_resmgr_get_k_val(mbhc, - VDDIO_MICBIAS_MV); - 
usleep_range(10000, 10100); - snd_soc_update_bits(codec, - mbhc->mbhc_bias_regs.cfilt_val, - 0xFC, (cfilt_k_val << 2)); - usleep_range(10000, 10100); - /* Threshods for insertion/removal */ - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, - d->v_ins_hu[MBHC_V_IDX_VDDIO] & 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL, - (d->v_ins_hu[MBHC_V_IDX_VDDIO] >> 8) & - 0xFF); - - if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) { - /* Threshods for button press */ - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, - d->v_b1_hu[MBHC_V_IDX_VDDIO] & 0xFF); - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL, - (d->v_b1_hu[MBHC_V_IDX_VDDIO] >> 8) & - 0xFF); - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, - d->v_b1_h[MBHC_V_IDX_VDDIO] & 0xFF); - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL, - (d->v_b1_h[MBHC_V_IDX_VDDIO] >> 8) & - 0xFF); - /* Threshods for button release */ - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, - d->v_brh[MBHC_V_IDX_VDDIO] & 0xFF); - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL, - (d->v_brh[MBHC_V_IDX_VDDIO] >> 8) & - 0xFF); - } - pr_debug("%s: Programmed MBHC thresholds to VDDIO\n", - __func__); - } - - /* Enable MIC BIAS Switch to VDDIO */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, - 0x80, 0x80); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, - 0x10, 0x00); - if (!override) - wcd9xxx_turn_onoff_override(mbhc, false); - if (restartpolling) - wcd9xxx_start_hs_polling(mbhc); - - mbhc->mbhc_micbias_switched = true; - pr_debug("%s: VDDIO switch enabled\n", __func__); - } else if (!vddio_switch && mbhc->mbhc_micbias_switched) { - if ((!checkpolling || mbhc->polling_active) && - restartpolling) - wcd9xxx_pause_hs_polling(mbhc); - - snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, - 0x10, 0x10); - snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1, - 0x20, 0x20); - /* Reprogram thresholds */ - if (d->micb_mv != VDDIO_MICBIAS_MV) { - cfilt_k_val = - __wcd9xxx_resmgr_get_k_val(mbhc, - d->micb_mv); - snd_soc_update_bits(codec, - mbhc->mbhc_bias_regs.cfilt_val, - 0xFC, (cfilt_k_val << 2)); - usleep_range(10000, 10100); - /* Revert threshods for insertion/removal */ - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, - d->v_ins_hu[MBHC_V_IDX_CFILT] & 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL, - (d->v_ins_hu[MBHC_V_IDX_CFILT] >> 8) & - 0xFF); - if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) { - /* Revert threshods for button press */ - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, - d->v_b1_hu[MBHC_V_IDX_CFILT] & 0xFF); - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL, - (d->v_b1_hu[MBHC_V_IDX_CFILT] >> 8) & - 0xFF); - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, - d->v_b1_h[MBHC_V_IDX_CFILT] & 0xFF); - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL, - (d->v_b1_h[MBHC_V_IDX_CFILT] >> 8) & - 0xFF); - /* Revert threshods for button release */ - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, - d->v_brh[MBHC_V_IDX_CFILT] & 0xFF); - snd_soc_write(codec, - WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL, - (d->v_brh[MBHC_V_IDX_CFILT] >> 8) & - 0xFF); - } - pr_debug("%s: Programmed MBHC thresholds to MICBIAS\n", - __func__); - } - - /* Disable MIC BIAS Switch to VDDIO */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x80, - 0x00); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x10, - 0x00); - - if ((!checkpolling || mbhc->polling_active) && restartpolling) - wcd9xxx_start_hs_polling(mbhc); - - 
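
The threshold programming above repeatedly splits a signed 16-bit value across a low/high register pair with "v & 0xFF" and "(v >> 8) & 0xFF" (the STA/DCE results are later reassembled the opposite way). A small standalone sketch of the split and recombine, assuming the usual two's-complement behaviour; the register numbers are illustrative.

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[256];

/* A 16-bit threshold spans a low/high register pair: low byte first,
 * then the upper byte, matching the writes above. */
static void write_threshold(uint8_t lo_reg, uint8_t hi_reg, int16_t v)
{
    regs[lo_reg] = (uint8_t)(v & 0xFF);
    regs[hi_reg] = (uint8_t)((v >> 8) & 0xFF);
}

static int16_t read_threshold(uint8_t lo_reg, uint8_t hi_reg)
{
    return (int16_t)((regs[hi_reg] << 8) | regs[lo_reg]);
}

int main(void)
{
    write_threshold(0x2A, 0x2B, -300);          /* thresholds may be negative */
    printf("%d\n", read_threshold(0x2A, 0x2B)); /* -300 */
    return 0;
}
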
mbhc->mbhc_micbias_switched = false; - pr_debug("%s: VDDIO switch disabled\n", __func__); - } - - return ret; -} - -static void wcd9xxx_switch_micbias(struct wcd9xxx_mbhc *mbhc, int vddio_switch) -{ - __wcd9xxx_switch_micbias(mbhc, vddio_switch, true, true); -} - -static s16 wcd9xxx_get_current_v(struct wcd9xxx_mbhc *mbhc, - const enum wcd9xxx_current_v_idx idx) -{ - enum mbhc_v_index vidx; - s16 ret = -EINVAL; - - if ((mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) && - mbhc->mbhc_micbias_switched) - vidx = MBHC_V_IDX_VDDIO; - else - vidx = MBHC_V_IDX_CFILT; - - switch (idx) { - case WCD9XXX_CURRENT_V_INS_H: - ret = (s16)mbhc->mbhc_data.v_ins_h[vidx]; - break; - case WCD9XXX_CURRENT_V_INS_HU: - ret = (s16)mbhc->mbhc_data.v_ins_hu[vidx]; - break; - case WCD9XXX_CURRENT_V_B1_H: - ret = (s16)mbhc->mbhc_data.v_b1_h[vidx]; - break; - case WCD9XXX_CURRENT_V_B1_HU: - ret = (s16)mbhc->mbhc_data.v_b1_hu[vidx]; - break; - case WCD9XXX_CURRENT_V_BR_H: - ret = (s16)mbhc->mbhc_data.v_brh[vidx]; - break; - } - - return ret; -} - -void *wcd9xxx_mbhc_cal_btn_det_mp( - const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det, - const enum wcd9xxx_mbhc_btn_det_mem mem) -{ - void *ret = (void *)&btn_det->_v_btn_low; - - switch (mem) { - case MBHC_BTN_DET_GAIN: - ret += sizeof(btn_det->_n_cic); - /* fallthrough */ - case MBHC_BTN_DET_N_CIC: - ret += sizeof(btn_det->_n_ready); - /* fallthrough */ - case MBHC_BTN_DET_N_READY: - ret += sizeof(btn_det->_v_btn_high[0]) * btn_det->num_btn; - /* fallthrough */ - case MBHC_BTN_DET_V_BTN_HIGH: - ret += sizeof(btn_det->_v_btn_low[0]) * btn_det->num_btn; - /* fallthrough */ - case MBHC_BTN_DET_V_BTN_LOW: - /* do nothing */ - break; - default: - ret = NULL; - } - - return ret; -} -EXPORT_SYMBOL(wcd9xxx_mbhc_cal_btn_det_mp); - -static void wcd9xxx_calibrate_hs_polling(struct wcd9xxx_mbhc *mbhc) -{ - struct snd_soc_codec *codec = mbhc->codec; - const s16 v_ins_hu = wcd9xxx_get_current_v(mbhc, - WCD9XXX_CURRENT_V_INS_HU); - const s16 v_b1_hu = wcd9xxx_get_current_v(mbhc, - WCD9XXX_CURRENT_V_B1_HU); - const s16 v_b1_h = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H); - const s16 v_brh = wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H); - - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B1_CTL, v_ins_hu & 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B2_CTL, - (v_ins_hu >> 8) & 0xFF); - - if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) { - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu & - 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL, - (v_b1_hu >> 8) & 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B5_CTL, v_b1_h & - 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B6_CTL, - (v_b1_h >> 8) & 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh & - 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL, - (v_brh >> 8) & 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B11_CTL, - mbhc->mbhc_data.v_brl & 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B12_CTL, - (mbhc->mbhc_data.v_brl >> 8) & 0xFF); - } -} - -static void wcd9xxx_codec_switch_cfilt_mode(struct wcd9xxx_mbhc *mbhc, - bool fast) -{ - struct snd_soc_codec *codec = mbhc->codec; - struct wcd9xxx_cfilt_mode cfilt_mode; - - if (mbhc->mbhc_cb && mbhc->mbhc_cb->switch_cfilt_mode) { - cfilt_mode = mbhc->mbhc_cb->switch_cfilt_mode(mbhc, fast); - } else { - if (fast) - cfilt_mode.reg_mode_val = WCD9XXX_CFILT_FAST_MODE; - else - cfilt_mode.reg_mode_val = WCD9XXX_CFILT_SLOW_MODE; - - cfilt_mode.reg_mask = 0x40; - cfilt_mode.cur_mode_val = - 
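
wcd9xxx_mbhc_cal_btn_det_mp() above locates a member inside the packed button-calibration blob by starting at the lowest member and letting deliberate switch fall-through add the sizes of every member that precedes the requested one. A standalone sketch of that offset computation; the member order matches the logic above, but the element widths and counts here are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

enum btn_det_mem {
    BTN_DET_V_BTN_LOW,
    BTN_DET_V_BTN_HIGH,
    BTN_DET_N_READY,
    BTN_DET_N_CIC,
    BTN_DET_GAIN,
};

/* Assumed blob layout (low address first):
 *   v_btn_low[num_btn] | v_btn_high[num_btn] | n_ready[2] | n_cic[2] | gain
 * Each case adds the size of one preceding section, then falls through. */
static void *btn_det_member(uint8_t *blob, unsigned int num_btn,
                            enum btn_det_mem mem)
{
    uint8_t *p = blob;

    switch (mem) {
    case BTN_DET_GAIN:
        p += 2 * sizeof(uint8_t);           /* skip n_cic[]      */
        /* fall through */
    case BTN_DET_N_CIC:
        p += 2 * sizeof(uint8_t);           /* skip n_ready[]    */
        /* fall through */
    case BTN_DET_N_READY:
        p += num_btn * sizeof(uint16_t);    /* skip v_btn_high[] */
        /* fall through */
    case BTN_DET_V_BTN_HIGH:
        p += num_btn * sizeof(uint16_t);    /* skip v_btn_low[]  */
        /* fall through */
    case BTN_DET_V_BTN_LOW:
        break;
    }
    return p;
}

int main(void)
{
    uint8_t blob[64] = { 0 };
    uint8_t *gain = btn_det_member(blob, 4, BTN_DET_GAIN);

    /* 4*2 (v_btn_low) + 4*2 (v_btn_high) + 2 (n_ready) + 2 (n_cic) = 20 */
    printf("gain offset = %ld\n", (long)(gain - blob));
    return 0;
}
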
snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl) & 0x40; - } - - if (cfilt_mode.cur_mode_val - != cfilt_mode.reg_mode_val) { - if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc)) - wcd9xxx_pause_hs_polling(mbhc); - snd_soc_update_bits(codec, - mbhc->mbhc_bias_regs.cfilt_ctl, - cfilt_mode.reg_mask, - cfilt_mode.reg_mode_val); - if (mbhc->polling_active && wcd9xxx_mbhc_polling(mbhc)) - wcd9xxx_start_hs_polling(mbhc); - pr_debug("%s: CFILT mode change (%x to %x)\n", __func__, - cfilt_mode.cur_mode_val, - cfilt_mode.reg_mode_val); - } else { - pr_debug("%s: CFILT Value is already %x\n", - __func__, cfilt_mode.cur_mode_val); - } -} - -static void wcd9xxx_jack_report(struct wcd9xxx_mbhc *mbhc, - struct snd_soc_jack *jack, int status, int mask) -{ - if (jack == &mbhc->headset_jack) { - wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr, - WCD9XXX_COND_HPH_MIC, - status & SND_JACK_MICROPHONE); - wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr, - WCD9XXX_COND_HPH, - status & SND_JACK_HEADPHONE); - } - - snd_soc_jack_report(jack, status, mask); -} - -static void __hphocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status, - int irq) -{ - struct snd_soc_codec *codec; - - pr_debug("%s: clear ocp status %x\n", __func__, jack_status); - codec = mbhc->codec; - if (mbhc->hph_status & jack_status) { - mbhc->hph_status &= ~jack_status; - wcd9xxx_jack_report(mbhc, &mbhc->headset_jack, - mbhc->hph_status, WCD9XXX_JACK_MASK); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10, - 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10, - 0x10); - /* - * reset retry counter as PA is turned off signifying - * start of new OCP detection session - */ - if (mbhc->intr_ids->hph_left_ocp) - mbhc->hphlocp_cnt = 0; - else - mbhc->hphrocp_cnt = 0; - wcd9xxx_enable_irq(mbhc->resmgr->core_res, irq); - } -} - -static void hphrocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status) -{ - __hphocp_off_report(mbhc, SND_JACK_OC_HPHR, - mbhc->intr_ids->hph_right_ocp); -} - -static void hphlocp_off_report(struct wcd9xxx_mbhc *mbhc, u32 jack_status) -{ - __hphocp_off_report(mbhc, SND_JACK_OC_HPHL, - mbhc->intr_ids->hph_left_ocp); -} - -static void wcd9xxx_get_mbhc_micbias_regs(struct wcd9xxx_mbhc *mbhc, - enum wcd9xxx_mbhc_micbias_type mb_type) -{ - unsigned int cfilt; - struct wcd9xxx_micbias_setting *micbias_pdata = - mbhc->resmgr->micbias_pdata; - struct mbhc_micbias_regs *micbias_regs; - enum wcd9xxx_micbias_num mb_num; - - if (mb_type == MBHC_ANC_MIC_MB) { - micbias_regs = &mbhc->mbhc_anc_bias_regs; - mb_num = mbhc->mbhc_cfg->anc_micbias; - } else { - micbias_regs = &mbhc->mbhc_bias_regs; - mb_num = mbhc->mbhc_cfg->micbias; - } - - switch (mb_num) { - case MBHC_MICBIAS1: - cfilt = micbias_pdata->bias1_cfilt_sel; - micbias_regs->mbhc_reg = WCD9XXX_A_MICB_1_MBHC; - micbias_regs->int_rbias = WCD9XXX_A_MICB_1_INT_RBIAS; - micbias_regs->ctl_reg = WCD9XXX_A_MICB_1_CTL; - break; - case MBHC_MICBIAS2: - cfilt = micbias_pdata->bias2_cfilt_sel; - micbias_regs->mbhc_reg = WCD9XXX_A_MICB_2_MBHC; - micbias_regs->int_rbias = WCD9XXX_A_MICB_2_INT_RBIAS; - micbias_regs->ctl_reg = WCD9XXX_A_MICB_2_CTL; - break; - case MBHC_MICBIAS3: - cfilt = micbias_pdata->bias3_cfilt_sel; - micbias_regs->mbhc_reg = WCD9XXX_A_MICB_3_MBHC; - micbias_regs->int_rbias = WCD9XXX_A_MICB_3_INT_RBIAS; - micbias_regs->ctl_reg = WCD9XXX_A_MICB_3_CTL; - break; - case MBHC_MICBIAS4: - cfilt = micbias_pdata->bias4_cfilt_sel; - micbias_regs->mbhc_reg = mbhc->resmgr->reg_addr->micb_4_mbhc; - micbias_regs->int_rbias = - 
mbhc->resmgr->reg_addr->micb_4_int_rbias; - micbias_regs->ctl_reg = mbhc->resmgr->reg_addr->micb_4_ctl; - break; - default: - /* Should never reach here */ - pr_err("%s: Invalid MIC BIAS for MBHC\n", __func__); - return; - } - - micbias_regs->cfilt_sel = cfilt; - - switch (cfilt) { - case WCD9XXX_CFILT1_SEL: - micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_1_VAL; - micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_1_CTL; - break; - case WCD9XXX_CFILT2_SEL: - micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_2_VAL; - micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_2_CTL; - break; - case WCD9XXX_CFILT3_SEL: - micbias_regs->cfilt_val = WCD9XXX_A_MICB_CFILT_3_VAL; - micbias_regs->cfilt_ctl = WCD9XXX_A_MICB_CFILT_3_CTL; - break; - } - - if (mb_type == MBHC_PRIMARY_MIC_MB) { - switch (cfilt) { - case WCD9XXX_CFILT1_SEL: - mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt1_mv; - break; - case WCD9XXX_CFILT2_SEL: - mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt2_mv; - break; - case WCD9XXX_CFILT3_SEL: - mbhc->mbhc_data.micb_mv = micbias_pdata->cfilt3_mv; - break; - } - } - -} - -static void wcd9xxx_clr_and_turnon_hph_padac(struct wcd9xxx_mbhc *mbhc) -{ - bool pa_turned_on = false; - struct snd_soc_codec *codec = mbhc->codec; - u8 wg_time; - - wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME); - wg_time += 1; - - if (test_and_clear_bit(WCD9XXX_HPHR_DAC_OFF_ACK, - &mbhc->hph_pa_dac_state)) { - pr_debug("%s: HPHR clear flag and enable DAC\n", __func__); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL, - 0xC0, 0xC0); - } - if (test_and_clear_bit(WCD9XXX_HPHL_DAC_OFF_ACK, - &mbhc->hph_pa_dac_state)) { - pr_debug("%s: HPHL clear flag and enable DAC\n", __func__); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL, - 0x80, 0x80); - } - - if (test_and_clear_bit(WCD9XXX_HPHR_PA_OFF_ACK, - &mbhc->hph_pa_dac_state)) { - pr_debug("%s: HPHR clear flag and enable PA\n", __func__); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x10, - 1 << 4); - pa_turned_on = true; - } - if (test_and_clear_bit(WCD9XXX_HPHL_PA_OFF_ACK, - &mbhc->hph_pa_dac_state)) { - pr_debug("%s: HPHL clear flag and enable PA\n", __func__); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x20, 1 - << 5); - pa_turned_on = true; - } - - if (pa_turned_on) { - pr_debug("%s: PA was turned on by MBHC and not by DAPM\n", - __func__); - usleep_range(wg_time * 1000, wg_time * 1000 + 50); - } -} - -static int wcd9xxx_cancel_btn_work(struct wcd9xxx_mbhc *mbhc) -{ - int r; - - r = cancel_delayed_work_sync(&mbhc->mbhc_btn_dwork); - if (r) - /* if scheduled mbhc.mbhc_btn_dwork is canceled from here, - * we have to unlock from here instead btn_work - */ - wcd9xxx_unlock_sleep(mbhc->resmgr->core_res); - return r; -} - -static bool wcd9xxx_is_hph_dac_on(struct snd_soc_codec *codec, int left) -{ - u8 hph_reg_val = 0; - - if (left) - hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL); - else - hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL); - - return (hph_reg_val & 0xC0) ? true : false; -} - -static bool wcd9xxx_is_hph_pa_on(struct snd_soc_codec *codec) -{ - u8 hph_reg_val = 0; - - hph_reg_val = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_EN); - - return (hph_reg_val & 0x30) ? 
true : false; -} - -/* called under codec_resource_lock acquisition */ -static void wcd9xxx_set_and_turnoff_hph_padac(struct wcd9xxx_mbhc *mbhc) -{ - u8 wg_time; - struct snd_soc_codec *codec = mbhc->codec; - - wg_time = snd_soc_read(codec, WCD9XXX_A_RX_HPH_CNP_WG_TIME); - wg_time += 1; - - /* If headphone PA is on, check if userspace receives - * removal event to sync-up PA's state - */ - if (wcd9xxx_is_hph_pa_on(codec)) { - pr_debug("%s PA is on, setting PA_OFF_ACK\n", __func__); - set_bit(WCD9XXX_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state); - set_bit(WCD9XXX_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state); - } else { - pr_debug("%s PA is off\n", __func__); - } - - if (wcd9xxx_is_hph_dac_on(codec, 1)) - set_bit(WCD9XXX_HPHL_DAC_OFF_ACK, &mbhc->hph_pa_dac_state); - if (wcd9xxx_is_hph_dac_on(codec, 0)) - set_bit(WCD9XXX_HPHR_DAC_OFF_ACK, &mbhc->hph_pa_dac_state); - - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_CNP_EN, 0x30, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_L_DAC_CTL, 0x80, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_R_DAC_CTL, 0xC0, 0x00); - usleep_range(wg_time * 1000, wg_time * 1000 + 50); -} - -static void wcd9xxx_insert_detect_setup(struct wcd9xxx_mbhc *mbhc, bool ins) -{ - if (!mbhc->mbhc_cfg->insert_detect) - return; - pr_debug("%s: Setting up %s detection\n", __func__, - ins ? "insert" : "removal"); - /* Disable detection to avoid glitch */ - snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, 1, 0); - if (mbhc->mbhc_cfg->gpio_level_insert) - snd_soc_write(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, - (0x68 | (ins ? (1 << 1) : 0))); - else - snd_soc_write(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, - (0x6C | (ins ? (1 << 1) : 0))); - /* Re-enable detection */ - snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MBHC_INSERT_DETECT, 1, 1); -} - -/* called under codec_resource_lock acquisition */ -static void wcd9xxx_report_plug(struct wcd9xxx_mbhc *mbhc, int insertion, - enum snd_jack_types jack_type) -{ - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - pr_debug("%s: enter insertion %d hph_status %x\n", - __func__, insertion, mbhc->hph_status); - if (!insertion) { - /* Report removal */ - mbhc->hph_status &= ~jack_type; - /* - * cancel possibly scheduled btn work and - * report release if we reported button press - */ - if (wcd9xxx_cancel_btn_work(mbhc)) - pr_debug("%s: button press is canceled\n", __func__); - else if (mbhc->buttons_pressed) { - pr_debug("%s: release of button press%d\n", - __func__, jack_type); - wcd9xxx_jack_report(mbhc, &mbhc->button_jack, 0, - mbhc->buttons_pressed); - mbhc->buttons_pressed &= - ~WCD9XXX_JACK_BUTTON_MASK; - } - - if (mbhc->micbias_enable && mbhc->micbias_enable_cb) { - pr_debug("%s: Disabling micbias\n", __func__); - mbhc->micbias_enable = false; - mbhc->micbias_enable_cb(mbhc->codec, false, - mbhc->mbhc_cfg->micbias); - } - mbhc->zl = mbhc->zr = 0; - mbhc->hph_type = MBHC_HPH_NONE; - pr_debug("%s: Reporting removal %d(%x)\n", __func__, - jack_type, mbhc->hph_status); - wcd9xxx_jack_report(mbhc, &mbhc->headset_jack, mbhc->hph_status, - WCD9XXX_JACK_MASK); - wcd9xxx_set_and_turnoff_hph_padac(mbhc); - hphrocp_off_report(mbhc, SND_JACK_OC_HPHR); - hphlocp_off_report(mbhc, SND_JACK_OC_HPHL); - mbhc->current_plug = PLUG_TYPE_NONE; - mbhc->polling_active = false; - if (mbhc->mbhc_cb && mbhc->mbhc_cb->hph_auto_pulldown_ctrl) - mbhc->mbhc_cb->hph_auto_pulldown_ctrl(mbhc->codec, - false); - } else { - /* - * Report removal of current jack type. - * Headphone to headset shouldn't report headphone - * removal. 
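
wcd9xxx_set_and_turnoff_hph_padac() above records which PA/DAC blocks it force-disabled by setting the *_OFF_ACK bits, and wcd9xxx_clr_and_turnon_hph_padac() re-enables only the blocks whose bits test_and_clear_bit() reports as set, so blocks that were never on are left alone. A standalone sketch of that acknowledge-flag handshake, using non-atomic stand-ins for the kernel bit ops; names are illustrative.

#include <stdio.h>

/* Flag bits recording what the detection code switched off on its own,
 * so only those blocks are switched back on afterwards. */
enum { HPHL_PA_OFF_ACK, HPHR_PA_OFF_ACK, HPHL_DAC_OFF_ACK, HPHR_DAC_OFF_ACK };

static unsigned long pa_dac_state;

/* Non-atomic stand-ins for set_bit()/test_and_clear_bit(). */
static void flag_set(int bit)
{
    pa_dac_state |= 1UL << bit;
}

static int flag_test_clear(int bit)
{
    int was_set = (pa_dac_state >> bit) & 1;

    pa_dac_state &= ~(1UL << bit);
    return was_set;
}

int main(void)
{
    /* Force-off path: only the PAs were on, the DACs were not. */
    flag_set(HPHL_PA_OFF_ACK);
    flag_set(HPHR_PA_OFF_ACK);

    /* Restore path: re-enable only what was turned off here. */
    if (flag_test_clear(HPHL_PA_OFF_ACK))
        printf("re-enable HPHL PA\n");
    if (flag_test_clear(HPHL_DAC_OFF_ACK))
        printf("re-enable HPHL DAC\n");     /* not printed */
    return 0;
}
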
- */ - if (mbhc->mbhc_cfg->detect_extn_cable && - !(mbhc->current_plug == PLUG_TYPE_HEADPHONE && - jack_type == SND_JACK_HEADSET) && - (mbhc->hph_status && mbhc->hph_status != jack_type)) { - if (mbhc->micbias_enable && mbhc->micbias_enable_cb && - mbhc->hph_status == SND_JACK_HEADSET) { - pr_debug("%s: Disabling micbias\n", __func__); - mbhc->micbias_enable = false; - mbhc->micbias_enable_cb(mbhc->codec, false, - mbhc->mbhc_cfg->micbias); - } - - pr_debug("%s: Reporting removal (%x)\n", - __func__, mbhc->hph_status); - mbhc->zl = mbhc->zr = 0; - wcd9xxx_jack_report(mbhc, &mbhc->headset_jack, - 0, WCD9XXX_JACK_MASK); - mbhc->hph_status &= ~(SND_JACK_HEADSET | - SND_JACK_LINEOUT | - SND_JACK_ANC_HEADPHONE | - SND_JACK_UNSUPPORTED); - if (mbhc->mbhc_cb && - mbhc->mbhc_cb->hph_auto_pulldown_ctrl) - mbhc->mbhc_cb->hph_auto_pulldown_ctrl( - mbhc->codec, - false); - } - - /* Report insertion */ - if (jack_type == SND_JACK_HEADPHONE) { - mbhc->current_plug = PLUG_TYPE_HEADPHONE; - } else if (jack_type == SND_JACK_UNSUPPORTED) { - mbhc->current_plug = PLUG_TYPE_GND_MIC_SWAP; - } else if (jack_type == SND_JACK_HEADSET) { - mbhc->polling_active = BUTTON_POLLING_SUPPORTED; - mbhc->current_plug = PLUG_TYPE_HEADSET; - mbhc->update_z = true; - } else if (jack_type == SND_JACK_LINEOUT) { - mbhc->current_plug = PLUG_TYPE_HIGH_HPH; - } else if (jack_type == SND_JACK_ANC_HEADPHONE) { - mbhc->polling_active = BUTTON_POLLING_SUPPORTED; - mbhc->current_plug = PLUG_TYPE_ANC_HEADPHONE; - } - - if (mbhc->impedance_detect && impedance_detect_en) { - wcd9xxx_detect_impedance(mbhc, - &mbhc->zl, &mbhc->zr); - if ((mbhc->zl > WCD9XXX_LINEIN_THRESHOLD) && - (mbhc->zr > WCD9XXX_LINEIN_THRESHOLD)) { - jack_type = SND_JACK_LINEOUT; - mbhc->current_plug = PLUG_TYPE_HIGH_HPH; - pr_debug("%s: Replace with SND_JACK_LINEOUT\n", - __func__); - } - } - - mbhc->hph_status |= jack_type; - - if (mbhc->micbias_enable && mbhc->micbias_enable_cb) { - pr_debug("%s: Enabling micbias\n", __func__); - mbhc->micbias_enable_cb(mbhc->codec, true, - mbhc->mbhc_cfg->micbias); - } - - pr_debug("%s: Reporting insertion %d(%x)\n", __func__, - jack_type, mbhc->hph_status); - wcd9xxx_jack_report(mbhc, &mbhc->headset_jack, - (mbhc->hph_status | SND_JACK_MECHANICAL), - WCD9XXX_JACK_MASK); - /* - * if PA is already on, switch micbias - * source to VDDIO - */ - if (((mbhc->current_plug == PLUG_TYPE_HEADSET) || - (mbhc->current_plug == PLUG_TYPE_ANC_HEADPHONE)) && - ((mbhc->event_state & (1 << MBHC_EVENT_PA_HPHL | - 1 << MBHC_EVENT_PA_HPHR)))) - __wcd9xxx_switch_micbias(mbhc, 1, false, - false); - wcd9xxx_clr_and_turnon_hph_padac(mbhc); - } - /* Setup insert detect */ - wcd9xxx_insert_detect_setup(mbhc, !insertion); - - pr_debug("%s: leave hph_status %x\n", __func__, mbhc->hph_status); -} - -/* should be called under interrupt context that hold suspend */ -static void wcd9xxx_schedule_hs_detect_plug(struct wcd9xxx_mbhc *mbhc, - struct work_struct *work) -{ - pr_debug("%s: scheduling wcd9xxx_correct_swch_plug\n", __func__); - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - mbhc->hs_detect_work_stop = false; - wcd9xxx_lock_sleep(mbhc->resmgr->core_res); - schedule_work(work); -} - -/* called under codec_resource_lock acquisition */ -static void wcd9xxx_cancel_hs_detect_plug(struct wcd9xxx_mbhc *mbhc, - struct work_struct *work) -{ - pr_debug("%s: Canceling correct_plug_swch\n", __func__); - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - mbhc->hs_detect_work_stop = true; - - /* Make sure mbhc state update complete before unlocking. 
*/ - wmb(); - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - if (cancel_work_sync(work)) { - pr_debug("%s: correct_plug_swch is canceled\n", - __func__); - wcd9xxx_unlock_sleep(mbhc->resmgr->core_res); - } - WCD9XXX_BCL_LOCK(mbhc->resmgr); -} - -static s16 scale_v_micb_vddio(struct wcd9xxx_mbhc *mbhc, int v, bool tovddio) -{ - int r; - int vddio_k, mb_k; - - vddio_k = __wcd9xxx_resmgr_get_k_val(mbhc, VDDIO_MICBIAS_MV); - mb_k = __wcd9xxx_resmgr_get_k_val(mbhc, mbhc->mbhc_data.micb_mv); - if (tovddio) - r = v * (vddio_k + 4) / (mb_k + 4); - else - r = v * (mb_k + 4) / (vddio_k + 4); - return r; -} - -static s16 wcd9xxx_get_current_v_hs_max(struct wcd9xxx_mbhc *mbhc) -{ - s16 v_hs_max; - struct wcd9xxx_mbhc_plug_type_cfg *plug_type; - - plug_type = WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration); - if ((mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) && - mbhc->mbhc_micbias_switched) - v_hs_max = scale_v_micb_vddio(mbhc, plug_type->v_hs_max, true); - else - v_hs_max = plug_type->v_hs_max; - return v_hs_max; -} - -static short wcd9xxx_read_sta_result(struct snd_soc_codec *codec) -{ - u8 bias_msb, bias_lsb; - short bias_value; - - bias_msb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B3_STATUS); - bias_lsb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B2_STATUS); - bias_value = (bias_msb << 8) | bias_lsb; - return bias_value; -} - -static short wcd9xxx_read_dce_result(struct snd_soc_codec *codec) -{ - u8 bias_msb, bias_lsb; - short bias_value; - - bias_msb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B5_STATUS); - bias_lsb = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B4_STATUS); - bias_value = (bias_msb << 8) | bias_lsb; - return bias_value; -} - -static void wcd9xxx_turn_onoff_rel_detection(struct snd_soc_codec *codec, - bool on) -{ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x02, on << 1); -} - -static short __wcd9xxx_codec_sta_dce(struct wcd9xxx_mbhc *mbhc, int dce, - bool override_bypass, bool noreldetection) -{ - short bias_value; - struct snd_soc_codec *codec = mbhc->codec; - - wcd9xxx_disable_irq(mbhc->resmgr->core_res, - mbhc->intr_ids->dce_est_complete); - if (noreldetection) - wcd9xxx_turn_onoff_rel_detection(codec, false); - - if (mbhc->mbhc_cfg->do_recalibration) - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, - 0x0); - /* Turn on the override */ - if (!override_bypass) - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x4, 0x4); - if (dce) { - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, - 0x8); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, - 0x0); - if (mbhc->mbhc_cfg->do_recalibration) - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, - 0x2, 0x2); - usleep_range(mbhc->mbhc_data.t_sta_dce, - mbhc->mbhc_data.t_sta_dce + 50); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x4); - usleep_range(mbhc->mbhc_data.t_dce, mbhc->mbhc_data.t_dce + 50); - bias_value = wcd9xxx_read_dce_result(codec); - } else { - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, - 0x8); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, - 0x0); - if (mbhc->mbhc_cfg->do_recalibration) - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, - 0x2, 0x2); - usleep_range(mbhc->mbhc_data.t_sta_dce, - mbhc->mbhc_data.t_sta_dce + 50); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2); - usleep_range(mbhc->mbhc_data.t_sta, - mbhc->mbhc_data.t_sta + 50); - bias_value = wcd9xxx_read_sta_result(codec); - snd_soc_update_bits(codec, 
WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, - 0x8); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x0); - } - /* Turn off the override after measuring mic voltage */ - if (!override_bypass) - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04, - 0x00); - - if (noreldetection) - wcd9xxx_turn_onoff_rel_detection(codec, true); - wcd9xxx_enable_irq(mbhc->resmgr->core_res, - mbhc->intr_ids->dce_est_complete); - - return bias_value; -} - -static short wcd9xxx_codec_sta_dce(struct wcd9xxx_mbhc *mbhc, int dce, - bool norel) -{ - bool override_bypass; - - /* Bypass override if it is already enabled */ - override_bypass = (snd_soc_read(mbhc->codec, - WCD9XXX_A_CDC_MBHC_B1_CTL) & - 0x04) ? true : false; - - return __wcd9xxx_codec_sta_dce(mbhc, dce, override_bypass, norel); -} - -static s32 __wcd9xxx_codec_sta_dce_v(struct wcd9xxx_mbhc *mbhc, s8 dce, - u16 bias_value, s16 z, u32 micb_mv) -{ - s16 value, mb; - s32 mv = 0; - - value = bias_value; - if (dce) { - mb = (mbhc->mbhc_data.dce_mb); - if (mb - z) - mv = (value - z) * (s32)micb_mv / (mb - z); - } else { - mb = (mbhc->mbhc_data.sta_mb); - if (mb - z) - mv = (value - z) * (s32)micb_mv / (mb - z); - } - - return mv; -} - -static s32 wcd9xxx_codec_sta_dce_v(struct wcd9xxx_mbhc *mbhc, s8 dce, - u16 bias_value) -{ - s16 z; - - z = dce ? (s16)mbhc->mbhc_data.dce_z : (s16)mbhc->mbhc_data.sta_z; - return __wcd9xxx_codec_sta_dce_v(mbhc, dce, bias_value, z, - mbhc->mbhc_data.micb_mv); -} - -/* To enable/disable bandgap and RC oscillator */ -static void wcd9xxx_mbhc_ctrl_clk_bandgap(struct wcd9xxx_mbhc *mbhc, - bool enable) -{ - if (enable) { - WCD9XXX_BG_CLK_LOCK(mbhc->resmgr); - wcd9xxx_resmgr_get_bandgap(mbhc->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) { - WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr); - mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, true); - } else { - wcd9xxx_resmgr_get_clk_block(mbhc->resmgr, - WCD9XXX_CLK_RCO); - WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr); - } - } else { - if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) { - mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, false); - WCD9XXX_BG_CLK_LOCK(mbhc->resmgr); - } else { - WCD9XXX_BG_CLK_LOCK(mbhc->resmgr); - wcd9xxx_resmgr_put_clk_block(mbhc->resmgr, - WCD9XXX_CLK_RCO); - } - wcd9xxx_resmgr_put_bandgap(mbhc->resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr); - } -} - -/* called only from interrupt which is under codec_resource_lock acquisition */ -static short wcd9xxx_mbhc_setup_hs_polling(struct wcd9xxx_mbhc *mbhc, - struct mbhc_micbias_regs *mbhc_micb_regs, - bool is_cs_enable) -{ - struct snd_soc_codec *codec = mbhc->codec; - short bias_value; - u8 cfilt_mode; - - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - pr_debug("%s: enter\n", __func__); - if (!mbhc->mbhc_cfg->calibration) { - pr_err("%s: Error, no calibration exists\n", __func__); - return -ENODEV; - } - - /* Enable external voltage source to micbias if present */ - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source) - mbhc->mbhc_cb->enable_mb_source(codec, true, true); - - /* - * setup internal micbias if codec uses internal micbias for - * headset detection - */ - if (mbhc->mbhc_cfg->use_int_rbias) { - if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias) - mbhc->mbhc_cb->setup_int_rbias(codec, true); - else - pr_err("%s: internal bias requested but codec did not provide callback\n", - __func__); - } - - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x01); - - /* Make sure CFILT is in fast mode, save current mode */ - cfilt_mode = snd_soc_read(codec, 
mbhc_micb_regs->cfilt_ctl); - if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode) - mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc); - else - snd_soc_update_bits(codec, mbhc_micb_regs->cfilt_ctl, - 0x70, 0x00); - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x2); - snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, - mbhc->scaling_mux_in); - pr_debug("%s: scaling_mux_input: %d\n", __func__, - mbhc->scaling_mux_in); - - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block) - mbhc->mbhc_cb->enable_mux_bias_block(codec); - else - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, - 0x80, 0x80); - - snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x80, 0x80); - snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x1F, 0x1C); - snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0x40, 0x40); - - snd_soc_update_bits(codec, WCD9XXX_A_TX_7_MBHC_EN, 0x80, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x00); - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x2, 0x2); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x8, 0x8); - - if (!mbhc->mbhc_cfg->do_recalibration) { - if (!is_cs_enable) - wcd9xxx_calibrate_hs_polling(mbhc); - } - - /* don't flip override */ - bias_value = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true); - snd_soc_write(codec, mbhc_micb_regs->cfilt_ctl, cfilt_mode); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00); - - return bias_value; -} - -static void wcd9xxx_recalibrate(struct wcd9xxx_mbhc *mbhc, - struct mbhc_micbias_regs *mbhc_micb_regs, - bool is_cs_enable) -{ - struct snd_soc_codec *codec = mbhc->codec; - s16 reg; - int change; - struct wcd9xxx_mbhc_btn_detect_cfg *btn_det; - s16 sta_z = 0, dce_z = 0; - - btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration); - - if (mbhc->mbhc_cfg->do_recalibration) { - /* recalibrate dce_z and sta_z */ - reg = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL); - change = snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, - 0x78, btn_det->mbhc_nsc << 3); - wcd9xxx_get_z(mbhc, &dce_z, &sta_z, mbhc_micb_regs, true); - if (change) - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg); - if (dce_z && sta_z) { - pr_debug("%s: sta_z 0x%x -> 0x%x, dce_z 0x%x -> 0x%x\n", - __func__, - mbhc->mbhc_data.sta_z, sta_z & 0xffff, - mbhc->mbhc_data.dce_z, dce_z & 0xffff); - mbhc->mbhc_data.dce_z = dce_z; - mbhc->mbhc_data.sta_z = sta_z; - wcd9xxx_mbhc_calc_thres(mbhc); - wcd9xxx_calibrate_hs_polling(mbhc); - } else { - pr_warn("%s: failed get new dce_z/sta_z 0x%x/0x%x\n", - __func__, dce_z, sta_z); - } - - if (is_cs_enable) { - /* recalibrate dce_nsc_cs_z */ - reg = snd_soc_read(mbhc->codec, - WCD9XXX_A_CDC_MBHC_B1_CTL); - snd_soc_update_bits(mbhc->codec, - WCD9XXX_A_CDC_MBHC_B1_CTL, - 0x78, WCD9XXX_MBHC_NSC_CS << 3); - wcd9xxx_get_z(mbhc, &dce_z, NULL, mbhc_micb_regs, - true); - snd_soc_write(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL, - reg); - if (dce_z) { - mbhc->mbhc_data.dce_nsc_cs_z = dce_z; - /* update v_cs_ins_h with new dce_nsc_cs_z */ - mbhc->mbhc_data.v_cs_ins_h = - wcd9xxx_codec_v_sta_dce( - mbhc, DCE, - WCD9XXX_V_CS_HS_MAX, - is_cs_enable); - pr_debug("%s: dce_nsc_cs_z 0x%x -> 0x%x, v_cs_ins_h 0x%x\n", - __func__, - mbhc->mbhc_data.dce_nsc_cs_z, - dce_z & 0xffff, - mbhc->mbhc_data.v_cs_ins_h); - } else { - pr_debug("%s: failed get new dce_nsc_cs_z\n", - __func__); - } - } - } -} - -static void wcd9xxx_shutdown_hs_removal_detect(struct wcd9xxx_mbhc *mbhc) -{ - struct snd_soc_codec 
*codec = mbhc->codec; - const struct wcd9xxx_mbhc_general_cfg *generic = - WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration); - - /* Need MBHC clock */ - if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) - mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, true); - else { - WCD9XXX_BG_CLK_LOCK(mbhc->resmgr); - wcd9xxx_resmgr_get_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO); - WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr); - } - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x2, 0x2); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x6, 0x0); - __wcd9xxx_switch_micbias(mbhc, 0, false, false); - - usleep_range(generic->t_shutdown_plug_rem, - generic->t_shutdown_plug_rem + 50); - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0xA, 0x8); - - if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_rco_ctrl) - mbhc->mbhc_cb->codec_rco_ctrl(mbhc->codec, false); - else { - WCD9XXX_BG_CLK_LOCK(mbhc->resmgr); - /* Put requested CLK back */ - wcd9xxx_resmgr_put_clk_block(mbhc->resmgr, WCD9XXX_CLK_RCO); - WCD9XXX_BG_CLK_UNLOCK(mbhc->resmgr); - } - - snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x00); -} - -static void wcd9xxx_cleanup_hs_polling(struct wcd9xxx_mbhc *mbhc) -{ - - pr_debug("%s: enter\n", __func__); - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - wcd9xxx_shutdown_hs_removal_detect(mbhc); - - - /* Disable external voltage source to micbias if present */ - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source) - mbhc->mbhc_cb->enable_mb_source(mbhc->codec, false, true); - - mbhc->polling_active = false; - mbhc->mbhc_state = MBHC_STATE_NONE; - pr_debug("%s: leave\n", __func__); -} - -/* called under codec_resource_lock acquisition */ -static void wcd9xxx_codec_hphr_gnd_switch(struct snd_soc_codec *codec, bool on) -{ - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, on); - if (on) - usleep_range(5000, 5100); -} - -static void wcd9xxx_onoff_vddio_switch(struct wcd9xxx_mbhc *mbhc, bool on) -{ - pr_debug("%s: vddio %d\n", __func__, on); - - if (mbhc->mbhc_cb && mbhc->mbhc_cb->pull_mb_to_vddio) { - mbhc->mbhc_cb->pull_mb_to_vddio(mbhc->codec, on); - goto exit; - } - - if (on) { - snd_soc_update_bits(mbhc->codec, mbhc->mbhc_bias_regs.mbhc_reg, - 1 << 7, 1 << 7); - snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL, - 1 << 4, 0); - } else { - snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL, - 1 << 4, 1 << 4); - snd_soc_update_bits(mbhc->codec, mbhc->mbhc_bias_regs.mbhc_reg, - 1 << 7, 0); - } - -exit: - /* - * Wait for the micbias to settle down to vddio - * when the micbias to vddio switch is enabled. 
- */ - if (on) - usleep_range(10000, 10100); -} - -static int wcd9xxx_hphl_status(struct wcd9xxx_mbhc *mbhc) -{ - u16 hph, status; - struct snd_soc_codec *codec = mbhc->codec; - - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - hph = snd_soc_read(codec, WCD9XXX_A_MBHC_HPH); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x12, 0x02); - usleep_range(WCD9XXX_HPHL_STATUS_READY_WAIT_US, - WCD9XXX_HPHL_STATUS_READY_WAIT_US + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - status = snd_soc_read(codec, WCD9XXX_A_RX_HPH_L_STATUS); - snd_soc_write(codec, WCD9XXX_A_MBHC_HPH, hph); - return status; -} - -static enum wcd9xxx_mbhc_plug_type -wcd9xxx_cs_find_plug_type(struct wcd9xxx_mbhc *mbhc, - struct wcd9xxx_mbhc_detect *dt, const int size, - bool highhph, - unsigned long event_state) -{ - int i; - int vdce, mb_mv; - int ch, sz, delta_thr; - int minv = 0, maxv = INT_MIN; - struct wcd9xxx_mbhc_detect *d = dt; - struct wcd9xxx_mbhc_detect *dprev = d, *dmicbias = NULL, *dgnd = NULL; - enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID; - - const struct wcd9xxx_mbhc_plug_type_cfg *plug_type = - WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration); - s16 hs_max, no_mic, dce_z; - int highhph_cnt = 0; - - pr_debug("%s: enter\n", __func__); - pr_debug("%s: event_state 0x%lx\n", __func__, event_state); - - sz = size - 1; - for (i = 0, d = dt, ch = 0; i < sz; i++, d++) { - if (d->mic_bias) { - dce_z = mbhc->mbhc_data.dce_z; - mb_mv = mbhc->mbhc_data.micb_mv; - hs_max = plug_type->v_hs_max; - no_mic = plug_type->v_no_mic; - } else { - dce_z = mbhc->mbhc_data.dce_nsc_cs_z; - mb_mv = VDDIO_MICBIAS_MV; - hs_max = WCD9XXX_V_CS_HS_MAX; - no_mic = WCD9XXX_V_CS_NO_MIC; - } - - vdce = __wcd9xxx_codec_sta_dce_v(mbhc, true, d->dce, - dce_z, (u32)mb_mv); - d->_vdces = vdce; - if (d->_vdces < no_mic) - d->_type = PLUG_TYPE_HEADPHONE; - else if (d->_vdces >= hs_max) { - d->_type = PLUG_TYPE_HIGH_HPH; - highhph_cnt++; - } else - d->_type = PLUG_TYPE_HEADSET; - - pr_debug("%s: DCE #%d, %04x, V %04d(%04d), HPHL %d TYPE %d\n", - __func__, i, d->dce, vdce, d->_vdces, - d->hphl_status & 0x01, - d->_type); - - ch += d->hphl_status & 0x01; - if (!d->swap_gnd && !d->mic_bias) { - if (maxv < d->_vdces) - maxv = d->_vdces; - if (!minv || minv > d->_vdces) - minv = d->_vdces; - } - if ((!d->mic_bias && - (d->_vdces >= WCD9XXX_CS_MEAS_INVALD_RANGE_LOW_MV && - d->_vdces <= WCD9XXX_CS_MEAS_INVALD_RANGE_HIGH_MV)) || - (d->mic_bias && - (d->_vdces >= WCD9XXX_MEAS_INVALD_RANGE_LOW_MV && - d->_vdces <= WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV))) { - pr_debug("%s: within invalid range\n", __func__); - type = PLUG_TYPE_INVALID; - goto exit; - } - } - - delta_thr = ((highhph_cnt == sz) || highhph) ? 
- WCD9XXX_MB_MEAS_DELTA_MAX_MV : - WCD9XXX_CS_MEAS_DELTA_MAX_MV; - - for (i = 0, d = dt; i < sz; i++, d++) { - if ((i > 0) && !d->mic_bias && !d->swap_gnd && - (d->_type != dprev->_type)) { - pr_debug("%s: Invalid, inconsistent types\n", __func__); - type = PLUG_TYPE_INVALID; - goto exit; - } - - if (!d->swap_gnd && !d->mic_bias && - (abs(minv - d->_vdces) > delta_thr || - abs(maxv - d->_vdces) > delta_thr)) { - pr_debug("%s: Invalid, delta %dmv, %dmv and %dmv\n", - __func__, d->_vdces, minv, maxv); - type = PLUG_TYPE_INVALID; - goto exit; - } else if (d->swap_gnd) { - dgnd = d; - } - - if (!d->mic_bias && !d->swap_gnd) - dprev = d; - else if (d->mic_bias) - dmicbias = d; - } - if (dgnd && dt->_type != PLUG_TYPE_HEADSET && - dt->_type != dgnd->_type) { - pr_debug("%s: Invalid, inconsistent types\n", __func__); - type = PLUG_TYPE_INVALID; - goto exit; - } - - type = dt->_type; - if (dmicbias) { - if (dmicbias->_type == PLUG_TYPE_HEADSET && - (dt->_type == PLUG_TYPE_HIGH_HPH || - dt->_type == PLUG_TYPE_HEADSET)) { - type = PLUG_TYPE_HEADSET; - if (dt->_type == PLUG_TYPE_HIGH_HPH) { - pr_debug("%s: Headset with threshold on MIC detected\n", - __func__); - if (mbhc->mbhc_cfg->micbias_enable_flags & - (1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET)) - mbhc->micbias_enable = true; - } - } - } - - if (type == PLUG_TYPE_HEADSET && dgnd && !dgnd->mic_bias) { - /* if plug type is Headphone report as GND_MIC_SWAP */ - if (dgnd->_type == PLUG_TYPE_HEADPHONE) { - pr_debug("%s: GND_MIC_SWAP\n", __func__); - type = PLUG_TYPE_GND_MIC_SWAP; - /* - * if type is GND_MIC_SWAP we should not check - * HPHL status hence goto exit - */ - goto exit; - } else if (dgnd->_type != PLUG_TYPE_HEADSET && !dmicbias) { - pr_debug("%s: Invalid, inconsistent types\n", __func__); - type = PLUG_TYPE_INVALID; - } - } - - if (event_state & (1 << MBHC_EVENT_PA_HPHL)) { - pr_debug("%s: HPHL PA was ON\n", __func__); - } else if (ch != sz && ch > 0) { - pr_debug("%s: Invalid, inconsistent HPHL..\n", __func__); - type = PLUG_TYPE_INVALID; - goto exit; - } - - if (!(event_state & (1UL << MBHC_EVENT_PA_HPHL))) { - if (((type == PLUG_TYPE_HEADSET || - type == PLUG_TYPE_HEADPHONE) && ch != sz)) { - pr_debug("%s: Invalid, not fully inserted, TYPE %d\n", - __func__, type); - type = PLUG_TYPE_INVALID; - } - } - - if (type == PLUG_TYPE_HEADSET && - (mbhc->mbhc_cfg->micbias_enable_flags & - (1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET))) - mbhc->micbias_enable = true; - -exit: - pr_debug("%s: Plug type %d detected\n", __func__, type); - return type; -} - -/* - * wcd9xxx_find_plug_type : Find out and return the best plug type with given - * list of wcd9xxx_mbhc_detect structure. 
- * param mbhc wcd9xxx_mbhc structure - * param dt collected measurements - * param size array size of dt - * param event_state mbhc->event_state when dt is collected - */ -static enum wcd9xxx_mbhc_plug_type -wcd9xxx_find_plug_type(struct wcd9xxx_mbhc *mbhc, - struct wcd9xxx_mbhc_detect *dt, const int size, - unsigned long event_state) -{ - int i; - int ch; - enum wcd9xxx_mbhc_plug_type type; - int vdce; - struct wcd9xxx_mbhc_detect *d, *dprev, *dgnd = NULL, *dvddio = NULL; - int maxv = 0, minv = 0; - const struct wcd9xxx_mbhc_plug_type_cfg *plug_type = - WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration); - const s16 hs_max = plug_type->v_hs_max; - const s16 no_mic = plug_type->v_no_mic; - - pr_debug("%s: event_state 0x%lx\n", __func__, event_state); - - for (i = 0, d = dt, ch = 0; i < size; i++, d++) { - vdce = wcd9xxx_codec_sta_dce_v(mbhc, true, d->dce); - if (d->vddio) - d->_vdces = scale_v_micb_vddio(mbhc, vdce, false); - else - d->_vdces = vdce; - - if (d->_vdces >= no_mic && d->_vdces < hs_max) - d->_type = PLUG_TYPE_HEADSET; - else if (d->_vdces < no_mic) - d->_type = PLUG_TYPE_HEADPHONE; - else - d->_type = PLUG_TYPE_HIGH_HPH; - - ch += d->hphl_status & 0x01; - if (!d->swap_gnd && !d->hwvalue && !d->vddio) { - if (maxv < d->_vdces) - maxv = d->_vdces; - if (!minv || minv > d->_vdces) - minv = d->_vdces; - } - - pr_debug("%s: DCE #%d, %04x, V %04d(%04d), GND %d, VDDIO %d, HPHL %d TYPE %d\n", - __func__, i, d->dce, vdce, d->_vdces, - d->swap_gnd, d->vddio, d->hphl_status & 0x01, - d->_type); - - - /* - * If GND and MIC prongs are aligned to HPHR and GND of - * headphone, codec measures the voltage based on - * impedance between HPHR and GND which results in ~80mv. - * Avoid this. - */ - if (d->_vdces >= WCD9XXX_MEAS_INVALD_RANGE_LOW_MV && - d->_vdces <= WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV) { - pr_debug("%s: within invalid range\n", __func__); - type = PLUG_TYPE_INVALID; - goto exit; - } - } - - if (event_state & (1 << MBHC_EVENT_PA_HPHL)) { - pr_debug("%s: HPHL PA was ON\n", __func__); - } else if (ch != size && ch > 0) { - pr_debug("%s: Invalid, inconsistent HPHL\n", __func__); - type = PLUG_TYPE_INVALID; - goto exit; - } - - for (i = 0, dprev = NULL, d = dt; i < size; i++, d++) { - if (d->vddio) { - dvddio = d; - continue; - } - - if ((i > 0) && (dprev != NULL) && (d->_type != dprev->_type)) { - pr_debug("%s: Invalid, inconsistent types\n", __func__); - type = PLUG_TYPE_INVALID; - goto exit; - } - - if (!d->swap_gnd && !d->hwvalue && - (abs(minv - d->_vdces) > WCD9XXX_MEAS_DELTA_MAX_MV || - abs(maxv - d->_vdces) > WCD9XXX_MEAS_DELTA_MAX_MV)) { - pr_debug("%s: Invalid, delta %dmv, %dmv and %dmv\n", - __func__, d->_vdces, minv, maxv); - type = PLUG_TYPE_INVALID; - goto exit; - } else if (d->swap_gnd) { - dgnd = d; - } - dprev = d; - } - - WARN_ON(i != size); - type = dt->_type; - if (type == PLUG_TYPE_HEADSET && dgnd) { - if ((dgnd->_vdces + WCD9XXX_GM_SWAP_THRES_MIN_MV < - minv) && - (dgnd->_vdces + WCD9XXX_GM_SWAP_THRES_MAX_MV > - maxv)) - type = PLUG_TYPE_GND_MIC_SWAP; - } - - /* if HPHL PA was on, we cannot use hphl status */ - if (!(event_state & (1UL << MBHC_EVENT_PA_HPHL))) { - if (((type == PLUG_TYPE_HEADSET || - type == PLUG_TYPE_HEADPHONE) && ch != size) || - (type == PLUG_TYPE_GND_MIC_SWAP && ch)) { - pr_debug("%s: Invalid, not fully inserted, TYPE %d\n", - __func__, type); - type = PLUG_TYPE_INVALID; - } - } - - if (type == PLUG_TYPE_HEADSET) { - if (dvddio && ((dvddio->_vdces > hs_max) || - (dvddio->_vdces > minv + WCD9XXX_THRESHOLD_MIC_THRESHOLD))) { - 
pr_debug("%s: Headset with threshold on MIC detected\n", - __func__); - if (mbhc->mbhc_cfg->micbias_enable_flags & - (1 << MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET)) - mbhc->micbias_enable = true; - } else { - pr_debug("%s: Headset with regular MIC detected\n", - __func__); - if (mbhc->mbhc_cfg->micbias_enable_flags & - (1 << MBHC_MICBIAS_ENABLE_REGULAR_HEADSET)) - mbhc->micbias_enable = true; - } - } -exit: - pr_debug("%s: Plug type %d detected, micbias_enable %d\n", __func__, - type, mbhc->micbias_enable); - return type; -} - -/* - * Pull down MBHC micbias for provided duration in microsecond. - */ -static int wcd9xxx_pull_down_micbias(struct wcd9xxx_mbhc *mbhc, int us) -{ - bool micbiasconn = false; - struct snd_soc_codec *codec = mbhc->codec; - const u16 ctlreg = mbhc->mbhc_bias_regs.ctl_reg; - - /* - * Disable MBHC to micbias connection to pull down - * micbias and pull down micbias for a moment. - */ - if ((snd_soc_read(mbhc->codec, ctlreg) & 0x01)) { - WARN_ONCE(1, "MBHC micbias is already pulled down unexpectedly\n"); - return -EFAULT; - } - - if ((snd_soc_read(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL) & 1 << 4)) { - snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL, - 1 << 4, 0); - micbiasconn = true; - } - - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01); - - /* - * Pull down for 1ms to discharge bias. Give small margin (10us) to be - * able to get consistent result across DCEs. - */ - usleep_range(1000, 1000 + 10); - - if (micbiasconn) - snd_soc_update_bits(mbhc->codec, WCD9XXX_A_MAD_ANA_CTRL, - 1 << 4, 1 << 4); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00); - usleep_range(us, us + WCD9XXX_USLEEP_RANGE_MARGIN_US); - - return 0; -} - -/* Called under codec resource lock acquisition */ -void wcd9xxx_turn_onoff_current_source(struct wcd9xxx_mbhc *mbhc, - struct mbhc_micbias_regs *mbhc_micb_regs, - bool on, bool highhph) -{ - struct snd_soc_codec *codec; - struct wcd9xxx_mbhc_btn_detect_cfg *btn_det; - const struct wcd9xxx_mbhc_plug_detect_cfg *plug_det = - WCD9XXX_MBHC_CAL_PLUG_DET_PTR(mbhc->mbhc_cfg->calibration); - - btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration); - codec = mbhc->codec; - - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - if ((on && mbhc->is_cs_enabled) || - (!on && !mbhc->is_cs_enabled)) { - pr_debug("%s: Current source is already %s\n", - __func__, on ? 
"ON" : "OFF"); - return; - } - - if (on) { - pr_debug("%s: enabling current source\n", __func__); - /* Nsc to 9 */ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, - 0x78, 0x48); - /* pull down diode bit to 0 */ - snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg, - 0x01, 0x00); - /* - * Keep the low power insertion/removal - * detection (reg 0x3DD) disabled - */ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, - 0x01, 0x00); - /* - * Enable the Mic Bias current source - * Write bits[6:5] of register MICB_2_MBHC to 0x3 (V_20_UA) - * Write bit[7] of register MICB_2_MBHC to 1 - * (INS_DET_ISRC_EN__ENABLE) - * MICB_2_MBHC__SCHT_TRIG_EN to 1 - */ - snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg, - 0xF0, 0xF0); - /* Disconnect MBHC Override from MicBias and LDOH */ - snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 0x10, 0x00); - mbhc->is_cs_enabled = true; - } else { - pr_debug("%s: disabling current source\n", __func__); - /* Connect MBHC Override from MicBias and LDOH */ - snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 0x10, 0x10); - /* INS_DET_ISRC_CTL to acdb value */ - snd_soc_update_bits(codec, mbhc_micb_regs->mbhc_reg, - 0x60, plug_det->mic_current << 5); - if (!highhph) { - /* INS_DET_ISRC_EN__ENABLE to 0 */ - snd_soc_update_bits(codec, - mbhc_micb_regs->mbhc_reg, - 0x80, 0x00); - /* MICB_2_MBHC__SCHT_TRIG_EN to 0 */ - snd_soc_update_bits(codec, - mbhc_micb_regs->mbhc_reg, - 0x10, 0x00); - } - /* Nsc to acdb value */ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78, - btn_det->mbhc_nsc << 3); - mbhc->is_cs_enabled = false; - } -} - -static enum wcd9xxx_mbhc_plug_type -wcd9xxx_codec_cs_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph) -{ - struct snd_soc_codec *codec = mbhc->codec; - struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT]; - enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID; - int i; - - pr_debug("%s: enter\n", __func__); - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - BUG_ON(NUM_DCE_PLUG_INS_DETECT < 4); - - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true); - rt[0].swap_gnd = false; - rt[0].vddio = false; - rt[0].hwvalue = true; - rt[0].hphl_status = wcd9xxx_hphl_status(mbhc); - rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc, &mbhc->mbhc_bias_regs, - true); - rt[0].mic_bias = false; - - for (i = 1; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) { - rt[i].swap_gnd = (i == NUM_DCE_PLUG_INS_DETECT - 3); - rt[i].mic_bias = ((i == NUM_DCE_PLUG_INS_DETECT - 4) && - highhph); - rt[i].hphl_status = wcd9xxx_hphl_status(mbhc); - if (rt[i].swap_gnd) - wcd9xxx_codec_hphr_gnd_switch(codec, true); - - if (rt[i].mic_bias) - wcd9xxx_turn_onoff_current_source(mbhc, - &mbhc->mbhc_bias_regs, - false, false); - - rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1, !highhph, true); - if (rt[i].mic_bias) - wcd9xxx_turn_onoff_current_source(mbhc, - &mbhc->mbhc_bias_regs, - true, false); - if (rt[i].swap_gnd) - wcd9xxx_codec_hphr_gnd_switch(codec, false); - } - - /* recalibrate DCE/STA GND voltages */ - wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, true); - - type = wcd9xxx_cs_find_plug_type(mbhc, rt, ARRAY_SIZE(rt), highhph, - mbhc->event_state); - - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false); - pr_debug("%s: plug_type:%d\n", __func__, type); - - return type; -} - -static enum wcd9xxx_mbhc_plug_type -wcd9xxx_codec_get_plug_type(struct wcd9xxx_mbhc *mbhc, bool highhph) -{ - int i; - bool vddioon; - struct wcd9xxx_mbhc_plug_type_cfg *plug_type_ptr; - struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT]; - enum wcd9xxx_mbhc_plug_type type = PLUG_TYPE_INVALID; - 
struct snd_soc_codec *codec = mbhc->codec; - - pr_debug("%s: enter\n", __func__); - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - /* make sure override is on */ - WARN_ON(!(snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x04)); - - /* GND and MIC swap detection requires at least 2 rounds of DCE */ - BUG_ON(NUM_DCE_PLUG_INS_DETECT < 2); - detect_use_vddio_switch = mbhc->mbhc_cfg->use_vddio_meas; - - /* - * There are chances vddio switch is on and cfilt voltage is adjusted - * to vddio voltage even after plug type removal reported. - */ - vddioon = __wcd9xxx_switch_micbias(mbhc, 0, false, false); - pr_debug("%s: vddio switch was %s\n", __func__, vddioon ? "on" : "off"); - - plug_type_ptr = - WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration); - - /* - * cfilter in fast mode requires 1ms to charge up and down micbias - * fully. - */ - (void) wcd9xxx_pull_down_micbias(mbhc, - WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US); - - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true); - rt[0].hphl_status = wcd9xxx_hphl_status(mbhc); - rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc, &mbhc->mbhc_bias_regs, - false); - rt[0].swap_gnd = false; - rt[0].vddio = false; - rt[0].hwvalue = true; - for (i = 1; i < NUM_DCE_PLUG_INS_DETECT; i++) { - rt[i].swap_gnd = (i == NUM_DCE_PLUG_INS_DETECT - 2); - if (detect_use_vddio_switch) - rt[i].vddio = (i == 1); - else - rt[i].vddio = false; - rt[i].hphl_status = wcd9xxx_hphl_status(mbhc); - rt[i].hwvalue = false; - if (rt[i].swap_gnd) - wcd9xxx_codec_hphr_gnd_switch(codec, true); - if (rt[i].vddio) - wcd9xxx_onoff_vddio_switch(mbhc, true); - /* - * Pull down micbias to detect headset with mic which has - * threshold and to have more consistent voltage measurements. - * - * cfilter in fast mode requires 1ms to charge up and down - * micbias fully. 
- */ - (void) wcd9xxx_pull_down_micbias(mbhc, - WCD9XXX_MICBIAS_PULLDOWN_SETTLE_US); - rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true); - if (rt[i].vddio) - wcd9xxx_onoff_vddio_switch(mbhc, false); - if (rt[i].swap_gnd) - wcd9xxx_codec_hphr_gnd_switch(codec, false); - } - /* recalibrate DCE/STA GND voltages */ - wcd9xxx_recalibrate(mbhc, &mbhc->mbhc_bias_regs, false); - - if (vddioon) - __wcd9xxx_switch_micbias(mbhc, 1, false, false); - - type = wcd9xxx_find_plug_type(mbhc, rt, ARRAY_SIZE(rt), - mbhc->event_state); - - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false); - pr_debug("%s: leave\n", __func__); - return type; -} - -static bool wcd9xxx_swch_level_remove(struct wcd9xxx_mbhc *mbhc) -{ - if (mbhc->mbhc_cfg->gpio) - return (gpio_get_value_cansleep(mbhc->mbhc_cfg->gpio) != - mbhc->mbhc_cfg->gpio_level_insert); - else if (mbhc->mbhc_cfg->insert_detect) { - if (mbhc->mbhc_cb && mbhc->mbhc_cb->insert_rem_status) - return mbhc->mbhc_cb->insert_rem_status(mbhc->codec); - else - return snd_soc_read(mbhc->codec, - WCD9XXX_A_MBHC_INSERT_DET_STATUS) & - (1 << 2); - } else - WARN(1, "Invalid jack detection configuration\n"); - - return true; -} - -static bool is_clk_active(struct snd_soc_codec *codec) -{ - return !!(snd_soc_read(codec, WCD9XXX_A_CDC_CLK_MCLK_CTL) & 0x05); -} - -static int wcd9xxx_enable_hs_detect(struct wcd9xxx_mbhc *mbhc, - int insertion, int trigger, bool padac_off) -{ - struct snd_soc_codec *codec = mbhc->codec; - int central_bias_enabled = 0; - const struct wcd9xxx_mbhc_general_cfg *generic = - WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration); - const struct wcd9xxx_mbhc_plug_detect_cfg *plug_det = - WCD9XXX_MBHC_CAL_PLUG_DET_PTR(mbhc->mbhc_cfg->calibration); - - pr_debug("%s: enter insertion(%d) trigger(0x%x)\n", - __func__, insertion, trigger); - - if (!mbhc->mbhc_cfg->calibration) { - pr_err("Error, no wcd9xxx calibration\n"); - return -EINVAL; - } - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x1, 0); - - /* - * Make sure mic bias and Mic line schmitt trigger - * are turned OFF - */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00); - - if (insertion) { - wcd9xxx_switch_micbias(mbhc, 0); - - /* DAPM can manipulate PA/DAC bits concurrently */ - if (padac_off == true) - wcd9xxx_set_and_turnoff_hph_padac(mbhc); - - if (trigger & MBHC_USE_HPHL_TRIGGER) { - /* Enable HPH Schmitt Trigger */ - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x11, - 0x11); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x0C, - plug_det->hph_current << 2); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x02, - 0x02); - } - if (trigger & MBHC_USE_MB_TRIGGER) { - /* enable the mic line schmitt trigger */ - snd_soc_update_bits(codec, - mbhc->mbhc_bias_regs.mbhc_reg, - 0x60, plug_det->mic_current << 5); - snd_soc_update_bits(codec, - mbhc->mbhc_bias_regs.mbhc_reg, - 0x80, 0x80); - usleep_range(plug_det->t_mic_pid, plug_det->t_mic_pid + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - snd_soc_update_bits(codec, - mbhc->mbhc_bias_regs.ctl_reg, 0x01, - 0x00); - snd_soc_update_bits(codec, - mbhc->mbhc_bias_regs.mbhc_reg, - 0x10, 0x10); - } - - /* setup for insetion detection */ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x2, 0); - } else { - pr_debug("setup for removal detection\n"); - /* Make sure the HPH schmitt trigger is OFF */ - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x12, 0x00); - - /* enable the mic line schmitt trigger */ - snd_soc_update_bits(codec, 
mbhc->mbhc_bias_regs.ctl_reg, - 0x01, 0x00); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x60, - plug_det->mic_current << 5); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, - 0x80, 0x80); - usleep_range(plug_det->t_mic_pid, plug_det->t_mic_pid + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, - 0x10, 0x10); - - /* Setup for low power removal detection */ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x2, - 0x2); - } - - if (snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x4) { - /* called by interrupt */ - if (!is_clk_active(codec)) { - wcd9xxx_resmgr_enable_config_mode(mbhc->resmgr, 1); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, - 0x06, 0); - usleep_range(generic->t_shutdown_plug_rem, - generic->t_shutdown_plug_rem + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - wcd9xxx_resmgr_enable_config_mode(mbhc->resmgr, 0); - } else - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, - 0x06, 0); - } - - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.int_rbias, 0x80, 0); - - /* If central bandgap disabled */ - if (!(snd_soc_read(codec, WCD9XXX_A_PIN_CTL_OE1) & 1)) { - snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE1, 0x3, 0x3); - usleep_range(generic->t_bg_fast_settle, - generic->t_bg_fast_settle + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - central_bias_enabled = 1; - } - - /* If LDO_H disabled */ - if (snd_soc_read(codec, WCD9XXX_A_PIN_CTL_OE0) & 0x80) { - snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x10, 0); - snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x80, 0x80); - usleep_range(generic->t_ldoh, generic->t_ldoh + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE0, 0x80, 0); - - if (central_bias_enabled) - snd_soc_update_bits(codec, WCD9XXX_A_PIN_CTL_OE1, 0x1, - 0); - } - - if (mbhc->resmgr->reg_addr && mbhc->resmgr->reg_addr->micb_4_mbhc) - snd_soc_update_bits(codec, mbhc->resmgr->reg_addr->micb_4_mbhc, - 0x3, mbhc->mbhc_cfg->micbias); - - wcd9xxx_enable_irq(mbhc->resmgr->core_res, mbhc->intr_ids->insertion); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x1, 0x1); - pr_debug("%s: leave\n", __func__); - - return 0; -} - -/* - * Function to determine whether anc microphone is preset or not. - * Return true if anc microphone is detected or false if not detected. - */ -static bool wcd9xxx_detect_anc_plug_type(struct wcd9xxx_mbhc *mbhc) -{ - struct wcd9xxx_mbhc_detect rt[NUM_DCE_PLUG_INS_DETECT - 1]; - bool anc_mic_found = true; - int i, mb_mv; - const struct wcd9xxx_mbhc_plug_type_cfg *plug_type = - WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration); - s16 hs_max, dce_z; - s16 no_mic; - bool override_en; - bool timedout; - unsigned long timeout, retry = 0; - enum wcd9xxx_mbhc_plug_type type; - bool cs_enable; - - if (mbhc->mbhc_cfg->anc_micbias != MBHC_MICBIAS3 && - mbhc->mbhc_cfg->anc_micbias != MBHC_MICBIAS2) - return false; - - pr_debug("%s: enter\n", __func__); - - override_en = (snd_soc_read(mbhc->codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & - 0x04) ? 
true : false; - cs_enable = ((mbhc->mbhc_cfg->cs_enable_flags & - (1 << MBHC_CS_ENABLE_DET_ANC)) != 0) && - (!(snd_soc_read(mbhc->codec, - mbhc->mbhc_anc_bias_regs.ctl_reg) & 0x80)) && - (mbhc->mbhc_cfg->micbias != mbhc->mbhc_cfg->anc_micbias); - - if (cs_enable) { - wcd9xxx_turn_onoff_current_source(mbhc, - &mbhc->mbhc_anc_bias_regs, - true, false); - } else { - if (mbhc->mbhc_cfg->anc_micbias == MBHC_MICBIAS3) { - if (mbhc->micbias_enable_cb) - mbhc->micbias_enable_cb(mbhc->codec, true, - mbhc->mbhc_cfg->anc_micbias); - else - return false; - } else { - /* Enable override */ - if (!override_en) - wcd9xxx_turn_onoff_override(mbhc, true); - } - } - - if (!cs_enable) { - hs_max = plug_type->v_hs_max; - no_mic = plug_type->v_no_mic; - dce_z = mbhc->mbhc_data.dce_z; - mb_mv = mbhc->mbhc_data.micb_mv; - } else { - hs_max = WCD9XXX_V_CS_HS_MAX; - no_mic = WCD9XXX_V_CS_NO_MIC; - mb_mv = VDDIO_MICBIAS_MV; - dce_z = mbhc->mbhc_data.dce_nsc_cs_z; - } - - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true); - - timeout = jiffies + msecs_to_jiffies(ANC_HPH_DETECT_PLUG_TIME_MS); - anc_mic_found = true; - - while (!(timedout = time_after(jiffies, timeout))) { - retry++; - - if (wcd9xxx_swch_level_remove(mbhc)) { - pr_debug("%s: Switch level is low\n", __func__); - anc_mic_found = false; - break; - } - - pr_debug("%s: Retry attempt %lu", __func__, retry - 1); - - rt[0].hphl_status = wcd9xxx_hphl_status(mbhc); - rt[0].dce = wcd9xxx_mbhc_setup_hs_polling(mbhc, - &mbhc->mbhc_anc_bias_regs, - cs_enable); - rt[0]._vdces = __wcd9xxx_codec_sta_dce_v(mbhc, true, rt[0].dce, - dce_z, (u32)mb_mv); - - if (rt[0]._vdces >= no_mic && rt[0]._vdces < hs_max) - rt[0]._type = PLUG_TYPE_HEADSET; - else if (rt[0]._vdces < no_mic) - rt[0]._type = PLUG_TYPE_HEADPHONE; - else - rt[0]._type = PLUG_TYPE_HIGH_HPH; - - pr_debug("%s: DCE #%d, V %04d, HPHL %d TYPE %d\n", - __func__, 0, rt[0]._vdces, - rt[0].hphl_status & 0x01, - rt[0]._type); - - for (i = 1; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) { - rt[i].dce = __wcd9xxx_codec_sta_dce(mbhc, 1, - true, true); - rt[i]._vdces = __wcd9xxx_codec_sta_dce_v(mbhc, true, - rt[i].dce, dce_z, - (u32) mb_mv); - - if (rt[i]._vdces >= no_mic && rt[i]._vdces < hs_max) - rt[i]._type = PLUG_TYPE_HEADSET; - else if (rt[i]._vdces < no_mic) - rt[i]._type = PLUG_TYPE_HEADPHONE; - else - rt[i]._type = PLUG_TYPE_HIGH_HPH; - - rt[i].hphl_status = wcd9xxx_hphl_status(mbhc); - - pr_debug("%s: DCE #%d, V %04d, HPHL %d TYPE %d\n", - __func__, i, rt[i]._vdces, - rt[i].hphl_status & 0x01, - rt[i]._type); - } - - /* - * Check for the "type" of all the 4 measurements - * If all 4 measurements have the Type as PLUG_TYPE_HEADSET - * then it is proper mic and declare that the plug has two mics - */ - for (i = 0; i < NUM_DCE_PLUG_INS_DETECT - 1; i++) { - if (i > 0 && (rt[i - 1]._type != rt[i]._type)) { - type = PLUG_TYPE_INVALID; - break; - } else { - type = rt[0]._type; - } - } - - pr_debug("%s: Plug type found in ANC detection :%d", - __func__, type); - - if (type != PLUG_TYPE_HEADSET) - anc_mic_found = false; - if (anc_mic_found || (type == PLUG_TYPE_HEADPHONE && - mbhc->mbhc_cfg->hw_jack_type == FIVE_POLE_JACK) || - (type == PLUG_TYPE_HIGH_HPH && - mbhc->mbhc_cfg->hw_jack_type == SIX_POLE_JACK)) - break; - } - - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false); - if (cs_enable) { - wcd9xxx_turn_onoff_current_source(mbhc, - &mbhc->mbhc_anc_bias_regs, - false, false); - } else { - if (mbhc->mbhc_cfg->anc_micbias == MBHC_MICBIAS3) { - if (mbhc->micbias_enable_cb) - mbhc->micbias_enable_cb(mbhc->codec, false, - 
mbhc->mbhc_cfg->anc_micbias); - } else { - /* Disable override */ - if (!override_en) - wcd9xxx_turn_onoff_override(mbhc, false); - } - } - pr_debug("%s: leave\n", __func__); - return anc_mic_found; -} - -/* called under codec_resource_lock acquisition */ -static void wcd9xxx_find_plug_and_report(struct wcd9xxx_mbhc *mbhc, - enum wcd9xxx_mbhc_plug_type plug_type) -{ - bool anc_mic_found = false; - - pr_debug("%s: enter current_plug(%d) new_plug(%d)\n", - __func__, mbhc->current_plug, plug_type); - - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - if (plug_type == PLUG_TYPE_HEADPHONE && - mbhc->current_plug == PLUG_TYPE_NONE) { - /* - * Nothing was reported previously - * report a headphone or unsupported - */ - wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADPHONE); - wcd9xxx_cleanup_hs_polling(mbhc); - } else if (plug_type == PLUG_TYPE_GND_MIC_SWAP) { - if (!mbhc->mbhc_cfg->detect_extn_cable) { - if (mbhc->current_plug == PLUG_TYPE_HEADSET) - wcd9xxx_report_plug(mbhc, 0, - SND_JACK_HEADSET); - else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) - wcd9xxx_report_plug(mbhc, 0, - SND_JACK_HEADPHONE); - } - wcd9xxx_report_plug(mbhc, 1, SND_JACK_UNSUPPORTED); - wcd9xxx_cleanup_hs_polling(mbhc); - } else if (plug_type == PLUG_TYPE_HEADSET) { - - if (mbhc->mbhc_cfg->enable_anc_mic_detect) { - /* - * Do not report Headset, because at this point - * it could be a ANC headphone having two mics. - * So, proceed further to detect if there is a - * second mic. - */ - mbhc->scaling_mux_in = 0x08; - anc_mic_found = wcd9xxx_detect_anc_plug_type(mbhc); - } - - if (anc_mic_found) { - /* Report ANC headphone */ - wcd9xxx_report_plug(mbhc, 1, SND_JACK_ANC_HEADPHONE); - } else { - /* - * If Headphone was reported previously, this will - * only report the mic line - */ - wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADSET); - } - /* Button detection required RC oscillator */ - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, true); - /* - * sleep so that audio path completely tears down - * before report plug insertion to the user space - */ - msleep(100); - - wcd9xxx_start_hs_polling(mbhc); - } else if (plug_type == PLUG_TYPE_HIGH_HPH) { - if (mbhc->mbhc_cfg->detect_extn_cable) { - /* High impedance device found. Report as LINEOUT*/ - if (mbhc->current_plug == PLUG_TYPE_NONE) - wcd9xxx_report_plug(mbhc, 1, SND_JACK_LINEOUT); - wcd9xxx_cleanup_hs_polling(mbhc); - pr_debug("%s: setup mic trigger for further detection\n", - __func__); - mbhc->lpi_enabled = true; - /* - * Do not enable HPHL trigger. 
If playback is active, - * it might lead to continuous false HPHL triggers - */ - wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER, - false); - } else { - if (mbhc->current_plug == PLUG_TYPE_NONE) - wcd9xxx_report_plug(mbhc, 1, - SND_JACK_HEADPHONE); - wcd9xxx_cleanup_hs_polling(mbhc); - pr_debug("setup mic trigger for further detection\n"); - mbhc->lpi_enabled = true; - wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER | - MBHC_USE_HPHL_TRIGGER, - false); - } - } else { - WARN(1, "Unexpected current plug_type %d, plug_type %d\n", - mbhc->current_plug, plug_type); - } - pr_debug("%s: leave\n", __func__); -} - -/* called under codec_resource_lock acquisition */ -static void wcd9xxx_mbhc_decide_swch_plug(struct wcd9xxx_mbhc *mbhc) -{ - enum wcd9xxx_mbhc_plug_type plug_type; - bool current_source_enable; - - pr_debug("%s: enter\n", __func__); - - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - current_source_enable = (((mbhc->mbhc_cfg->cs_enable_flags & - (1 << MBHC_CS_ENABLE_INSERTION)) != 0) && - (!(snd_soc_read(mbhc->codec, - mbhc->mbhc_bias_regs.ctl_reg) & 0x80))); - - mbhc->scaling_mux_in = 0x04; - - if (current_source_enable) { - wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs, - true, false); - plug_type = wcd9xxx_codec_cs_get_plug_type(mbhc, false); - /* - * For other plug types, the current source disable - * will be done from wcd9xxx_correct_swch_plug - */ - if (plug_type == PLUG_TYPE_HEADSET) - wcd9xxx_turn_onoff_current_source(mbhc, - &mbhc->mbhc_bias_regs, - false, false); - } else { - wcd9xxx_turn_onoff_override(mbhc, true); - plug_type = wcd9xxx_codec_get_plug_type(mbhc, true); - wcd9xxx_turn_onoff_override(mbhc, false); - } - - if (wcd9xxx_swch_level_remove(mbhc)) { - if (current_source_enable && mbhc->is_cs_enabled) { - wcd9xxx_turn_onoff_current_source(mbhc, - &mbhc->mbhc_bias_regs, - false, false); - } - pr_debug("%s: Switch level is low when determining plug\n", - __func__); - return; - } - - if (plug_type == PLUG_TYPE_INVALID || - plug_type == PLUG_TYPE_GND_MIC_SWAP) { - wcd9xxx_cleanup_hs_polling(mbhc); - wcd9xxx_schedule_hs_detect_plug(mbhc, - &mbhc->correct_plug_swch); - } else if (plug_type == PLUG_TYPE_HEADPHONE) { - wcd9xxx_report_plug(mbhc, 1, SND_JACK_HEADPHONE); - wcd9xxx_cleanup_hs_polling(mbhc); - wcd9xxx_schedule_hs_detect_plug(mbhc, - &mbhc->correct_plug_swch); - } else if (plug_type == PLUG_TYPE_HIGH_HPH) { - wcd9xxx_cleanup_hs_polling(mbhc); - wcd9xxx_schedule_hs_detect_plug(mbhc, - &mbhc->correct_plug_swch); - } else { - pr_debug("%s: Valid plug found, determine plug type %d\n", - __func__, plug_type); - wcd9xxx_find_plug_and_report(mbhc, plug_type); - } - pr_debug("%s: leave\n", __func__); -} - -/* called under codec_resource_lock acquisition */ -static void wcd9xxx_mbhc_detect_plug_type(struct wcd9xxx_mbhc *mbhc) -{ - pr_debug("%s: enter\n", __func__); - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - if (wcd9xxx_swch_level_remove(mbhc)) - pr_debug("%s: Switch level low when determining plug\n", - __func__); - else - wcd9xxx_mbhc_decide_swch_plug(mbhc); - pr_debug("%s: leave\n", __func__); -} - -/* called only from interrupt which is under codec_resource_lock acquisition */ -static void wcd9xxx_hs_insert_irq_swch(struct wcd9xxx_mbhc *mbhc, - bool is_removal) -{ - if (!is_removal) { - pr_debug("%s: MIC trigger insertion interrupt\n", __func__); - - /* Make sure memory read is completed before reading - * lpi_enabled. 
- */ - rmb(); - if (mbhc->lpi_enabled) - msleep(100); - - /* Make sure memory read is completed before reading - * lpi_enabled. - */ - rmb(); - if (!mbhc->lpi_enabled) { - pr_debug("%s: lpi is disabled\n", __func__); - } else if (!wcd9xxx_swch_level_remove(mbhc)) { - pr_debug("%s: Valid insertion, detect plug type\n", - __func__); - wcd9xxx_mbhc_decide_swch_plug(mbhc); - } else { - pr_debug("%s: Invalid insertion stop plug detection\n", - __func__); - } - } else if (mbhc->mbhc_cfg->detect_extn_cable) { - pr_debug("%s: Removal\n", __func__); - if (!wcd9xxx_swch_level_remove(mbhc)) { - /* - * Switch indicates, something is still inserted. - * This could be extension cable i.e. headset is - * removed from extension cable. - */ - /* cancel detect plug */ - wcd9xxx_cancel_hs_detect_plug(mbhc, - &mbhc->correct_plug_swch); - wcd9xxx_mbhc_decide_swch_plug(mbhc); - } - } else { - pr_err("%s: Switch IRQ used, invalid MBHC Removal\n", __func__); - } -} - -static bool is_valid_mic_voltage(struct wcd9xxx_mbhc *mbhc, s32 mic_mv, - bool cs_enable) -{ - const struct wcd9xxx_mbhc_plug_type_cfg *plug_type = - WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration); - const s16 v_hs_max = wcd9xxx_get_current_v_hs_max(mbhc); - - if (cs_enable) - return ((mic_mv > WCD9XXX_V_CS_NO_MIC) && - (mic_mv < WCD9XXX_V_CS_HS_MAX)) ? true : false; - else - return (!(mic_mv > WCD9XXX_MEAS_INVALD_RANGE_LOW_MV && - mic_mv < WCD9XXX_MEAS_INVALD_RANGE_HIGH_MV) && - (mic_mv > plug_type->v_no_mic) && - (mic_mv < v_hs_max)) ? true : false; -} - -/* - * called under codec_resource_lock acquisition - * returns true if mic voltage range is back to normal insertion - * returns false either if timedout or removed - */ -static bool wcd9xxx_hs_remove_settle(struct wcd9xxx_mbhc *mbhc) -{ - int i; - bool timedout, settled = false; - s32 mic_mv[NUM_DCE_PLUG_DETECT]; - short mb_v[NUM_DCE_PLUG_DETECT]; - unsigned long retry = 0, timeout; - bool cs_enable; - - cs_enable = (((mbhc->mbhc_cfg->cs_enable_flags & - (1 << MBHC_CS_ENABLE_REMOVAL)) != 0) && - (!(snd_soc_read(mbhc->codec, - mbhc->mbhc_bias_regs.ctl_reg) & 0x80))); - if (cs_enable) - wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs, - true, false); - - timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS); - while (!(timedout = time_after(jiffies, timeout))) { - retry++; - if (wcd9xxx_swch_level_remove(mbhc)) { - pr_debug("%s: Switch indicates removal\n", __func__); - break; - } - - if (retry > 1) - msleep(250); - else - msleep(50); - - if (wcd9xxx_swch_level_remove(mbhc)) { - pr_debug("%s: Switch indicates removal\n", __func__); - break; - } - - if (cs_enable) { - for (i = 0; i < NUM_DCE_PLUG_DETECT; i++) { - mb_v[i] = __wcd9xxx_codec_sta_dce(mbhc, 1, - true, true); - mic_mv[i] = __wcd9xxx_codec_sta_dce_v(mbhc, - true, - mb_v[i], - mbhc->mbhc_data.dce_nsc_cs_z, - (u32)VDDIO_MICBIAS_MV); - pr_debug("%s : DCE run %lu, mic_mv = %d(%x)\n", - __func__, retry, mic_mv[i], mb_v[i]); - } - } else { - for (i = 0; i < NUM_DCE_PLUG_DETECT; i++) { - mb_v[i] = wcd9xxx_codec_sta_dce(mbhc, 1, - true); - mic_mv[i] = wcd9xxx_codec_sta_dce_v(mbhc, 1, - mb_v[i]); - pr_debug("%s : DCE run %lu, mic_mv = %d(%x)\n", - __func__, retry, mic_mv[i], - mb_v[i]); - } - } - - if (wcd9xxx_swch_level_remove(mbhc)) { - pr_debug("%s: Switcn indicates removal\n", __func__); - break; - } - - if (mbhc->current_plug == PLUG_TYPE_NONE) { - pr_debug("%s : headset/headphone is removed\n", - __func__); - break; - } - - for (i = 0; i < NUM_DCE_PLUG_DETECT; i++) - if (!is_valid_mic_voltage(mbhc, 
mic_mv[i], cs_enable)) - break; - - if (i == NUM_DCE_PLUG_DETECT) { - pr_debug("%s: MIC voltage settled\n", __func__); - settled = true; - msleep(200); - break; - } - } - - if (cs_enable) - wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs, - false, false); - - if (timedout) - pr_debug("%s: Microphone did not settle in %d seconds\n", - __func__, HS_DETECT_PLUG_TIME_MS); - return settled; -} - -/* called only from interrupt which is under codec_resource_lock acquisition */ -static void wcd9xxx_hs_remove_irq_swch(struct wcd9xxx_mbhc *mbhc) -{ - pr_debug("%s: enter\n", __func__); - if (wcd9xxx_hs_remove_settle(mbhc)) - wcd9xxx_start_hs_polling(mbhc); - pr_debug("%s: leave\n", __func__); -} - -/* called only from interrupt which is under codec_resource_lock acquisition */ -static void wcd9xxx_hs_remove_irq_noswch(struct wcd9xxx_mbhc *mbhc) -{ - s16 dce, dcez; - unsigned long timeout; - bool removed = true; - struct snd_soc_codec *codec = mbhc->codec; - const struct wcd9xxx_mbhc_general_cfg *generic = - WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration); - bool cs_enable; - s16 cur_v_ins_h; - u32 mb_mv; - - pr_debug("%s: enter\n", __func__); - if (mbhc->current_plug != PLUG_TYPE_HEADSET && - mbhc->current_plug != PLUG_TYPE_ANC_HEADPHONE) { - pr_debug("%s(): Headset is not inserted, ignore removal\n", - __func__); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, - 0x08, 0x08); - return; - } - - usleep_range(generic->t_shutdown_plug_rem, - generic->t_shutdown_plug_rem + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - - /* If micbias is enabled, don't enable current source */ - cs_enable = (((mbhc->mbhc_cfg->cs_enable_flags & - (1 << MBHC_CS_ENABLE_REMOVAL)) != 0) && - (!(snd_soc_read(codec, - mbhc->mbhc_bias_regs.ctl_reg) & 0x80))); - if (cs_enable) - wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs, - true, false); - - timeout = jiffies + msecs_to_jiffies(FAKE_REMOVAL_MIN_PERIOD_MS); - do { - if (cs_enable) { - dce = __wcd9xxx_codec_sta_dce(mbhc, 1, true, true); - dcez = mbhc->mbhc_data.dce_nsc_cs_z; - mb_mv = VDDIO_MICBIAS_MV; - } else { - dce = wcd9xxx_codec_sta_dce(mbhc, 1, true); - dcez = mbhc->mbhc_data.dce_z; - mb_mv = mbhc->mbhc_data.micb_mv; - } - - pr_debug("%s: DCE 0x%x,%d\n", __func__, dce, - __wcd9xxx_codec_sta_dce_v(mbhc, true, dce, - dcez, mb_mv)); - - cur_v_ins_h = cs_enable ? (s16) mbhc->mbhc_data.v_cs_ins_h : - (wcd9xxx_get_current_v(mbhc, - WCD9XXX_CURRENT_V_INS_H)); - - if (dce < cur_v_ins_h) { - removed = false; - break; - } - } while (!time_after(jiffies, timeout)); - pr_debug("%s: headset %sactually removed\n", __func__, - removed ? "" : "not "); - - if (cs_enable) - wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs, - false, false); - - if (removed) { - if (mbhc->mbhc_cfg->detect_extn_cable) { - if (!wcd9xxx_swch_level_remove(mbhc)) { - /* - * extension cable is still plugged in - * report it as LINEOUT device - */ - if (mbhc->hph_status == SND_JACK_HEADSET) - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, - false); - wcd9xxx_report_plug(mbhc, 1, SND_JACK_LINEOUT); - wcd9xxx_cleanup_hs_polling(mbhc); - wcd9xxx_enable_hs_detect(mbhc, 1, - MBHC_USE_MB_TRIGGER, - false); - } - } else { - /* Cancel possibly running hs_detect_work */ - wcd9xxx_cancel_hs_detect_plug(mbhc, - &mbhc->correct_plug_noswch); - /* - * If this removal is not false, first check the micbias - * switch status and switch it to LDOH if it is already - * switched to VDDIO. 
- */ - wcd9xxx_switch_micbias(mbhc, 0); - - wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADSET); - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false); - wcd9xxx_cleanup_hs_polling(mbhc); - wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_MB_TRIGGER | - MBHC_USE_HPHL_TRIGGER, - true); - } - } else { - wcd9xxx_start_hs_polling(mbhc); - } - pr_debug("%s: leave\n", __func__); -} - -/* called only from interrupt which is under codec_resource_lock acquisition */ -static void wcd9xxx_hs_insert_irq_extn(struct wcd9xxx_mbhc *mbhc, - bool is_mb_trigger) -{ - /* Cancel possibly running hs_detect_work */ - wcd9xxx_cancel_hs_detect_plug(mbhc, &mbhc->correct_plug_swch); - - if (is_mb_trigger) { - pr_debug("%s: Waiting for Headphone left trigger\n", __func__); - wcd9xxx_enable_hs_detect(mbhc, 1, MBHC_USE_HPHL_TRIGGER, false); - } else { - pr_debug("%s: HPHL trigger received, detecting plug type\n", - __func__); - wcd9xxx_mbhc_detect_plug_type(mbhc); - } -} - -static irqreturn_t wcd9xxx_hs_remove_irq(int irq, void *data) -{ - struct wcd9xxx_mbhc *mbhc = data; - - pr_debug("%s: enter, removal interrupt\n", __func__); - WCD9XXX_BCL_LOCK(mbhc->resmgr); - /* - * While we don't know whether MIC is there or not, let the resmgr know - * so micbias can be disabled temporarily - */ - if (mbhc->current_plug == PLUG_TYPE_HEADSET) { - wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr, - WCD9XXX_COND_HPH_MIC, false); - wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr, - WCD9XXX_COND_HPH, false); - } else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) { - wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr, - WCD9XXX_COND_HPH, false); - } - - if (mbhc->mbhc_cfg->detect_extn_cable && - !wcd9xxx_swch_level_remove(mbhc)) - wcd9xxx_hs_remove_irq_noswch(mbhc); - else - wcd9xxx_hs_remove_irq_swch(mbhc); - - if (mbhc->current_plug == PLUG_TYPE_HEADSET) { - wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr, - WCD9XXX_COND_HPH, true); - wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr, - WCD9XXX_COND_HPH_MIC, true); - } else if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) { - wcd9xxx_resmgr_cond_update_cond(mbhc->resmgr, - WCD9XXX_COND_HPH, true); - } - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - - return IRQ_HANDLED; -} - -static irqreturn_t wcd9xxx_hs_insert_irq(int irq, void *data) -{ - bool is_mb_trigger, is_removal; - struct wcd9xxx_mbhc *mbhc = data; - struct snd_soc_codec *codec = mbhc->codec; - - pr_debug("%s: enter\n", __func__); - WCD9XXX_BCL_LOCK(mbhc->resmgr); - wcd9xxx_disable_irq(mbhc->resmgr->core_res, mbhc->intr_ids->insertion); - - is_mb_trigger = !!(snd_soc_read(codec, mbhc->mbhc_bias_regs.mbhc_reg) & - 0x10); - is_removal = !!(snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_INT_CTL) & 0x02); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_INT_CTL, 0x03, 0x00); - - /* Turn off both HPH and MIC line schmitt triggers */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00); - - if (mbhc->mbhc_cfg->detect_extn_cable && - mbhc->current_plug == PLUG_TYPE_HIGH_HPH) - wcd9xxx_hs_insert_irq_extn(mbhc, is_mb_trigger); - else - wcd9xxx_hs_insert_irq_swch(mbhc, is_removal); - - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - return IRQ_HANDLED; -} - -static void wcd9xxx_btn_lpress_fn(struct work_struct *work) -{ - struct delayed_work *dwork; - short bias_value; - int dce_mv, sta_mv; - struct wcd9xxx_mbhc *mbhc; - - pr_debug("%s:\n", __func__); - - dwork = to_delayed_work(work); - mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_btn_dwork); 
- - bias_value = wcd9xxx_read_sta_result(mbhc->codec); - sta_mv = wcd9xxx_codec_sta_dce_v(mbhc, 0, bias_value); - - bias_value = wcd9xxx_read_dce_result(mbhc->codec); - dce_mv = wcd9xxx_codec_sta_dce_v(mbhc, 1, bias_value); - pr_debug("%s: STA: %d, DCE: %d\n", __func__, sta_mv, dce_mv); - - pr_debug("%s: Reporting long button press event\n", __func__); - wcd9xxx_jack_report(mbhc, &mbhc->button_jack, mbhc->buttons_pressed, - mbhc->buttons_pressed); - - pr_debug("%s: leave\n", __func__); - wcd9xxx_unlock_sleep(mbhc->resmgr->core_res); -} - -static void wcd9xxx_mbhc_insert_work(struct work_struct *work) -{ - struct delayed_work *dwork; - struct wcd9xxx_mbhc *mbhc; - struct snd_soc_codec *codec; - struct wcd9xxx_core_resource *core_res; - - dwork = to_delayed_work(work); - mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_insert_dwork); - codec = mbhc->codec; - core_res = mbhc->resmgr->core_res; - - pr_debug("%s:\n", __func__); - - /* Turn off both HPH and MIC line schmitt triggers */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.mbhc_reg, 0x90, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x13, 0x00); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00); - wcd9xxx_disable_irq_sync(core_res, mbhc->intr_ids->insertion); - wcd9xxx_mbhc_detect_plug_type(mbhc); - wcd9xxx_unlock_sleep(core_res); -} - -static bool wcd9xxx_mbhc_fw_validate(const void *data, size_t size) -{ - u32 cfg_offset; - struct wcd9xxx_mbhc_imped_detect_cfg *imped_cfg; - struct wcd9xxx_mbhc_btn_detect_cfg *btn_cfg; - struct firmware_cal fw; - - fw.data = (void *)data; - fw.size = size; - - if (fw.size < WCD9XXX_MBHC_CAL_MIN_SIZE) - return false; - - /* - * Previous check guarantees that there is enough fw data up - * to num_btn - */ - btn_cfg = WCD9XXX_MBHC_CAL_BTN_DET_PTR(fw.data); - cfg_offset = (u32) ((void *) btn_cfg - (void *) fw.data); - if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_BTN_SZ(btn_cfg))) - return false; - - /* - * Previous check guarantees that there is enough fw data up - * to start of impedance detection configuration - */ - imped_cfg = WCD9XXX_MBHC_CAL_IMPED_DET_PTR(fw.data); - cfg_offset = (u32) ((void *) imped_cfg - (void *) fw.data); - - if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_MIN_SZ)) - return false; - - if (fw.size < (cfg_offset + WCD9XXX_MBHC_CAL_IMPED_SZ(imped_cfg))) - return false; - - return true; -} - -static u16 wcd9xxx_codec_v_sta_dce(struct wcd9xxx_mbhc *mbhc, - enum meas_type dce, s16 vin_mv, - bool cs_enable) -{ - s16 diff, zero; - u32 mb_mv, in; - u16 value; - s16 dce_z; - - mb_mv = mbhc->mbhc_data.micb_mv; - dce_z = mbhc->mbhc_data.dce_z; - - if (mb_mv == 0) { - pr_err("%s: Mic Bias voltage is set to zero\n", __func__); - return -EINVAL; - } - if (cs_enable) { - mb_mv = VDDIO_MICBIAS_MV; - dce_z = mbhc->mbhc_data.dce_nsc_cs_z; - } - - if (dce) { - diff = (mbhc->mbhc_data.dce_mb) - (dce_z); - zero = (dce_z); - } else { - diff = (mbhc->mbhc_data.sta_mb) - (mbhc->mbhc_data.sta_z); - zero = (mbhc->mbhc_data.sta_z); - } - in = (u32) diff * vin_mv; - - value = (u16) (in / mb_mv) + zero; - return value; -} - -static void wcd9xxx_mbhc_calc_thres(struct wcd9xxx_mbhc *mbhc) -{ - struct snd_soc_codec *codec; - s16 adj_v_hs_max; - s16 btn_mv = 0, btn_mv_sta[MBHC_V_IDX_NUM], btn_mv_dce[MBHC_V_IDX_NUM]; - struct wcd9xxx_mbhc_btn_detect_cfg *btn_det; - struct wcd9xxx_mbhc_plug_type_cfg *plug_type; - u16 *btn_high; - int i; - - pr_debug("%s: enter\n", __func__); - codec = mbhc->codec; - btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration); - 
plug_type = WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(mbhc->mbhc_cfg->calibration); - - mbhc->mbhc_data.v_ins_hu[MBHC_V_IDX_CFILT] = - wcd9xxx_codec_v_sta_dce(mbhc, STA, plug_type->v_hs_max, false); - mbhc->mbhc_data.v_ins_h[MBHC_V_IDX_CFILT] = - wcd9xxx_codec_v_sta_dce(mbhc, DCE, plug_type->v_hs_max, false); - - mbhc->mbhc_data.v_inval_ins_low = FAKE_INS_LOW; - mbhc->mbhc_data.v_inval_ins_high = FAKE_INS_HIGH; - - if (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV) { - adj_v_hs_max = scale_v_micb_vddio(mbhc, plug_type->v_hs_max, - true); - mbhc->mbhc_data.v_ins_hu[MBHC_V_IDX_VDDIO] = - wcd9xxx_codec_v_sta_dce(mbhc, STA, adj_v_hs_max, false); - mbhc->mbhc_data.v_ins_h[MBHC_V_IDX_VDDIO] = - wcd9xxx_codec_v_sta_dce(mbhc, DCE, adj_v_hs_max, false); - mbhc->mbhc_data.v_inval_ins_low = - scale_v_micb_vddio(mbhc, mbhc->mbhc_data.v_inval_ins_low, - false); - mbhc->mbhc_data.v_inval_ins_high = - scale_v_micb_vddio(mbhc, mbhc->mbhc_data.v_inval_ins_high, - false); - } - mbhc->mbhc_data.v_cs_ins_h = wcd9xxx_codec_v_sta_dce(mbhc, DCE, - WCD9XXX_V_CS_HS_MAX, - true); - pr_debug("%s: v_ins_h for current source: 0x%x\n", __func__, - mbhc->mbhc_data.v_cs_ins_h); - - btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, - MBHC_BTN_DET_V_BTN_HIGH); - for (i = 0; i < btn_det->num_btn; i++) - btn_mv = btn_high[i] > btn_mv ? btn_high[i] : btn_mv; - - btn_mv_sta[MBHC_V_IDX_CFILT] = btn_mv + btn_det->v_btn_press_delta_sta; - btn_mv_dce[MBHC_V_IDX_CFILT] = btn_mv + btn_det->v_btn_press_delta_cic; - btn_mv_sta[MBHC_V_IDX_VDDIO] = - scale_v_micb_vddio(mbhc, btn_mv_sta[MBHC_V_IDX_CFILT], true); - btn_mv_dce[MBHC_V_IDX_VDDIO] = - scale_v_micb_vddio(mbhc, btn_mv_dce[MBHC_V_IDX_CFILT], true); - - mbhc->mbhc_data.v_b1_hu[MBHC_V_IDX_CFILT] = - wcd9xxx_codec_v_sta_dce(mbhc, STA, btn_mv_sta[MBHC_V_IDX_CFILT], - false); - mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_CFILT] = - wcd9xxx_codec_v_sta_dce(mbhc, DCE, btn_mv_dce[MBHC_V_IDX_CFILT], - false); - mbhc->mbhc_data.v_b1_hu[MBHC_V_IDX_VDDIO] = - wcd9xxx_codec_v_sta_dce(mbhc, STA, btn_mv_sta[MBHC_V_IDX_VDDIO], - false); - mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_VDDIO] = - wcd9xxx_codec_v_sta_dce(mbhc, DCE, btn_mv_dce[MBHC_V_IDX_VDDIO], - false); - - mbhc->mbhc_data.v_brh[MBHC_V_IDX_CFILT] = - mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_CFILT]; - mbhc->mbhc_data.v_brh[MBHC_V_IDX_VDDIO] = - mbhc->mbhc_data.v_b1_h[MBHC_V_IDX_VDDIO]; - - mbhc->mbhc_data.v_brl = BUTTON_MIN; - - mbhc->mbhc_data.v_no_mic = - wcd9xxx_codec_v_sta_dce(mbhc, STA, plug_type->v_no_mic, false); - pr_debug("%s: leave\n", __func__); -} - -static void wcd9xxx_onoff_ext_mclk(struct wcd9xxx_mbhc *mbhc, bool on) -{ - /* - * XXX: {codec}_mclk_enable holds WCD9XXX_BCL_LOCK, - * therefore wcd9xxx_onoff_ext_mclk caller SHOULDN'T hold - * WCD9XXX_BCL_LOCK when it calls wcd9xxx_onoff_ext_mclk() - */ - if (mbhc && mbhc->mbhc_cfg && mbhc->mbhc_cfg->mclk_cb_fn) - mbhc->mbhc_cfg->mclk_cb_fn(mbhc->codec, on, false); -} - -/* - * Mic Bias Enable Decision - * Return true if high_hph_cnt is a power of 2 (!= 2) - * otherwise return false - */ -static bool wcd9xxx_mbhc_enable_mb_decision(int high_hph_cnt) -{ - return (high_hph_cnt > 2) && !(high_hph_cnt & (high_hph_cnt - 1)); -} - -static inline void wcd9xxx_handle_gnd_mic_swap(struct wcd9xxx_mbhc *mbhc, - int pt_gnd_mic_swap_cnt, - enum wcd9xxx_mbhc_plug_type plug_type) -{ - if (mbhc->mbhc_cfg->swap_gnd_mic && - (pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD)) { - /* - * if switch is toggled, check again, - * otherwise report unsupported plug - */ - mbhc->mbhc_cfg->swap_gnd_mic(mbhc->codec); - } else if 
(pt_gnd_mic_swap_cnt >= GND_MIC_SWAP_THRESHOLD) { - /* Report UNSUPPORTED plug - * and continue polling - */ - WCD9XXX_BCL_LOCK(mbhc->resmgr); - if (!mbhc->mbhc_cfg->detect_extn_cable) { - if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) - wcd9xxx_report_plug(mbhc, 0, - SND_JACK_HEADPHONE); - else if (mbhc->current_plug == PLUG_TYPE_HEADSET) - wcd9xxx_report_plug(mbhc, 0, - SND_JACK_HEADSET); - } - if (mbhc->current_plug != plug_type) - wcd9xxx_report_plug(mbhc, 1, - SND_JACK_UNSUPPORTED); - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - } -} - -static void wcd9xxx_correct_swch_plug(struct work_struct *work) -{ - struct wcd9xxx_mbhc *mbhc; - struct snd_soc_codec *codec; - enum wcd9xxx_mbhc_plug_type plug_type = PLUG_TYPE_INVALID; - unsigned long timeout; - int retry = 0, pt_gnd_mic_swap_cnt = 0; - int highhph_cnt = 0; - bool correction = false; - bool current_source_enable; - bool wrk_complete = true, highhph = false; - - pr_debug("%s: enter\n", __func__); - - mbhc = container_of(work, struct wcd9xxx_mbhc, correct_plug_swch); - codec = mbhc->codec; - - current_source_enable = (((mbhc->mbhc_cfg->cs_enable_flags & - (1 << MBHC_CS_ENABLE_POLLING)) != 0) && - (!(snd_soc_read(codec, - mbhc->mbhc_bias_regs.ctl_reg) & 0x80))); - - wcd9xxx_onoff_ext_mclk(mbhc, true); - - /* - * Keep override on during entire plug type correction work. - * - * This is okay under the assumption that any switch irqs which use - * MBHC block cancel and sync this work so override is off again - * prior to switch interrupt handler's MBHC block usage. - * Also while this correction work is running, we can guarantee - * DAPM doesn't use any MBHC block as this work only runs with - * headphone detection. - */ - if (current_source_enable) { - WCD9XXX_BCL_LOCK(mbhc->resmgr); - wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs, - true, false); - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - } else { - wcd9xxx_turn_onoff_override(mbhc, true); - } - - timeout = jiffies + msecs_to_jiffies(HS_DETECT_PLUG_TIME_MS); - while (!time_after(jiffies, timeout)) { - ++retry; - - /* Make sure any pending memory read is completed, before - * hs_detect_work_stop value is read. - */ - rmb(); - if (mbhc->hs_detect_work_stop) { - wrk_complete = false; - pr_debug("%s: stop requested\n", __func__); - break; - } - - msleep(HS_DETECT_PLUG_INERVAL_MS); - if (wcd9xxx_swch_level_remove(mbhc)) { - wrk_complete = false; - pr_debug("%s: Switch level is low\n", __func__); - break; - } - - /* can race with removal interrupt */ - WCD9XXX_BCL_LOCK(mbhc->resmgr); - if (current_source_enable) - plug_type = wcd9xxx_codec_cs_get_plug_type(mbhc, - highhph); - else - plug_type = wcd9xxx_codec_get_plug_type(mbhc, true); - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - - pr_debug("%s: attempt(%d) current_plug(%d) new_plug(%d)\n", - __func__, retry, mbhc->current_plug, plug_type); - - highhph_cnt = (plug_type == PLUG_TYPE_HIGH_HPH) ? 
- (highhph_cnt + 1) : - 0; - highhph = wcd9xxx_mbhc_enable_mb_decision(highhph_cnt); - if (plug_type == PLUG_TYPE_INVALID) { - pr_debug("Invalid plug in attempt # %d\n", retry); - if (!mbhc->mbhc_cfg->detect_extn_cable && - retry == NUM_ATTEMPTS_TO_REPORT && - mbhc->current_plug == PLUG_TYPE_NONE) { - WCD9XXX_BCL_LOCK(mbhc->resmgr); - wcd9xxx_report_plug(mbhc, 1, - SND_JACK_HEADPHONE); - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - } - } else if (plug_type == PLUG_TYPE_HEADPHONE) { - pr_debug("Good headphone detected, continue polling\n"); - WCD9XXX_BCL_LOCK(mbhc->resmgr); - if (mbhc->mbhc_cfg->detect_extn_cable) { - if (mbhc->current_plug != plug_type) - wcd9xxx_report_plug(mbhc, 1, - SND_JACK_HEADPHONE); - } else if (mbhc->current_plug == PLUG_TYPE_NONE) { - wcd9xxx_report_plug(mbhc, 1, - SND_JACK_HEADPHONE); - } - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - } else if (plug_type == PLUG_TYPE_HIGH_HPH) { - pr_debug("%s: High HPH detected, continue polling\n", - __func__); - WCD9XXX_BCL_LOCK(mbhc->resmgr); - if (mbhc->mbhc_cfg->detect_extn_cable) { - if (mbhc->current_plug != plug_type) - wcd9xxx_report_plug(mbhc, 1, - SND_JACK_LINEOUT); - } else if (mbhc->current_plug == PLUG_TYPE_NONE) { - wcd9xxx_report_plug(mbhc, 1, - SND_JACK_HEADPHONE); - } - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - } else { - if (plug_type == PLUG_TYPE_GND_MIC_SWAP) { - pt_gnd_mic_swap_cnt++; - if (pt_gnd_mic_swap_cnt >= - GND_MIC_SWAP_THRESHOLD) - wcd9xxx_handle_gnd_mic_swap(mbhc, - pt_gnd_mic_swap_cnt, - plug_type); - pr_debug("%s: unsupported HS detected, continue polling\n", - __func__); - continue; - } else { - pt_gnd_mic_swap_cnt = 0; - - WCD9XXX_BCL_LOCK(mbhc->resmgr); - /* Turn off override/current source */ - if (current_source_enable) - wcd9xxx_turn_onoff_current_source(mbhc, - &mbhc->mbhc_bias_regs, - false, false); - else - wcd9xxx_turn_onoff_override(mbhc, - false); - /* - * The valid plug also includes - * PLUG_TYPE_GND_MIC_SWAP - */ - wcd9xxx_find_plug_and_report(mbhc, plug_type); - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - pr_debug("Attempt %d found correct plug %d\n", - retry, - plug_type); - correction = true; - } - break; - } - } - - highhph = false; - if (wrk_complete && plug_type == PLUG_TYPE_HIGH_HPH) { - pr_debug("%s: polling is done, still HPH, so enabling MIC trigger\n", - __func__); - WCD9XXX_BCL_LOCK(mbhc->resmgr); - wcd9xxx_find_plug_and_report(mbhc, plug_type); - highhph = true; - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - } - - if (plug_type == PLUG_TYPE_HEADPHONE) { - if (mbhc->mbhc_cb && mbhc->mbhc_cb->hph_auto_pulldown_ctrl) - mbhc->mbhc_cb->hph_auto_pulldown_ctrl(codec, true); - } - - if (!correction && current_source_enable) { - WCD9XXX_BCL_LOCK(mbhc->resmgr); - wcd9xxx_turn_onoff_current_source(mbhc, &mbhc->mbhc_bias_regs, - false, highhph); - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - } else if (!correction) { - wcd9xxx_turn_onoff_override(mbhc, false); - } - - wcd9xxx_onoff_ext_mclk(mbhc, false); - - if (mbhc->mbhc_cfg->detect_extn_cable) { - WCD9XXX_BCL_LOCK(mbhc->resmgr); - if ((mbhc->current_plug == PLUG_TYPE_HEADPHONE && - wrk_complete) || - mbhc->current_plug == PLUG_TYPE_GND_MIC_SWAP || - mbhc->current_plug == PLUG_TYPE_INVALID || - (plug_type == PLUG_TYPE_INVALID && wrk_complete)) { - /* Enable removal detection */ - wcd9xxx_cleanup_hs_polling(mbhc); - wcd9xxx_enable_hs_detect(mbhc, 0, 0, false); - } - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - } - pr_debug("%s: leave current_plug(%d)\n", __func__, mbhc->current_plug); - /* unlock sleep */ - wcd9xxx_unlock_sleep(mbhc->resmgr->core_res); -} - -static void 
wcd9xxx_swch_irq_handler(struct wcd9xxx_mbhc *mbhc) -{ - bool insert; - bool is_removed = false; - struct snd_soc_codec *codec = mbhc->codec; - - pr_debug("%s: enter\n", __func__); - - mbhc->in_swch_irq_handler = true; - /* Wait here for debounce time */ - usleep_range(SWCH_IRQ_DEBOUNCE_TIME_US, SWCH_IRQ_DEBOUNCE_TIME_US + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - - WCD9XXX_BCL_LOCK(mbhc->resmgr); - - /* cancel pending button press */ - if (wcd9xxx_cancel_btn_work(mbhc)) - pr_debug("%s: button press is canceled\n", __func__); - - insert = !wcd9xxx_swch_level_remove(mbhc); - pr_debug("%s: Current plug type %d, insert %d\n", __func__, - mbhc->current_plug, insert); - if ((mbhc->current_plug == PLUG_TYPE_NONE) && insert) { - - mbhc->lpi_enabled = false; - - /* Make sure mbhc state update complete before cancel detect - * plug. - */ - wmb(); - /* cancel detect plug */ - wcd9xxx_cancel_hs_detect_plug(mbhc, - &mbhc->correct_plug_swch); - - if ((mbhc->current_plug != PLUG_TYPE_NONE) && - (mbhc->current_plug != PLUG_TYPE_HIGH_HPH) && - !(snd_soc_read(codec, WCD9XXX_A_MBHC_INSERT_DETECT) & - (1 << 1))) { - pr_debug("%s: current plug: %d\n", __func__, - mbhc->current_plug); - goto exit; - } - - /* Disable Mic Bias pull down and HPH Switch to GND */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, - 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, 0x00); - wcd9xxx_mbhc_detect_plug_type(mbhc); - } else if ((mbhc->current_plug != PLUG_TYPE_NONE) && !insert) { - mbhc->lpi_enabled = false; - - /* Make sure mbhc state update complete before cancel detect - * plug. - */ - wmb(); - /* cancel detect plug */ - wcd9xxx_cancel_hs_detect_plug(mbhc, - &mbhc->correct_plug_swch); - - if (mbhc->current_plug == PLUG_TYPE_HEADPHONE) { - wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADPHONE); - is_removed = true; - } else if (mbhc->current_plug == PLUG_TYPE_GND_MIC_SWAP) { - wcd9xxx_report_plug(mbhc, 0, SND_JACK_UNSUPPORTED); - is_removed = true; - } else if (mbhc->current_plug == PLUG_TYPE_HEADSET) { - wcd9xxx_pause_hs_polling(mbhc); - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false); - wcd9xxx_cleanup_hs_polling(mbhc); - wcd9xxx_report_plug(mbhc, 0, SND_JACK_HEADSET); - is_removed = true; - } else if (mbhc->current_plug == PLUG_TYPE_HIGH_HPH) { - wcd9xxx_report_plug(mbhc, 0, SND_JACK_LINEOUT); - is_removed = true; - } else if (mbhc->current_plug == PLUG_TYPE_ANC_HEADPHONE) { - wcd9xxx_pause_hs_polling(mbhc); - wcd9xxx_mbhc_ctrl_clk_bandgap(mbhc, false); - wcd9xxx_cleanup_hs_polling(mbhc); - wcd9xxx_report_plug(mbhc, 0, SND_JACK_ANC_HEADPHONE); - is_removed = true; - } - - if (is_removed) { - snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, - 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, - 0x02, 0x00); - - /* Enable Mic Bias pull down and HPH Switch to GND */ - snd_soc_update_bits(codec, - mbhc->mbhc_bias_regs.ctl_reg, 0x01, - 0x01); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, - 0x01); - /* Make sure mic trigger is turned off */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, - 0x01, 0x01); - snd_soc_update_bits(codec, - mbhc->mbhc_bias_regs.mbhc_reg, - 0x90, 0x00); - /* Reset MBHC State Machine */ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, - 0x08, 0x08); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, - 0x08, 0x00); - /* Turn off override */ - wcd9xxx_turn_onoff_override(mbhc, false); - } - } -exit: - mbhc->in_swch_irq_handler = false; - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - pr_debug("%s: leave\n", __func__); -} - -static irqreturn_t 
wcd9xxx_mech_plug_detect_irq(int irq, void *data) -{ - int r = IRQ_HANDLED; - struct wcd9xxx_mbhc *mbhc = data; - - pr_debug("%s: enter\n", __func__); - if (unlikely(wcd9xxx_lock_sleep(mbhc->resmgr->core_res) == false)) { - pr_warn("%s: failed to hold suspend\n", __func__); - r = IRQ_NONE; - } else { - /* Call handler */ - wcd9xxx_swch_irq_handler(mbhc); - wcd9xxx_unlock_sleep(mbhc->resmgr->core_res); - } - - pr_debug("%s: leave %d\n", __func__, r); - return r; -} - -static int wcd9xxx_is_false_press(struct wcd9xxx_mbhc *mbhc) -{ - s16 mb_v; - int i = 0; - int r = 0; - const s16 v_ins_hu = - wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_HU); - const s16 v_ins_h = - wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_H); - const s16 v_b1_hu = - wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU); - const s16 v_b1_h = - wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H); - const unsigned long timeout = - jiffies + msecs_to_jiffies(BTN_RELEASE_DEBOUNCE_TIME_MS); - - while (time_before(jiffies, timeout)) { - /* - * This function needs to run measurements just few times during - * release debounce time. Make 1ms interval to avoid - * unnecessary excessive measurements. - */ - usleep_range(1000, 1000 + WCD9XXX_USLEEP_RANGE_MARGIN_US); - if (i == 0) { - mb_v = wcd9xxx_codec_sta_dce(mbhc, 0, true); - pr_debug("%s: STA[0]: %d,%d\n", __func__, mb_v, - wcd9xxx_codec_sta_dce_v(mbhc, 0, mb_v)); - if (mb_v < v_b1_hu || mb_v > v_ins_hu) { - r = 1; - break; - } - } else { - mb_v = wcd9xxx_codec_sta_dce(mbhc, 1, true); - pr_debug("%s: DCE[%d]: %d,%d\n", __func__, i, mb_v, - wcd9xxx_codec_sta_dce_v(mbhc, 1, mb_v)); - if (mb_v < v_b1_h || mb_v > v_ins_h) { - r = 1; - break; - } - } - i++; - } - - return r; -} - -/* called under codec_resource_lock acquisition */ -static int wcd9xxx_determine_button(const struct wcd9xxx_mbhc *mbhc, - const s32 micmv) -{ - s16 *v_btn_low, *v_btn_high; - struct wcd9xxx_mbhc_btn_detect_cfg *btn_det; - int i, btn = -1; - - btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration); - v_btn_low = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, - MBHC_BTN_DET_V_BTN_LOW); - v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, - MBHC_BTN_DET_V_BTN_HIGH); - - for (i = 0; i < btn_det->num_btn; i++) { - if ((v_btn_low[i] <= micmv) && (v_btn_high[i] >= micmv)) { - btn = i; - break; - } - } - - if (btn == -1) - pr_debug("%s: couldn't find button number for mic mv %d\n", - __func__, micmv); - - return btn; -} - -static int wcd9xxx_get_button_mask(const int btn) -{ - int mask = 0; - - switch (btn) { - case 0: - mask = SND_JACK_BTN_0; - break; - case 1: - mask = SND_JACK_BTN_1; - break; - case 2: - mask = SND_JACK_BTN_2; - break; - case 3: - mask = SND_JACK_BTN_3; - break; - case 4: - mask = SND_JACK_BTN_4; - break; - case 5: - mask = SND_JACK_BTN_5; - break; - } - return mask; -} - -static void wcd9xxx_get_z(struct wcd9xxx_mbhc *mbhc, s16 *dce_z, s16 *sta_z, - struct mbhc_micbias_regs *micb_regs, - bool norel_detection) -{ - s16 reg0, reg1; - int change; - struct snd_soc_codec *codec = mbhc->codec; - - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - /* Pull down micbias to ground and disconnect vddio switch */ - reg0 = snd_soc_read(codec, micb_regs->ctl_reg); - snd_soc_update_bits(codec, micb_regs->ctl_reg, 0x81, 0x1); - reg1 = snd_soc_read(codec, micb_regs->mbhc_reg); - snd_soc_update_bits(codec, micb_regs->mbhc_reg, 1 << 7, 0); - - /* Disconnect override from micbias */ - change = snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4, - 1 << 0); - usleep_range(1000, 1000 + 1000); - if 
(sta_z) { - *sta_z = wcd9xxx_codec_sta_dce(mbhc, 0, norel_detection); - pr_debug("%s: sta_z 0x%x\n", __func__, *sta_z & 0xFFFF); - } - if (dce_z) { - *dce_z = wcd9xxx_codec_sta_dce(mbhc, 1, norel_detection); - pr_debug("%s: dce_z 0x%x\n", __func__, *dce_z & 0xFFFF); - } - - /* Connect override from micbias */ - if (change) - snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4, - 1 << 4); - /* Disable pull down micbias to ground */ - snd_soc_write(codec, micb_regs->mbhc_reg, reg1); - snd_soc_write(codec, micb_regs->ctl_reg, reg0); -} - -/* - * This function recalibrates dce_z and sta_z parameters. - * No release detection will be false when this function is - * used. - */ -void wcd9xxx_update_z(struct wcd9xxx_mbhc *mbhc) -{ - const u16 sta_z = mbhc->mbhc_data.sta_z; - const u16 dce_z = mbhc->mbhc_data.dce_z; - - wcd9xxx_get_z(mbhc, &mbhc->mbhc_data.dce_z, &mbhc->mbhc_data.sta_z, - &mbhc->mbhc_bias_regs, false); - pr_debug("%s: sta_z 0x%x,dce_z 0x%x -> sta_z 0x%x,dce_z 0x%x\n", - __func__, sta_z & 0xFFFF, dce_z & 0xFFFF, - mbhc->mbhc_data.sta_z & 0xFFFF, - mbhc->mbhc_data.dce_z & 0xFFFF); - - wcd9xxx_mbhc_calc_thres(mbhc); - wcd9xxx_calibrate_hs_polling(mbhc); -} - -/* - * wcd9xxx_update_rel_threshold : update mbhc release upper bound threshold - * to ceilmv + buffer - */ -static int wcd9xxx_update_rel_threshold(struct wcd9xxx_mbhc *mbhc, int ceilmv, - bool vddio) -{ - u16 v_brh, v_b1_hu; - int mv; - struct wcd9xxx_mbhc_btn_detect_cfg *btn_det; - void *calibration = mbhc->mbhc_cfg->calibration; - struct snd_soc_codec *codec = mbhc->codec; - - btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration); - mv = ceilmv + btn_det->v_btn_press_delta_cic; - if (vddio) - mv = scale_v_micb_vddio(mbhc, mv, true); - pr_debug("%s: reprogram vb1hu/vbrh to %dmv\n", __func__, mv); - - if (mbhc->mbhc_state != MBHC_STATE_POTENTIAL_RECOVERY) { - /* - * update LSB first so mbhc hardware block - * doesn't see too low value. 
- */ - v_b1_hu = wcd9xxx_codec_v_sta_dce(mbhc, STA, mv, false); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B3_CTL, v_b1_hu & - 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B4_CTL, - (v_b1_hu >> 8) & 0xFF); - v_brh = wcd9xxx_codec_v_sta_dce(mbhc, DCE, mv, false); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B9_CTL, v_brh & - 0xFF); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_VOLT_B10_CTL, - (v_brh >> 8) & 0xFF); - } - return 0; -} - -irqreturn_t wcd9xxx_dce_handler(int irq, void *data) -{ - int i, mask; - bool vddio; - u8 mbhc_status; - s16 dce_z, sta_z; - s32 stamv, stamv_s; - s16 *v_btn_high; - struct wcd9xxx_mbhc_btn_detect_cfg *btn_det; - int btn = -1, meas = 0; - struct wcd9xxx_mbhc *mbhc = data; - const struct wcd9xxx_mbhc_btn_detect_cfg *d = - WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration); - short btnmeas[d->n_btn_meas + 1]; - short dce[d->n_btn_meas + 1], sta; - s32 mv[d->n_btn_meas + 1], mv_s[d->n_btn_meas + 1]; - struct snd_soc_codec *codec = mbhc->codec; - struct wcd9xxx_core_resource *core_res = mbhc->resmgr->core_res; - int n_btn_meas = d->n_btn_meas; - void *calibration = mbhc->mbhc_cfg->calibration; - - pr_debug("%s: enter\n", __func__); - - WCD9XXX_BCL_LOCK(mbhc->resmgr); - mutex_lock(&mbhc->mbhc_lock); - mbhc_status = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_STATUS) & 0x3E; - - if (mbhc->mbhc_state == MBHC_STATE_POTENTIAL_RECOVERY) { - pr_debug("%s: mbhc is being recovered, skip button press\n", - __func__); - goto done; - } - - mbhc->mbhc_state = MBHC_STATE_POTENTIAL; - - if (!mbhc->polling_active) { - pr_warn("%s: mbhc polling is not active, skip button press\n", - __func__); - goto done; - } - - /* If switch nterrupt already kicked in, ignore button press */ - if (mbhc->in_swch_irq_handler) { - pr_debug("%s: Swtich level changed, ignore button press\n", - __func__); - btn = -1; - goto done; - } - - /* - * setup internal micbias if codec uses internal micbias for - * headset detection - */ - if (mbhc->mbhc_cfg->use_int_rbias) { - if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias) - mbhc->mbhc_cb->setup_int_rbias(codec, true); - else - pr_err("%s: internal bias requested but codec did not provide callback\n", - __func__); - } - - - /* Measure scaled HW DCE */ - vddio = (mbhc->mbhc_data.micb_mv != VDDIO_MICBIAS_MV && - mbhc->mbhc_micbias_switched); - - dce_z = mbhc->mbhc_data.dce_z; - sta_z = mbhc->mbhc_data.sta_z; - - /* Measure scaled HW STA */ - dce[0] = wcd9xxx_read_dce_result(codec); - sta = wcd9xxx_read_sta_result(codec); - if (mbhc_status != STATUS_REL_DETECTION) { - if (mbhc->mbhc_last_resume && - !time_after(jiffies, mbhc->mbhc_last_resume + HZ)) { - pr_debug("%s: Button is released after resume\n", - __func__); - n_btn_meas = 0; - } else { - pr_debug("%s: Button is released without resume", - __func__); - if (mbhc->update_z) { - wcd9xxx_update_z(mbhc); - dce_z = mbhc->mbhc_data.dce_z; - sta_z = mbhc->mbhc_data.sta_z; - mbhc->update_z = true; - } - stamv = __wcd9xxx_codec_sta_dce_v(mbhc, 0, sta, sta_z, - mbhc->mbhc_data.micb_mv); - if (vddio) - stamv_s = scale_v_micb_vddio(mbhc, stamv, - false); - else - stamv_s = stamv; - mv[0] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[0], - dce_z, mbhc->mbhc_data.micb_mv); - mv_s[0] = vddio ? 
scale_v_micb_vddio(mbhc, mv[0], - false) : mv[0]; - btn = wcd9xxx_determine_button(mbhc, mv_s[0]); - if (btn != wcd9xxx_determine_button(mbhc, stamv_s)) - btn = -1; - goto done; - } - } - - for (meas = 1; ((d->n_btn_meas) && (meas < (d->n_btn_meas + 1))); - meas++) - dce[meas] = wcd9xxx_codec_sta_dce(mbhc, 1, false); - - if (mbhc->update_z) { - wcd9xxx_update_z(mbhc); - dce_z = mbhc->mbhc_data.dce_z; - sta_z = mbhc->mbhc_data.sta_z; - mbhc->update_z = true; - } - - stamv = __wcd9xxx_codec_sta_dce_v(mbhc, 0, sta, sta_z, - mbhc->mbhc_data.micb_mv); - if (vddio) - stamv_s = scale_v_micb_vddio(mbhc, stamv, false); - else - stamv_s = stamv; - pr_debug("%s: Meas HW - STA 0x%x,%d,%d\n", __func__, - sta & 0xFFFF, stamv, stamv_s); - - /* determine pressed button */ - mv[0] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[0], dce_z, - mbhc->mbhc_data.micb_mv); - mv_s[0] = vddio ? scale_v_micb_vddio(mbhc, mv[0], false) : mv[0]; - btnmeas[0] = wcd9xxx_determine_button(mbhc, mv_s[0]); - pr_debug("%s: Meas HW - DCE 0x%x,%d,%d button %d\n", __func__, - dce[0] & 0xFFFF, mv[0], mv_s[0], btnmeas[0]); - if (n_btn_meas == 0) - btn = btnmeas[0]; - for (meas = 1; (n_btn_meas && d->n_btn_meas && - (meas < (d->n_btn_meas + 1))); meas++) { - mv[meas] = __wcd9xxx_codec_sta_dce_v(mbhc, 1, dce[meas], dce_z, - mbhc->mbhc_data.micb_mv); - mv_s[meas] = vddio ? scale_v_micb_vddio(mbhc, mv[meas], false) : - mv[meas]; - btnmeas[meas] = wcd9xxx_determine_button(mbhc, mv_s[meas]); - pr_debug("%s: Meas %d - DCE 0x%x,%d,%d button %d\n", - __func__, meas, dce[meas] & 0xFFFF, mv[meas], - mv_s[meas], btnmeas[meas]); - /* - * if large enough measurements are collected, - * start to check if last all n_btn_con measurements were - * in same button low/high range - */ - if (meas + 1 >= d->n_btn_con) { - for (i = 0; i < d->n_btn_con; i++) - if ((btnmeas[meas] < 0) || - (btnmeas[meas] != btnmeas[meas - i])) - break; - if (i == d->n_btn_con) { - /* button pressed */ - btn = btnmeas[meas]; - break; - } else if ((n_btn_meas - meas) < (d->n_btn_con - 1)) { - /* - * if left measurements are less than n_btn_con, - * it's impossible to find button number - */ - break; - } - } - } - - if (btn >= 0) { - if (mbhc->in_swch_irq_handler) { - pr_debug( - "%s: Switch irq triggered, ignore button press\n", - __func__); - goto done; - } - btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration); - v_btn_high = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, - MBHC_BTN_DET_V_BTN_HIGH); - WARN_ON(btn >= btn_det->num_btn); - /* reprogram release threshold to catch voltage ramp up early */ - wcd9xxx_update_rel_threshold(mbhc, v_btn_high[btn], vddio); - - mask = wcd9xxx_get_button_mask(btn); - mbhc->buttons_pressed |= mask; - wcd9xxx_lock_sleep(core_res); - if (schedule_delayed_work(&mbhc->mbhc_btn_dwork, - msecs_to_jiffies(400)) == 0) { - WARN(1, "Button pressed twice without release event\n"); - wcd9xxx_unlock_sleep(core_res); - } - } else { - pr_debug("%s: bogus button press, too short press?\n", - __func__); - } - - done: - pr_debug("%s: leave\n", __func__); - mutex_unlock(&mbhc->mbhc_lock); - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - return IRQ_HANDLED; -} - -static irqreturn_t wcd9xxx_release_handler(int irq, void *data) -{ - int ret; - bool waitdebounce = true; - struct wcd9xxx_mbhc *mbhc = data; - - pr_debug("%s: enter\n", __func__); - WCD9XXX_BCL_LOCK(mbhc->resmgr); - mbhc->mbhc_state = MBHC_STATE_RELEASE; - - if (mbhc->buttons_pressed & WCD9XXX_JACK_BUTTON_MASK) { - ret = wcd9xxx_cancel_btn_work(mbhc); - if (ret == 0) { - pr_debug("%s: Reporting long button release event\n", - 
__func__); - wcd9xxx_jack_report(mbhc, &mbhc->button_jack, 0, - mbhc->buttons_pressed); - } else { - if (wcd9xxx_is_false_press(mbhc)) { - pr_debug("%s: Fake button press interrupt\n", - __func__); - } else { - if (mbhc->in_swch_irq_handler) { - pr_debug("%s: Switch irq kicked in, ignore\n", - __func__); - } else { - pr_debug("%s: Reporting btn press\n", - __func__); - wcd9xxx_jack_report(mbhc, - &mbhc->button_jack, - mbhc->buttons_pressed, - mbhc->buttons_pressed); - pr_debug("%s: Reporting btn release\n", - __func__); - wcd9xxx_jack_report(mbhc, - &mbhc->button_jack, - 0, mbhc->buttons_pressed); - waitdebounce = false; - } - } - } - - mbhc->buttons_pressed &= ~WCD9XXX_JACK_BUTTON_MASK; - } - - wcd9xxx_calibrate_hs_polling(mbhc); - - if (waitdebounce) - msleep(SWCH_REL_DEBOUNCE_TIME_MS); - wcd9xxx_start_hs_polling(mbhc); - - pr_debug("%s: leave\n", __func__); - WCD9XXX_BCL_UNLOCK(mbhc->resmgr); - return IRQ_HANDLED; -} - -static irqreturn_t wcd9xxx_hphl_ocp_irq(int irq, void *data) -{ - struct wcd9xxx_mbhc *mbhc = data; - struct snd_soc_codec *codec; - - pr_info("%s: received HPHL OCP irq\n", __func__); - - if (mbhc) { - codec = mbhc->codec; - if ((mbhc->hphlocp_cnt < OCP_ATTEMPT) && - (!mbhc->hphrocp_cnt)) { - pr_info("%s: retry\n", __func__); - mbhc->hphlocp_cnt++; - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, - 0x10, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, - 0x10, 0x10); - } else { - wcd9xxx_disable_irq(mbhc->resmgr->core_res, - mbhc->intr_ids->hph_left_ocp); - mbhc->hph_status |= SND_JACK_OC_HPHL; - wcd9xxx_jack_report(mbhc, &mbhc->headset_jack, - mbhc->hph_status, - WCD9XXX_JACK_MASK); - } - } else { - pr_err("%s: Bad wcd9xxx private data\n", __func__); - } - - return IRQ_HANDLED; -} - -static irqreturn_t wcd9xxx_hphr_ocp_irq(int irq, void *data) -{ - struct wcd9xxx_mbhc *mbhc = data; - struct snd_soc_codec *codec; - - pr_info("%s: received HPHR OCP irq\n", __func__); - codec = mbhc->codec; - if ((mbhc->hphrocp_cnt < OCP_ATTEMPT) && - (!mbhc->hphlocp_cnt)) { - pr_info("%s: retry\n", __func__); - mbhc->hphrocp_cnt++; - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10, - 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10, - 0x10); - } else { - wcd9xxx_disable_irq(mbhc->resmgr->core_res, - mbhc->intr_ids->hph_right_ocp); - mbhc->hph_status |= SND_JACK_OC_HPHR; - wcd9xxx_jack_report(mbhc, &mbhc->headset_jack, - mbhc->hph_status, WCD9XXX_JACK_MASK); - } - - return IRQ_HANDLED; -} - -static int wcd9xxx_acdb_mclk_index(const int rate) -{ - if (rate == MCLK_RATE_12288KHZ) - return 0; - else if (rate == MCLK_RATE_9600KHZ) - return 1; - else { - BUG_ON(1); - return -EINVAL; - } -} - -static void wcd9xxx_update_mbhc_clk_rate(struct wcd9xxx_mbhc *mbhc, u32 rate) -{ - u32 dce_wait, sta_wait; - u8 ncic, nmeas, navg; - void *calibration; - u8 *n_cic, *n_ready; - struct wcd9xxx_mbhc_btn_detect_cfg *btn_det; - u8 npoll = 4, nbounce_wait = 30; - struct snd_soc_codec *codec = mbhc->codec; - int idx = wcd9xxx_acdb_mclk_index(rate); - int idxmclk = wcd9xxx_acdb_mclk_index(mbhc->mbhc_cfg->mclk_rate); - - pr_debug("%s: Updating clock rate dependents, rate = %u\n", __func__, - rate); - calibration = mbhc->mbhc_cfg->calibration; - - /* - * First compute the DCE / STA wait times depending on tunable - * parameters. 
The value is computed in microseconds - */ - btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration); - n_ready = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_N_READY); - n_cic = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, MBHC_BTN_DET_N_CIC); - nmeas = WCD9XXX_MBHC_CAL_BTN_DET_PTR(calibration)->n_meas; - navg = WCD9XXX_MBHC_CAL_GENERAL_PTR(calibration)->mbhc_navg; - - /* ncic stays with the same what we had during calibration */ - ncic = n_cic[idxmclk]; - dce_wait = (1000 * 512 * ncic * (nmeas + 1)) / (rate / 1000); - sta_wait = (1000 * 128 * (navg + 1)) / (rate / 1000); - mbhc->mbhc_data.t_dce = dce_wait; - /* give extra margin to sta for safety */ - mbhc->mbhc_data.t_sta = sta_wait + 250; - mbhc->mbhc_data.t_sta_dce = ((1000 * 256) / (rate / 1000) * - n_ready[idx]) + 10; - - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B1_CTL, n_ready[idx]); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B6_CTL, ncic); - - if (rate == MCLK_RATE_12288KHZ) { - npoll = 4; - nbounce_wait = 30; - } else if (rate == MCLK_RATE_9600KHZ) { - npoll = 3; - nbounce_wait = 23; - } - - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B2_CTL, npoll); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B3_CTL, nbounce_wait); - pr_debug("%s: leave\n", __func__); -} - -static void wcd9xxx_mbhc_cal(struct wcd9xxx_mbhc *mbhc) -{ - u8 cfilt_mode; - u16 reg0, reg1, reg2; - struct snd_soc_codec *codec = mbhc->codec; - - pr_debug("%s: enter\n", __func__); - wcd9xxx_disable_irq(mbhc->resmgr->core_res, - mbhc->intr_ids->dce_est_complete); - wcd9xxx_turn_onoff_rel_detection(codec, false); - - /* t_dce and t_sta are updated by wcd9xxx_update_mbhc_clk_rate() */ - WARN_ON(!mbhc->mbhc_data.t_dce); - WARN_ON(!mbhc->mbhc_data.t_sta); - - /* - * LDOH and CFILT are already configured during pdata handling. - * Only need to make sure CFILT and bandgap are in Fast mode. - * Need to restore defaults once calculation is done. - * - * In case when Micbias is powered by external source, request - * turn on the external voltage source for Calibration. 
- */ - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source) - mbhc->mbhc_cb->enable_mb_source(codec, true, false); - - cfilt_mode = snd_soc_read(codec, mbhc->mbhc_bias_regs.cfilt_ctl); - if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode) - mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc); - else - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl, - 0x40, 0x00); - - if (mbhc->mbhc_cb && mbhc->mbhc_cb->micbias_pulldown_ctrl) - mbhc->mbhc_cb->micbias_pulldown_ctrl(mbhc, false); - - /* - * Micbias, CFILT, LDOH, MBHC MUX mode settings - * to perform ADC calibration - */ - if (mbhc->mbhc_cb && mbhc->mbhc_cb->select_cfilt) - mbhc->mbhc_cb->select_cfilt(codec, mbhc); - else - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x60, - mbhc->mbhc_cfg->micbias << 5); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_LDO_H_MODE_1, 0x60, 0x60); - snd_soc_write(codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, 0x78); - if (mbhc->mbhc_cb && mbhc->mbhc_cb->codec_specific_cal) - mbhc->mbhc_cb->codec_specific_cal(codec, mbhc); - else - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, - 0x04, 0x04); - - /* Pull down micbias to ground */ - reg0 = snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 1, 1); - /* Disconnect override from micbias */ - reg1 = snd_soc_read(codec, WCD9XXX_A_MAD_ANA_CTRL); - snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, 1 << 4, 1 << 0); - /* Connect the MUX to micbias */ - snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02); - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block) - mbhc->mbhc_cb->enable_mux_bias_block(codec); - else - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, - 0x80, 0x80); - /* - * Hardware that has external cap can delay mic bias ramping down up - * to 50ms. - */ - msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS); - /* DCE measurement for 0 voltage */ - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02); - mbhc->mbhc_data.dce_z = __wcd9xxx_codec_sta_dce(mbhc, 1, true, false); - - /* compute dce_z for current source */ - reg2 = snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78, - WCD9XXX_MBHC_NSC_CS << 3); - - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02); - mbhc->mbhc_data.dce_nsc_cs_z = __wcd9xxx_codec_sta_dce(mbhc, 1, true, - false); - pr_debug("%s: dce_z with nsc cs: 0x%x\n", __func__, - mbhc->mbhc_data.dce_nsc_cs_z); - - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, reg2); - - /* STA measurement for 0 voltage */ - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02); - mbhc->mbhc_data.sta_z = __wcd9xxx_codec_sta_dce(mbhc, 0, true, false); - - /* Restore registers */ - snd_soc_write(codec, mbhc->mbhc_bias_regs.ctl_reg, reg0); - snd_soc_write(codec, WCD9XXX_A_MAD_ANA_CTRL, reg1); - - /* DCE measurment for MB voltage */ - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02); - snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02); - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block) - mbhc->mbhc_cb->enable_mux_bias_block(codec); - else - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, - 0x80, 0x80); - /* - * Hardware that has external cap can delay mic bias ramping down up - * to 50ms. 
- */ - msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x04); - usleep_range(mbhc->mbhc_data.t_dce, mbhc->mbhc_data.t_dce + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - mbhc->mbhc_data.dce_mb = wcd9xxx_read_dce_result(codec); - - /* STA Measurement for MB Voltage */ - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x0A); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_CLK_CTL, 0x02); - snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x02); - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block) - mbhc->mbhc_cb->enable_mux_bias_block(codec); - else - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, - 0x80, 0x80); - /* - * Hardware that has external cap can delay mic bias ramping down up - * to 50ms. - */ - msleep(WCD9XXX_MUX_SWITCH_READY_WAIT_MS); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x02); - usleep_range(mbhc->mbhc_data.t_sta, mbhc->mbhc_data.t_sta + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - mbhc->mbhc_data.sta_mb = wcd9xxx_read_sta_result(codec); - - /* Restore default settings. */ - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x04, 0x00); - snd_soc_write(codec, mbhc->mbhc_bias_regs.cfilt_ctl, cfilt_mode); - snd_soc_write(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0x04); - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mux_bias_block) - mbhc->mbhc_cb->enable_mux_bias_block(codec); - else - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, - 0x80, 0x80); - usleep_range(100, 110); - - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mb_source) - mbhc->mbhc_cb->enable_mb_source(codec, false, false); - - if (mbhc->mbhc_cb && mbhc->mbhc_cb->micbias_pulldown_ctrl) - mbhc->mbhc_cb->micbias_pulldown_ctrl(mbhc, true); - - wcd9xxx_enable_irq(mbhc->resmgr->core_res, - mbhc->intr_ids->dce_est_complete); - wcd9xxx_turn_onoff_rel_detection(codec, true); - - pr_debug("%s: leave\n", __func__); -} - -static void wcd9xxx_mbhc_setup(struct wcd9xxx_mbhc *mbhc) -{ - int n; - u8 *gain; - struct wcd9xxx_mbhc_general_cfg *generic; - struct wcd9xxx_mbhc_btn_detect_cfg *btn_det; - struct snd_soc_codec *codec = mbhc->codec; - const int idx = wcd9xxx_acdb_mclk_index(mbhc->mbhc_cfg->mclk_rate); - - pr_debug("%s: enter\n", __func__); - generic = WCD9XXX_MBHC_CAL_GENERAL_PTR(mbhc->mbhc_cfg->calibration); - btn_det = WCD9XXX_MBHC_CAL_BTN_DET_PTR(mbhc->mbhc_cfg->calibration); - - for (n = 0; n < 8; n++) { - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_FIR_B1_CFG, - 0x07, n); - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_FIR_B2_CFG, - btn_det->c[n]); - } - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B2_CTL, 0x07, - btn_det->nc); - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0x70, - generic->mbhc_nsa << 4); - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_TIMER_B4_CTL, 0x0F, - btn_det->n_meas); - - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_TIMER_B5_CTL, - generic->mbhc_navg); - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x80, 0x80); - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x78, - btn_det->mbhc_nsc << 3); - - if (mbhc->mbhc_cb && mbhc->mbhc_cb->get_cdc_type && - mbhc->mbhc_cb->get_cdc_type() != - WCD9XXX_CDC_TYPE_HELICON) { - if (mbhc->resmgr->reg_addr->micb_4_mbhc) - snd_soc_update_bits(codec, - mbhc->resmgr->reg_addr->micb_4_mbhc, - 0x03, MBHC_MICBIAS2); - } - - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B1_CTL, 0x02, 0x02); - - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xF0, 0xF0); - - gain = wcd9xxx_mbhc_cal_btn_det_mp(btn_det, 
MBHC_BTN_DET_GAIN); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_MBHC_B2_CTL, 0x78, - gain[idx] << 3); - snd_soc_update_bits(codec, WCD9XXX_A_MICB_2_MBHC, 0x04, 0x04); - - pr_debug("%s: leave\n", __func__); -} - -static int wcd9xxx_setup_jack_detect_irq(struct wcd9xxx_mbhc *mbhc) -{ - int ret = 0; - void *core_res = mbhc->resmgr->core_res; - - if (mbhc->mbhc_cfg->gpio) { - ret = request_threaded_irq(mbhc->mbhc_cfg->gpio_irq, NULL, - wcd9xxx_mech_plug_detect_irq, - (IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING), - "headset detect", mbhc); - if (ret) { - pr_err("%s: Failed to request gpio irq %d\n", __func__, - mbhc->mbhc_cfg->gpio_irq); - } else { - ret = enable_irq_wake(mbhc->mbhc_cfg->gpio_irq); - if (ret) - pr_err("%s: Failed to enable wake up irq %d\n", - __func__, mbhc->mbhc_cfg->gpio_irq); - } - } else if (mbhc->mbhc_cfg->insert_detect) { - /* Enable HPHL_10K_SW */ - snd_soc_update_bits(mbhc->codec, WCD9XXX_A_RX_HPH_OCP_CTL, - 1 << 1, 1 << 1); - - ret = wcd9xxx_request_irq(core_res, - mbhc->intr_ids->hs_jack_switch, - wcd9xxx_mech_plug_detect_irq, - "Jack Detect", - mbhc); - if (ret) - pr_err("%s: Failed to request insert detect irq %d\n", - __func__, mbhc->intr_ids->hs_jack_switch); - } - - return ret; -} - -static int wcd9xxx_init_and_calibrate(struct wcd9xxx_mbhc *mbhc) -{ - int ret = 0; - struct snd_soc_codec *codec = mbhc->codec; - - pr_debug("%s: enter\n", __func__); - - /* Enable MCLK during calibration */ - wcd9xxx_onoff_ext_mclk(mbhc, true); - wcd9xxx_mbhc_setup(mbhc); - wcd9xxx_mbhc_cal(mbhc); - wcd9xxx_mbhc_calc_thres(mbhc); - wcd9xxx_onoff_ext_mclk(mbhc, false); - wcd9xxx_calibrate_hs_polling(mbhc); - - /* Enable Mic Bias pull down and HPH Switch to GND */ - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, 0x01, 0x01); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_HPH, 0x01, 0x01); - INIT_WORK(&mbhc->correct_plug_swch, wcd9xxx_correct_swch_plug); - - snd_soc_update_bits(codec, WCD9XXX_A_RX_HPH_OCP_CTL, 0x10, - 0x10); - wcd9xxx_enable_irq(mbhc->resmgr->core_res, - mbhc->intr_ids->hph_left_ocp); - wcd9xxx_enable_irq(mbhc->resmgr->core_res, - mbhc->intr_ids->hph_right_ocp); - - /* Initialize mechanical mbhc */ - ret = wcd9xxx_setup_jack_detect_irq(mbhc); - - if (!ret && mbhc->mbhc_cfg->gpio) { - /* Requested with IRQF_DISABLED */ - enable_irq(mbhc->mbhc_cfg->gpio_irq); - - /* Bootup time detection */ - wcd9xxx_swch_irq_handler(mbhc); - } else if (!ret && mbhc->mbhc_cfg->insert_detect) { - pr_debug("%s: Setting up codec own insert detection\n", - __func__); - /* Setup for insertion detection */ - wcd9xxx_insert_detect_setup(mbhc, true); - } - - pr_debug("%s: leave\n", __func__); - - return ret; -} - -static void wcd9xxx_mbhc_fw_read(struct work_struct *work) -{ - struct delayed_work *dwork; - struct wcd9xxx_mbhc *mbhc; - struct snd_soc_codec *codec; - const struct firmware *fw; - struct firmware_cal *fw_data = NULL; - int ret = -1, retry = 0; - bool use_default_cal = false; - - dwork = to_delayed_work(work); - mbhc = container_of(dwork, struct wcd9xxx_mbhc, mbhc_firmware_dwork); - codec = mbhc->codec; - - while (retry < FW_READ_ATTEMPTS) { - retry++; - pr_info("%s:Attempt %d to request MBHC firmware\n", - __func__, retry); - if (mbhc->mbhc_cb->get_hwdep_fw_cal) - fw_data = mbhc->mbhc_cb->get_hwdep_fw_cal(codec, - WCD9XXX_MBHC_CAL); - if (!fw_data) - ret = request_firmware(&fw, "wcd9320/wcd9320_mbhc.bin", - codec->dev); - /* - * if request_firmware and hwdep cal both fail then - * retry for few times before bailing out - */ - if ((ret != 0) && !fw_data) { - 
usleep_range(FW_READ_TIMEOUT, FW_READ_TIMEOUT + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - } else { - pr_info("%s: MBHC Firmware read successful\n", - __func__); - break; - } - } - if (!fw_data) - pr_info("%s: using request_firmware\n", __func__); - else - pr_info("%s: using hwdep cal\n", __func__); - if (ret != 0 && !fw_data) { - pr_err("%s: Cannot load MBHC firmware use default cal\n", - __func__); - use_default_cal = true; - } - if (!use_default_cal) { - const void *data; - size_t size; - - if (fw_data) { - data = fw_data->data; - size = fw_data->size; - } else { - data = fw->data; - size = fw->size; - } - if (wcd9xxx_mbhc_fw_validate(data, size) == false) { - pr_err("%s: Invalid MBHC cal data size use default cal\n", - __func__); - if (!fw_data) - release_firmware(fw); - } else { - if (fw_data) { - mbhc->mbhc_cfg->calibration = - (void *)fw_data->data; - mbhc->mbhc_cal = fw_data; - } else { - mbhc->mbhc_cfg->calibration = - (void *)fw->data; - mbhc->mbhc_fw = fw; - } - } - } - - (void) wcd9xxx_init_and_calibrate(mbhc); -} - -#ifdef CONFIG_DEBUG_FS -ssize_t codec_mbhc_debug_read(struct file *file, char __user *buf, - size_t count, loff_t *pos) -{ - const int size = 768; - char buffer[size]; - int n = 0; - struct wcd9xxx_mbhc *mbhc = file->private_data; - const struct mbhc_internal_cal_data *p = &mbhc->mbhc_data; - const s16 v_ins_hu = - wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_HU); - const s16 v_ins_h = - wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_INS_H); - const s16 v_b1_hu = - wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_HU); - const s16 v_b1_h = - wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_B1_H); - const s16 v_br_h = - wcd9xxx_get_current_v(mbhc, WCD9XXX_CURRENT_V_BR_H); - - n = scnprintf(buffer, size - n, "dce_z = %x(%dmv)\n", - p->dce_z, wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_z)); - n += scnprintf(buffer + n, size - n, "dce_mb = %x(%dmv)\n", - p->dce_mb, wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_mb)); - n += scnprintf(buffer + n, size - n, "dce_nsc_cs_z = %x(%dmv)\n", - p->dce_nsc_cs_z, - __wcd9xxx_codec_sta_dce_v(mbhc, 1, p->dce_nsc_cs_z, - p->dce_nsc_cs_z, - VDDIO_MICBIAS_MV)); - n += scnprintf(buffer + n, size - n, "sta_z = %x(%dmv)\n", - p->sta_z, wcd9xxx_codec_sta_dce_v(mbhc, 0, p->sta_z)); - n += scnprintf(buffer + n, size - n, "sta_mb = %x(%dmv)\n", - p->sta_mb, wcd9xxx_codec_sta_dce_v(mbhc, 0, p->sta_mb)); - n += scnprintf(buffer + n, size - n, "t_dce = %d\n", p->t_dce); - n += scnprintf(buffer + n, size - n, "t_sta = %d\n", p->t_sta); - n += scnprintf(buffer + n, size - n, "micb_mv = %dmv\n", p->micb_mv); - n += scnprintf(buffer + n, size - n, "v_ins_hu = %x(%dmv)\n", - v_ins_hu, wcd9xxx_codec_sta_dce_v(mbhc, 0, v_ins_hu)); - n += scnprintf(buffer + n, size - n, "v_ins_h = %x(%dmv)\n", - v_ins_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_ins_h)); - n += scnprintf(buffer + n, size - n, "v_b1_hu = %x(%dmv)\n", - v_b1_hu, wcd9xxx_codec_sta_dce_v(mbhc, 0, v_b1_hu)); - n += scnprintf(buffer + n, size - n, "v_b1_h = %x(%dmv)\n", - v_b1_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_b1_h)); - n += scnprintf(buffer + n, size - n, "v_brh = %x(%dmv)\n", - v_br_h, wcd9xxx_codec_sta_dce_v(mbhc, 1, v_br_h)); - n += scnprintf(buffer + n, size - n, "v_brl = %x(%dmv)\n", p->v_brl, - wcd9xxx_codec_sta_dce_v(mbhc, 0, p->v_brl)); - n += scnprintf(buffer + n, size - n, "v_no_mic = %x(%dmv)\n", - p->v_no_mic, - wcd9xxx_codec_sta_dce_v(mbhc, 0, p->v_no_mic)); - n += scnprintf(buffer + n, size - n, "v_inval_ins_low = %d\n", - p->v_inval_ins_low); - n += scnprintf(buffer + n, size - n, 
"v_inval_ins_high = %d\n", - p->v_inval_ins_high); - n += scnprintf(buffer + n, size - n, "Insert detect insert = %d\n", - !wcd9xxx_swch_level_remove(mbhc)); - buffer[n] = 0; - - return simple_read_from_buffer(buf, count, pos, buffer, n); -} - -static int codec_debug_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -static ssize_t codec_debug_write(struct file *filp, - const char __user *ubuf, size_t cnt, - loff_t *ppos) -{ - char lbuf[32]; - char *buf; - int rc; - struct wcd9xxx_mbhc *mbhc = filp->private_data; - - if (cnt > sizeof(lbuf) - 1) - return -EINVAL; - - rc = copy_from_user(lbuf, ubuf, cnt); - if (rc) - return -EFAULT; - - lbuf[cnt] = '\0'; - buf = (char *)lbuf; - mbhc->no_mic_headset_override = (*strsep(&buf, " ") == '0') ? - false : true; - return rc; -} - -static const struct file_operations mbhc_trrs_debug_ops = { - .open = codec_debug_open, - .write = codec_debug_write, -}; - -static const struct file_operations mbhc_debug_ops = { - .open = codec_debug_open, - .read = codec_mbhc_debug_read, -}; - -static void wcd9xxx_init_debugfs(struct wcd9xxx_mbhc *mbhc) -{ - mbhc->debugfs_poke = - debugfs_create_file("TRRS", S_IFREG | 0444, NULL, mbhc, - &mbhc_trrs_debug_ops); - mbhc->debugfs_mbhc = - debugfs_create_file("wcd9xxx_mbhc", S_IFREG | 0444, - NULL, mbhc, &mbhc_debug_ops); -} - -static void wcd9xxx_cleanup_debugfs(struct wcd9xxx_mbhc *mbhc) -{ - debugfs_remove(mbhc->debugfs_poke); - debugfs_remove(mbhc->debugfs_mbhc); -} -#else -static void wcd9xxx_init_debugfs(struct wcd9xxx_mbhc *mbhc) -{ -} - -static void wcd9xxx_cleanup_debugfs(struct wcd9xxx_mbhc *mbhc) -{ -} -#endif - -int wcd9xxx_mbhc_set_keycode(struct wcd9xxx_mbhc *mbhc) -{ - enum snd_jack_types type = SND_JACK_BTN_0; - int i, ret, result = 0; - int *btn_key_code; - - btn_key_code = mbhc->mbhc_cfg->key_code; - - for (i = 0 ; i < 8 ; i++) { - if (btn_key_code[i] != 0) { - switch (i) { - case 0: - type = SND_JACK_BTN_0; - break; - case 1: - type = SND_JACK_BTN_1; - break; - case 2: - type = SND_JACK_BTN_2; - break; - case 3: - type = SND_JACK_BTN_3; - break; - case 4: - type = SND_JACK_BTN_4; - break; - case 5: - type = SND_JACK_BTN_5; - break; - default: - WARN_ONCE(1, "Wrong button number:%d\n", i); - result = -1; - break; - } - ret = snd_jack_set_key(mbhc->button_jack.jack, - type, - btn_key_code[i]); - if (ret) { - pr_err("%s: Failed to set code for %d\n", - __func__, btn_key_code[i]); - result = -1; - } - input_set_capability( - mbhc->button_jack.jack->input_dev, - EV_KEY, btn_key_code[i]); - pr_debug("%s: set btn%d key code:%d\n", __func__, - i, btn_key_code[i]); - } - } - return result; -} - -int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc, - struct wcd9xxx_mbhc_config *mbhc_cfg) -{ - int rc = 0; - struct snd_soc_codec *codec = mbhc->codec; - - pr_debug("%s: enter\n", __func__); - - if (!codec) { - pr_err("%s: no codec\n", __func__); - return -EINVAL; - } - - if (mbhc_cfg->mclk_rate != MCLK_RATE_12288KHZ && - mbhc_cfg->mclk_rate != MCLK_RATE_9600KHZ) { - pr_err("Error: unsupported clock rate %d\n", - mbhc_cfg->mclk_rate); - return -EINVAL; - } - - /* Save mbhc config */ - mbhc->mbhc_cfg = mbhc_cfg; - - /* Set btn key code */ - if (wcd9xxx_mbhc_set_keycode(mbhc)) - pr_err("Set btn key code error!!!\n"); - - /* Get HW specific mbhc registers' address */ - wcd9xxx_get_mbhc_micbias_regs(mbhc, MBHC_PRIMARY_MIC_MB); - - /* Get HW specific mbhc registers' address for anc */ - wcd9xxx_get_mbhc_micbias_regs(mbhc, MBHC_ANC_MIC_MB); - - /* Put CFILT in fast mode by 
default */ - if (mbhc->mbhc_cb && mbhc->mbhc_cb->cfilt_fast_mode) - mbhc->mbhc_cb->cfilt_fast_mode(codec, mbhc); - else - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.cfilt_ctl, - 0x40, WCD9XXX_CFILT_FAST_MODE); - - /* - * setup internal micbias if codec uses internal micbias for - * headset detection - */ - if (mbhc->mbhc_cfg->use_int_rbias) { - if (mbhc->mbhc_cb && mbhc->mbhc_cb->setup_int_rbias) { - mbhc->mbhc_cb->setup_int_rbias(codec, true); - } else { - pr_info("%s: internal bias requested but codec did not provide callback\n", - __func__); - } - } - - /* - * If codec has specific clock gating for MBHC, - * remove the clock gate - */ - if (mbhc->mbhc_cb && - mbhc->mbhc_cb->enable_clock_gate) - mbhc->mbhc_cb->enable_clock_gate(mbhc->codec, true); - - if (!mbhc->mbhc_cfg->read_fw_bin || - (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_fw) || - (mbhc->mbhc_cfg->read_fw_bin && mbhc->mbhc_cal)) { - rc = wcd9xxx_init_and_calibrate(mbhc); - } else { - if (!mbhc->mbhc_fw || !mbhc->mbhc_cal) - schedule_delayed_work(&mbhc->mbhc_firmware_dwork, - usecs_to_jiffies(FW_READ_TIMEOUT)); - else - pr_debug("%s: Skipping to read mbhc fw, 0x%pK %pK\n", - __func__, mbhc->mbhc_fw, mbhc->mbhc_cal); - } - - pr_debug("%s: leave %d\n", __func__, rc); - return rc; -} -EXPORT_SYMBOL(wcd9xxx_mbhc_start); - -void wcd9xxx_mbhc_stop(struct wcd9xxx_mbhc *mbhc) -{ - if (mbhc->mbhc_fw || mbhc->mbhc_cal) { - cancel_delayed_work_sync(&mbhc->mbhc_firmware_dwork); - if (!mbhc->mbhc_cal) - release_firmware(mbhc->mbhc_fw); - mbhc->mbhc_fw = NULL; - mbhc->mbhc_cal = NULL; - } -} -EXPORT_SYMBOL(wcd9xxx_mbhc_stop); - -static enum wcd9xxx_micbias_num -wcd9xxx_event_to_micbias(const enum wcd9xxx_notify_event event) -{ - enum wcd9xxx_micbias_num ret; - - switch (event) { - case WCD9XXX_EVENT_PRE_MICBIAS_1_ON: - case WCD9XXX_EVENT_PRE_MICBIAS_1_OFF: - case WCD9XXX_EVENT_POST_MICBIAS_1_ON: - case WCD9XXX_EVENT_POST_MICBIAS_1_OFF: - ret = MBHC_MICBIAS1; - break; - case WCD9XXX_EVENT_PRE_MICBIAS_2_ON: - case WCD9XXX_EVENT_PRE_MICBIAS_2_OFF: - case WCD9XXX_EVENT_POST_MICBIAS_2_ON: - case WCD9XXX_EVENT_POST_MICBIAS_2_OFF: - ret = MBHC_MICBIAS2; - break; - case WCD9XXX_EVENT_PRE_MICBIAS_3_ON: - case WCD9XXX_EVENT_PRE_MICBIAS_3_OFF: - case WCD9XXX_EVENT_POST_MICBIAS_3_ON: - case WCD9XXX_EVENT_POST_MICBIAS_3_OFF: - ret = MBHC_MICBIAS3; - break; - case WCD9XXX_EVENT_PRE_MICBIAS_4_ON: - case WCD9XXX_EVENT_PRE_MICBIAS_4_OFF: - case WCD9XXX_EVENT_POST_MICBIAS_4_ON: - case WCD9XXX_EVENT_POST_MICBIAS_4_OFF: - ret = MBHC_MICBIAS4; - break; - default: - WARN_ONCE(1, "Cannot convert event %d to micbias\n", event); - ret = MBHC_MICBIAS_INVALID; - break; - } - return ret; -} - -static int wcd9xxx_event_to_cfilt(const enum wcd9xxx_notify_event event) -{ - int ret; - - switch (event) { - case WCD9XXX_EVENT_PRE_CFILT_1_OFF: - case WCD9XXX_EVENT_POST_CFILT_1_OFF: - case WCD9XXX_EVENT_PRE_CFILT_1_ON: - case WCD9XXX_EVENT_POST_CFILT_1_ON: - ret = WCD9XXX_CFILT1_SEL; - break; - case WCD9XXX_EVENT_PRE_CFILT_2_OFF: - case WCD9XXX_EVENT_POST_CFILT_2_OFF: - case WCD9XXX_EVENT_PRE_CFILT_2_ON: - case WCD9XXX_EVENT_POST_CFILT_2_ON: - ret = WCD9XXX_CFILT2_SEL; - break; - case WCD9XXX_EVENT_PRE_CFILT_3_OFF: - case WCD9XXX_EVENT_POST_CFILT_3_OFF: - case WCD9XXX_EVENT_PRE_CFILT_3_ON: - case WCD9XXX_EVENT_POST_CFILT_3_ON: - ret = WCD9XXX_CFILT3_SEL; - break; - default: - ret = -1; - } - return ret; -} - -static int wcd9xxx_get_mbhc_cfilt_sel(struct wcd9xxx_mbhc *mbhc) -{ - int cfilt; - const struct wcd9xxx_micbias_setting *mb_pdata = - mbhc->resmgr->micbias_pdata; - - 
switch (mbhc->mbhc_cfg->micbias) { - case MBHC_MICBIAS1: - cfilt = mb_pdata->bias1_cfilt_sel; - break; - case MBHC_MICBIAS2: - cfilt = mb_pdata->bias2_cfilt_sel; - break; - case MBHC_MICBIAS3: - cfilt = mb_pdata->bias3_cfilt_sel; - break; - case MBHC_MICBIAS4: - cfilt = mb_pdata->bias4_cfilt_sel; - break; - default: - cfilt = MBHC_MICBIAS_INVALID; - break; - } - return cfilt; -} - -static void wcd9xxx_enable_mbhc_txfe(struct wcd9xxx_mbhc *mbhc, bool on) -{ - if (mbhc->mbhc_cb && mbhc->mbhc_cb->enable_mbhc_txfe) - mbhc->mbhc_cb->enable_mbhc_txfe(mbhc->codec, on); - else - snd_soc_update_bits(mbhc->codec, WCD9XXX_A_TX_7_MBHC_TEST_CTL, - 0x40, on ? 0x40 : 0x00); -} - -static int wcd9xxx_event_notify(struct notifier_block *self, unsigned long val, - void *data) -{ - int ret = 0; - struct wcd9xxx_mbhc *mbhc = ((struct wcd9xxx_resmgr *)data)->mbhc; - struct snd_soc_codec *codec; - enum wcd9xxx_notify_event event = (enum wcd9xxx_notify_event)val; - - pr_debug("%s: enter event %s(%d)\n", __func__, - wcd9xxx_get_event_string(event), event); - - if (!mbhc || !mbhc->mbhc_cfg) { - pr_debug("mbhc not initialized\n"); - return 0; - } - codec = mbhc->codec; - mutex_lock(&mbhc->mbhc_lock); - switch (event) { - /* MICBIAS usage change */ - case WCD9XXX_EVENT_PRE_MICBIAS_1_ON: - case WCD9XXX_EVENT_PRE_MICBIAS_2_ON: - case WCD9XXX_EVENT_PRE_MICBIAS_3_ON: - case WCD9XXX_EVENT_PRE_MICBIAS_4_ON: - if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias == - wcd9xxx_event_to_micbias(event)) { - wcd9xxx_switch_micbias(mbhc, 0); - /* - * Enable MBHC TxFE whenever micbias is - * turned ON and polling is active - */ - if (mbhc->polling_active) - wcd9xxx_enable_mbhc_txfe(mbhc, true); - } - break; - case WCD9XXX_EVENT_POST_MICBIAS_1_ON: - case WCD9XXX_EVENT_POST_MICBIAS_2_ON: - case WCD9XXX_EVENT_POST_MICBIAS_3_ON: - case WCD9XXX_EVENT_POST_MICBIAS_4_ON: - if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias == - wcd9xxx_event_to_micbias(event) && - wcd9xxx_mbhc_polling(mbhc)) { - /* if polling is on, restart it */ - wcd9xxx_pause_hs_polling(mbhc); - wcd9xxx_start_hs_polling(mbhc); - } - break; - case WCD9XXX_EVENT_POST_MICBIAS_1_OFF: - case WCD9XXX_EVENT_POST_MICBIAS_2_OFF: - case WCD9XXX_EVENT_POST_MICBIAS_3_OFF: - case WCD9XXX_EVENT_POST_MICBIAS_4_OFF: - if (mbhc->mbhc_cfg && mbhc->mbhc_cfg->micbias == - wcd9xxx_event_to_micbias(event)) { - if (mbhc->event_state & - (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR)) - wcd9xxx_switch_micbias(mbhc, 1); - /* - * Disable MBHC TxFE, in case it was enabled earlier - * when micbias was enabled and polling is not active. 
- */ - if (!mbhc->polling_active) - wcd9xxx_enable_mbhc_txfe(mbhc, false); - } - if (mbhc->micbias_enable && mbhc->polling_active && - !(snd_soc_read(mbhc->codec, mbhc->mbhc_bias_regs.ctl_reg) - & 0x80)) { - pr_debug("%s:Micbias turned off by recording, set up again", - __func__); - snd_soc_update_bits(codec, mbhc->mbhc_bias_regs.ctl_reg, - 0x80, 0x80); - } - break; - /* PA usage change */ - case WCD9XXX_EVENT_PRE_HPHL_PA_ON: - set_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state); - if (!(snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg) & 0x80)) - /* if micbias is not enabled, switch to vddio */ - wcd9xxx_switch_micbias(mbhc, 1); - break; - case WCD9XXX_EVENT_PRE_HPHR_PA_ON: - set_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state); - break; - case WCD9XXX_EVENT_POST_HPHL_PA_OFF: - clear_bit(MBHC_EVENT_PA_HPHL, &mbhc->event_state); - /* if HPH PAs are off, report OCP and switch back to CFILT */ - clear_bit(WCD9XXX_HPHL_PA_OFF_ACK, &mbhc->hph_pa_dac_state); - clear_bit(WCD9XXX_HPHL_DAC_OFF_ACK, &mbhc->hph_pa_dac_state); - if (mbhc->hph_status & SND_JACK_OC_HPHL) - hphlocp_off_report(mbhc, SND_JACK_OC_HPHL); - if (!(mbhc->event_state & - (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR | - 1 << MBHC_EVENT_PRE_TX_3_ON))) - wcd9xxx_switch_micbias(mbhc, 0); - break; - case WCD9XXX_EVENT_POST_HPHR_PA_OFF: - clear_bit(MBHC_EVENT_PA_HPHR, &mbhc->event_state); - /* if HPH PAs are off, report OCP and switch back to CFILT */ - clear_bit(WCD9XXX_HPHR_PA_OFF_ACK, &mbhc->hph_pa_dac_state); - clear_bit(WCD9XXX_HPHR_DAC_OFF_ACK, &mbhc->hph_pa_dac_state); - if (mbhc->hph_status & SND_JACK_OC_HPHR) - hphrocp_off_report(mbhc, SND_JACK_OC_HPHL); - if (!(mbhc->event_state & - (1 << MBHC_EVENT_PA_HPHL | 1 << MBHC_EVENT_PA_HPHR | - 1 << MBHC_EVENT_PRE_TX_3_ON))) - wcd9xxx_switch_micbias(mbhc, 0); - break; - /* Clock usage change */ - case WCD9XXX_EVENT_PRE_MCLK_ON: - break; - case WCD9XXX_EVENT_POST_MCLK_ON: - /* Change to lower TxAAF frequency */ - snd_soc_update_bits(codec, WCD9XXX_A_TX_COM_BIAS, 1 << 4, - 1 << 4); - /* Re-calibrate clock rate dependent values */ - wcd9xxx_update_mbhc_clk_rate(mbhc, mbhc->mbhc_cfg->mclk_rate); - /* If clock source changes, stop and restart polling */ - if (wcd9xxx_mbhc_polling(mbhc)) { - wcd9xxx_calibrate_hs_polling(mbhc); - wcd9xxx_start_hs_polling(mbhc); - } - break; - case WCD9XXX_EVENT_PRE_MCLK_OFF: - /* If clock source changes, stop and restart polling */ - if (wcd9xxx_mbhc_polling(mbhc)) - wcd9xxx_pause_hs_polling(mbhc); - break; - case WCD9XXX_EVENT_POST_MCLK_OFF: - break; - case WCD9XXX_EVENT_PRE_RCO_ON: - break; - case WCD9XXX_EVENT_POST_RCO_ON: - /* Change to higher TxAAF frequency */ - snd_soc_update_bits(codec, WCD9XXX_A_TX_COM_BIAS, 1 << 4, - 0 << 4); - /* Re-calibrate clock rate dependent values */ - wcd9xxx_update_mbhc_clk_rate(mbhc, mbhc->rco_clk_rate); - /* If clock source changes, stop and restart polling */ - if (wcd9xxx_mbhc_polling(mbhc)) { - wcd9xxx_calibrate_hs_polling(mbhc); - wcd9xxx_start_hs_polling(mbhc); - } - break; - case WCD9XXX_EVENT_PRE_RCO_OFF: - /* If clock source changes, stop and restart polling */ - if (wcd9xxx_mbhc_polling(mbhc)) - wcd9xxx_pause_hs_polling(mbhc); - break; - case WCD9XXX_EVENT_POST_RCO_OFF: - break; - /* CFILT usage change */ - case WCD9XXX_EVENT_PRE_CFILT_1_ON: - case WCD9XXX_EVENT_PRE_CFILT_2_ON: - case WCD9XXX_EVENT_PRE_CFILT_3_ON: - if (wcd9xxx_get_mbhc_cfilt_sel(mbhc) == - wcd9xxx_event_to_cfilt(event)) - /* - * Switch CFILT to slow mode if MBHC CFILT is being - * used. 
- */ - wcd9xxx_codec_switch_cfilt_mode(mbhc, false); - break; - case WCD9XXX_EVENT_POST_CFILT_1_OFF: - case WCD9XXX_EVENT_POST_CFILT_2_OFF: - case WCD9XXX_EVENT_POST_CFILT_3_OFF: - if (wcd9xxx_get_mbhc_cfilt_sel(mbhc) == - wcd9xxx_event_to_cfilt(event)) - /* - * Switch CFILT to fast mode if MBHC CFILT is not - * used anymore. - */ - wcd9xxx_codec_switch_cfilt_mode(mbhc, true); - break; - /* System resume */ - case WCD9XXX_EVENT_POST_RESUME: - mbhc->mbhc_last_resume = jiffies; - break; - /* BG mode chage */ - case WCD9XXX_EVENT_PRE_BG_OFF: - case WCD9XXX_EVENT_POST_BG_OFF: - case WCD9XXX_EVENT_PRE_BG_AUDIO_ON: - case WCD9XXX_EVENT_POST_BG_AUDIO_ON: - case WCD9XXX_EVENT_PRE_BG_MBHC_ON: - case WCD9XXX_EVENT_POST_BG_MBHC_ON: - /* Not used for now */ - break; - case WCD9XXX_EVENT_PRE_TX_3_ON: - /* - * if polling is ON, mbhc micbias not enabled - * switch micbias source to VDDIO - */ - set_bit(MBHC_EVENT_PRE_TX_3_ON, &mbhc->event_state); - if (!(snd_soc_read(codec, mbhc->mbhc_bias_regs.ctl_reg) - & 0x80) && - mbhc->polling_active && !mbhc->mbhc_micbias_switched) - wcd9xxx_switch_micbias(mbhc, 1); - break; - case WCD9XXX_EVENT_POST_TX_3_OFF: - /* - * Switch back to micbias if HPH PA or TX3 path - * is disabled - */ - clear_bit(MBHC_EVENT_PRE_TX_3_ON, &mbhc->event_state); - if (mbhc->polling_active && mbhc->mbhc_micbias_switched && - !(mbhc->event_state & (1 << MBHC_EVENT_PA_HPHL | - 1 << MBHC_EVENT_PA_HPHR))) - wcd9xxx_switch_micbias(mbhc, 0); - break; - default: - WARN(1, "Unknown event %d\n", event); - ret = -EINVAL; - } - mutex_unlock(&mbhc->mbhc_lock); - - pr_debug("%s: leave\n", __func__); - - return ret; -} - -static s16 wcd9xxx_read_impedance_regs(struct wcd9xxx_mbhc *mbhc) -{ - struct snd_soc_codec *codec = mbhc->codec; - short bias_value; - int i; - s32 z_t = 0; - s32 z_loop = z_det_box_car_avg; - - /* Box Car avrg of less than a particular loop count will not be - * accomodated. Similarly if the count is more than a particular number - * it will not be counted. Set z_loop counter to a limit, if its more - * or less than the value in WCD9XXX_BOX_CAR_AVRG_MAX or - * WCD9XXX_BOX_CAR_AVRG_MIN - */ - if (z_loop < WCD9XXX_BOX_CAR_AVRG_MIN) { - dev_dbg(codec->dev, - "%s: Box Car avrg counter < %d. Limiting it to %d\n", - __func__, WCD9XXX_BOX_CAR_AVRG_MIN, - WCD9XXX_BOX_CAR_AVRG_MIN); - z_loop = WCD9XXX_BOX_CAR_AVRG_MIN; - } else if (z_loop > WCD9XXX_BOX_CAR_AVRG_MAX) { - dev_dbg(codec->dev, - "%s: Box Car avrg counter > %d. 
Limiting it to %d\n", - __func__, WCD9XXX_BOX_CAR_AVRG_MAX, - WCD9XXX_BOX_CAR_AVRG_MAX); - z_loop = WCD9XXX_BOX_CAR_AVRG_MAX; - } - - /* Take box car average if needed */ - for (i = 0; i < z_loop; i++) { - snd_soc_write(codec, WCD9XXX_A_CDC_MBHC_EN_CTL, 0x2); - /* Wait for atleast 1800uS to let register write to settle */ - usleep_range(1800, 1800 + WCD9XXX_USLEEP_RANGE_MARGIN_US); - z_t += wcd9xxx_read_sta_result(codec); - } - /* Take average of the Z values read */ - bias_value = (s16) (z_t / z_loop); - return bias_value; -} - -static int wcd9xxx_remeasure_z_values(struct wcd9xxx_mbhc *mbhc, - s16 l[3], s16 r[3], - uint32_t *zl, uint32_t *zr, - u32 *zl_stereo, u32 *zl_mono) -{ - s16 l_t[3] = {0}, r_t[3] = {0}; - s16 l2_stereo, l2_mono; - bool left, right; - struct snd_soc_codec *codec = mbhc->codec; - - if (!mbhc->mbhc_cb || !mbhc->mbhc_cb->setup_zdet || - !mbhc->mbhc_cb->compute_impedance) { - dev_err(codec->dev, "%s: Invalid parameters\n", __func__); - return -EINVAL; - } - - left = !!(l); - right = !!(r); - - dev_dbg(codec->dev, "%s: Remeasuring impedance values\n", __func__); - dev_dbg(codec->dev, "%s: l: %pK, r: %pK, left=%d, right=%d\n", __func__, - l, r, left, right); - - /* Remeasure V2 values */ - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0); - if (right) - r_t[2] = wcd9xxx_read_impedance_regs(mbhc); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0); - if (left) - l_t[2] = wcd9xxx_read_impedance_regs(mbhc); - - /* Ramp down HPHR */ - mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHR_RAMP_DISABLE); - - if (right) { - /* Take R0'/R1' */ - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, - 0xFF, 0xF8); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, - 0xFF, 0xA0); - r_t[1] = wcd9xxx_read_impedance_regs(mbhc); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, - 0xFF, 0xF0); - r_t[0] = wcd9xxx_read_impedance_regs(mbhc); - } - - /* Put back gain to 1x */ - if (!left && right) - mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_GAIN_0); - - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0); - /* Take L2'' measurement */ - l2_stereo = wcd9xxx_read_impedance_regs(mbhc); - - /* Turn off HPHR PA and take L2''' */ - mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHR_PA_DISABLE); - l2_mono = wcd9xxx_read_impedance_regs(mbhc); - - /* Ramp HPHL from -15mV to 0V */ - mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_HPHL_RAMP_DISABLE); - - /* Take L0' and L1' with iCal */ - l_t[0] = wcd9xxx_read_impedance_regs(mbhc); - snd_soc_update_bits(codec, WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF8); - l_t[1] = wcd9xxx_read_impedance_regs(mbhc); - - if (left) { - l[0] = l_t[0]; - l[1] = l_t[1]; - l[2] = l_t[2]; - } - if (right) { - r[0] = r_t[0]; - r[1] = r_t[1]; - r[2] = r_t[2]; - } - - /* compute the new impedance values */ - mbhc->mbhc_cb->compute_impedance(mbhc, l, r, zl, zr); - - if (!left && right) - mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_GAIN_UPDATE_1X); - /* compute the new ZL'' value */ - l_t[2] = l2_stereo; - mbhc->mbhc_cb->compute_impedance(mbhc, l_t, NULL, zl_stereo, NULL); - /* compute the new ZL''' value */ - l_t[2] = l2_mono; - mbhc->mbhc_cb->compute_impedance(mbhc, l_t, NULL, zl_mono, NULL); - - pr_debug("%s: L0': 0x%x, L1': 0x%x L2_stereo: 0x%x, L2_mono: 0x%x\n", - __func__, l_t[0] & 0xffff, l_t[1] & 0xffff, - l2_stereo & 0xffff, l2_mono & 0xffff); - pr_debug("%s: ZL_stereo = %u, ZL_mono = %u\n", - __func__, *zl_stereo, *zl_mono); - - return 0; -} - -static enum mbhc_zdet_zones wcd9xxx_assign_zdet_zone(uint32_t zl, 
uint32_t zr, - int32_t *gain) -{ - enum mbhc_zdet_zones zdet_zone; - - if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) && - WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) { - zdet_zone = ZL_ZONE1__ZR_ZONE1; - *gain = 0; - } else if (WCD9XXX_IS_IN_ZDET_ZONE_2(zl) && - WCD9XXX_IS_IN_ZDET_ZONE_2(zr)) { - zdet_zone = ZL_ZONE2__ZR_ZONE2; - *gain = MBHC_ZDET_GAIN_1; - } else if (WCD9XXX_IS_IN_ZDET_ZONE_3(zl) && - WCD9XXX_IS_IN_ZDET_ZONE_3(zr)) { - zdet_zone = ZL_ZONE3__ZR_ZONE3; - *gain = MBHC_ZDET_GAIN_2; - } else if (WCD9XXX_IS_IN_ZDET_ZONE_2(zl) && - WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) { - zdet_zone = ZL_ZONE2__ZR_ZONE1; - *gain = MBHC_ZDET_GAIN_1; - } else if (WCD9XXX_IS_IN_ZDET_ZONE_3(zl) && - WCD9XXX_IS_IN_ZDET_ZONE_1(zr)) { - zdet_zone = ZL_ZONE3__ZR_ZONE1; - *gain = MBHC_ZDET_GAIN_2; - } else if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) && - WCD9XXX_IS_IN_ZDET_ZONE_2(zr)) { - zdet_zone = ZL_ZONE1__ZR_ZONE2; - *gain = MBHC_ZDET_GAIN_1; - } else if (WCD9XXX_IS_IN_ZDET_ZONE_1(zl) && - WCD9XXX_IS_IN_ZDET_ZONE_3(zr)) { - zdet_zone = ZL_ZONE1__ZR_ZONE3; - *gain = MBHC_ZDET_GAIN_2; - } else { - zdet_zone = ZL_ZR_NOT_IN_ZONE1; - *gain = MBHC_ZDET_GAIN_1; - } - - return zdet_zone; -} - -static int wcd9xxx_detect_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl, - uint32_t *zr) -{ - int i; - int ret = 0; - u8 micb_mbhc_val; - s16 l[3], r[3]; - s16 *z[] = { - &l[0], &r[0], &r[1], &l[1], &l[2], &r[2], - }; - u32 zl_stereo, zl_mono; - u32 zl_diff_1, zl_diff_2; - bool override_en; - struct snd_soc_codec *codec = mbhc->codec; - const int mux_wait_us = 25; - const struct wcd9xxx_reg_mask_val reg_set_mux[] = { - /* Phase 1 */ - /* Set MBHC_MUX for HPHL without ical */ - {WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0}, - /* Set MBHC_MUX for HPHR without ical */ - {WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xA0}, - /* Set MBHC_MUX for HPHR with ical */ - {WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF8}, - /* Set MBHC_MUX for HPHL with ical */ - {WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xC0}, - - /* Phase 2 */ - {WCD9XXX_A_MBHC_SCALING_MUX_2, 0xFF, 0xF0}, - /* Set MBHC_MUX for HPHR without ical and wait for 25us */ - {WCD9XXX_A_MBHC_SCALING_MUX_1, 0xFF, 0xA0}, - }; - - pr_debug("%s: enter\n", __func__); - WCD9XXX_BCL_ASSERT_LOCKED(mbhc->resmgr); - - if (!mbhc->mbhc_cb || !mbhc->mbhc_cb->setup_zdet || - !mbhc->mbhc_cb->compute_impedance || !zl || !zr) { - return -EINVAL; - } - - /* - * Impedance detection is an intrusive function as it mutes RX paths, - * enable PAs and etc. Therefore codec drvier including ALSA - * shouldn't read and write hardware registers during detection. - */ - wcd9xxx_onoff_ext_mclk(mbhc, true); - - /* - * For impedance detection, make sure to disable micbias from - * override signal so that override does not cause micbias - * to be enabled. This setting will be undone after completing - * impedance measurement. - */ - micb_mbhc_val = snd_soc_read(codec, WCD9XXX_A_MAD_ANA_CTRL); - snd_soc_update_bits(codec, WCD9XXX_A_MAD_ANA_CTRL, - 0x10, 0x00); - - override_en = (snd_soc_read(codec, WCD9XXX_A_CDC_MBHC_B1_CTL) & 0x04) ? 
- true : false; - if (!override_en) - wcd9xxx_turn_onoff_override(mbhc, true); - pr_debug("%s: Setting impedance detection\n", __func__); - - /* Codec specific setup for L0, R0, L1 and R1 measurements */ - mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_PRE_MEASURE); - - pr_debug("%s: Performing impedance detection\n", __func__); - for (i = 0; i < ARRAY_SIZE(reg_set_mux) - 2; i++) { - snd_soc_update_bits(codec, reg_set_mux[i].reg, - reg_set_mux[i].mask, - reg_set_mux[i].val); - if (mbhc->mbhc_cb->get_cdc_type && - mbhc->mbhc_cb->get_cdc_type() == - WCD9XXX_CDC_TYPE_TOMTOM) { - *(z[i]) = wcd9xxx_read_impedance_regs(mbhc); - } else { - if (mbhc->mbhc_cb->enable_mux_bias_block) - mbhc->mbhc_cb->enable_mux_bias_block(codec); - else - snd_soc_update_bits(codec, - WCD9XXX_A_MBHC_SCALING_MUX_1, - 0x80, 0x80); - /* 25us is required after mux change to settle down */ - usleep_range(mux_wait_us, - mux_wait_us + WCD9XXX_USLEEP_RANGE_MARGIN_US); - *(z[i]) = __wcd9xxx_codec_sta_dce(mbhc, 0, - true, false); - } - } - - /* Codec specific setup for L2 and R2 measurements */ - mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_POST_MEASURE); - - for (; i < ARRAY_SIZE(reg_set_mux); i++) { - snd_soc_update_bits(codec, reg_set_mux[i].reg, - reg_set_mux[i].mask, - reg_set_mux[i].val); - if (mbhc->mbhc_cb->get_cdc_type && - mbhc->mbhc_cb->get_cdc_type() == - WCD9XXX_CDC_TYPE_TOMTOM) { - *(z[i]) = wcd9xxx_read_impedance_regs(mbhc); - } else { - if (mbhc->mbhc_cb->enable_mux_bias_block) - mbhc->mbhc_cb->enable_mux_bias_block(codec); - else - snd_soc_update_bits(codec, - WCD9XXX_A_MBHC_SCALING_MUX_1, - 0x80, 0x80); - /* 25us is required after mux change to settle down */ - usleep_range(mux_wait_us, - mux_wait_us + WCD9XXX_USLEEP_RANGE_MARGIN_US); - *(z[i]) = __wcd9xxx_codec_sta_dce(mbhc, 0, - true, false); - } - } - - mbhc->mbhc_cb->compute_impedance(mbhc, l, r, zl, zr); - - /* - * For some codecs, an additional step of zdet is needed - * to overcome effects of noise and for better accuracy of - * z values - */ - if (mbhc->mbhc_cb->get_cdc_type && - mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_TOMTOM) { - uint32_t zl_t = 0, zr_t = 0; - s16 *l_p, *r_p; - enum mbhc_zdet_zones zdet_zone; - int32_t gain; - - zdet_zone = wcd9xxx_assign_zdet_zone(*zl, *zr, &gain); - switch (zdet_zone) { - case ZL_ZONE1__ZR_ZONE1: - l_p = NULL; - r_p = NULL; - break; - case ZL_ZONE2__ZR_ZONE2: - case ZL_ZONE3__ZR_ZONE3: - case ZL_ZR_NOT_IN_ZONE1: - l_p = l; - r_p = r; - break; - case ZL_ZONE2__ZR_ZONE1: - case ZL_ZONE3__ZR_ZONE1: - /* If ZR falls in Zone 1, further computations with - * gain update are not required - */ - l_p = l; - r_p = NULL; - break; - case ZL_ZONE1__ZR_ZONE2: - case ZL_ZONE1__ZR_ZONE3: - /* If ZL falls in Zone 1, further computations with - * gain update are not required - */ - l_p = NULL; - r_p = r; - break; - } - pr_debug("%s:zdet_zone = %d, gain = %d\n", __func__, - zdet_zone, gain); - if (gain) - mbhc->mbhc_cb->setup_zdet(mbhc, gain); - - wcd9xxx_remeasure_z_values(mbhc, l_p, r_p, &zl_t, &zr_t, - &zl_stereo, &zl_mono); - - *zl = (zl_t) ? zl_t : *zl; - *zr = (zr_t) ? zr_t : *zr; - - /* Check for Mono/Stereo Type - * Conditions to classify Mono/Stereo - * i. Difference of zl_stereo and zl_mono > (1/2) of zl_mono - * ii. Absolute difference of zl and zr above a threshold - */ - zl_diff_1 = (zl_mono > zl_stereo) ? (zl_mono - zl_stereo) : - (zl_stereo - zl_mono); - zl_diff_2 = (*zl > *zr) ? 
(*zl - *zr) : (*zr - *zl); - - mbhc->hph_type = MBHC_HPH_NONE; - if (mbhc->current_plug != PLUG_TYPE_HIGH_HPH) { - if ((zl_diff_1 > (zl_mono >> 1)) || - (zl_diff_2 > WCD9XXX_MONO_HS_DIFF_THR) || - ((*zl < WCD9XXX_MONO_HS_MIN_THR) && - (*zr > WCD9XXX_MONO_HS_MIN_THR)) || - ((*zr < WCD9XXX_MONO_HS_MIN_THR) && - (*zl > WCD9XXX_MONO_HS_MIN_THR))) { - pr_debug("%s: MONO plug type detected\n", - __func__); - mbhc->hph_type = MBHC_HPH_MONO; - *zl = zl_mono; - } else { - pr_debug("%s: STEREO plug type detected\n", - __func__); - mbhc->hph_type = MBHC_HPH_STEREO; - } - } - } - - mbhc->mbhc_cb->setup_zdet(mbhc, MBHC_ZDET_PA_DISABLE); - - /* Calculate z values based on the Q-fuse registers, if used */ - if (mbhc->mbhc_cb->zdet_error_approx) - mbhc->mbhc_cb->zdet_error_approx(mbhc, zl, zr); - - wcd9xxx_onoff_ext_mclk(mbhc, false); - - if (!override_en) - wcd9xxx_turn_onoff_override(mbhc, false); - - /* Undo the micbias disable for override */ - snd_soc_write(codec, WCD9XXX_A_MAD_ANA_CTRL, micb_mbhc_val); - - pr_debug("%s: L0: 0x%x(%d), L1: 0x%x(%d), L2: 0x%x(%d)\n", - __func__, - l[0] & 0xffff, l[0], l[1] & 0xffff, l[1], l[2] & 0xffff, l[2]); - pr_debug("%s: R0: 0x%x(%d), R1: 0x%x(%d), R2: 0x%x(%d)\n", - __func__, - r[0] & 0xffff, r[0], r[1] & 0xffff, r[1], r[2] & 0xffff, r[2]); - pr_debug("%s: RL %u milliohm, RR %u milliohm\n", __func__, *zl, *zr); - pr_debug("%s: Impedance detection completed\n", __func__); - - return ret; -} - -int wcd9xxx_mbhc_get_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl, - uint32_t *zr) -{ - *zl = mbhc->zl; - *zr = mbhc->zr; - - if (*zl && *zr) - return 0; - else - return -EINVAL; -} - -/* - * wcd9xxx_mbhc_init : initialize MBHC internal structures. - * - * NOTE: mbhc->mbhc_cfg is not YET configure so shouldn't be used - */ -int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr, - struct snd_soc_codec *codec, - int (*micbias_enable_cb)(struct snd_soc_codec*, bool, - enum wcd9xxx_micbias_num), - const struct wcd9xxx_mbhc_cb *mbhc_cb, - const struct wcd9xxx_mbhc_intr *mbhc_cdc_intr_ids, - int rco_clk_rate, - bool impedance_det_en) -{ - int ret; - void *core_res; - - pr_debug("%s: enter\n", __func__); - memset(&mbhc->mbhc_bias_regs, 0, sizeof(struct mbhc_micbias_regs)); - memset(&mbhc->mbhc_data, 0, sizeof(struct mbhc_internal_cal_data)); - - mbhc->mbhc_data.t_sta_dce = DEFAULT_DCE_STA_WAIT; - mbhc->mbhc_data.t_dce = DEFAULT_DCE_WAIT; - mbhc->mbhc_data.t_sta = DEFAULT_STA_WAIT; - mbhc->mbhc_micbias_switched = false; - mbhc->polling_active = false; - mbhc->mbhc_state = MBHC_STATE_NONE; - mbhc->in_swch_irq_handler = false; - mbhc->current_plug = PLUG_TYPE_NONE; - mbhc->lpi_enabled = false; - mbhc->no_mic_headset_override = false; - mbhc->mbhc_last_resume = 0; - mbhc->codec = codec; - mbhc->resmgr = resmgr; - mbhc->resmgr->mbhc = mbhc; - mbhc->micbias_enable_cb = micbias_enable_cb; - mbhc->rco_clk_rate = rco_clk_rate; - mbhc->mbhc_cb = mbhc_cb; - mbhc->intr_ids = mbhc_cdc_intr_ids; - mbhc->impedance_detect = impedance_det_en; - mbhc->hph_type = MBHC_HPH_NONE; - - if (mbhc->intr_ids == NULL) { - pr_err("%s: Interrupt mapping not provided\n", __func__); - return -EINVAL; - } - - if (mbhc->headset_jack.jack == NULL) { - ret = snd_soc_card_jack_new(codec->component.card, - "Headset Jack", WCD9XXX_JACK_MASK, - &mbhc->headset_jack, NULL, 0); - if (ret) { - pr_err("%s: Failed to create new jack\n", __func__); - return ret; - } - - ret = snd_soc_card_jack_new(codec->component.card, - "Button Jack", - WCD9XXX_JACK_BUTTON_MASK, - &mbhc->button_jack, NULL, 0); - 
if (ret) { - pr_err("Failed to create new jack\n"); - return ret; - } - - ret = snd_jack_set_key(mbhc->button_jack.jack, - SND_JACK_BTN_0, - KEY_MEDIA); - if (ret) { - pr_err("%s: Failed to set code for btn-0\n", - __func__); - return ret; - } - - INIT_DELAYED_WORK(&mbhc->mbhc_firmware_dwork, - wcd9xxx_mbhc_fw_read); - INIT_DELAYED_WORK(&mbhc->mbhc_btn_dwork, wcd9xxx_btn_lpress_fn); - INIT_DELAYED_WORK(&mbhc->mbhc_insert_dwork, - wcd9xxx_mbhc_insert_work); - } - - mutex_init(&mbhc->mbhc_lock); - - /* Register event notifier */ - mbhc->nblock.notifier_call = wcd9xxx_event_notify; - ret = wcd9xxx_resmgr_register_notifier(mbhc->resmgr, &mbhc->nblock); - if (ret) { - pr_err("%s: Failed to register notifier %d\n", __func__, ret); - mutex_destroy(&mbhc->mbhc_lock); - return ret; - } - - wcd9xxx_init_debugfs(mbhc); - - /* Disable Impedance detection by default for certain codec types */ - if (mbhc->mbhc_cb && mbhc->mbhc_cb->get_cdc_type && - (mbhc->mbhc_cb->get_cdc_type() == WCD9XXX_CDC_TYPE_HELICON)) - impedance_detect_en = 0; - else - impedance_detect_en = impedance_det_en ? 1 : 0; - - core_res = mbhc->resmgr->core_res; - ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->insertion, - wcd9xxx_hs_insert_irq, - "Headset insert detect", mbhc); - if (ret) { - pr_err("%s: Failed to request irq %d, ret = %d\n", __func__, - mbhc->intr_ids->insertion, ret); - goto err_insert_irq; - } - wcd9xxx_disable_irq(core_res, mbhc->intr_ids->insertion); - - ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->poll_plug_rem, - wcd9xxx_hs_remove_irq, - "Headset remove detect", mbhc); - if (ret) { - pr_err("%s: Failed to request irq %d\n", __func__, - mbhc->intr_ids->poll_plug_rem); - goto err_remove_irq; - } - - ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->dce_est_complete, - wcd9xxx_dce_handler, "DC Estimation detect", - mbhc); - if (ret) { - pr_err("%s: Failed to request irq %d\n", __func__, - mbhc->intr_ids->dce_est_complete); - goto err_potential_irq; - } - - ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->button_release, - wcd9xxx_release_handler, - "Button Release detect", mbhc); - if (ret) { - pr_err("%s: Failed to request irq %d\n", __func__, - mbhc->intr_ids->button_release); - goto err_release_irq; - } - - ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->hph_left_ocp, - wcd9xxx_hphl_ocp_irq, "HPH_L OCP detect", - mbhc); - if (ret) { - pr_err("%s: Failed to request irq %d\n", __func__, - mbhc->intr_ids->hph_left_ocp); - goto err_hphl_ocp_irq; - } - wcd9xxx_disable_irq(core_res, mbhc->intr_ids->hph_left_ocp); - - ret = wcd9xxx_request_irq(core_res, mbhc->intr_ids->hph_right_ocp, - wcd9xxx_hphr_ocp_irq, "HPH_R OCP detect", - mbhc); - if (ret) { - pr_err("%s: Failed to request irq %d\n", __func__, - mbhc->intr_ids->hph_right_ocp); - goto err_hphr_ocp_irq; - } - wcd9xxx_disable_irq(core_res, mbhc->intr_ids->hph_right_ocp); - - wcd9xxx_regmgr_cond_register(resmgr, 1 << WCD9XXX_COND_HPH_MIC | - 1 << WCD9XXX_COND_HPH); - - pr_debug("%s: leave ret %d\n", __func__, ret); - return ret; - -err_hphr_ocp_irq: - wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc); -err_hphl_ocp_irq: - wcd9xxx_free_irq(core_res, mbhc->intr_ids->button_release, mbhc); -err_release_irq: - wcd9xxx_free_irq(core_res, mbhc->intr_ids->dce_est_complete, mbhc); -err_potential_irq: - wcd9xxx_free_irq(core_res, mbhc->intr_ids->poll_plug_rem, mbhc); -err_remove_irq: - wcd9xxx_free_irq(core_res, mbhc->intr_ids->insertion, mbhc); -err_insert_irq: - wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock); - - 
mutex_destroy(&mbhc->mbhc_lock); - - pr_debug("%s: leave ret %d\n", __func__, ret); - return ret; -} -EXPORT_SYMBOL(wcd9xxx_mbhc_init); - -void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc) -{ - struct wcd9xxx_core_resource *core_res = - mbhc->resmgr->core_res; - - wcd9xxx_regmgr_cond_deregister(mbhc->resmgr, 1 << WCD9XXX_COND_HPH_MIC | - 1 << WCD9XXX_COND_HPH); - - wcd9xxx_free_irq(core_res, mbhc->intr_ids->button_release, mbhc); - wcd9xxx_free_irq(core_res, mbhc->intr_ids->dce_est_complete, mbhc); - wcd9xxx_free_irq(core_res, mbhc->intr_ids->poll_plug_rem, mbhc); - wcd9xxx_free_irq(core_res, mbhc->intr_ids->insertion, mbhc); - wcd9xxx_free_irq(core_res, mbhc->intr_ids->hs_jack_switch, mbhc); - wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_left_ocp, mbhc); - wcd9xxx_free_irq(core_res, mbhc->intr_ids->hph_right_ocp, mbhc); - - mutex_destroy(&mbhc->mbhc_lock); - wcd9xxx_resmgr_unregister_notifier(mbhc->resmgr, &mbhc->nblock); - wcd9xxx_cleanup_debugfs(mbhc); -} -EXPORT_SYMBOL(wcd9xxx_mbhc_deinit); - -MODULE_DESCRIPTION("wcd9xxx MBHC module"); -MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/wcd9xxx-mbhc.h b/sound/soc/codecs/wcd9xxx-mbhc.h deleted file mode 100644 index e35f7d4adc2d..000000000000 --- a/sound/soc/codecs/wcd9xxx-mbhc.h +++ /dev/null @@ -1,492 +0,0 @@ -/* Copyright (c) 2012-2015, 2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#ifndef __WCD9XXX_MBHC_H__ -#define __WCD9XXX_MBHC_H__ - -#include "wcd9xxx-resmgr.h" -#include "wcdcal-hwdep.h" - -#define WCD9XXX_CFILT_FAST_MODE 0x00 -#define WCD9XXX_CFILT_SLOW_MODE 0x40 -#define WCD9XXX_CFILT_EXT_PRCHG_EN 0x30 -#define WCD9XXX_CFILT_EXT_PRCHG_DSBL 0x00 - -#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100 - -struct mbhc_micbias_regs { - u16 cfilt_val; - u16 cfilt_ctl; - u16 mbhc_reg; - u16 int_rbias; - u16 ctl_reg; - u8 cfilt_sel; -}; - -enum mbhc_v_index { - MBHC_V_IDX_CFILT, - MBHC_V_IDX_VDDIO, - MBHC_V_IDX_NUM, -}; - -enum mbhc_cal_type { - MBHC_CAL_MCLK, - MBHC_CAL_RCO, - MBHC_CAL_NUM, -}; - -enum mbhc_impedance_detect_stages { - MBHC_ZDET_PRE_MEASURE, - MBHC_ZDET_POST_MEASURE, - MBHC_ZDET_GAIN_0, - MBHC_ZDET_GAIN_1, - MBHC_ZDET_GAIN_2, - MBHC_ZDET_HPHR_RAMP_DISABLE, - MBHC_ZDET_HPHL_RAMP_DISABLE, - MBHC_ZDET_RAMP_DISABLE, - MBHC_ZDET_HPHR_PA_DISABLE, - MBHC_ZDET_PA_DISABLE, - MBHC_ZDET_GAIN_UPDATE_1X, -}; - -/* Zone assignments used in WCD9330 for Zdet */ -enum mbhc_zdet_zones { - ZL_ZONE1__ZR_ZONE1, - ZL_ZONE2__ZR_ZONE2, - ZL_ZONE3__ZR_ZONE3, - ZL_ZONE2__ZR_ZONE1, - ZL_ZONE3__ZR_ZONE1, - ZL_ZONE1__ZR_ZONE2, - ZL_ZONE1__ZR_ZONE3, - ZL_ZR_NOT_IN_ZONE1, -}; - -/* Data used by MBHC */ -struct mbhc_internal_cal_data { - u16 dce_z; - u16 dce_nsc_cs_z; - u16 dce_mb; - u16 sta_z; - u16 sta_mb; - u32 t_sta_dce; - u32 t_dce; - u32 t_sta; - u32 micb_mv; - u16 v_ins_hu[MBHC_V_IDX_NUM]; - u16 v_ins_h[MBHC_V_IDX_NUM]; - u16 v_b1_hu[MBHC_V_IDX_NUM]; - u16 v_b1_h[MBHC_V_IDX_NUM]; - u16 v_brh[MBHC_V_IDX_NUM]; - u16 v_brl; - u16 v_no_mic; - s16 v_inval_ins_low; - s16 v_inval_ins_high; - u16 v_cs_ins_h; -}; - -enum wcd9xxx_mbhc_plug_type { - PLUG_TYPE_INVALID = -1, - PLUG_TYPE_NONE, - PLUG_TYPE_HEADSET, - PLUG_TYPE_HEADPHONE, - PLUG_TYPE_HIGH_HPH, - PLUG_TYPE_GND_MIC_SWAP, - PLUG_TYPE_ANC_HEADPHONE, -}; - -enum wcd9xxx_mbhc_micbias_type { - MBHC_PRIMARY_MIC_MB, - MBHC_ANC_MIC_MB, -}; - -enum wcd9xxx_micbias_num { - MBHC_MICBIAS_INVALID = -1, - MBHC_MICBIAS1, - MBHC_MICBIAS2, - MBHC_MICBIAS3, - MBHC_MICBIAS4, -}; - -enum hw_jack_type { - FOUR_POLE_JACK = 0, - FIVE_POLE_JACK, - SIX_POLE_JACK, -}; - -enum wcd9xx_mbhc_micbias_enable_bits { - MBHC_MICBIAS_ENABLE_THRESHOLD_HEADSET, - MBHC_MICBIAS_ENABLE_REGULAR_HEADSET, -}; - -enum wcd9xx_mbhc_cs_enable_bits { - MBHC_CS_ENABLE_POLLING, - MBHC_CS_ENABLE_INSERTION, - MBHC_CS_ENABLE_REMOVAL, - MBHC_CS_ENABLE_DET_ANC, -}; - -enum wcd9xxx_mbhc_state { - MBHC_STATE_NONE = -1, - MBHC_STATE_POTENTIAL, - MBHC_STATE_POTENTIAL_RECOVERY, - MBHC_STATE_RELEASE, -}; - -enum wcd9xxx_mbhc_btn_det_mem { - MBHC_BTN_DET_V_BTN_LOW, - MBHC_BTN_DET_V_BTN_HIGH, - MBHC_BTN_DET_N_READY, - MBHC_BTN_DET_N_CIC, - MBHC_BTN_DET_GAIN -}; - -enum wcd9xxx_mbhc_clk_freq { - TAIKO_MCLK_12P2MHZ = 0, - TAIKO_MCLK_9P6MHZ, - TAIKO_NUM_CLK_FREQS, -}; - -enum wcd9xxx_mbhc_event_state { - MBHC_EVENT_PA_HPHL, - MBHC_EVENT_PA_HPHR, - MBHC_EVENT_PRE_TX_3_ON, - MBHC_EVENT_POST_TX_3_OFF, -}; - -enum mbhc_hph_type { - MBHC_HPH_NONE = 0, - MBHC_HPH_MONO, - MBHC_HPH_STEREO, -}; - -struct wcd9xxx_mbhc_general_cfg { - u8 t_ldoh; - u8 t_bg_fast_settle; - u8 t_shutdown_plug_rem; - u8 mbhc_nsa; - u8 mbhc_navg; - u8 v_micbias_l; - u8 v_micbias; - u8 mbhc_reserved; - u16 settle_wait; - u16 t_micbias_rampup; - u16 t_micbias_rampdown; - u16 t_supply_bringup; -} __packed; - -struct wcd9xxx_mbhc_plug_detect_cfg { - u32 mic_current; - u32 hph_current; - u16 t_mic_pid; - u16 t_ins_complete; - u16 t_ins_retry; - u16 v_removal_delta; - u8 micbias_slow_ramp; - u8 reserved0; - u8 reserved1; - u8 
reserved2; -} __packed; - -struct wcd9xxx_mbhc_plug_type_cfg { - u8 av_detect; - u8 mono_detect; - u8 num_ins_tries; - u8 reserved0; - s16 v_no_mic; - s16 v_av_min; - s16 v_av_max; - s16 v_hs_min; - s16 v_hs_max; - u16 reserved1; -} __packed; - -struct wcd9xxx_mbhc_btn_detect_cfg { - s8 c[8]; - u8 nc; - u8 n_meas; - u8 mbhc_nsc; - u8 n_btn_meas; - u8 n_btn_con; - u8 num_btn; - u8 reserved0; - u8 reserved1; - u16 t_poll; - u16 t_bounce_wait; - u16 t_rel_timeout; - s16 v_btn_press_delta_sta; - s16 v_btn_press_delta_cic; - u16 t_btn0_timeout; - s16 _v_btn_low[0]; /* v_btn_low[num_btn] */ - s16 _v_btn_high[0]; /* v_btn_high[num_btn] */ - u8 _n_ready[TAIKO_NUM_CLK_FREQS]; - u8 _n_cic[TAIKO_NUM_CLK_FREQS]; - u8 _gain[TAIKO_NUM_CLK_FREQS]; -} __packed; - -struct wcd9xxx_mbhc_imped_detect_cfg { - u8 _hs_imped_detect; - u8 _n_rload; - u8 _hph_keep_on; - u8 _repeat_rload_calc; - u16 _t_dac_ramp_time; - u16 _rhph_high; - u16 _rhph_low; - u16 _rload[0]; /* rload[n_rload] */ - u16 _alpha[0]; /* alpha[n_rload] */ - u16 _beta[3]; -} __packed; - -struct wcd9xxx_mbhc_config { - bool read_fw_bin; - /* - * void* calibration contains: - * struct wcd9xxx_mbhc_general_cfg generic; - * struct wcd9xxx_mbhc_plug_detect_cfg plug_det; - * struct wcd9xxx_mbhc_plug_type_cfg plug_type; - * struct wcd9xxx_mbhc_btn_detect_cfg btn_det; - * struct wcd9xxx_mbhc_imped_detect_cfg imped_det; - * Note: various size depends on btn_det->num_btn - */ - void *calibration; - enum wcd9xxx_micbias_num micbias; - enum wcd9xxx_micbias_num anc_micbias; - int (*mclk_cb_fn)(struct snd_soc_codec*, int, bool); - unsigned int mclk_rate; - unsigned int gpio; - unsigned int gpio_irq; - int gpio_level_insert; - bool insert_detect; /* codec has own MBHC_INSERT_DETECT */ - bool detect_extn_cable; - /* bit mask of enum wcd9xx_mbhc_micbias_enable_bits */ - unsigned long micbias_enable_flags; - /* swap_gnd_mic returns true if extern GND/MIC swap switch toggled */ - bool (*swap_gnd_mic)(struct snd_soc_codec *); - unsigned long cs_enable_flags; - bool use_int_rbias; - bool do_recalibration; - bool use_vddio_meas; - bool enable_anc_mic_detect; - enum hw_jack_type hw_jack_type; - int key_code[8]; -}; - -struct wcd9xxx_cfilt_mode { - u8 reg_mode_val; - u8 cur_mode_val; - u8 reg_mask; -}; - -struct wcd9xxx_mbhc_intr { - int poll_plug_rem; - int shortavg_complete; - int potential_button_press; - int button_release; - int dce_est_complete; - int insertion; - int hph_left_ocp; - int hph_right_ocp; - int hs_jack_switch; -}; - -struct wcd9xxx_mbhc_cb { - void (*enable_mux_bias_block)(struct snd_soc_codec *); - void (*cfilt_fast_mode)(struct snd_soc_codec *, struct wcd9xxx_mbhc *); - void (*codec_specific_cal)(struct snd_soc_codec *, - struct wcd9xxx_mbhc *); - struct wcd9xxx_cfilt_mode (*switch_cfilt_mode)(struct wcd9xxx_mbhc *, - bool); - void (*select_cfilt)(struct snd_soc_codec *, struct wcd9xxx_mbhc *); - enum wcd9xxx_cdc_type (*get_cdc_type)(void); - void (*enable_clock_gate)(struct snd_soc_codec *, bool); - int (*setup_zdet)(struct wcd9xxx_mbhc *, - enum mbhc_impedance_detect_stages stage); - void (*compute_impedance)(struct wcd9xxx_mbhc *, s16 *, s16 *, - uint32_t *, uint32_t *); - void (*zdet_error_approx)(struct wcd9xxx_mbhc *, uint32_t *, - uint32_t *); - void (*enable_mbhc_txfe)(struct snd_soc_codec *, bool); - int (*enable_mb_source)(struct snd_soc_codec *, bool, bool); - void (*setup_int_rbias)(struct snd_soc_codec *, bool); - void (*pull_mb_to_vddio)(struct snd_soc_codec *, bool); - bool (*insert_rem_status)(struct snd_soc_codec *); - void 
(*micbias_pulldown_ctrl)(struct wcd9xxx_mbhc *, bool); - int (*codec_rco_ctrl)(struct snd_soc_codec *, bool); - void (*hph_auto_pulldown_ctrl)(struct snd_soc_codec *, bool); - struct firmware_cal * (*get_hwdep_fw_cal)(struct snd_soc_codec *, - enum wcd_cal_type); -}; - -struct wcd9xxx_mbhc { - bool polling_active; - /* Delayed work to report long button press */ - struct delayed_work mbhc_btn_dwork; - int buttons_pressed; - enum wcd9xxx_mbhc_state mbhc_state; - struct wcd9xxx_mbhc_config *mbhc_cfg; - const struct wcd9xxx_mbhc_cb *mbhc_cb; - - struct mbhc_internal_cal_data mbhc_data; - - struct mbhc_micbias_regs mbhc_bias_regs; - struct mbhc_micbias_regs mbhc_anc_bias_regs; - - bool mbhc_micbias_switched; - - u32 hph_status; /* track headhpone status */ - u8 hphlocp_cnt; /* headphone left ocp retry */ - u8 hphrocp_cnt; /* headphone right ocp retry */ - - /* Work to perform MBHC Firmware Read */ - struct delayed_work mbhc_firmware_dwork; - const struct firmware *mbhc_fw; - struct firmware_cal *mbhc_cal; - - struct delayed_work mbhc_insert_dwork; - - u8 current_plug; - struct work_struct correct_plug_swch; - /* - * Work to perform polling on microphone voltage - * in order to correct plug type once plug type - * is detected as headphone - */ - struct work_struct correct_plug_noswch; - bool hs_detect_work_stop; - - bool lpi_enabled; /* low power insertion detection */ - bool in_swch_irq_handler; - - struct wcd9xxx_resmgr *resmgr; - struct snd_soc_codec *codec; - - bool no_mic_headset_override; - - /* track PA/DAC state to sync with userspace */ - unsigned long hph_pa_dac_state; - /* - * save codec's state with resmgr event notification - * bit flags of enum wcd9xxx_mbhc_event_state - */ - unsigned long event_state; - - unsigned long mbhc_last_resume; /* in jiffies */ - - bool insert_detect_level_insert; - - struct snd_soc_jack headset_jack; - struct snd_soc_jack button_jack; - - struct notifier_block nblock; - - bool micbias_enable; - int (*micbias_enable_cb)(struct snd_soc_codec*, bool, - enum wcd9xxx_micbias_num); - - bool impedance_detect; - /* impedance of hphl and hphr */ - uint32_t zl, zr; - - u32 rco_clk_rate; - - bool update_z; - - u8 scaling_mux_in; - /* Holds codec specific interrupt mapping */ - const struct wcd9xxx_mbhc_intr *intr_ids; - - /* Indicates status of current source switch */ - bool is_cs_enabled; - - /* Holds type of Headset - Mono/Stereo */ - enum mbhc_hph_type hph_type; - -#ifdef CONFIG_DEBUG_FS - struct dentry *debugfs_poke; - struct dentry *debugfs_mbhc; -#endif - - struct mutex mbhc_lock; -}; - -#define WCD9XXX_MBHC_CAL_SIZE(buttons, rload) ( \ - sizeof(enum wcd9xxx_micbias_num) + \ - sizeof(struct wcd9xxx_mbhc_general_cfg) + \ - sizeof(struct wcd9xxx_mbhc_plug_detect_cfg) + \ - ((sizeof(s16) + sizeof(s16)) * buttons) + \ - sizeof(struct wcd9xxx_mbhc_plug_type_cfg) + \ - sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \ - sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \ - ((sizeof(u16) + sizeof(u16)) * rload) \ - ) - -#define WCD9XXX_MBHC_CAL_GENERAL_PTR(cali) ( \ - (struct wcd9xxx_mbhc_general_cfg *) cali) -#define WCD9XXX_MBHC_CAL_PLUG_DET_PTR(cali) ( \ - (struct wcd9xxx_mbhc_plug_detect_cfg *) \ - &(WCD9XXX_MBHC_CAL_GENERAL_PTR(cali)[1])) -#define WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(cali) ( \ - (struct wcd9xxx_mbhc_plug_type_cfg *) \ - &(WCD9XXX_MBHC_CAL_PLUG_DET_PTR(cali)[1])) -#define WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali) ( \ - (struct wcd9xxx_mbhc_btn_detect_cfg *) \ - &(WCD9XXX_MBHC_CAL_PLUG_TYPE_PTR(cali)[1])) -#define WCD9XXX_MBHC_CAL_IMPED_DET_PTR(cali) ( \ - 
(struct wcd9xxx_mbhc_imped_detect_cfg *) \ - (((void *)&WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)[1]) + \ - (WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->num_btn * \ - (sizeof(WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_low[0]) + \ - sizeof(WCD9XXX_MBHC_CAL_BTN_DET_PTR(cali)->_v_btn_high[0])))) \ - ) - -/* minimum size of calibration data assuming there is only one button and - * one rload. - */ -#define WCD9XXX_MBHC_CAL_MIN_SIZE ( \ - sizeof(struct wcd9xxx_mbhc_general_cfg) + \ - sizeof(struct wcd9xxx_mbhc_plug_detect_cfg) + \ - sizeof(struct wcd9xxx_mbhc_plug_type_cfg) + \ - sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \ - sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \ - (sizeof(u16) * 2) \ - ) - -#define WCD9XXX_MBHC_CAL_BTN_SZ(cfg_ptr) ( \ - sizeof(struct wcd9xxx_mbhc_btn_detect_cfg) + \ - (cfg_ptr->num_btn * (sizeof(cfg_ptr->_v_btn_low[0]) + \ - sizeof(cfg_ptr->_v_btn_high[0])))) - -#define WCD9XXX_MBHC_CAL_IMPED_MIN_SZ ( \ - sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + sizeof(u16) * 2) - -#define WCD9XXX_MBHC_CAL_IMPED_SZ(cfg_ptr) ( \ - sizeof(struct wcd9xxx_mbhc_imped_detect_cfg) + \ - (cfg_ptr->_n_rload * \ - (sizeof(cfg_ptr->_rload[0]) + sizeof(cfg_ptr->_alpha[0])))) - -int wcd9xxx_mbhc_set_keycode(struct wcd9xxx_mbhc *mbhc); -int wcd9xxx_mbhc_start(struct wcd9xxx_mbhc *mbhc, - struct wcd9xxx_mbhc_config *mbhc_cfg); -void wcd9xxx_mbhc_stop(struct wcd9xxx_mbhc *mbhc); -int wcd9xxx_mbhc_init(struct wcd9xxx_mbhc *mbhc, struct wcd9xxx_resmgr *resmgr, - struct snd_soc_codec *codec, - int (*micbias_enable_cb)(struct snd_soc_codec*, bool, - enum wcd9xxx_micbias_num), - const struct wcd9xxx_mbhc_cb *mbhc_cb, - const struct wcd9xxx_mbhc_intr *mbhc_cdc_intr_ids, - int rco_clk_rate, - bool impedance_det_en); -void wcd9xxx_mbhc_deinit(struct wcd9xxx_mbhc *mbhc); -void *wcd9xxx_mbhc_cal_btn_det_mp( - const struct wcd9xxx_mbhc_btn_detect_cfg *btn_det, - const enum wcd9xxx_mbhc_btn_det_mem mem); -int wcd9xxx_mbhc_get_impedance(struct wcd9xxx_mbhc *mbhc, uint32_t *zl, - uint32_t *zr); -#endif /* __WCD9XXX_MBHC_H__ */ diff --git a/sound/soc/codecs/wcd9xxx-resmgr.c b/sound/soc/codecs/wcd9xxx-resmgr.c deleted file mode 100644 index 4b0265241901..000000000000 --- a/sound/soc/codecs/wcd9xxx-resmgr.c +++ /dev/null @@ -1,1099 +0,0 @@ -/* Copyright (c) 2012-2014, 2016-2017 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "wcd9xxx-resmgr.h" - -static char wcd9xxx_event_string[][64] = { - "WCD9XXX_EVENT_INVALID", - - "WCD9XXX_EVENT_PRE_RCO_ON", - "WCD9XXX_EVENT_POST_RCO_ON", - "WCD9XXX_EVENT_PRE_RCO_OFF", - "WCD9XXX_EVENT_POST_RCO_OFF", - - "WCD9XXX_EVENT_PRE_MCLK_ON", - "WCD9XXX_EVENT_POST_MCLK_ON", - "WCD9XXX_EVENT_PRE_MCLK_OFF", - "WCD9XXX_EVENT_POST_MCLK_OFF", - - "WCD9XXX_EVENT_PRE_BG_OFF", - "WCD9XXX_EVENT_POST_BG_OFF", - "WCD9XXX_EVENT_PRE_BG_AUDIO_ON", - "WCD9XXX_EVENT_POST_BG_AUDIO_ON", - "WCD9XXX_EVENT_PRE_BG_MBHC_ON", - "WCD9XXX_EVENT_POST_BG_MBHC_ON", - - "WCD9XXX_EVENT_PRE_MICBIAS_1_OFF", - "WCD9XXX_EVENT_POST_MICBIAS_1_OFF", - "WCD9XXX_EVENT_PRE_MICBIAS_2_OFF", - "WCD9XXX_EVENT_POST_MICBIAS_2_OFF", - "WCD9XXX_EVENT_PRE_MICBIAS_3_OFF", - "WCD9XXX_EVENT_POST_MICBIAS_3_OFF", - "WCD9XXX_EVENT_PRE_MICBIAS_4_OFF", - "WCD9XXX_EVENT_POST_MICBIAS_4_OFF", - "WCD9XXX_EVENT_PRE_MICBIAS_1_ON", - "WCD9XXX_EVENT_POST_MICBIAS_1_ON", - "WCD9XXX_EVENT_PRE_MICBIAS_2_ON", - "WCD9XXX_EVENT_POST_MICBIAS_2_ON", - "WCD9XXX_EVENT_PRE_MICBIAS_3_ON", - "WCD9XXX_EVENT_POST_MICBIAS_3_ON", - "WCD9XXX_EVENT_PRE_MICBIAS_4_ON", - "WCD9XXX_EVENT_POST_MICBIAS_4_ON", - - "WCD9XXX_EVENT_PRE_CFILT_1_OFF", - "WCD9XXX_EVENT_POST_CFILT_1_OFF", - "WCD9XXX_EVENT_PRE_CFILT_2_OFF", - "WCD9XXX_EVENT_POST_CFILT_2_OFF", - "WCD9XXX_EVENT_PRE_CFILT_3_OFF", - "WCD9XXX_EVENT_POST_CFILT_3_OFF", - "WCD9XXX_EVENT_PRE_CFILT_1_ON", - "WCD9XXX_EVENT_POST_CFILT_1_ON", - "WCD9XXX_EVENT_PRE_CFILT_2_ON", - "WCD9XXX_EVENT_POST_CFILT_2_ON", - "WCD9XXX_EVENT_PRE_CFILT_3_ON", - "WCD9XXX_EVENT_POST_CFILT_3_ON", - - "WCD9XXX_EVENT_PRE_HPHL_PA_ON", - "WCD9XXX_EVENT_POST_HPHL_PA_OFF", - "WCD9XXX_EVENT_PRE_HPHR_PA_ON", - "WCD9XXX_EVENT_POST_HPHR_PA_OFF", - - "WCD9XXX_EVENT_POST_RESUME", - - "WCD9XXX_EVENT_PRE_TX_3_ON", - "WCD9XXX_EVENT_POST_TX_3_OFF", - - "WCD9XXX_EVENT_LAST", -}; - -#define WCD9XXX_RCO_CALIBRATION_RETRY_COUNT 5 -#define WCD9XXX_RCO_CALIBRATION_DELAY_US 5000 -#define WCD9XXX_USLEEP_RANGE_MARGIN_US 100 -#define WCD9XXX_RCO_CALIBRATION_DELAY_INC_US 1000 - -struct wcd9xxx_resmgr_cond_entry { - unsigned short reg; - int shift; - bool invert; - enum wcd9xxx_resmgr_cond cond; - struct list_head list; -}; - -static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr - *resmgr); -static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_clock_type type); - -const char *wcd9xxx_get_event_string(enum wcd9xxx_notify_event type) -{ - return wcd9xxx_event_string[type]; -} - -void wcd9xxx_resmgr_notifier_call(struct wcd9xxx_resmgr *resmgr, - const enum wcd9xxx_notify_event e) -{ - pr_debug("%s: notifier call event %d\n", __func__, e); - blocking_notifier_call_chain(&resmgr->notifier, e, resmgr); -} - -static void wcd9xxx_disable_bg(struct wcd9xxx_resmgr *resmgr) -{ - /* Notify bg mode change */ - wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_OFF); - /* Disable bg */ - snd_soc_update_bits(resmgr->codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, - 0x03, 0x00); - usleep_range(100, 110); - /* Notify bg mode change */ - wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_OFF); -} - -/* - * BG enablement should always enable in slow mode. - * The fast mode doesn't need to be enabled as fast mode BG is to be driven - * by MBHC override. 
- */ -static void wcd9xxx_enable_bg(struct wcd9xxx_resmgr *resmgr) -{ - struct snd_soc_codec *codec = resmgr->codec; - - /* Enable BG in slow mode and precharge */ - snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x80); - snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x04, 0x04); - snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x01, 0x01); - usleep_range(1000, 1100); - snd_soc_update_bits(codec, WCD9XXX_A_BIAS_CENTRAL_BG_CTL, 0x80, 0x00); -} - -static void wcd9xxx_enable_bg_audio(struct wcd9xxx_resmgr *resmgr) -{ - /* Notify bandgap mode change */ - wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_AUDIO_ON); - wcd9xxx_enable_bg(resmgr); - /* Notify bandgap mode change */ - wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_AUDIO_ON); -} - -static void wcd9xxx_enable_bg_mbhc(struct wcd9xxx_resmgr *resmgr) -{ - struct snd_soc_codec *codec = resmgr->codec; - - /* Notify bandgap mode change */ - wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_BG_MBHC_ON); - - /* - * mclk should be off or clk buff source souldn't be VBG - * Let's turn off mclk always - */ - WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) & (1 << 2)); - - wcd9xxx_enable_bg(resmgr); - /* Notify bandgap mode change */ - wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_POST_BG_MBHC_ON); -} - -static void wcd9xxx_disable_clock_block(struct wcd9xxx_resmgr *resmgr) -{ - struct snd_soc_codec *codec = resmgr->codec; - - pr_debug("%s: enter\n", __func__); - WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr); - - /* Notify */ - if (resmgr->clk_type == WCD9XXX_CLK_RCO) - wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_OFF); - else - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_PRE_MCLK_OFF); - - switch (resmgr->codec_type) { - case WCD9XXX_CDC_TYPE_TOMTOM: - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00); - usleep_range(50, 55); - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02); - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x40); - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x40, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x01, 0x00); - break; - default: - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x04, 0x00); - usleep_range(50, 55); - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02, 0x02); - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, 0x05, 0x00); - break; - } - usleep_range(50, 55); - /* Notify */ - if (resmgr->clk_type == WCD9XXX_CLK_RCO) { - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_POST_RCO_OFF); - } else { - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_POST_MCLK_OFF); - } - pr_debug("%s: leave\n", __func__); -} - -static void wcd9xxx_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr *resmgr, - int clk_users) -{ - /* Caller of this function should have acquired - * BG_CLK lock - */ - WCD9XXX_BG_CLK_UNLOCK(resmgr); - if (clk_users) { - if (resmgr->resmgr_cb && - resmgr->resmgr_cb->cdc_rco_ctrl) { - while (clk_users--) - resmgr->resmgr_cb->cdc_rco_ctrl(resmgr->codec, - true); - } - } - /* Acquire BG_CLK lock before return */ - WCD9XXX_BG_CLK_LOCK(resmgr); -} - -void wcd9xxx_resmgr_post_ssr(struct wcd9xxx_resmgr *resmgr) -{ - int old_bg_audio_users, old_bg_mbhc_users; - int old_clk_rco_users, old_clk_mclk_users; - - pr_debug("%s: enter\n", __func__); - - WCD9XXX_BG_CLK_LOCK(resmgr); - old_bg_audio_users = resmgr->bg_audio_users; - old_bg_mbhc_users = resmgr->bg_mbhc_users; - old_clk_rco_users = resmgr->clk_rco_users; - old_clk_mclk_users = 
resmgr->clk_mclk_users; - resmgr->bg_audio_users = 0; - resmgr->bg_mbhc_users = 0; - resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF; - resmgr->clk_rco_users = 0; - resmgr->clk_mclk_users = 0; - resmgr->clk_type = WCD9XXX_CLK_OFF; - - if (old_bg_audio_users) { - while (old_bg_audio_users--) - wcd9xxx_resmgr_get_bandgap(resmgr, - WCD9XXX_BANDGAP_AUDIO_MODE); - } - - if (old_bg_mbhc_users) { - while (old_bg_mbhc_users--) - wcd9xxx_resmgr_get_bandgap(resmgr, - WCD9XXX_BANDGAP_MBHC_MODE); - } - - if (old_clk_mclk_users) { - while (old_clk_mclk_users--) - wcd9xxx_resmgr_get_clk_block(resmgr, WCD9XXX_CLK_MCLK); - } - - if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) { - wcd9xxx_resmgr_cdc_specific_get_clk(resmgr, old_clk_rco_users); - } else if (old_clk_rco_users) { - while (old_clk_rco_users--) - wcd9xxx_resmgr_get_clk_block(resmgr, - WCD9XXX_CLK_RCO); - } - WCD9XXX_BG_CLK_UNLOCK(resmgr); - pr_debug("%s: leave\n", __func__); -} - -/* - * wcd9xxx_resmgr_get_bandgap : Vote for bandgap ref - * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE - */ -void wcd9xxx_resmgr_get_bandgap(struct wcd9xxx_resmgr *resmgr, - const enum wcd9xxx_bandgap_type choice) -{ - enum wcd9xxx_clock_type clock_save = WCD9XXX_CLK_OFF; - - pr_debug("%s: enter, wants %d\n", __func__, choice); - - WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr); - switch (choice) { - case WCD9XXX_BANDGAP_AUDIO_MODE: - resmgr->bg_audio_users++; - if (resmgr->bg_audio_users == 1 && resmgr->bg_mbhc_users) { - /* - * Current bg is MBHC mode, about to switch to - * audio mode. - */ - WARN_ON(resmgr->bandgap_type != - WCD9XXX_BANDGAP_MBHC_MODE); - - /* BG mode can be changed only with clock off */ - if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM) - clock_save = wcd9xxx_save_clock(resmgr); - /* Swtich BG mode */ - wcd9xxx_disable_bg(resmgr); - wcd9xxx_enable_bg_audio(resmgr); - /* restore clock */ - if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM) - wcd9xxx_restore_clock(resmgr, clock_save); - } else if (resmgr->bg_audio_users == 1) { - /* currently off, just enable it */ - WARN_ON(resmgr->bandgap_type != WCD9XXX_BANDGAP_OFF); - wcd9xxx_enable_bg_audio(resmgr); - } - resmgr->bandgap_type = WCD9XXX_BANDGAP_AUDIO_MODE; - break; - case WCD9XXX_BANDGAP_MBHC_MODE: - resmgr->bg_mbhc_users++; - if (resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE || - resmgr->bandgap_type == WCD9XXX_BANDGAP_AUDIO_MODE) - /* do nothing */ - break; - - /* bg mode can be changed only with clock off */ - clock_save = wcd9xxx_save_clock(resmgr); - /* enable bg with MBHC mode */ - wcd9xxx_enable_bg_mbhc(resmgr); - /* restore clock */ - wcd9xxx_restore_clock(resmgr, clock_save); - /* save current mode */ - resmgr->bandgap_type = WCD9XXX_BANDGAP_MBHC_MODE; - break; - default: - pr_err("%s: Error, Invalid bandgap settings\n", __func__); - break; - } - - pr_debug("%s: bg users audio %d, mbhc %d\n", __func__, - resmgr->bg_audio_users, resmgr->bg_mbhc_users); -} - -/* - * wcd9xxx_resmgr_put_bandgap : Unvote bandgap ref that has been voted - * choice : WCD9XXX_BANDGAP_AUDIO_MODE, WCD9XXX_BANDGAP_MBHC_MODE - */ -void wcd9xxx_resmgr_put_bandgap(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_bandgap_type choice) -{ - enum wcd9xxx_clock_type clock_save; - - pr_debug("%s: enter choice %d\n", __func__, choice); - - WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr); - switch (choice) { - case WCD9XXX_BANDGAP_AUDIO_MODE: - if (--resmgr->bg_audio_users == 0) { - if (resmgr->bg_mbhc_users) { - /* bg mode can be changed only with clock off */ - clock_save = wcd9xxx_save_clock(resmgr); - /* switch to MBHC 
mode */ - wcd9xxx_enable_bg_mbhc(resmgr); - /* restore clock */ - wcd9xxx_restore_clock(resmgr, clock_save); - resmgr->bandgap_type = - WCD9XXX_BANDGAP_MBHC_MODE; - } else { - /* turn off */ - wcd9xxx_disable_bg(resmgr); - resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF; - } - } - break; - case WCD9XXX_BANDGAP_MBHC_MODE: - WARN(resmgr->bandgap_type == WCD9XXX_BANDGAP_OFF, - "Unexpected bandgap type %d\n", resmgr->bandgap_type); - if (--resmgr->bg_mbhc_users == 0 && - resmgr->bandgap_type == WCD9XXX_BANDGAP_MBHC_MODE) { - wcd9xxx_disable_bg(resmgr); - resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF; - } - break; - default: - pr_err("%s: Error, Invalid bandgap settings\n", __func__); - break; - } - - pr_debug("%s: bg users audio %d, mbhc %d\n", __func__, - resmgr->bg_audio_users, resmgr->bg_mbhc_users); -} - -void wcd9xxx_resmgr_enable_rx_bias(struct wcd9xxx_resmgr *resmgr, u32 enable) -{ - struct snd_soc_codec *codec = resmgr->codec; - - if (enable) { - resmgr->rx_bias_count++; - if (resmgr->rx_bias_count == 1) - snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS, - 0x80, 0x80); - } else { - resmgr->rx_bias_count--; - if (!resmgr->rx_bias_count) - snd_soc_update_bits(codec, WCD9XXX_A_RX_COM_BIAS, - 0x80, 0x00); - } -} - -int wcd9xxx_resmgr_enable_config_mode(struct wcd9xxx_resmgr *resmgr, int enable) -{ - struct snd_soc_codec *codec = resmgr->codec; - - pr_debug("%s: enable = %d\n", __func__, enable); - if (enable) { - snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x10, 0); - /* bandgap mode to fast */ - if (resmgr->pdata->mclk_rate == WCD9XXX_MCLK_CLK_12P288MHZ) - /* Set current value to 200nA for 12.288MHz clock */ - snd_soc_write(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x37); - else - snd_soc_write(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x17); - - usleep_range(5, 10); - snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0x80); - snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0x80); - usleep_range(10, 20); - snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_TEST, 0x80, 0); - usleep_range(10000, 10100); - - if (resmgr->pdata->mclk_rate != WCD9XXX_MCLK_CLK_12P288MHZ) - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, - 0x08, 0x08); - } else { - snd_soc_update_bits(codec, WCD9XXX_A_BIAS_OSC_BG_CTL, 0x1, 0); - snd_soc_update_bits(codec, WCD9XXX_A_RC_OSC_FREQ, 0x80, 0); - } - - return 0; -} - -static void wcd9xxx_enable_clock_block(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_clock_config_mode config_mode) -{ - struct snd_soc_codec *codec = resmgr->codec; - unsigned long delay = WCD9XXX_RCO_CALIBRATION_DELAY_US; - int num_retry = 0; - unsigned int valr; - unsigned int valr1; - unsigned int valw[] = {0x01, 0x01, 0x10, 0x00}; - - pr_debug("%s: config_mode = %d\n", __func__, config_mode); - - /* transit to RCO requires mclk off */ - if (resmgr->codec_type != WCD9XXX_CDC_TYPE_TOMTOM) - WARN_ON(snd_soc_read(codec, WCD9XXX_A_CLK_BUFF_EN2) & (1 << 2)); - - if (config_mode == WCD9XXX_CFG_RCO) { - /* Notify */ - wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_RCO_ON); - /* enable RCO and switch to it */ - wcd9xxx_resmgr_enable_config_mode(resmgr, 1); - snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2, 0x02); - usleep_range(1000, 1100); - } else if (config_mode == WCD9XXX_CFG_CAL_RCO) { - snd_soc_update_bits(codec, TOMTOM_A_BIAS_OSC_BG_CTL, - 0x01, 0x01); - /* 1ms sleep required after BG enabled */ - usleep_range(1000, 1100); - - if (resmgr->pdata->mclk_rate == WCD9XXX_MCLK_CLK_12P288MHZ) { - /* - * Set RCO clock rate as 12.288MHz rate explicitly - * as the Qfuse values are incorrect for this rate - */ - 
snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, - 0x50, 0x50); - } else { - snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, - 0x18, 0x10); - valr = snd_soc_read(codec, - TOMTOM_A_QFUSE_DATA_OUT0) & (0x04); - valr1 = snd_soc_read(codec, - TOMTOM_A_QFUSE_DATA_OUT1) & (0x08); - valr = (valr >> 1) | (valr1 >> 3); - snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x60, - valw[valr] << 5); - } - snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, 0x80, 0x80); - - do { - snd_soc_update_bits(codec, - TOMTOM_A_RCO_CALIBRATION_CTRL1, - 0x80, 0x80); - snd_soc_update_bits(codec, - TOMTOM_A_RCO_CALIBRATION_CTRL1, - 0x80, 0x00); - /* RCO calibration takes approx. 5ms */ - usleep_range(delay, delay + - WCD9XXX_USLEEP_RANGE_MARGIN_US); - if (!(snd_soc_read(codec, - TOMTOM_A_RCO_CALIBRATION_RESULT1) & 0x10)) - break; - if (num_retry >= 3) { - delay = delay + - WCD9XXX_RCO_CALIBRATION_DELAY_INC_US; - } - } while (num_retry++ < WCD9XXX_RCO_CALIBRATION_RETRY_COUNT); - } else { - /* Notify */ - wcd9xxx_resmgr_notifier_call(resmgr, WCD9XXX_EVENT_PRE_MCLK_ON); - /* switch to MCLK */ - - switch (resmgr->codec_type) { - case WCD9XXX_CDC_TYPE_TOMTOM: - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, - 0x08, 0x00); - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, - 0x40, 0x40); - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, - 0x40, 0x00); - /* clk source to ext clk and clk buff ref to VBG */ - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, - 0x0C, 0x04); - break; - default: - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, - 0x08, 0x00); - /* if RCO is enabled, switch from it */ - if (snd_soc_read(codec, WCD9XXX_A_RC_OSC_FREQ) & 0x80) { - snd_soc_write(codec, WCD9XXX_A_CLK_BUFF_EN2, - 0x02); - wcd9xxx_resmgr_enable_config_mode(resmgr, 0); - } - /* clk source to ext clk and clk buff ref to VBG */ - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, - 0x0C, 0x04); - break; - } - } - - if (config_mode != WCD9XXX_CFG_CAL_RCO) { - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN1, - 0x01, 0x01); - /* - * sleep required by codec hardware to - * enable clock buffer - */ - usleep_range(1000, 1200); - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, - 0x02, 0x00); - /* on MCLK */ - snd_soc_update_bits(codec, WCD9XXX_A_CLK_BUFF_EN2, - 0x04, 0x04); - snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_MCLK_CTL, - 0x01, 0x01); - } - usleep_range(50, 55); - - /* Notify */ - if (config_mode == WCD9XXX_CFG_RCO) - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_POST_RCO_ON); - else if (config_mode == WCD9XXX_CFG_MCLK) - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_POST_MCLK_ON); -} - -/* - * disable clock and return previous clock state - */ -static enum wcd9xxx_clock_type wcd9xxx_save_clock(struct wcd9xxx_resmgr *resmgr) -{ - WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr); - if (resmgr->clk_type != WCD9XXX_CLK_OFF) - wcd9xxx_disable_clock_block(resmgr); - return resmgr->clk_type != WCD9XXX_CLK_OFF; -} - -static void wcd9xxx_restore_clock(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_clock_type type) -{ - if (type != WCD9XXX_CLK_OFF) - wcd9xxx_enable_clock_block(resmgr, type == WCD9XXX_CLK_RCO); -} - -void wcd9xxx_resmgr_get_clk_block(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_clock_type type) -{ - struct snd_soc_codec *codec = resmgr->codec; - - pr_debug("%s: current %d, requested %d, rco_users %d, mclk_users %d\n", - __func__, resmgr->clk_type, type, - resmgr->clk_rco_users, resmgr->clk_mclk_users); - WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr); - switch (type) { - case WCD9XXX_CLK_RCO: - if (++resmgr->clk_rco_users == 1 && - 
resmgr->clk_type == WCD9XXX_CLK_OFF) { - /* enable RCO and switch to it */ - wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_RCO); - resmgr->clk_type = WCD9XXX_CLK_RCO; - } else if (resmgr->clk_rco_users == 1 && - resmgr->clk_type == WCD9XXX_CLK_MCLK && - resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) { - /* - * Enable RCO but do not switch CLK MUX to RCO - * unless ext_clk_users is 1, which indicates - * EXT CLK is enabled for RCO calibration - */ - wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_CAL_RCO); - if (resmgr->ext_clk_users == 1) { - /* Notify */ - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_PRE_RCO_ON); - /* CLK MUX to RCO */ - if (resmgr->pdata->mclk_rate != - WCD9XXX_MCLK_CLK_12P288MHZ) - snd_soc_update_bits(codec, - WCD9XXX_A_CLK_BUFF_EN1, - 0x08, 0x08); - resmgr->clk_type = WCD9XXX_CLK_RCO; - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_POST_RCO_ON); - } - } - break; - case WCD9XXX_CLK_MCLK: - if (++resmgr->clk_mclk_users == 1 && - resmgr->clk_type == WCD9XXX_CLK_OFF) { - /* switch to MCLK */ - wcd9xxx_enable_clock_block(resmgr, WCD9XXX_CFG_MCLK); - resmgr->clk_type = WCD9XXX_CLK_MCLK; - } else if (resmgr->clk_mclk_users == 1 && - resmgr->clk_type == WCD9XXX_CLK_RCO) { - /* RCO to MCLK switch, with RCO still powered on */ - if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) { - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_PRE_MCLK_ON); - snd_soc_update_bits(codec, - WCD9XXX_A_BIAS_CENTRAL_BG_CTL, - 0x40, 0x00); - /* Enable clock buffer */ - snd_soc_update_bits(codec, - WCD9XXX_A_CLK_BUFF_EN1, - 0x01, 0x01); - snd_soc_update_bits(codec, - WCD9XXX_A_CLK_BUFF_EN1, - 0x08, 0x00); - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_POST_MCLK_ON); - } else { - /* if RCO is enabled, switch from it */ - WARN_ON(!(snd_soc_read(resmgr->codec, - WCD9XXX_A_RC_OSC_FREQ) & 0x80)); - /* disable clock block */ - wcd9xxx_disable_clock_block(resmgr); - /* switch to MCLK */ - wcd9xxx_enable_clock_block(resmgr, - WCD9XXX_CFG_MCLK); - } - resmgr->clk_type = WCD9XXX_CLK_MCLK; - } - break; - default: - pr_err("%s: Error, Invalid clock get request %d\n", __func__, - type); - break; - } - pr_debug("%s: leave\n", __func__); -} - -void wcd9xxx_resmgr_put_clk_block(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_clock_type type) -{ - struct snd_soc_codec *codec = resmgr->codec; - - pr_debug("%s: current %d, put %d\n", __func__, resmgr->clk_type, type); - - WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr); - switch (type) { - case WCD9XXX_CLK_RCO: - if (--resmgr->clk_rco_users == 0 && - resmgr->clk_type == WCD9XXX_CLK_RCO) { - wcd9xxx_disable_clock_block(resmgr); - if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) { - /* Powerdown RCO */ - snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, - 0x80, 0x00); - snd_soc_update_bits(codec, - TOMTOM_A_BIAS_OSC_BG_CTL, - 0x01, 0x00); - } else { - /* if RCO is enabled, switch from it */ - if (snd_soc_read(resmgr->codec, - WCD9XXX_A_RC_OSC_FREQ) - & 0x80) { - snd_soc_write(resmgr->codec, - WCD9XXX_A_CLK_BUFF_EN2, - 0x02); - wcd9xxx_resmgr_enable_config_mode( - resmgr, 0); - } - } - resmgr->clk_type = WCD9XXX_CLK_OFF; - } - break; - case WCD9XXX_CLK_MCLK: - if (--resmgr->clk_mclk_users == 0 && - resmgr->clk_rco_users == 0) { - wcd9xxx_disable_clock_block(resmgr); - - if ((resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) && - (snd_soc_read(codec, TOMTOM_A_RCO_CTRL) & 0x80)) { - /* powerdown RCO*/ - snd_soc_update_bits(codec, TOMTOM_A_RCO_CTRL, - 0x80, 0x00); - snd_soc_update_bits(codec, - TOMTOM_A_BIAS_OSC_BG_CTL, - 0x01, 0x00); - } - resmgr->clk_type = 
WCD9XXX_CLK_OFF; - } else if (resmgr->clk_mclk_users == 0 && - resmgr->clk_rco_users) { - if (resmgr->codec_type == WCD9XXX_CDC_TYPE_TOMTOM) { - if (!(snd_soc_read(codec, TOMTOM_A_RCO_CTRL) & - 0x80)) { - dev_dbg(codec->dev, "%s: Enabling RCO\n", - __func__); - wcd9xxx_enable_clock_block(resmgr, - WCD9XXX_CFG_CAL_RCO); - snd_soc_update_bits(codec, - WCD9XXX_A_CLK_BUFF_EN1, - 0x01, 0x00); - } else { - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_PRE_MCLK_OFF); - snd_soc_update_bits(codec, - WCD9XXX_A_CLK_BUFF_EN1, - 0x08, 0x08); - snd_soc_update_bits(codec, - WCD9XXX_A_CLK_BUFF_EN1, - 0x01, 0x00); - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_POST_MCLK_OFF); - /* CLK Mux changed to RCO, notify that - * RCO is ON - */ - wcd9xxx_resmgr_notifier_call(resmgr, - WCD9XXX_EVENT_POST_RCO_ON); - } - } else { - /* disable clock */ - wcd9xxx_disable_clock_block(resmgr); - /* switch to RCO */ - wcd9xxx_enable_clock_block(resmgr, - WCD9XXX_CFG_RCO); - } - resmgr->clk_type = WCD9XXX_CLK_RCO; - } - break; - default: - pr_err("%s: Error, Invalid clock get request %d\n", __func__, - type); - break; - } - WARN_ON(resmgr->clk_rco_users < 0); - WARN_ON(resmgr->clk_mclk_users < 0); - - pr_debug("%s: new rco_users %d, mclk_users %d\n", __func__, - resmgr->clk_rco_users, resmgr->clk_mclk_users); -} - -/* - * wcd9xxx_resmgr_get_clk_type() - * Returns clk type that is currently enabled - */ -int wcd9xxx_resmgr_get_clk_type(struct wcd9xxx_resmgr *resmgr) -{ - return resmgr->clk_type; -} - -static void wcd9xxx_resmgr_update_cfilt_usage(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_cfilt_sel cfilt_sel, - bool inc) -{ - u16 micb_cfilt_reg; - enum wcd9xxx_notify_event e_pre_on, e_post_off; - struct snd_soc_codec *codec = resmgr->codec; - - switch (cfilt_sel) { - case WCD9XXX_CFILT1_SEL: - micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_1_CTL; - e_pre_on = WCD9XXX_EVENT_PRE_CFILT_1_ON; - e_post_off = WCD9XXX_EVENT_POST_CFILT_1_OFF; - break; - case WCD9XXX_CFILT2_SEL: - micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_2_CTL; - e_pre_on = WCD9XXX_EVENT_PRE_CFILT_2_ON; - e_post_off = WCD9XXX_EVENT_POST_CFILT_2_OFF; - break; - case WCD9XXX_CFILT3_SEL: - micb_cfilt_reg = WCD9XXX_A_MICB_CFILT_3_CTL; - e_pre_on = WCD9XXX_EVENT_PRE_CFILT_3_ON; - e_post_off = WCD9XXX_EVENT_POST_CFILT_3_OFF; - break; - default: - WARN(1, "Invalid CFILT selection %d\n", cfilt_sel); - return; /* should not happen */ - } - - if (inc) { - if ((resmgr->cfilt_users[cfilt_sel]++) == 0) { - /* Notify */ - wcd9xxx_resmgr_notifier_call(resmgr, e_pre_on); - /* Enable CFILT */ - snd_soc_update_bits(codec, micb_cfilt_reg, 0x80, 0x80); - } - } else { - /* - * Check if count not zero, decrease - * then check if zero, go ahead disable cfilter - */ - WARN(resmgr->cfilt_users[cfilt_sel] == 0, - "Invalid CFILT use count 0\n"); - if ((--resmgr->cfilt_users[cfilt_sel]) == 0) { - /* Disable CFILT */ - snd_soc_update_bits(codec, micb_cfilt_reg, 0x80, 0); - /* Notify MBHC so MBHC can switch CFILT to fast mode */ - wcd9xxx_resmgr_notifier_call(resmgr, e_post_off); - } - } -} - -void wcd9xxx_resmgr_cfilt_get(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_cfilt_sel cfilt_sel) -{ - return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, true); -} - -void wcd9xxx_resmgr_cfilt_put(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_cfilt_sel cfilt_sel) -{ - return wcd9xxx_resmgr_update_cfilt_usage(resmgr, cfilt_sel, false); -} - -int wcd9xxx_resmgr_get_k_val(struct wcd9xxx_resmgr *resmgr, - unsigned int cfilt_mv) -{ - int rc = -EINVAL; - unsigned int ldoh_v = 
resmgr->micbias_pdata->ldoh_v; - unsigned int min_mv, max_mv; - - switch (ldoh_v) { - case WCD9XXX_LDOH_1P95_V: - min_mv = 160; - max_mv = 1800; - break; - case WCD9XXX_LDOH_2P35_V: - min_mv = 200; - max_mv = 2200; - break; - case WCD9XXX_LDOH_2P75_V: - min_mv = 240; - max_mv = 2600; - break; - case WCD9XXX_LDOH_3P0_V: - min_mv = 260; - max_mv = 2875; - break; - default: - goto done; - } - - if (cfilt_mv < min_mv || cfilt_mv > max_mv) - goto done; - - for (rc = 4; rc <= 44; rc++) { - min_mv = max_mv * (rc) / 44; - if (min_mv >= cfilt_mv) { - rc -= 4; - break; - } - } -done: - return rc; -} - -static void wcd9xxx_resmgr_cond_trigger_cond(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_resmgr_cond cond) -{ - struct list_head *l; - struct wcd9xxx_resmgr_cond_entry *e; - bool set; - - pr_debug("%s: enter\n", __func__); - /* update bit if cond isn't available or cond is set */ - set = !test_bit(cond, &resmgr->cond_avail_flags) || - !!test_bit(cond, &resmgr->cond_flags); - list_for_each(l, &resmgr->update_bit_cond_h) { - e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list); - if (e->cond == cond) - snd_soc_update_bits(resmgr->codec, e->reg, - 1 << e->shift, - (set ? !e->invert : e->invert) - << e->shift); - } - pr_debug("%s: leave\n", __func__); -} - -/* - * wcd9xxx_regmgr_cond_register : notify resmgr conditions in the condbits are - * available and notified. - * condbits : contains bitmask of enum wcd9xxx_resmgr_cond - */ -void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr, - unsigned long condbits) -{ - unsigned int cond; - - for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) { - mutex_lock(&resmgr->update_bit_cond_lock); - WARN(test_bit(cond, &resmgr->cond_avail_flags), - "Condition 0x%0x is already registered\n", cond); - set_bit(cond, &resmgr->cond_avail_flags); - wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond); - mutex_unlock(&resmgr->update_bit_cond_lock); - pr_debug("%s: Condition 0x%x is registered\n", __func__, cond); - } -} - -void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr, - unsigned long condbits) -{ - unsigned int cond; - - for_each_set_bit(cond, &condbits, BITS_PER_BYTE * sizeof(condbits)) { - mutex_lock(&resmgr->update_bit_cond_lock); - WARN(!test_bit(cond, &resmgr->cond_avail_flags), - "Condition 0x%0x isn't registered\n", cond); - clear_bit(cond, &resmgr->cond_avail_flags); - wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond); - mutex_unlock(&resmgr->update_bit_cond_lock); - pr_debug("%s: Condition 0x%x is deregistered\n", __func__, - cond); - } -} - -void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_resmgr_cond cond, bool set) -{ - mutex_lock(&resmgr->update_bit_cond_lock); - if ((set && !test_and_set_bit(cond, &resmgr->cond_flags)) || - (!set && test_and_clear_bit(cond, &resmgr->cond_flags))) { - pr_debug("%s: Resource %d condition changed to %s\n", __func__, - cond, set ? 
"set" : "clear"); - wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond); - } - mutex_unlock(&resmgr->update_bit_cond_lock); -} - -int wcd9xxx_resmgr_add_cond_update_bits(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_resmgr_cond cond, - unsigned short reg, int shift, - bool invert) -{ - struct wcd9xxx_resmgr_cond_entry *entry; - - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - entry->cond = cond; - entry->reg = reg; - entry->shift = shift; - entry->invert = invert; - - mutex_lock(&resmgr->update_bit_cond_lock); - list_add_tail(&entry->list, &resmgr->update_bit_cond_h); - - wcd9xxx_resmgr_cond_trigger_cond(resmgr, cond); - mutex_unlock(&resmgr->update_bit_cond_lock); - - return 0; -} - -/* - * wcd9xxx_resmgr_rm_cond_update_bits : - * Clear bit and remove from the conditional bit update list - */ -int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_resmgr_cond cond, - unsigned short reg, int shift, - bool invert) -{ - struct list_head *l, *next; - struct wcd9xxx_resmgr_cond_entry *e = NULL; - - pr_debug("%s: enter\n", __func__); - mutex_lock(&resmgr->update_bit_cond_lock); - list_for_each_safe(l, next, &resmgr->update_bit_cond_h) { - e = list_entry(l, struct wcd9xxx_resmgr_cond_entry, list); - if (e->reg == reg && e->shift == shift && e->invert == invert) { - snd_soc_update_bits(resmgr->codec, e->reg, - 1 << e->shift, - e->invert << e->shift); - list_del(&e->list); - mutex_unlock(&resmgr->update_bit_cond_lock); - kfree(e); - return 0; - } - } - mutex_unlock(&resmgr->update_bit_cond_lock); - pr_err("%s: Cannot find update bit entry reg 0x%x, shift %d\n", - __func__, e ? e->reg : 0, e ? e->shift : 0); - - return -EINVAL; -} - -int wcd9xxx_resmgr_register_notifier(struct wcd9xxx_resmgr *resmgr, - struct notifier_block *nblock) -{ - return blocking_notifier_chain_register(&resmgr->notifier, nblock); -} - -int wcd9xxx_resmgr_unregister_notifier(struct wcd9xxx_resmgr *resmgr, - struct notifier_block *nblock) -{ - return blocking_notifier_chain_unregister(&resmgr->notifier, nblock); -} - -int wcd9xxx_resmgr_init(struct wcd9xxx_resmgr *resmgr, - struct snd_soc_codec *codec, - struct wcd9xxx_core_resource *core_res, - struct wcd9xxx_pdata *pdata, - struct wcd9xxx_micbias_setting *micbias_pdata, - struct wcd9xxx_reg_address *reg_addr, - const struct wcd9xxx_resmgr_cb *resmgr_cb, - enum wcd9xxx_cdc_type cdc_type) -{ - WARN(ARRAY_SIZE(wcd9xxx_event_string) != WCD9XXX_EVENT_LAST + 1, - "Event string table isn't up to date!, %zd != %d\n", - ARRAY_SIZE(wcd9xxx_event_string), WCD9XXX_EVENT_LAST + 1); - - resmgr->bandgap_type = WCD9XXX_BANDGAP_OFF; - resmgr->codec = codec; - resmgr->codec_type = cdc_type; - /* This gives access of core handle to lock/unlock suspend */ - resmgr->core_res = core_res; - resmgr->pdata = pdata; - resmgr->micbias_pdata = micbias_pdata; - resmgr->reg_addr = reg_addr; - resmgr->resmgr_cb = resmgr_cb; - - INIT_LIST_HEAD(&resmgr->update_bit_cond_h); - - BLOCKING_INIT_NOTIFIER_HEAD(&resmgr->notifier); - - mutex_init(&resmgr->codec_resource_lock); - mutex_init(&resmgr->codec_bg_clk_lock); - mutex_init(&resmgr->update_bit_cond_lock); - - return 0; -} - -void wcd9xxx_resmgr_deinit(struct wcd9xxx_resmgr *resmgr) -{ - mutex_destroy(&resmgr->update_bit_cond_lock); - mutex_destroy(&resmgr->codec_bg_clk_lock); - mutex_destroy(&resmgr->codec_resource_lock); -} - -void wcd9xxx_resmgr_bcl_lock(struct wcd9xxx_resmgr *resmgr) -{ - mutex_lock(&resmgr->codec_resource_lock); -} - -void wcd9xxx_resmgr_bcl_unlock(struct wcd9xxx_resmgr *resmgr) 
-{ - mutex_unlock(&resmgr->codec_resource_lock); -} - -MODULE_DESCRIPTION("wcd9xxx resmgr module"); -MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/wcd9xxx-resmgr.h b/sound/soc/codecs/wcd9xxx-resmgr.h deleted file mode 100644 index e35d6161d488..000000000000 --- a/sound/soc/codecs/wcd9xxx-resmgr.h +++ /dev/null @@ -1,280 +0,0 @@ -/* Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ -#ifndef __WCD9XXX_COMMON_H__ -#define __WCD9XXX_COMMON_H__ - -#include -#include -#include - -enum wcd9xxx_bandgap_type { - WCD9XXX_BANDGAP_OFF, - WCD9XXX_BANDGAP_AUDIO_MODE, - WCD9XXX_BANDGAP_MBHC_MODE, -}; - -enum wcd9xxx_cdc_type { - WCD9XXX_CDC_TYPE_INVALID = 0, - WCD9XXX_CDC_TYPE_TAIKO, - WCD9XXX_CDC_TYPE_TAPAN, - WCD9XXX_CDC_TYPE_HELICON, - WCD9XXX_CDC_TYPE_TOMTOM, -}; - -enum wcd9xxx_clock_type { - WCD9XXX_CLK_OFF, - WCD9XXX_CLK_RCO, - WCD9XXX_CLK_MCLK, -}; - -enum wcd9xxx_clock_config_mode { - WCD9XXX_CFG_MCLK = 0, - WCD9XXX_CFG_RCO, - WCD9XXX_CFG_CAL_RCO, -}; - -enum wcd9xxx_cfilt_sel { - WCD9XXX_CFILT1_SEL, - WCD9XXX_CFILT2_SEL, - WCD9XXX_CFILT3_SEL, - WCD9XXX_NUM_OF_CFILT, -}; - -struct wcd9xxx_reg_address { - u16 micb_4_ctl; - u16 micb_4_int_rbias; - u16 micb_4_mbhc; -}; - -enum wcd9xxx_notify_event { - WCD9XXX_EVENT_INVALID, - - WCD9XXX_EVENT_PRE_RCO_ON, - WCD9XXX_EVENT_POST_RCO_ON, - WCD9XXX_EVENT_PRE_RCO_OFF, - WCD9XXX_EVENT_POST_RCO_OFF, - - WCD9XXX_EVENT_PRE_MCLK_ON, - WCD9XXX_EVENT_POST_MCLK_ON, - WCD9XXX_EVENT_PRE_MCLK_OFF, - WCD9XXX_EVENT_POST_MCLK_OFF, - - WCD9XXX_EVENT_PRE_BG_OFF, - WCD9XXX_EVENT_POST_BG_OFF, - WCD9XXX_EVENT_PRE_BG_AUDIO_ON, - WCD9XXX_EVENT_POST_BG_AUDIO_ON, - WCD9XXX_EVENT_PRE_BG_MBHC_ON, - WCD9XXX_EVENT_POST_BG_MBHC_ON, - - WCD9XXX_EVENT_PRE_MICBIAS_1_OFF, - WCD9XXX_EVENT_POST_MICBIAS_1_OFF, - WCD9XXX_EVENT_PRE_MICBIAS_2_OFF, - WCD9XXX_EVENT_POST_MICBIAS_2_OFF, - WCD9XXX_EVENT_PRE_MICBIAS_3_OFF, - WCD9XXX_EVENT_POST_MICBIAS_3_OFF, - WCD9XXX_EVENT_PRE_MICBIAS_4_OFF, - WCD9XXX_EVENT_POST_MICBIAS_4_OFF, - WCD9XXX_EVENT_PRE_MICBIAS_1_ON, - WCD9XXX_EVENT_POST_MICBIAS_1_ON, - WCD9XXX_EVENT_PRE_MICBIAS_2_ON, - WCD9XXX_EVENT_POST_MICBIAS_2_ON, - WCD9XXX_EVENT_PRE_MICBIAS_3_ON, - WCD9XXX_EVENT_POST_MICBIAS_3_ON, - WCD9XXX_EVENT_PRE_MICBIAS_4_ON, - WCD9XXX_EVENT_POST_MICBIAS_4_ON, - - WCD9XXX_EVENT_PRE_CFILT_1_OFF, - WCD9XXX_EVENT_POST_CFILT_1_OFF, - WCD9XXX_EVENT_PRE_CFILT_2_OFF, - WCD9XXX_EVENT_POST_CFILT_2_OFF, - WCD9XXX_EVENT_PRE_CFILT_3_OFF, - WCD9XXX_EVENT_POST_CFILT_3_OFF, - WCD9XXX_EVENT_PRE_CFILT_1_ON, - WCD9XXX_EVENT_POST_CFILT_1_ON, - WCD9XXX_EVENT_PRE_CFILT_2_ON, - WCD9XXX_EVENT_POST_CFILT_2_ON, - WCD9XXX_EVENT_PRE_CFILT_3_ON, - WCD9XXX_EVENT_POST_CFILT_3_ON, - - WCD9XXX_EVENT_PRE_HPHL_PA_ON, - WCD9XXX_EVENT_POST_HPHL_PA_OFF, - WCD9XXX_EVENT_PRE_HPHR_PA_ON, - WCD9XXX_EVENT_POST_HPHR_PA_OFF, - - WCD9XXX_EVENT_POST_RESUME, - - WCD9XXX_EVENT_PRE_TX_3_ON, - WCD9XXX_EVENT_POST_TX_3_OFF, - - WCD9XXX_EVENT_LAST, -}; - -struct wcd9xxx_resmgr_cb { - int (*cdc_rco_ctrl)(struct snd_soc_codec *, bool); -}; - -struct wcd9xxx_resmgr { - struct snd_soc_codec *codec; - struct 
wcd9xxx_core_resource *core_res; - - u32 rx_bias_count; - - /* - * bandgap_type, bg_audio_users and bg_mbhc_users have to be - * referred/manipulated after acquiring codec_bg_clk_lock mutex - */ - enum wcd9xxx_bandgap_type bandgap_type; - u16 bg_audio_users; - u16 bg_mbhc_users; - - /* - * clk_type, clk_rco_users and clk_mclk_users have to be - * referred/manipulated after acquiring codec_bg_clk_lock mutex - */ - enum wcd9xxx_clock_type clk_type; - u16 clk_rco_users; - u16 clk_mclk_users; - u16 ext_clk_users; - - /* cfilt users per cfilts */ - u16 cfilt_users[WCD9XXX_NUM_OF_CFILT]; - - struct wcd9xxx_reg_address *reg_addr; - - struct wcd9xxx_pdata *pdata; - - struct wcd9xxx_micbias_setting *micbias_pdata; - - struct blocking_notifier_head notifier; - /* Notifier needs mbhc pointer with resmgr */ - struct wcd9xxx_mbhc *mbhc; - - unsigned long cond_flags; - unsigned long cond_avail_flags; - struct list_head update_bit_cond_h; - struct mutex update_bit_cond_lock; - - /* - * Currently, only used for mbhc purpose, to protect - * concurrent execution of mbhc threaded irq handlers and - * kill race between DAPM and MBHC. But can serve as a - * general lock to protect codec resource - */ - struct mutex codec_resource_lock; - struct mutex codec_bg_clk_lock; - - enum wcd9xxx_cdc_type codec_type; - - const struct wcd9xxx_resmgr_cb *resmgr_cb; -}; - -int wcd9xxx_resmgr_init(struct wcd9xxx_resmgr *resmgr, - struct snd_soc_codec *codec, - struct wcd9xxx_core_resource *core_res, - struct wcd9xxx_pdata *pdata, - struct wcd9xxx_micbias_setting *micbias_pdata, - struct wcd9xxx_reg_address *reg_addr, - const struct wcd9xxx_resmgr_cb *resmgr_cb, - enum wcd9xxx_cdc_type cdc_type); -void wcd9xxx_resmgr_deinit(struct wcd9xxx_resmgr *resmgr); - -int wcd9xxx_resmgr_enable_config_mode(struct wcd9xxx_resmgr *resmgr, - int enable); - -void wcd9xxx_resmgr_enable_rx_bias(struct wcd9xxx_resmgr *resmgr, u32 enable); -void wcd9xxx_resmgr_get_clk_block(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_clock_type type); -void wcd9xxx_resmgr_put_clk_block(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_clock_type type); -void wcd9xxx_resmgr_get_bandgap(struct wcd9xxx_resmgr *resmgr, - const enum wcd9xxx_bandgap_type choice); -void wcd9xxx_resmgr_put_bandgap(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_bandgap_type choice); -void wcd9xxx_resmgr_cfilt_get(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_cfilt_sel cfilt_sel); -void wcd9xxx_resmgr_cfilt_put(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_cfilt_sel cfilt_sel); -int wcd9xxx_resmgr_get_clk_type(struct wcd9xxx_resmgr *resmgr); - -void wcd9xxx_resmgr_bcl_lock(struct wcd9xxx_resmgr *resmgr); -void wcd9xxx_resmgr_post_ssr(struct wcd9xxx_resmgr *resmgr); -#define WCD9XXX_BCL_LOCK(resmgr) \ -{ \ - pr_debug("%s: Acquiring BCL\n", __func__); \ - wcd9xxx_resmgr_bcl_lock(resmgr); \ - pr_debug("%s: Acquiring BCL done\n", __func__); \ -} - -void wcd9xxx_resmgr_bcl_unlock(struct wcd9xxx_resmgr *resmgr); -#define WCD9XXX_BCL_UNLOCK(resmgr) \ -{ \ - pr_debug("%s: Release BCL\n", __func__); \ - wcd9xxx_resmgr_bcl_unlock(resmgr); \ -} - -#define WCD9XXX_BCL_ASSERT_LOCKED(resmgr) \ -{ \ - WARN_ONCE(!mutex_is_locked(&resmgr->codec_resource_lock), \ - "%s: BCL should have acquired\n", __func__); \ -} - -#define WCD9XXX_BG_CLK_LOCK(resmgr) \ -{ \ - struct wcd9xxx_resmgr *__resmgr = resmgr; \ - pr_debug("%s: Acquiring BG_CLK\n", __func__); \ - mutex_lock(&__resmgr->codec_bg_clk_lock); \ - pr_debug("%s: Acquiring BG_CLK done\n", __func__); \ -} - -#define WCD9XXX_BG_CLK_UNLOCK(resmgr) \ -{ \ - 
struct wcd9xxx_resmgr *__resmgr = resmgr; \ - pr_debug("%s: Releasing BG_CLK\n", __func__); \ - mutex_unlock(&__resmgr->codec_bg_clk_lock); \ -} - -#define WCD9XXX_BG_CLK_ASSERT_LOCKED(resmgr) \ -{ \ - WARN_ONCE(!mutex_is_locked(&resmgr->codec_bg_clk_lock), \ - "%s: BG_CLK lock should have acquired\n", __func__); \ -} - -const char *wcd9xxx_get_event_string(enum wcd9xxx_notify_event type); -int wcd9xxx_resmgr_get_k_val(struct wcd9xxx_resmgr *resmgr, - unsigned int cfilt_mv); -int wcd9xxx_resmgr_register_notifier(struct wcd9xxx_resmgr *resmgr, - struct notifier_block *nblock); -int wcd9xxx_resmgr_unregister_notifier(struct wcd9xxx_resmgr *resmgr, - struct notifier_block *nblock); -void wcd9xxx_resmgr_notifier_call(struct wcd9xxx_resmgr *resmgr, - const enum wcd9xxx_notify_event e); - -enum wcd9xxx_resmgr_cond { - WCD9XXX_COND_HPH = 0x01, /* Headphone */ - WCD9XXX_COND_HPH_MIC = 0x02, /* Microphone on the headset */ -}; -void wcd9xxx_regmgr_cond_register(struct wcd9xxx_resmgr *resmgr, - unsigned long condbits); -void wcd9xxx_regmgr_cond_deregister(struct wcd9xxx_resmgr *resmgr, - unsigned long condbits); -int wcd9xxx_resmgr_rm_cond_update_bits(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_resmgr_cond cond, - unsigned short reg, int shift, - bool invert); -int wcd9xxx_resmgr_add_cond_update_bits(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_resmgr_cond cond, - unsigned short reg, int shift, - bool invert); -void wcd9xxx_resmgr_cond_update_cond(struct wcd9xxx_resmgr *resmgr, - enum wcd9xxx_resmgr_cond cond, bool set); - -#endif /* __WCD9XXX_COMMON_H__ */ -- GitLab From f6e545c42dc4d37134cc85455dab489458cebc39 Mon Sep 17 00:00:00 2001 From: Banajit Goswami Date: Sat, 13 May 2017 01:36:01 -0700 Subject: [PATCH 352/786] mfd: remove unused wcd9330 codec related files wcd9330 codec is unused and removed from the tree. Remove the regmap and register files for wcd9330. Change-Id: I1cfec17700e034a280fc0a7bf88fcac6945a7c00 Signed-off-by: Banajit Goswami --- drivers/mfd/Kconfig | 14 - drivers/mfd/Makefile | 2 - drivers/mfd/wcd9330-regmap.c | 990 ---------- include/linux/mfd/wcd9xxx/wcd9330_registers.h | 1626 ----------------- 4 files changed, 2632 deletions(-) delete mode 100644 drivers/mfd/wcd9330-regmap.c delete mode 100644 include/linux/mfd/wcd9xxx/wcd9330_registers.h diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 1239e68e5808..505f99d962aa 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1645,20 +1645,6 @@ config WCD9XXX_CODEC_UTIL functions. This driver also hides the underlying bus related functionalities. -config WCD9330_CODEC - tristate "WCD9330 Codec" - select SLIMBUS - select MFD_CORE - select WCD9XXX_CODEC_UTIL - select MSM_CDC_SUPPLY - select REGMAP_ALLOW_WRITE_DEBUGFS - help - Enables the WCD9xxx codec core driver. The core driver provides - read/write capability to registers which are part of the - WCD9330 core and gives the ability to use the WCD9330 codec. - The WCD9330 codec support either I2C/I2S or Slimbus for - control and data exchnage with master processor. 
- config WCD9335_CODEC tristate "WCD9335 Codec" select SLIMBUS diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index b2fe74b4ad5d..8e54d3253650 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -208,8 +208,6 @@ obj-$(CONFIG_MFD_SKY81452) += sky81452.o obj-$(CONFIG_MSM_CDC_PINCTRL) += msm-cdc-pinctrl.o obj-$(CONFIG_MSM_CDC_SUPPLY) += msm-cdc-supply.o obj-$(CONFIG_WCD9XXX_CODEC_UTIL) += wcd9xxx-utils.o -obj-$(CONFIG_WCD9330_CODEC) += wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\ - wcd9330-regmap.o obj-$(CONFIG_WCD9335_CODEC) += wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\ wcd9335-regmap.o wcd9335-tables.o obj-$(CONFIG_WCD934X_CODEC) += wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\ diff --git a/drivers/mfd/wcd9330-regmap.c b/drivers/mfd/wcd9330-regmap.c deleted file mode 100644 index 878ea72f1b1d..000000000000 --- a/drivers/mfd/wcd9330-regmap.c +++ /dev/null @@ -1,990 +0,0 @@ -/* - * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include -#include -#include -#include -#include "wcd9xxx-regmap.h" - -static struct reg_default wcd9330_defaults[] = { - { TOMTOM_A_CHIP_CTL, TOMTOM_A_CHIP_CTL__POR }, - { TOMTOM_A_CHIP_STATUS, TOMTOM_A_CHIP_STATUS__POR }, - { TOMTOM_A_CHIP_ID_BYTE_0, TOMTOM_A_CHIP_ID_BYTE_0__POR }, - { TOMTOM_A_CHIP_ID_BYTE_1, TOMTOM_A_CHIP_ID_BYTE_1__POR }, - { TOMTOM_A_CHIP_ID_BYTE_2, TOMTOM_A_CHIP_ID_BYTE_2__POR }, - { TOMTOM_A_CHIP_ID_BYTE_3, TOMTOM_A_CHIP_ID_BYTE_3__POR }, - { TOMTOM_A_CHIP_I2C_SLAVE_ID, TOMTOM_A_CHIP_I2C_SLAVE_ID__POR }, - { TOMTOM_A_SLAVE_ID_1, TOMTOM_A_SLAVE_ID_1__POR }, - { TOMTOM_A_SLAVE_ID_2, TOMTOM_A_SLAVE_ID_2__POR }, - { TOMTOM_A_SLAVE_ID_3, TOMTOM_A_SLAVE_ID_3__POR }, - { TOMTOM_A_PIN_CTL_OE0, TOMTOM_A_PIN_CTL_OE0__POR }, - { TOMTOM_A_PIN_CTL_OE1, TOMTOM_A_PIN_CTL_OE1__POR }, - { TOMTOM_A_PIN_CTL_OE2, TOMTOM_A_PIN_CTL_OE2__POR }, - { TOMTOM_A_PIN_CTL_DATA0, TOMTOM_A_PIN_CTL_DATA0__POR }, - { TOMTOM_A_PIN_CTL_DATA1, TOMTOM_A_PIN_CTL_DATA1__POR }, - { TOMTOM_A_PIN_CTL_DATA2, TOMTOM_A_PIN_CTL_DATA2__POR }, - { TOMTOM_A_HDRIVE_GENERIC, TOMTOM_A_HDRIVE_GENERIC__POR }, - { TOMTOM_A_HDRIVE_OVERRIDE, TOMTOM_A_HDRIVE_OVERRIDE__POR }, - { TOMTOM_A_ANA_CSR_WAIT_STATE, TOMTOM_A_ANA_CSR_WAIT_STATE__POR }, - { TOMTOM_A_PROCESS_MONITOR_CTL0, TOMTOM_A_PROCESS_MONITOR_CTL0__POR }, - { TOMTOM_A_PROCESS_MONITOR_CTL1, TOMTOM_A_PROCESS_MONITOR_CTL1__POR }, - { TOMTOM_A_PROCESS_MONITOR_CTL2, TOMTOM_A_PROCESS_MONITOR_CTL2__POR }, - { TOMTOM_A_PROCESS_MONITOR_CTL3, TOMTOM_A_PROCESS_MONITOR_CTL3__POR }, - { TOMTOM_A_QFUSE_CTL, TOMTOM_A_QFUSE_CTL__POR }, - { TOMTOM_A_QFUSE_STATUS, TOMTOM_A_QFUSE_STATUS__POR }, - { TOMTOM_A_QFUSE_DATA_OUT0, TOMTOM_A_QFUSE_DATA_OUT0__POR }, - { TOMTOM_A_QFUSE_DATA_OUT1, TOMTOM_A_QFUSE_DATA_OUT1__POR }, - { TOMTOM_A_QFUSE_DATA_OUT2, TOMTOM_A_QFUSE_DATA_OUT2__POR }, - { TOMTOM_A_QFUSE_DATA_OUT3, TOMTOM_A_QFUSE_DATA_OUT3__POR }, - { TOMTOM_A_QFUSE_DATA_OUT4, TOMTOM_A_QFUSE_DATA_OUT4__POR }, - { TOMTOM_A_QFUSE_DATA_OUT5, TOMTOM_A_QFUSE_DATA_OUT5__POR }, - { TOMTOM_A_QFUSE_DATA_OUT6, TOMTOM_A_QFUSE_DATA_OUT6__POR }, - { 
TOMTOM_A_QFUSE_DATA_OUT7, TOMTOM_A_QFUSE_DATA_OUT7__POR }, - { TOMTOM_A_CDC_CTL, TOMTOM_A_CDC_CTL__POR }, - { TOMTOM_A_LEAKAGE_CTL, TOMTOM_A_LEAKAGE_CTL__POR }, - { TOMTOM_A_SVASS_MEM_PTR0, TOMTOM_A_SVASS_MEM_PTR0__POR }, - { TOMTOM_A_SVASS_MEM_PTR1, TOMTOM_A_SVASS_MEM_PTR1__POR }, - { TOMTOM_A_SVASS_MEM_PTR2, TOMTOM_A_SVASS_MEM_PTR2__POR }, - { TOMTOM_A_SVASS_MEM_CTL, TOMTOM_A_SVASS_MEM_CTL__POR }, - { TOMTOM_A_SVASS_MEM_BANK, TOMTOM_A_SVASS_MEM_BANK__POR }, - { TOMTOM_A_DMIC_B1_CTL, TOMTOM_A_DMIC_B1_CTL__POR }, - { TOMTOM_A_DMIC_B2_CTL, TOMTOM_A_DMIC_B2_CTL__POR }, - { TOMTOM_A_SVASS_CLKRST_CTL, TOMTOM_A_SVASS_CLKRST_CTL__POR }, - { TOMTOM_A_SVASS_CPAR_CFG, TOMTOM_A_SVASS_CPAR_CFG__POR }, - { TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD, - TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR }, - { TOMTOM_A_SVASS_CPAR_WDOG_CFG, TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR }, - { TOMTOM_A_SVASS_CFG, TOMTOM_A_SVASS_CFG__POR }, - { TOMTOM_A_SVASS_SPE_CFG, TOMTOM_A_SVASS_SPE_CFG__POR }, - { TOMTOM_A_SVASS_STATUS, TOMTOM_A_SVASS_STATUS__POR }, - { TOMTOM_A_SVASS_INT_MASK, TOMTOM_A_SVASS_INT_MASK__POR }, - { TOMTOM_A_SVASS_INT_STATUS, TOMTOM_A_SVASS_INT_STATUS__POR }, - { TOMTOM_A_SVASS_INT_CLR, TOMTOM_A_SVASS_INT_CLR__POR }, - { TOMTOM_A_SVASS_DEBUG, TOMTOM_A_SVASS_DEBUG__POR }, - { TOMTOM_A_SVASS_SPE_BKUP_INT, TOMTOM_A_SVASS_SPE_BKUP_INT__POR }, - { TOMTOM_A_SVASS_MEM_ACC, TOMTOM_A_SVASS_MEM_ACC__POR }, - { TOMTOM_A_MEM_LEAKAGE_CTL, TOMTOM_A_MEM_LEAKAGE_CTL__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_TRG, TOMTOM_A_SVASS_SPE_INBOX_TRG__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_0, TOMTOM_A_SVASS_SPE_INBOX_0__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_1, TOMTOM_A_SVASS_SPE_INBOX_1__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_2, TOMTOM_A_SVASS_SPE_INBOX_2__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_3, TOMTOM_A_SVASS_SPE_INBOX_3__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_4, TOMTOM_A_SVASS_SPE_INBOX_4__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_5, TOMTOM_A_SVASS_SPE_INBOX_5__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_6, TOMTOM_A_SVASS_SPE_INBOX_6__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_7, TOMTOM_A_SVASS_SPE_INBOX_7__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_8, TOMTOM_A_SVASS_SPE_INBOX_8__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_9, TOMTOM_A_SVASS_SPE_INBOX_9__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_10, TOMTOM_A_SVASS_SPE_INBOX_10__POR }, - { TOMTOM_A_SVASS_SPE_INBOX_11, TOMTOM_A_SVASS_SPE_INBOX_11__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_0, TOMTOM_A_SVASS_SPE_OUTBOX_0__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_1, TOMTOM_A_SVASS_SPE_OUTBOX_1__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_2, TOMTOM_A_SVASS_SPE_OUTBOX_2__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_3, TOMTOM_A_SVASS_SPE_OUTBOX_3__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_4, TOMTOM_A_SVASS_SPE_OUTBOX_4__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_5, TOMTOM_A_SVASS_SPE_OUTBOX_5__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_6, TOMTOM_A_SVASS_SPE_OUTBOX_6__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_7, TOMTOM_A_SVASS_SPE_OUTBOX_7__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_8, TOMTOM_A_SVASS_SPE_OUTBOX_8__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_9, TOMTOM_A_SVASS_SPE_OUTBOX_9__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_10, TOMTOM_A_SVASS_SPE_OUTBOX_10__POR }, - { TOMTOM_A_SVASS_SPE_OUTBOX_11, TOMTOM_A_SVASS_SPE_OUTBOX_11__POR }, - { TOMTOM_A_INTR_MODE, TOMTOM_A_INTR_MODE__POR }, - { TOMTOM_A_INTR1_MASK0, TOMTOM_A_INTR1_MASK0__POR }, - { TOMTOM_A_INTR1_MASK1, TOMTOM_A_INTR1_MASK1__POR }, - { TOMTOM_A_INTR1_MASK2, TOMTOM_A_INTR1_MASK2__POR }, - { TOMTOM_A_INTR1_MASK3, TOMTOM_A_INTR1_MASK3__POR }, - { TOMTOM_A_INTR1_STATUS0, TOMTOM_A_INTR1_STATUS0__POR }, - { TOMTOM_A_INTR1_STATUS1, TOMTOM_A_INTR1_STATUS1__POR }, - 
{ TOMTOM_A_INTR1_STATUS2, TOMTOM_A_INTR1_STATUS2__POR }, - { TOMTOM_A_INTR1_STATUS3, TOMTOM_A_INTR1_STATUS3__POR }, - { TOMTOM_A_INTR1_CLEAR0, TOMTOM_A_INTR1_CLEAR0__POR }, - { TOMTOM_A_INTR1_CLEAR1, TOMTOM_A_INTR1_CLEAR1__POR }, - { TOMTOM_A_INTR1_CLEAR2, TOMTOM_A_INTR1_CLEAR2__POR }, - { TOMTOM_A_INTR1_CLEAR3, TOMTOM_A_INTR1_CLEAR3__POR }, - { TOMTOM_A_INTR1_LEVEL0, TOMTOM_A_INTR1_LEVEL0__POR }, - { TOMTOM_A_INTR1_LEVEL1, TOMTOM_A_INTR1_LEVEL1__POR }, - { TOMTOM_A_INTR1_LEVEL2, TOMTOM_A_INTR1_LEVEL2__POR }, - { TOMTOM_A_INTR1_LEVEL3, TOMTOM_A_INTR1_LEVEL3__POR }, - { TOMTOM_A_INTR1_TEST0, TOMTOM_A_INTR1_TEST0__POR }, - { TOMTOM_A_INTR1_TEST1, TOMTOM_A_INTR1_TEST1__POR }, - { TOMTOM_A_INTR1_TEST2, TOMTOM_A_INTR1_TEST2__POR }, - { TOMTOM_A_INTR1_TEST3, TOMTOM_A_INTR1_TEST3__POR }, - { TOMTOM_A_INTR1_SET0, TOMTOM_A_INTR1_SET0__POR }, - { TOMTOM_A_INTR1_SET1, TOMTOM_A_INTR1_SET1__POR }, - { TOMTOM_A_INTR1_SET2, TOMTOM_A_INTR1_SET2__POR }, - { TOMTOM_A_INTR1_SET3, TOMTOM_A_INTR1_SET3__POR }, - { TOMTOM_A_INTR2_MASK0, TOMTOM_A_INTR2_MASK0__POR }, - { TOMTOM_A_INTR2_STATUS0, TOMTOM_A_INTR2_STATUS0__POR }, - { TOMTOM_A_INTR2_CLEAR0, TOMTOM_A_INTR2_CLEAR0__POR }, - { TOMTOM_A_INTR2_LEVEL0, TOMTOM_A_INTR2_LEVEL0__POR }, - { TOMTOM_A_INTR2_TEST0, TOMTOM_A_INTR2_TEST0__POR }, - { TOMTOM_A_INTR2_SET0, TOMTOM_A_INTR2_SET0__POR }, - { TOMTOM_A_CDC_TX_I2S_SCK_MODE, TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR }, - { TOMTOM_A_CDC_TX_I2S_WS_MODE, TOMTOM_A_CDC_TX_I2S_WS_MODE__POR }, - { TOMTOM_A_CDC_DMIC_DATA0_MODE, TOMTOM_A_CDC_DMIC_DATA0_MODE__POR }, - { TOMTOM_A_CDC_DMIC_CLK0_MODE, TOMTOM_A_CDC_DMIC_CLK0_MODE__POR }, - { TOMTOM_A_CDC_DMIC_DATA1_MODE, TOMTOM_A_CDC_DMIC_DATA1_MODE__POR }, - { TOMTOM_A_CDC_DMIC_CLK1_MODE, TOMTOM_A_CDC_DMIC_CLK1_MODE__POR }, - { TOMTOM_A_CDC_RX_I2S_SCK_MODE, TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR }, - { TOMTOM_A_CDC_RX_I2S_WS_MODE, TOMTOM_A_CDC_RX_I2S_WS_MODE__POR }, - { TOMTOM_A_CDC_DMIC_DATA2_MODE, TOMTOM_A_CDC_DMIC_DATA2_MODE__POR }, - { TOMTOM_A_CDC_DMIC_CLK2_MODE, TOMTOM_A_CDC_DMIC_CLK2_MODE__POR }, - { TOMTOM_A_CDC_INTR1_MODE, TOMTOM_A_CDC_INTR1_MODE__POR }, - { TOMTOM_A_CDC_SB_NRZ_SEL_MODE, TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR }, - { TOMTOM_A_CDC_INTR2_MODE, TOMTOM_A_CDC_INTR2_MODE__POR }, - { TOMTOM_A_CDC_RF_PA_ON_MODE, TOMTOM_A_CDC_RF_PA_ON_MODE__POR }, - { TOMTOM_A_CDC_BOOST_MODE, TOMTOM_A_CDC_BOOST_MODE__POR }, - { TOMTOM_A_CDC_JTCK_MODE, TOMTOM_A_CDC_JTCK_MODE__POR }, - { TOMTOM_A_CDC_JTDI_MODE, TOMTOM_A_CDC_JTDI_MODE__POR }, - { TOMTOM_A_CDC_JTMS_MODE, TOMTOM_A_CDC_JTMS_MODE__POR }, - { TOMTOM_A_CDC_JTDO_MODE, TOMTOM_A_CDC_JTDO_MODE__POR }, - { TOMTOM_A_CDC_JTRST_MODE, TOMTOM_A_CDC_JTRST_MODE__POR }, - { TOMTOM_A_CDC_BIST_MODE_MODE, TOMTOM_A_CDC_BIST_MODE_MODE__POR }, - { TOMTOM_A_CDC_MAD_MAIN_CTL_1, TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR }, - { TOMTOM_A_CDC_MAD_MAIN_CTL_2, TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR }, - { TOMTOM_A_CDC_MAD_AUDIO_CTL_1, TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR }, - { TOMTOM_A_CDC_MAD_AUDIO_CTL_2, TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR }, - { TOMTOM_A_CDC_MAD_AUDIO_CTL_3, TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR }, - { TOMTOM_A_CDC_MAD_AUDIO_CTL_4, TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR }, - { TOMTOM_A_CDC_MAD_AUDIO_CTL_5, TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR }, - { TOMTOM_A_CDC_MAD_AUDIO_CTL_6, TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR }, - { TOMTOM_A_CDC_MAD_AUDIO_CTL_7, TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR }, - { TOMTOM_A_CDC_MAD_AUDIO_CTL_8, TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR }, - { TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR, - TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR }, - { 
TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL, - TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR }, - { TOMTOM_A_CDC_MAD_ULTR_CTL_1, TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR }, - { TOMTOM_A_CDC_MAD_ULTR_CTL_2, TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR }, - { TOMTOM_A_CDC_MAD_ULTR_CTL_3, TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR }, - { TOMTOM_A_CDC_MAD_ULTR_CTL_4, TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR }, - { TOMTOM_A_CDC_MAD_ULTR_CTL_5, TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR }, - { TOMTOM_A_CDC_MAD_ULTR_CTL_6, TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR }, - { TOMTOM_A_CDC_MAD_ULTR_CTL_7, TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR }, - { TOMTOM_A_CDC_MAD_BEACON_CTL_1, TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR }, - { TOMTOM_A_CDC_MAD_BEACON_CTL_2, TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR }, - { TOMTOM_A_CDC_MAD_BEACON_CTL_3, TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR }, - { TOMTOM_A_CDC_MAD_BEACON_CTL_4, TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR }, - { TOMTOM_A_CDC_MAD_BEACON_CTL_5, TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR }, - { TOMTOM_A_CDC_MAD_BEACON_CTL_6, TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR }, - { TOMTOM_A_CDC_MAD_BEACON_CTL_7, TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR }, - { TOMTOM_A_CDC_MAD_BEACON_CTL_8, TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR }, - { TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR, - TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR }, - { TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL, - TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR }, - { TOMTOM_A_CDC_MAD_INP_SEL, TOMTOM_A_CDC_MAD_INP_SEL__POR }, - { TOMTOM_A_BIAS_REF_CTL, TOMTOM_A_BIAS_REF_CTL__POR }, - { TOMTOM_A_BIAS_CENTRAL_BG_CTL, TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR }, - { TOMTOM_A_BIAS_PRECHRG_CTL, TOMTOM_A_BIAS_PRECHRG_CTL__POR }, - { TOMTOM_A_BIAS_CURR_CTL_1, TOMTOM_A_BIAS_CURR_CTL_1__POR }, - { TOMTOM_A_BIAS_CURR_CTL_2, TOMTOM_A_BIAS_CURR_CTL_2__POR }, - { TOMTOM_A_BIAS_OSC_BG_CTL, TOMTOM_A_BIAS_OSC_BG_CTL__POR }, - { TOMTOM_A_CLK_BUFF_EN1, TOMTOM_A_CLK_BUFF_EN1__POR }, - { TOMTOM_A_CLK_BUFF_EN2, TOMTOM_A_CLK_BUFF_EN2__POR }, - { TOMTOM_A_LDO_L_MODE_1, TOMTOM_A_LDO_L_MODE_1__POR }, - { TOMTOM_A_LDO_L_MODE_2, TOMTOM_A_LDO_L_MODE_2__POR }, - { TOMTOM_A_LDO_L_CTRL_1, TOMTOM_A_LDO_L_CTRL_1__POR }, - { TOMTOM_A_LDO_L_CTRL_2, TOMTOM_A_LDO_L_CTRL_2__POR }, - { TOMTOM_A_LDO_L_CTRL_3, TOMTOM_A_LDO_L_CTRL_3__POR }, - { TOMTOM_A_LDO_L_CTRL_4, TOMTOM_A_LDO_L_CTRL_4__POR }, - { TOMTOM_A_LDO_H_MODE_1, TOMTOM_A_LDO_H_MODE_1__POR }, - { TOMTOM_A_LDO_H_MODE_2, TOMTOM_A_LDO_H_MODE_2__POR }, - { TOMTOM_A_LDO_H_LOOP_CTL, TOMTOM_A_LDO_H_LOOP_CTL__POR }, - { TOMTOM_A_LDO_H_COMP_1, TOMTOM_A_LDO_H_COMP_1__POR }, - { TOMTOM_A_LDO_H_COMP_2, TOMTOM_A_LDO_H_COMP_2__POR }, - { TOMTOM_A_LDO_H_BIAS_1, TOMTOM_A_LDO_H_BIAS_1__POR }, - { TOMTOM_A_LDO_H_BIAS_2, TOMTOM_A_LDO_H_BIAS_2__POR }, - { TOMTOM_A_LDO_H_BIAS_3, TOMTOM_A_LDO_H_BIAS_3__POR }, - { TOMTOM_A_VBAT_CLK, TOMTOM_A_VBAT_CLK__POR }, - { TOMTOM_A_VBAT_LOOP, TOMTOM_A_VBAT_LOOP__POR }, - { TOMTOM_A_VBAT_REF, TOMTOM_A_VBAT_REF__POR }, - { TOMTOM_A_VBAT_ADC_TEST, TOMTOM_A_VBAT_ADC_TEST__POR }, - { TOMTOM_A_VBAT_FE, TOMTOM_A_VBAT_FE__POR }, - { TOMTOM_A_VBAT_BIAS_1, TOMTOM_A_VBAT_BIAS_1__POR }, - { TOMTOM_A_VBAT_BIAS_2, TOMTOM_A_VBAT_BIAS_2__POR }, - { TOMTOM_A_VBAT_ADC_DATA_MSB, TOMTOM_A_VBAT_ADC_DATA_MSB__POR }, - { TOMTOM_A_VBAT_ADC_DATA_LSB, TOMTOM_A_VBAT_ADC_DATA_LSB__POR }, - { TOMTOM_A_FLL_NREF, TOMTOM_A_FLL_NREF__POR }, - { TOMTOM_A_FLL_KDCO_TUNE, TOMTOM_A_FLL_KDCO_TUNE__POR }, - { TOMTOM_A_FLL_LOCK_THRESH, TOMTOM_A_FLL_LOCK_THRESH__POR }, - { TOMTOM_A_FLL_LOCK_DET_COUNT, TOMTOM_A_FLL_LOCK_DET_COUNT__POR }, - { TOMTOM_A_FLL_DAC_THRESHOLD, TOMTOM_A_FLL_DAC_THRESHOLD__POR }, - { TOMTOM_A_FLL_TEST_DCO_FREERUN, 
TOMTOM_A_FLL_TEST_DCO_FREERUN__POR }, - { TOMTOM_A_FLL_TEST_ENABLE, TOMTOM_A_FLL_TEST_ENABLE__POR }, - { TOMTOM_A_MICB_CFILT_1_CTL, TOMTOM_A_MICB_CFILT_1_CTL__POR }, - { TOMTOM_A_MICB_CFILT_1_VAL, TOMTOM_A_MICB_CFILT_1_VAL__POR }, - { TOMTOM_A_MICB_CFILT_1_PRECHRG, TOMTOM_A_MICB_CFILT_1_PRECHRG__POR }, - { TOMTOM_A_MICB_1_CTL, TOMTOM_A_MICB_1_CTL__POR }, - { TOMTOM_A_MICB_1_INT_RBIAS, TOMTOM_A_MICB_1_INT_RBIAS__POR }, - { TOMTOM_A_MICB_1_MBHC, TOMTOM_A_MICB_1_MBHC__POR }, - { TOMTOM_A_MICB_CFILT_2_CTL, TOMTOM_A_MICB_CFILT_2_CTL__POR }, - { TOMTOM_A_MICB_CFILT_2_VAL, TOMTOM_A_MICB_CFILT_2_VAL__POR }, - { TOMTOM_A_MICB_CFILT_2_PRECHRG, TOMTOM_A_MICB_CFILT_2_PRECHRG__POR }, - { TOMTOM_A_MICB_2_CTL, TOMTOM_A_MICB_2_CTL__POR }, - { TOMTOM_A_MICB_2_INT_RBIAS, TOMTOM_A_MICB_2_INT_RBIAS__POR }, - { TOMTOM_A_MICB_2_MBHC, TOMTOM_A_MICB_2_MBHC__POR }, - { TOMTOM_A_MICB_CFILT_3_CTL, TOMTOM_A_MICB_CFILT_3_CTL__POR }, - { TOMTOM_A_MICB_CFILT_3_VAL, TOMTOM_A_MICB_CFILT_3_VAL__POR }, - { TOMTOM_A_MICB_CFILT_3_PRECHRG, TOMTOM_A_MICB_CFILT_3_PRECHRG__POR }, - { TOMTOM_A_MICB_3_CTL, TOMTOM_A_MICB_3_CTL__POR }, - { TOMTOM_A_MICB_3_INT_RBIAS, TOMTOM_A_MICB_3_INT_RBIAS__POR }, - { TOMTOM_A_MICB_3_MBHC, TOMTOM_A_MICB_3_MBHC__POR }, - { TOMTOM_A_MICB_4_CTL, TOMTOM_A_MICB_4_CTL__POR }, - { TOMTOM_A_MICB_4_INT_RBIAS, TOMTOM_A_MICB_4_INT_RBIAS__POR }, - { TOMTOM_A_MICB_4_MBHC, TOMTOM_A_MICB_4_MBHC__POR }, - { TOMTOM_A_SPKR_DRV2_EN, TOMTOM_A_SPKR_DRV2_EN__POR }, - { TOMTOM_A_SPKR_DRV2_GAIN, TOMTOM_A_SPKR_DRV2_GAIN__POR }, - { TOMTOM_A_SPKR_DRV2_DAC_CTL, TOMTOM_A_SPKR_DRV2_DAC_CTL__POR }, - { TOMTOM_A_SPKR_DRV2_OCP_CTL, TOMTOM_A_SPKR_DRV2_OCP_CTL__POR }, - { TOMTOM_A_SPKR_DRV2_CLIP_DET, TOMTOM_A_SPKR_DRV2_CLIP_DET__POR }, - { TOMTOM_A_SPKR_DRV2_DBG_DAC, TOMTOM_A_SPKR_DRV2_DBG_DAC__POR }, - { TOMTOM_A_SPKR_DRV2_DBG_PA, TOMTOM_A_SPKR_DRV2_DBG_PA__POR }, - { TOMTOM_A_SPKR_DRV2_DBG_PWRSTG, TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR }, - { TOMTOM_A_SPKR_DRV2_BIAS_LDO, TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR }, - { TOMTOM_A_SPKR_DRV2_BIAS_INT, TOMTOM_A_SPKR_DRV2_BIAS_INT__POR }, - { TOMTOM_A_SPKR_DRV2_BIAS_PA, TOMTOM_A_SPKR_DRV2_BIAS_PA__POR }, - { TOMTOM_A_SPKR_DRV2_STATUS_OCP, TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR }, - { TOMTOM_A_SPKR_DRV2_STATUS_PA, TOMTOM_A_SPKR_DRV2_STATUS_PA__POR }, - { TOMTOM_A_MBHC_INSERT_DETECT, TOMTOM_A_MBHC_INSERT_DETECT__POR }, - { TOMTOM_A_MBHC_INSERT_DET_STATUS, - TOMTOM_A_MBHC_INSERT_DET_STATUS__POR }, - { TOMTOM_A_TX_COM_BIAS, TOMTOM_A_TX_COM_BIAS__POR }, - { TOMTOM_A_MBHC_INSERT_DETECT2, TOMTOM_A_MBHC_INSERT_DETECT2__POR }, - { TOMTOM_A_MBHC_SCALING_MUX_1, TOMTOM_A_MBHC_SCALING_MUX_1__POR }, - { TOMTOM_A_MBHC_SCALING_MUX_2, TOMTOM_A_MBHC_SCALING_MUX_2__POR }, - { TOMTOM_A_MAD_ANA_CTRL, TOMTOM_A_MAD_ANA_CTRL__POR }, - { TOMTOM_A_TX_SUP_SWITCH_CTRL_1, TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR }, - { TOMTOM_A_TX_SUP_SWITCH_CTRL_2, TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR }, - { TOMTOM_A_TX_1_GAIN, TOMTOM_A_TX_1_GAIN__POR }, - { TOMTOM_A_TX_1_2_TEST_EN, TOMTOM_A_TX_1_2_TEST_EN__POR }, - { TOMTOM_A_TX_2_GAIN, TOMTOM_A_TX_2_GAIN__POR }, - { TOMTOM_A_TX_1_2_ADC_IB, TOMTOM_A_TX_1_2_ADC_IB__POR }, - { TOMTOM_A_TX_1_2_ATEST_REFCTRL, TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR }, - { TOMTOM_A_TX_1_2_TEST_CTL, TOMTOM_A_TX_1_2_TEST_CTL__POR }, - { TOMTOM_A_TX_1_2_TEST_BLOCK_EN, TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR }, - { TOMTOM_A_TX_1_2_TXFE_CLKDIV, TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR }, - { TOMTOM_A_TX_1_2_SAR_ERR_CH1, TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR }, - { TOMTOM_A_TX_1_2_SAR_ERR_CH2, TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR }, - { TOMTOM_A_TX_3_GAIN, 
TOMTOM_A_TX_3_GAIN__POR }, - { TOMTOM_A_TX_3_4_TEST_EN, TOMTOM_A_TX_3_4_TEST_EN__POR }, - { TOMTOM_A_TX_4_GAIN, TOMTOM_A_TX_4_GAIN__POR }, - { TOMTOM_A_TX_3_4_ADC_IB, TOMTOM_A_TX_3_4_ADC_IB__POR }, - { TOMTOM_A_TX_3_4_ATEST_REFCTRL, TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR }, - { TOMTOM_A_TX_3_4_TEST_CTL, TOMTOM_A_TX_3_4_TEST_CTL__POR }, - { TOMTOM_A_TX_3_4_TEST_BLOCK_EN, TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR }, - { TOMTOM_A_TX_3_4_TXFE_CKDIV, TOMTOM_A_TX_3_4_TXFE_CKDIV__POR }, - { TOMTOM_A_TX_3_4_SAR_ERR_CH3, TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR }, - { TOMTOM_A_TX_3_4_SAR_ERR_CH4, TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR }, - { TOMTOM_A_TX_5_GAIN, TOMTOM_A_TX_5_GAIN__POR }, - { TOMTOM_A_TX_5_6_TEST_EN, TOMTOM_A_TX_5_6_TEST_EN__POR }, - { TOMTOM_A_TX_6_GAIN, TOMTOM_A_TX_6_GAIN__POR }, - { TOMTOM_A_TX_5_6_ADC_IB, TOMTOM_A_TX_5_6_ADC_IB__POR }, - { TOMTOM_A_TX_5_6_ATEST_REFCTRL, TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR }, - { TOMTOM_A_TX_5_6_TEST_CTL, TOMTOM_A_TX_5_6_TEST_CTL__POR }, - { TOMTOM_A_TX_5_6_TEST_BLOCK_EN, TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR }, - { TOMTOM_A_TX_5_6_TXFE_CKDIV, TOMTOM_A_TX_5_6_TXFE_CKDIV__POR }, - { TOMTOM_A_TX_5_6_SAR_ERR_CH5, TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR }, - { TOMTOM_A_TX_5_6_SAR_ERR_CH6, TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR }, - { TOMTOM_A_TX_7_MBHC_EN, TOMTOM_A_TX_7_MBHC_EN__POR }, - { TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL, - TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR }, - { TOMTOM_A_TX_7_MBHC_ADC, TOMTOM_A_TX_7_MBHC_ADC__POR }, - { TOMTOM_A_TX_7_MBHC_TEST_CTL, TOMTOM_A_TX_7_MBHC_TEST_CTL__POR }, - { TOMTOM_A_TX_7_MBHC_SAR_ERR, TOMTOM_A_TX_7_MBHC_SAR_ERR__POR }, - { TOMTOM_A_TX_7_TXFE_CLKDIV, TOMTOM_A_TX_7_TXFE_CLKDIV__POR }, - { TOMTOM_A_RCO_CTRL, TOMTOM_A_RCO_CTRL__POR }, - { TOMTOM_A_RCO_CALIBRATION_CTRL1, TOMTOM_A_RCO_CALIBRATION_CTRL1__POR }, - { TOMTOM_A_RCO_CALIBRATION_CTRL2, TOMTOM_A_RCO_CALIBRATION_CTRL2__POR }, - { TOMTOM_A_RCO_CALIBRATION_CTRL3, TOMTOM_A_RCO_CALIBRATION_CTRL3__POR }, - { TOMTOM_A_RCO_TEST_CTRL, TOMTOM_A_RCO_TEST_CTRL__POR }, - { TOMTOM_A_RCO_CALIBRATION_RESULT1, - TOMTOM_A_RCO_CALIBRATION_RESULT1__POR }, - { TOMTOM_A_RCO_CALIBRATION_RESULT2, - TOMTOM_A_RCO_CALIBRATION_RESULT2__POR }, - { TOMTOM_A_BUCK_MODE_1, TOMTOM_A_BUCK_MODE_1__POR }, - { TOMTOM_A_BUCK_MODE_2, TOMTOM_A_BUCK_MODE_2__POR }, - { TOMTOM_A_BUCK_MODE_3, TOMTOM_A_BUCK_MODE_3__POR }, - { TOMTOM_A_BUCK_MODE_4, TOMTOM_A_BUCK_MODE_4__POR }, - { TOMTOM_A_BUCK_MODE_5, TOMTOM_A_BUCK_MODE_5__POR }, - { TOMTOM_A_BUCK_CTRL_VCL_1, TOMTOM_A_BUCK_CTRL_VCL_1__POR }, - { TOMTOM_A_BUCK_CTRL_VCL_2, TOMTOM_A_BUCK_CTRL_VCL_2__POR }, - { TOMTOM_A_BUCK_CTRL_VCL_3, TOMTOM_A_BUCK_CTRL_VCL_3__POR }, - { TOMTOM_A_BUCK_CTRL_CCL_1, TOMTOM_A_BUCK_CTRL_CCL_1__POR }, - { TOMTOM_A_BUCK_CTRL_CCL_2, TOMTOM_A_BUCK_CTRL_CCL_2__POR }, - { TOMTOM_A_BUCK_CTRL_CCL_3, TOMTOM_A_BUCK_CTRL_CCL_3__POR }, - { TOMTOM_A_BUCK_CTRL_CCL_4, TOMTOM_A_BUCK_CTRL_CCL_4__POR }, - { TOMTOM_A_BUCK_CTRL_PWM_DRVR_1, TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR }, - { TOMTOM_A_BUCK_CTRL_PWM_DRVR_2, TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR }, - { TOMTOM_A_BUCK_CTRL_PWM_DRVR_3, TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR }, - { TOMTOM_A_BUCK_TMUX_A_D, TOMTOM_A_BUCK_TMUX_A_D__POR }, - { TOMTOM_A_NCP_BUCKREF, TOMTOM_A_NCP_BUCKREF__POR }, - { TOMTOM_A_NCP_EN, TOMTOM_A_NCP_EN__POR }, - { TOMTOM_A_NCP_CLK, TOMTOM_A_NCP_CLK__POR }, - { TOMTOM_A_NCP_STATIC, TOMTOM_A_NCP_STATIC__POR }, - { TOMTOM_A_NCP_VTH_LOW, TOMTOM_A_NCP_VTH_LOW__POR }, - { TOMTOM_A_NCP_VTH_HIGH, TOMTOM_A_NCP_VTH_HIGH__POR }, - { TOMTOM_A_NCP_ATEST, TOMTOM_A_NCP_ATEST__POR }, - { TOMTOM_A_NCP_DTEST, TOMTOM_A_NCP_DTEST__POR }, - { 
TOMTOM_A_NCP_DLY1, TOMTOM_A_NCP_DLY1__POR }, - { TOMTOM_A_NCP_DLY2, TOMTOM_A_NCP_DLY2__POR }, - { TOMTOM_A_RX_AUX_SW_CTL, TOMTOM_A_RX_AUX_SW_CTL__POR }, - { TOMTOM_A_RX_PA_AUX_IN_CONN, TOMTOM_A_RX_PA_AUX_IN_CONN__POR }, - { TOMTOM_A_RX_COM_TIMER_DIV, TOMTOM_A_RX_COM_TIMER_DIV__POR }, - { TOMTOM_A_RX_COM_OCP_CTL, TOMTOM_A_RX_COM_OCP_CTL__POR }, - { TOMTOM_A_RX_COM_OCP_COUNT, TOMTOM_A_RX_COM_OCP_COUNT__POR }, - { TOMTOM_A_RX_COM_DAC_CTL, TOMTOM_A_RX_COM_DAC_CTL__POR }, - { TOMTOM_A_RX_COM_BIAS, TOMTOM_A_RX_COM_BIAS__POR }, - { TOMTOM_A_RX_HPH_AUTO_CHOP, TOMTOM_A_RX_HPH_AUTO_CHOP__POR }, - { TOMTOM_A_RX_HPH_CHOP_CTL, TOMTOM_A_RX_HPH_CHOP_CTL__POR }, - { TOMTOM_A_RX_HPH_BIAS_PA, TOMTOM_A_RX_HPH_BIAS_PA__POR }, - { TOMTOM_A_RX_HPH_BIAS_LDO, TOMTOM_A_RX_HPH_BIAS_LDO__POR }, - { TOMTOM_A_RX_HPH_BIAS_CNP, TOMTOM_A_RX_HPH_BIAS_CNP__POR }, - { TOMTOM_A_RX_HPH_BIAS_WG_OCP, TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR }, - { TOMTOM_A_RX_HPH_OCP_CTL, TOMTOM_A_RX_HPH_OCP_CTL__POR }, - { TOMTOM_A_RX_HPH_CNP_EN, TOMTOM_A_RX_HPH_CNP_EN__POR }, - { TOMTOM_A_RX_HPH_CNP_WG_CTL, TOMTOM_A_RX_HPH_CNP_WG_CTL__POR }, - { TOMTOM_A_RX_HPH_CNP_WG_TIME, TOMTOM_A_RX_HPH_CNP_WG_TIME__POR }, - { TOMTOM_A_RX_HPH_L_GAIN, TOMTOM_A_RX_HPH_L_GAIN__POR }, - { TOMTOM_A_RX_HPH_L_TEST, TOMTOM_A_RX_HPH_L_TEST__POR }, - { TOMTOM_A_RX_HPH_L_PA_CTL, TOMTOM_A_RX_HPH_L_PA_CTL__POR }, - { TOMTOM_A_RX_HPH_L_DAC_CTL, TOMTOM_A_RX_HPH_L_DAC_CTL__POR }, - { TOMTOM_A_RX_HPH_L_ATEST, TOMTOM_A_RX_HPH_L_ATEST__POR }, - { TOMTOM_A_RX_HPH_L_STATUS, TOMTOM_A_RX_HPH_L_STATUS__POR }, - { TOMTOM_A_RX_HPH_R_GAIN, TOMTOM_A_RX_HPH_R_GAIN__POR }, - { TOMTOM_A_RX_HPH_R_TEST, TOMTOM_A_RX_HPH_R_TEST__POR }, - { TOMTOM_A_RX_HPH_R_PA_CTL, TOMTOM_A_RX_HPH_R_PA_CTL__POR }, - { TOMTOM_A_RX_HPH_R_DAC_CTL, TOMTOM_A_RX_HPH_R_DAC_CTL__POR }, - { TOMTOM_A_RX_HPH_R_ATEST, TOMTOM_A_RX_HPH_R_ATEST__POR }, - { TOMTOM_A_RX_HPH_R_STATUS, TOMTOM_A_RX_HPH_R_STATUS__POR }, - { TOMTOM_A_RX_EAR_BIAS_PA, TOMTOM_A_RX_EAR_BIAS_PA__POR }, - { TOMTOM_A_RX_EAR_BIAS_CMBUFF, TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR }, - { TOMTOM_A_RX_EAR_EN, TOMTOM_A_RX_EAR_EN__POR }, - { TOMTOM_A_RX_EAR_GAIN, TOMTOM_A_RX_EAR_GAIN__POR }, - { TOMTOM_A_RX_EAR_CMBUFF, TOMTOM_A_RX_EAR_CMBUFF__POR }, - { TOMTOM_A_RX_EAR_ICTL, TOMTOM_A_RX_EAR_ICTL__POR }, - { TOMTOM_A_RX_EAR_CCOMP, TOMTOM_A_RX_EAR_CCOMP__POR }, - { TOMTOM_A_RX_EAR_VCM, TOMTOM_A_RX_EAR_VCM__POR }, - { TOMTOM_A_RX_EAR_CNP, TOMTOM_A_RX_EAR_CNP__POR }, - { TOMTOM_A_RX_EAR_DAC_CTL_ATEST, TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR }, - { TOMTOM_A_RX_EAR_STATUS, TOMTOM_A_RX_EAR_STATUS__POR }, - { TOMTOM_A_RX_LINE_BIAS_PA, TOMTOM_A_RX_LINE_BIAS_PA__POR }, - { TOMTOM_A_RX_BUCK_BIAS1, TOMTOM_A_RX_BUCK_BIAS1__POR }, - { TOMTOM_A_RX_BUCK_BIAS2, TOMTOM_A_RX_BUCK_BIAS2__POR }, - { TOMTOM_A_RX_LINE_COM, TOMTOM_A_RX_LINE_COM__POR }, - { TOMTOM_A_RX_LINE_CNP_EN, TOMTOM_A_RX_LINE_CNP_EN__POR }, - { TOMTOM_A_RX_LINE_CNP_WG_CTL, TOMTOM_A_RX_LINE_CNP_WG_CTL__POR }, - { TOMTOM_A_RX_LINE_CNP_WG_TIME, TOMTOM_A_RX_LINE_CNP_WG_TIME__POR }, - { TOMTOM_A_RX_LINE_1_GAIN, TOMTOM_A_RX_LINE_1_GAIN__POR }, - { TOMTOM_A_RX_LINE_1_TEST, TOMTOM_A_RX_LINE_1_TEST__POR }, - { TOMTOM_A_RX_LINE_1_DAC_CTL, TOMTOM_A_RX_LINE_1_DAC_CTL__POR }, - { TOMTOM_A_RX_LINE_1_STATUS, TOMTOM_A_RX_LINE_1_STATUS__POR }, - { TOMTOM_A_RX_LINE_2_GAIN, TOMTOM_A_RX_LINE_2_GAIN__POR }, - { TOMTOM_A_RX_LINE_2_TEST, TOMTOM_A_RX_LINE_2_TEST__POR }, - { TOMTOM_A_RX_LINE_2_DAC_CTL, TOMTOM_A_RX_LINE_2_DAC_CTL__POR }, - { TOMTOM_A_RX_LINE_2_STATUS, TOMTOM_A_RX_LINE_2_STATUS__POR }, - { TOMTOM_A_RX_LINE_3_GAIN, TOMTOM_A_RX_LINE_3_GAIN__POR 
}, - { TOMTOM_A_RX_LINE_3_TEST, TOMTOM_A_RX_LINE_3_TEST__POR }, - { TOMTOM_A_RX_LINE_3_DAC_CTL, TOMTOM_A_RX_LINE_3_DAC_CTL__POR }, - { TOMTOM_A_RX_LINE_3_STATUS, TOMTOM_A_RX_LINE_3_STATUS__POR }, - { TOMTOM_A_RX_LINE_4_GAIN, TOMTOM_A_RX_LINE_4_GAIN__POR }, - { TOMTOM_A_RX_LINE_4_TEST, TOMTOM_A_RX_LINE_4_TEST__POR }, - { TOMTOM_A_RX_LINE_4_DAC_CTL, TOMTOM_A_RX_LINE_4_DAC_CTL__POR }, - { TOMTOM_A_RX_LINE_4_STATUS, TOMTOM_A_RX_LINE_4_STATUS__POR }, - { TOMTOM_A_RX_LINE_CNP_DBG, TOMTOM_A_RX_LINE_CNP_DBG__POR }, - { TOMTOM_A_SPKR_DRV1_EN, TOMTOM_A_SPKR_DRV1_EN__POR }, - { TOMTOM_A_SPKR_DRV1_GAIN, TOMTOM_A_SPKR_DRV1_GAIN__POR }, - { TOMTOM_A_SPKR_DRV1_DAC_CTL, TOMTOM_A_SPKR_DRV1_DAC_CTL__POR }, - { TOMTOM_A_SPKR_DRV1_OCP_CTL, TOMTOM_A_SPKR_DRV1_OCP_CTL__POR }, - { TOMTOM_A_SPKR_DRV1_CLIP_DET, TOMTOM_A_SPKR_DRV1_CLIP_DET__POR }, - { TOMTOM_A_SPKR_DRV1_IEC, TOMTOM_A_SPKR_DRV1_IEC__POR }, - { TOMTOM_A_SPKR_DRV1_DBG_DAC, TOMTOM_A_SPKR_DRV1_DBG_DAC__POR }, - { TOMTOM_A_SPKR_DRV1_DBG_PA, TOMTOM_A_SPKR_DRV1_DBG_PA__POR }, - { TOMTOM_A_SPKR_DRV1_DBG_PWRSTG, TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR }, - { TOMTOM_A_SPKR_DRV1_BIAS_LDO, TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR }, - { TOMTOM_A_SPKR_DRV1_BIAS_INT, TOMTOM_A_SPKR_DRV1_BIAS_INT__POR }, - { TOMTOM_A_SPKR_DRV1_BIAS_PA, TOMTOM_A_SPKR_DRV1_BIAS_PA__POR }, - { TOMTOM_A_SPKR_DRV1_STATUS_OCP, TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR }, - { TOMTOM_A_SPKR_DRV1_STATUS_PA, TOMTOM_A_SPKR_DRV1_STATUS_PA__POR }, - { TOMTOM_A_SPKR1_PROT_EN, TOMTOM_A_SPKR1_PROT_EN__POR }, - { TOMTOM_A_SPKR1_PROT_ADC_TEST_EN, - TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR }, - { TOMTOM_A_SPKR1_PROT_ATEST, TOMTOM_A_SPKR1_PROT_ATEST__POR }, - { TOMTOM_A_SPKR1_PROT_LDO_CTRL, TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR }, - { TOMTOM_A_SPKR1_PROT_ISENSE_CTRL, - TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR }, - { TOMTOM_A_SPKR1_PROT_VSENSE_CTRL, - TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR }, - { TOMTOM_A_SPKR2_PROT_EN, TOMTOM_A_SPKR2_PROT_EN__POR }, - { TOMTOM_A_SPKR2_PROT_ADC_TEST_EN, - TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR }, - { TOMTOM_A_SPKR2_PROT_ATEST, TOMTOM_A_SPKR2_PROT_ATEST__POR }, - { TOMTOM_A_SPKR2_PROT_LDO_CTRL, TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR }, - { TOMTOM_A_SPKR2_PROT_ISENSE_CTRL, - TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR }, - { TOMTOM_A_SPKR2_PROT_VSENSE_CTRL, - TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR }, - { TOMTOM_A_MBHC_HPH, TOMTOM_A_MBHC_HPH__POR }, - { TOMTOM_A_CDC_ANC1_B1_CTL, TOMTOM_A_CDC_ANC1_B1_CTL__POR }, - { TOMTOM_A_CDC_ANC2_B1_CTL, TOMTOM_A_CDC_ANC2_B1_CTL__POR }, - { TOMTOM_A_CDC_ANC1_SHIFT, TOMTOM_A_CDC_ANC1_SHIFT__POR }, - { TOMTOM_A_CDC_ANC2_SHIFT, TOMTOM_A_CDC_ANC2_SHIFT__POR }, - { TOMTOM_A_CDC_ANC1_IIR_B1_CTL, TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR }, - { TOMTOM_A_CDC_ANC2_IIR_B1_CTL, TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR }, - { TOMTOM_A_CDC_ANC1_IIR_B2_CTL, TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR }, - { TOMTOM_A_CDC_ANC2_IIR_B2_CTL, TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR }, - { TOMTOM_A_CDC_ANC1_IIR_B3_CTL, TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR }, - { TOMTOM_A_CDC_ANC2_IIR_B3_CTL, TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR }, - { TOMTOM_A_CDC_ANC1_LPF_B1_CTL, TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR }, - { TOMTOM_A_CDC_ANC2_LPF_B1_CTL, TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR }, - { TOMTOM_A_CDC_ANC1_LPF_B2_CTL, TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR }, - { TOMTOM_A_CDC_ANC2_LPF_B2_CTL, TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR }, - { TOMTOM_A_CDC_ANC1_SPARE, TOMTOM_A_CDC_ANC1_SPARE__POR }, - { TOMTOM_A_CDC_ANC2_SPARE, TOMTOM_A_CDC_ANC2_SPARE__POR }, - { TOMTOM_A_CDC_ANC1_SMLPF_CTL, TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR }, - { TOMTOM_A_CDC_ANC2_SMLPF_CTL, 
TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR }, - { TOMTOM_A_CDC_ANC1_DCFLT_CTL, TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR }, - { TOMTOM_A_CDC_ANC2_DCFLT_CTL, TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR }, - { TOMTOM_A_CDC_ANC1_GAIN_CTL, TOMTOM_A_CDC_ANC1_GAIN_CTL__POR }, - { TOMTOM_A_CDC_ANC2_GAIN_CTL, TOMTOM_A_CDC_ANC2_GAIN_CTL__POR }, - { TOMTOM_A_CDC_ANC1_B2_CTL, TOMTOM_A_CDC_ANC1_B2_CTL__POR }, - { TOMTOM_A_CDC_ANC2_B2_CTL, TOMTOM_A_CDC_ANC2_B2_CTL__POR }, - { TOMTOM_A_CDC_TX1_VOL_CTL_TIMER, TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX2_VOL_CTL_TIMER, TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX3_VOL_CTL_TIMER, TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX4_VOL_CTL_TIMER, TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX5_VOL_CTL_TIMER, TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX6_VOL_CTL_TIMER, TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX7_VOL_CTL_TIMER, TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX8_VOL_CTL_TIMER, TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX9_VOL_CTL_TIMER, TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX10_VOL_CTL_TIMER, - TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR }, - { TOMTOM_A_CDC_TX1_VOL_CTL_GAIN, TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX2_VOL_CTL_GAIN, TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX3_VOL_CTL_GAIN, TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX4_VOL_CTL_GAIN, TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX5_VOL_CTL_GAIN, TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX6_VOL_CTL_GAIN, TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX7_VOL_CTL_GAIN, TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX8_VOL_CTL_GAIN, TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX9_VOL_CTL_GAIN, TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX10_VOL_CTL_GAIN, TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR }, - { TOMTOM_A_CDC_TX1_VOL_CTL_CFG, TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX2_VOL_CTL_CFG, TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX3_VOL_CTL_CFG, TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX4_VOL_CTL_CFG, TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX5_VOL_CTL_CFG, TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX6_VOL_CTL_CFG, TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX7_VOL_CTL_CFG, TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX8_VOL_CTL_CFG, TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX9_VOL_CTL_CFG, TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX10_VOL_CTL_CFG, TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR }, - { TOMTOM_A_CDC_TX1_MUX_CTL, TOMTOM_A_CDC_TX1_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX2_MUX_CTL, TOMTOM_A_CDC_TX2_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX3_MUX_CTL, TOMTOM_A_CDC_TX3_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX4_MUX_CTL, TOMTOM_A_CDC_TX4_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX5_MUX_CTL, TOMTOM_A_CDC_TX5_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX6_MUX_CTL, TOMTOM_A_CDC_TX6_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX7_MUX_CTL, TOMTOM_A_CDC_TX7_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX8_MUX_CTL, TOMTOM_A_CDC_TX8_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX9_MUX_CTL, TOMTOM_A_CDC_TX9_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX10_MUX_CTL, TOMTOM_A_CDC_TX10_MUX_CTL__POR }, - { TOMTOM_A_CDC_TX1_CLK_FS_CTL, TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR }, - { TOMTOM_A_CDC_TX2_CLK_FS_CTL, TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR }, - { TOMTOM_A_CDC_TX3_CLK_FS_CTL, TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR }, - { TOMTOM_A_CDC_TX4_CLK_FS_CTL, TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR }, - { 
TOMTOM_A_CDC_TX5_CLK_FS_CTL, TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR }, - { TOMTOM_A_CDC_TX6_CLK_FS_CTL, TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR }, - { TOMTOM_A_CDC_TX7_CLK_FS_CTL, TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR }, - { TOMTOM_A_CDC_TX8_CLK_FS_CTL, TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR }, - { TOMTOM_A_CDC_TX9_CLK_FS_CTL, TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR }, - { TOMTOM_A_CDC_TX10_CLK_FS_CTL, TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR }, - { TOMTOM_A_CDC_TX1_DMIC_CTL, TOMTOM_A_CDC_TX1_DMIC_CTL__POR }, - { TOMTOM_A_CDC_TX2_DMIC_CTL, TOMTOM_A_CDC_TX2_DMIC_CTL__POR }, - { TOMTOM_A_CDC_TX3_DMIC_CTL, TOMTOM_A_CDC_TX3_DMIC_CTL__POR }, - { TOMTOM_A_CDC_TX4_DMIC_CTL, TOMTOM_A_CDC_TX4_DMIC_CTL__POR }, - { TOMTOM_A_CDC_TX5_DMIC_CTL, TOMTOM_A_CDC_TX5_DMIC_CTL__POR }, - { TOMTOM_A_CDC_TX6_DMIC_CTL, TOMTOM_A_CDC_TX6_DMIC_CTL__POR }, - { TOMTOM_A_CDC_TX7_DMIC_CTL, TOMTOM_A_CDC_TX7_DMIC_CTL__POR }, - { TOMTOM_A_CDC_TX8_DMIC_CTL, TOMTOM_A_CDC_TX8_DMIC_CTL__POR }, - { TOMTOM_A_CDC_TX9_DMIC_CTL, TOMTOM_A_CDC_TX9_DMIC_CTL__POR }, - { TOMTOM_A_CDC_TX10_DMIC_CTL, TOMTOM_A_CDC_TX10_DMIC_CTL__POR }, - { TOMTOM_A_CDC_SPKR_CLIPDET_VAL0, TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR }, - { TOMTOM_A_CDC_SPKR_CLIPDET_VAL1, TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR }, - { TOMTOM_A_CDC_SPKR_CLIPDET_VAL2, TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR }, - { TOMTOM_A_CDC_SPKR_CLIPDET_VAL3, TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR }, - { TOMTOM_A_CDC_SPKR_CLIPDET_VAL4, TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR }, - { TOMTOM_A_CDC_SPKR_CLIPDET_VAL5, TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR }, - { TOMTOM_A_CDC_SPKR_CLIPDET_VAL6, TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR }, - { TOMTOM_A_CDC_SPKR_CLIPDET_VAL7, TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR }, - { TOMTOM_A_CDC_DEBUG_B1_CTL, TOMTOM_A_CDC_DEBUG_B1_CTL__POR }, - { TOMTOM_A_CDC_DEBUG_B2_CTL, TOMTOM_A_CDC_DEBUG_B2_CTL__POR }, - { TOMTOM_A_CDC_DEBUG_B3_CTL, TOMTOM_A_CDC_DEBUG_B3_CTL__POR }, - { TOMTOM_A_CDC_DEBUG_B4_CTL, TOMTOM_A_CDC_DEBUG_B4_CTL__POR }, - { TOMTOM_A_CDC_DEBUG_B5_CTL, TOMTOM_A_CDC_DEBUG_B5_CTL__POR }, - { TOMTOM_A_CDC_DEBUG_B6_CTL, TOMTOM_A_CDC_DEBUG_B6_CTL__POR }, - { TOMTOM_A_CDC_DEBUG_B7_CTL, TOMTOM_A_CDC_DEBUG_B7_CTL__POR }, - { TOMTOM_A_CDC_SRC1_PDA_CFG, TOMTOM_A_CDC_SRC1_PDA_CFG__POR }, - { TOMTOM_A_CDC_SRC2_PDA_CFG, TOMTOM_A_CDC_SRC2_PDA_CFG__POR }, - { TOMTOM_A_CDC_SRC1_FS_CTL, TOMTOM_A_CDC_SRC1_FS_CTL__POR }, - { TOMTOM_A_CDC_SRC2_FS_CTL, TOMTOM_A_CDC_SRC2_FS_CTL__POR }, - { TOMTOM_A_CDC_RX1_B1_CTL, TOMTOM_A_CDC_RX1_B1_CTL__POR }, - { TOMTOM_A_CDC_RX2_B1_CTL, TOMTOM_A_CDC_RX2_B1_CTL__POR }, - { TOMTOM_A_CDC_RX3_B1_CTL, TOMTOM_A_CDC_RX3_B1_CTL__POR }, - { TOMTOM_A_CDC_RX4_B1_CTL, TOMTOM_A_CDC_RX4_B1_CTL__POR }, - { TOMTOM_A_CDC_RX5_B1_CTL, TOMTOM_A_CDC_RX5_B1_CTL__POR }, - { TOMTOM_A_CDC_RX6_B1_CTL, TOMTOM_A_CDC_RX6_B1_CTL__POR }, - { TOMTOM_A_CDC_RX7_B1_CTL, TOMTOM_A_CDC_RX7_B1_CTL__POR }, - { TOMTOM_A_CDC_RX1_B2_CTL, TOMTOM_A_CDC_RX1_B2_CTL__POR }, - { TOMTOM_A_CDC_RX2_B2_CTL, TOMTOM_A_CDC_RX2_B2_CTL__POR }, - { TOMTOM_A_CDC_RX3_B2_CTL, TOMTOM_A_CDC_RX3_B2_CTL__POR }, - { TOMTOM_A_CDC_RX4_B2_CTL, TOMTOM_A_CDC_RX4_B2_CTL__POR }, - { TOMTOM_A_CDC_RX5_B2_CTL, TOMTOM_A_CDC_RX5_B2_CTL__POR }, - { TOMTOM_A_CDC_RX6_B2_CTL, TOMTOM_A_CDC_RX6_B2_CTL__POR }, - { TOMTOM_A_CDC_RX7_B2_CTL, TOMTOM_A_CDC_RX7_B2_CTL__POR }, - { TOMTOM_A_CDC_RX1_B3_CTL, TOMTOM_A_CDC_RX1_B3_CTL__POR }, - { TOMTOM_A_CDC_RX2_B3_CTL, TOMTOM_A_CDC_RX2_B3_CTL__POR }, - { TOMTOM_A_CDC_RX3_B3_CTL, TOMTOM_A_CDC_RX3_B3_CTL__POR }, - { TOMTOM_A_CDC_RX4_B3_CTL, TOMTOM_A_CDC_RX4_B3_CTL__POR }, - { TOMTOM_A_CDC_RX5_B3_CTL, TOMTOM_A_CDC_RX5_B3_CTL__POR }, - { 
TOMTOM_A_CDC_RX6_B3_CTL, TOMTOM_A_CDC_RX6_B3_CTL__POR }, - { TOMTOM_A_CDC_RX7_B3_CTL, TOMTOM_A_CDC_RX7_B3_CTL__POR }, - { TOMTOM_A_CDC_RX1_B4_CTL, TOMTOM_A_CDC_RX1_B4_CTL__POR }, - { TOMTOM_A_CDC_RX2_B4_CTL, TOMTOM_A_CDC_RX2_B4_CTL__POR }, - { TOMTOM_A_CDC_RX3_B4_CTL, TOMTOM_A_CDC_RX3_B4_CTL__POR }, - { TOMTOM_A_CDC_RX4_B4_CTL, TOMTOM_A_CDC_RX4_B4_CTL__POR }, - { TOMTOM_A_CDC_RX5_B4_CTL, TOMTOM_A_CDC_RX5_B4_CTL__POR }, - { TOMTOM_A_CDC_RX6_B4_CTL, TOMTOM_A_CDC_RX6_B4_CTL__POR }, - { TOMTOM_A_CDC_RX7_B4_CTL, TOMTOM_A_CDC_RX7_B4_CTL__POR }, - { TOMTOM_A_CDC_RX1_B5_CTL, TOMTOM_A_CDC_RX1_B5_CTL__POR }, - { TOMTOM_A_CDC_RX2_B5_CTL, TOMTOM_A_CDC_RX2_B5_CTL__POR }, - { TOMTOM_A_CDC_RX3_B5_CTL, TOMTOM_A_CDC_RX3_B5_CTL__POR }, - { TOMTOM_A_CDC_RX4_B5_CTL, TOMTOM_A_CDC_RX4_B5_CTL__POR }, - { TOMTOM_A_CDC_RX5_B5_CTL, TOMTOM_A_CDC_RX5_B5_CTL__POR }, - { TOMTOM_A_CDC_RX6_B5_CTL, TOMTOM_A_CDC_RX6_B5_CTL__POR }, - { TOMTOM_A_CDC_RX7_B5_CTL, TOMTOM_A_CDC_RX7_B5_CTL__POR }, - { TOMTOM_A_CDC_RX1_B6_CTL, TOMTOM_A_CDC_RX1_B6_CTL__POR }, - { TOMTOM_A_CDC_RX2_B6_CTL, TOMTOM_A_CDC_RX2_B6_CTL__POR }, - { TOMTOM_A_CDC_RX3_B6_CTL, TOMTOM_A_CDC_RX3_B6_CTL__POR }, - { TOMTOM_A_CDC_RX4_B6_CTL, TOMTOM_A_CDC_RX4_B6_CTL__POR }, - { TOMTOM_A_CDC_RX5_B6_CTL, TOMTOM_A_CDC_RX5_B6_CTL__POR }, - { TOMTOM_A_CDC_RX6_B6_CTL, TOMTOM_A_CDC_RX6_B6_CTL__POR }, - { TOMTOM_A_CDC_RX7_B6_CTL, TOMTOM_A_CDC_RX7_B6_CTL__POR }, - { TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL, - TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR }, - { TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL, - TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR }, - { TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL, - TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR }, - { TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL, - TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR }, - { TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL, - TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR }, - { TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL, - TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR }, - { TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL, - TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR }, - { TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR }, - { TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR }, - { TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR }, - { TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR }, - { TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR }, - { TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR }, - { TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL, - TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR }, - { TOMTOM_A_CDC_VBAT_CFG, TOMTOM_A_CDC_VBAT_CFG__POR }, - { TOMTOM_A_CDC_VBAT_ADC_CAL1, TOMTOM_A_CDC_VBAT_ADC_CAL1__POR }, - { TOMTOM_A_CDC_VBAT_ADC_CAL2, TOMTOM_A_CDC_VBAT_ADC_CAL2__POR }, - { TOMTOM_A_CDC_VBAT_ADC_CAL3, TOMTOM_A_CDC_VBAT_ADC_CAL3__POR }, - { TOMTOM_A_CDC_VBAT_PK_EST1, TOMTOM_A_CDC_VBAT_PK_EST1__POR }, - { TOMTOM_A_CDC_VBAT_PK_EST2, TOMTOM_A_CDC_VBAT_PK_EST2__POR }, - { TOMTOM_A_CDC_VBAT_PK_EST3, TOMTOM_A_CDC_VBAT_PK_EST3__POR }, - { TOMTOM_A_CDC_VBAT_RF_PROC1, TOMTOM_A_CDC_VBAT_RF_PROC1__POR }, - { TOMTOM_A_CDC_VBAT_RF_PROC2, TOMTOM_A_CDC_VBAT_RF_PROC2__POR }, - { TOMTOM_A_CDC_VBAT_TAC1, TOMTOM_A_CDC_VBAT_TAC1__POR }, - { TOMTOM_A_CDC_VBAT_TAC2, TOMTOM_A_CDC_VBAT_TAC2__POR }, - { TOMTOM_A_CDC_VBAT_TAC3, TOMTOM_A_CDC_VBAT_TAC3__POR }, - { TOMTOM_A_CDC_VBAT_TAC4, TOMTOM_A_CDC_VBAT_TAC4__POR }, - { TOMTOM_A_CDC_VBAT_GAIN_UPD1, TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR }, - { TOMTOM_A_CDC_VBAT_GAIN_UPD2, TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR }, - { TOMTOM_A_CDC_VBAT_GAIN_UPD3, TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR }, - { 
TOMTOM_A_CDC_VBAT_GAIN_UPD4, TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR }, - { TOMTOM_A_CDC_VBAT_DEBUG1, TOMTOM_A_CDC_VBAT_DEBUG1__POR }, - { TOMTOM_A_CDC_VBAT_GAIN_UPD_MON, TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR }, - { TOMTOM_A_CDC_VBAT_GAIN_MON_VAL, TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR }, - { TOMTOM_A_CDC_CLK_ANC_RESET_CTL, TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR }, - { TOMTOM_A_CDC_CLK_RX_RESET_CTL, TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR }, - { TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL, - TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR }, - { TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL, - TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR }, - { TOMTOM_A_CDC_CLK_RX_I2S_CTL, TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR }, - { TOMTOM_A_CDC_CLK_TX_I2S_CTL, TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR }, - { TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL, - TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR }, - { TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL, - TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR }, - { TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL, - TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR }, - { TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL, - TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR }, - { TOMTOM_A_CDC_CLK_OTHR_CTL, TOMTOM_A_CDC_CLK_OTHR_CTL__POR }, - { TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL, - TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR }, - { TOMTOM_A_CDC_CLK_RX_B1_CTL, TOMTOM_A_CDC_CLK_RX_B1_CTL__POR }, - { TOMTOM_A_CDC_CLK_RX_B2_CTL, TOMTOM_A_CDC_CLK_RX_B2_CTL__POR }, - { TOMTOM_A_CDC_CLK_MCLK_CTL, TOMTOM_A_CDC_CLK_MCLK_CTL__POR }, - { TOMTOM_A_CDC_CLK_PDM_CTL, TOMTOM_A_CDC_CLK_PDM_CTL__POR }, - { TOMTOM_A_CDC_CLK_SD_CTL, TOMTOM_A_CDC_CLK_SD_CTL__POR }, - { TOMTOM_A_CDC_CLSH_B1_CTL, TOMTOM_A_CDC_CLSH_B1_CTL__POR }, - { TOMTOM_A_CDC_CLSH_B2_CTL, TOMTOM_A_CDC_CLSH_B2_CTL__POR }, - { TOMTOM_A_CDC_CLSH_B3_CTL, TOMTOM_A_CDC_CLSH_B3_CTL__POR }, - { TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS, - TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR }, - { TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD, - TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR }, - { TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD, - TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR }, - { TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD, - TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR }, - { TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD, - TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR }, - { TOMTOM_A_CDC_CLSH_K_ADDR, TOMTOM_A_CDC_CLSH_K_ADDR__POR }, - { TOMTOM_A_CDC_CLSH_K_DATA, TOMTOM_A_CDC_CLSH_K_DATA__POR }, - { TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L, - TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR }, - { TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U, - TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR }, - { TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L, - TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR }, - { TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U, - TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR }, - { TOMTOM_A_CDC_CLSH_V_PA_HD_EAR, TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR }, - { TOMTOM_A_CDC_CLSH_V_PA_HD_HPH, TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR }, - { TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR, TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR }, - { TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH, TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR }, - { TOMTOM_A_CDC_IIR1_GAIN_B1_CTL, TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR }, - { TOMTOM_A_CDC_IIR2_GAIN_B1_CTL, TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR }, - { TOMTOM_A_CDC_IIR1_GAIN_B2_CTL, TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR }, - { TOMTOM_A_CDC_IIR2_GAIN_B2_CTL, TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR }, - { TOMTOM_A_CDC_IIR1_GAIN_B3_CTL, TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR }, - { TOMTOM_A_CDC_IIR2_GAIN_B3_CTL, TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR }, - { TOMTOM_A_CDC_IIR1_GAIN_B4_CTL, TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR }, - { TOMTOM_A_CDC_IIR2_GAIN_B4_CTL, TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR }, - { TOMTOM_A_CDC_IIR1_GAIN_B5_CTL, TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR }, - { 
TOMTOM_A_CDC_IIR2_GAIN_B5_CTL, TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR }, - { TOMTOM_A_CDC_IIR1_GAIN_B6_CTL, TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR }, - { TOMTOM_A_CDC_IIR2_GAIN_B6_CTL, TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR }, - { TOMTOM_A_CDC_IIR1_GAIN_B7_CTL, TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR }, - { TOMTOM_A_CDC_IIR2_GAIN_B7_CTL, TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR }, - { TOMTOM_A_CDC_IIR1_GAIN_B8_CTL, TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR }, - { TOMTOM_A_CDC_IIR2_GAIN_B8_CTL, TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR }, - { TOMTOM_A_CDC_IIR1_CTL, TOMTOM_A_CDC_IIR1_CTL__POR }, - { TOMTOM_A_CDC_IIR2_CTL, TOMTOM_A_CDC_IIR2_CTL__POR }, - { TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL, - TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR }, - { TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL, - TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR }, - { TOMTOM_A_CDC_IIR1_COEF_B1_CTL, TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR }, - { TOMTOM_A_CDC_IIR2_COEF_B1_CTL, TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR }, - { TOMTOM_A_CDC_IIR1_COEF_B2_CTL, TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR }, - { TOMTOM_A_CDC_IIR2_COEF_B2_CTL, TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR }, - { TOMTOM_A_CDC_TOP_GAIN_UPDATE, TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR }, - { TOMTOM_A_CDC_PA_RAMP_B1_CTL, TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR }, - { TOMTOM_A_CDC_PA_RAMP_B2_CTL, TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR }, - { TOMTOM_A_CDC_PA_RAMP_B3_CTL, TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR }, - { TOMTOM_A_CDC_PA_RAMP_B4_CTL, TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR }, - { TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL, - TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR }, - { TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL, - TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR }, - { TOMTOM_A_CDC_COMP0_B1_CTL, TOMTOM_A_CDC_COMP0_B1_CTL__POR }, - { TOMTOM_A_CDC_COMP1_B1_CTL, TOMTOM_A_CDC_COMP1_B1_CTL__POR }, - { TOMTOM_A_CDC_COMP2_B1_CTL, TOMTOM_A_CDC_COMP2_B1_CTL__POR }, - { TOMTOM_A_CDC_COMP0_B2_CTL, TOMTOM_A_CDC_COMP0_B2_CTL__POR }, - { TOMTOM_A_CDC_COMP1_B2_CTL, TOMTOM_A_CDC_COMP1_B2_CTL__POR }, - { TOMTOM_A_CDC_COMP2_B2_CTL, TOMTOM_A_CDC_COMP2_B2_CTL__POR }, - { TOMTOM_A_CDC_COMP0_B3_CTL, TOMTOM_A_CDC_COMP0_B3_CTL__POR }, - { TOMTOM_A_CDC_COMP1_B3_CTL, TOMTOM_A_CDC_COMP1_B3_CTL__POR }, - { TOMTOM_A_CDC_COMP2_B3_CTL, TOMTOM_A_CDC_COMP2_B3_CTL__POR }, - { TOMTOM_A_CDC_COMP0_B4_CTL, TOMTOM_A_CDC_COMP0_B4_CTL__POR }, - { TOMTOM_A_CDC_COMP1_B4_CTL, TOMTOM_A_CDC_COMP1_B4_CTL__POR }, - { TOMTOM_A_CDC_COMP2_B4_CTL, TOMTOM_A_CDC_COMP2_B4_CTL__POR }, - { TOMTOM_A_CDC_COMP0_B5_CTL, TOMTOM_A_CDC_COMP0_B5_CTL__POR }, - { TOMTOM_A_CDC_COMP1_B5_CTL, TOMTOM_A_CDC_COMP1_B5_CTL__POR }, - { TOMTOM_A_CDC_COMP2_B5_CTL, TOMTOM_A_CDC_COMP2_B5_CTL__POR }, - { TOMTOM_A_CDC_COMP0_B6_CTL, TOMTOM_A_CDC_COMP0_B6_CTL__POR }, - { TOMTOM_A_CDC_COMP1_B6_CTL, TOMTOM_A_CDC_COMP1_B6_CTL__POR }, - { TOMTOM_A_CDC_COMP2_B6_CTL, TOMTOM_A_CDC_COMP2_B6_CTL__POR }, - { TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS, - TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR }, - { TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS, - TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR }, - { TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS, - TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR }, - { TOMTOM_A_CDC_COMP0_FS_CFG, TOMTOM_A_CDC_COMP0_FS_CFG__POR }, - { TOMTOM_A_CDC_COMP1_FS_CFG, TOMTOM_A_CDC_COMP1_FS_CFG__POR }, - { TOMTOM_A_CDC_COMP2_FS_CFG, TOMTOM_A_CDC_COMP2_FS_CFG__POR }, - { TOMTOM_A_CDC_CONN_RX1_B1_CTL, TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX1_B2_CTL, TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX1_B3_CTL, TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX2_B1_CTL, TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX2_B2_CTL, 
TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX2_B3_CTL, TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX3_B1_CTL, TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX3_B2_CTL, TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX4_B1_CTL, TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX4_B2_CTL, TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX5_B1_CTL, TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX5_B2_CTL, TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX6_B1_CTL, TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX6_B2_CTL, TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX7_B1_CTL, TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX7_B2_CTL, TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX7_B3_CTL, TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR }, - { TOMTOM_A_CDC_CONN_ANC_B1_CTL, TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_ANC_B2_CTL, TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_B1_CTL, TOMTOM_A_CDC_CONN_TX_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_B2_CTL, TOMTOM_A_CDC_CONN_TX_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_B3_CTL, TOMTOM_A_CDC_CONN_TX_B3_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_B4_CTL, TOMTOM_A_CDC_CONN_TX_B4_CTL__POR }, - { TOMTOM_A_CDC_CONN_EQ1_B1_CTL, TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_EQ1_B2_CTL, TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_EQ1_B3_CTL, TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR }, - { TOMTOM_A_CDC_CONN_EQ1_B4_CTL, TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR }, - { TOMTOM_A_CDC_CONN_EQ2_B1_CTL, TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_EQ2_B2_CTL, TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_EQ2_B3_CTL, TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR }, - { TOMTOM_A_CDC_CONN_EQ2_B4_CTL, TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR }, - { TOMTOM_A_CDC_CONN_SRC1_B1_CTL, TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_SRC1_B2_CTL, TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_SRC2_B1_CTL, TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_SRC2_B2_CTL, TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B1_CTL, TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B2_CTL, TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B3_CTL, TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B4_CTL, TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B5_CTL, TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B6_CTL, TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B7_CTL, TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B8_CTL, TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B9_CTL, TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B10_CTL, - TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR }, - { TOMTOM_A_CDC_CONN_TX_SB_B11_CTL, - TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX_SB_B1_CTL, TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR }, - { TOMTOM_A_CDC_CONN_RX_SB_B2_CTL, TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR }, - { TOMTOM_A_CDC_CONN_CLSH_CTL, TOMTOM_A_CDC_CONN_CLSH_CTL__POR }, - { TOMTOM_A_CDC_CONN_MISC, TOMTOM_A_CDC_CONN_MISC__POR }, - { TOMTOM_A_CDC_CONN_RX8_B1_CTL, TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL, - TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST, - TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR }, - { 
TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD, - TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS, - TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK, - TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING, - TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL, - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST, - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD, - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS, - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK, - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR }, - { TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING, - TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR }, - { TOMTOM_A_CDC_MBHC_EN_CTL, TOMTOM_A_CDC_MBHC_EN_CTL__POR }, - { TOMTOM_A_CDC_MBHC_FIR_B1_CFG, TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR }, - { TOMTOM_A_CDC_MBHC_FIR_B2_CFG, TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR }, - { TOMTOM_A_CDC_MBHC_TIMER_B1_CTL, TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR }, - { TOMTOM_A_CDC_MBHC_TIMER_B2_CTL, TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR }, - { TOMTOM_A_CDC_MBHC_TIMER_B3_CTL, TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR }, - { TOMTOM_A_CDC_MBHC_TIMER_B4_CTL, TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR }, - { TOMTOM_A_CDC_MBHC_TIMER_B5_CTL, TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR }, - { TOMTOM_A_CDC_MBHC_TIMER_B6_CTL, TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR }, - { TOMTOM_A_CDC_MBHC_B1_STATUS, TOMTOM_A_CDC_MBHC_B1_STATUS__POR }, - { TOMTOM_A_CDC_MBHC_B2_STATUS, TOMTOM_A_CDC_MBHC_B2_STATUS__POR }, - { TOMTOM_A_CDC_MBHC_B3_STATUS, TOMTOM_A_CDC_MBHC_B3_STATUS__POR }, - { TOMTOM_A_CDC_MBHC_B4_STATUS, TOMTOM_A_CDC_MBHC_B4_STATUS__POR }, - { TOMTOM_A_CDC_MBHC_B5_STATUS, TOMTOM_A_CDC_MBHC_B5_STATUS__POR }, - { TOMTOM_A_CDC_MBHC_B1_CTL, TOMTOM_A_CDC_MBHC_B1_CTL__POR }, - { TOMTOM_A_CDC_MBHC_B2_CTL, TOMTOM_A_CDC_MBHC_B2_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B1_CTL, TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B2_CTL, TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B3_CTL, TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B4_CTL, TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B5_CTL, TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B6_CTL, TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B7_CTL, TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B8_CTL, TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B9_CTL, TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B10_CTL, TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B11_CTL, TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR }, - { TOMTOM_A_CDC_MBHC_VOLT_B12_CTL, TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR }, - { TOMTOM_A_CDC_MBHC_CLK_CTL, TOMTOM_A_CDC_MBHC_CLK_CTL__POR }, - { TOMTOM_A_CDC_MBHC_INT_CTL, TOMTOM_A_CDC_MBHC_INT_CTL__POR }, - { TOMTOM_A_CDC_MBHC_DEBUG_CTL, TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR }, - { TOMTOM_A_CDC_MBHC_SPARE, TOMTOM_A_CDC_MBHC_SPARE__POR }, - { TOMTOM_A_CDC_RX8_B1_CTL, TOMTOM_A_CDC_RX8_B1_CTL__POR }, - { TOMTOM_A_CDC_RX8_B2_CTL, TOMTOM_A_CDC_RX8_B2_CTL__POR }, - { TOMTOM_A_CDC_RX8_B3_CTL, TOMTOM_A_CDC_RX8_B3_CTL__POR }, - { TOMTOM_A_CDC_RX8_B4_CTL, TOMTOM_A_CDC_RX8_B4_CTL__POR }, - { TOMTOM_A_CDC_RX8_B5_CTL, 
TOMTOM_A_CDC_RX8_B5_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_B6_CTL, TOMTOM_A_CDC_RX8_B6_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL,
-		TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR },
-	{ TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL,
-		TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0,
-		TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1,
-		TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2,
-		TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3,
-		TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4,
-		TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5,
-		TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6,
-		TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR },
-	{ TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7,
-		TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR },
-	{ TOMTOM_A_CDC_BOOST_MODE_CTL, TOMTOM_A_CDC_BOOST_MODE_CTL__POR },
-	{ TOMTOM_A_CDC_BOOST_THRESHOLD, TOMTOM_A_CDC_BOOST_THRESHOLD__POR },
-	{ TOMTOM_A_CDC_BOOST_TAP_SEL, TOMTOM_A_CDC_BOOST_TAP_SEL__POR },
-	{ TOMTOM_A_CDC_BOOST_HOLD_TIME, TOMTOM_A_CDC_BOOST_HOLD_TIME__POR },
-	{ TOMTOM_A_CDC_BOOST_TRGR_EN, TOMTOM_A_CDC_BOOST_TRGR_EN__POR },
-};
-
-static bool wcd9330_is_readable_register(struct device *dev, unsigned int reg)
-{
-	return tomtom_reg_readable[reg];
-}
-
-static bool tomtom_is_digital_gain_register(unsigned int reg)
-{
-	bool rtn = false;
-
-	switch (reg) {
-	case TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL:
-	case TOMTOM_A_CDC_TX1_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX2_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX3_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX4_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX5_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX6_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX7_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX8_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX9_VOL_CTL_GAIN:
-	case TOMTOM_A_CDC_TX10_VOL_CTL_GAIN:
-		rtn = true;
-		break;
-	default:
-		break;
-	}
-	return rtn;
-}
-
-static bool wcd9330_is_volatile_register(struct device *dev, unsigned int reg)
-{
-
-	if ((reg >= TOMTOM_A_CDC_MBHC_EN_CTL) || (reg < 0x100))
-		return true;
-
-	/* IIR Coeff registers are not cacheable */
-	if ((reg >= TOMTOM_A_CDC_IIR1_COEF_B1_CTL) &&
-		(reg <= TOMTOM_A_CDC_IIR2_COEF_B2_CTL))
-		return true;
-
-	/* ANC filter registers are not cacheable */
-	if ((reg >= TOMTOM_A_CDC_ANC1_IIR_B1_CTL) &&
-		(reg <= TOMTOM_A_CDC_ANC1_LPF_B2_CTL))
-		return true;
-
-	if ((reg >= TOMTOM_A_CDC_ANC2_IIR_B1_CTL) &&
-		(reg <= TOMTOM_A_CDC_ANC2_LPF_B2_CTL))
-		return true;
-
-	if (((reg >= TOMTOM_A_CDC_SPKR_CLIPDET_VAL0 &&
-	      reg <= TOMTOM_A_CDC_SPKR_CLIPDET_VAL7)) ||
-	    ((reg >= TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0) &&
-	     (reg <= TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7)))
-		return true;
-
-	/* Digital gain register is not cacheable so we have to write
-	 * the setting even it is the same
-	 */
-	if (tomtom_is_digital_gain_register(reg))
-		return true;
-
-	switch (reg) {
-	case TOMTOM_A_RX_HPH_L_STATUS:
-	case TOMTOM_A_RX_HPH_R_STATUS:
-	case TOMTOM_A_MBHC_INSERT_DET_STATUS:
-	case TOMTOM_A_RX_HPH_CNP_EN:
-	case TOMTOM_A_CDC_VBAT_GAIN_MON_VAL:
-	case TOMTOM_A_CDC_MAD_MAIN_CTL_1:
-	case TOMTOM_A_CDC_MAD_AUDIO_CTL_3:
-	case TOMTOM_A_CDC_MAD_AUDIO_CTL_4:
-	case TOMTOM_A_INTR_MODE:
-	case TOMTOM_A_INTR2_MASK0:
-	case TOMTOM_A_INTR2_STATUS0:
-	case TOMTOM_A_INTR2_CLEAR0:
-	case TOMTOM_SB_PGD_PORT_TX_BASE:
-	case TOMTOM_SB_PGD_PORT_RX_BASE:
-	case TOMTOM_A_CDC_ANC1_IIR_B1_CTL:
-	case TOMTOM_A_CDC_ANC1_GAIN_CTL:
-	case TOMTOM_A_SVASS_SPE_INBOX_TRG:
-		return true;
-	default:
-		return false;
-	}
-}
-
-struct regmap_config wcd9330_regmap_config = {
-	.reg_bits = 16,
-	.val_bits = 8,
-	.cache_type = REGCACHE_RBTREE,
-	.reg_defaults = wcd9330_defaults,
-	.num_reg_defaults = ARRAY_SIZE(wcd9330_defaults),
-	.max_register = WCD9330_MAX_REGISTER,
-	.volatile_reg = wcd9330_is_volatile_register,
-	.readable_reg = wcd9330_is_readable_register,
-};
diff --git a/include/linux/mfd/wcd9xxx/wcd9330_registers.h b/include/linux/mfd/wcd9xxx/wcd9330_registers.h
deleted file mode 100644
index c37d25f3f528..000000000000
--- a/include/linux/mfd/wcd9xxx/wcd9330_registers.h
+++ /dev/null
@@ -1,1626 +0,0 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef WCD9330_REGISTERS_H
-#define WCD9330_REGISTERS_H
-
-#include
-
-#define TOMTOM_A_CHIP_CTL (0x000)
-#define TOMTOM_A_CHIP_CTL__POR (0x38)
-#define TOMTOM_A_CHIP_STATUS (0x001)
-#define TOMTOM_A_CHIP_STATUS__POR (0x00)
-#define TOMTOM_A_CHIP_ID_BYTE_0 (0x004)
-#define TOMTOM_A_CHIP_ID_BYTE_0__POR (0x00)
-#define TOMTOM_A_CHIP_ID_BYTE_1 (0x005)
-#define TOMTOM_A_CHIP_ID_BYTE_1__POR (0x00)
-#define TOMTOM_A_CHIP_ID_BYTE_2 (0x006)
-#define TOMTOM_A_CHIP_ID_BYTE_2__POR (0x05)
-#define TOMTOM_A_CHIP_ID_BYTE_3 (0x007)
-#define TOMTOM_A_CHIP_ID_BYTE_3__POR (0x01)
-#define TOMTOM_A_CHIP_I2C_SLAVE_ID (0x008)
-#define TOMTOM_A_CHIP_I2C_SLAVE_ID__POR (0x01)
-#define TOMTOM_A_SLAVE_ID_1 (0x00C)
-#define TOMTOM_A_SLAVE_ID_1__POR (0x77)
-#define TOMTOM_A_SLAVE_ID_2 (0x00D)
-#define TOMTOM_A_SLAVE_ID_2__POR (0x66)
-#define TOMTOM_A_SLAVE_ID_3 (0x00E)
-#define TOMTOM_A_SLAVE_ID_3__POR (0x55)
-#define TOMTOM_A_PIN_CTL_OE0 (0x010)
-#define TOMTOM_A_PIN_CTL_OE0__POR (0x00)
-#define TOMTOM_A_PIN_CTL_OE1 (0x011)
-#define TOMTOM_A_PIN_CTL_OE1__POR (0x00)
-#define TOMTOM_A_PIN_CTL_OE2 (0x012)
-#define TOMTOM_A_PIN_CTL_OE2__POR (0x00)
-#define TOMTOM_A_PIN_CTL_DATA0 (0x013)
-#define TOMTOM_A_PIN_CTL_DATA0__POR (0x00)
-#define TOMTOM_A_PIN_CTL_DATA1 (0x014)
-#define TOMTOM_A_PIN_CTL_DATA1__POR (0x00)
-#define TOMTOM_A_PIN_CTL_DATA2 (0x015)
-#define TOMTOM_A_PIN_CTL_DATA2__POR (0x00)
-#define TOMTOM_A_HDRIVE_GENERIC (0x018)
-#define TOMTOM_A_HDRIVE_GENERIC__POR (0x00)
-#define TOMTOM_A_HDRIVE_OVERRIDE (0x019)
-#define TOMTOM_A_HDRIVE_OVERRIDE__POR (0x08)
-#define TOMTOM_A_ANA_CSR_WAIT_STATE (0x01C)
-#define TOMTOM_A_ANA_CSR_WAIT_STATE__POR (0x44)
-#define TOMTOM_A_PROCESS_MONITOR_CTL0 (0x020)
-#define TOMTOM_A_PROCESS_MONITOR_CTL0__POR (0x80)
-#define TOMTOM_A_PROCESS_MONITOR_CTL1 (0x021)
-#define TOMTOM_A_PROCESS_MONITOR_CTL1__POR (0x00)
-#define TOMTOM_A_PROCESS_MONITOR_CTL2 (0x022)
-#define TOMTOM_A_PROCESS_MONITOR_CTL2__POR (0x00)
-#define TOMTOM_A_PROCESS_MONITOR_CTL3 (0x023)
-#define TOMTOM_A_PROCESS_MONITOR_CTL3__POR (0x01)
-#define TOMTOM_A_QFUSE_CTL (0x028)
-#define TOMTOM_A_QFUSE_CTL__POR (0x00)
-#define
TOMTOM_A_QFUSE_STATUS (0x029) -#define TOMTOM_A_QFUSE_STATUS__POR (0x00) -#define TOMTOM_A_QFUSE_DATA_OUT0 (0x02A) -#define TOMTOM_A_QFUSE_DATA_OUT0__POR (0x00) -#define TOMTOM_A_QFUSE_DATA_OUT1 (0x02B) -#define TOMTOM_A_QFUSE_DATA_OUT1__POR (0x00) -#define TOMTOM_A_QFUSE_DATA_OUT2 (0x02C) -#define TOMTOM_A_QFUSE_DATA_OUT2__POR (0x00) -#define TOMTOM_A_QFUSE_DATA_OUT3 (0x02D) -#define TOMTOM_A_QFUSE_DATA_OUT3__POR (0x00) -#define TOMTOM_A_QFUSE_DATA_OUT4 (0x02E) -#define TOMTOM_A_QFUSE_DATA_OUT4__POR (0x00) -#define TOMTOM_A_QFUSE_DATA_OUT5 (0x02F) -#define TOMTOM_A_QFUSE_DATA_OUT5__POR (0x00) -#define TOMTOM_A_QFUSE_DATA_OUT6 (0x030) -#define TOMTOM_A_QFUSE_DATA_OUT6__POR (0x00) -#define TOMTOM_A_QFUSE_DATA_OUT7 (0x031) -#define TOMTOM_A_QFUSE_DATA_OUT7__POR (0x00) -#define TOMTOM_A_CDC_CTL (0x034) -#define TOMTOM_A_CDC_CTL__POR (0x00) -#define TOMTOM_A_LEAKAGE_CTL (0x03C) -#define TOMTOM_A_LEAKAGE_CTL__POR (0x04) -#define TOMTOM_A_SVASS_MEM_PTR0 (0x044) -#define TOMTOM_A_SVASS_MEM_PTR0__POR (0x00) -#define TOMTOM_A_SVASS_MEM_PTR1 (0x045) -#define TOMTOM_A_SVASS_MEM_PTR1__POR (0x00) -#define TOMTOM_A_SVASS_MEM_PTR2 (0x046) -#define TOMTOM_A_SVASS_MEM_PTR2__POR (0x00) -#define TOMTOM_A_SVASS_MEM_CTL (0x048) -#define TOMTOM_A_SVASS_MEM_CTL__POR (0x04) -#define TOMTOM_A_SVASS_MEM_BANK (0x049) -#define TOMTOM_A_SVASS_MEM_BANK__POR (0x00) -#define TOMTOM_A_DMIC_B1_CTL (0x04A) -#define TOMTOM_A_DMIC_B1_CTL__POR (0x00) -#define TOMTOM_A_DMIC_B2_CTL (0x04B) -#define TOMTOM_A_DMIC_B2_CTL__POR (0x00) -#define TOMTOM_A_SVASS_CLKRST_CTL (0x04C) -#define TOMTOM_A_SVASS_CLKRST_CTL__POR (0x00) -#define TOMTOM_A_SVASS_CPAR_CFG (0x04D) -#define TOMTOM_A_SVASS_CPAR_CFG__POR (0x00) -#define TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD (0x04E) -#define TOMTOM_A_SVASS_BUF_RDY_INT_PERIOD__POR (0x14) -#define TOMTOM_A_SVASS_CPAR_WDOG_CFG (0x04F) -#define TOMTOM_A_SVASS_CPAR_WDOG_CFG__POR (0x00) -#define TOMTOM_A_SVASS_CFG (0x050) -#define TOMTOM_A_SVASS_CFG__POR (0x01) -#define TOMTOM_A_SVASS_SPE_CFG (0x051) -#define TOMTOM_A_SVASS_SPE_CFG__POR (0x04) -#define TOMTOM_A_SVASS_STATUS (0x052) -#define TOMTOM_A_SVASS_STATUS__POR (0x00) -#define TOMTOM_A_SVASS_INT_MASK (0x053) -#define TOMTOM_A_SVASS_INT_MASK__POR (0x3F) -#define TOMTOM_A_SVASS_INT_STATUS (0x054) -#define TOMTOM_A_SVASS_INT_STATUS__POR (0x00) -#define TOMTOM_A_SVASS_INT_CLR (0x055) -#define TOMTOM_A_SVASS_INT_CLR__POR (0x00) -#define TOMTOM_A_SVASS_DEBUG (0x056) -#define TOMTOM_A_SVASS_DEBUG__POR (0x00) -#define TOMTOM_A_SVASS_SPE_BKUP_INT (0x057) -#define TOMTOM_A_SVASS_SPE_BKUP_INT__POR (0x00) -#define TOMTOM_A_SVASS_MEM_ACC (0x058) -#define TOMTOM_A_SVASS_MEM_ACC__POR (0x00) -#define TOMTOM_A_MEM_LEAKAGE_CTL (0x059) -#define TOMTOM_A_MEM_LEAKAGE_CTL__POR (0x04) -#define TOMTOM_A_SVASS_SPE_INBOX_TRG (0x05A) -#define TOMTOM_A_SVASS_SPE_INBOX_TRG__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_0 (0x060) -#define TOMTOM_A_SVASS_SPE_INBOX_0__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_1 (0x061) -#define TOMTOM_A_SVASS_SPE_INBOX_1__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_2 (0x062) -#define TOMTOM_A_SVASS_SPE_INBOX_2__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_3 (0x063) -#define TOMTOM_A_SVASS_SPE_INBOX_3__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_4 (0x064) -#define TOMTOM_A_SVASS_SPE_INBOX_4__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_5 (0x065) -#define TOMTOM_A_SVASS_SPE_INBOX_5__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_6 (0x066) -#define TOMTOM_A_SVASS_SPE_INBOX_6__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_7 (0x067) -#define 
TOMTOM_A_SVASS_SPE_INBOX_7__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_8 (0x068) -#define TOMTOM_A_SVASS_SPE_INBOX_8__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_9 (0x069) -#define TOMTOM_A_SVASS_SPE_INBOX_9__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_10 (0x06A) -#define TOMTOM_A_SVASS_SPE_INBOX_10__POR (0x00) -#define TOMTOM_A_SVASS_SPE_INBOX_11 (0x06B) -#define TOMTOM_A_SVASS_SPE_INBOX_11__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_0 (0x070) -#define TOMTOM_A_SVASS_SPE_OUTBOX_0__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_1 (0x071) -#define TOMTOM_A_SVASS_SPE_OUTBOX_1__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_2 (0x072) -#define TOMTOM_A_SVASS_SPE_OUTBOX_2__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_3 (0x073) -#define TOMTOM_A_SVASS_SPE_OUTBOX_3__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_4 (0x074) -#define TOMTOM_A_SVASS_SPE_OUTBOX_4__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_5 (0x075) -#define TOMTOM_A_SVASS_SPE_OUTBOX_5__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_6 (0x076) -#define TOMTOM_A_SVASS_SPE_OUTBOX_6__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_7 (0x077) -#define TOMTOM_A_SVASS_SPE_OUTBOX_7__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_8 (0x078) -#define TOMTOM_A_SVASS_SPE_OUTBOX_8__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_9 (0x079) -#define TOMTOM_A_SVASS_SPE_OUTBOX_9__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_10 (0x07A) -#define TOMTOM_A_SVASS_SPE_OUTBOX_10__POR (0x00) -#define TOMTOM_A_SVASS_SPE_OUTBOX_11 (0x07B) -#define TOMTOM_A_SVASS_SPE_OUTBOX_11__POR (0x00) -#define TOMTOM_A_INTR_MODE (0x090) -#define TOMTOM_A_INTR_MODE__POR (0x00) -#define TOMTOM_A_INTR1_MASK0 (0x094) -#define TOMTOM_A_INTR1_MASK0__POR (0xFF) -#define TOMTOM_A_INTR1_MASK1 (0x095) -#define TOMTOM_A_INTR1_MASK1__POR (0xFF) -#define TOMTOM_A_INTR1_MASK2 (0x096) -#define TOMTOM_A_INTR1_MASK2__POR (0xFF) -#define TOMTOM_A_INTR1_MASK3 (0x097) -#define TOMTOM_A_INTR1_MASK3__POR (0xFF) -#define TOMTOM_A_INTR1_STATUS0 (0x098) -#define TOMTOM_A_INTR1_STATUS0__POR (0x00) -#define TOMTOM_A_INTR1_STATUS1 (0x099) -#define TOMTOM_A_INTR1_STATUS1__POR (0x00) -#define TOMTOM_A_INTR1_STATUS2 (0x09A) -#define TOMTOM_A_INTR1_STATUS2__POR (0x00) -#define TOMTOM_A_INTR1_STATUS3 (0x09B) -#define TOMTOM_A_INTR1_STATUS3__POR (0x00) -#define TOMTOM_A_INTR1_CLEAR0 (0x09C) -#define TOMTOM_A_INTR1_CLEAR0__POR (0x00) -#define TOMTOM_A_INTR1_CLEAR1 (0x09D) -#define TOMTOM_A_INTR1_CLEAR1__POR (0x00) -#define TOMTOM_A_INTR1_CLEAR2 (0x09E) -#define TOMTOM_A_INTR1_CLEAR2__POR (0x00) -#define TOMTOM_A_INTR1_CLEAR3 (0x09F) -#define TOMTOM_A_INTR1_CLEAR3__POR (0x00) -#define TOMTOM_A_INTR1_LEVEL0 (0x0A0) -#define TOMTOM_A_INTR1_LEVEL0__POR (0x01) -#define TOMTOM_A_INTR1_LEVEL1 (0x0A1) -#define TOMTOM_A_INTR1_LEVEL1__POR (0x00) -#define TOMTOM_A_INTR1_LEVEL2 (0x0A2) -#define TOMTOM_A_INTR1_LEVEL2__POR (0x40) -#define TOMTOM_A_INTR1_LEVEL3 (0x0A3) -#define TOMTOM_A_INTR1_LEVEL3__POR (0x00) -#define TOMTOM_A_INTR1_TEST0 (0x0A4) -#define TOMTOM_A_INTR1_TEST0__POR (0x00) -#define TOMTOM_A_INTR1_TEST1 (0x0A5) -#define TOMTOM_A_INTR1_TEST1__POR (0x00) -#define TOMTOM_A_INTR1_TEST2 (0x0A6) -#define TOMTOM_A_INTR1_TEST2__POR (0x00) -#define TOMTOM_A_INTR1_TEST3 (0x0A7) -#define TOMTOM_A_INTR1_TEST3__POR (0x00) -#define TOMTOM_A_INTR1_SET0 (0x0A8) -#define TOMTOM_A_INTR1_SET0__POR (0x00) -#define TOMTOM_A_INTR1_SET1 (0x0A9) -#define TOMTOM_A_INTR1_SET1__POR (0x00) -#define TOMTOM_A_INTR1_SET2 (0x0AA) -#define TOMTOM_A_INTR1_SET2__POR (0x00) -#define TOMTOM_A_INTR1_SET3 (0x0AB) -#define 
TOMTOM_A_INTR1_SET3__POR (0x00) -#define TOMTOM_A_INTR2_MASK0 (0x0B0) -#define TOMTOM_A_INTR2_MASK0__POR (0xFF) -#define TOMTOM_A_INTR2_STATUS0 (0x0B2) -#define TOMTOM_A_INTR2_STATUS0__POR (0x00) -#define TOMTOM_A_INTR2_CLEAR0 (0x0B4) -#define TOMTOM_A_INTR2_CLEAR0__POR (0x00) -#define TOMTOM_A_INTR2_LEVEL0 (0x0B6) -#define TOMTOM_A_INTR2_LEVEL0__POR (0x00) -#define TOMTOM_A_INTR2_TEST0 (0x0B8) -#define TOMTOM_A_INTR2_TEST0__POR (0x00) -#define TOMTOM_A_INTR2_SET0 (0x0BA) -#define TOMTOM_A_INTR2_SET0__POR (0x00) -#define TOMTOM_A_CDC_TX_I2S_SCK_MODE (0x0C0) -#define TOMTOM_A_CDC_TX_I2S_SCK_MODE__POR (0x00) -#define TOMTOM_A_CDC_TX_I2S_WS_MODE (0x0C1) -#define TOMTOM_A_CDC_TX_I2S_WS_MODE__POR (0x00) -#define TOMTOM_A_CDC_DMIC_DATA0_MODE (0x0C4) -#define TOMTOM_A_CDC_DMIC_DATA0_MODE__POR (0x00) -#define TOMTOM_A_CDC_DMIC_CLK0_MODE (0x0C5) -#define TOMTOM_A_CDC_DMIC_CLK0_MODE__POR (0x00) -#define TOMTOM_A_CDC_DMIC_DATA1_MODE (0x0C6) -#define TOMTOM_A_CDC_DMIC_DATA1_MODE__POR (0x00) -#define TOMTOM_A_CDC_DMIC_CLK1_MODE (0x0C7) -#define TOMTOM_A_CDC_DMIC_CLK1_MODE__POR (0x00) -#define TOMTOM_A_CDC_RX_I2S_SCK_MODE (0x0C8) -#define TOMTOM_A_CDC_RX_I2S_SCK_MODE__POR (0x00) -#define TOMTOM_A_CDC_RX_I2S_WS_MODE (0x0C9) -#define TOMTOM_A_CDC_RX_I2S_WS_MODE__POR (0x00) -#define TOMTOM_A_CDC_DMIC_DATA2_MODE (0x0CA) -#define TOMTOM_A_CDC_DMIC_DATA2_MODE__POR (0x00) -#define TOMTOM_A_CDC_DMIC_CLK2_MODE (0x0CB) -#define TOMTOM_A_CDC_DMIC_CLK2_MODE__POR (0x00) -#define TOMTOM_A_CDC_INTR1_MODE (0x0CC) -#define TOMTOM_A_CDC_INTR1_MODE__POR (0x00) -#define TOMTOM_A_CDC_SB_NRZ_SEL_MODE (0x0CD) -#define TOMTOM_A_CDC_SB_NRZ_SEL_MODE__POR (0x00) -#define TOMTOM_A_CDC_INTR2_MODE (0x0CE) -#define TOMTOM_A_CDC_INTR2_MODE__POR (0x00) -#define TOMTOM_A_CDC_RF_PA_ON_MODE (0x0CF) -#define TOMTOM_A_CDC_RF_PA_ON_MODE__POR (0x00) -#define TOMTOM_A_CDC_BOOST_MODE (0x0D0) -#define TOMTOM_A_CDC_BOOST_MODE__POR (0x00) -#define TOMTOM_A_CDC_JTCK_MODE (0x0D1) -#define TOMTOM_A_CDC_JTCK_MODE__POR (0x00) -#define TOMTOM_A_CDC_JTDI_MODE (0x0D2) -#define TOMTOM_A_CDC_JTDI_MODE__POR (0x00) -#define TOMTOM_A_CDC_JTMS_MODE (0x0D3) -#define TOMTOM_A_CDC_JTMS_MODE__POR (0x00) -#define TOMTOM_A_CDC_JTDO_MODE (0x0D4) -#define TOMTOM_A_CDC_JTDO_MODE__POR (0x00) -#define TOMTOM_A_CDC_JTRST_MODE (0x0D5) -#define TOMTOM_A_CDC_JTRST_MODE__POR (0x00) -#define TOMTOM_A_CDC_BIST_MODE_MODE (0x0D6) -#define TOMTOM_A_CDC_BIST_MODE_MODE__POR (0x00) -#define TOMTOM_A_CDC_MAD_MAIN_CTL_1 (0x0E0) -#define TOMTOM_A_CDC_MAD_MAIN_CTL_1__POR (0x00) -#define TOMTOM_A_CDC_MAD_MAIN_CTL_2 (0x0E1) -#define TOMTOM_A_CDC_MAD_MAIN_CTL_2__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_1 (0x0E2) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_1__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_2 (0x0E3) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_2__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_3 (0x0E4) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_3__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_4 (0x0E5) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_4__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_5 (0x0E6) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_5__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_6 (0x0E7) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_6__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_7 (0x0E8) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_7__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_8 (0x0E9) -#define TOMTOM_A_CDC_MAD_AUDIO_CTL_8__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR (0x0EA) -#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_PTR__POR (0x00) -#define TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL (0x0EB) -#define 
TOMTOM_A_CDC_MAD_AUDIO_IIR_CTL_VAL__POR (0x40) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_1 (0x0EC) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_1__POR (0x00) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_2 (0x0ED) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_2__POR (0x00) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_3 (0x0EE) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_3__POR (0x00) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_4 (0x0EF) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_4__POR (0x00) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_5 (0x0F0) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_5__POR (0x00) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_6 (0x0F1) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_6__POR (0x00) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_7 (0x0F2) -#define TOMTOM_A_CDC_MAD_ULTR_CTL_7__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_1 (0x0F3) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_1__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_2 (0x0F4) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_2__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_3 (0x0F5) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_3__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_4 (0x0F6) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_4__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_5 (0x0F7) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_5__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_6 (0x0F8) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_6__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_7 (0x0F9) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_7__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_8 (0x0FA) -#define TOMTOM_A_CDC_MAD_BEACON_CTL_8__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR (0x0FB) -#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_PTR__POR (0x00) -#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL (0x0FC) -#define TOMTOM_A_CDC_MAD_BEACON_IIR_CTL_VAL__POR (0x00) -#define TOMTOM_A_CDC_MAD_INP_SEL (0x0FD) -#define TOMTOM_A_CDC_MAD_INP_SEL__POR (0x00) -#define TOMTOM_A_BIAS_REF_CTL (0x100) -#define TOMTOM_A_BIAS_REF_CTL__POR (0x1C) -#define TOMTOM_A_BIAS_CENTRAL_BG_CTL (0x101) -#define TOMTOM_A_BIAS_CENTRAL_BG_CTL__POR (0x50) -#define TOMTOM_A_BIAS_PRECHRG_CTL (0x102) -#define TOMTOM_A_BIAS_PRECHRG_CTL__POR (0x07) -#define TOMTOM_A_BIAS_CURR_CTL_1 (0x103) -#define TOMTOM_A_BIAS_CURR_CTL_1__POR (0x52) -#define TOMTOM_A_BIAS_CURR_CTL_2 (0x104) -#define TOMTOM_A_BIAS_CURR_CTL_2__POR (0x00) -#define TOMTOM_A_BIAS_OSC_BG_CTL (0x105) -#define TOMTOM_A_BIAS_OSC_BG_CTL__POR (0x36) -#define TOMTOM_A_CLK_BUFF_EN1 (0x108) -#define TOMTOM_A_CLK_BUFF_EN1__POR (0x04) -#define TOMTOM_A_CLK_BUFF_EN2 (0x109) -#define TOMTOM_A_CLK_BUFF_EN2__POR (0x02) -#define TOMTOM_A_LDO_L_MODE_1 (0x10A) -#define TOMTOM_A_LDO_L_MODE_1__POR (0x08) -#define TOMTOM_A_LDO_L_MODE_2 (0x10B) -#define TOMTOM_A_LDO_L_MODE_2__POR (0x50) -#define TOMTOM_A_LDO_L_CTRL_1 (0x10C) -#define TOMTOM_A_LDO_L_CTRL_1__POR (0x70) -#define TOMTOM_A_LDO_L_CTRL_2 (0x10D) -#define TOMTOM_A_LDO_L_CTRL_2__POR (0x55) -#define TOMTOM_A_LDO_L_CTRL_3 (0x10E) -#define TOMTOM_A_LDO_L_CTRL_3__POR (0x56) -#define TOMTOM_A_LDO_L_CTRL_4 (0x10F) -#define TOMTOM_A_LDO_L_CTRL_4__POR (0x55) -#define TOMTOM_A_LDO_H_MODE_1 (0x110) -#define TOMTOM_A_LDO_H_MODE_1__POR (0x65) -#define TOMTOM_A_LDO_H_MODE_2 (0x111) -#define TOMTOM_A_LDO_H_MODE_2__POR (0xA8) -#define TOMTOM_A_LDO_H_LOOP_CTL (0x112) -#define TOMTOM_A_LDO_H_LOOP_CTL__POR (0x6B) -#define TOMTOM_A_LDO_H_COMP_1 (0x113) -#define TOMTOM_A_LDO_H_COMP_1__POR (0x84) -#define TOMTOM_A_LDO_H_COMP_2 (0x114) -#define TOMTOM_A_LDO_H_COMP_2__POR (0xE0) -#define TOMTOM_A_LDO_H_BIAS_1 (0x115) -#define TOMTOM_A_LDO_H_BIAS_1__POR (0x6D) -#define TOMTOM_A_LDO_H_BIAS_2 (0x116) -#define 
TOMTOM_A_LDO_H_BIAS_2__POR (0xA5) -#define TOMTOM_A_LDO_H_BIAS_3 (0x117) -#define TOMTOM_A_LDO_H_BIAS_3__POR (0x60) -#define TOMTOM_A_VBAT_CLK (0x118) -#define TOMTOM_A_VBAT_CLK__POR (0x03) -#define TOMTOM_A_VBAT_LOOP (0x119) -#define TOMTOM_A_VBAT_LOOP__POR (0x02) -#define TOMTOM_A_VBAT_REF (0x11A) -#define TOMTOM_A_VBAT_REF__POR (0x20) -#define TOMTOM_A_VBAT_ADC_TEST (0x11B) -#define TOMTOM_A_VBAT_ADC_TEST__POR (0x00) -#define TOMTOM_A_VBAT_FE (0x11C) -#define TOMTOM_A_VBAT_FE__POR (0x48) -#define TOMTOM_A_VBAT_BIAS_1 (0x11D) -#define TOMTOM_A_VBAT_BIAS_1__POR (0x03) -#define TOMTOM_A_VBAT_BIAS_2 (0x11E) -#define TOMTOM_A_VBAT_BIAS_2__POR (0x00) -#define TOMTOM_A_VBAT_ADC_DATA_MSB (0x11F) -#define TOMTOM_A_VBAT_ADC_DATA_MSB__POR (0x00) -#define TOMTOM_A_VBAT_ADC_DATA_LSB (0x120) -#define TOMTOM_A_VBAT_ADC_DATA_LSB__POR (0x00) -#define TOMTOM_A_FLL_NREF (0x121) -#define TOMTOM_A_FLL_NREF__POR (0x12) -#define TOMTOM_A_FLL_KDCO_TUNE (0x122) -#define TOMTOM_A_FLL_KDCO_TUNE__POR (0x05) -#define TOMTOM_A_FLL_LOCK_THRESH (0x123) -#define TOMTOM_A_FLL_LOCK_THRESH__POR (0xC2) -#define TOMTOM_A_FLL_LOCK_DET_COUNT (0x124) -#define TOMTOM_A_FLL_LOCK_DET_COUNT__POR (0x40) -#define TOMTOM_A_FLL_DAC_THRESHOLD (0x125) -#define TOMTOM_A_FLL_DAC_THRESHOLD__POR (0xC8) -#define TOMTOM_A_FLL_TEST_DCO_FREERUN (0x126) -#define TOMTOM_A_FLL_TEST_DCO_FREERUN__POR (0x00) -#define TOMTOM_A_FLL_TEST_ENABLE (0x127) -#define TOMTOM_A_FLL_TEST_ENABLE__POR (0x00) -#define TOMTOM_A_MICB_CFILT_1_CTL (0x128) -#define TOMTOM_A_MICB_CFILT_1_CTL__POR (0x40) -#define TOMTOM_A_MICB_CFILT_1_VAL (0x129) -#define TOMTOM_A_MICB_CFILT_1_VAL__POR (0x80) -#define TOMTOM_A_MICB_CFILT_1_PRECHRG (0x12A) -#define TOMTOM_A_MICB_CFILT_1_PRECHRG__POR (0x38) -#define TOMTOM_A_MICB_1_CTL (0x12B) -#define TOMTOM_A_MICB_1_CTL__POR (0x16) -#define TOMTOM_A_MICB_1_INT_RBIAS (0x12C) -#define TOMTOM_A_MICB_1_INT_RBIAS__POR (0x24) -#define TOMTOM_A_MICB_1_MBHC (0x12D) -#define TOMTOM_A_MICB_1_MBHC__POR (0x01) -#define TOMTOM_A_MICB_CFILT_2_CTL (0x12E) -#define TOMTOM_A_MICB_CFILT_2_CTL__POR (0x41) -#define TOMTOM_A_MICB_CFILT_2_VAL (0x12F) -#define TOMTOM_A_MICB_CFILT_2_VAL__POR (0x80) -#define TOMTOM_A_MICB_CFILT_2_PRECHRG (0x130) -#define TOMTOM_A_MICB_CFILT_2_PRECHRG__POR (0x38) -#define TOMTOM_A_MICB_2_CTL (0x131) -#define TOMTOM_A_MICB_2_CTL__POR (0x16) -#define TOMTOM_A_MICB_2_INT_RBIAS (0x132) -#define TOMTOM_A_MICB_2_INT_RBIAS__POR (0x24) -#define TOMTOM_A_MICB_2_MBHC (0x133) -#define TOMTOM_A_MICB_2_MBHC__POR (0x02) -#define TOMTOM_A_MICB_CFILT_3_CTL (0x134) -#define TOMTOM_A_MICB_CFILT_3_CTL__POR (0x40) -#define TOMTOM_A_MICB_CFILT_3_VAL (0x135) -#define TOMTOM_A_MICB_CFILT_3_VAL__POR (0x80) -#define TOMTOM_A_MICB_CFILT_3_PRECHRG (0x136) -#define TOMTOM_A_MICB_CFILT_3_PRECHRG__POR (0x38) -#define TOMTOM_A_MICB_3_CTL (0x137) -#define TOMTOM_A_MICB_3_CTL__POR (0x16) -#define TOMTOM_A_MICB_3_INT_RBIAS (0x138) -#define TOMTOM_A_MICB_3_INT_RBIAS__POR (0x24) -#define TOMTOM_A_MICB_3_MBHC (0x139) -#define TOMTOM_A_MICB_3_MBHC__POR (0x00) -#define TOMTOM_A_MICB_4_CTL (0x13A) -#define TOMTOM_A_MICB_4_CTL__POR (0x16) -#define TOMTOM_A_MICB_4_INT_RBIAS (0x13B) -#define TOMTOM_A_MICB_4_INT_RBIAS__POR (0x24) -#define TOMTOM_A_MICB_4_MBHC (0x13C) -#define TOMTOM_A_MICB_4_MBHC__POR (0x01) -#define TOMTOM_A_SPKR_DRV2_EN (0x13D) -#define TOMTOM_A_SPKR_DRV2_EN__POR (0x6F) -#define TOMTOM_A_SPKR_DRV2_GAIN (0x13E) -#define TOMTOM_A_SPKR_DRV2_GAIN__POR (0x00) -#define TOMTOM_A_SPKR_DRV2_DAC_CTL (0x13F) -#define TOMTOM_A_SPKR_DRV2_DAC_CTL__POR (0x04) -#define 
TOMTOM_A_SPKR_DRV2_OCP_CTL (0x140) -#define TOMTOM_A_SPKR_DRV2_OCP_CTL__POR (0x97) -#define TOMTOM_A_SPKR_DRV2_CLIP_DET (0x141) -#define TOMTOM_A_SPKR_DRV2_CLIP_DET__POR (0x01) -#define TOMTOM_A_SPKR_DRV2_DBG_DAC (0x142) -#define TOMTOM_A_SPKR_DRV2_DBG_DAC__POR (0x05) -#define TOMTOM_A_SPKR_DRV2_DBG_PA (0x143) -#define TOMTOM_A_SPKR_DRV2_DBG_PA__POR (0x18) -#define TOMTOM_A_SPKR_DRV2_DBG_PWRSTG (0x144) -#define TOMTOM_A_SPKR_DRV2_DBG_PWRSTG__POR (0x00) -#define TOMTOM_A_SPKR_DRV2_BIAS_LDO (0x145) -#define TOMTOM_A_SPKR_DRV2_BIAS_LDO__POR (0x45) -#define TOMTOM_A_SPKR_DRV2_BIAS_INT (0x146) -#define TOMTOM_A_SPKR_DRV2_BIAS_INT__POR (0xA5) -#define TOMTOM_A_SPKR_DRV2_BIAS_PA (0x147) -#define TOMTOM_A_SPKR_DRV2_BIAS_PA__POR (0x55) -#define TOMTOM_A_SPKR_DRV2_STATUS_OCP (0x148) -#define TOMTOM_A_SPKR_DRV2_STATUS_OCP__POR (0x00) -#define TOMTOM_A_SPKR_DRV2_STATUS_PA (0x149) -#define TOMTOM_A_SPKR_DRV2_STATUS_PA__POR (0x00) -#define TOMTOM_A_MBHC_INSERT_DETECT (0x14A) -#define TOMTOM_A_MBHC_INSERT_DETECT__POR (0x00) -#define TOMTOM_A_MBHC_INSERT_DET_STATUS (0x14B) -#define TOMTOM_A_MBHC_INSERT_DET_STATUS__POR (0x00) -#define TOMTOM_A_TX_COM_BIAS (0x14C) -#define TOMTOM_A_TX_COM_BIAS__POR (0xF0) -#define TOMTOM_A_MBHC_INSERT_DETECT2 (0x14D) -#define TOMTOM_A_MBHC_INSERT_DETECT2__POR (0xD0) -#define TOMTOM_A_MBHC_SCALING_MUX_1 (0x14E) -#define TOMTOM_A_MBHC_SCALING_MUX_1__POR (0x00) -#define TOMTOM_A_MBHC_SCALING_MUX_2 (0x14F) -#define TOMTOM_A_MBHC_SCALING_MUX_2__POR (0x80) -#define TOMTOM_A_MAD_ANA_CTRL (0x150) -#define TOMTOM_A_MAD_ANA_CTRL__POR (0xF1) -#define TOMTOM_A_TX_SUP_SWITCH_CTRL_1 (0x151) -#define TOMTOM_A_TX_SUP_SWITCH_CTRL_1__POR (0x00) -#define TOMTOM_A_TX_SUP_SWITCH_CTRL_2 (0x152) -#define TOMTOM_A_TX_SUP_SWITCH_CTRL_2__POR (0x80) -#define TOMTOM_A_TX_1_GAIN (0x153) -#define TOMTOM_A_TX_1_GAIN__POR (0x02) -#define TOMTOM_A_TX_1_2_TEST_EN (0x154) -#define TOMTOM_A_TX_1_2_TEST_EN__POR (0xCC) -#define TOMTOM_A_TX_2_GAIN (0x155) -#define TOMTOM_A_TX_2_GAIN__POR (0x02) -#define TOMTOM_A_TX_1_2_ADC_IB (0x156) -#define TOMTOM_A_TX_1_2_ADC_IB__POR (0x44) -#define TOMTOM_A_TX_1_2_ATEST_REFCTRL (0x157) -#define TOMTOM_A_TX_1_2_ATEST_REFCTRL__POR (0x00) -#define TOMTOM_A_TX_1_2_TEST_CTL (0x158) -#define TOMTOM_A_TX_1_2_TEST_CTL__POR (0x38) -#define TOMTOM_A_TX_1_2_TEST_BLOCK_EN (0x159) -#define TOMTOM_A_TX_1_2_TEST_BLOCK_EN__POR (0xFC) -#define TOMTOM_A_TX_1_2_TXFE_CLKDIV (0x15A) -#define TOMTOM_A_TX_1_2_TXFE_CLKDIV__POR (0x55) -#define TOMTOM_A_TX_1_2_SAR_ERR_CH1 (0x15B) -#define TOMTOM_A_TX_1_2_SAR_ERR_CH1__POR (0x00) -#define TOMTOM_A_TX_1_2_SAR_ERR_CH2 (0x15C) -#define TOMTOM_A_TX_1_2_SAR_ERR_CH2__POR (0x00) -#define TOMTOM_A_TX_3_GAIN (0x15D) -#define TOMTOM_A_TX_3_GAIN__POR (0x02) -#define TOMTOM_A_TX_3_4_TEST_EN (0x15E) -#define TOMTOM_A_TX_3_4_TEST_EN__POR (0xCC) -#define TOMTOM_A_TX_4_GAIN (0x15F) -#define TOMTOM_A_TX_4_GAIN__POR (0x02) -#define TOMTOM_A_TX_3_4_ADC_IB (0x160) -#define TOMTOM_A_TX_3_4_ADC_IB__POR (0x44) -#define TOMTOM_A_TX_3_4_ATEST_REFCTRL (0x161) -#define TOMTOM_A_TX_3_4_ATEST_REFCTRL__POR (0x00) -#define TOMTOM_A_TX_3_4_TEST_CTL (0x162) -#define TOMTOM_A_TX_3_4_TEST_CTL__POR (0x38) -#define TOMTOM_A_TX_3_4_TEST_BLOCK_EN (0x163) -#define TOMTOM_A_TX_3_4_TEST_BLOCK_EN__POR (0xFC) -#define TOMTOM_A_TX_3_4_TXFE_CKDIV (0x164) -#define TOMTOM_A_TX_3_4_TXFE_CKDIV__POR (0x55) -#define TOMTOM_A_TX_3_4_SAR_ERR_CH3 (0x165) -#define TOMTOM_A_TX_3_4_SAR_ERR_CH3__POR (0x00) -#define TOMTOM_A_TX_3_4_SAR_ERR_CH4 (0x166) -#define TOMTOM_A_TX_3_4_SAR_ERR_CH4__POR (0x00) -#define 
TOMTOM_A_TX_5_GAIN (0x167) -#define TOMTOM_A_TX_5_GAIN__POR (0x02) -#define TOMTOM_A_TX_5_6_TEST_EN (0x168) -#define TOMTOM_A_TX_5_6_TEST_EN__POR (0xCC) -#define TOMTOM_A_TX_6_GAIN (0x169) -#define TOMTOM_A_TX_6_GAIN__POR (0x02) -#define TOMTOM_A_TX_5_6_ADC_IB (0x16A) -#define TOMTOM_A_TX_5_6_ADC_IB__POR (0x44) -#define TOMTOM_A_TX_5_6_ATEST_REFCTRL (0x16B) -#define TOMTOM_A_TX_5_6_ATEST_REFCTRL__POR (0x00) -#define TOMTOM_A_TX_5_6_TEST_CTL (0x16C) -#define TOMTOM_A_TX_5_6_TEST_CTL__POR (0x38) -#define TOMTOM_A_TX_5_6_TEST_BLOCK_EN (0x16D) -#define TOMTOM_A_TX_5_6_TEST_BLOCK_EN__POR (0xFC) -#define TOMTOM_A_TX_5_6_TXFE_CKDIV (0x16E) -#define TOMTOM_A_TX_5_6_TXFE_CKDIV__POR (0x55) -#define TOMTOM_A_TX_5_6_SAR_ERR_CH5 (0x16F) -#define TOMTOM_A_TX_5_6_SAR_ERR_CH5__POR (0x00) -#define TOMTOM_A_TX_5_6_SAR_ERR_CH6 (0x170) -#define TOMTOM_A_TX_5_6_SAR_ERR_CH6__POR (0x00) -#define TOMTOM_A_TX_7_MBHC_EN (0x171) -#define TOMTOM_A_TX_7_MBHC_EN__POR (0x0C) -#define TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL (0x172) -#define TOMTOM_A_TX_7_MBHC_ATEST_REFCTRL__POR (0x00) -#define TOMTOM_A_TX_7_MBHC_ADC (0x173) -#define TOMTOM_A_TX_7_MBHC_ADC__POR (0x44) -#define TOMTOM_A_TX_7_MBHC_TEST_CTL (0x174) -#define TOMTOM_A_TX_7_MBHC_TEST_CTL__POR (0x38) -#define TOMTOM_A_TX_7_MBHC_SAR_ERR (0x175) -#define TOMTOM_A_TX_7_MBHC_SAR_ERR__POR (0x00) -#define TOMTOM_A_TX_7_TXFE_CLKDIV (0x176) -#define TOMTOM_A_TX_7_TXFE_CLKDIV__POR (0x8B) -#define TOMTOM_A_RCO_CTRL (0x177) -#define TOMTOM_A_RCO_CTRL__POR (0x00) -#define TOMTOM_A_RCO_CALIBRATION_CTRL1 (0x178) -#define TOMTOM_A_RCO_CALIBRATION_CTRL1__POR (0x00) -#define TOMTOM_A_RCO_CALIBRATION_CTRL2 (0x179) -#define TOMTOM_A_RCO_CALIBRATION_CTRL2__POR (0x00) -#define TOMTOM_A_RCO_CALIBRATION_CTRL3 (0x17A) -#define TOMTOM_A_RCO_CALIBRATION_CTRL3__POR (0x00) -#define TOMTOM_A_RCO_TEST_CTRL (0x17B) -#define TOMTOM_A_RCO_TEST_CTRL__POR (0x00) -#define TOMTOM_A_RCO_CALIBRATION_RESULT1 (0x17C) -#define TOMTOM_A_RCO_CALIBRATION_RESULT1__POR (0x00) -#define TOMTOM_A_RCO_CALIBRATION_RESULT2 (0x17D) -#define TOMTOM_A_RCO_CALIBRATION_RESULT2__POR (0x00) -#define TOMTOM_A_BUCK_MODE_1 (0x181) -#define TOMTOM_A_BUCK_MODE_1__POR (0x21) -#define TOMTOM_A_BUCK_MODE_2 (0x182) -#define TOMTOM_A_BUCK_MODE_2__POR (0xFF) -#define TOMTOM_A_BUCK_MODE_3 (0x183) -#define TOMTOM_A_BUCK_MODE_3__POR (0xCE) -#define TOMTOM_A_BUCK_MODE_4 (0x184) -#define TOMTOM_A_BUCK_MODE_4__POR (0x3A) -#define TOMTOM_A_BUCK_MODE_5 (0x185) -#define TOMTOM_A_BUCK_MODE_5__POR (0x00) -#define TOMTOM_A_BUCK_CTRL_VCL_1 (0x186) -#define TOMTOM_A_BUCK_CTRL_VCL_1__POR (0x08) -#define TOMTOM_A_BUCK_CTRL_VCL_2 (0x187) -#define TOMTOM_A_BUCK_CTRL_VCL_2__POR (0xA3) -#define TOMTOM_A_BUCK_CTRL_VCL_3 (0x188) -#define TOMTOM_A_BUCK_CTRL_VCL_3__POR (0x82) -#define TOMTOM_A_BUCK_CTRL_CCL_1 (0x189) -#define TOMTOM_A_BUCK_CTRL_CCL_1__POR (0x5B) -#define TOMTOM_A_BUCK_CTRL_CCL_2 (0x18A) -#define TOMTOM_A_BUCK_CTRL_CCL_2__POR (0xDC) -#define TOMTOM_A_BUCK_CTRL_CCL_3 (0x18B) -#define TOMTOM_A_BUCK_CTRL_CCL_3__POR (0x6A) -#define TOMTOM_A_BUCK_CTRL_CCL_4 (0x18C) -#define TOMTOM_A_BUCK_CTRL_CCL_4__POR (0x51) -#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_1 (0x18D) -#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_1__POR (0x50) -#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_2 (0x18E) -#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_2__POR (0x64) -#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_3 (0x18F) -#define TOMTOM_A_BUCK_CTRL_PWM_DRVR_3__POR (0x77) -#define TOMTOM_A_BUCK_TMUX_A_D (0x190) -#define TOMTOM_A_BUCK_TMUX_A_D__POR (0x00) -#define TOMTOM_A_NCP_BUCKREF (0x191) -#define 
TOMTOM_A_NCP_BUCKREF__POR (0x00) -#define TOMTOM_A_NCP_EN (0x192) -#define TOMTOM_A_NCP_EN__POR (0xFE) -#define TOMTOM_A_NCP_CLK (0x193) -#define TOMTOM_A_NCP_CLK__POR (0x94) -#define TOMTOM_A_NCP_STATIC (0x194) -#define TOMTOM_A_NCP_STATIC__POR (0x28) -#define TOMTOM_A_NCP_VTH_LOW (0x195) -#define TOMTOM_A_NCP_VTH_LOW__POR (0x88) -#define TOMTOM_A_NCP_VTH_HIGH (0x196) -#define TOMTOM_A_NCP_VTH_HIGH__POR (0xA0) -#define TOMTOM_A_NCP_ATEST (0x197) -#define TOMTOM_A_NCP_ATEST__POR (0x00) -#define TOMTOM_A_NCP_DTEST (0x198) -#define TOMTOM_A_NCP_DTEST__POR (0x10) -#define TOMTOM_A_NCP_DLY1 (0x199) -#define TOMTOM_A_NCP_DLY1__POR (0x06) -#define TOMTOM_A_NCP_DLY2 (0x19A) -#define TOMTOM_A_NCP_DLY2__POR (0x06) -#define TOMTOM_A_RX_AUX_SW_CTL (0x19B) -#define TOMTOM_A_RX_AUX_SW_CTL__POR (0x00) -#define TOMTOM_A_RX_PA_AUX_IN_CONN (0x19C) -#define TOMTOM_A_RX_PA_AUX_IN_CONN__POR (0x00) -#define TOMTOM_A_RX_COM_TIMER_DIV (0x19E) -#define TOMTOM_A_RX_COM_TIMER_DIV__POR (0xE8) -#define TOMTOM_A_RX_COM_OCP_CTL (0x19F) -#define TOMTOM_A_RX_COM_OCP_CTL__POR (0x1F) -#define TOMTOM_A_RX_COM_OCP_COUNT (0x1A0) -#define TOMTOM_A_RX_COM_OCP_COUNT__POR (0x77) -#define TOMTOM_A_RX_COM_DAC_CTL (0x1A1) -#define TOMTOM_A_RX_COM_DAC_CTL__POR (0x00) -#define TOMTOM_A_RX_COM_BIAS (0x1A2) -#define TOMTOM_A_RX_COM_BIAS__POR (0x20) -#define TOMTOM_A_RX_HPH_AUTO_CHOP (0x1A4) -#define TOMTOM_A_RX_HPH_AUTO_CHOP__POR (0x38) -#define TOMTOM_A_RX_HPH_CHOP_CTL (0x1A5) -#define TOMTOM_A_RX_HPH_CHOP_CTL__POR (0xA4) -#define TOMTOM_A_RX_HPH_BIAS_PA (0x1A6) -#define TOMTOM_A_RX_HPH_BIAS_PA__POR (0x7A) -#define TOMTOM_A_RX_HPH_BIAS_LDO (0x1A7) -#define TOMTOM_A_RX_HPH_BIAS_LDO__POR (0x87) -#define TOMTOM_A_RX_HPH_BIAS_CNP (0x1A8) -#define TOMTOM_A_RX_HPH_BIAS_CNP__POR (0x8A) -#define TOMTOM_A_RX_HPH_BIAS_WG_OCP (0x1A9) -#define TOMTOM_A_RX_HPH_BIAS_WG_OCP__POR (0x2A) -#define TOMTOM_A_RX_HPH_OCP_CTL (0x1AA) -#define TOMTOM_A_RX_HPH_OCP_CTL__POR (0x69) -#define TOMTOM_A_RX_HPH_CNP_EN (0x1AB) -#define TOMTOM_A_RX_HPH_CNP_EN__POR (0x80) -#define TOMTOM_A_RX_HPH_CNP_WG_CTL (0x1AC) -#define TOMTOM_A_RX_HPH_CNP_WG_CTL__POR (0xDA) -#define TOMTOM_A_RX_HPH_CNP_WG_TIME (0x1AD) -#define TOMTOM_A_RX_HPH_CNP_WG_TIME__POR (0x15) -#define TOMTOM_A_RX_HPH_L_GAIN (0x1AE) -#define TOMTOM_A_RX_HPH_L_GAIN__POR (0xC0) -#define TOMTOM_A_RX_HPH_L_TEST (0x1AF) -#define TOMTOM_A_RX_HPH_L_TEST__POR (0x02) -#define TOMTOM_A_RX_HPH_L_PA_CTL (0x1B0) -#define TOMTOM_A_RX_HPH_L_PA_CTL__POR (0x42) -#define TOMTOM_A_RX_HPH_L_DAC_CTL (0x1B1) -#define TOMTOM_A_RX_HPH_L_DAC_CTL__POR (0x00) -#define TOMTOM_A_RX_HPH_L_ATEST (0x1B2) -#define TOMTOM_A_RX_HPH_L_ATEST__POR (0x00) -#define TOMTOM_A_RX_HPH_L_STATUS (0x1B3) -#define TOMTOM_A_RX_HPH_L_STATUS__POR (0x00) -#define TOMTOM_A_RX_HPH_R_GAIN (0x1B4) -#define TOMTOM_A_RX_HPH_R_GAIN__POR (0x00) -#define TOMTOM_A_RX_HPH_R_TEST (0x1B5) -#define TOMTOM_A_RX_HPH_R_TEST__POR (0x02) -#define TOMTOM_A_RX_HPH_R_PA_CTL (0x1B6) -#define TOMTOM_A_RX_HPH_R_PA_CTL__POR (0x42) -#define TOMTOM_A_RX_HPH_R_DAC_CTL (0x1B7) -#define TOMTOM_A_RX_HPH_R_DAC_CTL__POR (0x00) -#define TOMTOM_A_RX_HPH_R_ATEST (0x1B8) -#define TOMTOM_A_RX_HPH_R_ATEST__POR (0x00) -#define TOMTOM_A_RX_HPH_R_STATUS (0x1B9) -#define TOMTOM_A_RX_HPH_R_STATUS__POR (0x00) -#define TOMTOM_A_RX_EAR_BIAS_PA (0x1BA) -#define TOMTOM_A_RX_EAR_BIAS_PA__POR (0x76) -#define TOMTOM_A_RX_EAR_BIAS_CMBUFF (0x1BB) -#define TOMTOM_A_RX_EAR_BIAS_CMBUFF__POR (0xA0) -#define TOMTOM_A_RX_EAR_EN (0x1BC) -#define TOMTOM_A_RX_EAR_EN__POR (0x00) -#define TOMTOM_A_RX_EAR_GAIN (0x1BD) 
-#define TOMTOM_A_RX_EAR_GAIN__POR (0x02) -#define TOMTOM_A_RX_EAR_CMBUFF (0x1BE) -#define TOMTOM_A_RX_EAR_CMBUFF__POR (0x05) -#define TOMTOM_A_RX_EAR_ICTL (0x1BF) -#define TOMTOM_A_RX_EAR_ICTL__POR (0x40) -#define TOMTOM_A_RX_EAR_CCOMP (0x1C0) -#define TOMTOM_A_RX_EAR_CCOMP__POR (0x08) -#define TOMTOM_A_RX_EAR_VCM (0x1C1) -#define TOMTOM_A_RX_EAR_VCM__POR (0x03) -#define TOMTOM_A_RX_EAR_CNP (0x1C2) -#define TOMTOM_A_RX_EAR_CNP__POR (0xC0) -#define TOMTOM_A_RX_EAR_DAC_CTL_ATEST (0x1C3) -#define TOMTOM_A_RX_EAR_DAC_CTL_ATEST__POR (0x00) -#define TOMTOM_A_RX_EAR_STATUS (0x1C5) -#define TOMTOM_A_RX_EAR_STATUS__POR (0x04) -#define TOMTOM_A_RX_LINE_BIAS_PA (0x1C6) -#define TOMTOM_A_RX_LINE_BIAS_PA__POR (0x78) -#define TOMTOM_A_RX_BUCK_BIAS1 (0x1C7) -#define TOMTOM_A_RX_BUCK_BIAS1__POR (0x42) -#define TOMTOM_A_RX_BUCK_BIAS2 (0x1C8) -#define TOMTOM_A_RX_BUCK_BIAS2__POR (0x84) -#define TOMTOM_A_RX_LINE_COM (0x1C9) -#define TOMTOM_A_RX_LINE_COM__POR (0x80) -#define TOMTOM_A_RX_LINE_CNP_EN (0x1CA) -#define TOMTOM_A_RX_LINE_CNP_EN__POR (0x00) -#define TOMTOM_A_RX_LINE_CNP_WG_CTL (0x1CB) -#define TOMTOM_A_RX_LINE_CNP_WG_CTL__POR (0x00) -#define TOMTOM_A_RX_LINE_CNP_WG_TIME (0x1CC) -#define TOMTOM_A_RX_LINE_CNP_WG_TIME__POR (0x04) -#define TOMTOM_A_RX_LINE_1_GAIN (0x1CD) -#define TOMTOM_A_RX_LINE_1_GAIN__POR (0x00) -#define TOMTOM_A_RX_LINE_1_TEST (0x1CE) -#define TOMTOM_A_RX_LINE_1_TEST__POR (0x02) -#define TOMTOM_A_RX_LINE_1_DAC_CTL (0x1CF) -#define TOMTOM_A_RX_LINE_1_DAC_CTL__POR (0x00) -#define TOMTOM_A_RX_LINE_1_STATUS (0x1D0) -#define TOMTOM_A_RX_LINE_1_STATUS__POR (0x00) -#define TOMTOM_A_RX_LINE_2_GAIN (0x1D1) -#define TOMTOM_A_RX_LINE_2_GAIN__POR (0x00) -#define TOMTOM_A_RX_LINE_2_TEST (0x1D2) -#define TOMTOM_A_RX_LINE_2_TEST__POR (0x02) -#define TOMTOM_A_RX_LINE_2_DAC_CTL (0x1D3) -#define TOMTOM_A_RX_LINE_2_DAC_CTL__POR (0x00) -#define TOMTOM_A_RX_LINE_2_STATUS (0x1D4) -#define TOMTOM_A_RX_LINE_2_STATUS__POR (0x00) -#define TOMTOM_A_RX_LINE_3_GAIN (0x1D5) -#define TOMTOM_A_RX_LINE_3_GAIN__POR (0x00) -#define TOMTOM_A_RX_LINE_3_TEST (0x1D6) -#define TOMTOM_A_RX_LINE_3_TEST__POR (0x02) -#define TOMTOM_A_RX_LINE_3_DAC_CTL (0x1D7) -#define TOMTOM_A_RX_LINE_3_DAC_CTL__POR (0x00) -#define TOMTOM_A_RX_LINE_3_STATUS (0x1D8) -#define TOMTOM_A_RX_LINE_3_STATUS__POR (0x00) -#define TOMTOM_A_RX_LINE_4_GAIN (0x1D9) -#define TOMTOM_A_RX_LINE_4_GAIN__POR (0x00) -#define TOMTOM_A_RX_LINE_4_TEST (0x1DA) -#define TOMTOM_A_RX_LINE_4_TEST__POR (0x02) -#define TOMTOM_A_RX_LINE_4_DAC_CTL (0x1DB) -#define TOMTOM_A_RX_LINE_4_DAC_CTL__POR (0x00) -#define TOMTOM_A_RX_LINE_4_STATUS (0x1DC) -#define TOMTOM_A_RX_LINE_4_STATUS__POR (0x00) -#define TOMTOM_A_RX_LINE_CNP_DBG (0x1DD) -#define TOMTOM_A_RX_LINE_CNP_DBG__POR (0x00) -#define TOMTOM_A_SPKR_DRV1_EN (0x1DF) -#define TOMTOM_A_SPKR_DRV1_EN__POR (0x6F) -#define TOMTOM_A_SPKR_DRV1_GAIN (0x1E0) -#define TOMTOM_A_SPKR_DRV1_GAIN__POR (0x00) -#define TOMTOM_A_SPKR_DRV1_DAC_CTL (0x1E1) -#define TOMTOM_A_SPKR_DRV1_DAC_CTL__POR (0x04) -#define TOMTOM_A_SPKR_DRV1_OCP_CTL (0x1E2) -#define TOMTOM_A_SPKR_DRV1_OCP_CTL__POR (0x97) -#define TOMTOM_A_SPKR_DRV1_CLIP_DET (0x1E3) -#define TOMTOM_A_SPKR_DRV1_CLIP_DET__POR (0x01) -#define TOMTOM_A_SPKR_DRV1_IEC (0x1E4) -#define TOMTOM_A_SPKR_DRV1_IEC__POR (0x00) -#define TOMTOM_A_SPKR_DRV1_DBG_DAC (0x1E5) -#define TOMTOM_A_SPKR_DRV1_DBG_DAC__POR (0x05) -#define TOMTOM_A_SPKR_DRV1_DBG_PA (0x1E6) -#define TOMTOM_A_SPKR_DRV1_DBG_PA__POR (0x18) -#define TOMTOM_A_SPKR_DRV1_DBG_PWRSTG (0x1E7) -#define TOMTOM_A_SPKR_DRV1_DBG_PWRSTG__POR (0x00) 
-#define TOMTOM_A_SPKR_DRV1_BIAS_LDO (0x1E8) -#define TOMTOM_A_SPKR_DRV1_BIAS_LDO__POR (0x45) -#define TOMTOM_A_SPKR_DRV1_BIAS_INT (0x1E9) -#define TOMTOM_A_SPKR_DRV1_BIAS_INT__POR (0xA5) -#define TOMTOM_A_SPKR_DRV1_BIAS_PA (0x1EA) -#define TOMTOM_A_SPKR_DRV1_BIAS_PA__POR (0x55) -#define TOMTOM_A_SPKR_DRV1_STATUS_OCP (0x1EB) -#define TOMTOM_A_SPKR_DRV1_STATUS_OCP__POR (0x00) -#define TOMTOM_A_SPKR_DRV1_STATUS_PA (0x1EC) -#define TOMTOM_A_SPKR_DRV1_STATUS_PA__POR (0x00) -#define TOMTOM_A_SPKR1_PROT_EN (0x1ED) -#define TOMTOM_A_SPKR1_PROT_EN__POR (0x00) -#define TOMTOM_A_SPKR1_PROT_ADC_TEST_EN (0x1EE) -#define TOMTOM_A_SPKR1_PROT_ADC_TEST_EN__POR (0x44) -#define TOMTOM_A_SPKR1_PROT_ATEST (0x1EF) -#define TOMTOM_A_SPKR1_PROT_ATEST__POR (0x00) -#define TOMTOM_A_SPKR1_PROT_LDO_CTRL (0x1F0) -#define TOMTOM_A_SPKR1_PROT_LDO_CTRL__POR (0x00) -#define TOMTOM_A_SPKR1_PROT_ISENSE_CTRL (0x1F1) -#define TOMTOM_A_SPKR1_PROT_ISENSE_CTRL__POR (0x00) -#define TOMTOM_A_SPKR1_PROT_VSENSE_CTRL (0x1F2) -#define TOMTOM_A_SPKR1_PROT_VSENSE_CTRL__POR (0x00) -#define TOMTOM_A_SPKR2_PROT_EN (0x1F3) -#define TOMTOM_A_SPKR2_PROT_EN__POR (0x00) -#define TOMTOM_A_SPKR2_PROT_ADC_TEST_EN (0x1F4) -#define TOMTOM_A_SPKR2_PROT_ADC_TEST_EN__POR (0x44) -#define TOMTOM_A_SPKR2_PROT_ATEST (0x1F5) -#define TOMTOM_A_SPKR2_PROT_ATEST__POR (0x00) -#define TOMTOM_A_SPKR2_PROT_LDO_CTRL (0x1F6) -#define TOMTOM_A_SPKR2_PROT_LDO_CTRL__POR (0x00) -#define TOMTOM_A_SPKR2_PROT_ISENSE_CTRL (0x1F7) -#define TOMTOM_A_SPKR2_PROT_ISENSE_CTRL__POR (0x00) -#define TOMTOM_A_SPKR2_PROT_VSENSE_CTRL (0x1F8) -#define TOMTOM_A_SPKR2_PROT_VSENSE_CTRL__POR (0x00) -#define TOMTOM_A_MBHC_HPH (0x1FE) -#define TOMTOM_A_MBHC_HPH__POR (0x44) -#define TOMTOM_A_CDC_ANC1_B1_CTL (0x200) -#define TOMTOM_A_CDC_ANC1_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_B1_CTL (0x280) -#define TOMTOM_A_CDC_ANC2_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC1_SHIFT (0x201) -#define TOMTOM_A_CDC_ANC1_SHIFT__POR (0x00) -#define TOMTOM_A_CDC_ANC2_SHIFT (0x281) -#define TOMTOM_A_CDC_ANC2_SHIFT__POR (0x00) -#define TOMTOM_A_CDC_ANC1_IIR_B1_CTL (0x202) -#define TOMTOM_A_CDC_ANC1_IIR_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_IIR_B1_CTL (0x282) -#define TOMTOM_A_CDC_ANC2_IIR_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC1_IIR_B2_CTL (0x203) -#define TOMTOM_A_CDC_ANC1_IIR_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_IIR_B2_CTL (0x283) -#define TOMTOM_A_CDC_ANC2_IIR_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC1_IIR_B3_CTL (0x204) -#define TOMTOM_A_CDC_ANC1_IIR_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_IIR_B3_CTL (0x284) -#define TOMTOM_A_CDC_ANC2_IIR_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC1_LPF_B1_CTL (0x206) -#define TOMTOM_A_CDC_ANC1_LPF_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_LPF_B1_CTL (0x286) -#define TOMTOM_A_CDC_ANC2_LPF_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC1_LPF_B2_CTL (0x207) -#define TOMTOM_A_CDC_ANC1_LPF_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_LPF_B2_CTL (0x287) -#define TOMTOM_A_CDC_ANC2_LPF_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC1_SPARE (0x209) -#define TOMTOM_A_CDC_ANC1_SPARE__POR (0x00) -#define TOMTOM_A_CDC_ANC2_SPARE (0x289) -#define TOMTOM_A_CDC_ANC2_SPARE__POR (0x00) -#define TOMTOM_A_CDC_ANC1_SMLPF_CTL (0x20A) -#define TOMTOM_A_CDC_ANC1_SMLPF_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_SMLPF_CTL (0x28A) -#define TOMTOM_A_CDC_ANC2_SMLPF_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC1_DCFLT_CTL (0x20B) -#define TOMTOM_A_CDC_ANC1_DCFLT_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_DCFLT_CTL (0x28B) -#define TOMTOM_A_CDC_ANC2_DCFLT_CTL__POR 
(0x00) -#define TOMTOM_A_CDC_ANC1_GAIN_CTL (0x20C) -#define TOMTOM_A_CDC_ANC1_GAIN_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_GAIN_CTL (0x28C) -#define TOMTOM_A_CDC_ANC2_GAIN_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC1_B2_CTL (0x20D) -#define TOMTOM_A_CDC_ANC1_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_ANC2_B2_CTL (0x28D) -#define TOMTOM_A_CDC_ANC2_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX1_VOL_CTL_TIMER (0x220) -#define TOMTOM_A_CDC_TX1_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX2_VOL_CTL_TIMER (0x228) -#define TOMTOM_A_CDC_TX2_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX3_VOL_CTL_TIMER (0x230) -#define TOMTOM_A_CDC_TX3_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX4_VOL_CTL_TIMER (0x238) -#define TOMTOM_A_CDC_TX4_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX5_VOL_CTL_TIMER (0x240) -#define TOMTOM_A_CDC_TX5_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX6_VOL_CTL_TIMER (0x248) -#define TOMTOM_A_CDC_TX6_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX7_VOL_CTL_TIMER (0x250) -#define TOMTOM_A_CDC_TX7_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX8_VOL_CTL_TIMER (0x258) -#define TOMTOM_A_CDC_TX8_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX9_VOL_CTL_TIMER (0x260) -#define TOMTOM_A_CDC_TX9_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX10_VOL_CTL_TIMER (0x268) -#define TOMTOM_A_CDC_TX10_VOL_CTL_TIMER__POR (0x00) -#define TOMTOM_A_CDC_TX1_VOL_CTL_GAIN (0x221) -#define TOMTOM_A_CDC_TX1_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX2_VOL_CTL_GAIN (0x229) -#define TOMTOM_A_CDC_TX2_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX3_VOL_CTL_GAIN (0x231) -#define TOMTOM_A_CDC_TX3_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX4_VOL_CTL_GAIN (0x239) -#define TOMTOM_A_CDC_TX4_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX5_VOL_CTL_GAIN (0x241) -#define TOMTOM_A_CDC_TX5_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX6_VOL_CTL_GAIN (0x249) -#define TOMTOM_A_CDC_TX6_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX7_VOL_CTL_GAIN (0x251) -#define TOMTOM_A_CDC_TX7_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX8_VOL_CTL_GAIN (0x259) -#define TOMTOM_A_CDC_TX8_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX9_VOL_CTL_GAIN (0x261) -#define TOMTOM_A_CDC_TX9_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX10_VOL_CTL_GAIN (0x269) -#define TOMTOM_A_CDC_TX10_VOL_CTL_GAIN__POR (0x00) -#define TOMTOM_A_CDC_TX1_VOL_CTL_CFG (0x222) -#define TOMTOM_A_CDC_TX1_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX2_VOL_CTL_CFG (0x22A) -#define TOMTOM_A_CDC_TX2_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX3_VOL_CTL_CFG (0x232) -#define TOMTOM_A_CDC_TX3_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX4_VOL_CTL_CFG (0x23A) -#define TOMTOM_A_CDC_TX4_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX5_VOL_CTL_CFG (0x242) -#define TOMTOM_A_CDC_TX5_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX6_VOL_CTL_CFG (0x24A) -#define TOMTOM_A_CDC_TX6_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX7_VOL_CTL_CFG (0x252) -#define TOMTOM_A_CDC_TX7_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX8_VOL_CTL_CFG (0x25A) -#define TOMTOM_A_CDC_TX8_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX9_VOL_CTL_CFG (0x262) -#define TOMTOM_A_CDC_TX9_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX10_VOL_CTL_CFG (0x26A) -#define TOMTOM_A_CDC_TX10_VOL_CTL_CFG__POR (0x00) -#define TOMTOM_A_CDC_TX1_MUX_CTL (0x223) -#define TOMTOM_A_CDC_TX1_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX2_MUX_CTL (0x22B) -#define TOMTOM_A_CDC_TX2_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX3_MUX_CTL (0x233) -#define 
TOMTOM_A_CDC_TX3_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX4_MUX_CTL (0x23B) -#define TOMTOM_A_CDC_TX4_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX5_MUX_CTL (0x243) -#define TOMTOM_A_CDC_TX5_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX6_MUX_CTL (0x24B) -#define TOMTOM_A_CDC_TX6_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX7_MUX_CTL (0x253) -#define TOMTOM_A_CDC_TX7_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX8_MUX_CTL (0x25B) -#define TOMTOM_A_CDC_TX8_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX9_MUX_CTL (0x263) -#define TOMTOM_A_CDC_TX9_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX10_MUX_CTL (0x26B) -#define TOMTOM_A_CDC_TX10_MUX_CTL__POR (0x48) -#define TOMTOM_A_CDC_TX1_CLK_FS_CTL (0x224) -#define TOMTOM_A_CDC_TX1_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX2_CLK_FS_CTL (0x22C) -#define TOMTOM_A_CDC_TX2_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX3_CLK_FS_CTL (0x234) -#define TOMTOM_A_CDC_TX3_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX4_CLK_FS_CTL (0x23C) -#define TOMTOM_A_CDC_TX4_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX5_CLK_FS_CTL (0x244) -#define TOMTOM_A_CDC_TX5_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX6_CLK_FS_CTL (0x24C) -#define TOMTOM_A_CDC_TX6_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX7_CLK_FS_CTL (0x254) -#define TOMTOM_A_CDC_TX7_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX8_CLK_FS_CTL (0x25C) -#define TOMTOM_A_CDC_TX8_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX9_CLK_FS_CTL (0x264) -#define TOMTOM_A_CDC_TX9_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX10_CLK_FS_CTL (0x26C) -#define TOMTOM_A_CDC_TX10_CLK_FS_CTL__POR (0x03) -#define TOMTOM_A_CDC_TX1_DMIC_CTL (0x225) -#define TOMTOM_A_CDC_TX1_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX2_DMIC_CTL (0x22D) -#define TOMTOM_A_CDC_TX2_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX3_DMIC_CTL (0x235) -#define TOMTOM_A_CDC_TX3_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX4_DMIC_CTL (0x23D) -#define TOMTOM_A_CDC_TX4_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX5_DMIC_CTL (0x245) -#define TOMTOM_A_CDC_TX5_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX6_DMIC_CTL (0x24D) -#define TOMTOM_A_CDC_TX6_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX7_DMIC_CTL (0x255) -#define TOMTOM_A_CDC_TX7_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX8_DMIC_CTL (0x25D) -#define TOMTOM_A_CDC_TX8_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX9_DMIC_CTL (0x265) -#define TOMTOM_A_CDC_TX9_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_TX10_DMIC_CTL (0x26D) -#define TOMTOM_A_CDC_TX10_DMIC_CTL__POR (0x00) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL0 (0x270) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL0__POR (0x00) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL1 (0x271) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL1__POR (0x00) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL2 (0x272) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL2__POR (0x00) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL3 (0x273) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL3__POR (0x00) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL4 (0x274) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL4__POR (0x00) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL5 (0x275) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL5__POR (0x00) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL6 (0x276) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL6__POR (0x00) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL7 (0x277) -#define TOMTOM_A_CDC_SPKR_CLIPDET_VAL7__POR (0x00) -#define TOMTOM_A_CDC_DEBUG_B1_CTL (0x278) -#define TOMTOM_A_CDC_DEBUG_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_DEBUG_B2_CTL (0x279) -#define TOMTOM_A_CDC_DEBUG_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_DEBUG_B3_CTL (0x27A) -#define 
TOMTOM_A_CDC_DEBUG_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_DEBUG_B4_CTL (0x27B) -#define TOMTOM_A_CDC_DEBUG_B4_CTL__POR (0x00) -#define TOMTOM_A_CDC_DEBUG_B5_CTL (0x27C) -#define TOMTOM_A_CDC_DEBUG_B5_CTL__POR (0x00) -#define TOMTOM_A_CDC_DEBUG_B6_CTL (0x27D) -#define TOMTOM_A_CDC_DEBUG_B6_CTL__POR (0x00) -#define TOMTOM_A_CDC_DEBUG_B7_CTL (0x27E) -#define TOMTOM_A_CDC_DEBUG_B7_CTL__POR (0x00) -#define TOMTOM_A_CDC_SRC1_PDA_CFG (0x2A0) -#define TOMTOM_A_CDC_SRC1_PDA_CFG__POR (0x00) -#define TOMTOM_A_CDC_SRC2_PDA_CFG (0x2A8) -#define TOMTOM_A_CDC_SRC2_PDA_CFG__POR (0x00) -#define TOMTOM_A_CDC_SRC1_FS_CTL (0x2A1) -#define TOMTOM_A_CDC_SRC1_FS_CTL__POR (0x1B) -#define TOMTOM_A_CDC_SRC2_FS_CTL (0x2A9) -#define TOMTOM_A_CDC_SRC2_FS_CTL__POR (0x1B) -#define TOMTOM_A_CDC_RX1_B1_CTL (0x2B0) -#define TOMTOM_A_CDC_RX1_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX2_B1_CTL (0x2B8) -#define TOMTOM_A_CDC_RX2_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX3_B1_CTL (0x2C0) -#define TOMTOM_A_CDC_RX3_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX4_B1_CTL (0x2C8) -#define TOMTOM_A_CDC_RX4_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX5_B1_CTL (0x2D0) -#define TOMTOM_A_CDC_RX5_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX6_B1_CTL (0x2D8) -#define TOMTOM_A_CDC_RX6_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX7_B1_CTL (0x2E0) -#define TOMTOM_A_CDC_RX7_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX1_B2_CTL (0x2B1) -#define TOMTOM_A_CDC_RX1_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX2_B2_CTL (0x2B9) -#define TOMTOM_A_CDC_RX2_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX3_B2_CTL (0x2C1) -#define TOMTOM_A_CDC_RX3_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX4_B2_CTL (0x2C9) -#define TOMTOM_A_CDC_RX4_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX5_B2_CTL (0x2D1) -#define TOMTOM_A_CDC_RX5_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX6_B2_CTL (0x2D9) -#define TOMTOM_A_CDC_RX6_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX7_B2_CTL (0x2E1) -#define TOMTOM_A_CDC_RX7_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX1_B3_CTL (0x2B2) -#define TOMTOM_A_CDC_RX1_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX2_B3_CTL (0x2BA) -#define TOMTOM_A_CDC_RX2_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX3_B3_CTL (0x2C2) -#define TOMTOM_A_CDC_RX3_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX4_B3_CTL (0x2CA) -#define TOMTOM_A_CDC_RX4_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX5_B3_CTL (0x2D2) -#define TOMTOM_A_CDC_RX5_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX6_B3_CTL (0x2DA) -#define TOMTOM_A_CDC_RX6_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX7_B3_CTL (0x2E2) -#define TOMTOM_A_CDC_RX7_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX1_B4_CTL (0x2B3) -#define TOMTOM_A_CDC_RX1_B4_CTL__POR (0x0B) -#define TOMTOM_A_CDC_RX2_B4_CTL (0x2BB) -#define TOMTOM_A_CDC_RX2_B4_CTL__POR (0x0B) -#define TOMTOM_A_CDC_RX3_B4_CTL (0x2C3) -#define TOMTOM_A_CDC_RX3_B4_CTL__POR (0x0B) -#define TOMTOM_A_CDC_RX4_B4_CTL (0x2CB) -#define TOMTOM_A_CDC_RX4_B4_CTL__POR (0x0B) -#define TOMTOM_A_CDC_RX5_B4_CTL (0x2D3) -#define TOMTOM_A_CDC_RX5_B4_CTL__POR (0x0B) -#define TOMTOM_A_CDC_RX6_B4_CTL (0x2DB) -#define TOMTOM_A_CDC_RX6_B4_CTL__POR (0x0B) -#define TOMTOM_A_CDC_RX7_B4_CTL (0x2E3) -#define TOMTOM_A_CDC_RX7_B4_CTL__POR (0x0B) -#define TOMTOM_A_CDC_RX1_B5_CTL (0x2B4) -#define TOMTOM_A_CDC_RX1_B5_CTL__POR (0x78) -#define TOMTOM_A_CDC_RX2_B5_CTL (0x2BC) -#define TOMTOM_A_CDC_RX2_B5_CTL__POR (0x78) -#define TOMTOM_A_CDC_RX3_B5_CTL (0x2C4) -#define TOMTOM_A_CDC_RX3_B5_CTL__POR (0x78) -#define TOMTOM_A_CDC_RX4_B5_CTL (0x2CC) -#define TOMTOM_A_CDC_RX4_B5_CTL__POR (0x78) -#define TOMTOM_A_CDC_RX5_B5_CTL (0x2D4) 
-#define TOMTOM_A_CDC_RX5_B5_CTL__POR (0x78) -#define TOMTOM_A_CDC_RX6_B5_CTL (0x2DC) -#define TOMTOM_A_CDC_RX6_B5_CTL__POR (0x78) -#define TOMTOM_A_CDC_RX7_B5_CTL (0x2E4) -#define TOMTOM_A_CDC_RX7_B5_CTL__POR (0x78) -#define TOMTOM_A_CDC_RX1_B6_CTL (0x2B5) -#define TOMTOM_A_CDC_RX1_B6_CTL__POR (0x80) -#define TOMTOM_A_CDC_RX2_B6_CTL (0x2BD) -#define TOMTOM_A_CDC_RX2_B6_CTL__POR (0x80) -#define TOMTOM_A_CDC_RX3_B6_CTL (0x2C5) -#define TOMTOM_A_CDC_RX3_B6_CTL__POR (0x80) -#define TOMTOM_A_CDC_RX4_B6_CTL (0x2CD) -#define TOMTOM_A_CDC_RX4_B6_CTL__POR (0x80) -#define TOMTOM_A_CDC_RX5_B6_CTL (0x2D5) -#define TOMTOM_A_CDC_RX5_B6_CTL__POR (0x80) -#define TOMTOM_A_CDC_RX6_B6_CTL (0x2DD) -#define TOMTOM_A_CDC_RX6_B6_CTL__POR (0x80) -#define TOMTOM_A_CDC_RX7_B6_CTL (0x2E5) -#define TOMTOM_A_CDC_RX7_B6_CTL__POR (0x80) -#define TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL (0x2B6) -#define TOMTOM_A_CDC_RX1_VOL_CTL_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL (0x2BE) -#define TOMTOM_A_CDC_RX2_VOL_CTL_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL (0x2C6) -#define TOMTOM_A_CDC_RX3_VOL_CTL_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL (0x2CE) -#define TOMTOM_A_CDC_RX4_VOL_CTL_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL (0x2D6) -#define TOMTOM_A_CDC_RX5_VOL_CTL_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL (0x2DE) -#define TOMTOM_A_CDC_RX6_VOL_CTL_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL (0x2E6) -#define TOMTOM_A_CDC_RX7_VOL_CTL_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL (0x2B7) -#define TOMTOM_A_CDC_RX1_VOL_CTL_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL (0x2BF) -#define TOMTOM_A_CDC_RX2_VOL_CTL_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL (0x2C7) -#define TOMTOM_A_CDC_RX3_VOL_CTL_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL (0x2CF) -#define TOMTOM_A_CDC_RX4_VOL_CTL_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL (0x2D7) -#define TOMTOM_A_CDC_RX5_VOL_CTL_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL (0x2DF) -#define TOMTOM_A_CDC_RX6_VOL_CTL_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL (0x2E7) -#define TOMTOM_A_CDC_RX7_VOL_CTL_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_VBAT_CFG (0x2E8) -#define TOMTOM_A_CDC_VBAT_CFG__POR (0x1A) -#define TOMTOM_A_CDC_VBAT_ADC_CAL1 (0x2E9) -#define TOMTOM_A_CDC_VBAT_ADC_CAL1__POR (0x00) -#define TOMTOM_A_CDC_VBAT_ADC_CAL2 (0x2EA) -#define TOMTOM_A_CDC_VBAT_ADC_CAL2__POR (0x00) -#define TOMTOM_A_CDC_VBAT_ADC_CAL3 (0x2EB) -#define TOMTOM_A_CDC_VBAT_ADC_CAL3__POR (0x04) -#define TOMTOM_A_CDC_VBAT_PK_EST1 (0x2EC) -#define TOMTOM_A_CDC_VBAT_PK_EST1__POR (0xE0) -#define TOMTOM_A_CDC_VBAT_PK_EST2 (0x2ED) -#define TOMTOM_A_CDC_VBAT_PK_EST2__POR (0x01) -#define TOMTOM_A_CDC_VBAT_PK_EST3 (0x2EE) -#define TOMTOM_A_CDC_VBAT_PK_EST3__POR (0x40) -#define TOMTOM_A_CDC_VBAT_RF_PROC1 (0x2EF) -#define TOMTOM_A_CDC_VBAT_RF_PROC1__POR (0x2A) -#define TOMTOM_A_CDC_VBAT_RF_PROC2 (0x2F0) -#define TOMTOM_A_CDC_VBAT_RF_PROC2__POR (0x86) -#define TOMTOM_A_CDC_VBAT_TAC1 (0x2F1) -#define TOMTOM_A_CDC_VBAT_TAC1__POR (0x70) -#define TOMTOM_A_CDC_VBAT_TAC2 (0x2F2) -#define TOMTOM_A_CDC_VBAT_TAC2__POR (0x18) -#define TOMTOM_A_CDC_VBAT_TAC3 (0x2F3) -#define TOMTOM_A_CDC_VBAT_TAC3__POR (0x18) -#define TOMTOM_A_CDC_VBAT_TAC4 (0x2F4) -#define TOMTOM_A_CDC_VBAT_TAC4__POR (0x03) -#define TOMTOM_A_CDC_VBAT_GAIN_UPD1 (0x2F5) -#define TOMTOM_A_CDC_VBAT_GAIN_UPD1__POR (0x01) -#define TOMTOM_A_CDC_VBAT_GAIN_UPD2 (0x2F6) -#define 
TOMTOM_A_CDC_VBAT_GAIN_UPD2__POR (0x00) -#define TOMTOM_A_CDC_VBAT_GAIN_UPD3 (0x2F7) -#define TOMTOM_A_CDC_VBAT_GAIN_UPD3__POR (0x64) -#define TOMTOM_A_CDC_VBAT_GAIN_UPD4 (0x2F8) -#define TOMTOM_A_CDC_VBAT_GAIN_UPD4__POR (0x01) -#define TOMTOM_A_CDC_VBAT_DEBUG1 (0x2F9) -#define TOMTOM_A_CDC_VBAT_DEBUG1__POR (0x00) -#define TOMTOM_A_CDC_VBAT_GAIN_UPD_MON (0x2FA) -#define TOMTOM_A_CDC_VBAT_GAIN_UPD_MON__POR (0x00) -#define TOMTOM_A_CDC_VBAT_GAIN_MON_VAL (0x2FB) -#define TOMTOM_A_CDC_VBAT_GAIN_MON_VAL__POR (0x00) -#define TOMTOM_A_CDC_CLK_ANC_RESET_CTL (0x300) -#define TOMTOM_A_CDC_CLK_ANC_RESET_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_RX_RESET_CTL (0x301) -#define TOMTOM_A_CDC_CLK_RX_RESET_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL (0x302) -#define TOMTOM_A_CDC_CLK_TX_RESET_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL (0x303) -#define TOMTOM_A_CDC_CLK_TX_RESET_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_RX_I2S_CTL (0x306) -#define TOMTOM_A_CDC_CLK_RX_I2S_CTL__POR (0x03) -#define TOMTOM_A_CDC_CLK_TX_I2S_CTL (0x307) -#define TOMTOM_A_CDC_CLK_TX_I2S_CTL__POR (0x03) -#define TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL (0x308) -#define TOMTOM_A_CDC_CLK_OTHR_RESET_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL (0x309) -#define TOMTOM_A_CDC_CLK_OTHR_RESET_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL (0x30A) -#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL (0x30B) -#define TOMTOM_A_CDC_CLK_TX_CLK_EN_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_OTHR_CTL (0x30C) -#define TOMTOM_A_CDC_CLK_OTHR_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL (0x30E) -#define TOMTOM_A_CDC_CLK_ANC_CLK_EN_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_RX_B1_CTL (0x30F) -#define TOMTOM_A_CDC_CLK_RX_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_RX_B2_CTL (0x310) -#define TOMTOM_A_CDC_CLK_RX_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_MCLK_CTL (0x311) -#define TOMTOM_A_CDC_CLK_MCLK_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_PDM_CTL (0x312) -#define TOMTOM_A_CDC_CLK_PDM_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLK_SD_CTL (0x313) -#define TOMTOM_A_CDC_CLK_SD_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLSH_B1_CTL (0x320) -#define TOMTOM_A_CDC_CLSH_B1_CTL__POR (0xE4) -#define TOMTOM_A_CDC_CLSH_B2_CTL (0x321) -#define TOMTOM_A_CDC_CLSH_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLSH_B3_CTL (0x322) -#define TOMTOM_A_CDC_CLSH_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS (0x323) -#define TOMTOM_A_CDC_CLSH_BUCK_NCP_VARS__POR (0x00) -#define TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD (0x324) -#define TOMTOM_A_CDC_CLSH_IDLE_HPH_THSD__POR (0x12) -#define TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD (0x325) -#define TOMTOM_A_CDC_CLSH_IDLE_EAR_THSD__POR (0x0C) -#define TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD (0x326) -#define TOMTOM_A_CDC_CLSH_FCLKONLY_HPH_THSD__POR (0x18) -#define TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD (0x327) -#define TOMTOM_A_CDC_CLSH_FCLKONLY_EAR_THSD__POR (0x23) -#define TOMTOM_A_CDC_CLSH_K_ADDR (0x328) -#define TOMTOM_A_CDC_CLSH_K_ADDR__POR (0x00) -#define TOMTOM_A_CDC_CLSH_K_DATA (0x329) -#define TOMTOM_A_CDC_CLSH_K_DATA__POR (0xA4) -#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L (0x32A) -#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_L__POR (0xD7) -#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U (0x32B) -#define TOMTOM_A_CDC_CLSH_I_PA_FACT_HPH_U__POR (0x05) -#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L (0x32C) -#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_L__POR (0x60) -#define TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U (0x32D) -#define 
TOMTOM_A_CDC_CLSH_I_PA_FACT_EAR_U__POR (0x09) -#define TOMTOM_A_CDC_CLSH_V_PA_HD_EAR (0x32E) -#define TOMTOM_A_CDC_CLSH_V_PA_HD_EAR__POR (0x00) -#define TOMTOM_A_CDC_CLSH_V_PA_HD_HPH (0x32F) -#define TOMTOM_A_CDC_CLSH_V_PA_HD_HPH__POR (0x00) -#define TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR (0x330) -#define TOMTOM_A_CDC_CLSH_V_PA_MIN_EAR__POR (0x00) -#define TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH (0x331) -#define TOMTOM_A_CDC_CLSH_V_PA_MIN_HPH__POR (0x00) -#define TOMTOM_A_CDC_IIR1_GAIN_B1_CTL (0x340) -#define TOMTOM_A_CDC_IIR1_GAIN_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_GAIN_B1_CTL (0x350) -#define TOMTOM_A_CDC_IIR2_GAIN_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_GAIN_B2_CTL (0x341) -#define TOMTOM_A_CDC_IIR1_GAIN_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_GAIN_B2_CTL (0x351) -#define TOMTOM_A_CDC_IIR2_GAIN_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_GAIN_B3_CTL (0x342) -#define TOMTOM_A_CDC_IIR1_GAIN_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_GAIN_B3_CTL (0x352) -#define TOMTOM_A_CDC_IIR2_GAIN_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_GAIN_B4_CTL (0x343) -#define TOMTOM_A_CDC_IIR1_GAIN_B4_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_GAIN_B4_CTL (0x353) -#define TOMTOM_A_CDC_IIR2_GAIN_B4_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_GAIN_B5_CTL (0x344) -#define TOMTOM_A_CDC_IIR1_GAIN_B5_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_GAIN_B5_CTL (0x354) -#define TOMTOM_A_CDC_IIR2_GAIN_B5_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_GAIN_B6_CTL (0x345) -#define TOMTOM_A_CDC_IIR1_GAIN_B6_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_GAIN_B6_CTL (0x355) -#define TOMTOM_A_CDC_IIR2_GAIN_B6_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_GAIN_B7_CTL (0x346) -#define TOMTOM_A_CDC_IIR1_GAIN_B7_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_GAIN_B7_CTL (0x356) -#define TOMTOM_A_CDC_IIR2_GAIN_B7_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_GAIN_B8_CTL (0x347) -#define TOMTOM_A_CDC_IIR1_GAIN_B8_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_GAIN_B8_CTL (0x357) -#define TOMTOM_A_CDC_IIR2_GAIN_B8_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_CTL (0x348) -#define TOMTOM_A_CDC_IIR1_CTL__POR (0x40) -#define TOMTOM_A_CDC_IIR2_CTL (0x358) -#define TOMTOM_A_CDC_IIR2_CTL__POR (0x40) -#define TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL (0x349) -#define TOMTOM_A_CDC_IIR1_GAIN_TIMER_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL (0x359) -#define TOMTOM_A_CDC_IIR2_GAIN_TIMER_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_COEF_B1_CTL (0x34A) -#define TOMTOM_A_CDC_IIR1_COEF_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_COEF_B1_CTL (0x35A) -#define TOMTOM_A_CDC_IIR2_COEF_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR1_COEF_B2_CTL (0x34B) -#define TOMTOM_A_CDC_IIR1_COEF_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_IIR2_COEF_B2_CTL (0x35B) -#define TOMTOM_A_CDC_IIR2_COEF_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_TOP_GAIN_UPDATE (0x360) -#define TOMTOM_A_CDC_TOP_GAIN_UPDATE__POR (0x00) -#define TOMTOM_A_CDC_PA_RAMP_B1_CTL (0x361) -#define TOMTOM_A_CDC_PA_RAMP_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_PA_RAMP_B2_CTL (0x362) -#define TOMTOM_A_CDC_PA_RAMP_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_PA_RAMP_B3_CTL (0x363) -#define TOMTOM_A_CDC_PA_RAMP_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_PA_RAMP_B4_CTL (0x364) -#define TOMTOM_A_CDC_PA_RAMP_B4_CTL__POR (0x00) -#define TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL (0x365) -#define TOMTOM_A_CDC_SPKR_CLIPDET_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL (0x366) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_COMP0_B1_CTL (0x368) -#define TOMTOM_A_CDC_COMP0_B1_CTL__POR (0x30) -#define 
TOMTOM_A_CDC_COMP1_B1_CTL (0x370) -#define TOMTOM_A_CDC_COMP1_B1_CTL__POR (0x30) -#define TOMTOM_A_CDC_COMP2_B1_CTL (0x378) -#define TOMTOM_A_CDC_COMP2_B1_CTL__POR (0x30) -#define TOMTOM_A_CDC_COMP0_B2_CTL (0x369) -#define TOMTOM_A_CDC_COMP0_B2_CTL__POR (0xB5) -#define TOMTOM_A_CDC_COMP1_B2_CTL (0x371) -#define TOMTOM_A_CDC_COMP1_B2_CTL__POR (0xB5) -#define TOMTOM_A_CDC_COMP2_B2_CTL (0x379) -#define TOMTOM_A_CDC_COMP2_B2_CTL__POR (0xB5) -#define TOMTOM_A_CDC_COMP0_B3_CTL (0x36A) -#define TOMTOM_A_CDC_COMP0_B3_CTL__POR (0x28) -#define TOMTOM_A_CDC_COMP1_B3_CTL (0x372) -#define TOMTOM_A_CDC_COMP1_B3_CTL__POR (0x28) -#define TOMTOM_A_CDC_COMP2_B3_CTL (0x37A) -#define TOMTOM_A_CDC_COMP2_B3_CTL__POR (0x28) -#define TOMTOM_A_CDC_COMP0_B4_CTL (0x36B) -#define TOMTOM_A_CDC_COMP0_B4_CTL__POR (0x37) -#define TOMTOM_A_CDC_COMP1_B4_CTL (0x373) -#define TOMTOM_A_CDC_COMP1_B4_CTL__POR (0x37) -#define TOMTOM_A_CDC_COMP2_B4_CTL (0x37B) -#define TOMTOM_A_CDC_COMP2_B4_CTL__POR (0x37) -#define TOMTOM_A_CDC_COMP0_B5_CTL (0x36C) -#define TOMTOM_A_CDC_COMP0_B5_CTL__POR (0x7F) -#define TOMTOM_A_CDC_COMP1_B5_CTL (0x374) -#define TOMTOM_A_CDC_COMP1_B5_CTL__POR (0x7F) -#define TOMTOM_A_CDC_COMP2_B5_CTL (0x37C) -#define TOMTOM_A_CDC_COMP2_B5_CTL__POR (0x7F) -#define TOMTOM_A_CDC_COMP0_B6_CTL (0x36D) -#define TOMTOM_A_CDC_COMP0_B6_CTL__POR (0x00) -#define TOMTOM_A_CDC_COMP1_B6_CTL (0x375) -#define TOMTOM_A_CDC_COMP1_B6_CTL__POR (0x00) -#define TOMTOM_A_CDC_COMP2_B6_CTL (0x37D) -#define TOMTOM_A_CDC_COMP2_B6_CTL__POR (0x00) -#define TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS (0x36E) -#define TOMTOM_A_CDC_COMP0_SHUT_DOWN_STATUS__POR (0x03) -#define TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS (0x376) -#define TOMTOM_A_CDC_COMP1_SHUT_DOWN_STATUS__POR (0x03) -#define TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS (0x37E) -#define TOMTOM_A_CDC_COMP2_SHUT_DOWN_STATUS__POR (0x03) -#define TOMTOM_A_CDC_COMP0_FS_CFG (0x36F) -#define TOMTOM_A_CDC_COMP0_FS_CFG__POR (0x03) -#define TOMTOM_A_CDC_COMP1_FS_CFG (0x377) -#define TOMTOM_A_CDC_COMP1_FS_CFG__POR (0x03) -#define TOMTOM_A_CDC_COMP2_FS_CFG (0x37F) -#define TOMTOM_A_CDC_COMP2_FS_CFG__POR (0x03) -#define TOMTOM_A_CDC_CONN_RX1_B1_CTL (0x380) -#define TOMTOM_A_CDC_CONN_RX1_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX1_B2_CTL (0x381) -#define TOMTOM_A_CDC_CONN_RX1_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX1_B3_CTL (0x382) -#define TOMTOM_A_CDC_CONN_RX1_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX2_B1_CTL (0x383) -#define TOMTOM_A_CDC_CONN_RX2_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX2_B2_CTL (0x384) -#define TOMTOM_A_CDC_CONN_RX2_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX2_B3_CTL (0x385) -#define TOMTOM_A_CDC_CONN_RX2_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX3_B1_CTL (0x386) -#define TOMTOM_A_CDC_CONN_RX3_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX3_B2_CTL (0x387) -#define TOMTOM_A_CDC_CONN_RX3_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX4_B1_CTL (0x388) -#define TOMTOM_A_CDC_CONN_RX4_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX4_B2_CTL (0x389) -#define TOMTOM_A_CDC_CONN_RX4_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX5_B1_CTL (0x38A) -#define TOMTOM_A_CDC_CONN_RX5_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX5_B2_CTL (0x38B) -#define TOMTOM_A_CDC_CONN_RX5_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX6_B1_CTL (0x38C) -#define TOMTOM_A_CDC_CONN_RX6_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX6_B2_CTL (0x38D) -#define TOMTOM_A_CDC_CONN_RX6_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX7_B1_CTL (0x38E) -#define TOMTOM_A_CDC_CONN_RX7_B1_CTL__POR 
(0x00) -#define TOMTOM_A_CDC_CONN_RX7_B2_CTL (0x38F) -#define TOMTOM_A_CDC_CONN_RX7_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX7_B3_CTL (0x390) -#define TOMTOM_A_CDC_CONN_RX7_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_ANC_B1_CTL (0x391) -#define TOMTOM_A_CDC_CONN_ANC_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_ANC_B2_CTL (0x392) -#define TOMTOM_A_CDC_CONN_ANC_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_B1_CTL (0x393) -#define TOMTOM_A_CDC_CONN_TX_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_B2_CTL (0x394) -#define TOMTOM_A_CDC_CONN_TX_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_B3_CTL (0x395) -#define TOMTOM_A_CDC_CONN_TX_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_B4_CTL (0x396) -#define TOMTOM_A_CDC_CONN_TX_B4_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_EQ1_B1_CTL (0x397) -#define TOMTOM_A_CDC_CONN_EQ1_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_EQ1_B2_CTL (0x398) -#define TOMTOM_A_CDC_CONN_EQ1_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_EQ1_B3_CTL (0x399) -#define TOMTOM_A_CDC_CONN_EQ1_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_EQ1_B4_CTL (0x39A) -#define TOMTOM_A_CDC_CONN_EQ1_B4_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_EQ2_B1_CTL (0x39B) -#define TOMTOM_A_CDC_CONN_EQ2_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_EQ2_B2_CTL (0x39C) -#define TOMTOM_A_CDC_CONN_EQ2_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_EQ2_B3_CTL (0x39D) -#define TOMTOM_A_CDC_CONN_EQ2_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_EQ2_B4_CTL (0x39E) -#define TOMTOM_A_CDC_CONN_EQ2_B4_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_SRC1_B1_CTL (0x39F) -#define TOMTOM_A_CDC_CONN_SRC1_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_SRC1_B2_CTL (0x3A0) -#define TOMTOM_A_CDC_CONN_SRC1_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_SRC2_B1_CTL (0x3A1) -#define TOMTOM_A_CDC_CONN_SRC2_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_SRC2_B2_CTL (0x3A2) -#define TOMTOM_A_CDC_CONN_SRC2_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B1_CTL (0x3A3) -#define TOMTOM_A_CDC_CONN_TX_SB_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B2_CTL (0x3A4) -#define TOMTOM_A_CDC_CONN_TX_SB_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B3_CTL (0x3A5) -#define TOMTOM_A_CDC_CONN_TX_SB_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B4_CTL (0x3A6) -#define TOMTOM_A_CDC_CONN_TX_SB_B4_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B5_CTL (0x3A7) -#define TOMTOM_A_CDC_CONN_TX_SB_B5_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B6_CTL (0x3A8) -#define TOMTOM_A_CDC_CONN_TX_SB_B6_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B7_CTL (0x3A9) -#define TOMTOM_A_CDC_CONN_TX_SB_B7_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B8_CTL (0x3AA) -#define TOMTOM_A_CDC_CONN_TX_SB_B8_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B9_CTL (0x3AB) -#define TOMTOM_A_CDC_CONN_TX_SB_B9_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B10_CTL (0x3AC) -#define TOMTOM_A_CDC_CONN_TX_SB_B10_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_TX_SB_B11_CTL (0x3AD) -#define TOMTOM_A_CDC_CONN_TX_SB_B11_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX_SB_B1_CTL (0x3AE) -#define TOMTOM_A_CDC_CONN_RX_SB_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_RX_SB_B2_CTL (0x3AF) -#define TOMTOM_A_CDC_CONN_RX_SB_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_CLSH_CTL (0x3B0) -#define TOMTOM_A_CDC_CONN_CLSH_CTL__POR (0x00) -#define TOMTOM_A_CDC_CONN_MISC (0x3B1) -#define TOMTOM_A_CDC_CONN_MISC__POR (0x01) -#define TOMTOM_A_CDC_CONN_RX8_B1_CTL (0x3B3) -#define TOMTOM_A_CDC_CONN_RX8_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL (0x3B4) 
-#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_B1_CTL__POR (0x81) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST (0x3B5) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_CLIP_LEVEL_ADJUST__POR (0x00) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD (0x3B6) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_MIN_CLIP_THRESHOLD__POR (0xFF) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS (0x3B7) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_THRESHOLD_STATUS__POR (0x00) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK (0x3B8) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_SAMPLE_MARK__POR (0x04) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING (0x3B9) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR_BOOST_GATING__POR (0x04) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL (0x3BA) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_B1_CTL__POR (0x81) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST (0x3BB) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_CLIP_LEVEL_ADJUST__POR (0x00) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD (0x3BC) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_MIN_CLIP_THRESHOLD__POR (0xFF) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS (0x3BD) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_THRESHOLD_STATUS__POR (0x00) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK (0x3BE) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_SAMPLE_MARK__POR (0x04) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING (0x3BF) -#define TOMTOM_A_CDC_CLIP_ADJ_SPKR2_BOOST_GATING__POR (0x04) -#define TOMTOM_A_CDC_MBHC_EN_CTL (0x3C0) -#define TOMTOM_A_CDC_MBHC_EN_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_FIR_B1_CFG (0x3C1) -#define TOMTOM_A_CDC_MBHC_FIR_B1_CFG__POR (0x00) -#define TOMTOM_A_CDC_MBHC_FIR_B2_CFG (0x3C2) -#define TOMTOM_A_CDC_MBHC_FIR_B2_CFG__POR (0x06) -#define TOMTOM_A_CDC_MBHC_TIMER_B1_CTL (0x3C3) -#define TOMTOM_A_CDC_MBHC_TIMER_B1_CTL__POR (0x03) -#define TOMTOM_A_CDC_MBHC_TIMER_B2_CTL (0x3C4) -#define TOMTOM_A_CDC_MBHC_TIMER_B2_CTL__POR (0x09) -#define TOMTOM_A_CDC_MBHC_TIMER_B3_CTL (0x3C5) -#define TOMTOM_A_CDC_MBHC_TIMER_B3_CTL__POR (0x1E) -#define TOMTOM_A_CDC_MBHC_TIMER_B4_CTL (0x3C6) -#define TOMTOM_A_CDC_MBHC_TIMER_B4_CTL__POR (0x45) -#define TOMTOM_A_CDC_MBHC_TIMER_B5_CTL (0x3C7) -#define TOMTOM_A_CDC_MBHC_TIMER_B5_CTL__POR (0x04) -#define TOMTOM_A_CDC_MBHC_TIMER_B6_CTL (0x3C8) -#define TOMTOM_A_CDC_MBHC_TIMER_B6_CTL__POR (0x78) -#define TOMTOM_A_CDC_MBHC_B1_STATUS (0x3C9) -#define TOMTOM_A_CDC_MBHC_B1_STATUS__POR (0x00) -#define TOMTOM_A_CDC_MBHC_B2_STATUS (0x3CA) -#define TOMTOM_A_CDC_MBHC_B2_STATUS__POR (0x00) -#define TOMTOM_A_CDC_MBHC_B3_STATUS (0x3CB) -#define TOMTOM_A_CDC_MBHC_B3_STATUS__POR (0x00) -#define TOMTOM_A_CDC_MBHC_B4_STATUS (0x3CC) -#define TOMTOM_A_CDC_MBHC_B4_STATUS__POR (0x00) -#define TOMTOM_A_CDC_MBHC_B5_STATUS (0x3CD) -#define TOMTOM_A_CDC_MBHC_B5_STATUS__POR (0x00) -#define TOMTOM_A_CDC_MBHC_B1_CTL (0x3CE) -#define TOMTOM_A_CDC_MBHC_B1_CTL__POR (0xC0) -#define TOMTOM_A_CDC_MBHC_B2_CTL (0x3CF) -#define TOMTOM_A_CDC_MBHC_B2_CTL__POR (0x5D) -#define TOMTOM_A_CDC_MBHC_VOLT_B1_CTL (0x3D0) -#define TOMTOM_A_CDC_MBHC_VOLT_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_VOLT_B2_CTL (0x3D1) -#define TOMTOM_A_CDC_MBHC_VOLT_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_VOLT_B3_CTL (0x3D2) -#define TOMTOM_A_CDC_MBHC_VOLT_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_VOLT_B4_CTL (0x3D3) -#define TOMTOM_A_CDC_MBHC_VOLT_B4_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_VOLT_B5_CTL (0x3D4) -#define TOMTOM_A_CDC_MBHC_VOLT_B5_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_VOLT_B6_CTL (0x3D5) -#define TOMTOM_A_CDC_MBHC_VOLT_B6_CTL__POR (0x00) -#define 
TOMTOM_A_CDC_MBHC_VOLT_B7_CTL (0x3D6) -#define TOMTOM_A_CDC_MBHC_VOLT_B7_CTL__POR (0xFF) -#define TOMTOM_A_CDC_MBHC_VOLT_B8_CTL (0x3D7) -#define TOMTOM_A_CDC_MBHC_VOLT_B8_CTL__POR (0x07) -#define TOMTOM_A_CDC_MBHC_VOLT_B9_CTL (0x3D8) -#define TOMTOM_A_CDC_MBHC_VOLT_B9_CTL__POR (0xFF) -#define TOMTOM_A_CDC_MBHC_VOLT_B10_CTL (0x3D9) -#define TOMTOM_A_CDC_MBHC_VOLT_B10_CTL__POR (0x7F) -#define TOMTOM_A_CDC_MBHC_VOLT_B11_CTL (0x3DA) -#define TOMTOM_A_CDC_MBHC_VOLT_B11_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_VOLT_B12_CTL (0x3DB) -#define TOMTOM_A_CDC_MBHC_VOLT_B12_CTL__POR (0x80) -#define TOMTOM_A_CDC_MBHC_CLK_CTL (0x3DC) -#define TOMTOM_A_CDC_MBHC_CLK_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_INT_CTL (0x3DD) -#define TOMTOM_A_CDC_MBHC_INT_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_DEBUG_CTL (0x3DE) -#define TOMTOM_A_CDC_MBHC_DEBUG_CTL__POR (0x00) -#define TOMTOM_A_CDC_MBHC_SPARE (0x3DF) -#define TOMTOM_A_CDC_MBHC_SPARE__POR (0x00) -#define TOMTOM_A_CDC_RX8_B1_CTL (0x3E0) -#define TOMTOM_A_CDC_RX8_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX8_B2_CTL (0x3E1) -#define TOMTOM_A_CDC_RX8_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX8_B3_CTL (0x3E2) -#define TOMTOM_A_CDC_RX8_B3_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX8_B4_CTL (0x3E3) -#define TOMTOM_A_CDC_RX8_B4_CTL__POR (0x0B) -#define TOMTOM_A_CDC_RX8_B5_CTL (0x3E4) -#define TOMTOM_A_CDC_RX8_B5_CTL__POR (0x78) -#define TOMTOM_A_CDC_RX8_B6_CTL (0x3E5) -#define TOMTOM_A_CDC_RX8_B6_CTL__POR (0x80) -#define TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL (0x3E6) -#define TOMTOM_A_CDC_RX8_VOL_CTL_B1_CTL__POR (0x00) -#define TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL (0x3E7) -#define TOMTOM_A_CDC_RX8_VOL_CTL_B2_CTL__POR (0x00) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0 (0x3E8) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL0__POR (0x00) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1 (0x3E9) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL1__POR (0x00) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2 (0x3EA) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL2__POR (0x00) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3 (0x3EB) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL3__POR (0x00) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4 (0x3EC) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL4__POR (0x00) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5 (0x3ED) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL5__POR (0x00) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6 (0x3EE) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL6__POR (0x00) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7 (0x3EF) -#define TOMTOM_A_CDC_SPKR2_CLIPDET_VAL7__POR (0x00) -#define TOMTOM_A_CDC_BOOST_MODE_CTL (0x3F0) -#define TOMTOM_A_CDC_BOOST_MODE_CTL__POR (0x00) -#define TOMTOM_A_CDC_BOOST_THRESHOLD (0x3F1) -#define TOMTOM_A_CDC_BOOST_THRESHOLD__POR (0x02) -#define TOMTOM_A_CDC_BOOST_TAP_SEL (0x3F2) -#define TOMTOM_A_CDC_BOOST_TAP_SEL__POR (0x00) -#define TOMTOM_A_CDC_BOOST_HOLD_TIME (0x3F3) -#define TOMTOM_A_CDC_BOOST_HOLD_TIME__POR (0x02) -#define TOMTOM_A_CDC_BOOST_TRGR_EN (0x3F4) -#define TOMTOM_A_CDC_BOOST_TRGR_EN__POR (0x00) - -/* SLIMBUS Slave Registers */ -#define TOMTOM_SLIM_PGD_PORT_INT_EN0 (0x30) -#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_0 (0x34) -#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_RX_1 (0x35) -#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_0 (0x36) -#define TOMTOM_SLIM_PGD_PORT_INT_STATUS_TX_1 (0x37) -#define TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_0 (0x38) -#define TOMTOM_SLIM_PGD_PORT_INT_CLR_RX_1 (0x39) -#define TOMTOM_SLIM_PGD_PORT_INT_CLR_TX_0 (0x3A) -#define TOMTOM_SLIM_PGD_PORT_INT_CLR_TX_1 (0x3B) -#define TOMTOM_SLIM_PGD_PORT_INT_RX_SOURCE0 (0x60) -#define TOMTOM_SLIM_PGD_PORT_INT_TX_SOURCE0 (0x70) - -/* 
Macros for Packing Register Writes into a U32 */ -#define TOMTOM_PACKED_REG_SIZE sizeof(u32) - -#define TOMTOM_CODEC_PACK_ENTRY(reg, mask, val) ((val & 0xff)|\ - ((mask & 0xff) << 8)|((reg & 0xffff) << 16)) -#define TOMTOM_CODEC_UNPACK_ENTRY(packed, reg, mask, val) \ - do { \ - ((reg) = ((packed >> 16) & (0xffff))); \ - ((mask) = ((packed >> 8) & (0xff))); \ - ((val) = ((packed) & (0xff))); \ - } while (0) - -#define TOMTOM_SB_PGD_PORT_TX_BASE 0x50 -#define TOMTOM_SB_PGD_PORT_RX_BASE 0x40 -#define WCD9330_MAX_REGISTER 0x3FF -extern const u8 tomtom_reg_readable[WCD9330_MAX_REGISTER + 1]; -#endif -- GitLab From e4da9d7414589cf4c1f548fd1e1bae7f59e65fd7 Mon Sep 17 00:00:00 2001 From: Dhaval Patel Date: Mon, 19 Jun 2017 16:51:21 -0700 Subject: [PATCH 353/786] drm/msm/sde: remove out of bound access for qos lut parsing QOS LUT dtsi entries use existing hardware parsing APIs but dos not increase the size of array. This causes out of bound access while reading u32 lut array entry. This patch fixes the array size and also adds checks to avoid future out of bound access. It also fixes the memory leak in qos lut parsing. Change-Id: I98de052d03e1bcfd79d15ab99ca41d7782e56682 Signed-off-by: Dhaval Patel --- drivers/gpu/drm/msm/sde/sde_hw_catalog.c | 90 +++++++++++++++++------- 1 file changed, 64 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index 1cbbe1e5d7b4..218d4cffdf59 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -27,11 +27,11 @@ /** * Max hardware block in certain hardware. For ex: sspp pipes - * can have QSEED, pcc, igc, pa, csc, etc. This count is max - * 12 based on software design. It should be increased if any of the + * can have QSEED, pcc, igc, pa, csc, qos entries, etc. This count is + * 64 based on software design. It should be increased if any of the * hardware block has more subblocks. 
*/ -#define MAX_SDE_HW_BLK 12 +#define MAX_SDE_HW_BLK 64 /* each entry will have register address and bit offset in that register */ #define MAX_BIT_OFFSET 2 @@ -569,8 +569,16 @@ static struct sde_prop_type inline_rot_prop[INLINE_ROT_PROP_MAX] = { static int _parse_dt_u32_handler(struct device_node *np, char *prop_name, u32 *offsets, int len, bool mandatory) { - int rc = of_property_read_u32_array(np, prop_name, offsets, len); + int rc = -EINVAL; + if (len > MAX_SDE_HW_BLK) { + SDE_ERROR( + "prop: %s tries out of bound access for u32 array read len: %d\n", + prop_name, len); + return -E2BIG; + } + + rc = of_property_read_u32_array(np, prop_name, offsets, len); if (rc && mandatory) SDE_ERROR("mandatory prop: %s u32 array read len:%d\n", prop_name, len); @@ -592,6 +600,14 @@ static int _parse_dt_bit_offset(struct device_node *np, if (arr) { len /= sizeof(u32); len &= ~0x1; + + if (len > (MAX_SDE_HW_BLK * MAX_BIT_OFFSET)) { + SDE_ERROR( + "prop: %s len: %d will lead to out of bound access\n", + prop_name, len / MAX_BIT_OFFSET); + return -E2BIG; + } + for (i = 0, j = 0; i < len; j++) { PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 0) = be32_to_cpu(arr[i]); @@ -626,8 +642,8 @@ static int _validate_dt_entry(struct device_node *np, sde_prop[0].prop_name); if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) { if (sde_prop[0].is_mandatory) { - SDE_ERROR("invalid hw offset prop name:%s\"\ - count: %d\n", + SDE_ERROR( + "invalid hw offset prop name:%s count: %d\n", sde_prop[0].prop_name, *off_count); rc = -EINVAL; } @@ -670,8 +686,9 @@ static int _validate_dt_entry(struct device_node *np, sde_prop[i].type); break; } - SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\ - prop_count:%d\n", i, sde_prop[i].prop_name, + SDE_DEBUG( + "prop id:%d prop name:%s prop type:%d prop_count:%d\n", + i, sde_prop[i].prop_name, sde_prop[i].type, prop_count[i]); if (rc && sde_prop[i].is_mandatory && @@ -689,14 +706,16 @@ static int _validate_dt_entry(struct device_node *np, if (off_count && (prop_count[i] != *off_count) && sde_prop[i].is_mandatory) { - SDE_ERROR("prop:%s count:%d is different compared to \"\ - offset array:%d\n", sde_prop[i].prop_name, + SDE_ERROR( + "prop:%s count:%d is different compared to offset array:%d\n", + sde_prop[i].prop_name, prop_count[i], *off_count); rc = -EINVAL; goto end; } else if (off_count && prop_count[i] != *off_count) { - SDE_DEBUG("prop:%s count:%d is different compared to \"\ - offset array:%d\n", sde_prop[i].prop_name, + SDE_DEBUG( + "prop:%s count:%d is different compared to offset array:%d\n", + sde_prop[i].prop_name, prop_count[i], *off_count); rc = 0; prop_count[i] = 0; @@ -732,8 +751,9 @@ static int _read_dt_entry(struct device_node *np, case PROP_TYPE_U32: rc = of_property_read_u32(np, sde_prop[i].prop_name, &PROP_VALUE_ACCESS(prop_value, i, 0)); - SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\ - value:0x%x\n", i, sde_prop[i].prop_name, + SDE_DEBUG( + "prop id:%d prop name:%s prop type:%d value:0x%x\n", + i, sde_prop[i].prop_name, sde_prop[i].type, PROP_VALUE_ACCESS(prop_value, i, 0)); if (rc) @@ -743,8 +763,9 @@ static int _read_dt_entry(struct device_node *np, PROP_VALUE_ACCESS(prop_value, i, 0) = of_property_read_bool(np, sde_prop[i].prop_name); - SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\ - value:0x%x\n", i, sde_prop[i].prop_name, + SDE_DEBUG( + "prop id:%d prop name:%s prop type:%d value:0x%x\n", + i, sde_prop[i].prop_name, sde_prop[i].type, PROP_VALUE_ACCESS(prop_value, i, 0)); break; @@ -753,8 +774,9 @@ static int _read_dt_entry(struct device_node 
*np, &PROP_VALUE_ACCESS(prop_value, i, 0), prop_count[i], sde_prop[i].is_mandatory); if (rc && sde_prop[i].is_mandatory) { - SDE_ERROR("%s prop validation success but \"\ - read failed\n", sde_prop[i].prop_name); + SDE_ERROR( + "%s prop validation success but read failed\n", + sde_prop[i].prop_name); prop_exists[i] = false; goto end; } else { @@ -776,19 +798,21 @@ static int _read_dt_entry(struct device_node *np, prop_value, i, prop_count[i], sde_prop[i].is_mandatory); if (rc && sde_prop[i].is_mandatory) { - SDE_ERROR("%s prop validation success but \"\ - read failed\n", sde_prop[i].prop_name); + SDE_ERROR( + "%s prop validation success but read failed\n", + sde_prop[i].prop_name); prop_exists[i] = false; goto end; } else { if (rc) prop_exists[i] = false; - SDE_DEBUG("prop id:%d prop name:%s prop \"\ - type:%d", i, sde_prop[i].prop_name, + SDE_DEBUG( + "prop id:%d prop name:%s prop type:%d", + i, sde_prop[i].prop_name, sde_prop[i].type); for (j = 0; j < prop_count[i]; j++) - SDE_DEBUG(" count[%d]: bit:0x%x \"\ - off:0x%x \n", j, + SDE_DEBUG( + "count[%d]: bit:0x%x off:0x%x\n", j, PROP_BITVALUE_ACCESS(prop_value, i, j, 0), PROP_BITVALUE_ACCESS(prop_value, @@ -1125,6 +1149,13 @@ static int sde_sspp_parse_dt(struct device_node *np, snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u", sspp->id - SSPP_VIG0); + if (sspp->clk_ctrl >= SDE_CLK_CTRL_MAX) { + SDE_ERROR("%s: invalid clk ctrl: %d\n", + sblk->src_blk.name, sspp->clk_ctrl); + rc = -EINVAL; + goto end; + } + sblk->maxhdeciexp = MAX_HORZ_DECIMATION; sblk->maxvdeciexp = MAX_VERT_DECIMATION; @@ -1508,6 +1539,13 @@ static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg) PROP_VALUE_ACCESS(prop_value, WB_ID, i); wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i); + if (wb->clk_ctrl >= SDE_CLK_CTRL_MAX) { + SDE_ERROR("%s: invalid clk ctrl: %d\n", + wb->name, wb->clk_ctrl); + rc = -EINVAL; + goto end; + } + if (IS_SDE_MAJOR_MINOR_SAME((sde_cfg->hwversion), SDE_HW_VER_170)) wb->vbif_idx = VBIF_NRT; @@ -2512,7 +2550,7 @@ static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg) goto end; } - prop_value = kzalloc(SDE_PROP_MAX * + prop_value = kzalloc(PERF_PROP_MAX * sizeof(struct sde_prop_value), GFP_KERNEL); if (!prop_value) { rc = -ENOMEM; @@ -2678,7 +2716,7 @@ static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg) sizeof(struct sde_qos_lut_entry), GFP_KERNEL); if (!cfg->perf.qos_lut_tbl[j].entries) { rc = -ENOMEM; - goto end; + goto freeprop; } for (k = 0, m = 0; k < count; k++, m += entry_size) { -- GitLab From fd6c62ed79d5ac7ef464faa1ab1d2db8380b1582 Mon Sep 17 00:00:00 2001 From: Dhaval Patel Date: Fri, 12 May 2017 16:18:08 -0700 Subject: [PATCH 354/786] drm/msm/sde: disable vblank irqs before triggering panic Disable the generation of VBLANK irqs before triggering a panic via the sde debug facility. 
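The ordering matters here: the encoder must stop generating VBLANK/read-pointer interrupts before the debug dump path is allowed to panic the machine, otherwise a late interrupt can race with the state capture. A minimal sketch of that ordering follows; the wrapper function is hypothetical and headers are omitted, while sde_encoder_helper_unregister_irq(), the INTR_IDX_* ids and SDE_DBG_DUMP() are the helpers used in the diff below.

/* Illustrative fatal-error path: quiesce the IRQ source, then dump. */
static void example_fatal_error_path(struct sde_encoder_phys *phys_enc)
{
	/* Stop further VBLANK interrupts so they cannot race the dump. */
	sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);

	/* Capture debug state; per the commit text, this path then panics. */
	SDE_DBG_DUMP("sde", "vbif", "dbg_bus", "panic");
}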
Change-Id: I59d35b13ee56e38dbe0bf7715209495e7fda8833 Signed-off-by: Lloyd Atkinson --- drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c | 1 + drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index 447fdcc9e89c..cdf394fb93eb 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -275,6 +275,7 @@ static int _sde_encoder_phys_cmd_handle_ppdone_timeout( SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL); + sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR); SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl", "dsi1_phy", "vbif", "dbg_bus", "vbif_dbg_bus", "panic"); diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 007738a6541d..179adfa175c6 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -682,6 +682,7 @@ static void sde_encoder_phys_vid_prepare_for_kickoff( if (rc) { SDE_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n", ctl->idx, rc); + sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC); SDE_DBG_DUMP("panic"); } -- GitLab From ef44e10e87dc520e708327510be349d11a337616 Mon Sep 17 00:00:00 2001 From: Deepak Katragadda Date: Wed, 21 Jun 2017 10:30:46 -0700 Subject: [PATCH 355/786] clk: qcom: Change the init level for the clock drivers to subsys The current init level of core for the clock drivers makes the drivers probe slower since the mailbox and regulator drivers that the clock drivers are dependent on have an arch init level. The clock driver probe gets deferred as a result and is scheduled to much later. Change the initcall level for the clock drivers to subsys instead to make the probes happen sooner. 
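The reason subsys level helps: built-in initcalls run strictly by level (core, postcore, arch, subsys, fs, device, late), so a driver registered with subsys_initcall is guaranteed to register after the arch-level mailbox and regulator providers it depends on, instead of probing first and bouncing through -EPROBE_DEFER. A minimal, self-contained sketch of the pattern follows; the example_cc names are hypothetical, while the initcall and platform-driver APIs are the standard kernel ones.

#include <linux/init.h>
#include <linux/platform_device.h>

static int example_cc_probe(struct platform_device *pdev)
{
	/* A real clock controller would register its clocks here. */
	return 0;
}

static struct platform_driver example_cc_driver = {
	.probe = example_cc_probe,
	.driver = {
		.name = "example-cc",
	},
};

static int __init example_cc_init(void)
{
	return platform_driver_register(&example_cc_driver);
}
/* subsys_initcall (level 4) runs after arch_initcall (level 3). */
subsys_initcall(example_cc_init);

static void __exit example_cc_exit(void)
{
	platform_driver_unregister(&example_cc_driver);
}
module_exit(example_cc_exit);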
Change-Id: Icd304b6d3d8795b6b6868c2472cf4dc07ed82ac8 Signed-off-by: Deepak Katragadda --- drivers/clk/qcom/camcc-sdm845.c | 2 +- drivers/clk/qcom/clk-cpu-osm.c | 2 +- drivers/clk/qcom/clk-rpmh.c | 2 +- drivers/clk/qcom/dispcc-sdm845.c | 2 +- drivers/clk/qcom/gcc-sdm845.c | 2 +- drivers/clk/qcom/gpucc-sdm845.c | 4 ++-- drivers/clk/qcom/videocc-sdm845.c | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c index 86e148d870e9..1984d4af64f3 100644 --- a/drivers/clk/qcom/camcc-sdm845.c +++ b/drivers/clk/qcom/camcc-sdm845.c @@ -2059,7 +2059,7 @@ static int __init cam_cc_sdm845_init(void) { return platform_driver_register(&cam_cc_sdm845_driver); } -core_initcall(cam_cc_sdm845_init); +subsys_initcall(cam_cc_sdm845_init); static void __exit cam_cc_sdm845_exit(void) { diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index 7aef887c6f7b..78e0ae52983c 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -3455,7 +3455,7 @@ static int __init clk_cpu_osm_init(void) { return platform_driver_register(&clk_cpu_osm_driver); } -arch_initcall(clk_cpu_osm_init); +subsys_initcall(clk_cpu_osm_init); static void __exit clk_cpu_osm_exit(void) { diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 89bae2e2e7e4..e1cda903ec41 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -451,7 +451,7 @@ static int __init clk_rpmh_init(void) { return platform_driver_register(&clk_rpmh_driver); } -core_initcall(clk_rpmh_init); +subsys_initcall(clk_rpmh_init); static void __exit clk_rpmh_exit(void) { diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c index d6ecf122f8dd..53bfe77bad28 100644 --- a/drivers/clk/qcom/dispcc-sdm845.c +++ b/drivers/clk/qcom/dispcc-sdm845.c @@ -1130,7 +1130,7 @@ static int __init disp_cc_sdm845_init(void) { return platform_driver_register(&disp_cc_sdm845_driver); } -core_initcall(disp_cc_sdm845_init); +subsys_initcall(disp_cc_sdm845_init); static void __exit disp_cc_sdm845_exit(void) { diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index cd47e14178e4..25f9d62ba53e 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -3856,7 +3856,7 @@ static int __init gcc_sdm845_init(void) { return platform_driver_register(&gcc_sdm845_driver); } -core_initcall(gcc_sdm845_init); +subsys_initcall(gcc_sdm845_init); static void __exit gcc_sdm845_exit(void) { diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c index 8442890f52d1..3e9ab78dfaaa 100644 --- a/drivers/clk/qcom/gpucc-sdm845.c +++ b/drivers/clk/qcom/gpucc-sdm845.c @@ -594,7 +594,7 @@ static int __init gpu_cc_gfx_sdm845_init(void) { return platform_driver_register(&gpu_cc_gfx_sdm845_driver); } -arch_initcall(gpu_cc_gfx_sdm845_init); +subsys_initcall(gpu_cc_gfx_sdm845_init); static void __exit gpu_cc_gfx_sdm845_exit(void) { @@ -643,7 +643,7 @@ static int __init gpu_cc_sdm845_init(void) { return platform_driver_register(&gpu_cc_sdm845_driver); } -core_initcall(gpu_cc_sdm845_init); +subsys_initcall(gpu_cc_sdm845_init); static void __exit gpu_cc_sdm845_exit(void) { diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c index 362ea0b4fee7..ba4e591c2070 100644 --- a/drivers/clk/qcom/videocc-sdm845.c +++ b/drivers/clk/qcom/videocc-sdm845.c @@ -402,7 +402,7 @@ static int __init video_cc_sdm845_init(void) { return 
platform_driver_register(&video_cc_sdm845_driver); } -core_initcall(video_cc_sdm845_init); +subsys_initcall(video_cc_sdm845_init); static void __exit video_cc_sdm845_exit(void) { -- GitLab From 958c81960d5440c70befb0ae6f5523b66b65b125 Mon Sep 17 00:00:00 2001 From: Kyle Yan Date: Tue, 27 Jun 2017 13:16:31 -0700 Subject: [PATCH 356/786] ARM: dts: msm: Add initial overlay for QRD on SDM845 Add initial dtb overlay file for QRD targets on SDM845. Change-Id: I799e7542bfc2ccf446fcaea1c7dfaa1c71d7ad7d Signed-off-by: Kyle Yan --- arch/arm64/boot/dts/qcom/Makefile | 4 ++- .../boot/dts/qcom/sdm845-qrd-overlay.dts | 28 +++++++++++++++++++ arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi | 8 ------ 3 files changed, 31 insertions(+), 9 deletions(-) create mode 100644 arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile index 64b250dae824..3c8eaf8f3b2f 100644 --- a/arch/arm64/boot/dts/qcom/Makefile +++ b/arch/arm64/boot/dts/qcom/Makefile @@ -18,10 +18,12 @@ dtb-$(CONFIG_ARCH_SDM845) += sdm845-sim.dtb \ ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y) dtbo-$(CONFIG_ARCH_SDM845) += \ sdm845-cdp-overlay.dtbo \ - sdm845-mtp-overlay.dtbo + sdm845-mtp-overlay.dtbo \ + sdm845-qrd-overlay.dtbo sdm845-cdp-overlay.dtbo-base := sdm845.dtb sdm845-mtp-overlay.dtbo-base := sdm845.dtb +sdm845-qrd-overlay.dtbo-base := sdm845.dtb endif dtb-$(CONFIG_ARCH_SDM670) += sdm670-rumi.dtb \ diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts new file mode 100644 index 000000000000..6243fec06329 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts @@ -0,0 +1,28 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/dts-v1/; +/plugin/; + +#include +#include +#include +#include + +#include "sdm845-qrd.dtsi" + +/ { + model = "Qualcomm Technologies, Inc. SDM845 v1 QRD"; + compatible = "qcom,sdm845-qrd", "qcom,sdm845", "qcom,qrd"; + qcom,msm-id = <321 0x0>; + qcom,board-id = <11 0>; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi index f14293b904c6..a0d62e195bd7 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi @@ -40,14 +40,6 @@ #include "fg-gen3-batterydata-itech-3000mah.dtsi" #include "fg-gen3-batterydata-ascent-3450mah.dtsi" }; - - aliases { - serial0 = &qupv3_se9_2uart; - spi0 = &qupv3_se8_spi; - i2c0 = &qupv3_se10_i2c; - i2c1 = &qupv3_se3_i2c; - hsuart0 = &qupv3_se6_4uart; - }; }; &qupv3_se9_2uart { -- GitLab From c01a7f49dee7da48f065ee929bb7d529c12a054a Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Thu, 22 Jun 2017 19:59:57 -0700 Subject: [PATCH 357/786] msm: ipa: fix IPA MHI unit tests Connect a test producer pipe for MHI unit tests as the default LAN_PROD pipe is not connected on MHI config. 
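The fix pairs a pipe setup in the suite setup with a matching teardown: an APPS-side test producer system pipe is opened in DMA mode with the MHI consumer as its destination, and the tests then transmit on IPA_CLIENT_TEST_PROD instead of the unconnected LAN producer. A condensed sketch of that pairing follows; the example_ names are hypothetical, error handling and headers are trimmed, and the ipa_* calls, fields and client ids are the ones used in the diff below.

static u32 example_test_prod_hdl;

static int example_suite_setup(void)
{
	struct ipa_sys_connect_params sys_in;

	memset(&sys_in, 0, sizeof(sys_in));
	sys_in.client = IPA_CLIENT_TEST_PROD;		/* APPS-side test producer */
	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;		/* pass payload through in DMA mode */
	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS; /* destination consumer pipe */

	return ipa_setup_sys_pipe(&sys_in, &example_test_prod_hdl);
}

static void example_suite_teardown(void)
{
	ipa_teardown_sys_pipe(example_test_prod_hdl);
}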
Change-Id: I2fc1ff8758d222a738d570b5babc22ced2b43219 CRs-Fixed: 2066878 Acked-by: Ady Abraham Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/test/ipa_test_mhi.c | 22 +++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c index 5a41d641de4f..3a89c7dffdb0 100644 --- a/drivers/platform/msm/ipa/test/ipa_test_mhi.c +++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -326,6 +326,7 @@ struct ipa_test_mhi_context { struct ipa_mem_buffer out_buffer; u32 prod_hdl; u32 cons_hdl; + u32 test_prod_hdl; }; static struct ipa_test_mhi_context *test_mhi_ctx; @@ -774,6 +775,7 @@ static int ipa_mhi_test_setup_data_structures(void) static int ipa_test_mhi_suite_setup(void **ppriv) { int rc = 0; + struct ipa_sys_connect_params sys_in; IPA_UT_DBG("Start Setup\n"); @@ -815,9 +817,22 @@ static int ipa_test_mhi_suite_setup(void **ppriv) goto fail_free_mmio_spc; } + /* connect PROD pipe for remote wakeup */ + memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params)); + sys_in.client = IPA_CLIENT_TEST_PROD; + sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ; + sys_in.ipa_ep_cfg.mode.mode = IPA_DMA; + sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS; + if (ipa_setup_sys_pipe(&sys_in, &test_mhi_ctx->test_prod_hdl)) { + IPA_UT_ERR("setup sys pipe failed.\n"); + goto fail_destroy_data_structures; + } + *ppriv = test_mhi_ctx; return 0; +fail_destroy_data_structures: + ipa_mhi_test_destroy_data_structures(); fail_free_mmio_spc: ipa_test_mhi_free_mmio_space(); fail_iounmap: @@ -838,6 +853,7 @@ static int ipa_test_mhi_suite_teardown(void *priv) if (!test_mhi_ctx) return 0; + ipa_teardown_sys_pipe(test_mhi_ctx->test_prod_hdl); ipa_mhi_test_destroy_data_structures(); ipa_test_mhi_free_mmio_space(); iounmap(test_mhi_ctx->gsi_mmio); @@ -1811,7 +1827,7 @@ static int ipa_mhi_test_create_aggr_open_frame(void) memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); } - rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL); + rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL); if (rc) { IPA_UT_LOG("ipa_tx_dp failed %d\n", rc); IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail"); @@ -1982,7 +1998,7 @@ static int ipa_mhi_test_suspend_host_wakeup(void) memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1); } - rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL); + rc = ipa_tx_dp(IPA_CLIENT_TEST_PROD, skb, NULL); if (rc) { IPA_UT_LOG("ipa_tx_dp failed %d\n", rc); IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail"); -- GitLab From 364753580ad8a99e218ff81143c8d0da019ede63 Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Tue, 13 Jun 2017 09:25:47 -0700 Subject: [PATCH 358/786] msm: rmnet_ipa3: fix QMAP command xfer Fix QMAP command packets TX to modem. These packets needs to be sent to Q6_WAN_CONS pipe. 
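Two details make the corrected path work: the destination must be the Q6 WAN consumer rather than the LAN consumer, and pkt_init_dst_ep carries the endpoint index returned by ipa3_get_ep_mapping(), not the raw ipa_client_type value. A minimal sketch mirroring the corrected transmit path in the diff below; the wrapper name is hypothetical and headers are omitted.

static int example_xmit_qmap_cmd(struct sk_buff *skb)
{
	struct ipa_tx_meta meta;

	memset(&meta, 0, sizeof(meta));
	meta.pkt_init_dst_ep_valid = true;
	meta.pkt_init_dst_ep_remote = true;
	/* Map the client enum to its endpoint index before handing it to IPA. */
	meta.pkt_init_dst_ep = ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS);

	return ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, &meta);
}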
Change-Id: Ib718ad7308004ba7727e30e64f4b50bf4e521da3 CRs-Fixed: 2068048 Acked-by: Ady Abraham Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index f408f23536cd..b88296798094 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -1141,7 +1141,8 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev) memset(&meta, 0, sizeof(meta)); meta.pkt_init_dst_ep_valid = true; meta.pkt_init_dst_ep_remote = true; - meta.pkt_init_dst_ep = IPA_CLIENT_Q6_LAN_CONS; + meta.pkt_init_dst_ep = + ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS); ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, &meta); } else { ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL); -- GitLab From 729fb84e144e5a4895a34a2671adbe242d841d5b Mon Sep 17 00:00:00 2001 From: Vidyakumar Athota Date: Thu, 22 Jun 2017 22:27:08 -0700 Subject: [PATCH 359/786] soc: qcom: avoid calling sleeping functions in interrupt context Since glink callbacks are called in interrupt context, avoid using functions that can sleep in the callbacks. Use separate work queue to free the buffer. Change-Id: I8abae4becb4c4ac1feb8794db4d2a6bb378943ac Signed-off-by: Vidyakumar Athota --- drivers/soc/qcom/wcd-dsp-glink.c | 58 ++++++++++++++++++++++++-------- 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c index c8bb13d6a16d..870b9f7455d3 100644 --- a/drivers/soc/qcom/wcd-dsp-glink.c +++ b/drivers/soc/qcom/wcd-dsp-glink.c @@ -58,6 +58,7 @@ struct wdsp_glink_rsp_que { struct wdsp_glink_tx_buf { struct work_struct tx_work; + struct work_struct free_tx_work; /* Glink channel information */ struct wdsp_glink_ch *ch; @@ -124,6 +125,46 @@ struct wdsp_glink_priv { static int wdsp_glink_close_ch(struct wdsp_glink_ch *ch); static int wdsp_glink_open_ch(struct wdsp_glink_ch *ch); +/* + * wdsp_glink_free_tx_buf_work - Work function to free tx pkt + * work: Work structure + */ +static void wdsp_glink_free_tx_buf_work(struct work_struct *work) +{ + struct wdsp_glink_tx_buf *tx_buf; + + tx_buf = container_of(work, struct wdsp_glink_tx_buf, + free_tx_work); + vfree(tx_buf); +} + +/* + * wdsp_glink_free_tx_buf - Function to free tx buffer + * priv: Pointer to the channel + * pkt_priv: Pointer to the tx buffer + */ +static void wdsp_glink_free_tx_buf(const void *priv, const void *pkt_priv) +{ + struct wdsp_glink_tx_buf *tx_buf = (struct wdsp_glink_tx_buf *)pkt_priv; + struct wdsp_glink_priv *wpriv; + struct wdsp_glink_ch *ch; + + if (!priv) { + pr_err("%s: Invalid priv\n", __func__); + return; + } + if (!tx_buf) { + pr_err("%s: Invalid tx_buf\n", __func__); + return; + } + + ch = (struct wdsp_glink_ch *)priv; + wpriv = ch->wpriv; + /* Work queue to free tx pkt */ + INIT_WORK(&tx_buf->free_tx_work, wdsp_glink_free_tx_buf_work); + queue_work(wpriv->work_queue, &tx_buf->free_tx_work); +} + /* * wdsp_glink_notify_rx - Glink notify rx callback for responses * handle: Opaque Channel handle returned by GLink @@ -183,14 +224,8 @@ static void wdsp_glink_notify_rx(void *handle, const void *priv, static void wdsp_glink_notify_tx_done(void *handle, const void *priv, const void *pkt_priv, const void *ptr) { - if (!pkt_priv) { - pr_err("%s: Invalid parameter\n", __func__); - return; - } - /* Free tx pkt */ - vfree(pkt_priv); + wdsp_glink_free_tx_buf(priv, pkt_priv); } 
- /* * wdsp_glink_notify_tx_abort - Glink notify tx abort callback to * free tx buffer @@ -201,12 +236,7 @@ static void wdsp_glink_notify_tx_done(void *handle, const void *priv, static void wdsp_glink_notify_tx_abort(void *handle, const void *priv, const void *pkt_priv) { - if (!pkt_priv) { - pr_err("%s: Invalid parameter\n", __func__); - return; - } - /* Free tx pkt */ - vfree(pkt_priv); + wdsp_glink_free_tx_buf(priv, pkt_priv); } /* @@ -555,7 +585,7 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv, goto done; } ch = kcalloc(no_of_channels, sizeof(struct wdsp_glink_ch *), - GFP_KERNEL); + GFP_ATOMIC); if (!ch) { ret = -ENOMEM; goto done; -- GitLab From 065ca4883e037f5fbf54122e0477919c196f2dfd Mon Sep 17 00:00:00 2001 From: Mukesh Kumar Savaliya Date: Tue, 6 Jun 2017 14:44:45 +0530 Subject: [PATCH 360/786] ARM: dts: msm: Populate QUPV3 serial Engine device nodes for SDM670 QUPv3 is a GENI based core with multiple Serial Engines(SE). Each SE instance can be configured to be either an I2C/SPI/UART master for a given platform. Setup a device tree file to declare all possible SE device nodes which the platform specific device tree files can enable. Also enable the console port on se10 for RUMI target. Change-Id: Iaff551a94d9d82d33a0c7cad186df3ab8f42f8c0 Signed-off-by: Mukesh Kumar Savaliya --- arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi | 1098 ++++++++++++++++++ arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi | 699 +++++++++++ arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi | 36 + arch/arm64/boot/dts/qcom/sdm670.dtsi | 2 + 4 files changed, 1835 insertions(+) create mode 100644 arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi index 09ce9d2936f8..86e29486d14c 100644 --- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi @@ -19,5 +19,1103 @@ #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; + + /* QUPv3 South SE mappings */ + /* SE 0 pin mappings */ + qupv3_se0_i2c_pins: qupv3_se0_i2c_pins { + qupv3_se0_i2c_active: qupv3_se0_i2c_active { + mux { + pins = "gpio0", "gpio1"; + function = "qup0"; + }; + + config { + pins = "gpio0", "gpio1"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se0_i2c_sleep: qupv3_se0_i2c_sleep { + mux { + pins = "gpio0", "gpio1"; + function = "gpio"; + }; + + config { + pins = "gpio0", "gpio1"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se0_spi_pins: qupv3_se0_spi_pins { + qupv3_se0_spi_active: qupv3_se0_spi_active { + mux { + pins = "gpio0", "gpio1", "gpio2", + "gpio3"; + function = "qup0"; + }; + + config { + pins = "gpio0", "gpio1", "gpio2", + "gpio3"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se0_spi_sleep: qupv3_se0_spi_sleep { + mux { + pins = "gpio0", "gpio1", "gpio2", + "gpio3"; + function = "gpio"; + }; + + config { + pins = "gpio0", "gpio1", "gpio2", + "gpio3"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 1 pin mappings */ + qupv3_se1_i2c_pins: qupv3_se1_i2c_pins { + qupv3_se1_i2c_active: qupv3_se1_i2c_active { + mux { + pins = "gpio17", "gpio18"; + function = "qup1"; + }; + + config { + pins = "gpio17", "gpio18"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se1_i2c_sleep: qupv3_se1_i2c_sleep { + mux { + pins = "gpio17", "gpio18"; + function = "gpio"; + }; + + config { + pins = "gpio17", "gpio18"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se1_spi_pins: qupv3_se1_spi_pins { + 
qupv3_se1_spi_active: qupv3_se1_spi_active { + mux { + pins = "gpio17", "gpio18", "gpio19", + "gpio20"; + function = "qup1"; + }; + + config { + pins = "gpio17", "gpio18", "gpio19", + "gpio20"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se1_spi_sleep: qupv3_se1_spi_sleep { + mux { + pins = "gpio17", "gpio18", "gpio19", + "gpio20"; + function = "gpio"; + }; + + config { + pins = "gpio17", "gpio18", "gpio19", + "gpio20"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 2 pin mappings */ + qupv3_se2_i2c_pins: qupv3_se2_i2c_pins { + qupv3_se2_i2c_active: qupv3_se2_i2c_active { + mux { + pins = "gpio27", "gpio28"; + function = "qup2"; + }; + + config { + pins = "gpio27", "gpio28"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se2_i2c_sleep: qupv3_se2_i2c_sleep { + mux { + pins = "gpio27", "gpio28"; + function = "gpio"; + }; + + config { + pins = "gpio27", "gpio28"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se2_spi_pins: qupv3_se2_spi_pins { + qupv3_se2_spi_active: qupv3_se2_spi_active { + mux { + pins = "gpio27", "gpio28", "gpio29", + "gpio30"; + function = "qup2"; + }; + + config { + pins = "gpio27", "gpio28", "gpio29", + "gpio30"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se2_spi_sleep: qupv3_se2_spi_sleep { + mux { + pins = "gpio27", "gpio28", "gpio29", + "gpio30"; + function = "gpio"; + }; + + config { + pins = "gpio27", "gpio28", "gpio29", + "gpio30"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 3 pin mappings */ + qupv3_se3_i2c_pins: qupv3_se3_i2c_pins { + qupv3_se3_i2c_active: qupv3_se3_i2c_active { + mux { + pins = "gpio41", "gpio42"; + function = "qup3"; + }; + + config { + pins = "gpio41", "gpio42"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se3_i2c_sleep: qupv3_se3_i2c_sleep { + mux { + pins = "gpio41", "gpio42"; + function = "gpio"; + }; + + config { + pins = "gpio41", "gpio42"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se3_spi_pins: qupv3_se3_spi_pins { + qupv3_se3_spi_active: qupv3_se3_spi_active { + mux { + pins = "gpio41", "gpio42", "gpio43", + "gpio44"; + function = "qup3"; + }; + + config { + pins = "gpio41", "gpio42", "gpio43", + "gpio44"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se3_spi_sleep: qupv3_se3_spi_sleep { + mux { + pins = "gpio41", "gpio42", "gpio43", + "gpio44"; + function = "gpio"; + }; + + config { + pins = "gpio41", "gpio42", "gpio43", + "gpio44"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 4 pin mappings */ + qupv3_se4_i2c_pins: qupv3_se4_i2c_pins { + qupv3_se4_i2c_active: qupv3_se4_i2c_active { + mux { + pins = "gpio89", "gpio90"; + function = "qup4"; + }; + + config { + pins = "gpio89", "gpio90"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se4_i2c_sleep: qupv3_se4_i2c_sleep { + mux { + pins = "gpio89", "gpio90"; + function = "gpio"; + }; + + config { + pins = "gpio89", "gpio90"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se4_spi_pins: qupv3_se4_spi_pins { + qupv3_se4_spi_active: qupv3_se4_spi_active { + mux { + pins = "gpio89", "gpio90", "gpio91", + "gpio92"; + function = "qup4"; + }; + + config { + pins = "gpio89", "gpio90", "gpio91", + "gpio92"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se4_spi_sleep: qupv3_se4_spi_sleep { + mux { + pins = "gpio89", "gpio90", "gpio91", + "gpio92"; + function = "gpio"; + }; + + config { + pins = "gpio89", "gpio90", "gpio91", + "gpio92"; + drive-strength = <6>; + bias-disable; + }; + 
}; + }; + + /* SE 5 pin mappings */ + qupv3_se5_i2c_pins: qupv3_se5_i2c_pins { + qupv3_se5_i2c_active: qupv3_se5_i2c_active { + mux { + pins = "gpio85", "gpio86"; + function = "qup5"; + }; + + config { + pins = "gpio85", "gpio86"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se5_i2c_sleep: qupv3_se5_i2c_sleep { + mux { + pins = "gpio85", "gpio86"; + function = "gpio"; + }; + + config { + pins = "gpio85", "gpio86"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se5_spi_pins: qupv3_se5_spi_pins { + qupv3_se5_spi_active: qupv3_se5_spi_active { + mux { + pins = "gpio85", "gpio86", "gpio87", + "gpio88"; + function = "qup5"; + }; + + config { + pins = "gpio85", "gpio86", "gpio87", + "gpio88"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se5_spi_sleep: qupv3_se5_spi_sleep { + mux { + pins = "gpio85", "gpio86", "gpio87", + "gpio88"; + function = "gpio"; + }; + + config { + pins = "gpio85", "gpio86", "gpio87", + "gpio88"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 6 pin mappings */ + qupv3_se6_i2c_pins: qupv3_se6_i2c_pins { + qupv3_se6_i2c_active: qupv3_se6_i2c_active { + mux { + pins = "gpio45", "gpio46"; + function = "qup6"; + }; + + config { + pins = "gpio45", "gpio46"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se6_i2c_sleep: qupv3_se6_i2c_sleep { + mux { + pins = "gpio45", "gpio46"; + function = "gpio"; + }; + + config { + pins = "gpio45", "gpio46"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se6_4uart_pins: qupv3_se6_4uart_pins { + qupv3_se6_4uart_active: qupv3_se6_4uart_active { + mux { + pins = "gpio45", "gpio46", "gpio47", + "gpio48"; + function = "qup6"; + }; + + config { + pins = "gpio45", "gpio46", "gpio47", + "gpio48"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se6_4uart_sleep: qupv3_se6_4uart_sleep { + mux { + pins = "gpio45", "gpio46", "gpio47", + "gpio48"; + function = "gpio"; + }; + + config { + pins = "gpio45", "gpio46", "gpio47", + "gpio48"; + drive-strength = <2>; + bias-disable; + }; + }; + }; + + qupv3_se6_spi_pins: qupv3_se6_spi_pins { + qupv3_se6_spi_active: qupv3_se6_spi_active { + mux { + pins = "gpio45", "gpio46", "gpio47", + "gpio48"; + function = "qup6"; + }; + + config { + pins = "gpio45", "gpio46", "gpio47", + "gpio48"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se6_spi_sleep: qupv3_se6_spi_sleep { + mux { + pins = "gpio45", "gpio46", "gpio47", + "gpio48"; + function = "gpio"; + }; + + config { + pins = "gpio45", "gpio46", "gpio47", + "gpio48"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 7 pin mappings */ + qupv3_se7_i2c_pins: qupv3_se7_i2c_pins { + qupv3_se7_i2c_active: qupv3_se7_i2c_active { + mux { + pins = "gpio93", "gpio94"; + function = "qup7"; + }; + + config { + pins = "gpio93", "gpio94"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se7_i2c_sleep: qupv3_se7_i2c_sleep { + mux { + pins = "gpio93", "gpio94"; + function = "gpio"; + }; + + config { + pins = "gpio93", "gpio94"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se7_4uart_pins: qupv3_se7_4uart_pins { + qupv3_se7_4uart_active: qupv3_se7_4uart_active { + mux { + pins = "gpio93", "gpio94", "gpio95", + "gpio96"; + function = "qup7"; + }; + + config { + pins = "gpio93", "gpio94", "gpio95", + "gpio96"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se7_4uart_sleep: qupv3_se7_4uart_sleep { + mux { + pins = "gpio93", "gpio94", "gpio95", + "gpio96"; + function = "gpio"; + }; + + config { + pins = "gpio93", 
"gpio94", "gpio95", + "gpio96"; + drive-strength = <2>; + bias-disable; + }; + }; + }; + + qupv3_se7_spi_pins: qupv3_se7_spi_pins { + qupv3_se7_spi_active: qupv3_se7_spi_active { + mux { + pins = "gpio93", "gpio94", "gpio95", + "gpio96"; + function = "qup7"; + }; + + config { + pins = "gpio93", "gpio94", "gpio95", + "gpio96"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se7_spi_sleep: qupv3_se7_spi_sleep { + mux { + pins = "gpio93", "gpio94", "gpio95", + "gpio96"; + function = "gpio"; + }; + + config { + pins = "gpio93", "gpio94", "gpio95", + "gpio96"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* QUPv3 North instances */ + /* SE 8 pin mappings */ + qupv3_se8_i2c_pins: qupv3_se8_i2c_pins { + qupv3_se8_i2c_active: qupv3_se8_i2c_active { + mux { + pins = "gpio65", "gpio66"; + function = "qup8"; + }; + + config { + pins = "gpio65", "gpio66"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se8_i2c_sleep: qupv3_se8_i2c_sleep { + mux { + pins = "gpio65", "gpio66"; + function = "gpio"; + }; + + config { + pins = "gpio65", "gpio66"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se8_spi_pins: qupv3_se8_spi_pins { + qupv3_se8_spi_active: qupv3_se8_spi_active { + mux { + pins = "gpio65", "gpio66", "gpio67", + "gpio68"; + function = "qup8"; + }; + + config { + pins = "gpio65", "gpio66", "gpio67", + "gpio68"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se8_spi_sleep: qupv3_se8_spi_sleep { + mux { + pins = "gpio65", "gpio66", "gpio67", + "gpio68"; + function = "gpio"; + }; + + config { + pins = "gpio65", "gpio66", "gpio67", + "gpio68"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 9 pin mappings */ + qupv3_se9_i2c_pins: qupv3_se9_i2c_pins { + qupv3_se9_i2c_active: qupv3_se9_i2c_active { + mux { + pins = "gpio6", "gpio7"; + function = "qup9"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se9_i2c_sleep: qupv3_se9_i2c_sleep { + mux { + pins = "gpio6", "gpio7"; + function = "gpio"; + }; + + config { + pins = "gpio6", "gpio7"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se9_2uart_pins: qupv3_se9_2uart_pins { + qupv3_se9_2uart_active: qupv3_se9_2uart_active { + mux { + pins = "gpio4", "gpio5"; + function = "qup9"; + }; + + config { + pins = "gpio4", "gpio5"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se9_2uart_sleep: qupv3_se9_2uart_sleep { + mux { + pins = "gpio4", "gpio5"; + function = "gpio"; + }; + + config { + pins = "gpio4", "gpio5"; + drive-strength = <2>; + bias-disable; + }; + }; + }; + + qupv3_se9_spi_pins: qupv3_se9_spi_pins { + qupv3_se9_spi_active: qupv3_se9_spi_active { + mux { + pins = "gpio4", "gpio5", "gpio6", + "gpio7"; + function = "qup9"; + }; + + config { + pins = "gpio4", "gpio5", "gpio6", + "gpio7"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se9_spi_sleep: qupv3_se9_spi_sleep { + mux { + pins = "gpio4", "gpio5", "gpio6", + "gpio7"; + function = "gpio"; + }; + + config { + pins = "gpio4", "gpio5", "gpio6", + "gpio7"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 10 pin mappings */ + qupv3_se10_i2c_pins: qupv3_se10_i2c_pins { + qupv3_se10_i2c_active: qupv3_se10_i2c_active { + mux { + pins = "gpio55", "gpio56"; + function = "qup10"; + }; + + config { + pins = "gpio55", "gpio56"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se10_i2c_sleep: qupv3_se10_i2c_sleep { + mux { + pins = "gpio55", "gpio56"; + function = "gpio"; + }; + + config { + pins = 
"gpio55", "gpio56"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se10_2uart_pins: qupv3_se10_2uart_pins { + qupv3_se10_2uart_active: qupv3_se10_2uart_active { + mux { + pins = "gpio53", "gpio54"; + function = "qup10"; + }; + + config { + pins = "gpio53", "gpio54"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se10_2uart_sleep: qupv3_se10_2uart_sleep { + mux { + pins = "gpio53", "gpio54"; + function = "gpio"; + }; + + config { + pins = "gpio53", "gpio54"; + drive-strength = <2>; + bias-disable; + }; + }; + }; + + qupv3_se10_spi_pins: qupv3_se10_spi_pins { + qupv3_se10_spi_active: qupv3_se10_spi_active { + mux { + pins = "gpio53", "gpio54", "gpio55", + "gpio56"; + function = "qup10"; + }; + + config { + pins = "gpio53", "gpio54", "gpio55", + "gpio56"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se10_spi_sleep: qupv3_se10_spi_sleep { + mux { + pins = "gpio53", "gpio54", "gpio55", + "gpio56"; + function = "gpio"; + }; + + config { + pins = "gpio53", "gpio54", "gpio55", + "gpio56"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 11 pin mappings */ + qupv3_se11_i2c_pins: qupv3_se11_i2c_pins { + qupv3_se11_i2c_active: qupv3_se11_i2c_active { + mux { + pins = "gpio31", "gpio32"; + function = "qup11"; + }; + + config { + pins = "gpio31", "gpio32"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se11_i2c_sleep: qupv3_se11_i2c_sleep { + mux { + pins = "gpio31", "gpio32"; + function = "gpio"; + }; + + config { + pins = "gpio31", "gpio32"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se11_spi_pins: qupv3_se11_spi_pins { + qupv3_se11_spi_active: qupv3_se11_spi_active { + mux { + pins = "gpio31", "gpio32", "gpio33", + "gpio34"; + function = "qup11"; + }; + + config { + pins = "gpio31", "gpio32", "gpio33", + "gpio34"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se11_spi_sleep: qupv3_se11_spi_sleep { + mux { + pins = "gpio31", "gpio32", "gpio33", + "gpio34"; + function = "gpio"; + }; + + config { + pins = "gpio31", "gpio32", "gpio33", + "gpio34"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 12 pin mappings */ + qupv3_se12_i2c_pins: qupv3_se12_i2c_pins { + qupv3_se12_i2c_active: qupv3_se12_i2c_active { + mux { + pins = "gpio49", "gpio50"; + function = "qup12"; + }; + + config { + pins = "gpio49", "gpio50"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se12_i2c_sleep: qupv3_se12_i2c_sleep { + mux { + pins = "gpio49", "gpio50"; + function = "gpio"; + }; + + config { + pins = "gpio49", "gpio50"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se12_spi_pins: qupv3_se12_spi_pins { + qupv3_se12_spi_active: qupv3_se12_spi_active { + mux { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + function = "qup12"; + }; + + config { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se12_spi_sleep: qupv3_se12_spi_sleep { + mux { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + function = "gpio"; + }; + + config { + pins = "gpio49", "gpio50", "gpio51", + "gpio52"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 13 pin mappings */ + qupv3_se13_i2c_pins: qupv3_se13_i2c_pins { + qupv3_se13_i2c_active: qupv3_se13_i2c_active { + mux { + pins = "gpio105", "gpio106"; + function = "qup13"; + }; + + config { + pins = "gpio105", "gpio106"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se13_i2c_sleep: qupv3_se13_i2c_sleep { + mux { + pins = "gpio105", "gpio106"; + function = 
"gpio"; + }; + + config { + pins = "gpio105", "gpio106"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se13_spi_pins: qupv3_se13_spi_pins { + qupv3_se13_spi_active: qupv3_se13_spi_active { + mux { + pins = "gpio105", "gpio106", "gpio107", + "gpio108"; + function = "qup13"; + }; + + config { + pins = "gpio105", "gpio106", "gpio107", + "gpio108"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se13_spi_sleep: qupv3_se13_spi_sleep { + mux { + pins = "gpio105", "gpio106", "gpio107", + "gpio108"; + function = "gpio"; + }; + + config { + pins = "gpio105", "gpio106", "gpio107", + "gpio108"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 14 pin mappings */ + qupv3_se14_i2c_pins: qupv3_se14_i2c_pins { + qupv3_se14_i2c_active: qupv3_se14_i2c_active { + mux { + pins = "gpio33", "gpio34"; + function = "qup14"; + }; + + config { + pins = "gpio33", "gpio34"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se14_i2c_sleep: qupv3_se14_i2c_sleep { + mux { + pins = "gpio33", "gpio34"; + function = "gpio"; + }; + + config { + pins = "gpio33", "gpio34"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se14_spi_pins: qupv3_se14_spi_pins { + qupv3_se14_spi_active: qupv3_se14_spi_active { + mux { + pins = "gpio31", "gpio32", "gpio33", + "gpio34"; + function = "qup14"; + }; + + config { + pins = "gpio31", "gpio32", "gpio33", + "gpio34"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se14_spi_sleep: qupv3_se14_spi_sleep { + mux { + pins = "gpio31", "gpio32", "gpio33", + "gpio34"; + function = "gpio"; + }; + + config { + pins = "gpio31", "gpio32", "gpio33", + "gpio34"; + drive-strength = <6>; + bias-disable; + }; + }; + }; + + /* SE 15 pin mappings */ + qupv3_se15_i2c_pins: qupv3_se15_i2c_pins { + qupv3_se15_i2c_active: qupv3_se15_i2c_active { + mux { + pins = "gpio81", "gpio82"; + function = "qup15"; + }; + + config { + pins = "gpio81", "gpio82"; + drive-strength = <2>; + bias-disable; + }; + }; + + qupv3_se15_i2c_sleep: qupv3_se15_i2c_sleep { + mux { + pins = "gpio81", "gpio82"; + function = "gpio"; + }; + + config { + pins = "gpio81", "gpio82"; + drive-strength = <2>; + bias-pull-up; + }; + }; + }; + + qupv3_se15_spi_pins: qupv3_se15_spi_pins { + qupv3_se15_spi_active: qupv3_se15_spi_active { + mux { + pins = "gpio81", "gpio82", "gpio83", + "gpio84"; + function = "qup15"; + }; + + config { + pins = "gpio81", "gpio82", "gpio83", + "gpio84"; + drive-strength = <6>; + bias-disable; + }; + }; + + qupv3_se15_spi_sleep: qupv3_se15_spi_sleep { + mux { + pins = "gpio81", "gpio82", "gpio83", + "gpio84"; + function = "gpio"; + }; + + config { + pins = "gpio81", "gpio82", "gpio83", + "gpio84"; + drive-strength = <6>; + bias-disable; + }; + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi new file mode 100644 index 000000000000..0fb455f57ae6 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdm670-qupv3.dtsi @@ -0,0 +1,699 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +&soc { + /* QUPv3 South instances */ + qupv3_0: qcom,qupv3_0_geni_se@8c0000 { + compatible = "qcom,qupv3-geni-se"; + reg = <0x8c0000 0x6000>; + qcom,bus-mas-id = ; + qcom,bus-slv-id = ; + qcom,iommu-s1-bypass; + + iommu_qupv3_0_geni_se_cb: qcom,iommu_qupv3_0_geni_se_cb { + compatible = "qcom,qupv3-geni-se-cb"; + iommus = <&apps_smmu 0x003 0x0>; + }; + }; + + /* + * HS UART instances. HS UART usecases can be supported on these + * instances only. + */ + qupv3_se6_4uart: qcom,qup_uart@0x898000 { + compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart"; + reg = <0x898000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se6_4uart_active>; + pinctrl-1 = <&qupv3_se6_4uart_sleep>; + interrupts-extended = <&intc GIC_SPI 607 0>, + <&tlmm 48 0>; + status = "disabled"; + qcom,wakeup-byte = <0xFD>; + qcom,wrapper-core = <&qupv3_0>; + }; + + qupv3_se7_4uart: qcom,qup_uart@0x89c000 { + compatible = "qcom,msm-geni-serial-hs", "qcom,msm-geni-uart"; + reg = <0x89c000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se7_4uart_active>; + pinctrl-1 = <&qupv3_se7_4uart_sleep>; + interrupts-extended = <&intc GIC_SPI 608 0>, + <&tlmm 96 0>; + status = "disabled"; + qcom,wakeup-byte = <0xFD>; + qcom,wrapper-core = <&qupv3_0>; + }; + + /* I2C */ + qupv3_se0_i2c: i2c@880000 { + compatible = "qcom,i2c-geni"; + reg = <0x880000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se0_i2c_active>; + pinctrl-1 = <&qupv3_se0_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se1_i2c: i2c@884000 { + compatible = "qcom,i2c-geni"; + reg = <0x884000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se1_i2c_active>; + pinctrl-1 = <&qupv3_se1_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se2_i2c: i2c@888000 { + compatible = "qcom,i2c-geni"; + reg = <0x888000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se2_i2c_active>; + pinctrl-1 = <&qupv3_se2_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se3_i2c: i2c@88c000 { + compatible = "qcom,i2c-geni"; + reg = <0x88c000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc 
GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se3_i2c_active>; + pinctrl-1 = <&qupv3_se3_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se4_i2c: i2c@890000 { + compatible = "qcom,i2c-geni"; + reg = <0x890000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se4_i2c_active>; + pinctrl-1 = <&qupv3_se4_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se5_i2c: i2c@894000 { + compatible = "qcom,i2c-geni"; + reg = <0x894000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se5_i2c_active>; + pinctrl-1 = <&qupv3_se5_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se6_i2c: i2c@898000 { + compatible = "qcom,i2c-geni"; + reg = <0x898000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se6_i2c_active>; + pinctrl-1 = <&qupv3_se6_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se7_i2c: i2c@89c000 { + compatible = "qcom,i2c-geni"; + reg = <0x89c000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se7_i2c_active>; + pinctrl-1 = <&qupv3_se7_i2c_sleep>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + /* SPI */ + qupv3_se0_spi: spi@880000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x880000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se0_spi_active>; + pinctrl-1 = <&qupv3_se0_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se1_spi: spi@884000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x884000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S1_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se1_spi_active>; + pinctrl-1 = <&qupv3_se1_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se2_spi: spi@888000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x888000 0x4000>; + reg-names = "se_phys"; + 
clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S2_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se2_spi_active>; + pinctrl-1 = <&qupv3_se2_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se3_spi: spi@88c000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x88c000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S3_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se3_spi_active>; + pinctrl-1 = <&qupv3_se3_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se4_spi: spi@890000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x890000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se4_spi_active>; + pinctrl-1 = <&qupv3_se4_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se5_spi: spi@894000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x894000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S5_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se5_spi_active>; + pinctrl-1 = <&qupv3_se5_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se6_spi: spi@898000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x898000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S6_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se6_spi_active>; + pinctrl-1 = <&qupv3_se6_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + qupv3_se7_spi: spi@89c000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x89c000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP0_S7_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se7_spi_active>; + pinctrl-1 = <&qupv3_se7_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_0>; + status = "disabled"; + }; + + /* QUPv3 North Instances */ + qupv3_1: qcom,qupv3_1_geni_se@ac0000 { + compatible = "qcom,qupv3-geni-se"; + reg = <0xac0000 0x6000>; + qcom,bus-mas-id = ; + qcom,bus-slv-id = ; + qcom,iommu-s1-bypass; + + iommu_qupv3_1_geni_se_cb: qcom,iommu_qupv3_1_geni_se_cb { + compatible = "qcom,qupv3-geni-se-cb"; + iommus 
= <&apps_smmu 0x6c3 0x0>; + }; + }; + + /* 2-wire UART */ + + /* Debug UART Instance for CDP/MTP platform */ + qupv3_se9_2uart: qcom,qup_uart@0xa84000 { + compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart"; + reg = <0xa84000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se9_2uart_active>; + pinctrl-1 = <&qupv3_se9_2uart_sleep>; + interrupts = ; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + /* Debug UART Instance for RUMI platform */ + qupv3_se10_2uart: qcom,qup_uart@0xa88000 { + compatible = "qcom,msm-geni-console", "qcom,msm-geni-uart"; + reg = <0xa88000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se10_2uart_active>; + pinctrl-1 = <&qupv3_se10_2uart_sleep>; + interrupts = ; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + /* I2C */ + qupv3_se8_i2c: i2c@a80000 { + compatible = "qcom,i2c-geni"; + reg = <0xa80000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se8_i2c_active>; + pinctrl-1 = <&qupv3_se8_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se9_i2c: i2c@a84000 { + compatible = "qcom,i2c-geni"; + reg = <0xa84000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se9_i2c_active>; + pinctrl-1 = <&qupv3_se9_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se10_i2c: i2c@a88000 { + compatible = "qcom,i2c-geni"; + reg = <0xa88000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se10_i2c_active>; + pinctrl-1 = <&qupv3_se10_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se11_i2c: i2c@a8c000 { + compatible = "qcom,i2c-geni"; + reg = <0xa8c000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se11_i2c_active>; + pinctrl-1 = <&qupv3_se11_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se12_i2c: i2c@a90000 { + compatible = "qcom,i2c-geni"; + reg = <0xa90000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>, + <&clock_gcc 
GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se12_i2c_active>; + pinctrl-1 = <&qupv3_se12_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se13_i2c: i2c@a94000 { + compatible = "qcom,i2c-geni"; + reg = <0xa94000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se13_i2c_active>; + pinctrl-1 = <&qupv3_se13_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se14_i2c: i2c@a98000 { + compatible = "qcom,i2c-geni"; + reg = <0xa98000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se14_i2c_active>; + pinctrl-1 = <&qupv3_se14_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se15_i2c: i2c@a9c000 { + compatible = "qcom,i2c-geni"; + reg = <0xa9c000 0x4000>; + interrupts = ; + #address-cells = <1>; + #size-cells = <0>; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se15_i2c_active>; + pinctrl-1 = <&qupv3_se15_i2c_sleep>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + /* SPI */ + qupv3_se8_spi: spi@a80000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa80000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S0_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se8_spi_active>; + pinctrl-1 = <&qupv3_se8_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se9_spi: spi@a84000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa84000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S1_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se9_spi_active>; + pinctrl-1 = <&qupv3_se9_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se10_spi: spi@a88000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa88000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S2_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se10_spi_active>; + pinctrl-1 = <&qupv3_se10_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se11_spi: spi@a8c000 { + compatible = 
"qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa8c000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S3_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se11_spi_active>; + pinctrl-1 = <&qupv3_se11_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se12_spi: spi@a90000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa90000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S4_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se12_spi_active>; + pinctrl-1 = <&qupv3_se12_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se13_spi: spi@a94000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa94000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S5_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se13_spi_active>; + pinctrl-1 = <&qupv3_se13_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se14_spi: spi@a98000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa98000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S6_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se14_spi_active>; + pinctrl-1 = <&qupv3_se14_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; + + qupv3_se15_spi: spi@a9c000 { + compatible = "qcom,spi-geni"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xa9c000 0x4000>; + reg-names = "se_phys"; + clock-names = "se-clk", "m-ahb", "s-ahb"; + clocks = <&clock_gcc GCC_QUPV3_WRAP1_S7_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>, + <&clock_gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>; + pinctrl-names = "default", "sleep"; + pinctrl-0 = <&qupv3_se15_spi_active>; + pinctrl-1 = <&qupv3_se15_spi_sleep>; + interrupts = ; + spi-max-frequency = <50000000>; + qcom,wrapper-core = <&qupv3_1>; + status = "disabled"; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi index 6ea92ee03c96..b8812526a242 100644 --- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi @@ -9,3 +9,39 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ + +/{ + aliases { + serial0 = &qupv3_se10_2uart; + serial1 = &qupv3_se9_2uart; + spi0 = &qupv3_se8_spi; + i2c0 = &qupv3_se10_i2c; + i2c1 = &qupv3_se3_i2c; + hsuart0 = &qupv3_se6_4uart; + }; + +}; + +&qupv3_se9_2uart { + status = "disabled"; +}; + +&qupv3_se8_spi { + status = "disabled"; +}; + +&qupv3_se10_2uart { + status = "ok"; +}; + +&qupv3_se3_i2c { + status = "disabled"; +}; + +&qupv3_se10_i2c { + status = "disabled"; +}; + +&qupv3_se6_4uart { + status = "disabled"; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index bb5217ef0838..90d454726fba 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -394,6 +394,8 @@ #include "sdm670-ion.dtsi" +#include "sdm670-qupv3.dtsi" + &soc { #address-cells = <1>; #size-cells = <1>; -- GitLab From b989d5645c0811c14b15a0f5d098bd6927e1dda6 Mon Sep 17 00:00:00 2001 From: Manoj Prabhu B Date: Wed, 28 Jun 2017 11:36:20 +0530 Subject: [PATCH 361/786] diag: Add mutex protection while closing diag channels Add mutex protection while opening and closing diag channels to fix the race conditions. Change-Id: I26bc5e4061bd86f112993ac16789a95e6d6e01c8 Signed-off-by: Sreelakshmi Gownipalli Signed-off-by: Manoj Prabhu B --- drivers/char/diag/diagfwd_peripheral.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c index 5a8ef044ff59..119f5acd3e53 100644 --- a/drivers/char/diag/diagfwd_peripheral.c +++ b/drivers/char/diag/diagfwd_peripheral.c @@ -848,7 +848,7 @@ int diagfwd_channel_open(struct diagfwd_info *fwd_info) __func__, fwd_info->peripheral, fwd_info->type); return 0; } - + mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]); fwd_info->ch_open = 1; diagfwd_buffers_init(fwd_info); diagfwd_write_buffers_init(fwd_info); @@ -866,7 +866,7 @@ int diagfwd_channel_open(struct diagfwd_info *fwd_info) if (fwd_info->p_ops && fwd_info->p_ops->open) fwd_info->p_ops->open(fwd_info->ctxt); } - + mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]); return 0; } @@ -877,6 +877,7 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info) if (!fwd_info) return -EIO; + mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]); fwd_info->ch_open = 0; if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->close) fwd_info->c_ops->close(fwd_info); @@ -892,7 +893,7 @@ int diagfwd_channel_close(struct diagfwd_info *fwd_info) } DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d considered closed\n", fwd_info->peripheral, fwd_info->type); - + mutex_unlock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]); return 0; } -- GitLab From 9d6334b3313604eaed378d553d430e0d02be4f2e Mon Sep 17 00:00:00 2001 From: Lynus Vaz Date: Tue, 27 Jun 2017 14:57:56 +0530 Subject: [PATCH 362/786] msm: kgsl: Correct the fence reference counting The sync_file_create() function takes an additional refcount to the fence. Update the refcounting to avoid a memory leak. 
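A minimal sketch of the ownership rule this fix relies on (illustrative only, not part of the patch; example_export_fence() is a hypothetical helper, while struct fence, sync_file_create() and fence_put() are the 4.9-era APIs the diff below touches):

static struct sync_file *example_export_fence(struct fence *fence)
{
	struct sync_file *sync_file;

	/* sync_file_create() takes its own reference to the fence. */
	sync_file = sync_file_create(fence);
	if (!sync_file)
		return NULL;

	/*
	 * Hand ownership to the sync file: it holds its own reference now,
	 * so the creator drops its reference here rather than taking yet
	 * another fence_get().
	 */
	fence_put(fence);

	return sync_file;
}

The two hunks below follow this rule: the first removes the now-redundant fence_get() taken after sync_file_create(), and the second adds a fence_put() to drop the creator's reference once the syncsource fence has been handed to its sync file.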
Change-Id: I684377e5a27a2c318fc6f26d0e6ba4a21da1cf9f Signed-off-by: Lynus Vaz --- drivers/gpu/msm/kgsl_sync.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c index 96873c43adfe..817a6b10ec9c 100644 --- a/drivers/gpu/msm/kgsl_sync.c +++ b/drivers/gpu/msm/kgsl_sync.c @@ -52,6 +52,10 @@ static struct kgsl_sync_fence *kgsl_sync_fence_create( fence_init(&kfence->fence, &kgsl_sync_fence_ops, &ktimeline->lock, ktimeline->fence_context, timestamp); + /* + * sync_file_create() takes a refcount to the fence. This refcount is + * put when the fence is signaled. + */ kfence->sync_file = sync_file_create(&kfence->fence); if (kfence->sync_file == NULL) { @@ -61,9 +65,6 @@ static struct kgsl_sync_fence *kgsl_sync_fence_create( return NULL; } - /* Get a refcount to the fence. Put when signaled */ - fence_get(&kfence->fence); - spin_lock_irqsave(&ktimeline->lock, flags); list_add_tail(&kfence->child_list, &ktimeline->child_list_head); spin_unlock_irqrestore(&ktimeline->lock, flags); @@ -707,6 +708,14 @@ long kgsl_ioctl_syncsource_create_fence(struct kgsl_device_private *dev_priv, list_add_tail(&sfence->child_list, &syncsource->child_list_head); spin_unlock(&syncsource->lock); out: + /* + * We're transferring ownership of the fence to the sync file. + * The sync file takes an extra refcount when it is created, so put + * our refcount. + */ + if (sync_file) + fence_put(&sfence->fence); + if (ret) { if (sync_file) fput(sync_file->file); -- GitLab From 7b86834562b1ef2b3b4669809380b034938b6ffd Mon Sep 17 00:00:00 2001 From: Udaya Bhaskara Reddy Mallavarapu Date: Mon, 12 Jun 2017 15:07:28 +0530 Subject: [PATCH 363/786] Migrate mpq demux driver from kernel 4.4 to 4.9 This change migrates all the relevant files consisting of the mpq demux driver and its plugins, including the addition of TSPPv1 HW driver. The snapshot is taken as of msm-4.4, 'commit a74dd0fdc772 ("mmc: core: retune after un-gating the clocks")' In addition, introduce a few code changes to reduce checkpatch warnings, typos and other style issues. 
Change-Id: Ic42821da2d1d27803df2a2c8ed785f0558ffaebf Signed-off-by: Udaya Bhaskara Reddy Mallavarapu --- .../bindings/platform/msm/msm_tspp.txt | 82 + drivers/media/platform/msm/Kconfig | 2 + drivers/media/platform/msm/Makefile | 2 + drivers/media/platform/msm/broadcast/Kconfig | 14 + drivers/media/platform/msm/broadcast/Makefile | 4 + drivers/media/platform/msm/broadcast/tspp.c | 3252 ++++++++ drivers/media/platform/msm/dvb/Kconfig | 10 + drivers/media/platform/msm/dvb/Makefile | 2 + .../media/platform/msm/dvb/adapter/Makefile | 7 + .../platform/msm/dvb/adapter/mpq_adapter.c | 208 + .../msm/dvb/adapter/mpq_stream_buffer.c | 827 ++ drivers/media/platform/msm/dvb/demux/Kconfig | 47 + drivers/media/platform/msm/dvb/demux/Makefile | 14 + .../msm/dvb/demux/mpq_dmx_plugin_common.c | 6712 +++++++++++++++++ .../msm/dvb/demux/mpq_dmx_plugin_common.h | 1116 +++ .../msm/dvb/demux/mpq_dmx_plugin_sw.c | 280 + .../msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c | 1984 +++++ .../media/platform/msm/dvb/demux/mpq_sdmx.c | 1023 +++ .../media/platform/msm/dvb/demux/mpq_sdmx.h | 368 + .../platform/msm/dvb/include/mpq_adapter.h | 222 + .../platform/msm/dvb/include/mpq_dvb_debug.h | 41 + .../msm/dvb/include/mpq_stream_buffer.h | 494 ++ include/linux/qcom_tspp.h | 108 + 23 files changed, 16819 insertions(+) create mode 100644 Documentation/devicetree/bindings/platform/msm/msm_tspp.txt create mode 100644 drivers/media/platform/msm/broadcast/Kconfig create mode 100644 drivers/media/platform/msm/broadcast/Makefile create mode 100644 drivers/media/platform/msm/broadcast/tspp.c create mode 100644 drivers/media/platform/msm/dvb/Kconfig create mode 100644 drivers/media/platform/msm/dvb/Makefile create mode 100644 drivers/media/platform/msm/dvb/adapter/Makefile create mode 100644 drivers/media/platform/msm/dvb/adapter/mpq_adapter.c create mode 100644 drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c create mode 100644 drivers/media/platform/msm/dvb/demux/Kconfig create mode 100644 drivers/media/platform/msm/dvb/demux/Makefile create mode 100644 drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c create mode 100644 drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h create mode 100644 drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c create mode 100644 drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c create mode 100644 drivers/media/platform/msm/dvb/demux/mpq_sdmx.c create mode 100644 drivers/media/platform/msm/dvb/demux/mpq_sdmx.h create mode 100644 drivers/media/platform/msm/dvb/include/mpq_adapter.h create mode 100644 drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h create mode 100644 drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h create mode 100644 include/linux/qcom_tspp.h diff --git a/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt new file mode 100644 index 000000000000..139830959d33 --- /dev/null +++ b/Documentation/devicetree/bindings/platform/msm/msm_tspp.txt @@ -0,0 +1,82 @@ +* TSPP ( QTI Transport Stream Packet Processor ) + +Hardware driver for QTI TSIF 12seg wrapper core, which consists of a TSPP, a +BAM (Bus access manager, used for DMA) and two TSIF inputs. 
+ +The TSPP driver is responsible for: + - TSPP/TSIF hardware configuration (using SPS driver to configure BAM hardware) + - TSIF GPIO/Clocks configuration + - Memory resource management + - Handling TSIF/TSPP interrupts and BAM events + - TSPP Power management + +Required properties: +- compatible : Should be "qcom,msm_tspp" +- reg : Specifies the base physical addresses and sizes of TSIF, TSPP & BAM registers. +- reg-names : Specifies the register names of TSIF, TSPP & BAM base registers. +- interrupts : Specifies the interrupts associated with TSIF 12 seg core. +- interrupt-names: Specifies interrupt names for TSIF, TSPP & BAM interrupts. +- clock-names: Specifies the clock names used for interface & reference clocks. +- clocks: GCC_TSIF_AHB_CLK clock for interface clock & GCC_TSIF_REF_CLK clock for reference clock. +- qcom,msm-bus,name: Should be "tsif" +- qcom,msm-bus,num-cases: Depends on the use cases for bus scaling +- qcom,msm-bus,num-paths: The paths for source and destination ports +- qcom,msm-bus,vectors-KBps: Vectors for bus topology. +- pinctrl-names: Names for the TSIF mode configuration to specify which TSIF interface is active. + +Optional properties: + - qcom,lpass-timer-tts : Indicates that time stamps from the LPASS timer should be added to TS packets. + By default, time stamps will be added from the TSIF internal counter. + +Example: + + tspp: msm_tspp@0x8880000 { + compatible = "qcom,msm_tspp"; + reg = <0x088a7000 0x200>, /* MSM_TSIF0_PHYS */ + <0x088a8000 0x200>, /* MSM_TSIF1_PHYS */ + <0x088a9000 0x1000>, /* MSM_TSPP_PHYS */ + <0x08884000 0x23000>; /* MSM_TSPP_BAM_PHYS */ + reg-names = "MSM_TSIF0_PHYS", + "MSM_TSIF1_PHYS", + "MSM_TSPP_PHYS", + "MSM_TSPP_BAM_PHYS"; + interrupts = <0 121 0>, /* TSIF_TSPP_IRQ */ + <0 119 0>, /* TSIF0_IRQ */ + <0 120 0>, /* TSIF1_IRQ */ + <0 122 0>; /* TSIF_BAM_IRQ */ + interrupt-names = "TSIF_TSPP_IRQ", + "TSIF0_IRQ", + "TSIF1_IRQ", + "TSIF_BAM_IRQ"; + + clock-names = "iface_clk", "ref_clk"; + clocks = <&clock_gcc GCC_TSIF_AHB_CLK>, + <&clock_gcc GCC_TSIF_REF_CLK>; + + qcom,msm-bus,name = "tsif"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <82 512 0 0>, /* No vote */ + <82 512 12288 24576>; + /* Max. 
bandwidth, 2xTSIF, each max of 96Mbps */ + + pinctrl-names = "disabled", + "tsif0-mode1", "tsif0-mode2", + "tsif1-mode1", "tsif1-mode2", + "dual-tsif-mode1", "dual-tsif-mode2"; + + pinctrl-0 = <>; /* disabled */ + pinctrl-1 = <&tsif0_signals_active>; /* tsif0-mode1 */ + pinctrl-2 = <&tsif0_signals_active + &tsif0_sync_active>; /* tsif0-mode2 */ + pinctrl-3 = <&tsif1_signals_active>; /* tsif1-mode1 */ + pinctrl-4 = <&tsif1_signals_active + &tsif1_sync_active>; /* tsif1-mode2 */ + pinctrl-5 = <&tsif0_signals_active + &tsif1_signals_active>; /* dual-tsif-mode1 */ + pinctrl-6 = <&tsif0_signals_active + &tsif0_sync_active + &tsif1_signals_active + &tsif1_sync_active>; /* dual-tsif-mode2 */ + }; diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig index d5d873c62859..484819db17c9 100644 --- a/drivers/media/platform/msm/Kconfig +++ b/drivers/media/platform/msm/Kconfig @@ -16,3 +16,5 @@ menuconfig SPECTRA_CAMERA source "drivers/media/platform/msm/vidc/Kconfig" source "drivers/media/platform/msm/sde/Kconfig" +source "drivers/media/platform/msm/dvb/Kconfig" +source "drivers/media/platform/msm/broadcast/Kconfig" diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile index adeb2aac9cba..e64bcd1b709a 100644 --- a/drivers/media/platform/msm/Makefile +++ b/drivers/media/platform/msm/Makefile @@ -5,3 +5,5 @@ obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/ obj-y += sde/ obj-$(CONFIG_SPECTRA_CAMERA) += camera/ +obj-y += broadcast/ +obj-$(CONFIG_DVB_MPQ) += dvb/ diff --git a/drivers/media/platform/msm/broadcast/Kconfig b/drivers/media/platform/msm/broadcast/Kconfig new file mode 100644 index 000000000000..cdd1b2091179 --- /dev/null +++ b/drivers/media/platform/msm/broadcast/Kconfig @@ -0,0 +1,14 @@ +# +# MSM Broadcast subsystem drivers +# + +config TSPP + depends on ARCH_QCOM + tristate "TSPP (Transport Stream Packet Processor) Support" + ---help--- + Transport Stream Packet Processor v1 is used to offload the + processing of MPEG transport streams from the main processor. + It is used to process incoming transport streams from TSIF + to supports use-cases such as transport stream live play + and recording. + This can also be compiled as a loadable module. diff --git a/drivers/media/platform/msm/broadcast/Makefile b/drivers/media/platform/msm/broadcast/Makefile new file mode 100644 index 000000000000..3735bdc212ad --- /dev/null +++ b/drivers/media/platform/msm/broadcast/Makefile @@ -0,0 +1,4 @@ +# +# Makefile for MSM Broadcast subsystem drivers. +# +obj-$(CONFIG_TSPP) += tspp.o diff --git a/drivers/media/platform/msm/broadcast/tspp.c b/drivers/media/platform/msm/broadcast/tspp.c new file mode 100644 index 000000000000..43b426de7b2b --- /dev/null +++ b/drivers/media/platform/msm/broadcast/tspp.c @@ -0,0 +1,3252 @@ +/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include /* Just for modules */ +#include /* Only for KERN_INFO */ +#include /* Error macros */ +#include /* Linked list */ +#include +#include /* Needed for the macros */ +#include /* IO macros */ +#include /* Device drivers need this */ +#include /* Externally defined globals */ +#include /* Runtime power management */ +#include +#include /* copy_to_user */ +#include /* kfree, kzalloc */ +#include /* XXX_ mem_region */ +#include /* dma_XXX */ +#include /* DMA pools */ +#include /* msleep */ +#include +#include +#include /* poll() file op */ +#include /* wait() macros, sleeping */ +#include /* BIT() macro */ +#include +#include +#include /* BAM stuff */ +#include /* Locking functions */ +#include /* Timer services */ +#include /* Jiffies counter */ +#include +#include +#include +#include +#include +#include +#include /* tasklet */ +#include /* Timer */ +#include /* Timer */ + +/* + * General defines + */ +#define TSPP_TSIF_INSTANCES 2 +#define TSPP_GPIOS_PER_TSIF 4 +#define TSPP_FILTER_TABLES 3 +#define TSPP_MAX_DEVICES 1 +#define TSPP_NUM_CHANNELS 16 +#define TSPP_NUM_PRIORITIES 16 +#define TSPP_NUM_KEYS 8 +#define INVALID_CHANNEL 0xFFFFFFFF +#define TSPP_BAM_DEFAULT_IPC_LOGLVL 2 +/* + * BAM descriptor FIFO size (in number of descriptors). + * Max number of descriptors allowed by SPS which is 8K-1. + */ +#define TSPP_SPS_DESCRIPTOR_COUNT (8 * 1024 - 1) +#define TSPP_PACKET_LENGTH 188 +#define TSPP_MIN_BUFFER_SIZE (TSPP_PACKET_LENGTH) + +/* Max descriptor buffer size allowed by SPS */ +#define TSPP_MAX_BUFFER_SIZE (32 * 1024 - 1) + +/* + * Returns whether to use DMA pool for TSPP output buffers. + * For buffers smaller than page size, using DMA pool + * provides better memory utilization as dma_alloc_coherent + * allocates minimum of page size. + */ +#define TSPP_USE_DMA_POOL(buff_size) ((buff_size) < PAGE_SIZE) + +/* + * Max allowed TSPP buffers/descriptors. + * If SPS desc FIFO holds X descriptors, we can queue up to X-1 descriptors. + */ +#define TSPP_NUM_BUFFERS (TSPP_SPS_DESCRIPTOR_COUNT - 1) +#define TSPP_TSIF_DEFAULT_TIME_LIMIT 60 +#define SPS_DESCRIPTOR_SIZE 8 +#define MIN_ACCEPTABLE_BUFFER_COUNT 2 +#define TSPP_DEBUG(msg...) 
+ +/* + * TSIF register offsets + */ +#define TSIF_STS_CTL_OFF (0x0) +#define TSIF_TIME_LIMIT_OFF (0x4) +#define TSIF_CLK_REF_OFF (0x8) +#define TSIF_LPBK_FLAGS_OFF (0xc) +#define TSIF_LPBK_DATA_OFF (0x10) +#define TSIF_TEST_CTL_OFF (0x14) +#define TSIF_TEST_MODE_OFF (0x18) +#define TSIF_TEST_RESET_OFF (0x1c) +#define TSIF_TEST_EXPORT_OFF (0x20) +#define TSIF_TEST_CURRENT_OFF (0x24) +#define TSIF_TTS_CTL_OFF (0x38) + +#define TSIF_DATA_PORT_OFF (0x100) + +/* bits for TSIF_STS_CTL register */ +#define TSIF_STS_CTL_EN_IRQ BIT(28) +#define TSIF_STS_CTL_PACK_AVAIL BIT(27) +#define TSIF_STS_CTL_1ST_PACKET BIT(26) +#define TSIF_STS_CTL_OVERFLOW BIT(25) +#define TSIF_STS_CTL_LOST_SYNC BIT(24) +#define TSIF_STS_CTL_TIMEOUT BIT(23) +#define TSIF_STS_CTL_INV_SYNC BIT(21) +#define TSIF_STS_CTL_INV_NULL BIT(20) +#define TSIF_STS_CTL_INV_ERROR BIT(19) +#define TSIF_STS_CTL_INV_ENABLE BIT(18) +#define TSIF_STS_CTL_INV_DATA BIT(17) +#define TSIF_STS_CTL_INV_CLOCK BIT(16) +#define TSIF_STS_CTL_SPARE BIT(15) +#define TSIF_STS_CTL_EN_NULL BIT(11) +#define TSIF_STS_CTL_EN_ERROR BIT(10) +#define TSIF_STS_CTL_LAST_BIT BIT(9) +#define TSIF_STS_CTL_EN_TIME_LIM BIT(8) +#define TSIF_STS_CTL_EN_TCR BIT(7) +#define TSIF_STS_CTL_TEST_MODE BIT(6) +#define TSIF_STS_CTL_MODE_2 BIT(5) +#define TSIF_STS_CTL_EN_DM BIT(4) +#define TSIF_STS_CTL_STOP BIT(3) +#define TSIF_STS_CTL_START BIT(0) + +/* bits for TSIF_TTS_CTRL register */ +#define TSIF_TTS_CTL_TTS_ENDIANNESS BIT(4) +#define TSIF_TTS_CTL_TTS_SOURCE BIT(3) +#define TSIF_TTS_CTL_TTS_LENGTH_1 BIT(1) +#define TSIF_TTS_CTL_TTS_LENGTH_0 BIT(0) + +/* + * TSPP register offsets + */ +#define TSPP_RST 0x00 +#define TSPP_CLK_CONTROL 0x04 +#define TSPP_CONFIG 0x08 +#define TSPP_CONTROL 0x0C +#define TSPP_PS_DISABLE 0x10 +#define TSPP_MSG_IRQ_STATUS 0x14 +#define TSPP_MSG_IRQ_MASK 0x18 +#define TSPP_IRQ_STATUS 0x1C +#define TSPP_IRQ_MASK 0x20 +#define TSPP_IRQ_CLEAR 0x24 +#define TSPP_PIPE_ERROR_STATUS(_n) (0x28 + (_n << 2)) +#define TSPP_STATUS 0x68 +#define TSPP_CURR_TSP_HEADER 0x6C +#define TSPP_CURR_PID_FILTER 0x70 +#define TSPP_SYSTEM_KEY(_n) (0x74 + (_n << 2)) +#define TSPP_CBC_INIT_VAL(_n) (0x94 + (_n << 2)) +#define TSPP_DATA_KEY_RESET 0x9C +#define TSPP_KEY_VALID 0xA0 +#define TSPP_KEY_ERROR 0xA4 +#define TSPP_TEST_CTRL 0xA8 +#define TSPP_VERSION 0xAC +#define TSPP_GENERICS 0xB0 +#define TSPP_NOP 0xB4 + +/* + * Register bit definitions + */ +/* TSPP_RST */ +#define TSPP_RST_RESET BIT(0) + +/* TSPP_CLK_CONTROL */ +#define TSPP_CLK_CONTROL_FORCE_CRYPTO BIT(9) +#define TSPP_CLK_CONTROL_FORCE_PES_PL BIT(8) +#define TSPP_CLK_CONTROL_FORCE_PES_AF BIT(7) +#define TSPP_CLK_CONTROL_FORCE_RAW_CTRL BIT(6) +#define TSPP_CLK_CONTROL_FORCE_PERF_CNT BIT(5) +#define TSPP_CLK_CONTROL_FORCE_CTX_SEARCH BIT(4) +#define TSPP_CLK_CONTROL_FORCE_TSP_PROC BIT(3) +#define TSPP_CLK_CONTROL_FORCE_CONS_AHB2MEM BIT(2) +#define TSPP_CLK_CONTROL_FORCE_TS_AHB2MEM BIT(1) +#define TSPP_CLK_CONTROL_SET_CLKON BIT(0) + +/* TSPP_CONFIG */ +#define TSPP_CONFIG_SET_PACKET_LENGTH(_a, _b) (_a = (_a & 0xF0) | \ +((_b & 0xF) << 8)) +#define TSPP_CONFIG_GET_PACKET_LENGTH(_a) ((_a >> 8) & 0xF) +#define TSPP_CONFIG_DUP_WITH_DISC_EN BIT(7) +#define TSPP_CONFIG_PES_SYNC_ERROR_MASK BIT(6) +#define TSPP_CONFIG_PS_LEN_ERR_MASK BIT(5) +#define TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK BIT(4) +#define TSPP_CONFIG_PS_CONT_ERR_MASK BIT(3) +#define TSPP_CONFIG_PS_DUP_TSP_MASK BIT(2) +#define TSPP_CONFIG_TSP_ERR_IND_MASK BIT(1) +#define TSPP_CONFIG_TSP_SYNC_ERR_MASK BIT(0) + +/* TSPP_CONTROL */ +#define TSPP_CONTROL_PID_FILTER_LOCK 
BIT(5) +#define TSPP_CONTROL_FORCE_KEY_CALC BIT(4) +#define TSPP_CONTROL_TSP_CONS_SRC_DIS BIT(3) +#define TSPP_CONTROL_TSP_TSIF1_SRC_DIS BIT(2) +#define TSPP_CONTROL_TSP_TSIF0_SRC_DIS BIT(1) +#define TSPP_CONTROL_PERF_COUNT_INIT BIT(0) + +/* TSPP_MSG_IRQ_STATUS + TSPP_MSG_IRQ_MASK */ +#define TSPP_MSG_TSPP_IRQ BIT(2) +#define TSPP_MSG_TSIF_1_IRQ BIT(1) +#define TSPP_MSG_TSIF_0_IRQ BIT(0) + +/* TSPP_IRQ_STATUS + TSPP_IRQ_MASK + TSPP_IRQ_CLEAR */ +#define TSPP_IRQ_STATUS_TSP_RD_CMPL BIT(19) +#define TSPP_IRQ_STATUS_KEY_ERROR BIT(18) +#define TSPP_IRQ_STATUS_KEY_SWITCHED_BAD BIT(17) +#define TSPP_IRQ_STATUS_KEY_SWITCHED BIT(16) +#define TSPP_IRQ_STATUS_PS_BROKEN(_n) BIT((_n)) + +/* TSPP_PIPE_ERROR_STATUS */ +#define TSPP_PIPE_PES_SYNC_ERROR BIT(3) +#define TSPP_PIPE_PS_LENGTH_ERROR BIT(2) +#define TSPP_PIPE_PS_CONTINUITY_ERROR BIT(1) +#define TSPP_PIP_PS_LOST_START BIT(0) + +/* TSPP_STATUS */ +#define TSPP_STATUS_TSP_PKT_AVAIL BIT(10) +#define TSPP_STATUS_TSIF1_DM_REQ BIT(6) +#define TSPP_STATUS_TSIF0_DM_REQ BIT(2) +#define TSPP_CURR_FILTER_TABLE BIT(0) + +/* TSPP_GENERICS */ +#define TSPP_GENERICS_CRYPTO_GEN BIT(12) +#define TSPP_GENERICS_MAX_CONS_PIPES BIT(7) +#define TSPP_GENERICS_MAX_PIPES BIT(2) +#define TSPP_GENERICS_TSIF_1_GEN BIT(1) +#define TSPP_GENERICS_TSIF_0_GEN BIT(0) + +/* + * TSPP memory regions + */ +#define TSPP_PID_FILTER_TABLE0 0x800 +#define TSPP_PID_FILTER_TABLE1 0x880 +#define TSPP_PID_FILTER_TABLE2 0x900 +#define TSPP_GLOBAL_PERFORMANCE 0x980 /* see tspp_global_performance */ +#define TSPP_PIPE_CONTEXT 0x990 /* see tspp_pipe_context */ +#define TSPP_PIPE_PERFORMANCE 0x998 /* see tspp_pipe_performance */ +#define TSPP_TSP_BUFF_WORD(_n) (0xC10 + (_n << 2)) +#define TSPP_DATA_KEY 0xCD0 + +struct debugfs_entry { + const char *name; + mode_t mode; + int offset; +}; + +static const struct debugfs_entry debugfs_tsif_regs[] = { + {"sts_ctl", 0644, TSIF_STS_CTL_OFF}, + {"time_limit", 0644, TSIF_TIME_LIMIT_OFF}, + {"clk_ref", 0644, TSIF_CLK_REF_OFF}, + {"lpbk_flags", 0644, TSIF_LPBK_FLAGS_OFF}, + {"lpbk_data", 0644, TSIF_LPBK_DATA_OFF}, + {"test_ctl", 0644, TSIF_TEST_CTL_OFF}, + {"test_mode", 0644, TSIF_TEST_MODE_OFF}, + {"test_reset", 0200, TSIF_TEST_RESET_OFF}, + {"test_export", 0644, TSIF_TEST_EXPORT_OFF}, + {"test_current", 0444, TSIF_TEST_CURRENT_OFF}, + {"data_port", 0400, TSIF_DATA_PORT_OFF}, + {"tts_source", 0600, TSIF_TTS_CTL_OFF}, +}; + +static const struct debugfs_entry debugfs_tspp_regs[] = { + {"rst", 0644, TSPP_RST}, + {"clk_control", 0644, TSPP_CLK_CONTROL}, + {"config", 0644, TSPP_CONFIG}, + {"control", 0644, TSPP_CONTROL}, + {"ps_disable", 0644, TSPP_PS_DISABLE}, + {"msg_irq_status", 0644, TSPP_MSG_IRQ_STATUS}, + {"msg_irq_mask", 0644, TSPP_MSG_IRQ_MASK}, + {"irq_status", 0644, TSPP_IRQ_STATUS}, + {"irq_mask", 0644, TSPP_IRQ_MASK}, + {"irq_clear", 0644, TSPP_IRQ_CLEAR}, + /* {"pipe_error_status",S_IRUGO | S_IWUSR, TSPP_PIPE_ERROR_STATUS}, */ + {"status", 0644, TSPP_STATUS}, + {"curr_tsp_header", 0644, TSPP_CURR_TSP_HEADER}, + {"curr_pid_filter", 0644, TSPP_CURR_PID_FILTER}, + /* {"system_key", S_IRUGO | S_IWUSR, TSPP_SYSTEM_KEY}, */ + /* {"cbc_init_val", S_IRUGO | S_IWUSR, TSPP_CBC_INIT_VAL}, */ + {"data_key_reset", 0644, TSPP_DATA_KEY_RESET}, + {"key_valid", 0644, TSPP_KEY_VALID}, + {"key_error", 0644, TSPP_KEY_ERROR}, + {"test_ctrl", 0644, TSPP_TEST_CTRL}, + {"version", 0644, TSPP_VERSION}, + {"generics", 0644, TSPP_GENERICS}, + {"pid_filter_table0", 0644, TSPP_PID_FILTER_TABLE0}, + {"pid_filter_table1", 0644, TSPP_PID_FILTER_TABLE1}, + {"pid_filter_table2", 
0644, TSPP_PID_FILTER_TABLE2}, + {"tsp_total_num", 0644, TSPP_GLOBAL_PERFORMANCE}, + {"tsp_ignored_num", 0644, TSPP_GLOBAL_PERFORMANCE + 4}, + {"tsp_err_ind_num", 0644, TSPP_GLOBAL_PERFORMANCE + 8}, + {"tsp_sync_err_num", 0644, TSPP_GLOBAL_PERFORMANCE + 16}, + {"pipe_context", 0644, TSPP_PIPE_CONTEXT}, + {"pipe_performance", 0644, TSPP_PIPE_PERFORMANCE}, + {"data_key", 0644, TSPP_DATA_KEY} +}; + +struct tspp_pid_filter { + u32 filter; /* see FILTER_ macros */ + u32 config; /* see FILTER_ macros */ +}; + +/* tsp_info */ +#define FILTER_HEADER_ERROR_MASK BIT(7) +#define FILTER_TRANS_END_DISABLE BIT(6) +#define FILTER_DEC_ON_ERROR_EN BIT(5) +#define FILTER_DECRYPT BIT(4) +#define FILTER_HAS_ENCRYPTION(_p) (_p->config & FILTER_DECRYPT) +#define FILTER_GET_PIPE_NUMBER0(_p) (_p->config & 0xF) +#define FILTER_SET_PIPE_NUMBER0(_p, _b) (_p->config = \ + (_p->config & ~0xF) | (_b & 0xF)) +#define FILTER_GET_PIPE_PROCESS0(_p) ((_p->filter >> 30) & 0x3) +#define FILTER_SET_PIPE_PROCESS0(_p, _b) (_p->filter = \ + (_p->filter & ~(0x3<<30)) | ((_b & 0x3) << 30)) +#define FILTER_GET_PIPE_PID(_p) ((_p->filter >> 13) & 0x1FFF) +#define FILTER_SET_PIPE_PID(_p, _b) (_p->filter = \ + (_p->filter & ~(0x1FFF<<13)) | ((_b & 0x1FFF) << 13)) +#define FILTER_GET_PID_MASK(_p) (_p->filter & 0x1FFF) +#define FILTER_SET_PID_MASK(_p, _b) (_p->filter = \ + (_p->filter & ~0x1FFF) | (_b & 0x1FFF)) +#define FILTER_GET_PIPE_PROCESS1(_p) ((_p->config >> 30) & 0x3) +#define FILTER_SET_PIPE_PROCESS1(_p, _b) (_p->config = \ + (_p->config & ~(0x3<<30)) | ((_b & 0x3) << 30)) +#define FILTER_GET_KEY_NUMBER(_p) ((_p->config >> 8) & 0x7) +#define FILTER_SET_KEY_NUMBER(_p, _b) (_p->config = \ + (_p->config & ~(0x7<<8)) | ((_b & 0x7) << 8)) + +struct tspp_global_performance_regs { + u32 tsp_total; + u32 tsp_ignored; + u32 tsp_error; + u32 tsp_sync; +}; + +struct tspp_pipe_context_regs { + u16 pes_bytes_left; + u16 count; + u32 tsif_suffix; +} __packed; +#define CONTEXT_GET_STATE(_a) (_a & 0x3) +#define CONTEXT_UNSPEC_LENGTH BIT(11) +#define CONTEXT_GET_CONT_COUNT(_a) ((_a >> 12) & 0xF) + +#define MSEC_TO_JIFFIES(msec) ((msec) * HZ / 1000) + +struct tspp_pipe_performance_regs { + u32 tsp_total; + u32 ps_duplicate_tsp; + u32 tsp_no_payload; + u32 tsp_broken_ps; + u32 ps_total_num; + u32 ps_continuity_error; + u32 ps_length_error; + u32 pes_sync_error; +}; + +struct tspp_tsif_device { + void __iomem *base; + u32 time_limit; + u32 ref_count; + enum tspp_tsif_mode mode; + int clock_inverse; + int data_inverse; + int sync_inverse; + int enable_inverse; + u32 tsif_irq; + + /* debugfs */ + struct dentry *dent_tsif; + struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)]; + u32 stat_rx; + u32 stat_overflow; + u32 stat_lost_sync; + u32 stat_timeout; + enum tsif_tts_source tts_source; + u32 lpass_timer_enable; +}; + +enum tspp_buf_state { + TSPP_BUF_STATE_EMPTY, /* buffer has been allocated, but not waiting */ + TSPP_BUF_STATE_WAITING, /* buffer is waiting to be filled */ + TSPP_BUF_STATE_DATA, /* buffer is not empty and can be read */ + TSPP_BUF_STATE_LOCKED /* buffer is being read by a client */ +}; + +struct tspp_mem_buffer { + struct tspp_mem_buffer *next; + struct sps_mem_buffer sps; + struct tspp_data_descriptor desc; /* buffer descriptor for kernel api */ + enum tspp_buf_state state; + size_t filled; /* how much data this buffer is holding */ + int read_index; /* where to start reading data from */ +}; + +/* this represents each char device 'channel' */ +struct tspp_channel { + struct tspp_device *pdev; /* can use container_of 
instead? */ + struct sps_pipe *pipe; + struct sps_connect config; + struct sps_register_event event; + struct tspp_mem_buffer *data; /* list of buffers */ + struct tspp_mem_buffer *read; /* first buffer ready to be read */ + struct tspp_mem_buffer *waiting; /* first outstanding transfer */ + struct tspp_mem_buffer *locked; /* buffer currently being read */ + wait_queue_head_t in_queue; /* set when data is received */ + u32 id; /* channel id (0-15) */ + int used; /* is this channel in use? */ + int key; /* which encryption key index is used */ + u32 buffer_size; /* size of the sps transfer buffers */ + u32 max_buffers; /* how many buffers should be allocated */ + u32 buffer_count; /* how many buffers are actually allocated */ + u32 filter_count; /* how many filters have been added to this channel */ + u32 int_freq; /* generate interrupts every x descriptors */ + enum tspp_source src; + enum tspp_mode mode; + tspp_notifier *notifier; /* used only with kernel api */ + void *notify_data; /* data to be passed with the notifier */ + u32 expiration_period_ms; /* notification on partially filled buffers */ + struct timer_list expiration_timer; + struct dma_pool *dma_pool; + tspp_memfree *memfree; /* user defined memory free function */ + void *user_info; /* user cookie passed to memory alloc/free function */ +}; + +struct tspp_pid_filter_table { + struct tspp_pid_filter filter[TSPP_NUM_PRIORITIES]; +}; + +struct tspp_key_entry { + u32 even_lsb; + u32 even_msb; + u32 odd_lsb; + u32 odd_msb; +}; + +struct tspp_key_table { + struct tspp_key_entry entry[TSPP_NUM_KEYS]; +}; + +struct tspp_pinctrl { + struct pinctrl *pinctrl; + + struct pinctrl_state *disabled; + struct pinctrl_state *tsif0_mode1; + struct pinctrl_state *tsif0_mode2; + struct pinctrl_state *tsif1_mode1; + struct pinctrl_state *tsif1_mode2; + struct pinctrl_state *dual_mode1; + struct pinctrl_state *dual_mode2; + + bool tsif0_active; + bool tsif1_active; +}; + +/* this represents the actual hardware device */ +struct tspp_device { + struct list_head devlist; /* list of all devices */ + struct platform_device *pdev; + void __iomem *base; + uint32_t tsif_bus_client; + unsigned int tspp_irq; + unsigned int bam_irq; + unsigned long bam_handle; + struct sps_bam_props bam_props; + struct wakeup_source ws; + spinlock_t spinlock; + struct tasklet_struct tlet; + struct tspp_tsif_device tsif[TSPP_TSIF_INSTANCES]; + /* clocks */ + struct clk *tsif_pclk; + struct clk *tsif_ref_clk; + /* regulators */ + struct regulator *tsif_vreg; + /* data */ + struct tspp_pid_filter_table *filters[TSPP_FILTER_TABLES]; + struct tspp_channel channels[TSPP_NUM_CHANNELS]; + struct tspp_key_table *tspp_key_table; + struct tspp_global_performance_regs *tspp_global_performance; + struct tspp_pipe_context_regs *tspp_pipe_context; + struct tspp_pipe_performance_regs *tspp_pipe_performance; + bool req_irqs; + /* pinctrl */ + struct mutex mutex; + struct tspp_pinctrl pinctrl; + unsigned int tts_source; /* Time stamp source type LPASS timer/TCR */ + + struct dentry *dent; + struct dentry *debugfs_regs[ARRAY_SIZE(debugfs_tspp_regs)]; +}; + +static int tspp_key_entry; +static u32 channel_id; /* next channel id number to assign */ + +static LIST_HEAD(tspp_devices); + +/*** IRQ ***/ +static irqreturn_t tspp_isr(int irq, void *dev) +{ + struct tspp_device *device = dev; + u32 status, mask; + u32 data; + + status = readl_relaxed(device->base + TSPP_IRQ_STATUS); + mask = readl_relaxed(device->base + TSPP_IRQ_MASK); + status &= mask; + + if (!status) { + dev_warn(&device->pdev->dev, 
"Spurious interrupt"); + return IRQ_NONE; + } + + /* if (status & TSPP_IRQ_STATUS_TSP_RD_CMPL) */ + + if (status & TSPP_IRQ_STATUS_KEY_ERROR) { + /* read the key error info */ + data = readl_relaxed(device->base + TSPP_KEY_ERROR); + dev_info(&device->pdev->dev, "key error 0x%x", data); + } + if (status & TSPP_IRQ_STATUS_KEY_SWITCHED_BAD) { + data = readl_relaxed(device->base + TSPP_KEY_VALID); + dev_info(&device->pdev->dev, "key invalidated: 0x%x", data); + } + if (status & TSPP_IRQ_STATUS_KEY_SWITCHED) + dev_info(&device->pdev->dev, "key switched"); + + if (status & 0xffff) + dev_info(&device->pdev->dev, "broken pipe %i", status & 0xffff); + + writel_relaxed(status, device->base + TSPP_IRQ_CLEAR); + + /* + * Before returning IRQ_HANDLED to the generic interrupt handling + * framework need to make sure all operations including clearing of + * interrupt status registers in the hardware is performed. + * Thus a barrier after clearing the interrupt status register + * is required to guarantee that the interrupt status register has + * really been cleared by the time we return from this handler. + */ + wmb(); + return IRQ_HANDLED; +} + +static irqreturn_t tsif_isr(int irq, void *dev) +{ + struct tspp_tsif_device *tsif_device = dev; + u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF); + + if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL | + TSIF_STS_CTL_OVERFLOW | + TSIF_STS_CTL_LOST_SYNC | + TSIF_STS_CTL_TIMEOUT))) + return IRQ_NONE; + + if (sts_ctl & TSIF_STS_CTL_OVERFLOW) + tsif_device->stat_overflow++; + + if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) + tsif_device->stat_lost_sync++; + + if (sts_ctl & TSIF_STS_CTL_TIMEOUT) + tsif_device->stat_timeout++; + + iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF); + + /* + * Before returning IRQ_HANDLED to the generic interrupt handling + * framework need to make sure all operations including clearing of + * interrupt status registers in the hardware is performed. + * Thus a barrier after clearing the interrupt status register + * is required to guarantee that the interrupt status register has + * really been cleared by the time we return from this handler. 
+ */ + wmb(); + return IRQ_HANDLED; +} + +/*** callbacks ***/ +static void tspp_sps_complete_cb(struct sps_event_notify *notify) +{ + struct tspp_device *pdev; + + if (!notify || !notify->user) + return; + + pdev = notify->user; + tasklet_schedule(&pdev->tlet); +} + +static void tspp_expiration_timer(unsigned long data) +{ + struct tspp_device *pdev = (struct tspp_device *)data; + + if (pdev) + tasklet_schedule(&pdev->tlet); +} + +/*** tasklet ***/ +static void tspp_sps_complete_tlet(unsigned long data) +{ + int i; + int complete; + unsigned long flags; + struct sps_iovec iovec; + struct tspp_channel *channel; + struct tspp_device *device = (struct tspp_device *)data; + + spin_lock_irqsave(&device->spinlock, flags); + + for (i = 0; i < TSPP_NUM_CHANNELS; i++) { + complete = 0; + channel = &device->channels[i]; + + if (!channel->used || !channel->waiting) + continue; + + /* stop the expiration timer */ + if (channel->expiration_period_ms) + del_timer(&channel->expiration_timer); + + /* get completions */ + while (channel->waiting->state == TSPP_BUF_STATE_WAITING) { + if (sps_get_iovec(channel->pipe, &iovec) != 0) { + pr_err("tspp: Error in iovec on channel %i", + channel->id); + break; + } + if (iovec.size == 0) + break; + + if (DESC_FULL_ADDR(iovec.flags, iovec.addr) + != channel->waiting->sps.phys_base) + pr_err("tspp: buffer mismatch %pa", + &channel->waiting->sps.phys_base); + + complete = 1; + channel->waiting->state = TSPP_BUF_STATE_DATA; + channel->waiting->filled = iovec.size; + channel->waiting->read_index = 0; + + if (channel->src == TSPP_SOURCE_TSIF0) + device->tsif[0].stat_rx++; + else if (channel->src == TSPP_SOURCE_TSIF1) + device->tsif[1].stat_rx++; + + /* update the pointers */ + channel->waiting = channel->waiting->next; + } + + /* wake any waiting processes */ + if (complete) { + wake_up_interruptible(&channel->in_queue); + + /* call notifiers */ + if (channel->notifier) + channel->notifier(channel->id, + channel->notify_data); + } + + /* restart expiration timer */ + if (channel->expiration_period_ms) + mod_timer(&channel->expiration_timer, + jiffies + + MSEC_TO_JIFFIES( + channel->expiration_period_ms)); + } + + spin_unlock_irqrestore(&device->spinlock, flags); +} + +static int tspp_config_gpios(struct tspp_device *device, + enum tspp_source source, + int enable) +{ + int ret; + struct pinctrl_state *s; + struct tspp_pinctrl *p = &device->pinctrl; + bool mode2; + + /* + * TSIF devices are handled separately, however changing of the pinctrl + * state must be protected from race condition. + */ + if (mutex_lock_interruptible(&device->mutex)) + return -ERESTARTSYS; + + switch (source) { + case TSPP_SOURCE_TSIF0: + mode2 = device->tsif[0].mode == TSPP_TSIF_MODE_2; + if (enable == p->tsif1_active) { + if (enable) + /* Both tsif enabled */ + s = mode2 ? p->dual_mode2 : p->dual_mode1; + else + /* Both tsif disabled */ + s = p->disabled; + } else if (enable) { + /* Only tsif0 is enabled */ + s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1; + } else { + /* Only tsif1 is enabled */ + s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1; + } + + ret = pinctrl_select_state(p->pinctrl, s); + if (!ret) + p->tsif0_active = enable; + break; + case TSPP_SOURCE_TSIF1: + mode2 = device->tsif[1].mode == TSPP_TSIF_MODE_2; + if (enable == p->tsif0_active) { + if (enable) + /* Both tsif enabled */ + s = mode2 ? p->dual_mode2 : p->dual_mode1; + else + /* Both tsif disabled */ + s = p->disabled; + } else if (enable) { + /* Only tsif1 is enabled */ + s = mode2 ? 
p->tsif1_mode2 : p->tsif1_mode1; + } else { + /* Only tsif0 is enabled */ + s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1; + } + + ret = pinctrl_select_state(p->pinctrl, s); + if (!ret) + p->tsif1_active = enable; + break; + default: + pr_err("%s: invalid source %d\n", __func__, source); + mutex_unlock(&device->mutex); + return -EINVAL; + } + + if (ret) + pr_err("%s: failed to change pinctrl state, ret=%d\n", + __func__, ret); + + mutex_unlock(&device->mutex); + return ret; +} + +static int tspp_get_pinctrl(struct tspp_device *device) +{ + struct pinctrl *pinctrl; + struct pinctrl_state *state; + + pinctrl = devm_pinctrl_get(&device->pdev->dev); + if (IS_ERR_OR_NULL(pinctrl)) { + pr_err("%s: Unable to get pinctrl handle\n", __func__); + return -EINVAL; + } + device->pinctrl.pinctrl = pinctrl; + + state = pinctrl_lookup_state(pinctrl, "disabled"); + if (IS_ERR_OR_NULL(state)) { + pr_err("%s: Unable to find state %s\n", + __func__, "disabled"); + return -EINVAL; + } + device->pinctrl.disabled = state; + + state = pinctrl_lookup_state(pinctrl, "tsif0-mode1"); + if (IS_ERR_OR_NULL(state)) { + pr_err("%s: Unable to find state %s\n", + __func__, "tsif0-mode1"); + return -EINVAL; + } + device->pinctrl.tsif0_mode1 = state; + + state = pinctrl_lookup_state(pinctrl, "tsif0-mode2"); + if (IS_ERR_OR_NULL(state)) { + pr_err("%s: Unable to find state %s\n", + __func__, "tsif0-mode2"); + return -EINVAL; + } + device->pinctrl.tsif0_mode2 = state; + + state = pinctrl_lookup_state(pinctrl, "tsif1-mode1"); + if (IS_ERR_OR_NULL(state)) { + pr_err("%s: Unable to find state %s\n", + __func__, "tsif1-mode1"); + return -EINVAL; + } + device->pinctrl.tsif1_mode1 = state; + + state = pinctrl_lookup_state(pinctrl, "tsif1-mode2"); + if (IS_ERR_OR_NULL(state)) { + pr_err("%s: Unable to find state %s\n", + __func__, "tsif1-mode2"); + return -EINVAL; + } + device->pinctrl.tsif1_mode2 = state; + + state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode1"); + if (IS_ERR_OR_NULL(state)) { + pr_err("%s: Unable to find state %s\n", + __func__, "dual-tsif-mode1"); + return -EINVAL; + } + device->pinctrl.dual_mode1 = state; + + state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode2"); + if (IS_ERR_OR_NULL(state)) { + pr_err("%s: Unable to find state %s\n", + __func__, "dual-tsif-mode2"); + return -EINVAL; + } + device->pinctrl.dual_mode2 = state; + + device->pinctrl.tsif0_active = false; + device->pinctrl.tsif1_active = false; + + return 0; +} + + +/*** Clock functions ***/ +static int tspp_clock_start(struct tspp_device *device) +{ + int rc; + + if (device == NULL) { + pr_err("tspp: Can't start clocks, invalid device\n"); + return -EINVAL; + } + + if (device->tsif_bus_client) { + rc = msm_bus_scale_client_update_request( + device->tsif_bus_client, 1); + if (rc) { + pr_err("tspp: Can't enable bus\n"); + return -EBUSY; + } + } + + if (device->tsif_vreg) { + rc = regulator_set_voltage(device->tsif_vreg, + RPMH_REGULATOR_LEVEL_OFF, + RPMH_REGULATOR_LEVEL_MAX); + if (rc) { + pr_err("Unable to set CX voltage.\n"); + if (device->tsif_bus_client) + msm_bus_scale_client_update_request( + device->tsif_bus_client, 0); + return rc; + } + } + + if (device->tsif_pclk && clk_prepare_enable(device->tsif_pclk) != 0) { + pr_err("tspp: Can't start pclk"); + + if (device->tsif_vreg) { + regulator_set_voltage(device->tsif_vreg, + RPMH_REGULATOR_LEVEL_OFF, + RPMH_REGULATOR_LEVEL_MAX); + } + + if (device->tsif_bus_client) + msm_bus_scale_client_update_request( + device->tsif_bus_client, 0); + return -EBUSY; + } + + if (device->tsif_ref_clk && + 
clk_prepare_enable(device->tsif_ref_clk) != 0) { + pr_err("tspp: Can't start ref clk"); + clk_disable_unprepare(device->tsif_pclk); + if (device->tsif_vreg) { + regulator_set_voltage(device->tsif_vreg, + RPMH_REGULATOR_LEVEL_OFF, + RPMH_REGULATOR_LEVEL_MAX); + } + + if (device->tsif_bus_client) + msm_bus_scale_client_update_request( + device->tsif_bus_client, 0); + return -EBUSY; + } + + return 0; +} + +static void tspp_clock_stop(struct tspp_device *device) +{ + int rc; + + if (device == NULL) { + pr_err("tspp: Can't stop clocks, invalid device\n"); + return; + } + + if (device->tsif_pclk) + clk_disable_unprepare(device->tsif_pclk); + + if (device->tsif_ref_clk) + clk_disable_unprepare(device->tsif_ref_clk); + + if (device->tsif_vreg) { + rc = regulator_set_voltage(device->tsif_vreg, + RPMH_REGULATOR_LEVEL_OFF, + RPMH_REGULATOR_LEVEL_MAX); + if (rc) + pr_err("Unable to set CX voltage.\n"); + } + + if (device->tsif_bus_client) { + rc = msm_bus_scale_client_update_request( + device->tsif_bus_client, 0); + if (rc) + pr_err("tspp: Can't disable bus\n"); + } +} + +/*** TSIF functions ***/ +static int tspp_start_tsif(struct tspp_tsif_device *tsif_device) +{ + int start_hardware = 0; + u32 ctl; + u32 tts_ctl; + int retval; + + if (tsif_device->ref_count == 0) { + start_hardware = 1; + } else if (tsif_device->ref_count > 0) { + ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF); + if ((ctl & TSIF_STS_CTL_START) != 1) { + /* this hardware should already be running */ + pr_warn("tspp: tsif hw not started but ref count > 0"); + start_hardware = 1; + } + } + + if (start_hardware) { + ctl = TSIF_STS_CTL_EN_IRQ | + TSIF_STS_CTL_EN_DM | + TSIF_STS_CTL_PACK_AVAIL | + TSIF_STS_CTL_OVERFLOW | + TSIF_STS_CTL_LOST_SYNC; + + if (tsif_device->clock_inverse) + ctl |= TSIF_STS_CTL_INV_CLOCK; + + if (tsif_device->data_inverse) + ctl |= TSIF_STS_CTL_INV_DATA; + + if (tsif_device->sync_inverse) + ctl |= TSIF_STS_CTL_INV_SYNC; + + if (tsif_device->enable_inverse) + ctl |= TSIF_STS_CTL_INV_ENABLE; + + switch (tsif_device->mode) { + case TSPP_TSIF_MODE_LOOPBACK: + ctl |= TSIF_STS_CTL_EN_NULL | + TSIF_STS_CTL_EN_ERROR | + TSIF_STS_CTL_TEST_MODE; + break; + case TSPP_TSIF_MODE_1: + ctl |= TSIF_STS_CTL_EN_TIME_LIM; + if (tsif_device->tts_source != TSIF_TTS_LPASS_TIMER) + ctl |= TSIF_STS_CTL_EN_TCR; + break; + case TSPP_TSIF_MODE_2: + ctl |= TSIF_STS_CTL_EN_TIME_LIM | + TSIF_STS_CTL_MODE_2; + if (tsif_device->tts_source != TSIF_TTS_LPASS_TIMER) + ctl |= TSIF_STS_CTL_EN_TCR; + break; + default: + pr_warn("tspp: unknown tsif mode 0x%x", + tsif_device->mode); + } + /* Set 4bytes Time Stamp for TCR */ + if (tsif_device->tts_source == TSIF_TTS_LPASS_TIMER) { + if (tsif_device->lpass_timer_enable == 0) { + retval = avcs_core_open(); + if (retval < 0) { + pr_warn("tspp: avcs open fail:%d\n", + retval); + return retval; + } + retval = avcs_core_disable_power_collapse(1); + if (retval < 0) { + pr_warn("tspp: avcs power enable:%d\n", + retval); + return retval; + } + tsif_device->lpass_timer_enable = 1; + } + + tts_ctl = readl_relaxed(tsif_device->base + + TSIF_TTS_CTL_OFF); + tts_ctl = 0; + /* Set LPASS Timer TTS source */ + tts_ctl |= TSIF_TTS_CTL_TTS_SOURCE; + /* Set 4 byte TTS */ + tts_ctl |= TSIF_TTS_CTL_TTS_LENGTH_0; + + writel_relaxed(tts_ctl, tsif_device->base + + TSIF_TTS_CTL_OFF); + /* write TTS control register */ + wmb(); + tts_ctl = readl_relaxed(tsif_device->base + + TSIF_TTS_CTL_OFF); + } + + writel_relaxed(ctl, tsif_device->base + TSIF_STS_CTL_OFF); + /* write Status control register */ + wmb(); + 
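+	/*
+	 * Program the packet time limit, then raise START. The read-back
+	 * of TSIF_STS_CTL below verifies that the block actually started;
+	 * -EBUSY is returned to the caller if START did not take effect.
+	 */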
writel_relaxed(tsif_device->time_limit, + tsif_device->base + TSIF_TIME_LIMIT_OFF); + /* assure register configuration is done before starting TSIF */ + wmb(); + writel_relaxed(ctl | TSIF_STS_CTL_START, + tsif_device->base + TSIF_STS_CTL_OFF); + /* assure TSIF start configuration */ + wmb(); + } + + ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF); + if (!(ctl & TSIF_STS_CTL_START)) + return -EBUSY; + + tsif_device->ref_count++; + return 0; +} + +static void tspp_stop_tsif(struct tspp_tsif_device *tsif_device) +{ + if (tsif_device->ref_count == 0) { + if (tsif_device->lpass_timer_enable == 1) { + if (avcs_core_disable_power_collapse(0) == 0) + tsif_device->lpass_timer_enable = 0; + } + return; + } + + tsif_device->ref_count--; + + if (tsif_device->ref_count == 0) { + writel_relaxed(TSIF_STS_CTL_STOP, + tsif_device->base + TSIF_STS_CTL_OFF); + /* assure TSIF stop configuration */ + wmb(); + } +} + +/*** local TSPP functions ***/ +static int tspp_channels_in_use(struct tspp_device *pdev) +{ + int i; + int count = 0; + + for (i = 0; i < TSPP_NUM_CHANNELS; i++) + count += (pdev->channels[i].used ? 1 : 0); + + return count; +} + +static struct tspp_device *tspp_find_by_id(int id) +{ + struct tspp_device *dev; + + list_for_each_entry(dev, &tspp_devices, devlist) { + if (dev->pdev->id == id) + return dev; + } + return NULL; +} + +static int tspp_get_key_entry(void) +{ + int i; + + for (i = 0; i < TSPP_NUM_KEYS; i++) { + if (!(tspp_key_entry & (1 << i))) { + tspp_key_entry |= (1 << i); + return i; + } + } + return 1 < TSPP_NUM_KEYS; +} + +static void tspp_free_key_entry(int entry) +{ + if (entry > TSPP_NUM_KEYS) { + pr_err("tspp_free_key_entry: index out of bounds"); + return; + } + + tspp_key_entry &= ~(1 << entry); +} + +static int tspp_alloc_buffer(u32 channel_id, struct tspp_data_descriptor *desc, + u32 size, struct dma_pool *dma_pool, tspp_allocator *alloc, void *user) +{ + if (size < TSPP_MIN_BUFFER_SIZE || + size > TSPP_MAX_BUFFER_SIZE) { + pr_err("tspp: bad buffer size %i", size); + return -ENOMEM; + } + + if (alloc) { + TSPP_DEBUG("tspp using alloc function"); + desc->virt_base = alloc(channel_id, size, + &desc->phys_base, user); + } else { + if (!dma_pool) + desc->virt_base = dma_alloc_coherent(NULL, size, + &desc->phys_base, GFP_KERNEL); + else + desc->virt_base = dma_pool_alloc(dma_pool, GFP_KERNEL, + &desc->phys_base); + + if (desc->virt_base == 0) { + pr_err("tspp: dma buffer allocation failed %i\n", size); + return -ENOMEM; + } + } + + desc->size = size; + return 0; +} + +static int tspp_queue_buffer(struct tspp_channel *channel, + struct tspp_mem_buffer *buffer) +{ + int rc; + u32 flags = 0; + + /* make sure the interrupt frequency is valid */ + if (channel->int_freq < 1) + channel->int_freq = 1; + + /* generate interrupt according to requested frequency */ + if (buffer->desc.id % channel->int_freq == channel->int_freq-1) + flags = SPS_IOVEC_FLAG_INT; + + /* start the transfer */ + rc = sps_transfer_one(channel->pipe, + buffer->sps.phys_base, + buffer->sps.size, + flags ? 
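+			/* pass the device as user context only when an interrupt is requested */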
channel->pdev : NULL, + flags); + if (rc < 0) + return rc; + + buffer->state = TSPP_BUF_STATE_WAITING; + + return 0; +} + +static int tspp_global_reset(struct tspp_device *pdev) +{ + u32 i, val; + + /* stop all TSIFs */ + for (i = 0; i < TSPP_TSIF_INSTANCES; i++) { + pdev->tsif[i].ref_count = 1; /* allows stopping hw */ + tspp_stop_tsif(&pdev->tsif[i]); /* will reset ref_count to 0 */ + pdev->tsif[i].time_limit = TSPP_TSIF_DEFAULT_TIME_LIMIT; + pdev->tsif[i].clock_inverse = 0; + pdev->tsif[i].data_inverse = 0; + pdev->tsif[i].sync_inverse = 0; + pdev->tsif[i].enable_inverse = 0; + pdev->tsif[i].lpass_timer_enable = 0; + } + writel_relaxed(TSPP_RST_RESET, pdev->base + TSPP_RST); + /* assure state is reset before continuing with configuration */ + wmb(); + + /* TSPP tables */ + for (i = 0; i < TSPP_FILTER_TABLES; i++) + memset_io(pdev->filters[i], + 0, sizeof(struct tspp_pid_filter_table)); + + /* disable all filters */ + val = (2 << TSPP_NUM_CHANNELS) - 1; + writel_relaxed(val, pdev->base + TSPP_PS_DISABLE); + + /* TSPP registers */ + val = readl_relaxed(pdev->base + TSPP_CONTROL); + writel_relaxed(val | TSPP_CLK_CONTROL_FORCE_PERF_CNT, + pdev->base + TSPP_CONTROL); + /* assure tspp performance count clock is set to 0 */ + wmb(); + memset_io(pdev->tspp_global_performance, 0, + sizeof(struct tspp_global_performance_regs)); + memset_io(pdev->tspp_pipe_context, 0, + sizeof(struct tspp_pipe_context_regs)); + memset_io(pdev->tspp_pipe_performance, 0, + sizeof(struct tspp_pipe_performance_regs)); + /* assure tspp pipe context registers are set to 0 */ + wmb(); + writel_relaxed(val & ~TSPP_CLK_CONTROL_FORCE_PERF_CNT, + pdev->base + TSPP_CONTROL); + /* assure tspp performance count clock is reset */ + wmb(); + + val = readl_relaxed(pdev->base + TSPP_CONFIG); + val &= ~(TSPP_CONFIG_PS_LEN_ERR_MASK | + TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK | + TSPP_CONFIG_PS_CONT_ERR_MASK); + TSPP_CONFIG_SET_PACKET_LENGTH(val, TSPP_PACKET_LENGTH); + writel_relaxed(val, pdev->base + TSPP_CONFIG); + writel_relaxed(0x0007ffff, pdev->base + TSPP_IRQ_MASK); + writel_relaxed(0x000fffff, pdev->base + TSPP_IRQ_CLEAR); + writel_relaxed(0, pdev->base + TSPP_RST); + /* assure tspp reset clear */ + wmb(); + + tspp_key_entry = 0; + + return 0; +} + +static void tspp_channel_init(struct tspp_channel *channel, + struct tspp_device *pdev) +{ + channel->pdev = pdev; + channel->data = NULL; + channel->read = NULL; + channel->waiting = NULL; + channel->locked = NULL; + channel->id = channel_id++; + channel->used = 0; + channel->buffer_size = TSPP_MIN_BUFFER_SIZE; + channel->max_buffers = TSPP_NUM_BUFFERS; + channel->buffer_count = 0; + channel->filter_count = 0; + channel->int_freq = 1; + channel->src = TSPP_SOURCE_NONE; + channel->mode = TSPP_MODE_DISABLED; + channel->notifier = NULL; + channel->notify_data = NULL; + channel->expiration_period_ms = 0; + channel->memfree = NULL; + channel->user_info = NULL; + init_waitqueue_head(&channel->in_queue); +} + +static void tspp_set_tsif_mode(struct tspp_channel *channel, + enum tspp_tsif_mode mode) +{ + int index; + + switch (channel->src) { + case TSPP_SOURCE_TSIF0: + index = 0; + break; + case TSPP_SOURCE_TSIF1: + index = 1; + break; + default: + pr_warn("tspp: can't set mode for non-tsif source %d", + channel->src); + return; + } + channel->pdev->tsif[index].mode = mode; +} + +static void tspp_set_signal_inversion(struct tspp_channel *channel, + int clock_inverse, int data_inverse, + int sync_inverse, int enable_inverse) +{ + int index; + + switch (channel->src) { + case TSPP_SOURCE_TSIF0: + 
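+		/* signal polarity is a property of the TSIF instance, not of the channel */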
index = 0; + break; + case TSPP_SOURCE_TSIF1: + index = 1; + break; + default: + return; + } + channel->pdev->tsif[index].clock_inverse = clock_inverse; + channel->pdev->tsif[index].data_inverse = data_inverse; + channel->pdev->tsif[index].sync_inverse = sync_inverse; + channel->pdev->tsif[index].enable_inverse = enable_inverse; +} + +static int tspp_is_buffer_size_aligned(u32 size, enum tspp_mode mode) +{ + u32 alignment; + + switch (mode) { + case TSPP_MODE_RAW: + /* must be a multiple of 192 */ + alignment = (TSPP_PACKET_LENGTH + 4); + if (size % alignment) + return 0; + return 1; + + case TSPP_MODE_RAW_NO_SUFFIX: + /* must be a multiple of 188 */ + alignment = TSPP_PACKET_LENGTH; + if (size % alignment) + return 0; + return 1; + + case TSPP_MODE_DISABLED: + case TSPP_MODE_PES: + default: + /* no alignment requirement */ + return 1; + } + +} + +static u32 tspp_align_buffer_size_by_mode(u32 size, enum tspp_mode mode) +{ + u32 new_size; + u32 alignment; + + switch (mode) { + case TSPP_MODE_RAW: + /* must be a multiple of 192 */ + alignment = (TSPP_PACKET_LENGTH + 4); + break; + + case TSPP_MODE_RAW_NO_SUFFIX: + /* must be a multiple of 188 */ + alignment = TSPP_PACKET_LENGTH; + break; + + case TSPP_MODE_DISABLED: + case TSPP_MODE_PES: + default: + /* no alignment requirement - give the user what he asks for */ + alignment = 1; + break; + } + /* align up */ + new_size = (((size + alignment - 1) / alignment) * alignment); + return new_size; +} + +static void tspp_destroy_buffers(u32 channel_id, struct tspp_channel *channel) +{ + int i; + struct tspp_mem_buffer *pbuf, *temp; + + pbuf = channel->data; + for (i = 0; i < channel->buffer_count; i++) { + if (pbuf->desc.phys_base) { + if (channel->memfree) { + channel->memfree(channel_id, + pbuf->desc.size, + pbuf->desc.virt_base, + pbuf->desc.phys_base, + channel->user_info); + } else { + if (!channel->dma_pool) + dma_free_coherent( + &channel->pdev->pdev->dev, + pbuf->desc.size, + pbuf->desc.virt_base, + pbuf->desc.phys_base); + else + dma_pool_free(channel->dma_pool, + pbuf->desc.virt_base, + pbuf->desc.phys_base); + } + pbuf->desc.phys_base = 0; + } + pbuf->desc.virt_base = 0; + pbuf->state = TSPP_BUF_STATE_EMPTY; + temp = pbuf; + pbuf = pbuf->next; + kfree(temp); + } +} + +static int msm_tspp_req_irqs(struct tspp_device *device) +{ + int rc; + int i; + int j; + + rc = request_irq(device->tspp_irq, tspp_isr, IRQF_SHARED, + dev_name(&device->pdev->dev), device); + if (rc) { + dev_err(&device->pdev->dev, + "failed to request TSPP IRQ %d : %d", + device->tspp_irq, rc); + return rc; + } + + for (i = 0; i < TSPP_TSIF_INSTANCES; i++) { + rc = request_irq(device->tsif[i].tsif_irq, + tsif_isr, IRQF_SHARED, dev_name(&device->pdev->dev), + &device->tsif[i]); + if (rc) { + dev_err(&device->pdev->dev, + "failed to request TSIF%d IRQ: %d", + i, rc); + goto failed; + } + } + device->req_irqs = true; + return 0; + +failed: + free_irq(device->tspp_irq, device); + for (j = 0; j < i; j++) + free_irq(device->tsif[j].tsif_irq, device); + + return rc; +} + +static inline void msm_tspp_free_irqs(struct tspp_device *device) +{ + int i; + + for (i = 0; i < TSPP_TSIF_INSTANCES; i++) { + if (device->tsif[i].tsif_irq) + free_irq(device->tsif[i].tsif_irq, &device->tsif[i]); + } + + if (device->tspp_irq) + free_irq(device->tspp_irq, device); + device->req_irqs = false; +} + +/*** TSPP API functions ***/ + +/** + * tspp_open_stream - open a TSPP stream for use. 
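+ *
+ * Illustrative (hypothetical) call for device 0, channel 0, reading
+ * from TSIF0 in mode 1 with no signal inversion:
+ *
+ *	struct tspp_select_source src = {
+ *		.source = TSPP_SOURCE_TSIF0,
+ *		.mode = TSPP_TSIF_MODE_1,
+ *		.clk_inverse = 0,
+ *		.data_inverse = 0,
+ *		.sync_inverse = 0,
+ *		.enable_inverse = 0,
+ *	};
+ *
+ *	if (tspp_open_stream(0, 0, &src))
+ *		pr_err("client: failed to open TSIF0 stream\n");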
+ * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * @source: stream source parameters. + * + * Return error status + * + */ +int tspp_open_stream(u32 dev, u32 channel_id, + struct tspp_select_source *source) +{ + u32 val; + int rc; + struct tspp_device *pdev; + struct tspp_channel *channel; + bool req_irqs = false; + + TSPP_DEBUG("tspp_open_stream %i %i %i %i", + dev, channel_id, source->source, source->mode); + + if (dev >= TSPP_MAX_DEVICES) { + pr_err("tspp: device id out of range"); + return -ENODEV; + } + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_str: can't find device %i", dev); + return -ENODEV; + } + channel = &pdev->channels[channel_id]; + channel->src = source->source; + tspp_set_tsif_mode(channel, source->mode); + tspp_set_signal_inversion(channel, source->clk_inverse, + source->data_inverse, source->sync_inverse, + source->enable_inverse); + + /* Request IRQ resources on first open */ + if (!pdev->req_irqs && (source->source == TSPP_SOURCE_TSIF0 || + source->source == TSPP_SOURCE_TSIF1)) { + rc = msm_tspp_req_irqs(pdev); + if (rc) { + pr_err("tspp: error requesting irqs\n"); + return rc; + } + req_irqs = true; + } + + switch (source->source) { + case TSPP_SOURCE_TSIF0: + if (tspp_config_gpios(pdev, channel->src, 1) != 0) { + rc = -EBUSY; + pr_err("tspp: error enabling tsif0 GPIOs\n"); + goto free_irq; + } + /* make sure TSIF0 is running & enabled */ + if (tspp_start_tsif(&pdev->tsif[0]) != 0) { + rc = -EBUSY; + pr_err("tspp: error starting tsif0"); + goto free_irq; + } + if (pdev->tsif[0].ref_count == 1) { + val = readl_relaxed(pdev->base + TSPP_CONTROL); + writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF0_SRC_DIS, + pdev->base + TSPP_CONTROL); + /* Assure BAM TS PKT packet processing is enabled */ + wmb(); + } + break; + case TSPP_SOURCE_TSIF1: + if (tspp_config_gpios(pdev, channel->src, 1) != 0) { + rc = -EBUSY; + pr_err("tspp: error enabling tsif1 GPIOs\n"); + goto free_irq; + } + /* make sure TSIF1 is running & enabled */ + if (tspp_start_tsif(&pdev->tsif[1]) != 0) { + rc = -EBUSY; + pr_err("tspp: error starting tsif1"); + goto free_irq; + } + if (pdev->tsif[1].ref_count == 1) { + val = readl_relaxed(pdev->base + TSPP_CONTROL); + writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF1_SRC_DIS, + pdev->base + TSPP_CONTROL); + /* Assure BAM TS PKT packet processing is enabled */ + wmb(); + } + break; + case TSPP_SOURCE_MEM: + break; + default: + pr_err("tspp: channel %i invalid source %i", + channel->id, source->source); + return -EBUSY; + } + + return 0; + +free_irq: + /* Free irqs only if were requested during opening of this stream */ + if (req_irqs) + msm_tspp_free_irqs(pdev); + return rc; +} +EXPORT_SYMBOL(tspp_open_stream); + +/** + * tspp_close_stream - close a TSPP stream. 
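+ *
+ * Closing a stream drops the reference on the backing TSIF instance,
+ * sets the corresponding SRC_DIS bit in TSPP_CONTROL once the last
+ * user is gone, and frees the requested IRQs when neither TSIF is in
+ * use any more.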
+ * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * + * Return error status + * + */ +int tspp_close_stream(u32 dev, u32 channel_id) +{ + u32 val; + u32 prev_ref_count = 0; + struct tspp_device *pdev; + struct tspp_channel *channel; + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_cs: can't find device %i", dev); + return -EBUSY; + } + channel = &pdev->channels[channel_id]; + + switch (channel->src) { + case TSPP_SOURCE_TSIF0: + prev_ref_count = pdev->tsif[0].ref_count; + tspp_stop_tsif(&pdev->tsif[0]); + if (tspp_config_gpios(pdev, channel->src, 0) != 0) + pr_err("tspp: error disabling tsif0 GPIOs\n"); + + if (prev_ref_count == 1) { + val = readl_relaxed(pdev->base + TSPP_CONTROL); + writel_relaxed(val | TSPP_CONTROL_TSP_TSIF0_SRC_DIS, + pdev->base + TSPP_CONTROL); + /* Assure BAM TS PKT packet processing is disabled */ + wmb(); + } + break; + case TSPP_SOURCE_TSIF1: + prev_ref_count = pdev->tsif[1].ref_count; + tspp_stop_tsif(&pdev->tsif[1]); + if (tspp_config_gpios(pdev, channel->src, 0) != 0) + pr_err("tspp: error disabling tsif0 GPIOs\n"); + + if (prev_ref_count == 1) { + val = readl_relaxed(pdev->base + TSPP_CONTROL); + writel_relaxed(val | TSPP_CONTROL_TSP_TSIF1_SRC_DIS, + pdev->base + TSPP_CONTROL); + /* Assure BAM TS PKT packet processing is disabled */ + wmb(); + } + break; + case TSPP_SOURCE_MEM: + break; + case TSPP_SOURCE_NONE: + break; + } + + channel->src = TSPP_SOURCE_NONE; + + /* Free requested interrupts to save power */ + if ((pdev->tsif[0].ref_count + pdev->tsif[1].ref_count) == 0 && + prev_ref_count) + msm_tspp_free_irqs(pdev); + + return 0; +} +EXPORT_SYMBOL(tspp_close_stream); + +static int tspp_init_sps_device(struct tspp_device *dev) +{ + int ret; + + ret = sps_register_bam_device(&dev->bam_props, &dev->bam_handle); + if (ret) { + pr_err("tspp: failed to register bam device, err-%d\n", ret); + return ret; + } + + ret = sps_device_reset(dev->bam_handle); + if (ret) { + sps_deregister_bam_device(dev->bam_handle); + pr_err("tspp: error resetting bam device, err=%d\n", ret); + return ret; + } + + return 0; +} + +/** + * tspp_open_channel - open a TSPP channel. 
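+ *
+ * Opening the first channel on a device also starts the clocks and
+ * bus/regulator votes, registers the BAM and takes a wakeup source;
+ * these are released again when the last channel is closed.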
+ * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * + * Return error status + * + */ +int tspp_open_channel(u32 dev, u32 channel_id) +{ + int rc = 0; + struct sps_connect *config; + struct sps_register_event *event; + struct tspp_channel *channel; + struct tspp_device *pdev; + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_oc: can't find device %i", dev); + return -ENODEV; + } + channel = &pdev->channels[channel_id]; + + if (channel->used) { + pr_err("tspp channel already in use"); + return -EBUSY; + } + + config = &channel->config; + event = &channel->event; + + /* start the clocks if needed */ + if (tspp_channels_in_use(pdev) == 0) { + rc = tspp_clock_start(pdev); + if (rc) + return rc; + + if (pdev->bam_handle == SPS_DEV_HANDLE_INVALID) { + rc = tspp_init_sps_device(pdev); + if (rc) { + pr_err("tspp: failed to init sps device, err=%d\n", + rc); + tspp_clock_stop(pdev); + return rc; + } + } + + __pm_stay_awake(&pdev->ws); + } + + /* mark it as used */ + channel->used = 1; + + /* start the bam */ + channel->pipe = sps_alloc_endpoint(); + if (channel->pipe == 0) { + pr_err("tspp: error allocating endpoint"); + rc = -ENOMEM; + goto err_sps_alloc; + } + + /* get default configuration */ + sps_get_config(channel->pipe, config); + + config->source = pdev->bam_handle; + config->destination = SPS_DEV_HANDLE_MEM; + config->mode = SPS_MODE_SRC; + config->options = + SPS_O_AUTO_ENABLE | /* connection is auto-enabled */ + SPS_O_STREAMING | /* streaming mode */ + SPS_O_DESC_DONE | /* interrupt on end of descriptor */ + SPS_O_ACK_TRANSFERS | /* must use sps_get_iovec() */ + SPS_O_HYBRID; /* Read actual descriptors in sps_get_iovec() */ + config->src_pipe_index = channel->id; + config->desc.size = + TSPP_SPS_DESCRIPTOR_COUNT * SPS_DESCRIPTOR_SIZE; + config->desc.base = dma_alloc_coherent(&pdev->pdev->dev, + config->desc.size, + &config->desc.phys_base, + GFP_KERNEL); + if (config->desc.base == 0) { + pr_err("tspp: error allocating sps descriptors"); + rc = -ENOMEM; + goto err_desc_alloc; + } + + memset(config->desc.base, 0, config->desc.size); + + rc = sps_connect(channel->pipe, config); + if (rc) { + pr_err("tspp: error connecting bam"); + goto err_connect; + } + + event->mode = SPS_TRIGGER_CALLBACK; + event->options = SPS_O_DESC_DONE; + event->callback = tspp_sps_complete_cb; + event->xfer_done = NULL; + event->user = pdev; + + rc = sps_register_event(channel->pipe, event); + if (rc) { + pr_err("tspp: error registering event"); + goto err_event; + } + + init_timer(&channel->expiration_timer); + channel->expiration_timer.function = tspp_expiration_timer; + channel->expiration_timer.data = (unsigned long)pdev; + channel->expiration_timer.expires = 0xffffffffL; + + rc = pm_runtime_get(&pdev->pdev->dev); + if (rc < 0) { + dev_err(&pdev->pdev->dev, + "Runtime PM: Unable to wake up tspp device, rc = %d", + rc); + } + return 0; + +err_event: + sps_disconnect(channel->pipe); +err_connect: + dma_free_coherent(&pdev->pdev->dev, config->desc.size, + config->desc.base, config->desc.phys_base); +err_desc_alloc: + sps_free_endpoint(channel->pipe); +err_sps_alloc: + channel->used = 0; + return rc; +} +EXPORT_SYMBOL(tspp_open_channel); + +/** + * tspp_close_channel - close a TSPP channel. 
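+ *
+ * Clears any PID filters still routed to this channel (returning
+ * their key slots), disconnects the BAM pipe, frees the descriptor
+ * FIFO and data buffers, and stops the clocks when this was the last
+ * open channel.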
+ * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * + * Return error status + * + */ +int tspp_close_channel(u32 dev, u32 channel_id) +{ + int i; + int id; + int table_idx; + u32 val; + unsigned long flags; + + struct sps_connect *config; + struct tspp_device *pdev; + struct tspp_channel *channel; + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_close: can't find device %i", dev); + return -ENODEV; + } + channel = &pdev->channels[channel_id]; + + /* if the channel is not used, we are done */ + if (!channel->used) + return 0; + + /* + * Need to protect access to used and waiting fields, as they are + * used by the tasklet which is invoked from interrupt context + */ + spin_lock_irqsave(&pdev->spinlock, flags); + channel->used = 0; + channel->waiting = NULL; + spin_unlock_irqrestore(&pdev->spinlock, flags); + + if (channel->expiration_period_ms) + del_timer(&channel->expiration_timer); + + channel->notifier = NULL; + channel->notify_data = NULL; + channel->expiration_period_ms = 0; + + config = &channel->config; + pdev = channel->pdev; + + /* disable pipe (channel) */ + val = readl_relaxed(pdev->base + TSPP_PS_DISABLE); + writel_relaxed(val | channel->id, pdev->base + TSPP_PS_DISABLE); + /* Assure PS_DISABLE register is set */ + wmb(); + + /* unregister all filters for this channel */ + for (table_idx = 0; table_idx < TSPP_FILTER_TABLES; table_idx++) { + for (i = 0; i < TSPP_NUM_PRIORITIES; i++) { + struct tspp_pid_filter *filter = + &pdev->filters[table_idx]->filter[i]; + id = FILTER_GET_PIPE_NUMBER0(filter); + if (id == channel->id) { + if (FILTER_HAS_ENCRYPTION(filter)) + tspp_free_key_entry( + FILTER_GET_KEY_NUMBER(filter)); + filter->config = 0; + filter->filter = 0; + } + } + } + channel->filter_count = 0; + + /* disconnect the bam */ + if (sps_disconnect(channel->pipe) != 0) + pr_warn("tspp: Error freeing sps endpoint (%i)", channel->id); + + /* destroy the buffers */ + dma_free_coherent(&pdev->pdev->dev, config->desc.size, + config->desc.base, config->desc.phys_base); + + sps_free_endpoint(channel->pipe); + + tspp_destroy_buffers(channel_id, channel); + + dma_pool_destroy(channel->dma_pool); + channel->dma_pool = NULL; + + channel->src = TSPP_SOURCE_NONE; + channel->mode = TSPP_MODE_DISABLED; + channel->memfree = NULL; + channel->user_info = NULL; + channel->buffer_count = 0; + channel->data = NULL; + channel->read = NULL; + channel->locked = NULL; + + if (tspp_channels_in_use(pdev) == 0) { + sps_deregister_bam_device(pdev->bam_handle); + pdev->bam_handle = SPS_DEV_HANDLE_INVALID; + + __pm_relax(&pdev->ws); + tspp_clock_stop(pdev); + } + + pm_runtime_put(&pdev->pdev->dev); + + return 0; +} +EXPORT_SYMBOL(tspp_close_channel); + +/** + * tspp_get_ref_clk_counter - return the TSIF clock reference (TCR) counter. + * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @source: The TSIF source from which the counter should be read + * @tcr_counter: the value of TCR counter + * + * Return error status + * + * TCR increments at a rate equal to 27 MHz/256 = 105.47 kHz. + * If source is neither TSIF 0 or TSIF1 0 is returned. 
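+ * At that rate a counter delta of about 105469 ticks corresponds to
+ * roughly one second of stream time.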
+ */ +int tspp_get_ref_clk_counter(u32 dev, enum tspp_source source, u32 *tcr_counter) +{ + struct tspp_device *pdev; + struct tspp_tsif_device *tsif_device; + + if (!tcr_counter) + return -EINVAL; + + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_get_ref_clk_counter: can't find device %i\n", dev); + return -ENODEV; + } + + switch (source) { + case TSPP_SOURCE_TSIF0: + tsif_device = &pdev->tsif[0]; + break; + + case TSPP_SOURCE_TSIF1: + tsif_device = &pdev->tsif[1]; + break; + + default: + tsif_device = NULL; + break; + } + + if (tsif_device && tsif_device->ref_count) + *tcr_counter = ioread32(tsif_device->base + TSIF_CLK_REF_OFF); + else + *tcr_counter = 0; + + return 0; +} +EXPORT_SYMBOL(tspp_get_ref_clk_counter); + +/** + * tspp_get_lpass_time_counter - return the LPASS Timer counter value. + * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @source: The TSIF source from which the counter should be read + * @tcr_counter: the value of TCR counter + * + * Return error status + * + * If source is neither TSIF 0 or TSIF1 0 is returned. + */ +int tspp_get_lpass_time_counter(u32 dev, enum tspp_source source, + u64 *lpass_time_counter) +{ + struct tspp_device *pdev; + struct tspp_tsif_device *tsif_device; + + if (!lpass_time_counter) + return -EINVAL; + + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_get_lpass_time_counter: can't find device %i\n", + dev); + return -ENODEV; + } + + switch (source) { + case TSPP_SOURCE_TSIF0: + tsif_device = &pdev->tsif[0]; + break; + + case TSPP_SOURCE_TSIF1: + tsif_device = &pdev->tsif[1]; + break; + + default: + tsif_device = NULL; + break; + } + + if (tsif_device && tsif_device->ref_count) { + if (avcs_core_query_timer(lpass_time_counter) < 0) { + pr_err("tspp_get_lpass_time_counter: read error\n"); + *lpass_time_counter = 0; + return -ENETRESET; + } + } else + *lpass_time_counter = 0; + + return 0; +} +EXPORT_SYMBOL(tspp_get_lpass_time_counter); + +/** + * tspp_get_tts_source - Return the TTS source value. + * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @tts_source:Updated TTS source type + * + * Return error status + * + */ +int tspp_get_tts_source(u32 dev, int *tts_source) +{ + struct tspp_device *pdev; + + if (tts_source == NULL) + return -EINVAL; + + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_get_tts_source: can't find device %i\n", + dev); + return -ENODEV; + } + + *tts_source = pdev->tts_source; + + return 0; +} +EXPORT_SYMBOL(tspp_get_tts_source); + +/** + * tspp_add_filter - add a TSPP filter to a channel. 
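+ *
+ * Illustrative (hypothetical) call adding a raw-mode filter for PID
+ * 0x100 on channel 0 of device 0:
+ *
+ *	struct tspp_filter f = {
+ *		.priority = 0,
+ *		.pid = 0x100,
+ *		.mask = 0x1FFF,
+ *		.mode = TSPP_MODE_RAW,
+ *		.source = TSPP_SOURCE_TSIF0,
+ *		.decrypt = 0,
+ *	};
+ *
+ *	if (tspp_add_filter(0, 0, &f))
+ *		pr_err("client: failed to add PID filter\n");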
+ * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * @filter: TSPP filter parameters + * + * Return error status + * + */ +int tspp_add_filter(u32 dev, u32 channel_id, + struct tspp_filter *filter) +{ + int i, rc; + int other_channel; + int entry; + u32 val, pid, enabled; + struct tspp_device *pdev; + struct tspp_pid_filter p; + struct tspp_channel *channel; + + TSPP_DEBUG("tspp: add filter"); + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_add: can't find device %i", dev); + return -ENODEV; + } + + channel = &pdev->channels[channel_id]; + + if (filter->source > TSPP_SOURCE_MEM) { + pr_err("tspp invalid source"); + return -ENOSR; + } + + if (filter->priority >= TSPP_NUM_PRIORITIES) { + pr_err("tspp invalid filter priority"); + return -ENOSR; + } + + channel->mode = filter->mode; + /* + * if buffers are already allocated, verify they fulfil + * the alignment requirements. + */ + if ((channel->buffer_count > 0) && + (!tspp_is_buffer_size_aligned(channel->buffer_size, channel->mode))) + pr_warn("tspp: buffers allocated with incorrect alignment\n"); + + if (filter->mode == TSPP_MODE_PES) { + for (i = 0; i < TSPP_NUM_PRIORITIES; i++) { + struct tspp_pid_filter *tspp_filter = + &pdev->filters[channel->src]->filter[i]; + pid = FILTER_GET_PIPE_PID((tspp_filter)); + enabled = FILTER_GET_PIPE_PROCESS0(tspp_filter); + if (enabled && (pid == filter->pid)) { + other_channel = + FILTER_GET_PIPE_NUMBER0(tspp_filter); + pr_err("tspp: pid 0x%x already in use by channel %i", + filter->pid, other_channel); + return -EBADSLT; + } + } + } + + /* make sure this priority is not already in use */ + enabled = FILTER_GET_PIPE_PROCESS0( + (&(pdev->filters[channel->src]->filter[filter->priority]))); + if (enabled) { + pr_err("tspp: filter priority %i source %i is already enabled\n", + filter->priority, channel->src); + return -ENOSR; + } + + if (channel->mode == TSPP_MODE_PES) { + /* + * if we are already processing in PES mode, disable pipe + * (channel) and filter to be updated + */ + val = readl_relaxed(pdev->base + TSPP_PS_DISABLE); + writel_relaxed(val | (1 << channel->id), + pdev->base + TSPP_PS_DISABLE); + /* Assure PS_DISABLE register is set */ + wmb(); + } + + /* update entry */ + p.filter = 0; + p.config = FILTER_TRANS_END_DISABLE; + FILTER_SET_PIPE_PROCESS0((&p), filter->mode); + FILTER_SET_PIPE_PID((&p), filter->pid); + FILTER_SET_PID_MASK((&p), filter->mask); + FILTER_SET_PIPE_NUMBER0((&p), channel->id); + FILTER_SET_PIPE_PROCESS1((&p), TSPP_MODE_DISABLED); + if (filter->decrypt) { + entry = tspp_get_key_entry(); + if (entry == -1) { + pr_err("tspp: no more keys available!"); + } else { + p.config |= FILTER_DECRYPT; + FILTER_SET_KEY_NUMBER((&p), entry); + } + } + + pdev->filters[channel->src]-> + filter[filter->priority].config = p.config; + pdev->filters[channel->src]-> + filter[filter->priority].filter = p.filter; + + /* + * allocate buffers if needed (i.e. if user did has not already called + * tspp_allocate_buffers() explicitly). 
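+	 * In that case channel->max_buffers buffers of the mode-aligned
+	 * default size are allocated below with the default allocator.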
+ */ + if (channel->buffer_count == 0) { + channel->buffer_size = + tspp_align_buffer_size_by_mode(channel->buffer_size, + channel->mode); + rc = tspp_allocate_buffers(dev, channel->id, + channel->max_buffers, + channel->buffer_size, + channel->int_freq, NULL, NULL, NULL); + if (rc != 0) { + pr_err("tspp: tspp_allocate_buffers failed\n"); + return rc; + } + } + + /* reenable pipe */ + val = readl_relaxed(pdev->base + TSPP_PS_DISABLE); + writel_relaxed(val & ~(1 << channel->id), pdev->base + TSPP_PS_DISABLE); + /* Assure PS_DISABLE register is reset */ + wmb(); + val = readl_relaxed(pdev->base + TSPP_PS_DISABLE); + + channel->filter_count++; + + return 0; +} +EXPORT_SYMBOL(tspp_add_filter); + +/** + * tspp_remove_filter - remove a TSPP filter from a channel. + * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * @filter: TSPP filter parameters + * + * Return error status + * + */ +int tspp_remove_filter(u32 dev, u32 channel_id, + struct tspp_filter *filter) +{ + int entry; + u32 val; + struct tspp_device *pdev; + int src; + struct tspp_pid_filter *tspp_filter; + struct tspp_channel *channel; + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + if (!filter) { + pr_err("tspp: NULL filter pointer"); + return -EINVAL; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_remove: can't find device %i", dev); + return -ENODEV; + } + if (filter->priority >= TSPP_NUM_PRIORITIES) { + pr_err("tspp invalid filter priority"); + return -ENOSR; + } + channel = &pdev->channels[channel_id]; + + src = channel->src; + if ((src == TSPP_SOURCE_TSIF0) || (src == TSPP_SOURCE_TSIF1)) + tspp_filter = &(pdev->filters[src]->filter[filter->priority]); + else { + pr_err("tspp_remove: wrong source type %d", src); + return -EINVAL; + } + + + /* disable pipe (channel) */ + val = readl_relaxed(pdev->base + TSPP_PS_DISABLE); + writel_relaxed(val | channel->id, pdev->base + TSPP_PS_DISABLE); + /* Assure PS_DISABLE register is set */ + wmb(); + + /* update data keys */ + if (tspp_filter->config & FILTER_DECRYPT) { + entry = FILTER_GET_KEY_NUMBER(tspp_filter); + tspp_free_key_entry(entry); + } + + /* update pid table */ + tspp_filter->config = 0; + tspp_filter->filter = 0; + + channel->filter_count--; + + /* reenable pipe */ + val = readl_relaxed(pdev->base + TSPP_PS_DISABLE); + writel_relaxed(val & ~(1 << channel->id), + pdev->base + TSPP_PS_DISABLE); + /* Assure PS_DISABLE register is reset */ + wmb(); + val = readl_relaxed(pdev->base + TSPP_PS_DISABLE); + + return 0; +} +EXPORT_SYMBOL(tspp_remove_filter); + +/** + * tspp_set_key - set TSPP key in key table. 
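+ *
+ * The key slot is looked up from the filters currently attached to
+ * the channel; if none of them has decryption enabled, -ENOKEY is
+ * returned.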
+ * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * @key: TSPP key parameters + * + * Return error status + * + */ +int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key) +{ + int i; + int id; + int key_index; + int data; + struct tspp_channel *channel; + struct tspp_device *pdev; + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_set: can't find device %i", dev); + return -ENODEV; + } + channel = &pdev->channels[channel_id]; + + /* read the key index used by this channel */ + for (i = 0; i < TSPP_NUM_PRIORITIES; i++) { + struct tspp_pid_filter *tspp_filter = + &(pdev->filters[channel->src]->filter[i]); + id = FILTER_GET_PIPE_NUMBER0(tspp_filter); + if (id == channel->id) { + if (FILTER_HAS_ENCRYPTION(tspp_filter)) { + key_index = FILTER_GET_KEY_NUMBER(tspp_filter); + break; + } + } + } + if (i == TSPP_NUM_PRIORITIES) { + pr_err("tspp: no encryption on this channel"); + return -ENOKEY; + } + + if (key->parity == TSPP_KEY_PARITY_EVEN) { + pdev->tspp_key_table->entry[key_index].even_lsb = key->lsb; + pdev->tspp_key_table->entry[key_index].even_msb = key->msb; + } else { + pdev->tspp_key_table->entry[key_index].odd_lsb = key->lsb; + pdev->tspp_key_table->entry[key_index].odd_msb = key->msb; + } + data = readl_relaxed(channel->pdev->base + TSPP_KEY_VALID); + + return 0; +} +EXPORT_SYMBOL(tspp_set_key); + +/** + * tspp_register_notification - register TSPP channel notification function. + * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * @notify: notification function + * @userdata: user data to pass to notification function + * @timer_ms: notification for partially filled buffers + * + * Return error status + * + */ +int tspp_register_notification(u32 dev, u32 channel_id, + tspp_notifier *notify, void *userdata, u32 timer_ms) +{ + struct tspp_channel *channel; + struct tspp_device *pdev; + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_reg: can't find device %i", dev); + return -ENODEV; + } + channel = &pdev->channels[channel_id]; + channel->notifier = notify; + channel->notify_data = userdata; + channel->expiration_period_ms = timer_ms; + + return 0; +} +EXPORT_SYMBOL(tspp_register_notification); + +/** + * tspp_unregister_notification - unregister TSPP channel notification function. + * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * + * Return error status + * + */ +int tspp_unregister_notification(u32 dev, u32 channel_id) +{ + struct tspp_channel *channel; + struct tspp_device *pdev; + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_unreg: can't find device %i", dev); + return -ENODEV; + } + channel = &pdev->channels[channel_id]; + channel->notifier = NULL; + channel->notify_data = 0; + return 0; +} +EXPORT_SYMBOL(tspp_unregister_notification); + +/** + * tspp_get_buffer - get TSPP data buffer. 
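+ *
+ * Typical (hypothetical) consumer loop for device 0, channel 0;
+ * process_ts() stands in for client-side handling:
+ *
+ *	const struct tspp_data_descriptor *d;
+ *
+ *	while ((d = tspp_get_buffer(0, 0)) != NULL) {
+ *		process_ts(d->virt_base, d->size);
+ *		tspp_release_buffer(0, 0, d->id);
+ *	}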
+ * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * + * Return error status + * + */ +const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id) +{ + struct tspp_mem_buffer *buffer; + struct tspp_channel *channel; + struct tspp_device *pdev; + unsigned long flags; + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return NULL; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp_get: can't find device %i", dev); + return NULL; + } + + spin_lock_irqsave(&pdev->spinlock, flags); + + channel = &pdev->channels[channel_id]; + + if (!channel->read) { + spin_unlock_irqrestore(&pdev->spinlock, flags); + pr_warn("tspp: no buffer to get on channel %i!", + channel->id); + return NULL; + } + + buffer = channel->read; + /* see if we have any buffers ready to read */ + if (buffer->state != TSPP_BUF_STATE_DATA) { + spin_unlock_irqrestore(&pdev->spinlock, flags); + return NULL; + } + + if (buffer->state == TSPP_BUF_STATE_DATA) { + /* mark the buffer as busy */ + buffer->state = TSPP_BUF_STATE_LOCKED; + + /* increment the pointer along the list */ + channel->read = channel->read->next; + } + + spin_unlock_irqrestore(&pdev->spinlock, flags); + + return &buffer->desc; +} +EXPORT_SYMBOL(tspp_get_buffer); + +/** + * tspp_release_buffer - release TSPP data buffer back to TSPP. + * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * @descriptor_id: buffer descriptor ID + * + * Return error status + * + */ +int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id) +{ + int i, found = 0; + struct tspp_mem_buffer *buffer; + struct tspp_channel *channel; + struct tspp_device *pdev; + unsigned long flags; + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("tspp: channel id out of range"); + return -ECHRNG; + } + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("tspp: can't find device %i", dev); + return -ENODEV; + } + + spin_lock_irqsave(&pdev->spinlock, flags); + + channel = &pdev->channels[channel_id]; + + if (descriptor_id > channel->buffer_count) + pr_warn("tspp: desc id looks weird 0x%08x", descriptor_id); + + /* find the correct descriptor */ + buffer = channel->locked; + for (i = 0; i < channel->buffer_count; i++) { + if (buffer->desc.id == descriptor_id) { + found = 1; + break; + } + buffer = buffer->next; + } + channel->locked = channel->locked->next; + + if (!found) { + spin_unlock_irqrestore(&pdev->spinlock, flags); + pr_err("tspp: cant find desc %i", descriptor_id); + return -EINVAL; + } + + /* make sure the buffer is in the expected state */ + if (buffer->state != TSPP_BUF_STATE_LOCKED) { + spin_unlock_irqrestore(&pdev->spinlock, flags); + pr_err("tspp: buffer %i not locked", descriptor_id); + return -EINVAL; + } + /* unlock the buffer and requeue it */ + buffer->state = TSPP_BUF_STATE_WAITING; + + if (tspp_queue_buffer(channel, buffer)) + pr_warn("tspp: can't requeue buffer"); + + spin_unlock_irqrestore(&pdev->spinlock, flags); + + return 0; +} +EXPORT_SYMBOL(tspp_release_buffer); + +/** + * tspp_allocate_buffers - allocate TSPP data buffers. + * + * @dev: TSPP device (up to TSPP_MAX_DEVICES) + * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS) + * @count: number of buffers to allocate + * @size: size of each buffer to allocate + * @int_freq: interrupt frequency + * @alloc: user defined memory allocator function. Pass NULL for default. + * @memfree: user defined memory free function. 
Pass NULL for default. + * @user: user data to pass to the memory allocator/free function + * + * Return error status + * + * The user can optionally call this function explicitly to allocate the TSPP + * data buffers. Alternatively, if the user did not call this function, it + * is called implicitly by tspp_add_filter(). + */ +int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count, u32 size, + u32 int_freq, tspp_allocator *alloc, + tspp_memfree *memfree, void *user) +{ + struct tspp_channel *channel; + struct tspp_device *pdev; + struct tspp_mem_buffer *last = NULL; + + TSPP_DEBUG("tspp_allocate_buffers"); + + if (channel_id >= TSPP_NUM_CHANNELS) { + pr_err("%s: channel id out of range", __func__); + return -ECHRNG; + } + + pdev = tspp_find_by_id(dev); + if (!pdev) { + pr_err("%s: can't find device %i", __func__, dev); + return -ENODEV; + } + + if (count < MIN_ACCEPTABLE_BUFFER_COUNT) { + pr_err("%s: tspp requires a minimum of %i buffers\n", + __func__, MIN_ACCEPTABLE_BUFFER_COUNT); + return -EINVAL; + } + + if (count > TSPP_NUM_BUFFERS) { + pr_err("%s: tspp requires a maximum of %i buffers\n", + __func__, TSPP_NUM_BUFFERS); + return -EINVAL; + } + + channel = &pdev->channels[channel_id]; + + /* allow buffer allocation only if there was no previous buffer + * allocation for this channel. + */ + if (channel->buffer_count > 0) { + pr_err("%s: buffers already allocated for channel %u", + __func__, channel_id); + return -EINVAL; + } + + channel->max_buffers = count; + + /* set up interrupt frequency */ + if (int_freq > channel->max_buffers) { + int_freq = channel->max_buffers; + pr_warn("%s: setting interrupt frequency to %u\n", + __func__, int_freq); + } + channel->int_freq = int_freq; + /* + * it is the responsibility of the caller to tspp_allocate_buffers(), + * whether it's the user or the driver, to make sure the size parameter + * is compatible to the channel mode. + */ + channel->buffer_size = size; + + /* save user defined memory free function for later use */ + channel->memfree = memfree; + channel->user_info = user; + + /* + * For small buffers, create a DMA pool so that memory + * is not wasted through dma_alloc_coherent. 
+ */ + if (TSPP_USE_DMA_POOL(channel->buffer_size)) { + channel->dma_pool = dma_pool_create("tspp", + &pdev->pdev->dev, channel->buffer_size, 0, 0); + if (!channel->dma_pool) { + pr_err("%s: Can't allocate memory pool\n", __func__); + return -ENOMEM; + } + } else { + channel->dma_pool = NULL; + } + + + for (channel->buffer_count = 0; + channel->buffer_count < channel->max_buffers; + channel->buffer_count++) { + + /* allocate the descriptor */ + struct tspp_mem_buffer *desc = (struct tspp_mem_buffer *) + kmalloc(sizeof(struct tspp_mem_buffer), GFP_KERNEL); + if (!desc) { + pr_warn("%s: Can't allocate desc %i", + __func__, channel->buffer_count); + break; + } + + desc->desc.id = channel->buffer_count; + /* allocate the buffer */ + if (tspp_alloc_buffer(channel_id, &desc->desc, + channel->buffer_size, channel->dma_pool, + alloc, user) != 0) { + kfree(desc); + pr_warn("%s: Can't allocate buffer %i", + __func__, channel->buffer_count); + break; + } + + /* add the descriptor to the list */ + desc->filled = 0; + desc->read_index = 0; + if (!channel->data) { + channel->data = desc; + desc->next = channel->data; + } else { + if (last != NULL) + last->next = desc; + } + last = desc; + desc->next = channel->data; + + /* prepare the sps descriptor */ + desc->sps.phys_base = desc->desc.phys_base; + desc->sps.base = desc->desc.virt_base; + desc->sps.size = desc->desc.size; + + /* start the transfer */ + if (tspp_queue_buffer(channel, desc)) + pr_err("%s: can't queue buffer %i", + __func__, desc->desc.id); + } + + if (channel->buffer_count < channel->max_buffers) { + /* + * we failed to allocate the requested number of buffers. + * we don't allow a partial success, so need to clean up here. + */ + tspp_destroy_buffers(channel_id, channel); + channel->buffer_count = 0; + + dma_pool_destroy(channel->dma_pool); + channel->dma_pool = NULL; + return -ENOMEM; + } + + channel->waiting = channel->data; + channel->read = channel->data; + channel->locked = channel->data; + + /* Now that buffers are scheduled to HW, kick data expiration timer */ + if (channel->expiration_period_ms) + mod_timer(&channel->expiration_timer, + jiffies + + MSEC_TO_JIFFIES( + channel->expiration_period_ms)); + + return 0; +} +EXPORT_SYMBOL(tspp_allocate_buffers); + +/*** debugfs ***/ +static int debugfs_iomem_x32_set(void *data, u64 val) +{ + int rc; + int clock_started = 0; + struct tspp_device *pdev; + + pdev = tspp_find_by_id(0); + if (!pdev) { + pr_err("%s: can't find device 0\n", __func__); + return 0; + } + + if (tspp_channels_in_use(pdev) == 0) { + rc = tspp_clock_start(pdev); + if (rc) { + pr_err("%s: tspp_clock_start failed %d\n", + __func__, rc); + return 0; + } + clock_started = 1; + } + + writel_relaxed(val, data); + /* Assure register write */ + wmb(); + + if (clock_started) + tspp_clock_stop(pdev); + return 0; +} + +static int debugfs_iomem_x32_get(void *data, u64 *val) +{ + int rc; + int clock_started = 0; + struct tspp_device *pdev; + + pdev = tspp_find_by_id(0); + if (!pdev) { + pr_err("%s: can't find device 0\n", __func__); + *val = 0; + return 0; + } + + if (tspp_channels_in_use(pdev) == 0) { + rc = tspp_clock_start(pdev); + if (rc) { + pr_err("%s: tspp_clock_start failed %d\n", + __func__, rc); + *val = 0; + return 0; + } + clock_started = 1; + } + + *val = readl_relaxed(data); + + if (clock_started) + tspp_clock_stop(pdev); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get, + debugfs_iomem_x32_set, "0x%08llx"); + +static void tsif_debugfs_init(struct tspp_tsif_device *tsif_device, + int 
instance) +{ + char name[10]; + + snprintf(name, 10, "tsif%i", instance); + tsif_device->dent_tsif = debugfs_create_dir( + name, NULL); + if (tsif_device->dent_tsif) { + int i; + void __iomem *base = tsif_device->base; + + for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) { + tsif_device->debugfs_tsif_regs[i] = + debugfs_create_file( + debugfs_tsif_regs[i].name, + debugfs_tsif_regs[i].mode, + tsif_device->dent_tsif, + base + debugfs_tsif_regs[i].offset, + &fops_iomem_x32); + } + + debugfs_create_u32( + "stat_rx_chunks", 0664, + tsif_device->dent_tsif, + &tsif_device->stat_rx); + + debugfs_create_u32( + "stat_overflow", 0664, + tsif_device->dent_tsif, + &tsif_device->stat_overflow); + + debugfs_create_u32( + "stat_lost_sync", 0664, + tsif_device->dent_tsif, + &tsif_device->stat_lost_sync); + + debugfs_create_u32( + "stat_timeout", 0664, + tsif_device->dent_tsif, + &tsif_device->stat_timeout); + } +} + +static void tsif_debugfs_exit(struct tspp_tsif_device *tsif_device) +{ + int i; + + debugfs_remove_recursive(tsif_device->dent_tsif); + tsif_device->dent_tsif = NULL; + for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) + tsif_device->debugfs_tsif_regs[i] = NULL; +} + +static void tspp_debugfs_init(struct tspp_device *device, int instance) +{ + char name[10]; + + snprintf(name, 10, "tspp%i", instance); + device->dent = debugfs_create_dir( + name, NULL); + if (device->dent) { + int i; + void __iomem *base = device->base; + + for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++) + device->debugfs_regs[i] = + debugfs_create_file( + debugfs_tspp_regs[i].name, + debugfs_tspp_regs[i].mode, + device->dent, + base + debugfs_tspp_regs[i].offset, + &fops_iomem_x32); + } +} + +static void tspp_debugfs_exit(struct tspp_device *device) +{ + int i; + + debugfs_remove_recursive(device->dent); + for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++) + device->debugfs_regs[i] = NULL; +} + +static int msm_tspp_map_irqs(struct platform_device *pdev, + struct tspp_device *device) +{ + int rc; + + /* get IRQ numbers from platform information */ + + /* map TSPP IRQ */ + rc = platform_get_irq_byname(pdev, "TSIF_TSPP_IRQ"); + if (rc > 0) { + device->tspp_irq = rc; + } else { + dev_err(&pdev->dev, "failed to get TSPP IRQ"); + return -EINVAL; + } + + /* map TSIF IRQs */ + rc = platform_get_irq_byname(pdev, "TSIF0_IRQ"); + if (rc > 0) { + device->tsif[0].tsif_irq = rc; + } else { + dev_err(&pdev->dev, "failed to get TSIF0 IRQ"); + return -EINVAL; + } + + rc = platform_get_irq_byname(pdev, "TSIF1_IRQ"); + if (rc > 0) { + device->tsif[1].tsif_irq = rc; + } else { + dev_err(&pdev->dev, "failed to get TSIF1 IRQ"); + return -EINVAL; + } + + /* map BAM IRQ */ + rc = platform_get_irq_byname(pdev, "TSIF_BAM_IRQ"); + if (rc > 0) { + device->bam_irq = rc; + } else { + dev_err(&pdev->dev, "failed to get TSPP BAM IRQ"); + return -EINVAL; + } + + return 0; +} + +static int msm_tspp_probe(struct platform_device *pdev) +{ + int rc = -ENODEV; + u32 version; + u32 i; + struct tspp_device *device; + struct resource *mem_tsif0; + struct resource *mem_tsif1; + struct resource *mem_tspp; + struct resource *mem_bam; + struct msm_bus_scale_pdata *tspp_bus_pdata = NULL; + unsigned long rate; + + if (pdev->dev.of_node) { + /* ID is always 0 since there is only 1 instance of TSPP */ + pdev->id = 0; + tspp_bus_pdata = msm_bus_cl_get_pdata(pdev); + } else { + /* must have device tree data */ + pr_err("tspp: Device tree data not available\n"); + rc = -EINVAL; + goto out; + } + + /* OK, we will use this device */ + device = kzalloc(sizeof(struct 
tspp_device), GFP_KERNEL); + if (!device) { + rc = -ENOMEM; + goto out; + } + + /* set up references */ + device->pdev = pdev; + platform_set_drvdata(pdev, device); + + /* setup pin control */ + rc = tspp_get_pinctrl(device); + if (rc) { + pr_err("tspp: failed to get pin control data, rc=%d\n", rc); + goto err_pinctrl; + } + + /* register bus client */ + if (tspp_bus_pdata) { + device->tsif_bus_client = + msm_bus_scale_register_client(tspp_bus_pdata); + if (!device->tsif_bus_client) + pr_err("tspp: Unable to register bus client\n"); + } else { + device->tsif_bus_client = 0; + } + + /* map regulators */ + device->tsif_vreg = devm_regulator_get_optional(&pdev->dev, "vdd_cx"); + if (IS_ERR_OR_NULL(device->tsif_vreg)) { + rc = PTR_ERR(device->tsif_vreg); + device->tsif_vreg = NULL; + if (rc == -ENODEV) { + pr_notice("%s: vdd_cx regulator will not be used\n", + __func__); + } else { + dev_err(&pdev->dev, + "failed to get CX regulator, err=%d\n", rc); + goto err_regulator; + } + } else { + /* Set an initial voltage and enable the regulator */ + rc = regulator_set_voltage(device->tsif_vreg, + RPMH_REGULATOR_LEVEL_OFF, + RPMH_REGULATOR_LEVEL_MAX); + if (rc) { + dev_err(&pdev->dev, "Unable to set CX voltage.\n"); + goto err_regulator; + } + + rc = regulator_enable(device->tsif_vreg); + if (rc) { + dev_err(&pdev->dev, "Unable to enable CX regulator.\n"); + goto err_regulator; + } + } + + /* map clocks */ + device->tsif_pclk = clk_get(&pdev->dev, "iface_clk"); + if (IS_ERR_OR_NULL(device->tsif_pclk)) { + rc = PTR_ERR(device->tsif_pclk); + device->tsif_pclk = NULL; + goto err_pclock; + } + + device->tsif_ref_clk = clk_get(&pdev->dev, "ref_clk"); + if (IS_ERR_OR_NULL(device->tsif_ref_clk)) { + rc = PTR_ERR(device->tsif_ref_clk); + device->tsif_ref_clk = NULL; + goto err_refclock; + } + rate = clk_round_rate(device->tsif_ref_clk, 1); + rc = clk_set_rate(device->tsif_ref_clk, rate); + if (rc) + goto err_res_tsif0; + + /* map I/O memory */ + mem_tsif0 = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "MSM_TSIF0_PHYS"); + if (!mem_tsif0) { + pr_err("tspp: Missing tsif0 MEM resource\n"); + rc = -ENXIO; + goto err_res_tsif0; + } + device->tsif[0].base = ioremap(mem_tsif0->start, + resource_size(mem_tsif0)); + if (!device->tsif[0].base) { + pr_err("tspp: ioremap failed\n"); + goto err_map_tsif0; + } + + mem_tsif1 = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "MSM_TSIF1_PHYS"); + if (!mem_tsif1) { + dev_err(&pdev->dev, "Missing tsif1 MEM resource\n"); + rc = -ENXIO; + goto err_res_tsif1; + } + device->tsif[1].base = ioremap(mem_tsif1->start, + resource_size(mem_tsif1)); + if (!device->tsif[1].base) { + dev_err(&pdev->dev, "ioremap failed"); + goto err_map_tsif1; + } + + mem_tspp = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "MSM_TSPP_PHYS"); + if (!mem_tspp) { + dev_err(&pdev->dev, "Missing MEM resource"); + rc = -ENXIO; + goto err_res_dev; + } + device->base = ioremap(mem_tspp->start, resource_size(mem_tspp)); + if (!device->base) { + dev_err(&pdev->dev, "ioremap failed"); + goto err_map_dev; + } + + mem_bam = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "MSM_TSPP_BAM_PHYS"); + if (!mem_bam) { + pr_err("tspp: Missing bam MEM resource"); + rc = -ENXIO; + goto err_res_bam; + } + memset(&device->bam_props, 0, sizeof(device->bam_props)); + device->bam_props.phys_addr = mem_bam->start; + device->bam_props.virt_addr = ioremap(mem_bam->start, + resource_size(mem_bam)); + if (!device->bam_props.virt_addr) { + dev_err(&pdev->dev, "ioremap failed"); + goto err_map_bam; + } + + if 
(msm_tspp_map_irqs(pdev, device)) + goto err_irq; + device->req_irqs = false; + + /* Check whether AV timer time stamps are enabled */ + if (!of_property_read_u32(pdev->dev.of_node, "qcom,lpass-timer-tts", + &device->tts_source)) { + if (device->tts_source == 1) + device->tts_source = TSIF_TTS_LPASS_TIMER; + else + device->tts_source = TSIF_TTS_TCR; + } else { + device->tts_source = TSIF_TTS_TCR; + } + + for (i = 0; i < TSPP_TSIF_INSTANCES; i++) + device->tsif[i].tts_source = device->tts_source; + + /* power management */ + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + tspp_debugfs_init(device, 0); + + for (i = 0; i < TSPP_TSIF_INSTANCES; i++) + tsif_debugfs_init(&device->tsif[i], i); + + wakeup_source_init(&device->ws, dev_name(&pdev->dev)); + + /* set up pointers to ram-based 'registers' */ + device->filters[0] = device->base + TSPP_PID_FILTER_TABLE0; + device->filters[1] = device->base + TSPP_PID_FILTER_TABLE1; + device->filters[2] = device->base + TSPP_PID_FILTER_TABLE2; + device->tspp_key_table = device->base + TSPP_DATA_KEY; + device->tspp_global_performance = + device->base + TSPP_GLOBAL_PERFORMANCE; + device->tspp_pipe_context = + device->base + TSPP_PIPE_CONTEXT; + device->tspp_pipe_performance = + device->base + TSPP_PIPE_PERFORMANCE; + + device->bam_props.summing_threshold = 0x10; + device->bam_props.irq = device->bam_irq; + device->bam_props.manage = SPS_BAM_MGR_LOCAL; + /*add SPS BAM log level*/ + device->bam_props.ipc_loglevel = TSPP_BAM_DEFAULT_IPC_LOGLVL; + + if (tspp_clock_start(device) != 0) { + dev_err(&pdev->dev, "Can't start clocks"); + goto err_clock; + } + + device->bam_handle = SPS_DEV_HANDLE_INVALID; + + spin_lock_init(&device->spinlock); + mutex_init(&device->mutex); + tasklet_init(&device->tlet, tspp_sps_complete_tlet, + (unsigned long)device); + + /* initialize everything to a known state */ + tspp_global_reset(device); + + version = readl_relaxed(device->base + TSPP_VERSION); + /* + * TSPP version can be bits [7:0] or alternatively, + * TSPP major version is bits [31:28]. 
+ */ + if ((version != 0x1) && (((version >> 28) & 0xF) != 0x1)) + pr_warn("tspp: unrecognized hw version=%i", version); + + /* initialize the channels */ + for (i = 0; i < TSPP_NUM_CHANNELS; i++) + tspp_channel_init(&(device->channels[i]), device); + + /* stop the clocks for power savings */ + tspp_clock_stop(device); + + /* everything is ok, so add the device to the list */ + list_add_tail(&(device->devlist), &tspp_devices); + return 0; + +err_clock: + tspp_debugfs_exit(device); + for (i = 0; i < TSPP_TSIF_INSTANCES; i++) + tsif_debugfs_exit(&device->tsif[i]); +err_irq: + iounmap(device->bam_props.virt_addr); +err_map_bam: +err_res_bam: + iounmap(device->base); +err_map_dev: +err_res_dev: + iounmap(device->tsif[1].base); +err_map_tsif1: +err_res_tsif1: + iounmap(device->tsif[0].base); +err_map_tsif0: +err_res_tsif0: + if (device->tsif_ref_clk) + clk_put(device->tsif_ref_clk); +err_refclock: + if (device->tsif_pclk) + clk_put(device->tsif_pclk); +err_pclock: + if (device->tsif_vreg) + regulator_disable(device->tsif_vreg); +err_regulator: + if (device->tsif_bus_client) + msm_bus_scale_unregister_client(device->tsif_bus_client); +err_pinctrl: + kfree(device); + +out: + return rc; +} + +static int msm_tspp_remove(struct platform_device *pdev) +{ + struct tspp_channel *channel; + u32 i; + + struct tspp_device *device = platform_get_drvdata(pdev); + + /* free the buffers, and delete the channels */ + for (i = 0; i < TSPP_NUM_CHANNELS; i++) { + channel = &device->channels[i]; + tspp_close_channel(device->pdev->id, i); + } + + for (i = 0; i < TSPP_TSIF_INSTANCES; i++) + tsif_debugfs_exit(&device->tsif[i]); + + mutex_destroy(&device->mutex); + + if (device->tsif_bus_client) + msm_bus_scale_unregister_client(device->tsif_bus_client); + + wakeup_source_trash(&device->ws); + if (device->req_irqs) + msm_tspp_free_irqs(device); + + iounmap(device->bam_props.virt_addr); + iounmap(device->base); + for (i = 0; i < TSPP_TSIF_INSTANCES; i++) + iounmap(device->tsif[i].base); + + if (device->tsif_ref_clk) + clk_put(device->tsif_ref_clk); + + if (device->tsif_pclk) + clk_put(device->tsif_pclk); + + if (device->tsif_vreg) + regulator_disable(device->tsif_vreg); + + pm_runtime_disable(&pdev->dev); + + kfree(device); + + return 0; +} + +/*** power management ***/ + +static int tspp_runtime_suspend(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: suspending..."); + return 0; +} + +static int tspp_runtime_resume(struct device *dev) +{ + dev_dbg(dev, "pm_runtime: resuming..."); + return 0; +} + +static const struct dev_pm_ops tspp_dev_pm_ops = { + .runtime_suspend = tspp_runtime_suspend, + .runtime_resume = tspp_runtime_resume, +}; + +static const struct of_device_id msm_match_table[] = { + {.compatible = "qcom,msm_tspp"}, + {} +}; + +static struct platform_driver msm_tspp_driver = { + .probe = msm_tspp_probe, + .remove = msm_tspp_remove, + .driver = { + .name = "msm_tspp", + .pm = &tspp_dev_pm_ops, + .of_match_table = msm_match_table, + }, +}; + + +static int __init mod_init(void) +{ + int rc; + + /* register the driver, and check hardware */ + rc = platform_driver_register(&msm_tspp_driver); + if (rc) + pr_err("tspp: platform_driver_register failed: %d", rc); + + return rc; +} + +static void __exit mod_exit(void) +{ + /* delete low level driver */ + platform_driver_unregister(&msm_tspp_driver); +} + +module_init(mod_init); +module_exit(mod_exit); + +MODULE_DESCRIPTION("TSPP platform device"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/dvb/Kconfig b/drivers/media/platform/msm/dvb/Kconfig 
new file mode 100644 index 000000000000..e205c8172075 --- /dev/null +++ b/drivers/media/platform/msm/dvb/Kconfig @@ -0,0 +1,10 @@ +config DVB_MPQ + tristate "Qualcomm Technologies Inc Multimedia Processor DVB Adapter" + depends on ARCH_QCOM && DVB_CORE + default n + + help + Support for Qualcomm Technologies Inc MPQ based DVB adapter. + Say Y or M if you own such a device and want to use it. + +source "drivers/media/platform/msm/dvb/demux/Kconfig" diff --git a/drivers/media/platform/msm/dvb/Makefile b/drivers/media/platform/msm/dvb/Makefile new file mode 100644 index 000000000000..862ebca24db9 --- /dev/null +++ b/drivers/media/platform/msm/dvb/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_DVB_MPQ) += adapter/ +obj-$(CONFIG_DVB_MPQ_DEMUX) += demux/ diff --git a/drivers/media/platform/msm/dvb/adapter/Makefile b/drivers/media/platform/msm/dvb/adapter/Makefile new file mode 100644 index 000000000000..662bf99c4d7e --- /dev/null +++ b/drivers/media/platform/msm/dvb/adapter/Makefile @@ -0,0 +1,7 @@ +ccflags-y += -Idrivers/media/dvb-core/ +ccflags-y += -Idrivers/media/platform/msm/dvb/include/ +ccflags-y += -Idrivers/media/platform/msm/dvb/demux/ + +obj-$(CONFIG_DVB_MPQ) += mpq-adapter.o + +mpq-adapter-y := mpq_adapter.o mpq_stream_buffer.o diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c new file mode 100644 index 000000000000..1ccb98fe3201 --- /dev/null +++ b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c @@ -0,0 +1,208 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include + +#include "mpq_adapter.h" +#include "mpq_dvb_debug.h" + + +DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); + +/* data-structure holding MPQ adapter information */ +static struct +{ + /* MPQ adapter registered to dvb-core */ + struct dvb_adapter adapter; + + /* mutex protect against the data-structure */ + struct mutex mutex; + + /* List of stream interfaces registered to the MPQ adapter */ + struct { + /* pointer to the stream buffer using for data tunneling */ + struct mpq_streambuffer *stream_buffer; + + /* callback triggered when the stream interface is registered */ + mpq_adapter_stream_if_callback callback; + + /* parameter passed to the callback function */ + void *user_param; + } interfaces[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES]; +} mpq_info; + + +/** + * Initialize MPQ DVB adapter module. + * + * Return error status + */ +static int __init mpq_adapter_init(void) +{ + int i; + int result; + + MPQ_DVB_DBG_PRINT("%s executed\n", __func__); + + mutex_init(&mpq_info.mutex); + + /* reset stream interfaces list */ + for (i = 0; i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES; i++) { + mpq_info.interfaces[i].stream_buffer = NULL; + mpq_info.interfaces[i].callback = NULL; + } + + /* regsiter a new dvb-adapter to dvb-core */ + result = dvb_register_adapter(&mpq_info.adapter, + "Qualcomm technologies, inc. 
DVB adapter", + THIS_MODULE, NULL, adapter_nr); + if (result < 0) { + MPQ_DVB_ERR_PRINT( + "%s: dvb_register_adapter failed, errno %d\n", + __func__, + result); + } + + return result; +} + + +/** + * Cleanup MPQ DVB adapter module. + */ +static void __exit mpq_adapter_exit(void) +{ + MPQ_DVB_DBG_PRINT("%s executed\n", __func__); + + /* un-regsiter adapter from dvb-core */ + dvb_unregister_adapter(&mpq_info.adapter); + mutex_destroy(&mpq_info.mutex); +} + +struct dvb_adapter *mpq_adapter_get(void) +{ + return &mpq_info.adapter; +} +EXPORT_SYMBOL(mpq_adapter_get); + + +int mpq_adapter_register_stream_if( + enum mpq_adapter_stream_if interface_id, + struct mpq_streambuffer *stream_buffer) +{ + int ret; + + if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) { + ret = -EINVAL; + goto register_failed; + } + + if (mutex_lock_interruptible(&mpq_info.mutex)) { + ret = -ERESTARTSYS; + goto register_failed; + } + + if (mpq_info.interfaces[interface_id].stream_buffer != NULL) { + /* already registered interface */ + ret = -EINVAL; + goto register_failed_unlock_mutex; + } + + mpq_info.interfaces[interface_id].stream_buffer = stream_buffer; + mutex_unlock(&mpq_info.mutex); + + /* + * If callback is installed, trigger it to notify that + * stream interface was registered. + */ + if (mpq_info.interfaces[interface_id].callback != NULL) { + mpq_info.interfaces[interface_id].callback( + interface_id, + mpq_info.interfaces[interface_id].user_param); + } + + return 0; + +register_failed_unlock_mutex: + mutex_unlock(&mpq_info.mutex); +register_failed: + return ret; +} +EXPORT_SYMBOL(mpq_adapter_register_stream_if); + + +int mpq_adapter_unregister_stream_if( + enum mpq_adapter_stream_if interface_id) +{ + if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) + return -EINVAL; + + if (mutex_lock_interruptible(&mpq_info.mutex)) + return -ERESTARTSYS; + + /* clear the registered interface */ + mpq_info.interfaces[interface_id].stream_buffer = NULL; + + mutex_unlock(&mpq_info.mutex); + + return 0; +} +EXPORT_SYMBOL(mpq_adapter_unregister_stream_if); + + +int mpq_adapter_get_stream_if( + enum mpq_adapter_stream_if interface_id, + struct mpq_streambuffer **stream_buffer) +{ + if ((interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) || + (stream_buffer == NULL)) + return -EINVAL; + + if (mutex_lock_interruptible(&mpq_info.mutex)) + return -ERESTARTSYS; + + *stream_buffer = mpq_info.interfaces[interface_id].stream_buffer; + + mutex_unlock(&mpq_info.mutex); + + return 0; +} +EXPORT_SYMBOL(mpq_adapter_get_stream_if); + + +int mpq_adapter_notify_stream_if( + enum mpq_adapter_stream_if interface_id, + mpq_adapter_stream_if_callback callback, + void *user_param) +{ + if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) + return -EINVAL; + + if (mutex_lock_interruptible(&mpq_info.mutex)) + return -ERESTARTSYS; + + mpq_info.interfaces[interface_id].callback = callback; + mpq_info.interfaces[interface_id].user_param = user_param; + + mutex_unlock(&mpq_info.mutex); + + return 0; +} +EXPORT_SYMBOL(mpq_adapter_notify_stream_if); + + +module_init(mpq_adapter_init); +module_exit(mpq_adapter_exit); + +MODULE_DESCRIPTION("Qualcomm Technologies Inc. MPQ adapter"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c new file mode 100644 index 000000000000..4f84c58333ac --- /dev/null +++ b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c @@ -0,0 +1,827 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include "mpq_dvb_debug.h" +#include "mpq_stream_buffer.h" + + +int mpq_streambuffer_init( + struct mpq_streambuffer *sbuff, + enum mpq_streambuffer_mode mode, + struct mpq_streambuffer_buffer_desc *data_buffers, + u32 data_buff_num, + void *packet_buff, + size_t packet_buff_size) +{ + if ((sbuff == NULL) || (data_buffers == NULL) || + (packet_buff == NULL) || (data_buff_num == 0)) + return -EINVAL; + + if (data_buff_num > 1) { + if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) + return -EINVAL; + /* Linear buffer group */ + dvb_ringbuffer_init( + &sbuff->raw_data, + data_buffers, + data_buff_num * + sizeof(struct mpq_streambuffer_buffer_desc)); + } else { + if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_RING) + return -EINVAL; + /* Single ring-buffer */ + dvb_ringbuffer_init(&sbuff->raw_data, + data_buffers[0].base, data_buffers[0].size); + } + sbuff->mode = mode; + sbuff->buffers = data_buffers; + sbuff->pending_buffers_count = 0; + sbuff->buffers_num = data_buff_num; + sbuff->cb = NULL; + dvb_ringbuffer_init(&sbuff->packet_data, packet_buff, packet_buff_size); + + return 0; +} +EXPORT_SYMBOL(mpq_streambuffer_init); + +void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff) +{ + spin_lock(&sbuff->packet_data.lock); + spin_lock(&sbuff->raw_data.lock); + sbuff->packet_data.error = -ENODEV; + sbuff->raw_data.error = -ENODEV; + spin_unlock(&sbuff->raw_data.lock); + spin_unlock(&sbuff->packet_data.lock); + + wake_up_all(&sbuff->raw_data.queue); + wake_up_all(&sbuff->packet_data.queue); +} +EXPORT_SYMBOL(mpq_streambuffer_terminate); + +ssize_t mpq_streambuffer_pkt_next( + struct mpq_streambuffer *sbuff, + ssize_t idx, size_t *pktlen) +{ + ssize_t packet_idx; + + spin_lock(&sbuff->packet_data.lock); + + /* buffer was released, return no packet available */ + if (sbuff->packet_data.error == -ENODEV) { + spin_unlock(&sbuff->packet_data.lock); + return -ENODEV; + } + + packet_idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, idx, pktlen); + spin_unlock(&sbuff->packet_data.lock); + + return packet_idx; +} +EXPORT_SYMBOL(mpq_streambuffer_pkt_next); + + +ssize_t mpq_streambuffer_pkt_read( + struct mpq_streambuffer *sbuff, + size_t idx, + struct mpq_streambuffer_packet_header *packet, + u8 *user_data) +{ + size_t ret; + size_t read_len; + + spin_lock(&sbuff->packet_data.lock); + + /* buffer was released, return no packet available */ + if (sbuff->packet_data.error == -ENODEV) { + spin_unlock(&sbuff->packet_data.lock); + return -ENODEV; + } + + /* read-out the packet header first */ + ret = dvb_ringbuffer_pkt_read( + &sbuff->packet_data, idx, 0, + (u8 *)packet, + sizeof(struct mpq_streambuffer_packet_header)); + + /* verify length, at least packet header should exist */ + if (ret != sizeof(struct mpq_streambuffer_packet_header)) { + spin_unlock(&sbuff->packet_data.lock); + return -EINVAL; + } + + read_len = ret; + + /* read-out private user-data if there are such */ + if ((packet->user_data_len) && (user_data != NULL)) { + ret = dvb_ringbuffer_pkt_read( + 
&sbuff->packet_data, + idx, + sizeof(struct mpq_streambuffer_packet_header), + user_data, + packet->user_data_len); + + if (ret < 0) { + spin_unlock(&sbuff->packet_data.lock); + return ret; + } + + read_len += ret; + } + + spin_unlock(&sbuff->packet_data.lock); + + return read_len; +} +EXPORT_SYMBOL(mpq_streambuffer_pkt_read); + + +int mpq_streambuffer_pkt_dispose( + struct mpq_streambuffer *sbuff, + size_t idx, + int dispose_data) +{ + int ret; + struct mpq_streambuffer_packet_header packet; + + if (sbuff == NULL) + return -EINVAL; + + spin_lock(&sbuff->packet_data.lock); + + /* check if buffer was released */ + if (sbuff->packet_data.error == -ENODEV) { + spin_unlock(&sbuff->packet_data.lock); + return -ENODEV; + } + + /* read-out the packet header first */ + ret = dvb_ringbuffer_pkt_read(&sbuff->packet_data, idx, + 0, + (u8 *)&packet, + sizeof(struct mpq_streambuffer_packet_header)); + + spin_unlock(&sbuff->packet_data.lock); + + if (ret != sizeof(struct mpq_streambuffer_packet_header)) + return -EINVAL; + + if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) || + (dispose_data)) { + /* Advance the read pointer in the raw-data buffer first */ + ret = mpq_streambuffer_data_read_dispose(sbuff, + packet.raw_data_len); + if (ret != 0) + return ret; + } + + spin_lock(&sbuff->packet_data.lock); + spin_lock(&sbuff->raw_data.lock); + + /* check if buffer was released */ + if ((sbuff->packet_data.error == -ENODEV) || + (sbuff->raw_data.error == -ENODEV)) { + spin_unlock(&sbuff->raw_data.lock); + spin_unlock(&sbuff->packet_data.lock); + return -ENODEV; + } + + /* Move read pointer to the next linear buffer for subsequent reads */ + if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) && + (packet.raw_data_len > 0)) { + struct mpq_streambuffer_buffer_desc *desc; + + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pread]; + + desc->write_ptr = 0; + desc->read_ptr = 0; + + DVB_RINGBUFFER_SKIP(&sbuff->raw_data, + sizeof(struct mpq_streambuffer_buffer_desc)); + sbuff->pending_buffers_count--; + + wake_up_all(&sbuff->raw_data.queue); + } + + /* Now clear the packet from the packet header */ + dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx); + + spin_unlock(&sbuff->raw_data.lock); + spin_unlock(&sbuff->packet_data.lock); + + return 0; +} +EXPORT_SYMBOL(mpq_streambuffer_pkt_dispose); + +int mpq_streambuffer_pkt_write( + struct mpq_streambuffer *sbuff, + struct mpq_streambuffer_packet_header *packet, + u8 *user_data) +{ + ssize_t idx; + size_t len; + + if ((sbuff == NULL) || (packet == NULL)) + return -EINVAL; + + spin_lock(&sbuff->packet_data.lock); + + /* check if buffer was released */ + if (sbuff->packet_data.error == -ENODEV) { + spin_unlock(&sbuff->packet_data.lock); + return -ENODEV; + } + + /* Make sure we can go to the next linear buffer */ + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR && + sbuff->pending_buffers_count == sbuff->buffers_num && + packet->raw_data_len) { + spin_unlock(&sbuff->packet_data.lock); + return -ENOSPC; + } + + len = sizeof(struct mpq_streambuffer_packet_header) + + packet->user_data_len; + + /* Make sure enough space available for packet header */ + if (dvb_ringbuffer_free(&sbuff->packet_data) < + (len + DVB_RINGBUFFER_PKTHDRSIZE)) { + spin_unlock(&sbuff->packet_data.lock); + return -ENOSPC; + } + + /* Starting writing packet header */ + idx = dvb_ringbuffer_pkt_start(&sbuff->packet_data, len); + + /* Write non-user private data header */ + dvb_ringbuffer_write(&sbuff->packet_data, + (u8 *)packet, + 
sizeof(struct mpq_streambuffer_packet_header)); + + /* Write user's own private data header */ + dvb_ringbuffer_write(&sbuff->packet_data, + user_data, + packet->user_data_len); + + dvb_ringbuffer_pkt_close(&sbuff->packet_data, idx); + + /* Move write pointer to next linear buffer for subsequent writes */ + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR && + packet->raw_data_len) { + DVB_RINGBUFFER_PUSH(&sbuff->raw_data, + sizeof(struct mpq_streambuffer_buffer_desc)); + sbuff->pending_buffers_count++; + } + + spin_unlock(&sbuff->packet_data.lock); + wake_up_all(&sbuff->packet_data.queue); + + return idx; +} +EXPORT_SYMBOL(mpq_streambuffer_pkt_write); + +ssize_t mpq_streambuffer_data_write( + struct mpq_streambuffer *sbuff, + const u8 *buf, size_t len) +{ + int res; + + if ((sbuff == NULL) || (buf == NULL)) + return -EINVAL; + + spin_lock(&sbuff->raw_data.lock); + + /* check if buffer was released */ + if (sbuff->raw_data.error == -ENODEV) { + spin_unlock(&sbuff->raw_data.lock); + return -ENODEV; + } + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) { + if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) { + spin_unlock(&sbuff->raw_data.lock); + return -ENOSPC; + } + /* + * Secure buffers are not permitted to be mapped into kernel + * memory, and so buffer base address may be NULL + */ + if (sbuff->raw_data.data == NULL) { + spin_unlock(&sbuff->raw_data.lock); + return -EPERM; + } + res = dvb_ringbuffer_write(&sbuff->raw_data, buf, len); + wake_up_all(&sbuff->raw_data.queue); + } else { + /* Linear buffer group */ + struct mpq_streambuffer_buffer_desc *desc; + + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pwrite]; + + /* + * Secure buffers are not permitted to be mapped into kernel + * memory, and so buffer base address may be NULL + */ + if (desc->base == NULL) { + spin_unlock(&sbuff->raw_data.lock); + return -EPERM; + } + + if ((sbuff->pending_buffers_count == sbuff->buffers_num) || + ((desc->size - desc->write_ptr) < len)) { + MPQ_DVB_DBG_PRINT( + "%s: No space available! %d pending buffers out of %d total buffers. 
write_ptr=%d, size=%d\n", + __func__, + sbuff->pending_buffers_count, + sbuff->buffers_num, + desc->write_ptr, + desc->size); + spin_unlock(&sbuff->raw_data.lock); + return -ENOSPC; + } + memcpy(desc->base + desc->write_ptr, buf, len); + desc->write_ptr += len; + res = len; + } + + spin_unlock(&sbuff->raw_data.lock); + return res; +} +EXPORT_SYMBOL(mpq_streambuffer_data_write); + + +int mpq_streambuffer_data_write_deposit( + struct mpq_streambuffer *sbuff, + size_t len) +{ + if (sbuff == NULL) + return -EINVAL; + + spin_lock(&sbuff->raw_data.lock); + + /* check if buffer was released */ + if (sbuff->raw_data.error == -ENODEV) { + spin_unlock(&sbuff->raw_data.lock); + return -ENODEV; + } + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) { + if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) { + spin_unlock(&sbuff->raw_data.lock); + return -ENOSPC; + } + + DVB_RINGBUFFER_PUSH(&sbuff->raw_data, len); + wake_up_all(&sbuff->raw_data.queue); + } else { + /* Linear buffer group */ + struct mpq_streambuffer_buffer_desc *desc = + (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pwrite]; + + if ((sbuff->pending_buffers_count == sbuff->buffers_num) || + ((desc->size - desc->write_ptr) < len)) { + MPQ_DVB_ERR_PRINT( + "%s: No space available!\n", + __func__); + spin_unlock(&sbuff->raw_data.lock); + return -ENOSPC; + } + desc->write_ptr += len; + } + + spin_unlock(&sbuff->raw_data.lock); + return 0; +} +EXPORT_SYMBOL(mpq_streambuffer_data_write_deposit); + + +ssize_t mpq_streambuffer_data_read( + struct mpq_streambuffer *sbuff, + u8 *buf, size_t len) +{ + ssize_t actual_len = 0; + u32 offset; + + if ((sbuff == NULL) || (buf == NULL)) + return -EINVAL; + + spin_lock(&sbuff->raw_data.lock); + + /* check if buffer was released */ + if (sbuff->raw_data.error == -ENODEV) { + spin_unlock(&sbuff->raw_data.lock); + return -ENODEV; + } + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) { + /* + * Secure buffers are not permitted to be mapped into kernel + * memory, and so buffer base address may be NULL + */ + if (sbuff->raw_data.data == NULL) { + spin_unlock(&sbuff->raw_data.lock); + return -EPERM; + } + + offset = sbuff->raw_data.pread; + actual_len = dvb_ringbuffer_avail(&sbuff->raw_data); + if (actual_len < len) + len = actual_len; + if (len) + dvb_ringbuffer_read(&sbuff->raw_data, buf, len); + + wake_up_all(&sbuff->raw_data.queue); + } else { + /* Linear buffer group */ + struct mpq_streambuffer_buffer_desc *desc; + + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pread]; + + /* + * Secure buffers are not permitted to be mapped into kernel + * memory, and so buffer base address may be NULL + */ + if (desc->base == NULL) { + spin_unlock(&sbuff->raw_data.lock); + return -EPERM; + } + + actual_len = (desc->write_ptr - desc->read_ptr); + if (actual_len < len) + len = actual_len; + memcpy(buf, desc->base + desc->read_ptr, len); + offset = desc->read_ptr; + desc->read_ptr += len; + } + + spin_unlock(&sbuff->raw_data.lock); + + if (sbuff->cb) + sbuff->cb(sbuff, offset, len, sbuff->cb_user_data); + + return len; +} +EXPORT_SYMBOL(mpq_streambuffer_data_read); + + +ssize_t mpq_streambuffer_data_read_user( + struct mpq_streambuffer *sbuff, + u8 __user *buf, size_t len) +{ + ssize_t actual_len = 0; + u32 offset; + + if ((sbuff == NULL) || (buf == NULL)) + return -EINVAL; + + /* check if buffer was released */ + if (sbuff->raw_data.error == -ENODEV) + return -ENODEV; + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) { 
+ /* + * Secure buffers are not permitted to be mapped into kernel + * memory, and so buffer base address may be NULL + */ + if (sbuff->raw_data.data == NULL) + return -EPERM; + + offset = sbuff->raw_data.pread; + actual_len = dvb_ringbuffer_avail(&sbuff->raw_data); + if (actual_len < len) + len = actual_len; + if (len) + dvb_ringbuffer_read_user(&sbuff->raw_data, buf, len); + wake_up_all(&sbuff->raw_data.queue); + } else { + /* Linear buffer group */ + struct mpq_streambuffer_buffer_desc *desc; + + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pread]; + + /* + * Secure buffers are not permitted to be mapped into kernel + * memory, and so buffer base address may be NULL + */ + if (desc->base == NULL) + return -EPERM; + + actual_len = (desc->write_ptr - desc->read_ptr); + if (actual_len < len) + len = actual_len; + if (copy_to_user(buf, desc->base + desc->read_ptr, len)) + return -EFAULT; + + offset = desc->read_ptr; + desc->read_ptr += len; + } + + if (sbuff->cb) + sbuff->cb(sbuff, offset, len, sbuff->cb_user_data); + + return len; +} +EXPORT_SYMBOL(mpq_streambuffer_data_read_user); + +int mpq_streambuffer_data_read_dispose( + struct mpq_streambuffer *sbuff, + size_t len) +{ + u32 offset; + + if (sbuff == NULL) + return -EINVAL; + + spin_lock(&sbuff->raw_data.lock); + + /* check if buffer was released */ + if (sbuff->raw_data.error == -ENODEV) { + spin_unlock(&sbuff->raw_data.lock); + return -ENODEV; + } + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) { + if (unlikely(dvb_ringbuffer_avail(&sbuff->raw_data) < len)) { + spin_unlock(&sbuff->raw_data.lock); + return -EINVAL; + } + + offset = sbuff->raw_data.pread; + DVB_RINGBUFFER_SKIP(&sbuff->raw_data, len); + wake_up_all(&sbuff->raw_data.queue); + } else { + struct mpq_streambuffer_buffer_desc *desc; + + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pread]; + offset = desc->read_ptr; + + if ((desc->read_ptr + len) > desc->size) + desc->read_ptr = desc->size; + else + desc->read_ptr += len; + } + + spin_unlock(&sbuff->raw_data.lock); + + if (sbuff->cb) + sbuff->cb(sbuff, offset, len, sbuff->cb_user_data); + + return 0; +} +EXPORT_SYMBOL(mpq_streambuffer_data_read_dispose); + + +int mpq_streambuffer_get_buffer_handle( + struct mpq_streambuffer *sbuff, + int read_buffer, + int *handle) +{ + struct mpq_streambuffer_buffer_desc *desc = NULL; + + if ((sbuff == NULL) || (handle == NULL)) + return -EINVAL; + + spin_lock(&sbuff->raw_data.lock); + + /* check if buffer was released */ + if (sbuff->raw_data.error == -ENODEV) { + spin_unlock(&sbuff->raw_data.lock); + return -ENODEV; + } + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) { + *handle = sbuff->buffers[0].handle; + } else { + if (read_buffer) + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pread]; + else + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pwrite]; + *handle = desc->handle; + } + + spin_unlock(&sbuff->raw_data.lock); + + return 0; +} +EXPORT_SYMBOL(mpq_streambuffer_get_buffer_handle); + + +int mpq_streambuffer_register_data_dispose( + struct mpq_streambuffer *sbuff, + mpq_streambuffer_dispose_cb cb_func, + void *user_data) +{ + if ((sbuff == NULL) || (cb_func == NULL)) + return -EINVAL; + + sbuff->cb = cb_func; + sbuff->cb_user_data = user_data; + + return 0; +} +EXPORT_SYMBOL(mpq_streambuffer_register_data_dispose); + + +ssize_t mpq_streambuffer_data_free( + struct mpq_streambuffer *sbuff) +{ + struct 
mpq_streambuffer_buffer_desc *desc; + + if (sbuff == NULL) + return -EINVAL; + + spin_lock(&sbuff->raw_data.lock); + + /* check if buffer was released */ + if (sbuff->raw_data.error == -ENODEV) { + spin_unlock(&sbuff->raw_data.lock); + return -ENODEV; + } + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) { + spin_unlock(&sbuff->raw_data.lock); + return dvb_ringbuffer_free(&sbuff->raw_data); + } + + if (sbuff->pending_buffers_count == sbuff->buffers_num) { + spin_unlock(&sbuff->raw_data.lock); + return 0; + } + + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pwrite]; + + spin_unlock(&sbuff->raw_data.lock); + + return desc->size - desc->write_ptr; +} +EXPORT_SYMBOL(mpq_streambuffer_data_free); + + +ssize_t mpq_streambuffer_data_avail( + struct mpq_streambuffer *sbuff) +{ + struct mpq_streambuffer_buffer_desc *desc; + + if (sbuff == NULL) + return -EINVAL; + + spin_lock(&sbuff->raw_data.lock); + + /* check if buffer was released */ + if (sbuff->raw_data.error == -ENODEV) { + spin_unlock(&sbuff->raw_data.lock); + return -ENODEV; + } + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) { + ssize_t avail = dvb_ringbuffer_avail(&sbuff->raw_data); + + spin_unlock(&sbuff->raw_data.lock); + return avail; + } + + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pread]; + + spin_unlock(&sbuff->raw_data.lock); + + return desc->write_ptr - desc->read_ptr; +} +EXPORT_SYMBOL(mpq_streambuffer_data_avail); + +int mpq_streambuffer_get_data_rw_offset( + struct mpq_streambuffer *sbuff, + u32 *read_offset, + u32 *write_offset) +{ + if (sbuff == NULL) + return -EINVAL; + + spin_lock(&sbuff->raw_data.lock); + + /* check if buffer was released */ + if (sbuff->raw_data.error == -ENODEV) { + spin_unlock(&sbuff->raw_data.lock); + return -ENODEV; + } + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) { + if (read_offset) + *read_offset = sbuff->raw_data.pread; + if (write_offset) + *write_offset = sbuff->raw_data.pwrite; + } else { + struct mpq_streambuffer_buffer_desc *desc; + + if (read_offset) { + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pread]; + *read_offset = desc->read_ptr; + } + if (write_offset) { + desc = (struct mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pwrite]; + *write_offset = desc->write_ptr; + } + } + + spin_unlock(&sbuff->raw_data.lock); + + return 0; +} +EXPORT_SYMBOL(mpq_streambuffer_get_data_rw_offset); + +ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff) +{ + ssize_t free; + + if (sbuff == NULL) + return -EINVAL; + + spin_lock(&sbuff->packet_data.lock); + + /* check if buffer was released */ + if (sbuff->packet_data.error == -ENODEV) { + spin_unlock(&sbuff->packet_data.lock); + return -ENODEV; + } + + free = dvb_ringbuffer_free(&sbuff->packet_data); + + spin_unlock(&sbuff->packet_data.lock); + + return free; +} +EXPORT_SYMBOL(mpq_streambuffer_metadata_free); + +int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff) +{ + struct mpq_streambuffer_buffer_desc *desc; + size_t len; + int idx; + int ret = 0; + + if (sbuff == NULL) + return -EINVAL; + + spin_lock(&sbuff->packet_data.lock); + spin_lock(&sbuff->raw_data.lock); + + /* Check if buffer was released */ + if (sbuff->packet_data.error == -ENODEV || + sbuff->raw_data.error == -ENODEV) { + ret = -ENODEV; + goto end; + } + + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) + while (sbuff->pending_buffers_count) { + desc = (struct 
mpq_streambuffer_buffer_desc *) + &sbuff->raw_data.data[sbuff->raw_data.pread]; + desc->write_ptr = 0; + desc->read_ptr = 0; + DVB_RINGBUFFER_SKIP(&sbuff->raw_data, + sizeof(struct mpq_streambuffer_buffer_desc)); + sbuff->pending_buffers_count--; + } + else + dvb_ringbuffer_flush(&sbuff->raw_data); + + /* + * Dispose all packets (simply flushing is not enough since we want + * the packets' status to move to disposed). + */ + do { + idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, -1, &len); + if (idx >= 0) + dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx); + } while (idx >= 0); + +end: + spin_unlock(&sbuff->raw_data.lock); + spin_unlock(&sbuff->packet_data.lock); + return ret; +} +EXPORT_SYMBOL(mpq_streambuffer_flush); diff --git a/drivers/media/platform/msm/dvb/demux/Kconfig b/drivers/media/platform/msm/dvb/demux/Kconfig new file mode 100644 index 000000000000..b9282122e0ac --- /dev/null +++ b/drivers/media/platform/msm/dvb/demux/Kconfig @@ -0,0 +1,47 @@ +menuconfig DVB_MPQ_DEMUX + tristate "DVB Demux Device" + depends on DVB_MPQ && ION && ION_MSM + default n + + help + Support for Qualcomm Technologies Inc based dvb demux device. + Say Y if you own such a device and want to use it. + The Demux device is used to stream playback either + from TSIF interface or from DVR interface. + +config DVB_MPQ_NUM_DMX_DEVICES + int "Number of demux devices" + depends on DVB_MPQ_DEMUX + default 4 + range 1 255 + + help + Configure number of demux devices. + Depends on your use-cases for maximum concurrent stream playback. + +config DVB_MPQ_MEDIA_BOX_DEMUX + bool "Media box demux support" + depends on DVB_MPQ_DEMUX + default n + help + Use this option if your HW is Qualcomm Technologies Inc + media box and demux support is required on that media box. + Currently this config is being used for demux video events + optimization. + +config DVB_MPQ_TSPP1 + bool "TSPPv1 plugin" + depends on DVB_MPQ_DEMUX && TSPP + help + Use this option if your HW has + Transport Stream Packet Processor(TSPP) version1 support. + Demux may take adavantage of HW capabilities to perform + some tasks in HW instead of SW. + +config DVB_MPQ_SW + bool "Software plugin" + depends on DVB_MPQ_DEMUX && !DVB_MPQ_TSPP1 + help + Use this option if your HW does not have any + TSPP hardware support. All demux tasks will be + performed in SW. diff --git a/drivers/media/platform/msm/dvb/demux/Makefile b/drivers/media/platform/msm/dvb/demux/Makefile new file mode 100644 index 000000000000..c08fa85a8d5d --- /dev/null +++ b/drivers/media/platform/msm/dvb/demux/Makefile @@ -0,0 +1,14 @@ + +ccflags-y += -Idrivers/media/dvb-core/ +ccflags-y += -Idrivers/media/platform/msm/dvb/include/ +ccflags-y += -Idrivers/misc/ + +obj-$(CONFIG_DVB_MPQ_DEMUX) += mpq-dmx-hw-plugin.o + +mpq-dmx-hw-plugin-y := mpq_dmx_plugin_common.o + +mpq-dmx-hw-plugin-$(CONFIG_QSEECOM) += mpq_sdmx.o + +mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_TSPP1) += mpq_dmx_plugin_tspp_v1.o + +mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_SW) += mpq_dmx_plugin_sw.o diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c new file mode 100644 index 000000000000..f16c1ba2aaa6 --- /dev/null +++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c @@ -0,0 +1,6712 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include "mpq_dvb_debug.h" +#include "mpq_dmx_plugin_common.h" +#include "mpq_sdmx.h" + +#define SDMX_MAJOR_VERSION_MATCH (8) + +/* Length of mandatory fields that must exist in header of video PES */ +#define PES_MANDATORY_FIELDS_LEN 9 + +/* Index of first byte in TS packet holding STC */ +#define STC_LOCATION_IDX 188 + +#define MAX_PES_LENGTH (SZ_64K) + +#define MAX_TS_PACKETS_FOR_SDMX_PROCESS (500) + +/* + * PES header length field is 8 bits so PES header length after this field + * can be up to 256 bytes. + * Preceding fields of the PES header total to 9 bytes + * (including the PES header length field). + */ +#define MAX_PES_HEADER_LENGTH (256 + PES_MANDATORY_FIELDS_LEN) + +/* TS packet with adaptation field only can take up the entire TSP */ +#define MAX_TSP_ADAPTATION_LENGTH (184) + +#define MAX_SDMX_METADATA_LENGTH \ + (TS_PACKET_HEADER_LENGTH + \ + MAX_TSP_ADAPTATION_LENGTH + \ + MAX_PES_HEADER_LENGTH) + +#define SDMX_METADATA_BUFFER_SIZE (64*1024) +#define SDMX_SECTION_BUFFER_SIZE (64*1024) +#define SDMX_PCR_BUFFER_SIZE (64*1024) + +/* Number of demux devices, has default of linux configuration */ +static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES; +module_param(mpq_demux_device_num, int, 0444); + +/* ION heap IDs used for allocating video output buffer */ +static int video_secure_ion_heap = ION_CP_MM_HEAP_ID; +module_param(video_secure_ion_heap, int, 0644); +MODULE_PARM_DESC(video_secure_ion_heap, "ION heap for secure video buffer allocation"); + +static int video_nonsecure_ion_heap = ION_IOMMU_HEAP_ID; +module_param(video_nonsecure_ion_heap, int, 0644); +MODULE_PARM_DESC(video_nonsecure_ion_heap, "ION heap for non-secure video buffer allocation"); + +/* ION heap IDs used for allocating audio output buffer */ +static int audio_nonsecure_ion_heap = ION_IOMMU_HEAP_ID; +module_param(audio_nonsecure_ion_heap, int, 0644); +MODULE_PARM_DESC(audio_nonsecure_ion_heap, "ION heap for non-secure audio buffer allocation"); + +/* Value of TS packet scramble bits field for even key */ +static int mpq_sdmx_scramble_even = 0x2; +module_param(mpq_sdmx_scramble_even, int, 0644); + +/* Value of TS packet scramble bits field for odd key */ +static int mpq_sdmx_scramble_odd = 0x3; +module_param(mpq_sdmx_scramble_odd, int, 0644); + +/* + * Default action (discard or pass) taken when scramble bit is not one of the + * pass-through / odd / even values. + * When set packets will be discarded, otherwise passed through. 
+ */ +static int mpq_sdmx_scramble_default_discard = 1; +module_param(mpq_sdmx_scramble_default_discard, int, 0644); + +/* Max number of TS packets allowed as input for a single sdmx process */ +static int mpq_sdmx_proc_limit = MAX_TS_PACKETS_FOR_SDMX_PROCESS; +module_param(mpq_sdmx_proc_limit, int, 0644); + +/* Debug flag for secure demux process */ +static int mpq_sdmx_debug; +module_param(mpq_sdmx_debug, int, 0644); + +/* + * Indicates whether the demux should search for frame boundaries + * and notify on video packets on frame-basis or whether to provide + * only video PES packet payloads as-is. + */ +static int video_framing = 1; +module_param(video_framing, int, 0644); + +/* TSIF operation mode: 1 = TSIF_MODE_1, 2 = TSIF_MODE_2, 3 = TSIF_LOOPBACK */ +static int tsif_mode = 2; +module_param(tsif_mode, int, 0644); + +/* Inverse TSIF clock signal */ +static int clock_inv; +module_param(clock_inv, int, 0644); + +/* TSIF Timestamp source: 0 = TSIF Clock Reference, 1 = LPASS time counter */ +enum tsif_tts_source { + TSIF_TTS_TCR = 0, /* Time stamps from TCR counter */ + TSIF_TTS_LPASS_TIMER /* Time stamps from AV/Qtimer Timer */ +}; + +/* Store all mpq feeds corresponding to 4 TS programs in a Transport Stream */ +static struct mpq_feed *store_mpq_audio_feed[CONFIG_DVB_MPQ_NUM_DMX_DEVICES] = { + NULL, NULL, NULL, NULL}; +static struct mpq_feed *store_mpq_video_feed[CONFIG_DVB_MPQ_NUM_DMX_DEVICES] = { + NULL, NULL, NULL, NULL}; +static int non_predicted_video_frame; +/* trigger video ES frame events on MPEG2 B frames and H264 non-IDR frames */ +#ifdef CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX +static int video_b_frame_events = 1; +#else +static int video_b_frame_events; +#endif + +/* Global data-structure for managing demux devices */ +static struct +{ + /* ION demux client used for memory allocation */ + struct ion_client *ion_client; + + /* demux devices array */ + struct mpq_demux *devices; + + /* Stream buffers objects used for tunneling to decoders */ + struct mpq_streambuffer + decoder_buffers[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES]; + + /* Indicates whether secure demux TZ application is available */ + int secure_demux_app_loaded; +} mpq_dmx_info; + + +int mpq_dmx_get_param_scramble_odd(void) +{ + return mpq_sdmx_scramble_odd; +} + +int mpq_dmx_get_param_scramble_even(void) +{ + return mpq_sdmx_scramble_even; +} + +int mpq_dmx_get_param_scramble_default_discard(void) +{ + return mpq_sdmx_scramble_default_discard; +} + +int mpq_dmx_get_param_tsif_mode(void) +{ + return tsif_mode; +} + +int mpq_dmx_get_param_clock_inv(void) +{ + return clock_inv; +} + +struct mpq_streambuffer *consumer_video_streambuffer(int dmx_ts_pes_video) +{ + struct mpq_streambuffer *streambuffer = NULL; + struct mpq_video_feed_info *feed_data = NULL; + + switch (dmx_ts_pes_video) { + case DMX_PES_VIDEO0: + if (store_mpq_video_feed[0] != NULL) { + feed_data = &store_mpq_video_feed[0]->video_info; + feed_data->stream_interface = + MPQ_ADAPTER_VIDEO0_STREAM_IF; + } + break; + case DMX_PES_VIDEO1: + if (store_mpq_video_feed[1] != NULL) { + feed_data = &store_mpq_video_feed[1]->video_info; + feed_data->stream_interface = + MPQ_ADAPTER_VIDEO1_STREAM_IF; + } + break; + case DMX_PES_VIDEO2: + if (store_mpq_video_feed[2] != NULL) { + feed_data = &store_mpq_video_feed[2]->video_info; + feed_data->stream_interface = + MPQ_ADAPTER_VIDEO2_STREAM_IF; + } + break; + case DMX_PES_VIDEO3: + if (store_mpq_video_feed[3] != NULL) { + feed_data = &store_mpq_video_feed[3]->video_info; + feed_data->stream_interface = + MPQ_ADAPTER_VIDEO3_STREAM_IF; + 
} + break; + } + + if (feed_data != NULL) + mpq_adapter_get_stream_if(feed_data->stream_interface, + &streambuffer); + + return streambuffer; +} +EXPORT_SYMBOL(consumer_video_streambuffer); + +struct mpq_streambuffer *consumer_audio_streambuffer(int dmx_ts_pes_audio) +{ + struct mpq_streambuffer *streambuffer = NULL; + struct mpq_audio_feed_info *feed_data = NULL; + + switch (dmx_ts_pes_audio) { + case DMX_PES_AUDIO0: + if (store_mpq_audio_feed[0] != NULL) { + feed_data = &store_mpq_audio_feed[0]->audio_info; + feed_data->stream_interface = + MPQ_ADAPTER_AUDIO0_STREAM_IF; + } + break; + case DMX_PES_AUDIO1: + if (store_mpq_audio_feed[1] != NULL) { + feed_data = &store_mpq_audio_feed[1]->audio_info; + feed_data->stream_interface = + MPQ_ADAPTER_AUDIO1_STREAM_IF; + } + break; + case DMX_PES_AUDIO2: + if (store_mpq_audio_feed[2] != NULL) { + feed_data = &store_mpq_audio_feed[2]->audio_info; + feed_data->stream_interface = + MPQ_ADAPTER_AUDIO2_STREAM_IF; + } + break; + case DMX_PES_AUDIO3: + if (store_mpq_audio_feed[3] != NULL) { + feed_data = &store_mpq_audio_feed[3]->audio_info; + feed_data->stream_interface = + MPQ_ADAPTER_AUDIO3_STREAM_IF; + } + break; + } + + if (feed_data != NULL) + mpq_adapter_get_stream_if(feed_data->stream_interface, + &streambuffer); + + return streambuffer; +} +EXPORT_SYMBOL(consumer_audio_streambuffer); + + + +/* Check that PES header is valid and that it is a video PES */ +static int mpq_dmx_is_valid_video_pes(struct pes_packet_header *pes_header) +{ + /* start-code valid? */ + if ((pes_header->packet_start_code_prefix_1 != 0) || + (pes_header->packet_start_code_prefix_2 != 0) || + (pes_header->packet_start_code_prefix_3 != 1)) + return -EINVAL; + + /* stream_id is video? */ + if ((pes_header->stream_id & 0xF0) != 0xE0) + return -EINVAL; + + return 0; +} + +static int mpq_dmx_is_valid_audio_pes(struct pes_packet_header *pes_header) +{ + /* start-code valid? */ + if ((pes_header->packet_start_code_prefix_1 != 0) || + (pes_header->packet_start_code_prefix_2 != 0) || + (pes_header->packet_start_code_prefix_3 != 1)) + return -EINVAL; + + /* Note: AC3 stream ID = 0xBD */ + if (pes_header->stream_id == 0xBD) + return 0; + + /* stream_id is audio? 
*/ /* 110x xxxx = Audio Stream IDs */ + if ((pes_header->stream_id & 0xE0) != 0xC0) + return -EINVAL; + + return 0; +} + +/* Check if a framing pattern is a video frame pattern or a header pattern */ +static inline int mpq_dmx_is_video_frame( + enum dmx_video_codec codec, + u64 pattern_type) +{ + switch (codec) { + case DMX_VIDEO_CODEC_MPEG2: + if (video_b_frame_events == 1) + if (pattern_type == DMX_IDX_MPEG_B_FRAME_START) + non_predicted_video_frame = 1; + + if ((pattern_type == DMX_IDX_MPEG_I_FRAME_START) || + (pattern_type == DMX_IDX_MPEG_P_FRAME_START) || + (pattern_type == DMX_IDX_MPEG_B_FRAME_START)) + return 1; + return 0; + + case DMX_VIDEO_CODEC_H264: + if (video_b_frame_events == 1) { + if (pattern_type == DMX_IDX_H264_NON_IDR_BSLICE_START) + non_predicted_video_frame = 1; + + if ((pattern_type == DMX_IDX_H264_IDR_ISLICE_START) || + (pattern_type == + DMX_IDX_H264_NON_IDR_PSLICE_START) || + (pattern_type == DMX_IDX_H264_NON_IDR_BSLICE_START)) + return 1; + } else { + if ((pattern_type == DMX_IDX_H264_IDR_START) || + (pattern_type == DMX_IDX_H264_NON_IDR_START)) + return 1; + } + return 0; + + case DMX_VIDEO_CODEC_VC1: + if (pattern_type == DMX_IDX_VC1_FRAME_START) + return 1; + return 0; + + default: + return -EINVAL; + } +} + +/* + * mpq_dmx_get_pattern_params - Returns the required video + * patterns for framing operation based on video codec. + * + * @video_codec: the video codec. + * @patterns: a pointer to the pattern parameters, updated by this function. + * @patterns_num: number of patterns, updated by this function. + */ +static inline int mpq_dmx_get_pattern_params( + enum dmx_video_codec video_codec, + const struct dvb_dmx_video_patterns + *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM], + int *patterns_num) +{ + switch (video_codec) { + case DMX_VIDEO_CODEC_MPEG2: + patterns[0] = dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER); + patterns[1] = dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP); + patterns[2] = dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START); + patterns[3] = dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START); + patterns[4] = dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START); + *patterns_num = 5; + break; + + case DMX_VIDEO_CODEC_H264: + patterns[0] = dvb_dmx_get_pattern(DMX_IDX_H264_SPS); + patterns[1] = dvb_dmx_get_pattern(DMX_IDX_H264_PPS); + if (video_b_frame_events != 1) { + patterns[2] = dvb_dmx_get_pattern + (DMX_IDX_H264_IDR_START); + patterns[3] = dvb_dmx_get_pattern + (DMX_IDX_H264_NON_IDR_START); + patterns[4] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI); + *patterns_num = 5; + } else { + patterns[2] = dvb_dmx_get_pattern + (DMX_IDX_H264_IDR_ISLICE_START); + patterns[3] = dvb_dmx_get_pattern + (DMX_IDX_H264_NON_IDR_PSLICE_START); + patterns[4] = dvb_dmx_get_pattern + (DMX_IDX_H264_NON_IDR_BSLICE_START); + patterns[5] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI); + *patterns_num = 6; + } + break; + + case DMX_VIDEO_CODEC_VC1: + patterns[0] = dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER); + patterns[1] = dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT); + patterns[2] = dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START); + *patterns_num = 3; + break; + + default: + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + *patterns_num = 0; + return -EINVAL; + } + + return 0; +} + +/* + * mpq_dmx_update_decoder_stat - + * Update decoder output statistics in debug-fs. 
+ * + * @mpq_feed: decoder feed object + */ +void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed) +{ + ktime_t curr_time; + u32 delta_time_ms; + struct mpq_demux *mpq_demux = mpq_feed->mpq_demux; + enum mpq_adapter_stream_if idx; + + if (!dvb_dmx_is_video_feed(mpq_feed->dvb_demux_feed) && + !dvb_dmx_is_audio_feed(mpq_feed->dvb_demux_feed)) + return; + + if (dvb_dmx_is_video_feed(mpq_feed->dvb_demux_feed) && + mpq_feed->video_info.stream_interface <= + MPQ_ADAPTER_VIDEO3_STREAM_IF) + idx = mpq_feed->video_info.stream_interface; + else if (dvb_dmx_is_audio_feed(mpq_feed->dvb_demux_feed) && + mpq_feed->audio_info.stream_interface <= + MPQ_ADAPTER_AUDIO3_STREAM_IF) + idx = mpq_feed->audio_info.stream_interface; + else + return; + + curr_time = ktime_get(); + if (unlikely(!mpq_demux->decoder_stat[idx].out_count)) { + mpq_demux->decoder_stat[idx].out_last_time = curr_time; + mpq_demux->decoder_stat[idx].out_count++; + return; + } + + /* calculate time-delta between frame */ + delta_time_ms = mpq_dmx_calc_time_delta(curr_time, + mpq_demux->decoder_stat[idx].out_last_time); + + mpq_demux->decoder_stat[idx].out_interval_sum += delta_time_ms; + + mpq_demux->decoder_stat[idx].out_interval_average = + mpq_demux->decoder_stat[idx].out_interval_sum / + mpq_demux->decoder_stat[idx].out_count; + + if (delta_time_ms > mpq_demux->decoder_stat[idx].out_interval_max) + mpq_demux->decoder_stat[idx].out_interval_max = delta_time_ms; + + mpq_demux->decoder_stat[idx].out_last_time = curr_time; + mpq_demux->decoder_stat[idx].out_count++; +} + +/* + * mpq_dmx_update_sdmx_stat - + * Update SDMX statistics in debug-fs. + * + * @mpq_demux: mpq_demux object + * @bytes_processed: number of bytes processed by sdmx + * @process_start_time: time before sdmx process was triggered + * @process_end_time: time after sdmx process finished + */ +static inline void mpq_dmx_update_sdmx_stat(struct mpq_demux *mpq_demux, + u32 bytes_processed, ktime_t process_start_time, + ktime_t process_end_time) +{ + u32 packets_num; + u32 process_time; + + mpq_demux->sdmx_process_count++; + packets_num = bytes_processed / mpq_demux->demux.ts_packet_size; + mpq_demux->sdmx_process_packets_sum += packets_num; + mpq_demux->sdmx_process_packets_average = + mpq_demux->sdmx_process_packets_sum / + mpq_demux->sdmx_process_count; + + process_time = + mpq_dmx_calc_time_delta(process_end_time, process_start_time); + + mpq_demux->sdmx_process_time_sum += process_time; + mpq_demux->sdmx_process_time_average = + mpq_demux->sdmx_process_time_sum / + mpq_demux->sdmx_process_count; + + if ((mpq_demux->sdmx_process_count == 1) || + (packets_num < mpq_demux->sdmx_process_packets_min)) + mpq_demux->sdmx_process_packets_min = packets_num; + + if ((mpq_demux->sdmx_process_count == 1) || + (process_time > mpq_demux->sdmx_process_time_max)) + mpq_demux->sdmx_process_time_max = process_time; +} + +static int mpq_sdmx_log_level_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t mpq_sdmx_log_level_read(struct file *fp, + char __user *user_buffer, size_t count, loff_t *position) +{ + char user_str[16]; + struct mpq_demux *mpq_demux = fp->private_data; + int ret; + + ret = scnprintf(user_str, 16, "%d", mpq_demux->sdmx_log_level); + ret = simple_read_from_buffer(user_buffer, count, position, + user_str, ret+1); + + return ret; +} + +static ssize_t mpq_sdmx_log_level_write(struct file *fp, + const char __user *user_buffer, size_t count, loff_t *position) +{ + char user_str[16]; + int ret; + int 
ret_count; + int level; + struct mpq_demux *mpq_demux = fp->private_data; + + if (count >= 16) + return -EINVAL; + + ret_count = simple_write_to_buffer(user_str, 16, position, user_buffer, + count); + if (ret_count < 0) + return ret_count; + + ret = kstrtoint(user_str, 0, &level); + if (ret) + return ret; + + if (level < SDMX_LOG_NO_PRINT || level > SDMX_LOG_VERBOSE) + return -EINVAL; + + mutex_lock(&mpq_demux->mutex); + mpq_demux->sdmx_log_level = level; + if (mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) { + ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle, + mpq_demux->sdmx_log_level); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: Could not set sdmx log level. ret = %d\n", + __func__, ret); + mutex_unlock(&mpq_demux->mutex); + return -EINVAL; + } + } + + mutex_unlock(&mpq_demux->mutex); + return ret_count; +} + +static const struct file_operations sdmx_debug_fops = { + .open = mpq_sdmx_log_level_open, + .read = mpq_sdmx_log_level_read, + .write = mpq_sdmx_log_level_write, + .owner = THIS_MODULE, +}; + +/* Extend dvb-demux debugfs with common plug-in entries */ +void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux) +{ + int i; + char file_name[50]; + struct dentry *debugfs_decoder_dir; + + /* + * Extend dvb-demux debugfs with HW statistics. + * Note that destruction of debugfs directory is done + * when dvb-demux is terminated. + */ + mpq_demux->hw_notification_count = 0; + mpq_demux->hw_notification_interval = 0; + mpq_demux->hw_notification_size = 0; + mpq_demux->hw_notification_min_size = 0xFFFFFFFF; + + if (mpq_demux->demux.dmx.debugfs_demux_dir == NULL) + return; + + debugfs_create_u32( + "hw_notification_interval", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->hw_notification_interval); + + debugfs_create_u32( + "hw_notification_min_interval", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->hw_notification_min_interval); + + debugfs_create_u32( + "hw_notification_count", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->hw_notification_count); + + debugfs_create_u32( + "hw_notification_size", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->hw_notification_size); + + debugfs_create_u32( + "hw_notification_min_size", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->hw_notification_min_size); + + debugfs_decoder_dir = debugfs_create_dir("decoder", + mpq_demux->demux.dmx.debugfs_demux_dir); + + for (i = 0; + debugfs_decoder_dir && + (i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES); + i++) { + snprintf(file_name, 50, "decoder%d_drop_count", i); + debugfs_create_u32( + file_name, + 0444, + debugfs_decoder_dir, + &mpq_demux->decoder_stat[i].drop_count); + + snprintf(file_name, 50, "decoder%d_out_count", i); + debugfs_create_u32( + file_name, + 0444, + debugfs_decoder_dir, + &mpq_demux->decoder_stat[i].out_count); + + snprintf(file_name, 50, "decoder%d_out_interval_sum", i); + debugfs_create_u32( + file_name, + 0444, + debugfs_decoder_dir, + &mpq_demux->decoder_stat[i].out_interval_sum); + + snprintf(file_name, 50, "decoder%d_out_interval_average", i); + debugfs_create_u32( + file_name, + 0444, + debugfs_decoder_dir, + &mpq_demux->decoder_stat[i].out_interval_average); + + snprintf(file_name, 50, "decoder%d_out_interval_max", i); + debugfs_create_u32( + file_name, + 0444, + debugfs_decoder_dir, + &mpq_demux->decoder_stat[i].out_interval_max); + + snprintf(file_name, 50, "decoder%d_ts_errors", i); + debugfs_create_u32( + file_name, + 0444, + debugfs_decoder_dir, + 
&mpq_demux->decoder_stat[i].ts_errors); + + snprintf(file_name, 50, "decoder%d_cc_errors", i); + debugfs_create_u32( + file_name, + 0444, + debugfs_decoder_dir, + &mpq_demux->decoder_stat[i].cc_errors); + } + + debugfs_create_u32( + "sdmx_process_count", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->sdmx_process_count); + + debugfs_create_u32( + "sdmx_process_time_sum", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->sdmx_process_time_sum); + + debugfs_create_u32( + "sdmx_process_time_average", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->sdmx_process_time_average); + + debugfs_create_u32( + "sdmx_process_time_max", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->sdmx_process_time_max); + + debugfs_create_u32( + "sdmx_process_packets_sum", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->sdmx_process_packets_sum); + + debugfs_create_u32( + "sdmx_process_packets_average", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->sdmx_process_packets_average); + + debugfs_create_u32( + "sdmx_process_packets_min", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + &mpq_demux->sdmx_process_packets_min); + + debugfs_create_file("sdmx_log_level", + 0664, + mpq_demux->demux.dmx.debugfs_demux_dir, + mpq_demux, + &sdmx_debug_fops); +} + +/* Update dvb-demux debugfs with HW notification statistics */ +void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux) +{ + ktime_t curr_time; + u32 delta_time_ms; + + curr_time = ktime_get(); + if (likely(mpq_demux->hw_notification_count)) { + /* calculate time-delta between notifications */ + delta_time_ms = mpq_dmx_calc_time_delta(curr_time, + mpq_demux->last_notification_time); + + mpq_demux->hw_notification_interval = delta_time_ms; + + if ((mpq_demux->hw_notification_count == 1) || + (mpq_demux->hw_notification_interval && + mpq_demux->hw_notification_interval < + mpq_demux->hw_notification_min_interval)) + mpq_demux->hw_notification_min_interval = + mpq_demux->hw_notification_interval; + } + + mpq_demux->hw_notification_count++; + mpq_demux->last_notification_time = curr_time; +} + +static void mpq_sdmx_check_app_loaded(void) +{ + int session; + u32 version; + int ret; + + ret = sdmx_open_session(&session); + if (ret != SDMX_SUCCESS) { + MPQ_DVB_ERR_PRINT( + "%s: Could not initialize session with SDMX. ret = %d\n", + __func__, ret); + mpq_dmx_info.secure_demux_app_loaded = 0; + return; + } + + /* Check proper sdmx major version */ + ret = sdmx_get_version(session, &version); + if (ret != SDMX_SUCCESS) { + MPQ_DVB_ERR_PRINT( + "%s: Could not get sdmx version. ret = %d\n", + __func__, ret); + } else { + if ((version >> 8) != SDMX_MAJOR_VERSION_MATCH) + MPQ_DVB_ERR_PRINT( + "%s: sdmx major version does not match. 
expected=%d, actual=%d\n", + __func__, SDMX_MAJOR_VERSION_MATCH, + (version >> 8)); + else + MPQ_DVB_DBG_PRINT( + "%s: sdmx major version is ok = %d\n", + __func__, SDMX_MAJOR_VERSION_MATCH); + } + + mpq_dmx_info.secure_demux_app_loaded = 1; + sdmx_close_session(session); +} + +int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func) +{ + int i; + int j; + int result; + struct mpq_demux *mpq_demux; + struct dvb_adapter *mpq_adapter; + struct mpq_feed *feed; + + MPQ_DVB_DBG_PRINT("%s executed, device num %d\n", + __func__, + mpq_demux_device_num); + + mpq_adapter = mpq_adapter_get(); + + if (mpq_adapter == NULL) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_adapter is not valid\n", + __func__); + result = -EPERM; + goto init_failed; + } + + if (mpq_demux_device_num == 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_demux_device_num set to 0\n", + __func__); + + result = -EPERM; + goto init_failed; + } + + mpq_dmx_info.devices = NULL; + mpq_dmx_info.ion_client = NULL; + + mpq_dmx_info.secure_demux_app_loaded = 0; + + /* Allocate memory for all MPQ devices */ + mpq_dmx_info.devices = + vzalloc(mpq_demux_device_num*sizeof(struct mpq_demux)); + + if (!mpq_dmx_info.devices) { + MPQ_DVB_ERR_PRINT( + "%s: failed to allocate devices memory\n", + __func__); + + result = -ENOMEM; + goto init_failed; + } + + /* + * Create a new ION client used by demux to allocate memory + * for decoder's buffers. + */ + mpq_dmx_info.ion_client = + msm_ion_client_create("demux_client"); + if (IS_ERR_OR_NULL(mpq_dmx_info.ion_client)) { + MPQ_DVB_ERR_PRINT( + "%s: msm_ion_client_create\n", + __func__); + + result = PTR_ERR(mpq_dmx_info.ion_client); + if (!result) + result = -ENOMEM; + mpq_dmx_info.ion_client = NULL; + goto init_failed_free_demux_devices; + } + + /* Initialize and register all demux devices to the system */ + for (i = 0; i < mpq_demux_device_num; i++) { + mpq_demux = mpq_dmx_info.devices+i; + mpq_demux->idx = i; + + /* initialize demux source to memory by default */ + mpq_demux->source = DMX_SOURCE_DVR0 + i; + + /* + * Give the plugin pointer to the ion client so + * that it can allocate memory from ION if it requires so + */ + mpq_demux->ion_client = mpq_dmx_info.ion_client; + + mutex_init(&mpq_demux->mutex); + + mpq_demux->num_secure_feeds = 0; + mpq_demux->num_active_feeds = 0; + mpq_demux->sdmx_filter_count = 0; + mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE; + mpq_demux->sdmx_eos = 0; + mpq_demux->sdmx_log_level = SDMX_LOG_NO_PRINT; + mpq_demux->ts_packet_timestamp_source = 0; + + if (mpq_demux->demux.feednum > MPQ_MAX_DMX_FILES) { + MPQ_DVB_ERR_PRINT( + "%s: err - actual feednum (%d) larger than max, enlarge MPQ_MAX_DMX_FILES!\n", + __func__, + mpq_demux->demux.feednum); + result = -EINVAL; + goto init_failed_free_demux_devices; + } + + /* Initialize private feed info */ + for (j = 0; j < MPQ_MAX_DMX_FILES; j++) { + feed = &mpq_demux->feeds[j]; + memset(feed, 0, sizeof(*feed)); + feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE; + feed->mpq_demux = mpq_demux; + feed->session_id = 0; + } + + /* + * mpq_demux_plugin_hw_init should be implemented + * by the specific plugin + */ + result = dmx_init_func(mpq_adapter, mpq_demux); + if (result < 0) { + MPQ_DVB_ERR_PRINT( + "%s: dmx_init_func (errno=%d)\n", + __func__, + result); + + goto init_failed_free_demux_devices; + } + + mpq_demux->is_initialized = 1; + + /* + * dvb-demux is now initialized, + * update back-pointers of private feeds + */ + for (j = 0; j < MPQ_MAX_DMX_FILES; j++) { + feed = &mpq_demux->feeds[j]; + feed->dvb_demux_feed = 
&mpq_demux->demux.feed[j]; + mpq_demux->demux.feed[j].priv = feed; + } + + /* + * Add capability of receiving input from memory. + * Every demux in our system may be connected to memory input, + * or any live input. + */ + mpq_demux->fe_memory.source = DMX_MEMORY_FE; + result = + mpq_demux->demux.dmx.add_frontend( + &mpq_demux->demux.dmx, + &mpq_demux->fe_memory); + + if (result < 0) { + MPQ_DVB_ERR_PRINT( + "%s: add_frontend (mem) failed (errno=%d)\n", + __func__, + result); + + goto init_failed_free_demux_devices; + } + } + + return 0; + +init_failed_free_demux_devices: + mpq_dmx_plugin_exit(); +init_failed: + return result; +} + +void mpq_dmx_plugin_exit(void) +{ + int i; + struct mpq_demux *mpq_demux; + + MPQ_DVB_DBG_PRINT("%s executed\n", __func__); + + if (mpq_dmx_info.ion_client != NULL) { + ion_client_destroy(mpq_dmx_info.ion_client); + mpq_dmx_info.ion_client = NULL; + } + + if (mpq_dmx_info.devices != NULL) { + for (i = 0; i < mpq_demux_device_num; i++) { + mpq_demux = mpq_dmx_info.devices + i; + + if (!mpq_demux->is_initialized) + continue; + + if (mpq_demux->mpq_dmx_plugin_release) + mpq_demux->mpq_dmx_plugin_release(mpq_demux); + + mpq_demux->demux.dmx.remove_frontend( + &mpq_demux->demux.dmx, + &mpq_demux->fe_memory); + + if (mpq_dmx_info.secure_demux_app_loaded) + mpq_sdmx_close_session(mpq_demux); + mutex_destroy(&mpq_demux->mutex); + dvb_dmxdev_release(&mpq_demux->dmxdev); + dvb_dmx_release(&mpq_demux->demux); + } + + vfree(mpq_dmx_info.devices); + mpq_dmx_info.devices = NULL; + } +} + +int mpq_dmx_set_source( + struct dmx_demux *demux, + const dmx_source_t *src) +{ + int i; + int dvr_index; + int dmx_index; + struct dvb_demux *dvb_demux = demux->priv; + struct mpq_demux *mpq_demux; + + if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL)) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + mpq_demux = dvb_demux->priv; + if (mpq_demux == NULL) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + /* + * For dvr sources, + * verify that this source is connected to the respective demux + */ + dmx_index = mpq_demux - mpq_dmx_info.devices; + + if (*src >= DMX_SOURCE_DVR0) { + dvr_index = *src - DMX_SOURCE_DVR0; + + if (dvr_index != dmx_index) { + MPQ_DVB_ERR_PRINT( + "%s: can't connect demux%d to dvr%d\n", + __func__, + dmx_index, + dvr_index); + return -EINVAL; + } + } + + /* + * For front-end sources, + * verify that this source is not already set to different demux + */ + for (i = 0; i < mpq_demux_device_num; i++) { + if ((&mpq_dmx_info.devices[i] != mpq_demux) && + (mpq_dmx_info.devices[i].source == *src)) { + MPQ_DVB_ERR_PRINT( + "%s: demux%d source can't be set,\n" + "demux%d occupies this source already\n", + __func__, + dmx_index, + i); + return -EBUSY; + } + } + + mpq_demux->source = *src; + return 0; +} + +/** + * Takes an ION allocated buffer's file descriptor and handles the details of + * mapping it into kernel memory and obtaining an ION handle for it. + * Internal helper function. + * + * @client: ION client + * @handle: ION file descriptor to map + * @priv_handle: returned ION handle. 
Must be freed when no longer needed + * @kernel_mem: returned kernel mapped pointer + * + * Note: mapping might not be possible in secured heaps/buffers, and so NULL + * might be returned in kernel_mem + * + * Return errors status + */ +static int mpq_map_buffer_to_kernel( + struct ion_client *client, + int handle, + struct ion_handle **priv_handle, + void **kernel_mem) +{ + struct ion_handle *ion_handle; + unsigned long ionflag = 0; + int ret; + + if (client == NULL || priv_handle == NULL || kernel_mem == NULL) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + ion_handle = ion_import_dma_buf_fd(client, handle); + if (IS_ERR_OR_NULL(ion_handle)) { + ret = PTR_ERR(ion_handle); + MPQ_DVB_ERR_PRINT("%s: ion_import_dma_buf failed %d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + + goto map_buffer_failed; + } + + ret = ion_handle_get_flags(client, ion_handle, &ionflag); + if (ret) { + MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n", + __func__, ret); + goto map_buffer_failed_free_buff; + } + + if (ionflag & ION_FLAG_SECURE) { + MPQ_DVB_DBG_PRINT("%s: secured buffer\n", __func__); + *kernel_mem = NULL; + } else { + size_t tmp; + *kernel_mem = ion_map_kernel(client, ion_handle); + if (IS_ERR_OR_NULL(*kernel_mem)) { + ret = PTR_ERR(*kernel_mem); + MPQ_DVB_ERR_PRINT("%s: ion_map_kernel failed, ret=%d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + goto map_buffer_failed_free_buff; + } + ion_handle_get_size(client, ion_handle, &tmp); + MPQ_DVB_DBG_PRINT( + "%s: mapped to address 0x%p, size=%zu\n", + __func__, *kernel_mem, tmp); + } + + *priv_handle = ion_handle; + return 0; + +map_buffer_failed_free_buff: + ion_free(client, ion_handle); +map_buffer_failed: + return ret; +} + +int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer, + void **priv_handle, void **kernel_mem) +{ + struct dvb_demux *dvb_demux = demux->priv; + struct mpq_demux *mpq_demux; + + if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) || + (priv_handle == NULL) || (kernel_mem == NULL)) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + mpq_demux = dvb_demux->priv; + if (mpq_demux == NULL) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + return mpq_map_buffer_to_kernel( + mpq_demux->ion_client, + dmx_buffer->handle, + (struct ion_handle **)priv_handle, kernel_mem); +} + +int mpq_dmx_unmap_buffer(struct dmx_demux *demux, + void *priv_handle) +{ + struct dvb_demux *dvb_demux = demux->priv; + struct ion_handle *ion_handle = priv_handle; + struct mpq_demux *mpq_demux; + unsigned long ionflag = 0; + int ret; + + if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) || + (priv_handle == NULL)) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + mpq_demux = dvb_demux->priv; + if (mpq_demux == NULL) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + ret = ion_handle_get_flags(mpq_demux->ion_client, ion_handle, &ionflag); + if (ret) { + MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n", + __func__, ret); + return -EINVAL; + } + + if (!(ionflag & ION_FLAG_SECURE)) + ion_unmap_kernel(mpq_demux->ion_client, ion_handle); + + ion_free(mpq_demux->ion_client, ion_handle); + + return 0; +} + +int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie) +{ + struct mpq_demux *mpq_demux = feed->demux->priv; + + MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, cookie); + + if (cookie < 0) { + 
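/* a negative cookie cannot refer to a valid stream-buffer packet */
+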
MPQ_DVB_ERR_PRINT("%s: invalid cookie parameter\n", __func__); + return -EINVAL; + } + + if (dvb_dmx_is_video_feed(feed)) { + struct mpq_video_feed_info *feed_data; + struct mpq_feed *mpq_feed; + struct mpq_streambuffer *stream_buffer; + int ret; + + mutex_lock(&mpq_demux->mutex); + mpq_feed = feed->priv; + feed_data = &mpq_feed->video_info; + + spin_lock(&feed_data->video_buffer_lock); + stream_buffer = feed_data->video_buffer; + if (stream_buffer == NULL) { + MPQ_DVB_ERR_PRINT( + "%s: invalid feed, feed_data->video_buffer is NULL\n", + __func__); + spin_unlock(&feed_data->video_buffer_lock); + mutex_unlock(&mpq_demux->mutex); + return -EINVAL; + } + + ret = mpq_streambuffer_pkt_dispose(stream_buffer, cookie, 1); + spin_unlock(&feed_data->video_buffer_lock); + mutex_unlock(&mpq_demux->mutex); + + return ret; + } else if (dvb_dmx_is_audio_feed(feed)) { + struct mpq_audio_feed_info *feed_data; + struct mpq_feed *mpq_feed; + struct mpq_streambuffer *stream_buffer; + int ret; + + mutex_lock(&mpq_demux->mutex); + mpq_feed = feed->priv; + feed_data = &mpq_feed->audio_info; + + spin_lock(&feed_data->audio_buffer_lock); + stream_buffer = feed_data->audio_buffer; + if (stream_buffer == NULL) { + MPQ_DVB_ERR_PRINT( + "%s: invalid feed, feed_data->audio_buffer is NULL\n", + __func__); + spin_unlock(&feed_data->audio_buffer_lock); + mutex_unlock(&mpq_demux->mutex); + return -EINVAL; + } + + ret = mpq_streambuffer_pkt_dispose(stream_buffer, cookie, 1); + spin_unlock(&feed_data->audio_buffer_lock); + mutex_unlock(&mpq_demux->mutex); + + return ret; + } + MPQ_DVB_ERR_PRINT("%s: Invalid feed type %d\n", + __func__, feed->pes_type); + + return -EINVAL; +} + +/** + * Handles the details of internal decoder buffer allocation via ION. + * Internal helper function. + * @feed_data: decoder feed object + * @dec_buffs: buffer information + * @client: ION client + * + * Return error status + */ +static int mpq_dmx_init_internal_buffers( + struct mpq_demux *mpq_demux, + struct mpq_video_feed_info *feed_data, + struct dmx_decoder_buffers *dec_buffs) +{ + struct ion_handle *temp_handle = NULL; + void *payload_buffer = NULL; + int actual_buffer_size = 0; + int ret = 0; + + MPQ_DVB_DBG_PRINT("%s: Internal decoder buffer allocation\n", __func__); + + actual_buffer_size = dec_buffs->buffers_size; + actual_buffer_size += (SZ_4K - 1); + actual_buffer_size &= ~(SZ_4K - 1); + + temp_handle = ion_alloc(mpq_demux->ion_client, + actual_buffer_size, SZ_4K, + ION_HEAP(video_secure_ion_heap) | + ION_HEAP(video_nonsecure_ion_heap), + mpq_demux->decoder_alloc_flags); + + if (IS_ERR_OR_NULL(temp_handle)) { + ret = PTR_ERR(temp_handle); + MPQ_DVB_ERR_PRINT("%s: FAILED to allocate payload buffer %d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + goto end; + } + + payload_buffer = ion_map_kernel(mpq_demux->ion_client, temp_handle); + + if (IS_ERR_OR_NULL(payload_buffer)) { + ret = PTR_ERR(payload_buffer); + MPQ_DVB_ERR_PRINT( + "%s: FAILED to map payload buffer %d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + goto init_failed_free_payload_buffer; + } + + feed_data->buffer_desc.decoder_buffers_num = 1; + feed_data->buffer_desc.ion_handle[0] = temp_handle; + feed_data->buffer_desc.desc[0].base = payload_buffer; + feed_data->buffer_desc.desc[0].size = actual_buffer_size; + feed_data->buffer_desc.desc[0].read_ptr = 0; + feed_data->buffer_desc.desc[0].write_ptr = 0; + feed_data->buffer_desc.desc[0].handle = + ion_share_dma_buf_fd(mpq_demux->ion_client, temp_handle); + + if (feed_data->buffer_desc.desc[0].handle < 0) { + 
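/* ion_share_dma_buf_fd() failed to produce a shareable fd for the buffer */
+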
MPQ_DVB_ERR_PRINT( + "%s: FAILED to share payload buffer %d\n", + __func__, ret); + ret = -ENOMEM; + goto init_failed_unmap_payload_buffer; + } + + feed_data->buffer_desc.shared_file = fget( + feed_data->buffer_desc.desc[0].handle); + + return 0; + +init_failed_unmap_payload_buffer: + ion_unmap_kernel(mpq_demux->ion_client, temp_handle); + feed_data->buffer_desc.desc[0].base = NULL; +init_failed_free_payload_buffer: + ion_free(mpq_demux->ion_client, temp_handle); + feed_data->buffer_desc.ion_handle[0] = NULL; + feed_data->buffer_desc.desc[0].size = 0; + feed_data->buffer_desc.decoder_buffers_num = 0; + feed_data->buffer_desc.shared_file = NULL; +end: + return ret; + +} + +/** + * Handles the details of external decoder buffers allocated by user. + * Each buffer is mapped into kernel memory and an ION handle is obtained, and + * decoder feed object is updated with related information. + * Internal helper function. + * @feed_data: decoder feed object + * @dec_buffs: buffer information + * @client: ION client + * + * Return error status + */ +static int mpq_dmx_init_external_buffers( + struct mpq_video_feed_info *feed_data, + struct dmx_decoder_buffers *dec_buffs, + struct ion_client *client) +{ + struct ion_handle *temp_handle = NULL; + void *payload_buffer = NULL; + int actual_buffer_size = 0; + int ret = 0; + int i; + + /* + * Payload buffer was allocated externally (through ION). + * Map the ion handles to kernel memory + */ + MPQ_DVB_DBG_PRINT("%s: External decoder buffer allocation\n", __func__); + + actual_buffer_size = dec_buffs->buffers_size; + if (!dec_buffs->is_linear) { + MPQ_DVB_DBG_PRINT("%s: Ex. Ring-buffer\n", __func__); + feed_data->buffer_desc.decoder_buffers_num = 1; + } else { + MPQ_DVB_DBG_PRINT("%s: Ex. Linear\n", __func__); + feed_data->buffer_desc.decoder_buffers_num = + dec_buffs->buffers_num; + } + + for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) { + ret = mpq_map_buffer_to_kernel( + client, + dec_buffs->handles[i], + &temp_handle, + &payload_buffer); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: Failed mapping buffer %d\n", + __func__, i); + goto init_failed; + } + feed_data->buffer_desc.ion_handle[i] = temp_handle; + feed_data->buffer_desc.desc[i].base = payload_buffer; + feed_data->buffer_desc.desc[i].handle = + dec_buffs->handles[i]; + feed_data->buffer_desc.desc[i].size = + dec_buffs->buffers_size; + feed_data->buffer_desc.desc[i].read_ptr = 0; + feed_data->buffer_desc.desc[i].write_ptr = 0; + + MPQ_DVB_DBG_PRINT( + "%s: Buffer #%d: base=0x%p, handle=%d, size=%d\n", + __func__, i, + feed_data->buffer_desc.desc[i].base, + feed_data->buffer_desc.desc[i].handle, + feed_data->buffer_desc.desc[i].size); + } + + return 0; + +init_failed: + for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) { + if (feed_data->buffer_desc.ion_handle[i]) { + if (feed_data->buffer_desc.desc[i].base) { + ion_unmap_kernel(client, + feed_data->buffer_desc.ion_handle[i]); + feed_data->buffer_desc.desc[i].base = NULL; + } + ion_free(client, feed_data->buffer_desc.ion_handle[i]); + feed_data->buffer_desc.ion_handle[i] = NULL; + feed_data->buffer_desc.desc[i].size = 0; + } + } + return ret; +} + +/** + * Handles the details of initializing the mpq_streambuffer object according + * to the user decoder buffer configuration: External/Internal buffers and + * ring/linear buffering mode. + * Internal helper function. 
+ * @feed: dvb demux feed object, contains the buffers configuration + * @feed_data: decoder feed object + * @stream_buffer: stream buffer object to initialize + * + * Return error status + */ +static int mpq_dmx_init_streambuffer( + struct mpq_feed *feed, + struct mpq_video_feed_info *feed_data, + struct mpq_streambuffer *stream_buffer) +{ + int ret; + void *packet_buffer = NULL; + struct mpq_demux *mpq_demux = feed->mpq_demux; + struct ion_client *client = mpq_demux->ion_client; + struct dmx_decoder_buffers *dec_buffs = NULL; + enum mpq_streambuffer_mode mode; + + dec_buffs = feed->dvb_demux_feed->feed.ts.decoder_buffers; + + /* Allocate packet buffer holding the meta-data */ + packet_buffer = vmalloc(VIDEO_META_DATA_BUFFER_SIZE); + + if (packet_buffer == NULL) { + MPQ_DVB_ERR_PRINT( + "%s: FAILED to allocate packets buffer\n", + __func__); + + ret = -ENOMEM; + goto end; + } + + MPQ_DVB_DBG_PRINT("%s: dec_buffs: num=%d, size=%d, linear=%d\n", + __func__, + dec_buffs->buffers_num, + dec_buffs->buffers_size, + dec_buffs->is_linear); + + if (dec_buffs->buffers_num == 0) + ret = mpq_dmx_init_internal_buffers( + mpq_demux, feed_data, dec_buffs); + else + ret = mpq_dmx_init_external_buffers( + feed_data, dec_buffs, client); + + if (ret != 0) + goto init_failed_free_packet_buffer; + + mode = dec_buffs->is_linear ? MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR : + MPQ_STREAMBUFFER_BUFFER_MODE_RING; + ret = mpq_streambuffer_init( + feed_data->video_buffer, + mode, + feed_data->buffer_desc.desc, + feed_data->buffer_desc.decoder_buffers_num, + packet_buffer, + VIDEO_META_DATA_BUFFER_SIZE); + + if (ret != 0) + goto init_failed_free_packet_buffer; + + goto end; + + +init_failed_free_packet_buffer: + vfree(packet_buffer); +end: + return ret; +} + +static void mpq_dmx_release_streambuffer( + struct mpq_feed *feed, + struct mpq_video_feed_info *feed_data, + struct mpq_streambuffer *video_buffer, + struct ion_client *client) +{ + int buf_num = 0; + int i; + struct dmx_decoder_buffers *dec_buffs = + feed->dvb_demux_feed->feed.ts.decoder_buffers; + + mpq_adapter_unregister_stream_if(feed_data->stream_interface); + + mpq_streambuffer_terminate(video_buffer); + + vfree(video_buffer->packet_data.data); + + buf_num = feed_data->buffer_desc.decoder_buffers_num; + + for (i = 0; i < buf_num; i++) { + if (feed_data->buffer_desc.ion_handle[i]) { + if (feed_data->buffer_desc.desc[i].base) { + ion_unmap_kernel(client, + feed_data->buffer_desc.ion_handle[i]); + feed_data->buffer_desc.desc[i].base = NULL; + } + + /* + * Un-share the buffer if kernel it the one that + * shared it. 
+ */ + if (!dec_buffs->buffers_num && + feed_data->buffer_desc.shared_file) { + fput(feed_data->buffer_desc.shared_file); + feed_data->buffer_desc.shared_file = NULL; + } + + ion_free(client, feed_data->buffer_desc.ion_handle[i]); + feed_data->buffer_desc.ion_handle[i] = NULL; + feed_data->buffer_desc.desc[i].size = 0; + } + } +} + +int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed) +{ + struct mpq_feed *mpq_feed = feed->priv; + struct mpq_video_feed_info *feed_data = &mpq_feed->video_info; + struct mpq_streambuffer *sbuff; + int ret = 0; + + if (!dvb_dmx_is_video_feed(feed)) { + MPQ_DVB_DBG_PRINT("%s: not a video feed, feed type=%d\n", + __func__, feed->pes_type); + return 0; + } + + spin_lock(&feed_data->video_buffer_lock); + + sbuff = feed_data->video_buffer; + if (sbuff == NULL) { + MPQ_DVB_DBG_PRINT("%s: feed_data->video_buffer is NULL\n", + __func__); + spin_unlock(&feed_data->video_buffer_lock); + return -ENODEV; + } + + feed_data->pending_pattern_len = 0; + + ret = mpq_streambuffer_flush(sbuff); + if (ret) + MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer_flush failed, ret=%d\n", + __func__, ret); + + spin_unlock(&feed_data->video_buffer_lock); + + return ret; +} + +static int mpq_dmx_init_audio_internal_buffers( + struct mpq_demux *mpq_demux, + struct mpq_audio_feed_info *feed_data, + struct dmx_decoder_buffers *dec_buffs) +{ + struct ion_handle *temp_handle = NULL; + void *payload_buffer = NULL; + int actual_buffer_size = 0; + int ret = 0; + + MPQ_DVB_DBG_PRINT("%s: Internal audio decoder buffer allocation\n", + __func__); + + actual_buffer_size = dec_buffs->buffers_size; + actual_buffer_size += (SZ_4K - 1); + actual_buffer_size &= ~(SZ_4K - 1); + + temp_handle = ion_alloc(mpq_demux->ion_client, + actual_buffer_size, SZ_4K, + ION_HEAP(audio_nonsecure_ion_heap), + mpq_demux->decoder_alloc_flags); + + if (IS_ERR_OR_NULL(temp_handle)) { + ret = PTR_ERR(temp_handle); + MPQ_DVB_ERR_PRINT( + "%s: FAILED to allocate audio payload buffer %d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + goto end; + } + + payload_buffer = ion_map_kernel(mpq_demux->ion_client, temp_handle); + + if (IS_ERR_OR_NULL(payload_buffer)) { + ret = PTR_ERR(payload_buffer); + MPQ_DVB_ERR_PRINT( + "%s: FAILED to map audio payload buffer %d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + goto init_failed_free_payload_buffer; + } + feed_data->buffer_desc.decoder_buffers_num = 1; + feed_data->buffer_desc.ion_handle[0] = temp_handle; + feed_data->buffer_desc.desc[0].base = payload_buffer; + feed_data->buffer_desc.desc[0].size = actual_buffer_size; + feed_data->buffer_desc.desc[0].read_ptr = 0; + feed_data->buffer_desc.desc[0].write_ptr = 0; + feed_data->buffer_desc.desc[0].handle = + ion_share_dma_buf_fd(mpq_demux->ion_client, temp_handle); + if (feed_data->buffer_desc.desc[0].handle < 0) { + MPQ_DVB_ERR_PRINT( + "%s: FAILED to share audio payload buffer %d\n", + __func__, ret); + ret = -ENOMEM; + goto init_failed_unmap_payload_buffer; + } + + feed_data->buffer_desc.shared_file = fget( + feed_data->buffer_desc.desc[0].handle); + + return 0; + +init_failed_unmap_payload_buffer: + ion_unmap_kernel(mpq_demux->ion_client, temp_handle); + feed_data->buffer_desc.desc[0].base = NULL; +init_failed_free_payload_buffer: + ion_free(mpq_demux->ion_client, temp_handle); + feed_data->buffer_desc.ion_handle[0] = NULL; + feed_data->buffer_desc.desc[0].size = 0; + feed_data->buffer_desc.decoder_buffers_num = 0; + feed_data->buffer_desc.shared_file = NULL; +end: + return ret; +} + +static int 
mpq_dmx_init_audio_external_buffers( + struct mpq_audio_feed_info *feed_data, + struct dmx_decoder_buffers *dec_buffs, + struct ion_client *client) +{ + struct ion_handle *temp_handle = NULL; + void *payload_buffer = NULL; + int actual_buffer_size = 0; + int ret = 0; + int i; + + /* + * Payload buffer was allocated externally (through ION). + * Map the ion handles to kernel memory + */ + MPQ_DVB_DBG_PRINT("%s: External audio decoder buffer allocation\n", + __func__); + + actual_buffer_size = dec_buffs->buffers_size; + if (!dec_buffs->is_linear) { + MPQ_DVB_DBG_PRINT("%s: Ex. Ring-buffer\n", __func__); + feed_data->buffer_desc.decoder_buffers_num = 1; + } else { + MPQ_DVB_DBG_PRINT("%s: Ex. Linear\n", __func__); + feed_data->buffer_desc.decoder_buffers_num = + dec_buffs->buffers_num; + } + + for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) { + ret = mpq_map_buffer_to_kernel( + client, + dec_buffs->handles[i], + &temp_handle, + &payload_buffer); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: Failed mapping audio buffer %d\n", + __func__, i); + goto init_failed; + } + feed_data->buffer_desc.ion_handle[i] = temp_handle; + feed_data->buffer_desc.desc[i].base = payload_buffer; + feed_data->buffer_desc.desc[i].handle = + dec_buffs->handles[i]; + feed_data->buffer_desc.desc[i].size = + dec_buffs->buffers_size; + feed_data->buffer_desc.desc[i].read_ptr = 0; + feed_data->buffer_desc.desc[i].write_ptr = 0; + + MPQ_DVB_DBG_PRINT( + "%s: Audio Buffer #%d: base=0x%p, handle=%d, size=%d\n", + __func__, i, + feed_data->buffer_desc.desc[i].base, + feed_data->buffer_desc.desc[i].handle, + feed_data->buffer_desc.desc[i].size); + } + + return 0; + +init_failed: + for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) { + if (feed_data->buffer_desc.ion_handle[i]) { + if (feed_data->buffer_desc.desc[i].base) { + ion_unmap_kernel(client, + feed_data->buffer_desc.ion_handle[i]); + feed_data->buffer_desc.desc[i].base = NULL; + } + ion_free(client, feed_data->buffer_desc.ion_handle[i]); + feed_data->buffer_desc.ion_handle[i] = NULL; + feed_data->buffer_desc.desc[i].size = 0; + } + } + return ret; +} +static int mpq_dmx_init_audio_streambuffer( + struct mpq_feed *feed, + struct mpq_audio_feed_info *feed_data, + struct mpq_streambuffer *stream_buffer) +{ + int ret; + void *packet_buffer = NULL; + struct mpq_demux *mpq_demux = feed->mpq_demux; + struct ion_client *client = mpq_demux->ion_client; + struct dmx_decoder_buffers *dec_buffs = NULL; + enum mpq_streambuffer_mode mode; + + dec_buffs = feed->dvb_demux_feed->feed.ts.decoder_buffers; + + /* Allocate packet buffer holding the meta-data */ + packet_buffer = vmalloc(AUDIO_META_DATA_BUFFER_SIZE); + + if (packet_buffer == NULL) { + MPQ_DVB_ERR_PRINT( + "%s: FAILED to allocate packets buffer\n", __func__); + ret = -ENOMEM; + goto end; + } + + MPQ_DVB_DBG_PRINT("%s: dec_buffs: num=%d, size=%d, linear=%d\n", + __func__, dec_buffs->buffers_num, + dec_buffs->buffers_size, + dec_buffs->is_linear); + + if (dec_buffs->buffers_num == 0) + ret = mpq_dmx_init_audio_internal_buffers( + mpq_demux, feed_data, dec_buffs); + else + ret = mpq_dmx_init_audio_external_buffers( + feed_data, dec_buffs, client); + + if (ret != 0) + goto init_failed_free_packet_buffer; + + mode = dec_buffs->is_linear ? 
MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR : + MPQ_STREAMBUFFER_BUFFER_MODE_RING; + ret = mpq_streambuffer_init( + feed_data->audio_buffer, + mode, + feed_data->buffer_desc.desc, + feed_data->buffer_desc.decoder_buffers_num, + packet_buffer, + AUDIO_META_DATA_BUFFER_SIZE); + + if (ret != 0) + goto init_failed_free_packet_buffer; + + goto end; + + +init_failed_free_packet_buffer: + vfree(packet_buffer); +end: + return ret; +} + +static void mpq_dmx_release_audio_streambuffer( + struct mpq_feed *feed, + struct mpq_audio_feed_info *feed_data, + struct mpq_streambuffer *audio_buffer, + struct ion_client *client) +{ + int buf_num = 0; + int i; + struct dmx_decoder_buffers *dec_buffs = + feed->dvb_demux_feed->feed.ts.decoder_buffers; + + mpq_adapter_unregister_stream_if(feed_data->stream_interface); + + mpq_streambuffer_terminate(audio_buffer); + + vfree(audio_buffer->packet_data.data); + + buf_num = feed_data->buffer_desc.decoder_buffers_num; + + for (i = 0; i < buf_num; i++) { + if (feed_data->buffer_desc.ion_handle[i]) { + if (feed_data->buffer_desc.desc[i].base) { + ion_unmap_kernel(client, + feed_data->buffer_desc.ion_handle[i]); + feed_data->buffer_desc.desc[i].base = NULL; + } + + /* + * Un-share the buffer if kernel is the one that + * shared it. + */ + if (!dec_buffs->buffers_num && + feed_data->buffer_desc.shared_file) { + fput(feed_data->buffer_desc.shared_file); + feed_data->buffer_desc.shared_file = NULL; + } + + ion_free(client, feed_data->buffer_desc.ion_handle[i]); + feed_data->buffer_desc.ion_handle[i] = NULL; + feed_data->buffer_desc.desc[i].size = 0; + } + } +} + +int mpq_dmx_flush_audio_stream_buffer(struct dvb_demux_feed *feed) +{ + struct mpq_feed *mpq_feed = feed->priv; + struct mpq_audio_feed_info *feed_data = &mpq_feed->audio_info; + struct mpq_streambuffer *sbuff; + int ret = 0; + + if (!dvb_dmx_is_audio_feed(feed)) { + MPQ_DVB_DBG_PRINT("%s: not a audio feed, feed type=%d\n", + __func__, feed->pes_type); + return 0; + } + + spin_lock(&feed_data->audio_buffer_lock); + + sbuff = feed_data->audio_buffer; + if (sbuff == NULL) { + MPQ_DVB_DBG_PRINT("%s: feed_data->audio_buffer is NULL\n", + __func__); + spin_unlock(&feed_data->audio_buffer_lock); + return -ENODEV; + } + + ret = mpq_streambuffer_flush(sbuff); + if (ret) + MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer_flush failed, ret=%d\n", + __func__, ret); + + spin_unlock(&feed_data->audio_buffer_lock); + + return ret; +} + +static int mpq_dmx_flush_buffer(struct dmx_ts_feed *ts_feed, size_t length) +{ + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dvb_demux *demux = feed->demux; + int ret = 0; + + if (mutex_lock_interruptible(&demux->mutex)) + return -ERESTARTSYS; + + dvbdmx_ts_reset_pes_state(feed); + + if (dvb_dmx_is_video_feed(feed)) { + MPQ_DVB_DBG_PRINT("%s: flushing video buffer\n", __func__); + + ret = mpq_dmx_flush_stream_buffer(feed); + } else if (dvb_dmx_is_audio_feed(feed)) { + MPQ_DVB_DBG_PRINT("%s: flushing audio buffer\n", __func__); + + ret = mpq_dmx_flush_audio_stream_buffer(feed); + } + + mutex_unlock(&demux->mutex); + return ret; +} + +/** + * mpq_dmx_init_video_feed - Initializes of video feed information + * used to pass data directly to decoder. + * + * @mpq_feed: The mpq feed object + * + * Return error code. 
+ */ +int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed) +{ + int ret; + struct mpq_video_feed_info *feed_data = &mpq_feed->video_info; + struct mpq_demux *mpq_demux = mpq_feed->mpq_demux; + struct mpq_streambuffer *stream_buffer; + + /* get and store framing information if required */ + if (video_framing) { + mpq_dmx_get_pattern_params( + mpq_feed->dvb_demux_feed->video_codec, + feed_data->patterns, &feed_data->patterns_num); + if (!feed_data->patterns_num) { + MPQ_DVB_ERR_PRINT( + "%s: FAILED to get framing pattern parameters\n", + __func__); + + ret = -EINVAL; + goto init_failed_free_priv_data; + } + } + + /* Register the new stream-buffer interface to MPQ adapter */ + switch (mpq_feed->dvb_demux_feed->pes_type) { + case DMX_PES_VIDEO0: + store_mpq_video_feed[0] = mpq_feed; + feed_data->stream_interface = + MPQ_ADAPTER_VIDEO0_STREAM_IF; + break; + + case DMX_PES_VIDEO1: + store_mpq_video_feed[1] = mpq_feed; + feed_data->stream_interface = + MPQ_ADAPTER_VIDEO1_STREAM_IF; + break; + + case DMX_PES_VIDEO2: + store_mpq_video_feed[2] = mpq_feed; + feed_data->stream_interface = + MPQ_ADAPTER_VIDEO2_STREAM_IF; + break; + + case DMX_PES_VIDEO3: + store_mpq_video_feed[3] = mpq_feed; + feed_data->stream_interface = + MPQ_ADAPTER_VIDEO3_STREAM_IF; + break; + + default: + MPQ_DVB_ERR_PRINT( + "%s: Invalid pes type %d\n", + __func__, + mpq_feed->dvb_demux_feed->pes_type); + ret = -EINVAL; + goto init_failed_free_priv_data; + } + + /* make sure not occupied already */ + stream_buffer = NULL; + mpq_adapter_get_stream_if( + feed_data->stream_interface, + &stream_buffer); + if (stream_buffer != NULL) { + MPQ_DVB_ERR_PRINT( + "%s: Video interface %d already occupied!\n", + __func__, + feed_data->stream_interface); + ret = -EBUSY; + goto init_failed_free_priv_data; + } + + feed_data->video_buffer = + &mpq_dmx_info.decoder_buffers[feed_data->stream_interface]; + + ret = mpq_dmx_init_streambuffer( + mpq_feed, feed_data, feed_data->video_buffer); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_init_streambuffer failed, err = %d\n", + __func__, ret); + goto init_failed_free_priv_data; + } + + ret = mpq_adapter_register_stream_if( + feed_data->stream_interface, + feed_data->video_buffer); + + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_adapter_register_stream_if failed, err = %d\n", + __func__, ret); + goto init_failed_free_stream_buffer; + } + + spin_lock_init(&feed_data->video_buffer_lock); + + feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN; + feed_data->pes_header_offset = 0; + mpq_feed->dvb_demux_feed->pusi_seen = 0; + mpq_feed->dvb_demux_feed->peslen = 0; + feed_data->fullness_wait_cancel = 0; + mpq_streambuffer_get_data_rw_offset(feed_data->video_buffer, NULL, + &feed_data->frame_offset); + feed_data->last_pattern_offset = 0; + feed_data->pending_pattern_len = 0; + feed_data->last_framing_match_type = 0; + feed_data->found_sequence_header_pattern = 0; + memset(&feed_data->prefix_size, 0, + sizeof(struct dvb_dmx_video_prefix_size_masks)); + feed_data->first_prefix_size = 0; + feed_data->saved_pts_dts_info.pts_exist = 0; + feed_data->saved_pts_dts_info.dts_exist = 0; + feed_data->new_pts_dts_info.pts_exist = 0; + feed_data->new_pts_dts_info.dts_exist = 0; + feed_data->saved_info_used = 1; + feed_data->new_info_exists = 0; + feed_data->first_pts_dts_copy = 1; + feed_data->tei_errs = 0; + feed_data->last_continuity = -1; + feed_data->continuity_errs = 0; + feed_data->ts_packets_num = 0; + feed_data->ts_dropped_bytes = 0; + + mpq_demux->decoder_stat[feed_data->stream_interface].drop_count 
= 0; + mpq_demux->decoder_stat[feed_data->stream_interface].out_count = 0; + mpq_demux->decoder_stat[feed_data->stream_interface]. + out_interval_sum = 0; + mpq_demux->decoder_stat[feed_data->stream_interface]. + out_interval_max = 0; + mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors = 0; + mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors = 0; + + return 0; + +init_failed_free_stream_buffer: + mpq_dmx_release_streambuffer(mpq_feed, feed_data, + feed_data->video_buffer, mpq_demux->ion_client); + mpq_adapter_unregister_stream_if(feed_data->stream_interface); +init_failed_free_priv_data: + feed_data->video_buffer = NULL; + return ret; +} + +/* Register the new stream-buffer interface to MPQ adapter */ +int mpq_dmx_init_audio_feed(struct mpq_feed *mpq_feed) +{ + int ret; + struct mpq_audio_feed_info *feed_data = &mpq_feed->audio_info; + struct mpq_demux *mpq_demux = mpq_feed->mpq_demux; + struct mpq_streambuffer *stream_buffer; + + switch (mpq_feed->dvb_demux_feed->pes_type) { + case DMX_PES_AUDIO0: + store_mpq_audio_feed[0] = mpq_feed; + feed_data->stream_interface = + MPQ_ADAPTER_AUDIO0_STREAM_IF; + break; + + case DMX_PES_AUDIO1: + store_mpq_audio_feed[1] = mpq_feed; + feed_data->stream_interface = + MPQ_ADAPTER_AUDIO1_STREAM_IF; + break; + + case DMX_PES_AUDIO2: + store_mpq_audio_feed[2] = mpq_feed; + feed_data->stream_interface = + MPQ_ADAPTER_AUDIO2_STREAM_IF; + break; + + case DMX_PES_AUDIO3: + store_mpq_audio_feed[3] = mpq_feed; + feed_data->stream_interface = + MPQ_ADAPTER_AUDIO3_STREAM_IF; + break; + + default: + MPQ_DVB_ERR_PRINT( + "%s: Invalid pes type %d\n", + __func__, + mpq_feed->dvb_demux_feed->pes_type); + ret = -EINVAL; + goto init_failed_free_priv_data; + } + + /* make sure not occupied already */ + stream_buffer = NULL; + mpq_adapter_get_stream_if( + feed_data->stream_interface, + &stream_buffer); + if (stream_buffer != NULL) { + MPQ_DVB_ERR_PRINT( + "%s: Audio interface %d already occupied!\n", + __func__, feed_data->stream_interface); + ret = -EBUSY; + goto init_failed_free_priv_data; + } + + feed_data->audio_buffer = + &mpq_dmx_info.decoder_buffers[feed_data->stream_interface]; + + ret = mpq_dmx_init_audio_streambuffer( + mpq_feed, feed_data, feed_data->audio_buffer); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_init_streambuffer failed, err = %d\n", + __func__, ret); + goto init_failed_free_priv_data; + } + + ret = mpq_adapter_register_stream_if( + feed_data->stream_interface, + feed_data->audio_buffer); + + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_adapter_register_stream_if failed, err = %d\n", + __func__, ret); + goto init_failed_free_stream_buffer; + } + + spin_lock_init(&feed_data->audio_buffer_lock); + + feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN; + feed_data->pes_header_offset = 0; + mpq_feed->dvb_demux_feed->pusi_seen = 0; + mpq_feed->dvb_demux_feed->peslen = 0; + feed_data->fullness_wait_cancel = 0; + mpq_streambuffer_get_data_rw_offset(feed_data->audio_buffer, NULL, + &feed_data->frame_offset); + feed_data->saved_pts_dts_info.pts_exist = 0; + feed_data->saved_pts_dts_info.dts_exist = 0; + feed_data->new_pts_dts_info.pts_exist = 0; + feed_data->new_pts_dts_info.dts_exist = 0; + feed_data->saved_info_used = 1; + feed_data->new_info_exists = 0; + feed_data->first_pts_dts_copy = 1; + feed_data->tei_errs = 0; + feed_data->last_continuity = -1; + feed_data->continuity_errs = 0; + feed_data->ts_packets_num = 0; + feed_data->ts_dropped_bytes = 0; + + mpq_demux->decoder_stat[feed_data->stream_interface].drop_count 
= 0; + mpq_demux->decoder_stat[feed_data->stream_interface].out_count = 0; + mpq_demux->decoder_stat[feed_data->stream_interface]. + out_interval_sum = 0; + mpq_demux->decoder_stat[feed_data->stream_interface]. + out_interval_max = 0; + mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors = 0; + mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors = 0; + + return 0; + +init_failed_free_stream_buffer: + mpq_dmx_release_audio_streambuffer(mpq_feed, feed_data, + feed_data->audio_buffer, mpq_demux->ion_client); + mpq_adapter_unregister_stream_if(feed_data->stream_interface); +init_failed_free_priv_data: + feed_data->audio_buffer = NULL; + return ret; +} + +/** + * mpq_dmx_terminate_video_feed - terminate video feed information + * that was previously initialized in mpq_dmx_init_video_feed + * + * @mpq_feed: The mpq feed used for the video TS packets + * + * Return error code. + */ +int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed) +{ + struct mpq_streambuffer *video_buffer; + struct mpq_video_feed_info *feed_data; + struct mpq_demux *mpq_demux; + + if (mpq_feed == NULL) + return -EINVAL; + + mpq_demux = mpq_feed->mpq_demux; + feed_data = &mpq_feed->video_info; + + spin_lock(&feed_data->video_buffer_lock); + video_buffer = feed_data->video_buffer; + feed_data->video_buffer = NULL; + wake_up_all(&video_buffer->raw_data.queue); + spin_unlock(&feed_data->video_buffer_lock); + + mpq_dmx_release_streambuffer(mpq_feed, feed_data, + video_buffer, mpq_demux->ion_client); + + return 0; +} + +int mpq_dmx_terminate_audio_feed(struct mpq_feed *mpq_feed) +{ + struct mpq_streambuffer *audio_buffer; + struct mpq_audio_feed_info *feed_data; + struct mpq_demux *mpq_demux; + + if (mpq_feed == NULL) + return -EINVAL; + + mpq_demux = mpq_feed->mpq_demux; + feed_data = &mpq_feed->audio_info; + + spin_lock(&feed_data->audio_buffer_lock); + audio_buffer = feed_data->audio_buffer; + feed_data->audio_buffer = NULL; + wake_up_all(&audio_buffer->raw_data.queue); + spin_unlock(&feed_data->audio_buffer_lock); + + mpq_dmx_release_audio_streambuffer(mpq_feed, feed_data, + audio_buffer, mpq_demux->ion_client); + + return 0; +} + +struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed) +{ + struct dvb_demux_feed *tmp; + struct dvb_demux *dvb_demux = feed->demux; + + list_for_each_entry(tmp, &dvb_demux->feed_list, list_head) { + if (tmp != feed && tmp->state == DMX_STATE_GO && + tmp->feed.ts.buffer.ringbuff == + feed->feed.ts.buffer.ringbuff) { + MPQ_DVB_DBG_PRINT( + "%s: main feed pid=%d, secondary feed pid=%d\n", + __func__, tmp->pid, feed->pid); + return tmp; + } + } + + return NULL; +} + +static int mpq_sdmx_alloc_data_buf(struct mpq_feed *mpq_feed, size_t size) +{ + struct mpq_demux *mpq_demux = mpq_feed->mpq_demux; + void *buf_base; + int ret; + + mpq_feed->sdmx_buf_handle = ion_alloc(mpq_demux->ion_client, + size, + SZ_4K, + ION_HEAP(ION_QSECOM_HEAP_ID), + 0); + if (IS_ERR_OR_NULL(mpq_feed->sdmx_buf_handle)) { + ret = PTR_ERR(mpq_feed->sdmx_buf_handle); + mpq_feed->sdmx_buf_handle = NULL; + MPQ_DVB_ERR_PRINT( + "%s: FAILED to allocate sdmx buffer %d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + goto end; + } + + buf_base = ion_map_kernel(mpq_demux->ion_client, + mpq_feed->sdmx_buf_handle); + if (IS_ERR_OR_NULL(buf_base)) { + ret = PTR_ERR(buf_base); + MPQ_DVB_ERR_PRINT( + "%s: FAILED to map sdmx buffer %d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + goto failed_free_buf; + } + + dvb_ringbuffer_init(&mpq_feed->sdmx_buf, buf_base, size); + + return 0; + 
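+/* error unwind: free the sdmx ION buffer if kernel mapping fails */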
+failed_free_buf: + ion_free(mpq_demux->ion_client, mpq_feed->sdmx_buf_handle); + mpq_feed->sdmx_buf_handle = NULL; +end: + return ret; +} + +static int mpq_sdmx_free_data_buf(struct mpq_feed *mpq_feed) +{ + struct mpq_demux *mpq_demux = mpq_feed->mpq_demux; + + if (mpq_feed->sdmx_buf_handle) { + ion_unmap_kernel(mpq_demux->ion_client, + mpq_feed->sdmx_buf_handle); + mpq_feed->sdmx_buf.data = NULL; + ion_free(mpq_demux->ion_client, + mpq_feed->sdmx_buf_handle); + mpq_feed->sdmx_buf_handle = NULL; + } + + return 0; +} + +static int mpq_sdmx_init_metadata_buffer(struct mpq_demux *mpq_demux, + struct mpq_feed *feed, struct sdmx_buff_descr *metadata_buff_desc) +{ + void *metadata_buff_base; + ion_phys_addr_t temp; + int ret; + size_t size; + + feed->metadata_buf_handle = ion_alloc(mpq_demux->ion_client, + SDMX_METADATA_BUFFER_SIZE, + SZ_4K, + ION_HEAP(ION_QSECOM_HEAP_ID), + 0); + if (IS_ERR_OR_NULL(feed->metadata_buf_handle)) { + ret = PTR_ERR(feed->metadata_buf_handle); + feed->metadata_buf_handle = NULL; + MPQ_DVB_ERR_PRINT( + "%s: FAILED to allocate metadata buffer %d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + goto end; + } + + metadata_buff_base = ion_map_kernel(mpq_demux->ion_client, + feed->metadata_buf_handle); + if (IS_ERR_OR_NULL(metadata_buff_base)) { + ret = PTR_ERR(metadata_buff_base); + MPQ_DVB_ERR_PRINT( + "%s: FAILED to map metadata buffer %d\n", + __func__, ret); + if (!ret) + ret = -ENOMEM; + goto failed_free_metadata_buf; + } + + ret = ion_phys(mpq_demux->ion_client, + feed->metadata_buf_handle, + &temp, + &size); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: FAILED to get physical address %d\n", + __func__, ret); + goto failed_unmap_metadata_buf; + } + metadata_buff_desc->size = size; + metadata_buff_desc->base_addr = (u64)temp; + + dvb_ringbuffer_init(&feed->metadata_buf, metadata_buff_base, + SDMX_METADATA_BUFFER_SIZE); + + return 0; + +failed_unmap_metadata_buf: + ion_unmap_kernel(mpq_demux->ion_client, feed->metadata_buf_handle); +failed_free_metadata_buf: + ion_free(mpq_demux->ion_client, feed->metadata_buf_handle); + feed->metadata_buf_handle = NULL; +end: + return ret; +} + +static int mpq_sdmx_terminate_metadata_buffer(struct mpq_feed *mpq_feed) +{ + struct mpq_demux *mpq_demux = mpq_feed->mpq_demux; + + if (mpq_feed->metadata_buf_handle) { + ion_unmap_kernel(mpq_demux->ion_client, + mpq_feed->metadata_buf_handle); + mpq_feed->metadata_buf.data = NULL; + ion_free(mpq_demux->ion_client, + mpq_feed->metadata_buf_handle); + mpq_feed->metadata_buf_handle = NULL; + } + + return 0; +} + +int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed) +{ + int ret = 0; + struct mpq_demux *mpq_demux; + struct mpq_feed *mpq_feed; + struct mpq_feed *main_rec_feed = NULL; + struct dvb_demux_feed *tmp; + + if (feed == NULL) + return -EINVAL; + + mpq_demux = feed->demux->priv; + + mutex_lock(&mpq_demux->mutex); + mpq_feed = feed->priv; + + if (mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE) { + if (mpq_feed->filter_type == SDMX_RAW_FILTER) { + tmp = mpq_dmx_peer_rec_feed(feed); + if (tmp) + main_rec_feed = tmp->priv; + } + + if (main_rec_feed) { + /* This feed is part of a recording filter */ + MPQ_DVB_DBG_PRINT( + "%s: Removing raw pid %d from filter %d\n", + __func__, feed->pid, + mpq_feed->sdmx_filter_handle); + ret = sdmx_remove_raw_pid( + mpq_demux->sdmx_session_handle, + mpq_feed->sdmx_filter_handle, feed->pid); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: SDMX_remove_raw_pid failed. 
ret = %d\n", + __func__, ret); + + /* If this feed that we are removing was set as primary, + * now other feeds should be set as primary + */ + if (!mpq_feed->secondary_feed) + main_rec_feed->secondary_feed = 0; + } else { + MPQ_DVB_DBG_PRINT("%s: Removing filter %d, pid %d\n", + __func__, mpq_feed->sdmx_filter_handle, + feed->pid); + ret = sdmx_remove_filter(mpq_demux->sdmx_session_handle, + mpq_feed->sdmx_filter_handle); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: SDMX_remove_filter failed. ret = %d\n", + __func__, ret); + } + + mpq_demux->sdmx_filter_count--; + mpq_feed->sdmx_filter_handle = + SDMX_INVALID_FILTER_HANDLE; + } + + mpq_sdmx_close_session(mpq_demux); + if (mpq_demux->num_secure_feeds > 0) + mpq_demux->num_secure_feeds--; + else + MPQ_DVB_DBG_PRINT("%s: Invalid secure feed count= %u\n", + __func__, mpq_demux->num_secure_feeds); + } + + if (dvb_dmx_is_video_feed(feed)) { + ret = mpq_dmx_terminate_video_feed(mpq_feed); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_terminate_video_feed failed. ret = %d\n", + __func__, ret); + } else if (dvb_dmx_is_audio_feed(feed)) { + ret = mpq_dmx_terminate_audio_feed(mpq_feed); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_terminate_audio_feed failed. ret = %d\n", + __func__, ret); + } + + if (mpq_feed->sdmx_buf_handle) { + wake_up_all(&mpq_feed->sdmx_buf.queue); + mpq_sdmx_free_data_buf(mpq_feed); + } + + mpq_sdmx_terminate_metadata_buffer(mpq_feed); + if (mpq_demux->num_active_feeds > 0) + mpq_demux->num_active_feeds--; + else + MPQ_DVB_DBG_PRINT("%s: Invalid num_active_feeds count = %u\n", + __func__, mpq_demux->num_active_feeds); + + mutex_unlock(&mpq_demux->mutex); + + return ret; +} + +int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed) +{ + struct mpq_feed *mpq_feed; + + if (dvb_dmx_is_video_feed(feed)) { + struct mpq_video_feed_info *feed_data; + + mpq_feed = feed->priv; + feed_data = &mpq_feed->video_info; + feed_data->fullness_wait_cancel = 0; + + return 0; + } else if (dvb_dmx_is_audio_feed(feed)) { + struct mpq_audio_feed_info *feed_data; + + mpq_feed = feed->priv; + feed_data = &mpq_feed->audio_info; + feed_data->fullness_wait_cancel = 0; + + return 0; + } + + MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n", __func__, + feed->pes_type); + + return -EINVAL; +} + +/** + * Returns whether the free space of decoder's output + * buffer is larger than specific number of bytes. + * + * @sbuff: MPQ stream buffer used for decoder data. + * @required_space: number of required free bytes in the buffer + * + * Return 1 if required free bytes are available, 0 otherwise. + */ +static inline int mpq_dmx_check_video_decoder_fullness( + struct mpq_streambuffer *sbuff, + size_t required_space) +{ + ssize_t free = mpq_streambuffer_data_free(sbuff); + ssize_t free_meta = mpq_streambuffer_metadata_free(sbuff); + + /* Verify meta-data buffer can contain at least 1 packet */ + if (free_meta < VIDEO_META_DATA_PACKET_SIZE) + return 0; + + /* + * For linear buffers, verify there's enough space for this TSP + * and an additional buffer is free, as framing might required one + * more buffer to be available. 
+ */ + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) + return (free >= required_space && + sbuff->pending_buffers_count < sbuff->buffers_num-1); + else + /* Ring buffer mode */ + return (free >= required_space); +} + +static inline int mpq_dmx_check_audio_decoder_fullness( + struct mpq_streambuffer *sbuff, + size_t required_space) +{ + ssize_t free = mpq_streambuffer_data_free(sbuff); + ssize_t free_meta = mpq_streambuffer_metadata_free(sbuff); + + /* Verify meta-data buffer can contain at least 1 packet */ + if (free_meta < AUDIO_META_DATA_PACKET_SIZE) + return 0; + + /* + * For linear buffers, verify there's enough space for this TSP + * and an additional buffer is free, as framing might required one + * more buffer to be available. + */ + if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) + return (free >= required_space && + sbuff->pending_buffers_count < sbuff->buffers_num-1); + else + return (free >= required_space); /* Ring buffer mode */ +} + +/** + * Checks whether decoder's output buffer has free space + * for specific number of bytes, if not, the function waits + * until the amount of free-space is available. + * + * @feed: decoder's feed object + * @required_space: number of required free bytes in the buffer + * @lock_feed: indicates whether mutex should be held before + * accessing the feed information. If the caller of this function + * already holds a mutex then this should be set to 0 and 1 otherwise. + * + * Return 0 if required space is available and error code + * in case waiting on buffer fullness was aborted. + */ +static int mpq_dmx_decoder_fullness_check( + struct dvb_demux_feed *feed, + size_t required_space, + int lock_feed) +{ + struct mpq_demux *mpq_demux = feed->demux->priv; + struct mpq_streambuffer *sbuff = NULL; + struct mpq_video_feed_info *feed_data; + struct mpq_feed *mpq_feed; + int ret = 0; + + if (!dvb_dmx_is_video_feed(feed)) { + MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n", + __func__, + feed->pes_type); + return -EINVAL; + } + + if (lock_feed) { + mutex_lock(&mpq_demux->mutex); + } else if (!mutex_is_locked(&mpq_demux->mutex)) { + MPQ_DVB_ERR_PRINT( + "%s: Mutex should have been locked\n", + __func__); + return -EINVAL; + } + + mpq_feed = feed->priv; + feed_data = &mpq_feed->video_info; + + sbuff = feed_data->video_buffer; + if (sbuff == NULL) { + if (lock_feed) + mutex_unlock(&mpq_demux->mutex); + MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n", + __func__); + return -EINVAL; + } + + if ((feed_data->video_buffer != NULL) && + (!feed_data->fullness_wait_cancel) && + (!mpq_dmx_check_video_decoder_fullness(sbuff, + required_space))) { + DEFINE_WAIT(__wait); + + for (;;) { + prepare_to_wait(&sbuff->raw_data.queue, + &__wait, + TASK_INTERRUPTIBLE); + if (!feed_data->video_buffer || + feed_data->fullness_wait_cancel || + mpq_dmx_check_video_decoder_fullness(sbuff, + required_space)) + break; + + if (!signal_pending(current)) { + mutex_unlock(&mpq_demux->mutex); + schedule(); + mutex_lock(&mpq_demux->mutex); + continue; + } + + ret = -ERESTARTSYS; + break; + } + finish_wait(&sbuff->raw_data.queue, &__wait); + } + + if (ret < 0) { + if (lock_feed) + mutex_unlock(&mpq_demux->mutex); + return ret; + } + + if ((feed_data->fullness_wait_cancel) || + (feed_data->video_buffer == NULL)) { + if (lock_feed) + mutex_unlock(&mpq_demux->mutex); + return -EINVAL; + } + + if (lock_feed) + mutex_unlock(&mpq_demux->mutex); + return 0; +} + +static int mpq_dmx_audio_decoder_fullness_check( + struct dvb_demux_feed *feed, + size_t required_space, + 
int lock_feed) +{ + struct mpq_demux *mpq_demux = feed->demux->priv; + struct mpq_streambuffer *sbuff = NULL; + struct mpq_audio_feed_info *feed_data; + struct mpq_feed *mpq_feed; + int ret = 0; + + if (!dvb_dmx_is_audio_feed(feed)) { + MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n", + __func__, + feed->pes_type); + return -EINVAL; + } + + if (lock_feed) { + mutex_lock(&mpq_demux->mutex); + } else if (!mutex_is_locked(&mpq_demux->mutex)) { + MPQ_DVB_ERR_PRINT( + "%s: Mutex should have been locked\n", + __func__); + return -EINVAL; + } + + mpq_feed = feed->priv; + feed_data = &mpq_feed->audio_info; + + sbuff = feed_data->audio_buffer; + if (sbuff == NULL) { + if (lock_feed) + mutex_unlock(&mpq_demux->mutex); + MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n", + __func__); + return -EINVAL; + } + + if ((feed_data->audio_buffer != NULL) && + (!feed_data->fullness_wait_cancel) && + (!mpq_dmx_check_audio_decoder_fullness(sbuff, + required_space))) { + DEFINE_WAIT(__wait); + + for (;;) { + prepare_to_wait(&sbuff->raw_data.queue, + &__wait, TASK_INTERRUPTIBLE); + if (!feed_data->audio_buffer || + feed_data->fullness_wait_cancel || + mpq_dmx_check_audio_decoder_fullness(sbuff, + required_space)) + break; + + if (!signal_pending(current)) { + mutex_unlock(&mpq_demux->mutex); + schedule(); + mutex_lock(&mpq_demux->mutex); + continue; + } + + ret = -ERESTARTSYS; + break; + } + finish_wait(&sbuff->raw_data.queue, &__wait); + } + + if (ret < 0) { + if (lock_feed) + mutex_unlock(&mpq_demux->mutex); + return ret; + } + + if ((feed_data->fullness_wait_cancel) || + (feed_data->audio_buffer == NULL)) { + if (lock_feed) + mutex_unlock(&mpq_demux->mutex); + return -EINVAL; + } + + if (lock_feed) + mutex_unlock(&mpq_demux->mutex); + return 0; +} + +int mpq_dmx_decoder_fullness_wait( + struct dvb_demux_feed *feed, + size_t required_space) +{ + if (dvb_dmx_is_video_feed(feed)) + return mpq_dmx_decoder_fullness_check(feed, required_space, 1); + else if (dvb_dmx_is_audio_feed(feed)) + return mpq_dmx_audio_decoder_fullness_check(feed, + required_space, 1); + + return 0; +} + +int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed) +{ + if (dvb_dmx_is_video_feed(feed)) { + struct mpq_feed *mpq_feed; + struct mpq_video_feed_info *feed_data; + struct dvb_ringbuffer *video_buff; + + mpq_feed = feed->priv; + feed_data = &mpq_feed->video_info; + + feed_data->fullness_wait_cancel = 1; + + spin_lock(&feed_data->video_buffer_lock); + if (feed_data->video_buffer == NULL) { + MPQ_DVB_DBG_PRINT( + "%s: video_buffer released\n", __func__); + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + video_buff = &feed_data->video_buffer->raw_data; + wake_up_all(&video_buff->queue); + spin_unlock(&feed_data->video_buffer_lock); + + return 0; + } else if (dvb_dmx_is_audio_feed(feed)) { + struct mpq_feed *mpq_feed; + struct mpq_audio_feed_info *feed_data; + struct dvb_ringbuffer *audio_buff; + + mpq_feed = feed->priv; + feed_data = &mpq_feed->audio_info; + + feed_data->fullness_wait_cancel = 1; + + spin_lock(&feed_data->audio_buffer_lock); + if (feed_data->audio_buffer == NULL) { + MPQ_DVB_DBG_PRINT( + "%s: audio_buffer released\n", __func__); + spin_unlock(&feed_data->audio_buffer_lock); + return 0; + } + + audio_buff = &feed_data->audio_buffer->raw_data; + wake_up_all(&audio_buff->queue); + spin_unlock(&feed_data->audio_buffer_lock); + + return 0; + } + + MPQ_DVB_ERR_PRINT( + "%s: Invalid feed type %d\n", __func__, feed->pes_type); + + return -EINVAL; +} + +int mpq_dmx_parse_mandatory_pes_header( + struct 
dvb_demux_feed *feed, + struct mpq_video_feed_info *feed_data, + struct pes_packet_header *pes_header, + const u8 *buf, + u32 *ts_payload_offset, + int *bytes_avail) +{ + int left_size, copy_len; + + if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) { + left_size = + PES_MANDATORY_FIELDS_LEN - + feed_data->pes_header_offset; + + copy_len = (left_size > *bytes_avail) ? + *bytes_avail : + left_size; + + memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset), + (buf + *ts_payload_offset), + copy_len); + + feed_data->pes_header_offset += copy_len; + + if (left_size > *bytes_avail) + return -EINVAL; + + /* else - we have beginning of PES header */ + *bytes_avail -= left_size; + *ts_payload_offset += left_size; + + /* Make sure the PES packet is valid */ + if (mpq_dmx_is_valid_video_pes(pes_header) < 0) { + /* + * Since the new PES header parsing + * failed, reset pusi_seen to drop all + * data until next PUSI + */ + feed->pusi_seen = 0; + feed_data->pes_header_offset = 0; + + MPQ_DVB_ERR_PRINT( + "%s: invalid packet\n", + __func__); + + return -EINVAL; + } + + feed_data->pes_header_left_bytes = + pes_header->pes_header_data_length; + } + + return 0; +} + +int mpq_dmx_parse_mandatory_audio_pes_header( + struct dvb_demux_feed *feed, + struct mpq_audio_feed_info *feed_data, + struct pes_packet_header *pes_header, + const u8 *buf, + u32 *ts_payload_offset, + int *bytes_avail) +{ + int left_size, copy_len; + + if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) { + left_size = + PES_MANDATORY_FIELDS_LEN - + feed_data->pes_header_offset; + + copy_len = (left_size > *bytes_avail) ? + *bytes_avail : + left_size; + + memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset), + (buf + *ts_payload_offset), + copy_len); + + feed_data->pes_header_offset += copy_len; + + if (left_size > *bytes_avail) + return -EINVAL; + + /* else - we have beginning of PES header */ + *bytes_avail -= left_size; + *ts_payload_offset += left_size; + + /* Make sure the PES packet is valid */ + if (mpq_dmx_is_valid_audio_pes(pes_header) < 0) { + /* + * Since the new PES header parsing + * failed, reset pusi_seen to drop all + * data until next PUSI + */ + feed->pusi_seen = 0; + feed_data->pes_header_offset = 0; + + MPQ_DVB_ERR_PRINT( + "%s: invalid packet\n", + __func__); + + return -EINVAL; + } + + feed_data->pes_header_left_bytes = + pes_header->pes_header_data_length; + } + + return 0; +} + +static inline void mpq_dmx_get_pts_dts(struct mpq_video_feed_info *feed_data, + struct pes_packet_header *pes_header) +{ + struct dmx_pts_dts_info *info = &(feed_data->new_pts_dts_info); + + /* Get PTS/DTS information from PES header */ + + if ((pes_header->pts_dts_flag == 2) || + (pes_header->pts_dts_flag == 3)) { + info->pts_exist = 1; + + info->pts = + ((u64)pes_header->pts_1 << 30) | + ((u64)pes_header->pts_2 << 22) | + ((u64)pes_header->pts_3 << 15) | + ((u64)pes_header->pts_4 << 7) | + (u64)pes_header->pts_5; + } else { + info->pts_exist = 0; + info->pts = 0; + } + + if (pes_header->pts_dts_flag == 3) { + info->dts_exist = 1; + + info->dts = + ((u64)pes_header->dts_1 << 30) | + ((u64)pes_header->dts_2 << 22) | + ((u64)pes_header->dts_3 << 15) | + ((u64)pes_header->dts_4 << 7) | + (u64)pes_header->dts_5; + } else { + info->dts_exist = 0; + info->dts = 0; + } + + feed_data->new_info_exists = 1; +} + +static inline void mpq_dmx_get_audio_pts_dts( + struct mpq_audio_feed_info *feed_data, + struct pes_packet_header *pes_header) +{ + struct dmx_pts_dts_info *info = &(feed_data->new_pts_dts_info); + + /* 
Get PTS/DTS information from PES header */ + + if ((pes_header->pts_dts_flag == 2) || + (pes_header->pts_dts_flag == 3)) { + info->pts_exist = 1; + + info->pts = + ((u64)pes_header->pts_1 << 30) | + ((u64)pes_header->pts_2 << 22) | + ((u64)pes_header->pts_3 << 15) | + ((u64)pes_header->pts_4 << 7) | + (u64)pes_header->pts_5; + } else { + info->pts_exist = 0; + info->pts = 0; + } + + if (pes_header->pts_dts_flag == 3) { + info->dts_exist = 1; + + info->dts = + ((u64)pes_header->dts_1 << 30) | + ((u64)pes_header->dts_2 << 22) | + ((u64)pes_header->dts_3 << 15) | + ((u64)pes_header->dts_4 << 7) | + (u64)pes_header->dts_5; + } else { + info->dts_exist = 0; + info->dts = 0; + } + + feed_data->new_info_exists = 1; +} + +int mpq_dmx_parse_remaining_pes_header( + struct dvb_demux_feed *feed, + struct mpq_video_feed_info *feed_data, + struct pes_packet_header *pes_header, + const u8 *buf, + u32 *ts_payload_offset, + int *bytes_avail) +{ + int left_size, copy_len; + + /* Remaining header bytes that need to be processed? */ + if (!feed_data->pes_header_left_bytes) + return 0; + + /* Did we capture the PTS value (if exists)? */ + if ((*bytes_avail != 0) && + (feed_data->pes_header_offset < + (PES_MANDATORY_FIELDS_LEN+5)) && + ((pes_header->pts_dts_flag == 2) || + (pes_header->pts_dts_flag == 3))) { + + /* 5 more bytes should be there */ + left_size = + PES_MANDATORY_FIELDS_LEN + 5 - + feed_data->pes_header_offset; + + copy_len = (left_size > *bytes_avail) ? + *bytes_avail : + left_size; + + memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset), + (buf + *ts_payload_offset), + copy_len); + + feed_data->pes_header_offset += copy_len; + feed_data->pes_header_left_bytes -= copy_len; + + if (left_size > *bytes_avail) + return -EINVAL; + + /* else - we have the PTS */ + *bytes_avail -= copy_len; + *ts_payload_offset += copy_len; + } + + /* Did we capture the DTS value (if exist)? */ + if ((*bytes_avail != 0) && + (feed_data->pes_header_offset < + (PES_MANDATORY_FIELDS_LEN+10)) && + (pes_header->pts_dts_flag == 3)) { + + /* 5 more bytes should be there */ + left_size = + PES_MANDATORY_FIELDS_LEN + 10 - + feed_data->pes_header_offset; + + copy_len = (left_size > *bytes_avail) ? + *bytes_avail : + left_size; + + memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset), + (buf + *ts_payload_offset), + copy_len); + + feed_data->pes_header_offset += copy_len; + feed_data->pes_header_left_bytes -= copy_len; + + if (left_size > *bytes_avail) + return -EINVAL; + + /* else - we have the DTS */ + *bytes_avail -= copy_len; + *ts_payload_offset += copy_len; + } + + /* Any more header bytes?! */ + if (feed_data->pes_header_left_bytes >= *bytes_avail) { + feed_data->pes_header_left_bytes -= *bytes_avail; + return -EINVAL; + } + + /* get PTS/DTS information from PES header to be written later */ + mpq_dmx_get_pts_dts(feed_data, pes_header); + + /* Got PES header, process payload */ + *bytes_avail -= feed_data->pes_header_left_bytes; + *ts_payload_offset += feed_data->pes_header_left_bytes; + feed_data->pes_header_left_bytes = 0; + + return 0; +} + +int mpq_dmx_parse_remaining_audio_pes_header( + struct dvb_demux_feed *feed, + struct mpq_audio_feed_info *feed_data, + struct pes_packet_header *pes_header, + const u8 *buf, + u32 *ts_payload_offset, + int *bytes_avail) +{ + int left_size, copy_len; + + /* Remaining header bytes that need to be processed? */ + if (!feed_data->pes_header_left_bytes) + return 0; + + /* Did we capture the PTS value (if exists)? 
*/ + if ((*bytes_avail != 0) && + (feed_data->pes_header_offset < + (PES_MANDATORY_FIELDS_LEN+5)) && + ((pes_header->pts_dts_flag == 2) || + (pes_header->pts_dts_flag == 3))) { + + /* 5 more bytes should be there */ + left_size = + PES_MANDATORY_FIELDS_LEN + 5 - + feed_data->pes_header_offset; + + copy_len = + (left_size > *bytes_avail) ? *bytes_avail : left_size; + + memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset), + (buf + *ts_payload_offset), copy_len); + + feed_data->pes_header_offset += copy_len; + feed_data->pes_header_left_bytes -= copy_len; + + if (left_size > *bytes_avail) + return -EINVAL; + + /* else - we have the PTS */ + *bytes_avail -= copy_len; + *ts_payload_offset += copy_len; + } + + /* Did we capture the DTS value (if exist)? */ + if ((*bytes_avail != 0) && + (feed_data->pes_header_offset < + (PES_MANDATORY_FIELDS_LEN+10)) && + (pes_header->pts_dts_flag == 3)) { + + /* 5 more bytes should be there */ + left_size = + PES_MANDATORY_FIELDS_LEN + 10 - + feed_data->pes_header_offset; + + copy_len = (left_size > *bytes_avail) ? + *bytes_avail : + left_size; + + memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset), + (buf + *ts_payload_offset), + copy_len); + + feed_data->pes_header_offset += copy_len; + feed_data->pes_header_left_bytes -= copy_len; + + if (left_size > *bytes_avail) + return -EINVAL; + + /* else - we have the DTS */ + *bytes_avail -= copy_len; + *ts_payload_offset += copy_len; + } + + /* Any more header bytes?! */ + if (feed_data->pes_header_left_bytes >= *bytes_avail) { + feed_data->pes_header_left_bytes -= *bytes_avail; + return -EINVAL; + } + + /* get PTS/DTS information from PES header to be written later */ + mpq_dmx_get_audio_pts_dts(feed_data, pes_header); + + /* Got PES header, process payload */ + *bytes_avail -= feed_data->pes_header_left_bytes; + *ts_payload_offset += feed_data->pes_header_left_bytes; + feed_data->pes_header_left_bytes = 0; + + return 0; +} + +static void mpq_dmx_check_continuity(struct mpq_video_feed_info *feed_data, + int current_continuity, + int discontinuity_indicator) +{ + const int max_continuity = 0x0F; /* 4 bits in the TS packet header */ + + /* sanity check */ + if (unlikely((current_continuity < 0) || + (current_continuity > max_continuity))) { + MPQ_DVB_DBG_PRINT( + "%s: received invalid continuity counter value %d\n", + __func__, current_continuity); + return; + } + + /* reset last continuity */ + if ((feed_data->last_continuity == -1) || + (discontinuity_indicator)) { + feed_data->last_continuity = current_continuity; + return; + } + + /* check for continuity errors */ + if (current_continuity != + ((feed_data->last_continuity + 1) & max_continuity)) + feed_data->continuity_errs++; + + /* save for next time */ + feed_data->last_continuity = current_continuity; +} + +static void mpq_dmx_check_audio_continuity( + struct mpq_audio_feed_info *feed_data, + int current_continuity, + int discontinuity_indicator) +{ + const int max_continuity = 0x0F; /* 4 bits in the TS packet header */ + + /* sanity check */ + if (unlikely((current_continuity < 0) || + (current_continuity > max_continuity))) { + MPQ_DVB_DBG_PRINT( + "%s: received invalid continuity counter value %d\n", + __func__, current_continuity); + return; + } + + /* reset last continuity */ + if ((feed_data->last_continuity == -1) || (discontinuity_indicator)) { + feed_data->last_continuity = current_continuity; + return; + } + + /* check for continuity errors */ + if (current_continuity != + ((feed_data->last_continuity + 1) & max_continuity)) + 
feed_data->continuity_errs++; + + /* save for next time */ + feed_data->last_continuity = current_continuity; +} + +static inline void mpq_dmx_prepare_es_event_data( + struct mpq_streambuffer_packet_header *packet, + struct mpq_adapter_video_meta_data *meta_data, + struct mpq_video_feed_info *feed_data, + struct mpq_streambuffer *stream_buffer, + struct dmx_data_ready *data, + int cookie) +{ + struct dmx_pts_dts_info *pts_dts; + + if (meta_data->packet_type == DMX_PES_PACKET) { + pts_dts = &meta_data->info.pes.pts_dts_info; + data->buf.stc = meta_data->info.pes.stc; + } else { + pts_dts = &meta_data->info.framing.pts_dts_info; + data->buf.stc = meta_data->info.framing.stc; + } + + pts_dts = meta_data->packet_type == DMX_PES_PACKET ? + &meta_data->info.pes.pts_dts_info : + &meta_data->info.framing.pts_dts_info; + + data->data_length = 0; + data->buf.handle = packet->raw_data_handle; + data->buf.cookie = cookie; + data->buf.offset = packet->raw_data_offset; + data->buf.len = packet->raw_data_len; + data->buf.pts_exists = pts_dts->pts_exist; + data->buf.pts = pts_dts->pts; + data->buf.dts_exists = pts_dts->dts_exist; + data->buf.dts = pts_dts->dts; + data->buf.tei_counter = feed_data->tei_errs; + data->buf.cont_err_counter = feed_data->continuity_errs; + data->buf.ts_packets_num = feed_data->ts_packets_num; + data->buf.ts_dropped_bytes = feed_data->ts_dropped_bytes; + data->status = DMX_OK_DECODER_BUF; + + MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, data->buf.cookie); + + /* reset counters */ + feed_data->ts_packets_num = 0; + feed_data->ts_dropped_bytes = 0; + feed_data->tei_errs = 0; + feed_data->continuity_errs = 0; +} + +static inline void mpq_dmx_prepare_audio_es_event_data( + struct mpq_streambuffer_packet_header *packet, + struct mpq_adapter_audio_meta_data *meta_data, + struct mpq_audio_feed_info *feed_data, + struct mpq_streambuffer *stream_buffer, + struct dmx_data_ready *data, + int cookie) +{ + struct dmx_pts_dts_info *pts_dts; + + pts_dts = &meta_data->info.pes.pts_dts_info; + data->buf.stc = meta_data->info.pes.stc; + + data->data_length = 0; + data->buf.handle = packet->raw_data_handle; + data->buf.cookie = cookie; + data->buf.offset = packet->raw_data_offset; + data->buf.len = packet->raw_data_len; + data->buf.pts_exists = pts_dts->pts_exist; + data->buf.pts = pts_dts->pts; + data->buf.dts_exists = pts_dts->dts_exist; + data->buf.dts = pts_dts->dts; + data->buf.tei_counter = feed_data->tei_errs; + data->buf.cont_err_counter = feed_data->continuity_errs; + data->buf.ts_packets_num = feed_data->ts_packets_num; + data->buf.ts_dropped_bytes = feed_data->ts_dropped_bytes; + data->status = DMX_OK_DECODER_BUF; + + MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, data->buf.cookie); + + /* reset counters */ + feed_data->ts_packets_num = 0; + feed_data->ts_dropped_bytes = 0; + feed_data->tei_errs = 0; + feed_data->continuity_errs = 0; +} + +static int mpq_sdmx_dvr_buffer_desc(struct mpq_demux *mpq_demux, + struct sdmx_buff_descr *buf_desc) +{ + struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *) + mpq_demux->demux.dmx.dvr_input.ringbuff; + struct ion_handle *ion_handle = + mpq_demux->demux.dmx.dvr_input.priv_handle; + ion_phys_addr_t phys_addr; + size_t len; + int ret; + + ret = ion_phys(mpq_demux->ion_client, ion_handle, &phys_addr, &len); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: Failed to obtain physical address of input buffer. 
ret = %d\n", + __func__, ret); + return ret; + } + + buf_desc->base_addr = (u64)phys_addr; + buf_desc->size = rbuf->size; + + return 0; +} + +static inline int mpq_dmx_notify_overflow(struct dvb_demux_feed *feed) +{ + struct dmx_data_ready data; + + data.data_length = 0; + data.status = DMX_OVERRUN_ERROR; + return feed->data_ready_cb.ts(&feed->feed.ts, &data); +} + +/** + * mpq_dmx_decoder_frame_closure - Helper function to handle closing current + * pending frame upon reaching EOS. + * + * @mpq_demux - mpq demux instance + * @mpq_feed - mpq feed object + */ +static void mpq_dmx_decoder_frame_closure(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed) +{ + struct mpq_streambuffer_packet_header packet; + struct mpq_streambuffer *stream_buffer; + struct mpq_adapter_video_meta_data meta_data; + struct mpq_video_feed_info *feed_data; + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + struct dmx_data_ready data; + int cookie; + + feed_data = &mpq_feed->video_info; + + /* + * spin-lock is taken to protect against manipulation of video + * output buffer by the API (terminate video feed, re-use of video + * buffers). + */ + spin_lock(&feed_data->video_buffer_lock); + stream_buffer = feed_data->video_buffer; + + if (stream_buffer == NULL) { + MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__); + spin_unlock(&feed_data->video_buffer_lock); + return; + } + + /* Report last pattern found */ + if ((feed_data->pending_pattern_len) && + mpq_dmx_is_video_frame(feed->video_codec, + feed_data->last_framing_match_type)) { + meta_data.packet_type = DMX_FRAMING_INFO_PACKET; + mpq_dmx_write_pts_dts(feed_data, + &(meta_data.info.framing.pts_dts_info)); + mpq_dmx_save_pts_dts(feed_data); + packet.user_data_len = + sizeof(struct mpq_adapter_video_meta_data); + packet.raw_data_len = feed_data->pending_pattern_len; + packet.raw_data_offset = feed_data->frame_offset; + meta_data.info.framing.pattern_type = + feed_data->last_framing_match_type; + meta_data.info.framing.stc = feed_data->last_framing_match_stc; + meta_data.info.framing.continuity_error_counter = + feed_data->continuity_errs; + meta_data.info.framing.transport_error_indicator_counter = + feed_data->tei_errs; + meta_data.info.framing.ts_dropped_bytes = + feed_data->ts_dropped_bytes; + meta_data.info.framing.ts_packets_num = + feed_data->ts_packets_num; + + mpq_streambuffer_get_buffer_handle(stream_buffer, + 0, /* current write buffer handle */ + &packet.raw_data_handle); + + mpq_dmx_update_decoder_stat(mpq_feed); + + /* Writing meta-data that includes the framing information */ + cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet, + (u8 *)&meta_data); + if (cookie >= 0) { + mpq_dmx_prepare_es_event_data(&packet, &meta_data, + feed_data, stream_buffer, &data, cookie); + feed->data_ready_cb.ts(&feed->feed.ts, &data); + } else { + MPQ_DVB_ERR_PRINT( + "%s: mpq_streambuffer_pkt_write failed, ret=%d\n", + __func__, cookie); + } + } + + spin_unlock(&feed_data->video_buffer_lock); +} + +/** + * mpq_dmx_decoder_pes_closure - Helper function to handle closing current PES + * upon reaching EOS. 
+ * + * @mpq_demux - mpq demux instance + * @mpq_feed - mpq feed object + */ +static void mpq_dmx_decoder_pes_closure(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed) +{ + struct mpq_streambuffer_packet_header packet; + struct mpq_streambuffer *stream_buffer; + struct mpq_adapter_video_meta_data meta_data; + struct mpq_video_feed_info *feed_data; + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + struct dmx_data_ready data; + int cookie; + + feed_data = &mpq_feed->video_info; + + /* + * spin-lock is taken to protect against manipulation of video + * output buffer by the API (terminate video feed, re-use of video + * buffers). + */ + spin_lock(&feed_data->video_buffer_lock); + stream_buffer = feed_data->video_buffer; + + if (stream_buffer == NULL) { + MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__); + spin_unlock(&feed_data->video_buffer_lock); + return; + } + + /* + * Close previous PES. + * Push new packet to the meta-data buffer. + */ + if ((feed->pusi_seen) && (feed_data->pes_header_left_bytes == 0)) { + packet.raw_data_len = feed->peslen; + mpq_streambuffer_get_buffer_handle(stream_buffer, + 0, /* current write buffer handle */ + &packet.raw_data_handle); + packet.raw_data_offset = feed_data->frame_offset; + packet.user_data_len = + sizeof(struct mpq_adapter_video_meta_data); + + mpq_dmx_write_pts_dts(feed_data, + &(meta_data.info.pes.pts_dts_info)); + + meta_data.packet_type = DMX_PES_PACKET; + meta_data.info.pes.stc = feed_data->prev_stc; + + mpq_dmx_update_decoder_stat(mpq_feed); + + cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet, + (u8 *)&meta_data); + if (cookie >= 0) { + /* Save write offset where new PES will begin */ + mpq_streambuffer_get_data_rw_offset(stream_buffer, NULL, + &feed_data->frame_offset); + mpq_dmx_prepare_es_event_data(&packet, &meta_data, + feed_data, stream_buffer, &data, cookie); + feed->data_ready_cb.ts(&feed->feed.ts, &data); + } else { + MPQ_DVB_ERR_PRINT( + "%s: mpq_streambuffer_pkt_write failed, ret=%d\n", + __func__, cookie); + } + } + /* Reset PES info */ + feed->peslen = 0; + feed_data->pes_header_offset = 0; + feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN; + + spin_unlock(&feed_data->video_buffer_lock); +} + +/* + * in audio handling although ES frames are send to decoder, close the + * pes packet + */ +static void mpq_dmx_decoder_audio_pes_closure(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed) +{ + struct mpq_streambuffer_packet_header packet; + struct mpq_streambuffer *stream_buffer; + struct mpq_adapter_audio_meta_data meta_data; + struct mpq_audio_feed_info *feed_data; + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + struct dmx_data_ready data; + int cookie; + + feed_data = &mpq_feed->audio_info; + + /* + * spin-lock is taken to protect against manipulation of audio + * output buffer by the API (terminate audio feed, re-use of audio + * buffers). + */ + spin_lock(&feed_data->audio_buffer_lock); + stream_buffer = feed_data->audio_buffer; + + if (stream_buffer == NULL) { + MPQ_DVB_DBG_PRINT("%s: audio_buffer released\n", __func__); + spin_unlock(&feed_data->audio_buffer_lock); + return; + } + + /* + * Close previous PES. + * Push new packet to the meta-data buffer. 
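+	 * This is done only when a complete PES header was received, i.e.
+	 * pusi_seen is set and pes_header_left_bytes is zero.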
+ */ + if ((feed->pusi_seen) && (feed_data->pes_header_left_bytes == 0)) { + packet.raw_data_len = feed->peslen; + mpq_streambuffer_get_buffer_handle(stream_buffer, + 0, /* current write buffer handle */ + &packet.raw_data_handle); + packet.raw_data_offset = feed_data->frame_offset; + packet.user_data_len = + sizeof(struct mpq_adapter_audio_meta_data); + + mpq_dmx_write_audio_pts_dts(feed_data, + &(meta_data.info.pes.pts_dts_info)); + + meta_data.packet_type = DMX_PES_PACKET; + meta_data.info.pes.stc = feed_data->prev_stc; + + mpq_dmx_update_decoder_stat(mpq_feed); + + cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet, + (u8 *)&meta_data); + if (cookie >= 0) { + /* Save write offset where new PES will begin */ + mpq_streambuffer_get_data_rw_offset(stream_buffer, NULL, + &feed_data->frame_offset); + mpq_dmx_prepare_audio_es_event_data(&packet, &meta_data, + feed_data, stream_buffer, &data, cookie); + feed->data_ready_cb.ts(&feed->feed.ts, &data); + } else { + MPQ_DVB_ERR_PRINT( + "%s: mpq_sb_pkt_write failed, ret=%d\n", + __func__, cookie); + } + } + /* Reset PES info */ + feed->peslen = 0; + feed_data->pes_header_offset = 0; + feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN; + + spin_unlock(&feed_data->audio_buffer_lock); +} + +static int mpq_dmx_process_video_packet_framing( + struct dvb_demux_feed *feed, + const u8 *buf, + u64 curr_stc) +{ + int bytes_avail; + u32 ts_payload_offset; + struct mpq_video_feed_info *feed_data; + const struct ts_packet_header *ts_header; + struct mpq_streambuffer *stream_buffer; + struct pes_packet_header *pes_header; + struct mpq_demux *mpq_demux; + struct mpq_feed *mpq_feed; + + struct dvb_dmx_video_patterns_results framing_res; + struct mpq_streambuffer_packet_header packet; + struct mpq_adapter_video_meta_data meta_data; + int bytes_written = 0; + int bytes_to_write = 0; + int found_patterns = 0; + int first_pattern = 0; + int i; + int is_video_frame = 0; + int pending_data_len = 0; + int ret = 0; + int discontinuity_indicator = 0; + struct dmx_data_ready data; + + mpq_demux = feed->demux->priv; + + mpq_feed = feed->priv; + feed_data = &mpq_feed->video_info; + + /* + * spin-lock is taken to protect against manipulation of video + * output buffer by the API (terminate video feed, re-use of video + * buffers). Mutex on the video-feed cannot be held here + * since SW demux holds a spin-lock while calling write_to_decoder + */ + spin_lock(&feed_data->video_buffer_lock); + stream_buffer = feed_data->video_buffer; + + if (stream_buffer == NULL) { + MPQ_DVB_DBG_PRINT( + "%s: video_buffer released\n", + __func__); + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + ts_header = (const struct ts_packet_header *)buf; + + pes_header = &feed_data->pes_header; + + /* Make sure this TS packet has a payload and not scrambled */ + if ((ts_header->sync_byte != 0x47) || + (ts_header->adaptation_field_control == 0) || + (ts_header->adaptation_field_control == 2) || + (ts_header->transport_scrambling_control)) { + /* continue to next packet */ + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + if (ts_header->payload_unit_start_indicator) { /* PUSI? */ + if (feed->pusi_seen) { /* Did we see PUSI before? */ + /* + * Double check that we are not in middle of + * previous PES header parsing. 
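+			 * If header bytes are still pending, the previous PES
+			 * was truncated; an error is logged and the parser
+			 * state is reset below.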
+ */ + if (feed_data->pes_header_left_bytes != 0) + MPQ_DVB_ERR_PRINT( + "%s: received PUSI while handling PES header of previous PES\n", + __func__); + + feed->peslen = 0; + feed_data->pes_header_offset = 0; + feed_data->pes_header_left_bytes = + PES_MANDATORY_FIELDS_LEN; + } else { + feed->pusi_seen = 1; + } + } + + /* + * Parse PES data only if PUSI was encountered, + * otherwise the data is dropped + */ + if (!feed->pusi_seen) { + spin_unlock(&feed_data->video_buffer_lock); + return 0; /* drop and wait for next packets */ + } + + ts_payload_offset = sizeof(struct ts_packet_header); + + /* + * Skip adaptation field if exists. + * Save discontinuity indicator if exists. + */ + if (ts_header->adaptation_field_control == 3) { + const struct ts_adaptation_field *adaptation_field = + (const struct ts_adaptation_field *)(buf + + ts_payload_offset); + + discontinuity_indicator = + adaptation_field->discontinuity_indicator; + ts_payload_offset += buf[ts_payload_offset] + 1; + } + + bytes_avail = TS_PACKET_SIZE - ts_payload_offset; + + /* Get the mandatory fields of the video PES header */ + if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data, + pes_header, buf, + &ts_payload_offset, + &bytes_avail)) { + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + if (mpq_dmx_parse_remaining_pes_header(feed, feed_data, + pes_header, buf, + &ts_payload_offset, + &bytes_avail)) { + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + /* + * If we reached here, + * then we are now at the PES payload data + */ + if (bytes_avail == 0) { + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + /* + * the decoder requires demux to do framing, + * so search for the patterns now. + */ + found_patterns = dvb_dmx_video_pattern_search( + feed_data->patterns, + feed_data->patterns_num, + (buf + ts_payload_offset), + bytes_avail, + &feed_data->prefix_size, + &framing_res); + + if (!feed_data->found_sequence_header_pattern) { + for (i = 0; i < found_patterns; i++) { + if ((framing_res.info[i].type == + DMX_IDX_MPEG_SEQ_HEADER) || + (framing_res.info[i].type == + DMX_IDX_H264_SPS) || + (framing_res.info[i].type == + DMX_IDX_VC1_SEQ_HEADER)) { + + MPQ_DVB_DBG_PRINT( + "%s: Found Sequence Pattern, buf %p, i = %d, offset = %d, type = %lld\n", + __func__, buf, i, + framing_res.info[i].offset, + framing_res.info[i].type); + + first_pattern = i; + feed_data->found_sequence_header_pattern = 1; + ts_payload_offset += + framing_res.info[i].offset; + bytes_avail -= framing_res.info[i].offset; + + if (framing_res.info[i].used_prefix_size) { + feed_data->first_prefix_size = + framing_res.info[i]. + used_prefix_size; + } + break; + } + } + } + + /* + * If decoder requires demux to do framing, + * pass data to decoder only after sequence header + * or equivalent is found. Otherwise the data is dropped. 
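+	 * "Sequence header or equivalent" means an MPEG sequence header, an
+	 * H.264 SPS or a VC-1 sequence header, as matched by the pattern
+	 * search above.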
+ */ + if (!feed_data->found_sequence_header_pattern) { + feed_data->prev_stc = curr_stc; + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + /* Update error counters based on TS header */ + feed_data->ts_packets_num++; + feed_data->tei_errs += ts_header->transport_error_indicator; + mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors += + ts_header->transport_error_indicator; + mpq_dmx_check_continuity(feed_data, + ts_header->continuity_counter, + discontinuity_indicator); + mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors += + feed_data->continuity_errs; + + /* Need to back-up the PTS information of the very first frame */ + if (feed_data->first_pts_dts_copy) { + for (i = first_pattern; i < found_patterns; i++) { + is_video_frame = mpq_dmx_is_video_frame( + feed->video_codec, + framing_res.info[i].type); + + if (is_video_frame == 1) { + mpq_dmx_save_pts_dts(feed_data); + feed_data->first_pts_dts_copy = 0; + break; + } + } + } + + /* + * write prefix used to find first Sequence pattern, if needed. + * feed_data->patterns[0]->pattern always contains the sequence + * header pattern. + */ + if (feed_data->first_prefix_size) { + ret = mpq_streambuffer_data_write(stream_buffer, + feed_data->patterns[0]->pattern, + feed_data->first_prefix_size); + if (ret < 0) { + mpq_demux->decoder_stat + [feed_data->stream_interface].drop_count += + feed_data->first_prefix_size; + feed_data->ts_dropped_bytes += + feed_data->first_prefix_size; + MPQ_DVB_DBG_PRINT("%s: could not write prefix\n", + __func__); + if (ret == -ENOSPC) + mpq_dmx_notify_overflow(feed); + } else { + MPQ_DVB_DBG_PRINT( + "%s: Writing pattern prefix of size %d\n", + __func__, feed_data->first_prefix_size); + /* + * update the length of the data we report + * to include the size of the prefix that was used. + */ + feed_data->pending_pattern_len += + feed_data->first_prefix_size; + } + } + + feed->peslen += bytes_avail; + pending_data_len += bytes_avail; + + meta_data.packet_type = DMX_FRAMING_INFO_PACKET; + packet.user_data_len = sizeof(struct mpq_adapter_video_meta_data); + + /* + * Go over all the patterns that were found in this packet. + * For each pattern found, write the relevant data to the data + * buffer, then write the respective meta-data. + * Each pattern can only be reported when the next pattern is found + * (in order to know the data length). + * There are three possible cases for each pattern: + * 1. This is the very first pattern we found in any TS packet in this + * feed. + * 2. This is the first pattern found in this TS packet, but we've + * already found patterns in previous packets. + * 3. This is not the first pattern in this packet, i.e., we've + * already found patterns in this TS packet. + */ + for (i = first_pattern; i < found_patterns; i++) { + if (i == first_pattern) { + /* + * The way to identify the very first pattern: + * 1. It's the first pattern found in this packet. + * 2. The pending_pattern_len, which indicates the + * data length of the previous pattern that has + * not yet been reported, is usually 0. However, + * it may be larger than 0 if a prefix was used + * to find this pattern (i.e., the pattern was + * split over two TS packets). In that case, + * pending_pattern_len equals first_prefix_size. + * first_prefix_size is set to 0 later in this + * function. + */ + if (feed_data->first_prefix_size == + feed_data->pending_pattern_len) { + /* + * This is the very first pattern, so no + * previous pending frame data exists. 
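+				 * Reporting of this pattern is deferred until
+				 * the next pattern (or EOS) determines its
+				 * data length.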
+ * Update frame info and skip to the + * next frame. + */ + feed_data->last_framing_match_type = + framing_res.info[i].type; + feed_data->last_pattern_offset = + framing_res.info[i].offset; + if (framing_res.info[i].used_prefix_size) + feed_data->last_framing_match_stc = + feed_data->prev_stc; + else + feed_data->last_framing_match_stc = + curr_stc; + continue; + } + /* + * This is the first pattern in this + * packet and previous frame from + * previous packet is pending for report + */ + bytes_to_write = framing_res.info[i].offset; + } else { + /* Previous pending frame is in the same packet */ + bytes_to_write = + framing_res.info[i].offset - + feed_data->last_pattern_offset; + } + + ret = mpq_streambuffer_data_write( + stream_buffer, + (buf + ts_payload_offset + bytes_written), + bytes_to_write); + if (ret < 0) { + mpq_demux->decoder_stat + [feed_data->stream_interface].drop_count += + bytes_to_write; + feed_data->ts_dropped_bytes += bytes_to_write; + MPQ_DVB_DBG_PRINT( + "%s: Couldn't write %d bytes to data buffer, ret=%d\n", + __func__, bytes_to_write, ret); + if (ret == -ENOSPC) + mpq_dmx_notify_overflow(feed); + } else { + bytes_written += bytes_to_write; + pending_data_len -= bytes_to_write; + feed_data->pending_pattern_len += bytes_to_write; + } + non_predicted_video_frame = 0; + + is_video_frame = mpq_dmx_is_video_frame( + feed->video_codec, + feed_data->last_framing_match_type); + if (is_video_frame == 1) { + mpq_dmx_write_pts_dts(feed_data, + &(meta_data.info.framing.pts_dts_info)); + mpq_dmx_save_pts_dts(feed_data); + + packet.raw_data_len = feed_data->pending_pattern_len - + framing_res.info[i].used_prefix_size; + packet.raw_data_offset = feed_data->frame_offset; + meta_data.info.framing.pattern_type = + feed_data->last_framing_match_type; + meta_data.info.framing.stc = + feed_data->last_framing_match_stc; + meta_data.info.framing.continuity_error_counter = + feed_data->continuity_errs; + meta_data.info.framing. + transport_error_indicator_counter = + feed_data->tei_errs; + meta_data.info.framing.ts_dropped_bytes = + feed_data->ts_dropped_bytes; + meta_data.info.framing.ts_packets_num = + feed_data->ts_packets_num; + + mpq_streambuffer_get_buffer_handle( + stream_buffer, + 0, /* current write buffer handle */ + &packet.raw_data_handle); + + mpq_dmx_update_decoder_stat(mpq_feed); + + if (video_b_frame_events == 1) { + if (non_predicted_video_frame == 0) { + struct dmx_pts_dts_info *pts_dts; + + pts_dts = + &meta_data.info.framing.pts_dts_info; + pts_dts->pts_exist = 0; + pts_dts->pts = 0; + pts_dts->dts_exist = 0; + pts_dts->dts = 0; + } + } + /* + * Write meta-data that includes the framing information + */ + ret = mpq_streambuffer_pkt_write(stream_buffer, &packet, + (u8 *)&meta_data); + if (ret < 0) { + MPQ_DVB_ERR_PRINT + ("%s: mpq_sb_pkt_write failed ret=%d\n", + __func__, ret); + if (ret == -ENOSPC) + mpq_dmx_notify_overflow(feed); + } else { + mpq_dmx_prepare_es_event_data( + &packet, &meta_data, feed_data, + stream_buffer, &data, ret); + + /* Trigger ES Data Event for VPTS */ + feed->data_ready_cb.ts(&feed->feed.ts, &data); + + if (feed_data->video_buffer->mode == + MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) + feed_data->frame_offset = 0; + else + mpq_streambuffer_get_data_rw_offset( + feed_data->video_buffer, + NULL, + &feed_data->frame_offset); + } + + /* + * In linear buffers, after writing the packet + * we switched over to a new linear buffer for the new + * frame. In that case, we should re-write the prefix + * of the existing frame if any exists. 
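+			 * The prefix bytes are replayed from
+			 * feed_data->prev_pattern, which holds the tail of the
+			 * previous TS packet (saved at the end of this
+			 * function).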
+ */ + if ((MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR == + feed_data->video_buffer->mode) && + framing_res.info[i].used_prefix_size) { + ret = mpq_streambuffer_data_write(stream_buffer, + feed_data->prev_pattern + + DVB_DMX_MAX_PATTERN_LEN - + framing_res.info[i].used_prefix_size, + framing_res.info[i].used_prefix_size); + + if (ret < 0) { + feed_data->pending_pattern_len = 0; + mpq_demux->decoder_stat + [feed_data->stream_interface]. + drop_count += bytes_avail; + feed_data->ts_dropped_bytes += + framing_res.info[i].used_prefix_size; + if (ret == -ENOSPC) + mpq_dmx_notify_overflow(feed); + } else { + feed_data->pending_pattern_len = + framing_res.info[i].used_prefix_size; + } + } else { + s32 offset = (s32)feed_data->frame_offset; + u32 buff_size = + feed_data->video_buffer->buffers[0].size; + + offset -= framing_res.info[i].used_prefix_size; + offset += (offset < 0) ? buff_size : 0; + feed_data->pending_pattern_len = + framing_res.info[i].used_prefix_size; + + if (MPQ_STREAMBUFFER_BUFFER_MODE_RING == + feed_data->video_buffer->mode) { + feed_data->frame_offset = (u32)offset; + } + } + } + + /* save the last match for next time */ + feed_data->last_framing_match_type = + framing_res.info[i].type; + feed_data->last_pattern_offset = + framing_res.info[i].offset; + if (framing_res.info[i].used_prefix_size) + feed_data->last_framing_match_stc = feed_data->prev_stc; + else + feed_data->last_framing_match_stc = curr_stc; + } + + feed_data->prev_stc = curr_stc; + feed_data->first_prefix_size = 0; + + /* + * Save the trailing of the TS packet as we might have a pattern + * split that we need to re-use when closing the next + * video linear buffer. + */ + if (MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR == + feed_data->video_buffer->mode) + memcpy(feed_data->prev_pattern, + buf + TS_PACKET_SIZE - DVB_DMX_MAX_PATTERN_LEN, + DVB_DMX_MAX_PATTERN_LEN); + + if (pending_data_len) { + ret = mpq_streambuffer_data_write( + stream_buffer, + (buf + ts_payload_offset + bytes_written), + pending_data_len); + + if (ret < 0) { + mpq_demux->decoder_stat + [feed_data->stream_interface].drop_count += + pending_data_len; + feed_data->ts_dropped_bytes += pending_data_len; + MPQ_DVB_DBG_PRINT( + "%s: Couldn't write %d pending bytes to data buffer, ret=%d\n", + __func__, pending_data_len, ret); + if (ret == -ENOSPC) + mpq_dmx_notify_overflow(feed); + } else { + feed_data->pending_pattern_len += pending_data_len; + } + } + + spin_unlock(&feed_data->video_buffer_lock); + return 0; +} + +static int mpq_dmx_process_video_packet_no_framing( + struct dvb_demux_feed *feed, + const u8 *buf, + u64 curr_stc) +{ + int bytes_avail; + u32 ts_payload_offset; + struct mpq_video_feed_info *feed_data; + const struct ts_packet_header *ts_header; + struct mpq_streambuffer *stream_buffer; + struct pes_packet_header *pes_header; + struct mpq_demux *mpq_demux; + struct mpq_feed *mpq_feed; + int discontinuity_indicator = 0; + struct dmx_data_ready data; + int cookie; + int ret; + + mpq_demux = feed->demux->priv; + mpq_feed = feed->priv; + feed_data = &mpq_feed->video_info; + + /* + * spin-lock is taken to protect against manipulation of video + * output buffer by the API (terminate video feed, re-use of video + * buffers). 
Mutex on the video-feed cannot be held here + * since SW demux holds a spin-lock while calling write_to_decoder + */ + spin_lock(&feed_data->video_buffer_lock); + stream_buffer = feed_data->video_buffer; + if (stream_buffer == NULL) { + MPQ_DVB_DBG_PRINT( + "%s: video_buffer released\n", + __func__); + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + ts_header = (const struct ts_packet_header *)buf; + + pes_header = &feed_data->pes_header; + + /* Make sure this TS packet has a payload and not scrambled */ + if ((ts_header->sync_byte != 0x47) || + (ts_header->adaptation_field_control == 0) || + (ts_header->adaptation_field_control == 2) || + (ts_header->transport_scrambling_control)) { + /* continue to next packet */ + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + if (ts_header->payload_unit_start_indicator) { /* PUSI? */ + if (feed->pusi_seen) { /* Did we see PUSI before? */ + struct mpq_streambuffer_packet_header packet; + struct mpq_adapter_video_meta_data meta_data; + + /* + * Close previous PES. + * Push new packet to the meta-data buffer. + * Double check that we are not in middle of + * previous PES header parsing. + */ + + if (feed_data->pes_header_left_bytes == 0) { + packet.raw_data_len = feed->peslen; + mpq_streambuffer_get_buffer_handle( + stream_buffer, + 0, /* current write buffer handle */ + &packet.raw_data_handle); + packet.raw_data_offset = + feed_data->frame_offset; + packet.user_data_len = + sizeof(struct + mpq_adapter_video_meta_data); + + mpq_dmx_write_pts_dts(feed_data, + &(meta_data.info.pes.pts_dts_info)); + + /* Mark that we detected start of new PES */ + feed_data->first_pts_dts_copy = 1; + + meta_data.packet_type = DMX_PES_PACKET; + meta_data.info.pes.stc = feed_data->prev_stc; + + mpq_dmx_update_decoder_stat(mpq_feed); + + cookie = mpq_streambuffer_pkt_write( + stream_buffer, &packet, + (u8 *)&meta_data); + if (cookie < 0) { + MPQ_DVB_ERR_PRINT + ("%s: write failed, ret=%d\n", + __func__, cookie); + } else { + /* + * Save write offset where new PES + * will begin + */ + mpq_streambuffer_get_data_rw_offset( + stream_buffer, + NULL, + &feed_data->frame_offset); + + mpq_dmx_prepare_es_event_data( + &packet, &meta_data, + feed_data, + stream_buffer, &data, cookie); + + feed->data_ready_cb.ts(&feed->feed.ts, + &data); + } + } else { + MPQ_DVB_ERR_PRINT( + "%s: received PUSI while handling PES header of previous PES\n", + __func__); + } + + /* Reset PES info */ + feed->peslen = 0; + feed_data->pes_header_offset = 0; + feed_data->pes_header_left_bytes = + PES_MANDATORY_FIELDS_LEN; + } else { + feed->pusi_seen = 1; + } + + feed_data->prev_stc = curr_stc; + } + + /* + * Parse PES data only if PUSI was encountered, + * otherwise the data is dropped + */ + if (!feed->pusi_seen) { + spin_unlock(&feed_data->video_buffer_lock); + return 0; /* drop and wait for next packets */ + } + + ts_payload_offset = sizeof(struct ts_packet_header); + + /* + * Skip adaptation field if exists. + * Save discontinuity indicator if exists. 
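+	 * Note that the skip below adds one to adaptation_field_length to
+	 * account for the length byte itself.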
+ */ + if (ts_header->adaptation_field_control == 3) { + const struct ts_adaptation_field *adaptation_field = + (const struct ts_adaptation_field *)(buf + + ts_payload_offset); + + discontinuity_indicator = + adaptation_field->discontinuity_indicator; + ts_payload_offset += buf[ts_payload_offset] + 1; + } + + bytes_avail = TS_PACKET_SIZE - ts_payload_offset; + + /* Get the mandatory fields of the video PES header */ + if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data, + pes_header, buf, + &ts_payload_offset, + &bytes_avail)) { + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + if (mpq_dmx_parse_remaining_pes_header(feed, feed_data, + pes_header, buf, + &ts_payload_offset, + &bytes_avail)) { + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + /* + * If we reached here, + * then we are now at the PES payload data + */ + if (bytes_avail == 0) { + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + /* + * Need to back-up the PTS information + * of the start of new PES + */ + if (feed_data->first_pts_dts_copy) { + mpq_dmx_save_pts_dts(feed_data); + feed_data->first_pts_dts_copy = 0; + } + + /* Update error counters based on TS header */ + feed_data->ts_packets_num++; + feed_data->tei_errs += ts_header->transport_error_indicator; + mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors += + ts_header->transport_error_indicator; + mpq_dmx_check_continuity(feed_data, + ts_header->continuity_counter, + discontinuity_indicator); + mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors += + feed_data->continuity_errs; + + ret = mpq_streambuffer_data_write(stream_buffer, buf+ts_payload_offset, + bytes_avail); + if (ret < 0) { + mpq_demux->decoder_stat + [feed_data->stream_interface].drop_count += bytes_avail; + feed_data->ts_dropped_bytes += bytes_avail; + if (ret == -ENOSPC) + mpq_dmx_notify_overflow(feed); + } else { + feed->peslen += bytes_avail; + } + + spin_unlock(&feed_data->video_buffer_lock); + + return 0; +} + +/* + * parse PES headers and send down ES packets to decoder + * Trigger a new ES Data Event with APTS and QTimer in 1st PES + */ +static int mpq_dmx_process_audio_packet_no_framing( + struct dvb_demux_feed *feed, + const u8 *buf, + u64 curr_stc) +{ + int bytes_avail; + u32 ts_payload_offset; + struct mpq_audio_feed_info *feed_data; + const struct ts_packet_header *ts_header; + struct mpq_streambuffer *stream_buffer; + struct pes_packet_header *pes_header; + struct mpq_demux *mpq_demux; + struct mpq_feed *mpq_feed; + int discontinuity_indicator = 0; + struct dmx_data_ready data; + int cookie; + int ret; + + mpq_demux = feed->demux->priv; + mpq_feed = feed->priv; + feed_data = &mpq_feed->audio_info; + + /* + * spin-lock is taken to protect against manipulation of audio + * output buffer by the API (terminate audio feed, re-use of audio + * buffers). 
Mutex on the audio-feed cannot be held here + * since SW demux holds a spin-lock while calling write_to_decoder + */ + spin_lock(&feed_data->audio_buffer_lock); + stream_buffer = feed_data->audio_buffer; + if (stream_buffer == NULL) { + MPQ_DVB_DBG_PRINT( + "%s: audio_buffer released\n", + __func__); + spin_unlock(&feed_data->audio_buffer_lock); + return 0; + } + + ts_header = (const struct ts_packet_header *)buf; + + pes_header = &feed_data->pes_header; + + /* Make sure this TS packet has a payload and not scrambled */ + if ((ts_header->sync_byte != 0x47) || + (ts_header->adaptation_field_control == 0) || + (ts_header->adaptation_field_control == 2) || + (ts_header->transport_scrambling_control)) { + /* continue to next packet */ + spin_unlock(&feed_data->audio_buffer_lock); + return 0; + } + + if (ts_header->payload_unit_start_indicator) { /* PUSI? */ + if (feed->pusi_seen) { /* Did we see PUSI before? */ + struct mpq_streambuffer_packet_header packet; + struct mpq_adapter_audio_meta_data meta_data; + + /* + * Close previous PES. + * Push new packet to the meta-data buffer. + * Double check that we are not in middle of + * previous PES header parsing. + */ + + if (feed_data->pes_header_left_bytes == 0) { + packet.raw_data_len = feed->peslen; + mpq_streambuffer_get_buffer_handle( + stream_buffer, + 0, /* current write buffer handle */ + &packet.raw_data_handle); + packet.raw_data_offset = + feed_data->frame_offset; + packet.user_data_len = + sizeof(struct + mpq_adapter_audio_meta_data); + + mpq_dmx_write_audio_pts_dts(feed_data, + &(meta_data.info.pes.pts_dts_info)); + + /* Mark that we detected start of new PES */ + feed_data->first_pts_dts_copy = 1; + + meta_data.packet_type = DMX_PES_PACKET; + meta_data.info.pes.stc = feed_data->prev_stc; + + mpq_dmx_update_decoder_stat(mpq_feed); + + /* actual writing of stream audio headers */ + cookie = mpq_streambuffer_pkt_write( + stream_buffer, &packet, + (u8 *)&meta_data); + if (cookie < 0) { + MPQ_DVB_ERR_PRINT + ("%s: write failed, ret=%d\n", + __func__, cookie); + } else { + /* + * Save write offset where new PES + * will begin + */ + mpq_streambuffer_get_data_rw_offset( + stream_buffer, + NULL, + &feed_data->frame_offset); + + mpq_dmx_prepare_audio_es_event_data( + &packet, &meta_data, + feed_data, + stream_buffer, &data, cookie); + + /* + * Trigger ES data event for APTS + * and AFRAME + */ + feed->data_ready_cb.ts(&feed->feed.ts, + &data); + } + } else { + MPQ_DVB_ERR_PRINT( + "%s: received PUSI while handling PES header of previous PES\n", + __func__); + } + + /* Reset PES info */ + feed->peslen = 0; + feed_data->pes_header_offset = 0; + feed_data->pes_header_left_bytes = + PES_MANDATORY_FIELDS_LEN; + } else { + feed->pusi_seen = 1; + } + + feed_data->prev_stc = curr_stc; + } + + /* + * Parse PES data only if PUSI was encountered, + * otherwise the data is dropped + */ + if (!feed->pusi_seen) { + spin_unlock(&feed_data->audio_buffer_lock); + return 0; /* drop and wait for next packets */ + } + + ts_payload_offset = sizeof(struct ts_packet_header); + + /* + * Skip adaptation field if exists. + * Save discontinuity indicator if exists. + */ + if (ts_header->adaptation_field_control == 3) { + const struct ts_adaptation_field *adaptation_field = + (const struct ts_adaptation_field *)(buf + + ts_payload_offset); + + discontinuity_indicator = + adaptation_field->discontinuity_indicator; + ts_payload_offset += buf[ts_payload_offset] + 1; + } + + bytes_avail = TS_PACKET_SIZE - ts_payload_offset; + + /* The audio decoder requires ES packets ! 
*/ + + /* Get the mandatory fields of the audio PES header */ + if (mpq_dmx_parse_mandatory_audio_pes_header(feed, feed_data, + pes_header, buf, + &ts_payload_offset, + &bytes_avail)) { + spin_unlock(&feed_data->audio_buffer_lock); + return 0; + } + + if (mpq_dmx_parse_remaining_audio_pes_header(feed, feed_data, + pes_header, buf, + &ts_payload_offset, + &bytes_avail)) { + spin_unlock(&feed_data->audio_buffer_lock); + return 0; + } + + /* + * If we reached here, + * then we are now at the PES payload data + */ + if (bytes_avail == 0) { + spin_unlock(&feed_data->audio_buffer_lock); + return 0; + } + + /* + * Need to back-up the PTS information + * of the start of new PES + */ + if (feed_data->first_pts_dts_copy) { + mpq_dmx_save_audio_pts_dts(feed_data); + feed_data->first_pts_dts_copy = 0; + } + + /* Update error counters based on TS header */ + feed_data->ts_packets_num++; + feed_data->tei_errs += ts_header->transport_error_indicator; + mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors += + ts_header->transport_error_indicator; + mpq_dmx_check_audio_continuity(feed_data, + ts_header->continuity_counter, + discontinuity_indicator); + mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors += + feed_data->continuity_errs; + + /* actual writing of audio data for a stream */ + ret = mpq_streambuffer_data_write(stream_buffer, buf+ts_payload_offset, + bytes_avail); + if (ret < 0) { + mpq_demux->decoder_stat + [feed_data->stream_interface].drop_count += bytes_avail; + feed_data->ts_dropped_bytes += bytes_avail; + if (ret == -ENOSPC) + mpq_dmx_notify_overflow(feed); + } else { + feed->peslen += bytes_avail; + } + + spin_unlock(&feed_data->audio_buffer_lock); + + return 0; +} + +/* function ptr used in several places, handle differently */ +int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed, + struct dmx_buffer_status *dmx_buffer_status) +{ + + if (dvb_dmx_is_video_feed(feed)) { + struct mpq_demux *mpq_demux = feed->demux->priv; + struct mpq_video_feed_info *feed_data; + struct mpq_streambuffer *video_buff; + struct mpq_feed *mpq_feed; + + mutex_lock(&mpq_demux->mutex); + + mpq_feed = feed->priv; + feed_data = &mpq_feed->video_info; + video_buff = feed_data->video_buffer; + if (!video_buff) { + mutex_unlock(&mpq_demux->mutex); + return -EINVAL; + } + + dmx_buffer_status->error = video_buff->raw_data.error; + + if (video_buff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) { + dmx_buffer_status->fullness = + video_buff->buffers[0].size * + video_buff->pending_buffers_count; + dmx_buffer_status->free_bytes = + video_buff->buffers[0].size * + (video_buff->buffers_num - + video_buff->pending_buffers_count); + dmx_buffer_status->size = + video_buff->buffers[0].size * + video_buff->buffers_num; + } else { + dmx_buffer_status->fullness = + mpq_streambuffer_data_avail(video_buff); + dmx_buffer_status->free_bytes = + mpq_streambuffer_data_free(video_buff); + dmx_buffer_status->size = video_buff->buffers[0].size; + } + + mpq_streambuffer_get_data_rw_offset( + video_buff, + &dmx_buffer_status->read_offset, + &dmx_buffer_status->write_offset); + + mutex_unlock(&mpq_demux->mutex); + + } else if (dvb_dmx_is_audio_feed(feed)) { + struct mpq_demux *mpq_demux = feed->demux->priv; + struct mpq_audio_feed_info *feed_data; + struct mpq_streambuffer *audio_buff; + struct mpq_feed *mpq_feed; + + mutex_lock(&mpq_demux->mutex); + + mpq_feed = feed->priv; + feed_data = &mpq_feed->audio_info; + audio_buff = feed_data->audio_buffer; + if (!audio_buff) { + mutex_unlock(&mpq_demux->mutex); + 
return -EINVAL; + } + + dmx_buffer_status->error = audio_buff->raw_data.error; + + if (audio_buff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) { + dmx_buffer_status->fullness = + audio_buff->buffers[0].size * + audio_buff->pending_buffers_count; + dmx_buffer_status->free_bytes = + audio_buff->buffers[0].size * + (audio_buff->buffers_num - + audio_buff->pending_buffers_count); + dmx_buffer_status->size = + audio_buff->buffers[0].size * + audio_buff->buffers_num; + } else { + dmx_buffer_status->fullness = + mpq_streambuffer_data_avail(audio_buff); + dmx_buffer_status->free_bytes = + mpq_streambuffer_data_free(audio_buff); + dmx_buffer_status->size = audio_buff->buffers[0].size; + } + + mpq_streambuffer_get_data_rw_offset( + audio_buff, + &dmx_buffer_status->read_offset, + &dmx_buffer_status->write_offset); + + mutex_unlock(&mpq_demux->mutex); + } else { + MPQ_DVB_ERR_PRINT("%s: Invalid feed type %d\n", + __func__, feed->pes_type); + return -EINVAL; + } + return 0; +} + +int mpq_dmx_process_video_packet( + struct dvb_demux_feed *feed, + const u8 *buf) +{ + u64 curr_stc; + struct mpq_demux *mpq_demux = feed->demux->priv; + + if ((mpq_demux->source >= DMX_SOURCE_DVR0) && + (mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) { + curr_stc = 0; + } else { + if (mpq_demux->ts_packet_timestamp_source != + TSIF_TTS_LPASS_TIMER) { + curr_stc = buf[STC_LOCATION_IDX + 2] << 16; + curr_stc += buf[STC_LOCATION_IDX + 1] << 8; + curr_stc += buf[STC_LOCATION_IDX]; + curr_stc *= 256; /* convert from 105.47 KHZ to 27MHz */ + } else { + curr_stc = buf[STC_LOCATION_IDX + 3] << 24; + curr_stc += buf[STC_LOCATION_IDX + 2] << 16; + curr_stc += buf[STC_LOCATION_IDX + 1] << 8; + curr_stc += buf[STC_LOCATION_IDX]; + } + } + + if (!video_framing) + return mpq_dmx_process_video_packet_no_framing(feed, buf, + curr_stc); + else + return mpq_dmx_process_video_packet_framing(feed, buf, + curr_stc); +} + +int mpq_dmx_process_audio_packet( + struct dvb_demux_feed *feed, + const u8 *buf) +{ + u64 curr_stc; + struct mpq_demux *mpq_demux = feed->demux->priv; + + if ((mpq_demux->source >= DMX_SOURCE_DVR0) && + (mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) { + curr_stc = 0; + } else { + if (mpq_demux->ts_packet_timestamp_source != + TSIF_TTS_LPASS_TIMER) { + curr_stc = buf[STC_LOCATION_IDX + 2] << 16; + curr_stc += buf[STC_LOCATION_IDX + 1] << 8; + curr_stc += buf[STC_LOCATION_IDX]; + curr_stc *= 256; /* convert from 105.47 KHZ to 27MHz */ + } else { + curr_stc = buf[STC_LOCATION_IDX + 3] << 24; + curr_stc += buf[STC_LOCATION_IDX + 2] << 16; + curr_stc += buf[STC_LOCATION_IDX + 1] << 8; + curr_stc += buf[STC_LOCATION_IDX]; + } + } + + return mpq_dmx_process_audio_packet_no_framing(feed, buf, curr_stc); +} + +int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci) +{ + const struct ts_packet_header *ts_header; + const struct ts_adaptation_field *adaptation_field; + + if (buf == NULL || pcr == NULL || dci == NULL) + return 0; + + ts_header = (const struct ts_packet_header *)buf; + + /* Make sure this TS packet has a adaptation field */ + if ((ts_header->sync_byte != 0x47) || + (ts_header->adaptation_field_control == 0) || + (ts_header->adaptation_field_control == 1) || + ts_header->transport_error_indicator) + return 0; + + adaptation_field = (const struct ts_adaptation_field *) + (buf + sizeof(struct ts_packet_header)); + + if ((!adaptation_field->adaptation_field_length) || + (!adaptation_field->PCR_flag)) + return 0; /* 0 adaptation field or no PCR */ + + *pcr = 
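	/*
	 * A brief note on the arithmetic that follows, assuming the standard
	 * MPEG-2 TS PCR layout (ISO/IEC 13818-1): the 33-bit PCR base is
	 * split across five bit-fields of 8/8/8/8/1 bits, hence the shifts
	 * 25, 17, 9, 1 and 0 below, and the 9-bit extension across two
	 * fields of 1/8 bits, hence the shift 8. The base ticks at 90 kHz
	 * and the extension at 27 MHz, so the full PCR in 27 MHz units is
	 * base * 300 + ext (e.g. base = 1, ext = 150 gives 450 ticks of the
	 * 27 MHz clock).
	 */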
((u64)adaptation_field->program_clock_reference_base_1) << 25; + *pcr += ((u64)adaptation_field->program_clock_reference_base_2) << 17; + *pcr += ((u64)adaptation_field->program_clock_reference_base_3) << 9; + *pcr += ((u64)adaptation_field->program_clock_reference_base_4) << 1; + *pcr += adaptation_field->program_clock_reference_base_5; + *pcr *= 300; + *pcr += (((u64)adaptation_field->program_clock_reference_ext_1) << 8) + + adaptation_field->program_clock_reference_ext_2; + + *dci = adaptation_field->discontinuity_indicator; + + return 1; +} + +int mpq_dmx_process_pcr_packet( + struct dvb_demux_feed *feed, + const u8 *buf) +{ + u64 stc; + struct dmx_data_ready data; + struct mpq_demux *mpq_demux = feed->demux->priv; + + if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr, + &data.pcr.disc_indicator_set) == 0) + return 0; + + /* + * When we play from front-end, we configure HW + * to output the extra timestamp, if we are playing + * from DVR, we don't have a timestamp if the packet + * format is not 192-tail. + */ + if ((mpq_demux->source >= DMX_SOURCE_DVR0) && + (mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) { + stc = 0; + } else { + if (mpq_demux->ts_packet_timestamp_source != + TSIF_TTS_LPASS_TIMER) { + stc = buf[STC_LOCATION_IDX + 2] << 16; + stc += buf[STC_LOCATION_IDX + 1] << 8; + stc += buf[STC_LOCATION_IDX]; + stc *= 256; /* convert from 105.47 KHZ to 27MHz */ + } else { + stc = buf[STC_LOCATION_IDX + 3] << 24; + stc += buf[STC_LOCATION_IDX + 2] << 16; + stc += buf[STC_LOCATION_IDX + 1] << 8; + stc += buf[STC_LOCATION_IDX]; + } + } + + data.data_length = 0; + data.pcr.stc = stc; + data.status = DMX_OK_PCR; + feed->data_ready_cb.ts(&feed->feed.ts, &data); + + return 0; +} + +int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed, int feed_type) +{ + if (feed_type == 1) { /* video feed */ + struct mpq_video_feed_info *feed_data = &mpq_feed->video_info; + struct mpq_streambuffer *stream_buffer; + struct mpq_streambuffer_packet_header oob_packet; + struct mpq_adapter_video_meta_data oob_meta_data; + int ret; + + spin_lock(&feed_data->video_buffer_lock); + stream_buffer = feed_data->video_buffer; + + if (stream_buffer == NULL) { + MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", + __func__); + spin_unlock(&feed_data->video_buffer_lock); + return 0; + } + + memset(&oob_packet, 0, sizeof(oob_packet)); + oob_packet.user_data_len = sizeof(oob_meta_data); + oob_meta_data.packet_type = DMX_EOS_PACKET; + + ret = mpq_streambuffer_pkt_write(stream_buffer, &oob_packet, + (u8 *)&oob_meta_data); + + spin_unlock(&feed_data->video_buffer_lock); + return (ret < 0) ? ret : 0; + + } else if (feed_type == 2) { /* audio feed */ + struct mpq_audio_feed_info *feed_data = &mpq_feed->audio_info; + struct mpq_streambuffer *stream_buffer; + struct mpq_streambuffer_packet_header oob_packet; + struct mpq_adapter_audio_meta_data oob_meta_data; + int ret; + + spin_lock(&feed_data->audio_buffer_lock); + stream_buffer = feed_data->audio_buffer; + + if (stream_buffer == NULL) { + MPQ_DVB_DBG_PRINT("%s: audio_buffer released\n", + __func__); + spin_unlock(&feed_data->audio_buffer_lock); + return 0; + } + + memset(&oob_packet, 0, sizeof(oob_packet)); + oob_packet.user_data_len = sizeof(oob_meta_data); + oob_meta_data.packet_type = DMX_EOS_PACKET; + + ret = mpq_streambuffer_pkt_write(stream_buffer, &oob_packet, + (u8 *)&oob_meta_data); + + spin_unlock(&feed_data->audio_buffer_lock); + return (ret < 0) ? 
ret : 0; + } + + return 0; +} + +void mpq_dmx_convert_tts(struct dvb_demux_feed *feed, + const u8 timestamp[TIMESTAMP_LEN], + u64 *timestampIn27Mhz) +{ + struct mpq_demux *mpq_demux = feed->demux->priv; + + if (unlikely(!timestampIn27Mhz)) + return; + + if (mpq_demux->ts_packet_timestamp_source != TSIF_TTS_LPASS_TIMER) { + *timestampIn27Mhz = timestamp[2] << 16; + *timestampIn27Mhz += timestamp[1] << 8; + *timestampIn27Mhz += timestamp[0]; + *timestampIn27Mhz *= 256; /* convert from 105.47 KHZ to 27MHz */ + } else { + *timestampIn27Mhz = timestamp[3] << 24; + *timestampIn27Mhz += timestamp[2] << 16; + *timestampIn27Mhz += timestamp[1] << 8; + *timestampIn27Mhz += timestamp[0]; + } +} + +int mpq_sdmx_open_session(struct mpq_demux *mpq_demux) +{ + enum sdmx_status ret = SDMX_SUCCESS; + enum sdmx_proc_mode proc_mode; + enum sdmx_pkt_format pkt_format; + + MPQ_DVB_DBG_PRINT("%s: ref_count %d\n", + __func__, mpq_demux->sdmx_session_ref_count); + + if (mpq_demux->sdmx_session_ref_count) { + /* session is already open */ + mpq_demux->sdmx_session_ref_count++; + return ret; + } + + proc_mode = (mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) ? + SDMX_PUSH_MODE : SDMX_PULL_MODE; + MPQ_DVB_DBG_PRINT( + "%s: Proc mode = %s\n", + __func__, SDMX_PUSH_MODE == proc_mode ? "Push" : "Pull"); + + if (mpq_demux->source < DMX_SOURCE_DVR0) { + pkt_format = SDMX_192_BYTE_PKT; + } else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_188) { + pkt_format = SDMX_188_BYTE_PKT; + } else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL) { + pkt_format = SDMX_192_BYTE_PKT; + } else { + MPQ_DVB_ERR_PRINT("%s: invalid tsp format\n", __func__); + return -EINVAL; + } + + MPQ_DVB_DBG_PRINT("%s: (%s) source, packet format: %d\n", + __func__, + (mpq_demux->source < DMX_SOURCE_DVR0) ? + "frontend" : "DVR", pkt_format); + + /* open session and set configuration */ + ret = sdmx_open_session(&mpq_demux->sdmx_session_handle); + if (ret != SDMX_SUCCESS) { + MPQ_DVB_ERR_PRINT("%s: Could not open session. ret=%d\n", + __func__, ret); + return ret; + } + + MPQ_DVB_DBG_PRINT("%s: new session_handle = %d\n", + __func__, mpq_demux->sdmx_session_handle); + + ret = sdmx_set_session_cfg(mpq_demux->sdmx_session_handle, + proc_mode, + SDMX_PKT_ENC_MODE, + pkt_format, + mpq_sdmx_scramble_odd, + mpq_sdmx_scramble_even); + if (ret != SDMX_SUCCESS) { + MPQ_DVB_ERR_PRINT("%s: Could not set session config. ret=%d\n", + __func__, ret); + sdmx_close_session(mpq_demux->sdmx_session_handle); + mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE; + return -EINVAL; + } + + ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle, + mpq_demux->sdmx_log_level); + if (ret != SDMX_SUCCESS) { + MPQ_DVB_ERR_PRINT("%s: Could not set log level. 
ret=%d\n", + __func__, ret); + /* Don't fail open session if just log level setting failed */ + ret = 0; + } + + mpq_demux->sdmx_process_count = 0; + mpq_demux->sdmx_process_time_sum = 0; + mpq_demux->sdmx_process_time_average = 0; + mpq_demux->sdmx_process_time_max = 0; + mpq_demux->sdmx_process_packets_sum = 0; + mpq_demux->sdmx_process_packets_average = 0; + mpq_demux->sdmx_process_packets_min = 0; + + mpq_demux->sdmx_session_ref_count++; + return ret; +} + +int mpq_sdmx_close_session(struct mpq_demux *mpq_demux) +{ + int ret = 0; + enum sdmx_status status; + + MPQ_DVB_DBG_PRINT("%s: session_handle = %d, ref_count %d\n", + __func__, + mpq_demux->sdmx_session_handle, + mpq_demux->sdmx_session_ref_count); + + if (!mpq_demux->sdmx_session_ref_count) + return -EINVAL; + + if (mpq_demux->sdmx_session_ref_count == 1) { + status = sdmx_close_session(mpq_demux->sdmx_session_handle); + if (status != SDMX_SUCCESS) { + MPQ_DVB_ERR_PRINT("%s: sdmx_close_session failed %d\n", + __func__, status); + } + mpq_demux->sdmx_eos = 0; + mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE; + } + + mpq_demux->sdmx_session_ref_count--; + + return ret; +} + +static int mpq_sdmx_get_buffer_chunks(struct mpq_demux *mpq_demux, + struct ion_handle *buff_handle, + u32 actual_buff_size, + struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS]) +{ + int i; + struct sg_table *sg_ptr; + struct scatterlist *sg; + u32 chunk_size; + int ret; + + memset(buff_chunks, 0, + sizeof(struct sdmx_buff_descr) * SDMX_MAX_PHYSICAL_CHUNKS); + + sg_ptr = ion_sg_table(mpq_demux->ion_client, buff_handle); + if (IS_ERR_OR_NULL(sg_ptr)) { + ret = PTR_ERR(sg_ptr); + MPQ_DVB_ERR_PRINT("%s: ion_sg_table failed, ret=%d\n", + __func__, ret); + if (!ret) + ret = -EINVAL; + return ret; + } + + if (sg_ptr->nents == 0) { + MPQ_DVB_ERR_PRINT("%s: num of scattered entries is 0\n", + __func__); + return -EINVAL; + } + + if (sg_ptr->nents > SDMX_MAX_PHYSICAL_CHUNKS) { + MPQ_DVB_ERR_PRINT( + "%s: num of scattered entries %d greater than max supported %d\n", + __func__, sg_ptr->nents, SDMX_MAX_PHYSICAL_CHUNKS); + return -EINVAL; + } + + sg = sg_ptr->sgl; + for (i = 0; i < sg_ptr->nents; i++) { + buff_chunks[i].base_addr = (u64)sg_dma_address(sg); + + if (sg->length > actual_buff_size) + chunk_size = actual_buff_size; + else + chunk_size = sg->length; + + buff_chunks[i].size = chunk_size; + sg = sg_next(sg); + actual_buff_size -= chunk_size; + } + + return 0; +} + +static int mpq_sdmx_init_data_buffer(struct mpq_demux *mpq_demux, + struct mpq_feed *feed, u32 *num_buffers, + struct sdmx_data_buff_descr buf_desc[DMX_MAX_DECODER_BUFFER_NUM], + enum sdmx_buf_mode *buf_mode) +{ + struct dvb_demux_feed *dvbdmx_feed = feed->dvb_demux_feed; + struct dvb_ringbuffer *buffer; + struct mpq_video_feed_info *feed_data = &feed->video_info; + struct ion_handle *sdmx_buff; + int ret; + int i; + + *buf_mode = SDMX_RING_BUF; + + if (dvb_dmx_is_video_feed(feed->dvb_demux_feed)) { + if (feed_data->buffer_desc.decoder_buffers_num > 1) + *buf_mode = SDMX_LINEAR_GROUP_BUF; + *num_buffers = feed_data->buffer_desc.decoder_buffers_num; + + for (i = 0; i < *num_buffers; i++) { + buf_desc[i].length = + feed_data->buffer_desc.desc[i].size; + + ret = mpq_sdmx_get_buffer_chunks(mpq_demux, + feed_data->buffer_desc.ion_handle[i], + buf_desc[i].length, + buf_desc[i].buff_chunks); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_sdmx_get_buffer_chunks failed\n", + __func__); + return ret; + } + } + + return 0; + } + + *num_buffers = 1; + if (dvb_dmx_is_sec_feed(dvbdmx_feed) || + 
dvb_dmx_is_pcr_feed(dvbdmx_feed)) { + buffer = &feed->sdmx_buf; + sdmx_buff = feed->sdmx_buf_handle; + } else { + buffer = (struct dvb_ringbuffer *) + dvbdmx_feed->feed.ts.buffer.ringbuff; + sdmx_buff = dvbdmx_feed->feed.ts.buffer.priv_handle; + } + + if (sdmx_buff == NULL) { + MPQ_DVB_ERR_PRINT( + "%s: Invalid buffer allocation\n", + __func__); + return -ENOMEM; + } + + buf_desc[0].length = buffer->size; + ret = mpq_sdmx_get_buffer_chunks(mpq_demux, sdmx_buff, + buf_desc[0].length, + buf_desc[0].buff_chunks); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_sdmx_get_buffer_chunks failed\n", + __func__); + return ret; + } + + return 0; +} + +static int mpq_sdmx_filter_setup(struct mpq_demux *mpq_demux, + struct dvb_demux_feed *dvbdmx_feed) +{ + int ret = 0; + struct mpq_feed *feed; + struct mpq_feed *main_rec_feed = NULL; + struct dvb_demux_feed *tmp; + struct sdmx_buff_descr metadata_buff_desc; + struct sdmx_data_buff_descr *data_buff_desc = NULL; + u32 data_buf_num = DMX_MAX_DECODER_BUFFER_NUM; + enum sdmx_buf_mode buf_mode; + enum sdmx_raw_out_format ts_out_format = SDMX_188_OUTPUT; + u32 filter_flags = 0; + + feed = dvbdmx_feed->priv; + + if (dvb_dmx_is_sec_feed(dvbdmx_feed)) { + feed->filter_type = SDMX_SECTION_FILTER; + if (dvbdmx_feed->feed.sec.check_crc) + filter_flags |= SDMX_FILTER_FLAG_VERIFY_SECTION_CRC; + MPQ_DVB_DBG_PRINT("%s: SDMX_SECTION_FILTER\n", __func__); + } else if (dvb_dmx_is_pcr_feed(dvbdmx_feed)) { + feed->filter_type = SDMX_PCR_FILTER; + MPQ_DVB_DBG_PRINT("%s: SDMX_PCR_FILTER\n", __func__); + } else if (dvb_dmx_is_video_feed(dvbdmx_feed)) { + feed->filter_type = SDMX_SEPARATED_PES_FILTER; + MPQ_DVB_DBG_PRINT("%s: SDMX_SEPARATED_PES_FILTER\n", __func__); + } else if (dvb_dmx_is_rec_feed(dvbdmx_feed)) { + feed->filter_type = SDMX_RAW_FILTER; + switch (dvbdmx_feed->tsp_out_format) { + case (DMX_TSP_FORMAT_188): + ts_out_format = SDMX_188_OUTPUT; + break; + case (DMX_TSP_FORMAT_192_HEAD): + ts_out_format = SDMX_192_HEAD_OUTPUT; + break; + case (DMX_TSP_FORMAT_192_TAIL): + ts_out_format = SDMX_192_TAIL_OUTPUT; + break; + default: + MPQ_DVB_ERR_PRINT( + "%s: Unsupported TS output format %d\n", + __func__, dvbdmx_feed->tsp_out_format); + return -EINVAL; + } + MPQ_DVB_DBG_PRINT("%s: SDMX_RAW_FILTER\n", __func__); + } else { + feed->filter_type = SDMX_PES_FILTER; + MPQ_DVB_DBG_PRINT("%s: SDMX_PES_FILTER\n", __func__); + } + + data_buff_desc = vmalloc( + sizeof(*data_buff_desc)*DMX_MAX_DECODER_BUFFER_NUM); + if (!data_buff_desc) { + MPQ_DVB_ERR_PRINT( + "%s: failed to allocate memory for data buffer\n", + __func__); + return -ENOMEM; + } + + /* + * Recording feed sdmx filter handle lookup: + * In case this is a recording filter with multiple feeds, + * this feed is either the first feed of a new recording filter, + * or it is another feed of an existing filter for which a filter was + * already opened with sdmx. In such case, we need to look up in the + * feed pool for a allocated feed with same output buffer (meaning they + * belong to the same filter) and to use the already allocated sdmx + * filter handle. + */ + if (feed->filter_type == SDMX_RAW_FILTER) { + tmp = mpq_dmx_peer_rec_feed(dvbdmx_feed); + if (tmp) + main_rec_feed = tmp->priv; + } + + /* + * If this PID is not part of existing recording filter, + * configure a new filter to SDMX. 
+ */ + if (!main_rec_feed) { + feed->secondary_feed = 0; + + MPQ_DVB_DBG_PRINT( + "%s: Adding new sdmx filter, pid %d, flags=0x%X, ts_out_format=%d\n", + __func__, dvbdmx_feed->pid, filter_flags, + ts_out_format); + + /* Meta-data initialization, + * Recording filters do no need meta-data buffers. + */ + if (dvb_dmx_is_rec_feed(dvbdmx_feed)) { + metadata_buff_desc.base_addr = 0; + metadata_buff_desc.size = 0; + } else { + ret = mpq_sdmx_init_metadata_buffer(mpq_demux, feed, + &metadata_buff_desc); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: Failed to initialize metadata buffer. ret=%d\n", + __func__, ret); + goto sdmx_filter_setup_failed; + } + } + + ret = mpq_sdmx_init_data_buffer(mpq_demux, feed, &data_buf_num, + data_buff_desc, &buf_mode); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: Failed to initialize data buffer. ret=%d\n", + __func__, ret); + mpq_sdmx_terminate_metadata_buffer(feed); + goto sdmx_filter_setup_failed; + } + ret = sdmx_add_filter(mpq_demux->sdmx_session_handle, + dvbdmx_feed->pid, + feed->filter_type, + &metadata_buff_desc, + buf_mode, + data_buf_num, + data_buff_desc, + &feed->sdmx_filter_handle, + ts_out_format, + filter_flags); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: SDMX_add_filter failed. ret = %d\n", + __func__, ret); + ret = -ENODEV; + mpq_sdmx_terminate_metadata_buffer(feed); + goto sdmx_filter_setup_failed; + } + + MPQ_DVB_DBG_PRINT( + "%s: feed=0x%p, filter pid=%d, handle=%d, data buffer(s)=%d, size=%d\n", + __func__, feed, dvbdmx_feed->pid, + feed->sdmx_filter_handle, + data_buf_num, data_buff_desc[0].length); + + mpq_demux->sdmx_filter_count++; + } else { + MPQ_DVB_DBG_PRINT( + "%s: Adding RAW pid to sdmx, pid %d\n", + __func__, dvbdmx_feed->pid); + + feed->secondary_feed = 1; + feed->sdmx_filter_handle = main_rec_feed->sdmx_filter_handle; + ret = sdmx_add_raw_pid(mpq_demux->sdmx_session_handle, + feed->sdmx_filter_handle, dvbdmx_feed->pid); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: FAILED to add raw pid, ret=%d\n", + __func__, ret); + ret = -ENODEV; + goto sdmx_filter_setup_failed; + } + } + + /* + * If pid has a key ladder id associated, we need to + * set it to SDMX. + */ + if (dvbdmx_feed->secure_mode.is_secured && + dvbdmx_feed->cipher_ops.operations_count) { + MPQ_DVB_DBG_PRINT( + "%s: set key-ladder %d to PID %d\n", + __func__, + dvbdmx_feed->cipher_ops.operations[0].key_ladder_id, + dvbdmx_feed->cipher_ops.pid); + + ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle, + dvbdmx_feed->cipher_ops.pid, + dvbdmx_feed->cipher_ops.operations[0].key_ladder_id); + + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: FAILED to set key ladder, ret=%d\n", + __func__, ret); + } + } + + vfree(data_buff_desc); + return 0; + +sdmx_filter_setup_failed: + vfree(data_buff_desc); + return ret; +} + +/** + * mpq_sdmx_init_feed - initialize secure demux related elements of mpq feed + * + * @mpq_demux: mpq_demux object + * @mpq_feed: mpq_feed object + * + * Note: the function assumes mpq_demux->mutex locking is done by caller. 
+ */ +static int mpq_sdmx_init_feed(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed) +{ + int ret; + + ret = mpq_sdmx_open_session(mpq_demux); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_sdmx_open_session failed, ret=%d\n", + __func__, ret); + + ret = -ENODEV; + goto init_sdmx_feed_failed; + } + + /* PCR and sections have internal buffer for SDMX */ + if (dvb_dmx_is_pcr_feed(mpq_feed->dvb_demux_feed)) + ret = mpq_sdmx_alloc_data_buf(mpq_feed, SDMX_PCR_BUFFER_SIZE); + else if (dvb_dmx_is_sec_feed(mpq_feed->dvb_demux_feed)) + ret = mpq_sdmx_alloc_data_buf(mpq_feed, + SDMX_SECTION_BUFFER_SIZE); + else + ret = 0; + + if (ret) { + MPQ_DVB_ERR_PRINT("%s: init buffer failed, ret=%d\n", + __func__, ret); + goto init_sdmx_feed_failed_free_sdmx; + } + + ret = mpq_sdmx_filter_setup(mpq_demux, mpq_feed->dvb_demux_feed); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_sdmx_filter_setup failed, ret=%d\n", + __func__, ret); + goto init_sdmx_feed_failed_free_data_buff; + } + + mpq_demux->num_secure_feeds++; + return 0; + +init_sdmx_feed_failed_free_data_buff: + mpq_sdmx_free_data_buf(mpq_feed); +init_sdmx_feed_failed_free_sdmx: + mpq_sdmx_close_session(mpq_demux); +init_sdmx_feed_failed: + return ret; +} + +int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed) +{ + int ret = 0; + struct mpq_demux *mpq_demux = feed->demux->priv; + struct mpq_feed *mpq_feed = feed->priv; + + if (mutex_lock_interruptible(&mpq_demux->mutex)) + return -ERESTARTSYS; + + mpq_feed->sdmx_buf_handle = NULL; + mpq_feed->metadata_buf_handle = NULL; + mpq_feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE; + + if (feed->type != DMX_TYPE_SEC) + feed->feed.ts.flush_buffer = mpq_dmx_flush_buffer; + + if (dvb_dmx_is_video_feed(feed)) { + ret = mpq_dmx_init_video_feed(mpq_feed); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_init_video_feed failed, ret=%d\n", + __func__, ret); + goto init_mpq_feed_end; + } + } + + if (dvb_dmx_is_audio_feed(feed)) { + ret = mpq_dmx_init_audio_feed(mpq_feed); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_init_audio_feed failed, ret=%d\n", + __func__, ret); + goto init_mpq_feed_end; + } + } + + /* + * sdmx is not relevant for recording filters, which always use + * regular filters (non-sdmx) + */ + if (!mpq_sdmx_is_loaded() || !feed->secure_mode.is_secured || + dvb_dmx_is_rec_feed(feed)) { + if (!mpq_sdmx_is_loaded()) + mpq_demux->sdmx_session_handle = + SDMX_INVALID_SESSION_HANDLE; + goto init_mpq_feed_end; + } + + /* Initialization of secure demux filters (PES/PCR/Video/Section) */ + ret = mpq_sdmx_init_feed(mpq_demux, mpq_feed); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_sdmx_init_feed failed, ret=%d\n", + __func__, ret); + if (dvb_dmx_is_video_feed(feed)) + mpq_dmx_terminate_video_feed(mpq_feed); + else if (dvb_dmx_is_audio_feed(feed)) + mpq_dmx_terminate_audio_feed(mpq_feed); + } + +init_mpq_feed_end: + if (!ret) { + mpq_demux->num_active_feeds++; + mpq_feed->session_id++; + } + mutex_unlock(&mpq_demux->mutex); + return ret; +} + +/** + * Note: Called only when filter is in "GO" state - after feed has been started. 
+ */ +int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed, + struct dmx_cipher_operations *cipher_ops) +{ + struct mpq_feed *mpq_feed; + struct mpq_demux *mpq_demux; + int ret = 0; + + if (!feed || !feed->priv || !cipher_ops) { + MPQ_DVB_ERR_PRINT( + "%s: invalid parameters\n", + __func__); + return -EINVAL; + } + + MPQ_DVB_DBG_PRINT("%s(%d, %d, %d)\n", + __func__, cipher_ops->pid, + cipher_ops->operations_count, + cipher_ops->operations[0].key_ladder_id); + + if ((cipher_ops->operations_count > 1) || + (cipher_ops->operations_count && + cipher_ops->operations[0].encrypt)) { + MPQ_DVB_ERR_PRINT( + "%s: Invalid cipher operations, count=%d, encrypt=%d\n", + __func__, cipher_ops->operations_count, + cipher_ops->operations[0].encrypt); + return -EINVAL; + } + + if (!feed->secure_mode.is_secured) { + /* + * Filter is not configured as secured, setting cipher + * operations is not allowed. + */ + MPQ_DVB_ERR_PRINT( + "%s: Cannot set cipher operations to non-secure filter\n", + __func__); + return -EPERM; + } + + mpq_feed = feed->priv; + mpq_demux = mpq_feed->mpq_demux; + + mutex_lock(&mpq_demux->mutex); + + /* + * Feed is running in secure mode, this secure mode request is to + * update the key ladder id + */ + if ((mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) && + cipher_ops->operations_count) { + ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle, + cipher_ops->pid, + cipher_ops->operations[0].key_ladder_id); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: FAILED to set key ladder, ret=%d\n", + __func__, ret); + ret = -ENODEV; + } + } + + mutex_unlock(&mpq_demux->mutex); + + return ret; +} + +static int mpq_sdmx_invalidate_buffer(struct mpq_feed *mpq_feed) +{ + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + struct mpq_video_feed_info *feed_data; + struct dvb_ringbuffer *buffer; + struct ion_handle *ion_handle; + int ret = 0; + int i; + + if (!dvb_dmx_is_video_feed(feed)) { + if (dvb_dmx_is_sec_feed(feed) || + dvb_dmx_is_pcr_feed(feed)) { + buffer = (struct dvb_ringbuffer *) + &mpq_feed->sdmx_buf; + ion_handle = mpq_feed->sdmx_buf_handle; + } else { + buffer = (struct dvb_ringbuffer *) + feed->feed.ts.buffer.ringbuff; + ion_handle = feed->feed.ts.buffer.priv_handle; + } + + ret = msm_ion_do_cache_op(mpq_feed->mpq_demux->ion_client, + ion_handle, buffer->data, + buffer->size, ION_IOC_INV_CACHES); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: msm_ion_do_cache_op failed, ret = %d\n", + __func__, ret); + return ret; + } + + /* Video buffers */ + feed_data = &mpq_feed->video_info; + for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) { + if (feed_data->buffer_desc.desc[i].base) { + /* Non-secured buffer */ + ret = msm_ion_do_cache_op( + mpq_feed->mpq_demux->ion_client, + feed_data->buffer_desc.ion_handle[i], + feed_data->buffer_desc.desc[i].base, + feed_data->buffer_desc.desc[i].size, + ION_IOC_INV_CACHES); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: msm_ion_do_cache_op failed, ret = %d\n", + __func__, ret); + } + } + + return ret; +} + +static void mpq_sdmx_prepare_filter_status(struct mpq_demux *mpq_demux, + struct sdmx_filter_status *filter_sts, + struct mpq_feed *mpq_feed) +{ + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + struct mpq_video_feed_info *feed_data; + struct mpq_streambuffer *sbuff; + + filter_sts->filter_handle = mpq_feed->sdmx_filter_handle; + filter_sts->metadata_fill_count = + dvb_ringbuffer_avail(&mpq_feed->metadata_buf); + filter_sts->metadata_write_offset = mpq_feed->metadata_buf.pwrite; + filter_sts->error_indicators = 0; + 
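	/*
	 * The remaining status fields are filled in below according to the
	 * feed type: section/PCR feeds report the internal sdmx_buf, other
	 * non-video feeds report their output ring buffer, and video feeds
	 * report the decoder stream buffer. Note that in linear (multiple
	 * decoder buffer) mode the fill count is a number of pending buffers
	 * and the write offset is a descriptor index
	 * (raw_data.pwrite / sizeof(struct mpq_streambuffer_buffer_desc)),
	 * not a byte offset.
	 */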
filter_sts->status_indicators = 0; + + MPQ_DVB_DBG_PRINT( + "%s: Filter meta-data buffer status: fill count = %d, write_offset = %d\n", + __func__, filter_sts->metadata_fill_count, + filter_sts->metadata_write_offset); + + if (!dvb_dmx_is_video_feed(feed)) { + struct dvb_ringbuffer *buffer; + + if (dvb_dmx_is_sec_feed(feed) || + dvb_dmx_is_pcr_feed(feed)) { + buffer = (struct dvb_ringbuffer *) + &mpq_feed->sdmx_buf; + } else { + buffer = (struct dvb_ringbuffer *) + feed->feed.ts.buffer.ringbuff; + } + + filter_sts->data_fill_count = dvb_ringbuffer_avail(buffer); + filter_sts->data_write_offset = buffer->pwrite; + + MPQ_DVB_DBG_PRINT( + "%s: Filter buffers status: fill count = %d, write_offset = %d\n", + __func__, filter_sts->data_fill_count, + filter_sts->data_write_offset); + + return; + } + + /* Video feed - decoder buffers */ + feed_data = &mpq_feed->video_info; + + spin_lock(&mpq_feed->video_info.video_buffer_lock); + sbuff = feed_data->video_buffer; + if (sbuff == NULL) { + MPQ_DVB_DBG_PRINT( + "%s: video_buffer released\n", + __func__); + spin_unlock(&feed_data->video_buffer_lock); + return; + } + + if (feed_data->buffer_desc.decoder_buffers_num > 1) { + /* linear mode */ + filter_sts->data_fill_count = sbuff->pending_buffers_count; + filter_sts->data_write_offset = + sbuff->raw_data.pwrite / + sizeof(struct mpq_streambuffer_buffer_desc); + } else { + /* ring buffer mode */ + filter_sts->data_fill_count = + mpq_streambuffer_data_avail(sbuff); + mpq_streambuffer_get_data_rw_offset(sbuff, NULL, + &filter_sts->data_write_offset); + + } + + spin_unlock(&mpq_feed->video_info.video_buffer_lock); + + MPQ_DVB_DBG_PRINT( + "%s: Decoder buffers filter status: fill count = %d, write_offset = %d\n", + __func__, filter_sts->data_fill_count, + filter_sts->data_write_offset); +} + +static int mpq_sdmx_section_filtering(struct mpq_feed *mpq_feed, + struct dvb_demux_filter *f, + struct sdmx_metadata_header *header) +{ + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + int ret; + u8 neq = 0; + u8 xor; + u8 tmp; + int i; + + if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) { + MPQ_DVB_ERR_PRINT( + "%s: Mutex should have been locked\n", + __func__); + return -EINVAL; + } + + for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) { + tmp = DVB_RINGBUFFER_PEEK(&mpq_feed->sdmx_buf, i); + xor = f->filter.filter_value[i] ^ tmp; + + if (f->maskandmode[i] & xor) + return 0; + + neq |= f->maskandnotmode[i] & xor; + } + + if (f->doneq && !neq) + return 0; + + if (feed->demux->playback_mode == DMX_PB_MODE_PULL) { + mutex_unlock(&mpq_feed->mpq_demux->mutex); + + ret = feed->demux->buffer_ctrl.sec(&f->filter, + header->payload_length, 1); + + mutex_lock(&mpq_feed->mpq_demux->mutex); + + if (ret) { + MPQ_DVB_DBG_PRINT( + "%s: buffer_ctrl.sec aborted\n", + __func__); + return ret; + } + + if (mpq_feed->sdmx_filter_handle == + SDMX_INVALID_FILTER_HANDLE) { + MPQ_DVB_DBG_PRINT("%s: filter was stopped\n", + __func__); + return -ENODEV; + } + } + + if (mpq_feed->sdmx_buf.pread + header->payload_length < + mpq_feed->sdmx_buf.size) { + feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread], + header->payload_length, + NULL, 0, &f->filter); + } else { + int split = mpq_feed->sdmx_buf.size - mpq_feed->sdmx_buf.pread; + + feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread], + split, + &mpq_feed->sdmx_buf.data[0], + header->payload_length - split, + &f->filter); + } + + return 0; +} + +static int mpq_sdmx_check_ts_stall(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed, + struct sdmx_filter_status 
*sts, + size_t req, + int events_only) +{ + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + int ret; + + if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) { + MPQ_DVB_ERR_PRINT( + "%s: Mutex should have been locked\n", + __func__); + return -EINVAL; + } + + /* + * For PULL mode need to verify there is enough space for the dmxdev + * event. Also, if data buffer is full we want to stall until some + * data is removed from it to prevent calling the sdmx when it cannot + * output data to the still full buffer. + */ + if (mpq_demux->demux.playback_mode == DMX_PB_MODE_PULL) { + MPQ_DVB_DBG_PRINT("%s: Stalling for events and %zu bytes\n", + __func__, req); + + mutex_unlock(&mpq_demux->mutex); + + ret = mpq_demux->demux.buffer_ctrl.ts(&feed->feed.ts, req, 1); + MPQ_DVB_DBG_PRINT("%s: stall result = %d\n", + __func__, ret); + + mutex_lock(&mpq_demux->mutex); + + if (mpq_feed->sdmx_filter_handle == + SDMX_INVALID_FILTER_HANDLE) { + MPQ_DVB_DBG_PRINT("%s: filter was stopped\n", + __func__); + return -ENODEV; + } + + return ret; + } + + return 0; +} + +/* Handle filter results for filters with no extra meta-data */ +static void mpq_sdmx_pes_filter_results(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed, + struct sdmx_filter_status *sts) +{ + int ret; + struct sdmx_metadata_header header; + struct sdmx_pes_counters counters; + struct dmx_data_ready data_event; + struct dmx_data_ready pes_event; + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + struct dvb_ringbuffer *buf = (struct dvb_ringbuffer *) + feed->feed.ts.buffer.ringbuff; + ssize_t bytes_avail; + + if ((!sts->metadata_fill_count) && (!sts->data_fill_count)) + goto pes_filter_check_overflow; + + MPQ_DVB_DBG_PRINT( + "%s: Meta: fill=%u, write=%u. Data: fill=%u, write=%u\n", + __func__, sts->metadata_fill_count, sts->metadata_write_offset, + sts->data_fill_count, sts->data_write_offset); + + mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset; + + if ((sts->metadata_fill_count == 0) && + (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) { + ssize_t free = dvb_ringbuffer_free(buf); + + ret = 0; + if ((free + SZ_2K) < MAX_PES_LENGTH) + ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts, + free + SZ_2K, 0); + else + MPQ_DVB_ERR_PRINT( + "%s: Cannot stall when free space bigger than max PES size\n", + __func__); + if (ret) { + MPQ_DVB_DBG_PRINT( + "%s: mpq_sdmx_check_ts_stall aborted\n", + __func__); + return; + } + } + + while (sts->metadata_fill_count) { + bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf); + if (bytes_avail < (sizeof(header) + sizeof(counters))) { + MPQ_DVB_ERR_PRINT( + "%s: metadata_fill_count is %d less than required %zu bytes\n", + __func__, + sts->metadata_fill_count, + sizeof(header) + sizeof(counters)); + + /* clean-up remaining bytes to try to recover */ + DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf, + bytes_avail); + sts->metadata_fill_count = 0; + break; + } + + dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header, + sizeof(header)); + MPQ_DVB_DBG_PRINT( + "%s: metadata header: start=%u, length=%u\n", + __func__, header.payload_start, header.payload_length); + sts->metadata_fill_count -= sizeof(header); + + dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters, + sizeof(counters)); + sts->metadata_fill_count -= sizeof(counters); + + /* Notify new data in buffer */ + data_event.status = DMX_OK; + data_event.data_length = header.payload_length; + ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts, + data_event.data_length, 0); + if (ret) { + 
MPQ_DVB_DBG_PRINT( + "%s: mpq_sdmx_check_ts_stall aborted\n", + __func__); + return; + } + + feed->data_ready_cb.ts(&feed->feed.ts, &data_event); + + /* Notify new complete PES */ + pes_event.status = DMX_OK_PES_END; + pes_event.pes_end.actual_length = header.payload_length; + pes_event.pes_end.start_gap = 0; + pes_event.data_length = 0; + + /* Parse error indicators */ + if (sts->error_indicators & SDMX_FILTER_ERR_INVALID_PES_LEN) + pes_event.pes_end.pes_length_mismatch = 1; + else + pes_event.pes_end.pes_length_mismatch = 0; + + pes_event.pes_end.disc_indicator_set = 0; + + pes_event.pes_end.stc = 0; + pes_event.pes_end.tei_counter = counters.transport_err_count; + pes_event.pes_end.cont_err_counter = + counters.continuity_err_count; + pes_event.pes_end.ts_packets_num = + counters.pes_ts_count; + + ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts, 0, 1); + if (ret) { + MPQ_DVB_DBG_PRINT( + "%s: mpq_sdmx_check_ts_stall aborted\n", + __func__); + return; + } + feed->data_ready_cb.ts(&feed->feed.ts, &pes_event); + } + +pes_filter_check_overflow: + if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) && + (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) { + MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__); + mpq_dmx_notify_overflow(feed); + } + + if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) { + data_event.data_length = 0; + data_event.status = DMX_OK_EOS; + feed->data_ready_cb.ts(&feed->feed.ts, &data_event); + } +} + +static void mpq_sdmx_section_filter_results(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed, + struct sdmx_filter_status *sts) +{ + struct sdmx_metadata_header header; + struct dmx_data_ready event; + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + struct dvb_demux_filter *f; + struct dmx_section_feed *sec = &feed->feed.sec; + ssize_t bytes_avail; + + /* Parse error indicators */ + if (sts->error_indicators & SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL) { + MPQ_DVB_DBG_PRINT("%s: Notify CRC err event\n", __func__); + event.status = DMX_CRC_ERROR; + event.data_length = 0; + dvb_dmx_notify_section_event(feed, &event, 1); + } + + if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL) + MPQ_DVB_ERR_PRINT("%s: internal section buffer overflowed!\n", + __func__); + + if ((!sts->metadata_fill_count) && (!sts->data_fill_count)) + goto section_filter_check_eos; + + mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset; + mpq_feed->sdmx_buf.pwrite = sts->data_write_offset; + + while (sts->metadata_fill_count) { + bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf); + if (bytes_avail < sizeof(header)) { + MPQ_DVB_ERR_PRINT( + "%s: metadata_fill_count is %d less than required %zu bytes\n", + __func__, + sts->metadata_fill_count, + sizeof(header)); + + /* clean-up remaining bytes to try to recover */ + DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf, + bytes_avail); + sts->metadata_fill_count = 0; + break; + } + + dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header, + sizeof(header)); + sts->metadata_fill_count -= sizeof(header); + MPQ_DVB_DBG_PRINT( + "%s: metadata header: start=%u, length=%u\n", + __func__, header.payload_start, header.payload_length); + + f = feed->filter; + do { + if (mpq_sdmx_section_filtering(mpq_feed, f, &header)) + return; + } while ((f = f->next) && sec->is_filtering); + + DVB_RINGBUFFER_SKIP(&mpq_feed->sdmx_buf, header.payload_length); + } + +section_filter_check_eos: + if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) { + event.data_length = 0; + event.status = DMX_OK_EOS; + 
dvb_dmx_notify_section_event(feed, &event, 1); + } +} + +static void mpq_sdmx_decoder_filter_results(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed, + struct sdmx_filter_status *sts) +{ + struct sdmx_metadata_header header; + struct sdmx_pes_counters counters; + int pes_header_offset; + struct ts_packet_header *ts_header; + struct ts_adaptation_field *ts_adapt; + struct pes_packet_header *pes_header; + u8 metadata_buf[MAX_SDMX_METADATA_LENGTH]; + struct mpq_streambuffer *sbuf; + int ret; + struct dmx_data_ready data_event; + struct dmx_data_ready data; + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + ssize_t bytes_avail; + + if ((!sts->metadata_fill_count) && (!sts->data_fill_count)) + goto decoder_filter_check_flags; + + /* Update meta data buffer write pointer */ + mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset; + + if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PULL) && + (sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) { + MPQ_DVB_DBG_PRINT("%s: Decoder stall...\n", __func__); + + ret = mpq_dmx_decoder_fullness_check( + mpq_feed->dvb_demux_feed, 0, 0); + if (ret) { + /* we reach here if demuxing was aborted */ + MPQ_DVB_DBG_PRINT( + "%s: mpq_dmx_decoder_fullness_check aborted\n", + __func__); + return; + } + } + + while (sts->metadata_fill_count) { + struct mpq_streambuffer_packet_header packet; + struct mpq_adapter_video_meta_data meta_data; + + bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf); + if (bytes_avail < (sizeof(header) + sizeof(counters))) { + MPQ_DVB_ERR_PRINT( + "%s: metadata_fill_count is %d less than required %zu bytes\n", + __func__, + sts->metadata_fill_count, + sizeof(header) + sizeof(counters)); + + /* clean-up remaining bytes to try to recover */ + DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf, + bytes_avail); + sts->metadata_fill_count = 0; + break; + } + + /* Read metadata header */ + dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header, + sizeof(header)); + sts->metadata_fill_count -= sizeof(header); + MPQ_DVB_DBG_PRINT( + "%s: metadata header: start=%u, length=%u, metadata=%u\n", + __func__, header.payload_start, header.payload_length, + header.metadata_length); + + /* Read metadata - PES counters */ + dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters, + sizeof(counters)); + sts->metadata_fill_count -= sizeof(counters); + + /* Read metadata - TS & PES headers */ + bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf); + if ((header.metadata_length < MAX_SDMX_METADATA_LENGTH) && + (header.metadata_length >= sizeof(counters)) && + (bytes_avail >= + (header.metadata_length - sizeof(counters)))) { + dvb_ringbuffer_read(&mpq_feed->metadata_buf, + metadata_buf, + header.metadata_length - sizeof(counters)); + } else { + MPQ_DVB_ERR_PRINT( + "%s: meta-data size %d larger than available meta-data %zd or max allowed %d\n", + __func__, header.metadata_length, + bytes_avail, + MAX_SDMX_METADATA_LENGTH); + + /* clean-up remaining bytes to try to recover */ + DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf, + bytes_avail); + sts->metadata_fill_count = 0; + break; + } + + sts->metadata_fill_count -= + (header.metadata_length - sizeof(counters)); + + ts_header = (struct ts_packet_header *)&metadata_buf[0]; + if (ts_header->adaptation_field_control == 1) { + ts_adapt = NULL; + pes_header_offset = sizeof(*ts_header); + } else { + ts_adapt = (struct ts_adaptation_field *) + &metadata_buf[sizeof(*ts_header)]; + pes_header_offset = sizeof(*ts_header) + 1 + + ts_adapt->adaptation_field_length; + } + 
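		/*
		 * The PES header below is parsed for PTS/DTS. Assuming the
		 * standard MPEG-2 PES header layout (ISO/IEC 13818-1), each
		 * 33-bit timestamp is carried in five bit-fields of 3, 8, 7,
		 * 8 and 7 bits separated by marker bits, which is why the
		 * shifts used below are 30, 22, 15, 7 and 0
		 * (3 + 8 + 7 + 8 + 7 = 33 bits in total). Bit 1 of
		 * pts_dts_flag signals a PTS, bit 0 a DTS.
		 */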
pes_header = (struct pes_packet_header *) + &metadata_buf[pes_header_offset]; + meta_data.packet_type = DMX_PES_PACKET; + /* TODO - set to real STC when SDMX supports it */ + meta_data.info.pes.stc = 0; + + if (pes_header->pts_dts_flag & 0x2) { + meta_data.info.pes.pts_dts_info.pts_exist = 1; + meta_data.info.pes.pts_dts_info.pts = + ((u64)pes_header->pts_1 << 30) | + ((u64)pes_header->pts_2 << 22) | + ((u64)pes_header->pts_3 << 15) | + ((u64)pes_header->pts_4 << 7) | + (u64)pes_header->pts_5; + } else { + meta_data.info.pes.pts_dts_info.pts_exist = 0; + } + + if (pes_header->pts_dts_flag & 0x1) { + meta_data.info.pes.pts_dts_info.dts_exist = 1; + meta_data.info.pes.pts_dts_info.dts = + ((u64)pes_header->dts_1 << 30) | + ((u64)pes_header->dts_2 << 22) | + ((u64)pes_header->dts_3 << 15) | + ((u64)pes_header->dts_4 << 7) | + (u64)pes_header->dts_5; + } else { + meta_data.info.pes.pts_dts_info.dts_exist = 0; + } + + spin_lock(&mpq_feed->video_info.video_buffer_lock); + + mpq_feed->video_info.tei_errs = + counters.transport_err_count; + mpq_feed->video_info.continuity_errs = + counters.continuity_err_count; + mpq_feed->video_info.ts_packets_num = + counters.pes_ts_count; + mpq_feed->video_info.ts_dropped_bytes = + counters.drop_count * + mpq_demux->demux.ts_packet_size; + + sbuf = mpq_feed->video_info.video_buffer; + if (sbuf == NULL) { + MPQ_DVB_DBG_PRINT( + "%s: video_buffer released\n", + __func__); + spin_unlock(&mpq_feed->video_info.video_buffer_lock); + return; + } + + if (!header.payload_length) { + MPQ_DVB_DBG_PRINT( + "%s: warnning - video frame with 0 length, dropping\n", + __func__); + spin_unlock(&mpq_feed->video_info.video_buffer_lock); + continue; + } + + packet.raw_data_len = header.payload_length; + packet.user_data_len = sizeof(meta_data); + mpq_streambuffer_get_buffer_handle(sbuf, 0, + &packet.raw_data_handle); + mpq_streambuffer_get_data_rw_offset(sbuf, + NULL, &packet.raw_data_offset); + ret = mpq_streambuffer_data_write_deposit(sbuf, + header.payload_length); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_streambuffer_data_write_deposit failed. 
ret=%d\n", + __func__, ret); + } + mpq_dmx_update_decoder_stat(mpq_feed); + ret = mpq_streambuffer_pkt_write(sbuf, &packet, + (u8 *)&meta_data); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_streambuffer_pkt_write failed, ret=%d\n", + __func__, ret); + } else { + mpq_dmx_prepare_es_event_data( + &packet, &meta_data, &mpq_feed->video_info, + sbuf, &data, ret); + MPQ_DVB_DBG_PRINT("%s: Notify ES Event\n", __func__); + feed->data_ready_cb.ts(&feed->feed.ts, &data); + } + + spin_unlock(&mpq_feed->video_info.video_buffer_lock); + } + +decoder_filter_check_flags: + if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) && + (sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) { + MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__); + mpq_dmx_notify_overflow(mpq_feed->dvb_demux_feed); + } + + if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) { + /* Notify decoder via the stream buffer */ + ret = mpq_dmx_decoder_eos_cmd(mpq_feed, 1); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: Failed to notify decoder on EOS, ret=%d\n", + __func__, ret); + + /* Notify user filter */ + data_event.data_length = 0; + data_event.status = DMX_OK_EOS; + mpq_feed->dvb_demux_feed->data_ready_cb.ts( + &mpq_feed->dvb_demux_feed->feed.ts, &data_event); + } +} + +static void mpq_sdmx_pcr_filter_results(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed, + struct sdmx_filter_status *sts) +{ + int ret; + struct sdmx_metadata_header header; + struct dmx_data_ready data; + struct dvb_ringbuffer *rbuff = &mpq_feed->sdmx_buf; + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + u8 buf[TS_PACKET_HEADER_LENGTH + MAX_TSP_ADAPTATION_LENGTH + + TIMESTAMP_LEN]; + size_t stc_len = 0; + ssize_t bytes_avail; + + if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL) + MPQ_DVB_ERR_PRINT("%s: internal PCR buffer overflowed!\n", + __func__); + + if ((!sts->metadata_fill_count) && (!sts->data_fill_count)) + goto pcr_filter_check_eos; + + if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL) + stc_len = 4; + + mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset; + rbuff->pwrite = sts->data_write_offset; + + while (sts->metadata_fill_count) { + bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf); + if (bytes_avail < sizeof(header)) { + MPQ_DVB_ERR_PRINT( + "%s: metadata_fill_count is %d less than required %zu bytes\n", + __func__, + sts->metadata_fill_count, + sizeof(header)); + + /* clean-up remaining bytes to try to recover */ + DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf, + bytes_avail); + sts->metadata_fill_count = 0; + break; + } + + dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header, + sizeof(header)); + MPQ_DVB_DBG_PRINT( + "%s: metadata header: start=%u, length=%u\n", + __func__, header.payload_start, header.payload_length); + sts->metadata_fill_count -= sizeof(header); + + dvb_ringbuffer_read(rbuff, buf, header.payload_length); + + if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr, + &data.pcr.disc_indicator_set)) { + + if (stc_len) { + data.pcr.stc = + buf[header.payload_length-2] << 16; + data.pcr.stc += + buf[header.payload_length-3] << 8; + data.pcr.stc += buf[header.payload_length-4]; + /* convert from 105.47 KHZ to 27MHz */ + data.pcr.stc *= 256; + } else { + data.pcr.stc = 0; + } + + data.data_length = 0; + data.status = DMX_OK_PCR; + ret = mpq_sdmx_check_ts_stall( + mpq_demux, mpq_feed, sts, 0, 1); + if (ret) { + MPQ_DVB_DBG_PRINT( + "%s: mpq_sdmx_check_ts_stall aborted\n", + __func__); + return; + } + feed->data_ready_cb.ts(&feed->feed.ts, &data); + } + } + 
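	/*
	 * Note on the STC derived above: with the 192-byte "tail" packet
	 * format the last four bytes of each packet carry an appended
	 * timestamp, three of which are combined here. That counter runs at
	 * 27 MHz / 256 (~105.47 kHz), so multiplying by 256 rescales it to
	 * 27 MHz, the same clock domain as the PCR it is reported with.
	 */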
+pcr_filter_check_eos: + if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) { + data.data_length = 0; + data.status = DMX_OK_EOS; + feed->data_ready_cb.ts(&feed->feed.ts, &data); + } +} + +static void mpq_sdmx_raw_filter_results(struct mpq_demux *mpq_demux, + struct mpq_feed *mpq_feed, + struct sdmx_filter_status *sts) +{ + int ret; + ssize_t new_data; + struct dmx_data_ready data_event; + struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed; + struct dvb_ringbuffer *buf = (struct dvb_ringbuffer *) + feed->feed.ts.buffer.ringbuff; + + if ((!sts->metadata_fill_count) && (!sts->data_fill_count)) + goto raw_filter_check_flags; + + new_data = sts->data_write_offset - + buf->pwrite; + if (new_data < 0) + new_data += buf->size; + + ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts, + new_data + feed->demux->ts_packet_size, 0); + if (ret) { + MPQ_DVB_DBG_PRINT( + "%s: mpq_sdmx_check_ts_stall aborted\n", + __func__); + return; + } + + data_event.status = DMX_OK; + data_event.data_length = new_data; + feed->data_ready_cb.ts(&feed->feed.ts, &data_event); + MPQ_DVB_DBG_PRINT("%s: Callback DMX_OK, size=%d\n", + __func__, data_event.data_length); + +raw_filter_check_flags: + if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) && + (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) { + MPQ_DVB_DBG_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__); + mpq_dmx_notify_overflow(feed); + } + + if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) { + data_event.data_length = 0; + data_event.status = DMX_OK_EOS; + feed->data_ready_cb.ts(&feed->feed.ts, &data_event); + } + +} + +static void mpq_sdmx_process_results(struct mpq_demux *mpq_demux) +{ + int i; + int sdmx_filters; + struct sdmx_filter_status *sts; + struct mpq_feed *mpq_feed; + u8 mpq_feed_idx; + + sdmx_filters = mpq_demux->sdmx_filter_count; + for (i = 0; i < sdmx_filters; i++) { + sts = &mpq_demux->sdmx_filters_state.status[i]; + MPQ_DVB_DBG_PRINT( + "%s: Filter: handle=%d, status=0x%x, errors=0x%x\n", + __func__, sts->filter_handle, sts->status_indicators, + sts->error_indicators); + MPQ_DVB_DBG_PRINT("%s: Metadata fill count=%d (write=%d)\n", + __func__, sts->metadata_fill_count, + sts->metadata_write_offset); + MPQ_DVB_DBG_PRINT("%s: Data fill count=%d (write=%d)\n", + __func__, sts->data_fill_count, sts->data_write_offset); + + mpq_feed_idx = mpq_demux->sdmx_filters_state.mpq_feed_idx[i]; + mpq_feed = &mpq_demux->feeds[mpq_feed_idx]; + if ((mpq_feed->dvb_demux_feed->state != DMX_STATE_GO) || + (sts->filter_handle != mpq_feed->sdmx_filter_handle) || + mpq_feed->secondary_feed || + (mpq_demux->sdmx_filters_state.session_id[i] != + mpq_feed->session_id)) + continue; + + /* Invalidate output buffer before processing the results */ + mpq_sdmx_invalidate_buffer(mpq_feed); + + if (sts->error_indicators & SDMX_FILTER_ERR_MD_BUF_FULL) + MPQ_DVB_ERR_PRINT( + "%s: meta-data buff for pid %d overflowed!\n", + __func__, mpq_feed->dvb_demux_feed->pid); + + switch (mpq_feed->filter_type) { + case SDMX_PCR_FILTER: + mpq_sdmx_pcr_filter_results(mpq_demux, mpq_feed, sts); + break; + case SDMX_PES_FILTER: + mpq_sdmx_pes_filter_results(mpq_demux, mpq_feed, + sts); + break; + case SDMX_SEPARATED_PES_FILTER: + mpq_sdmx_decoder_filter_results(mpq_demux, mpq_feed, + sts); + break; + case SDMX_SECTION_FILTER: + mpq_sdmx_section_filter_results(mpq_demux, mpq_feed, + sts); + break; + case SDMX_RAW_FILTER: + mpq_sdmx_raw_filter_results(mpq_demux, mpq_feed, sts); + break; + default: + break; + } + } +} + +static int mpq_sdmx_process_buffer(struct mpq_demux 
*mpq_demux, + struct sdmx_buff_descr *input, + u32 fill_count, + u32 read_offset) +{ + struct sdmx_filter_status *sts; + struct mpq_feed *mpq_feed; + u8 flags = 0; + u32 errors; + u32 status; + u32 prev_read_offset; + u32 prev_fill_count; + enum sdmx_status sdmx_res; + int i; + int filter_index = 0; + int bytes_read; + ktime_t process_start_time; + ktime_t process_end_time; + + mutex_lock(&mpq_demux->mutex); + + /* + * All active filters may get totally closed and therefore + * sdmx session may get terminated, in such case nothing to process + */ + if (mpq_demux->sdmx_session_handle == SDMX_INVALID_SESSION_HANDLE) { + MPQ_DVB_DBG_PRINT( + "%s: sdmx filters aborted, filter-count %d, session %d\n", + __func__, mpq_demux->sdmx_filter_count, + mpq_demux->sdmx_session_handle); + mutex_unlock(&mpq_demux->mutex); + return 0; + } + + /* Set input flags */ + if (mpq_demux->sdmx_eos) + flags |= SDMX_INPUT_FLAG_EOS; + if (mpq_sdmx_debug) + flags |= SDMX_INPUT_FLAG_DBG_ENABLE; + + /* Build up to date filter status array */ + for (i = 0; i < MPQ_MAX_DMX_FILES; i++) { + mpq_feed = &mpq_demux->feeds[i]; + if ((mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE) + && (!mpq_feed->secondary_feed)) { + sts = mpq_demux->sdmx_filters_state.status + + filter_index; + mpq_sdmx_prepare_filter_status(mpq_demux, sts, + mpq_feed); + mpq_demux->sdmx_filters_state.mpq_feed_idx[filter_index] + = i; + mpq_demux->sdmx_filters_state.session_id[filter_index] = + mpq_feed->session_id; + filter_index++; + } + } + + /* Sanity check */ + if (filter_index != mpq_demux->sdmx_filter_count) { + mutex_unlock(&mpq_demux->mutex); + MPQ_DVB_ERR_PRINT( + "%s: Updated %d SDMX filters status but should be %d\n", + __func__, filter_index, mpq_demux->sdmx_filter_count); + return -ERESTART; + } + + MPQ_DVB_DBG_PRINT( + "%s: Before SDMX_process: input read_offset=%u, fill count=%u\n", + __func__, read_offset, fill_count); + + process_start_time = ktime_get(); + + prev_read_offset = read_offset; + prev_fill_count = fill_count; + sdmx_res = sdmx_process(mpq_demux->sdmx_session_handle, flags, input, + &fill_count, &read_offset, &errors, &status, + mpq_demux->sdmx_filter_count, + mpq_demux->sdmx_filters_state.status); + + process_end_time = ktime_get(); + bytes_read = prev_fill_count - fill_count; + + mpq_dmx_update_sdmx_stat(mpq_demux, bytes_read, + process_start_time, process_end_time); + + MPQ_DVB_DBG_PRINT( + "%s: SDMX result=%d, input_fill_count=%u, read_offset=%u, read %d bytes from input, status=0x%X, errors=0x%X\n", + __func__, sdmx_res, fill_count, read_offset, bytes_read, + status, errors); + + if ((sdmx_res == SDMX_SUCCESS) || + (sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE)) { + if (sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE) + MPQ_DVB_DBG_PRINT("%s: SDMX stalled for PULL mode\n", + __func__); + + mpq_sdmx_process_results(mpq_demux); + } else { + MPQ_DVB_ERR_PRINT( + "%s: SDMX Process returned %d\n", + __func__, sdmx_res); + } + + mutex_unlock(&mpq_demux->mutex); + + return bytes_read; +} + +int mpq_sdmx_process(struct mpq_demux *mpq_demux, + struct sdmx_buff_descr *input, + u32 fill_count, + u32 read_offset, + size_t tsp_size) +{ + int ret; + int todo; + int total_bytes_read = 0; + int limit = mpq_sdmx_proc_limit * tsp_size; + + MPQ_DVB_DBG_PRINT( + "\n\n%s: read_offset=%u, fill_count=%u, tsp_size=%zu\n", + __func__, read_offset, fill_count, tsp_size); + + while (fill_count >= tsp_size) { + todo = fill_count > limit ? 
limit : fill_count; + ret = mpq_sdmx_process_buffer(mpq_demux, input, todo, + read_offset); + + if (mpq_demux->demux.sw_filter_abort) { + MPQ_DVB_ERR_PRINT( + "%s: Demuxing from DVR was aborted\n", + __func__); + return -ENODEV; + } + + if (ret > 0) { + total_bytes_read += ret; + fill_count -= ret; + read_offset += ret; + if (read_offset >= input->size) + read_offset -= input->size; + } else { + /* + * ret < 0: some error occurred + * ret == 0: not enough data (less than 1 TS packet) + */ + if (ret < 0) + MPQ_DVB_ERR_PRINT( + "%s: mpq_sdmx_process_buffer failed, returned %d\n", + __func__, ret); + break; + } + } + + return total_bytes_read; +} + +static int mpq_sdmx_write(struct mpq_demux *mpq_demux, + struct ion_handle *input_handle, + const char *buf, + size_t count) +{ + struct ion_handle *ion_handle; + struct dvb_ringbuffer *rbuf; + struct sdmx_buff_descr buf_desc; + u32 read_offset; + int ret; + + if (mpq_demux == NULL || input_handle == NULL) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + ion_handle = mpq_demux->demux.dmx.dvr_input.priv_handle; + rbuf = (struct dvb_ringbuffer *)mpq_demux->demux.dmx.dvr_input.ringbuff; + + ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: Failed to init input buffer descriptor. ret = %d\n", + __func__, ret); + return ret; + } + read_offset = mpq_demux->demux.dmx.dvr_input.ringbuff->pread; + + + /* + * We must flush the buffer before SDMX starts reading from it + * so that it gets a valid data in memory. + */ + ret = msm_ion_do_cache_op(mpq_demux->ion_client, + ion_handle, rbuf->data, + rbuf->size, ION_IOC_CLEAN_CACHES); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: msm_ion_do_cache_op failed, ret = %d\n", + __func__, ret); + + return mpq_sdmx_process(mpq_demux, &buf_desc, count, + read_offset, mpq_demux->demux.ts_packet_size); +} + +int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count) +{ + struct dvb_demux *dvb_demux; + struct mpq_demux *mpq_demux; + int ret = count; + + if (demux == NULL) + return -EINVAL; + + dvb_demux = demux->priv; + mpq_demux = dvb_demux->priv; + + /* Route through secure demux - process secure feeds if any exist */ + if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) { + ret = mpq_sdmx_write(mpq_demux, + demux->dvr_input.priv_handle, + buf, + count); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_sdmx_write failed. ret = %d\n", + __func__, ret); + ret = count; + } + } + + /* + * Route through sw filter - process non-secure feeds if any exist. 
+ * For sw filter, should process the same amount of bytes the sdmx + * process managed to consume, unless some sdmx error occurred, for + * which should process the whole buffer + */ + if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds) + dvb_dmx_swfilter_format(dvb_demux, buf, ret, + dvb_demux->tsp_format); + + if (signal_pending(current)) + return -EINTR; + + return ret; +} + +int mpq_sdmx_is_loaded(void) +{ + static int sdmx_load_checked; + + if (!sdmx_load_checked) { + mpq_sdmx_check_app_loaded(); + sdmx_load_checked = 1; + } + + return mpq_dmx_info.secure_demux_app_loaded; +} + +int mpq_dmx_oob_command(struct dvb_demux_feed *feed, + struct dmx_oob_command *cmd) +{ + struct mpq_feed *mpq_feed = feed->priv; + struct mpq_demux *mpq_demux = mpq_feed->mpq_demux; + struct dmx_data_ready event; + int ret = 0; + + mutex_lock(&mpq_demux->mutex); + mpq_feed = feed->priv; + + if (!dvb_dmx_is_video_feed(feed) && !dvb_dmx_is_pcr_feed(feed) && + !feed->secure_mode.is_secured) { + mutex_unlock(&mpq_demux->mutex); + return 0; + } + + event.data_length = 0; + + switch (cmd->type) { + case DMX_OOB_CMD_EOS: + event.status = DMX_OK_EOS; + if (!feed->secure_mode.is_secured) { + if (dvb_dmx_is_video_feed(feed)) { + if (!video_framing) + mpq_dmx_decoder_pes_closure(mpq_demux, + mpq_feed); + else + mpq_dmx_decoder_frame_closure(mpq_demux, + mpq_feed); + ret = mpq_dmx_decoder_eos_cmd(mpq_feed, 1); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: Couldn't write oob eos packet\n", + __func__); + } else if (dvb_dmx_is_audio_feed(feed)) { + mpq_dmx_decoder_audio_pes_closure(mpq_demux, + mpq_feed); + ret = mpq_dmx_decoder_eos_cmd(mpq_feed, 2); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: Couldn't write oob eos packet\n", + __func__); + } + ret = feed->data_ready_cb.ts(&feed->feed.ts, &event); + } else if (!mpq_demux->sdmx_eos) { + struct sdmx_buff_descr buf_desc; + + mpq_demux->sdmx_eos = 1; + ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc); + if (!ret) { + mutex_unlock(&mpq_demux->mutex); + mpq_sdmx_process_buffer(mpq_demux, &buf_desc, + 0, 0); + return 0; + } + } + break; + case DMX_OOB_CMD_MARKER: + event.status = DMX_OK_MARKER; + event.marker.id = cmd->params.marker.id; + + if (feed->type == DMX_TYPE_SEC) + ret = dvb_dmx_notify_section_event(feed, &event, 1); + else + /* MPQ_TODO: Notify decoder via the stream buffer */ + ret = feed->data_ready_cb.ts(&feed->feed.ts, &event); + break; + + default: + ret = -EINVAL; + break; + } + + mutex_unlock(&mpq_demux->mutex); + return ret; +} diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h new file mode 100644 index 000000000000..0c20a8978427 --- /dev/null +++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h @@ -0,0 +1,1116 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _MPQ_DMX_PLUGIN_COMMON_H +#define _MPQ_DMX_PLUGIN_COMMON_H + +#include + +#include "dvbdev.h" +#include "dmxdev.h" +#include "demux.h" +#include "dvb_demux.h" +#include "dvb_frontend.h" +#include "mpq_adapter.h" +#include "mpq_sdmx.h" + +#define TS_PACKET_SYNC_BYTE (0x47) +#define TS_PACKET_SIZE (188) +#define TS_PACKET_HEADER_LENGTH (4) + +/* Length of mandatory fields that must exist in header of video PES */ +#define PES_MANDATORY_FIELDS_LEN 9 + +/* + * 500 PES header packets in the meta-data buffer, + * should be more than enough + */ +#define VIDEO_NUM_OF_PES_PACKETS 500 + +#define VIDEO_META_DATA_PACKET_SIZE \ + (DVB_RINGBUFFER_PKTHDRSIZE + \ + sizeof(struct mpq_streambuffer_packet_header) + \ + sizeof(struct mpq_adapter_video_meta_data)) + +#define VIDEO_META_DATA_BUFFER_SIZE \ + (VIDEO_NUM_OF_PES_PACKETS * VIDEO_META_DATA_PACKET_SIZE) + +#define AUDIO_NUM_OF_PES_PACKETS 100 + +#define AUDIO_META_DATA_PACKET_SIZE \ + (DVB_RINGBUFFER_PKTHDRSIZE + \ + sizeof(struct mpq_streambuffer_packet_header) + \ + sizeof(struct mpq_adapter_audio_meta_data)) + +#define AUDIO_META_DATA_BUFFER_SIZE \ + (AUDIO_NUM_OF_PES_PACKETS * AUDIO_META_DATA_PACKET_SIZE) + +/* Max number open() request can be done on demux device */ +#define MPQ_MAX_DMX_FILES 128 + +/* TSIF alias name length */ +#define TSIF_NAME_LENGTH 20 + +/** + * struct ts_packet_header - Transport packet header + * as defined in MPEG2 transport stream standard. + */ +struct ts_packet_header { +#if defined(__BIG_ENDIAN_BITFIELD) + unsigned sync_byte:8; + unsigned transport_error_indicator:1; + unsigned payload_unit_start_indicator:1; + unsigned transport_priority:1; + unsigned pid_msb:5; + unsigned pid_lsb:8; + unsigned transport_scrambling_control:2; + unsigned adaptation_field_control:2; + unsigned continuity_counter:4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + unsigned sync_byte:8; + unsigned pid_msb:5; + unsigned transport_priority:1; + unsigned payload_unit_start_indicator:1; + unsigned transport_error_indicator:1; + unsigned pid_lsb:8; + unsigned continuity_counter:4; + unsigned adaptation_field_control:2; + unsigned transport_scrambling_control:2; +#else +#error "Please fix " +#endif +} __packed; + +/** + * struct ts_adaptation_field - Adaptation field prefix + * as defined in MPEG2 transport stream standard. 
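+ * Only the leading flag bits and the PCR fields are represented; the
+ * rest of the optional adaptation field payload is not described by
+ * this struct.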
+ */ +struct ts_adaptation_field { +#if defined(__BIG_ENDIAN_BITFIELD) + unsigned adaptation_field_length:8; + unsigned discontinuity_indicator:1; + unsigned random_access_indicator:1; + unsigned elementary_stream_priority_indicator:1; + unsigned PCR_flag:1; + unsigned OPCR_flag:1; + unsigned splicing_point_flag:1; + unsigned transport_private_data_flag:1; + unsigned adaptation_field_extension_flag:1; + unsigned program_clock_reference_base_1:8; + unsigned program_clock_reference_base_2:8; + unsigned program_clock_reference_base_3:8; + unsigned program_clock_reference_base_4:8; + unsigned program_clock_reference_base_5:1; + unsigned reserved:6; + unsigned program_clock_reference_ext_1:1; + unsigned program_clock_reference_ext_2:8; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + unsigned adaptation_field_length:8; + unsigned adaptation_field_extension_flag:1; + unsigned transport_private_data_flag:1; + unsigned splicing_point_flag:1; + unsigned OPCR_flag:1; + unsigned PCR_flag:1; + unsigned elementary_stream_priority_indicator:1; + unsigned random_access_indicator:1; + unsigned discontinuity_indicator:1; + unsigned program_clock_reference_base_1:8; + unsigned program_clock_reference_base_2:8; + unsigned program_clock_reference_base_3:8; + unsigned program_clock_reference_base_4:8; + unsigned program_clock_reference_ext_1:1; + unsigned reserved:6; + unsigned program_clock_reference_base_5:1; + unsigned program_clock_reference_ext_2:8; +#else +#error "Please fix " +#endif +} __packed; + + +/* + * PES packet header containing dts and/or pts values + * as defined in MPEG2 transport stream standard. + */ +struct pes_packet_header { +#if defined(__BIG_ENDIAN_BITFIELD) + unsigned packet_start_code_prefix_1:8; + unsigned packet_start_code_prefix_2:8; + unsigned packet_start_code_prefix_3:8; + unsigned stream_id:8; + unsigned pes_packet_length_msb:8; + unsigned pes_packet_length_lsb:8; + unsigned reserved_bits0:2; + unsigned pes_scrambling_control:2; + unsigned pes_priority:1; + unsigned data_alignment_indicator:1; + unsigned copyright:1; + unsigned original_or_copy:1; + unsigned pts_dts_flag:2; + unsigned escr_flag:1; + unsigned es_rate_flag:1; + unsigned dsm_trick_mode_flag:1; + unsigned additional_copy_info_flag:1; + unsigned pes_crc_flag:1; + unsigned pes_extension_flag:1; + unsigned pes_header_data_length:8; + unsigned reserved_bits1:4; + unsigned pts_1:3; + unsigned marker_bit0:1; + unsigned pts_2:8; + unsigned pts_3:7; + unsigned marker_bit1:1; + unsigned pts_4:8; + unsigned pts_5:7; + unsigned marker_bit2:1; + unsigned reserved_bits2:4; + unsigned dts_1:3; + unsigned marker_bit3:1; + unsigned dts_2:8; + unsigned dts_3:7; + unsigned marker_bit4:1; + unsigned dts_4:8; + unsigned dts_5:7; + unsigned marker_bit5:1; + unsigned reserved_bits3:4; +#elif defined(__LITTLE_ENDIAN_BITFIELD) + unsigned packet_start_code_prefix_1:8; + unsigned packet_start_code_prefix_2:8; + unsigned packet_start_code_prefix_3:8; + unsigned stream_id:8; + unsigned pes_packet_length_lsb:8; + unsigned pes_packet_length_msb:8; + unsigned original_or_copy:1; + unsigned copyright:1; + unsigned data_alignment_indicator:1; + unsigned pes_priority:1; + unsigned pes_scrambling_control:2; + unsigned reserved_bits0:2; + unsigned pes_extension_flag:1; + unsigned pes_crc_flag:1; + unsigned additional_copy_info_flag:1; + unsigned dsm_trick_mode_flag:1; + unsigned es_rate_flag:1; + unsigned escr_flag:1; + unsigned pts_dts_flag:2; + unsigned pes_header_data_length:8; + unsigned marker_bit0:1; + unsigned pts_1:3; + unsigned reserved_bits1:4; 
+ unsigned pts_2:8; + unsigned marker_bit1:1; + unsigned pts_3:7; + unsigned pts_4:8; + unsigned marker_bit2:1; + unsigned pts_5:7; + unsigned marker_bit3:1; + unsigned dts_1:3; + unsigned reserved_bits2:4; + unsigned dts_2:8; + unsigned marker_bit4:1; + unsigned dts_3:7; + unsigned dts_4:8; + unsigned marker_bit5:1; + unsigned dts_5:7; + unsigned reserved_bits3:4; +#else +#error "Please fix " +#endif +} __packed; + +/** + * mpq_decoder_buffers_desc - decoder buffer(s) management information. + * + * @desc: Array of buffer descriptors as they are passed to mpq_streambuffer + * upon its initialization. These descriptors must remain valid as long as + * the mpq_streambuffer object is used. + * @ion_handle: Array of ION handles, one for each decoder buffer, used for + * kernel memory mapping or allocation. Handles are saved in order to release + * resources properly later on. + * @decoder_buffers_num: number of buffers that are managed, either externally + * or internally by the mpq_streambuffer object + * @shared_file: File handle of internally allocated video buffer shared + * with video consumer. + */ +struct mpq_decoder_buffers_desc { + struct mpq_streambuffer_buffer_desc desc[DMX_MAX_DECODER_BUFFER_NUM]; + struct ion_handle *ion_handle[DMX_MAX_DECODER_BUFFER_NUM]; + u32 decoder_buffers_num; + struct file *shared_file; +}; + +/* + * mpq_video_feed_info - private data used for video feed. + * + * @video_buffer: Holds the streamer buffer shared with + * the decoder for feeds having the data going to the decoder. + * @video_buffer_lock: Lock protecting against video output buffer. + * The lock protects against API calls to manipulate the output buffer + * (initialize, free, re-use buffers) and dvb-sw demux parsing the video + * data through mpq_dmx_process_video_packet(). + * @buffer_desc: Holds decoder buffer(s) information used for stream buffer. + * @pes_header: Used for feeds that output data to decoder, + * holds PES header of current processed PES. + * @pes_header_left_bytes: Used for feeds that output data to decoder, + * holds remaining PES header bytes of current processed PES. + * @pes_header_offset: Holds the offset within the current processed + * pes header. + * @fullness_wait_cancel: Flag used to signal to abort waiting for + * decoder's fullness. + * @stream_interface: The ID of the video stream interface registered + * with this stream buffer. + * @patterns: pointer to the framing patterns to look for. + * @patterns_num: number of framing patterns. + * @prev_pattern: holds the trailing data of the last processed video packet. + * @frame_offset: Saves data buffer offset to which a new frame will be written + * @last_pattern_offset: Holds the previous pattern offset + * @pending_pattern_len: Accumulated number of data bytes that will be + * reported for this frame. + * @last_framing_match_type: Used for saving the type of + * the previous pattern match found in this video feed. + * @last_framing_match_stc: Used for saving the STC attached to TS packet + * of the previous pattern match found in this video feed. + * @found_sequence_header_pattern: Flag used to note that an MPEG-2 + * Sequence Header, H.264 SPS or VC-1 Sequence Header pattern + * (whichever is relevant according to the video standard) had already + * been found. + * @prefix_size: a bit mask representing the size(s) of possible prefixes + * to the pattern, already found in the previous buffer. If bit 0 is set, + * a prefix of size 1 was found. If bit 1 is set, a prefix of size 2 was + * found, etc. 
This supports a prefix size of up to 32, which is more + * than we need. The search function updates prefix_size as needed + * for the next buffer search. + * @first_prefix_size: used to save the prefix size used to find the first + * pattern written to the stream buffer. + * @saved_pts_dts_info: used to save PTS/DTS information until it is written. + * @new_pts_dts_info: used to store PTS/DTS information from current PES header. + * @saved_info_used: indicates if saved PTS/DTS information was used. + * @new_info_exists: indicates if new PTS/DTS information exists in + * new_pts_dts_info that should be saved to saved_pts_dts_info. + * @first_pts_dts_copy: a flag used to indicate if PTS/DTS information needs + * to be copied from the currently parsed PES header to the saved_pts_dts_info. + * @tei_errs: Transport stream Transport Error Indicator (TEI) counter. + * @last_continuity: last continuity counter value found in TS packet header. + * Initialized to -1. + * @continuity_errs: Transport stream continuity error counter. + * @ts_packets_num: TS packets counter. + * @ts_dropped_bytes: counts the number of bytes dropped due to insufficient + * buffer space. + * @prev_stc: STC attached to the previous video TS packet + */ +struct mpq_video_feed_info { + struct mpq_streambuffer *video_buffer; + spinlock_t video_buffer_lock; + struct mpq_decoder_buffers_desc buffer_desc; + struct pes_packet_header pes_header; + u32 pes_header_left_bytes; + u32 pes_header_offset; + int fullness_wait_cancel; + enum mpq_adapter_stream_if stream_interface; +const struct dvb_dmx_video_patterns *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM]; + int patterns_num; + char prev_pattern[DVB_DMX_MAX_PATTERN_LEN]; + u32 frame_offset; + u32 last_pattern_offset; + u32 pending_pattern_len; + u64 last_framing_match_type; + u64 last_framing_match_stc; + int found_sequence_header_pattern; + struct dvb_dmx_video_prefix_size_masks prefix_size; + u32 first_prefix_size; + struct dmx_pts_dts_info saved_pts_dts_info; + struct dmx_pts_dts_info new_pts_dts_info; + int saved_info_used; + int new_info_exists; + int first_pts_dts_copy; + u32 tei_errs; + int last_continuity; + u32 continuity_errs; + u32 ts_packets_num; + u32 ts_dropped_bytes; + u64 prev_stc; +}; + +/* require a bare minimal mpq_audio_feed_info struct */ +struct mpq_audio_feed_info { + struct mpq_streambuffer *audio_buffer; + spinlock_t audio_buffer_lock; + struct mpq_decoder_buffers_desc buffer_desc; + struct pes_packet_header pes_header; + u32 pes_header_left_bytes; + u32 pes_header_offset; + int fullness_wait_cancel; + enum mpq_adapter_stream_if stream_interface; + u32 frame_offset; /* pes frame offset */ + struct dmx_pts_dts_info saved_pts_dts_info; + struct dmx_pts_dts_info new_pts_dts_info; + int saved_info_used; + int new_info_exists; + int first_pts_dts_copy; + u32 tei_errs; + int last_continuity; + u32 continuity_errs; + u32 ts_packets_num; + u32 ts_dropped_bytes; + u64 prev_stc; +}; + +/** + * mpq feed object - mpq common plugin feed information + * + * @dvb_demux_feed: Back pointer to dvb demux level feed object + * @mpq_demux: Pointer to common mpq demux object + * @plugin_priv: Plugin specific private data + * @sdmx_filter_handle: Secure demux filter handle. 
Recording feed may share + * same filter handle + * @secondary_feed: Specifies if this feed shares filter handle with + * other feeds + * @metadata_buf: Ring buffer object for managing the metadata buffer + * @metadata_buf_handle: Allocation handle for the metadata buffer + * @session_id: Counter that is incremented every time feed is initialized + * through mpq_dmx_init_mpq_feed + * @sdmx_buf: Ring buffer object for intermediate output data from the sdmx + * @sdmx_buf_handle: Allocation handle for the sdmx intermediate data buffer + * @video_info: Video feed specific information + */ +struct mpq_feed { + struct dvb_demux_feed *dvb_demux_feed; + struct mpq_demux *mpq_demux; + void *plugin_priv; + + /* Secure demux related */ + int sdmx_filter_handle; + int secondary_feed; + enum sdmx_filter filter_type; + struct dvb_ringbuffer metadata_buf; + struct ion_handle *metadata_buf_handle; + + u8 session_id; + struct dvb_ringbuffer sdmx_buf; + struct ion_handle *sdmx_buf_handle; + + struct mpq_video_feed_info video_info; + struct mpq_audio_feed_info audio_info; +}; + +/** + * struct mpq_demux - mpq demux information + * @idx: Instance index + * @demux: The dvb_demux instance used by mpq_demux + * @dmxdev: The dmxdev instance used by mpq_demux + * @fe_memory: Handle of front-end memory source to mpq_demux + * @source: The current source connected to the demux + * @is_initialized: Indicates whether this demux device was + * initialized or not. + * @ion_client: ION demux client used to allocate memory from ION. + * @mutex: Lock used to protect against private feed data + * @feeds: mpq common feed object pool + * @num_active_feeds: Number of active mpq feeds + * @num_secure_feeds: Number of secure feeds (have a sdmx filter associated) + * currently allocated. + * Used before each call to sdmx_process() to build up to date state. + * @sdmx_session_handle: Secure demux open session handle + * @sdmx_filter_count: Number of active secure demux filters + * @sdmx_eos: End-of-stream indication flag for current sdmx session + * @sdmx_filters_state: Array holding buffers status for each secure + * demux filter. + * @decoder_alloc_flags: ION flags to be used when allocating internally + * @plugin_priv: Underlying plugin's own private data + * @mpq_dmx_plugin_release: Underlying plugin's release function + * @hw_notification_interval: Notification interval in msec, + * exposed in debugfs. + * @hw_notification_min_interval: Minimum notification internal in msec, + * exposed in debugfs. + * @hw_notification_count: Notification count, exposed in debugfs. + * @hw_notification_size: Notification size in bytes, exposed in debugfs. + * @hw_notification_min_size: Minimum notification size in bytes, + * exposed in debugfs. + * @decoder_stat: Decoder output statistics, exposed in debug-fs. + * @sdmx_process_count: Total number of times sdmx_process is called. + * @sdmx_process_time_sum: Total time sdmx_process takes. + * @sdmx_process_time_average: Average time sdmx_process takes. + * @sdmx_process_time_max: Max time sdmx_process takes. + * @sdmx_process_packets_sum: Total packets number sdmx_process handled. + * @sdmx_process_packets_average: Average packets number sdmx_process handled. + * @sdmx_process_packets_min: Minimum packets number sdmx_process handled. + * @last_notification_time: Time of last HW notification. 
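+ * @sdmx_session_ref_count: Reference count for the opened sdmx session
+ * @sdmx_log_level: Log level used by the secure demux, exposed in debugfs
+ * @ts_packet_timestamp_source: Source used for the TS packet timestamp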
+ */ +struct mpq_demux { + int idx; + struct dvb_demux demux; + struct dmxdev dmxdev; + struct dmx_frontend fe_memory; + dmx_source_t source; + int is_initialized; + struct ion_client *ion_client; + struct mutex mutex; + struct mpq_feed feeds[MPQ_MAX_DMX_FILES]; + u32 num_active_feeds; + u32 num_secure_feeds; + int sdmx_session_handle; + int sdmx_session_ref_count; + int sdmx_filter_count; + int sdmx_eos; + struct { + /* SDMX filters status */ + struct sdmx_filter_status status[MPQ_MAX_DMX_FILES]; + + /* Index of the feed respective to SDMX filter */ + u8 mpq_feed_idx[MPQ_MAX_DMX_FILES]; + + /* + * Snapshot of session_id of the feed + * when SDMX process was called. This is used + * to identify whether the feed has been + * restarted when processing SDMX results. + * May happen when demux is stalled in playback + * from memory with PULL mode. + */ + u8 session_id[MPQ_MAX_DMX_FILES]; + } sdmx_filters_state; + + unsigned int decoder_alloc_flags; + + /* HW plugin specific */ + void *plugin_priv; + int (*mpq_dmx_plugin_release)(struct mpq_demux *mpq_demux); + + /* debug-fs */ + u32 hw_notification_interval; + u32 hw_notification_min_interval; + u32 hw_notification_count; + u32 hw_notification_size; + u32 hw_notification_min_size; + + struct { + /* + * Accumulated number of bytes + * dropped due to decoder buffer fullness. + */ + u32 drop_count; + + /* Counter incremeneted for each video frame output by demux */ + u32 out_count; + + /* + * Sum of intervals (msec) holding the time + * between two successive video frames output. + */ + u32 out_interval_sum; + + /* + * Average interval (msec) between two + * successive video frames output. + */ + u32 out_interval_average; + + /* + * Max interval (msec) between two + * successive video frames output. + */ + u32 out_interval_max; + + /* Counter for number of decoder packets with TEI bit set */ + u32 ts_errors; + + /* + * Counter for number of decoder packets + * with continuity counter errors. + */ + u32 cc_errors; + + /* Time of last video frame output */ + ktime_t out_last_time; + } decoder_stat[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES]; + + u32 sdmx_process_count; + u32 sdmx_process_time_sum; + u32 sdmx_process_time_average; + u32 sdmx_process_time_max; + u32 sdmx_process_packets_sum; + u32 sdmx_process_packets_average; + u32 sdmx_process_packets_min; + enum sdmx_log_level sdmx_log_level; + + ktime_t last_notification_time; + int ts_packet_timestamp_source; +}; + +/** + * mpq_dmx_init - initialization and registration function of + * single MPQ demux device + * + * @adapter: The adapter to register mpq_demux to + * @mpq_demux: The mpq demux to initialize + * + * Every HW plug-in needs to provide implementation of such + * function that will be called for each demux device on the + * module initialization. The function mpq_demux_plugin_init + * should be called during the HW plug-in module initialization. + */ +typedef int (*mpq_dmx_init)(struct dvb_adapter *mpq_adapter, + struct mpq_demux *demux); + +/** + * mpq_demux_plugin_init - Initialize demux devices and register + * them to the dvb adapter. + * + * @dmx_init_func: Pointer to the function to be used + * to initialize demux of the underlying HW plugin. + * + * Return error code + * + * Should be called at the HW plugin module initialization. + */ +int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func); + +/** + * mpq_demux_plugin_exit - terminate demux devices. + * + * Should be called at the HW plugin module termination. 
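+ *
+ * A minimal usage sketch (illustrative only; my_plugin_init, my_plugin_exit
+ * and my_dmx_init are placeholder names, see mpq_dmx_plugin_sw.c for a
+ * real plugin):
+ *
+ *   static int __init my_plugin_init(void)
+ *   {
+ *           return mpq_dmx_plugin_init(my_dmx_init);
+ *   }
+ *
+ *   static void __exit my_plugin_exit(void)
+ *   {
+ *           mpq_dmx_plugin_exit();
+ *   }
+ *
+ *   module_init(my_plugin_init);
+ *   module_exit(my_plugin_exit);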
+ */ +void mpq_dmx_plugin_exit(void); + +/** + * mpq_dmx_set_source - implmenetation of set_source routine. + * + * @demux: The demux device to set its source. + * @src: The source to be set. + * + * Return error code + * + * Can be used by the underlying plugins to implement kernel + * demux API set_source routine. + */ +int mpq_dmx_set_source(struct dmx_demux *demux, const dmx_source_t *src); + +/** + * mpq_dmx_map_buffer - map user-space buffer into kernel space. + * + * @demux: The demux device. + * @dmx_buffer: The demux buffer from user-space, assumes that + * buffer handle is ION file-handle. + * @priv_handle: Saves ION-handle of the buffer imported by this function. + * @kernel_mem: Saves kernel mapped address of the buffer. + * + * Return error code + * + * The function maps the buffer into kernel memory only if the buffer + * was not allocated with secure flag, otherwise the returned kernel + * memory address is set to NULL. + */ +int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer, + void **priv_handle, void **kernel_mem); + +/** + * mpq_dmx_unmap_buffer - unmap user-space buffer from kernel space memory. + * + * @demux: The demux device. + * @priv_handle: ION-handle of the buffer returned from mpq_dmx_map_buffer. + * + * Return error code + * + * The function unmaps the buffer from kernel memory only if the buffer + * was not allocated with secure flag. + */ +int mpq_dmx_unmap_buffer(struct dmx_demux *demux, void *priv_handle); + +/** + * mpq_dmx_decoder_fullness_init - Initialize waiting + * mechanism on decoder's buffer fullness. + * + * @feed: The decoder's feed + * + * Return error code. + */ +int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed); + +/** + * mpq_dmx_decoder_fullness_wait - Checks whether decoder buffer + * have free space as required, if not, wait for it. + * + * @feed: The decoder's feed + * @required_space: the required free space to wait for + * + * Return error code. + */ +int mpq_dmx_decoder_fullness_wait(struct dvb_demux_feed *feed, + size_t required_space); + +/** + * mpq_dmx_decoder_fullness_abort - Aborts waiting + * on decoder's buffer fullness if any waiting is done + * now. After calling this, to wait again the user must + * call mpq_dmx_decoder_fullness_init. + * + * @feed: The decoder's feed + * + * Return error code. + */ +int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed); + +/** + * mpq_dmx_decoder_buffer_status - Returns the + * status of the decoder's buffer. + * + * @feed: The decoder's feed + * @dmx_buffer_status: Status of decoder's buffer + * + * Return error code. + */ +int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed, + struct dmx_buffer_status *dmx_buffer_status); + +/** + * mpq_dmx_reuse_decoder_buffer - release buffer passed to decoder for reuse + * by the stream-buffer. + * + * @feed: The decoder's feed. + * @cookie: stream-buffer handle of the buffer. + * + * Return error code + * + * The function releases the buffer provided by the stream-buffer + * connected to the decoder back to the stream-buffer for reuse. + */ +int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie); + +/** + * mpq_dmx_process_video_packet - Assemble PES data and output it + * to the stream-buffer connected to the decoder. + * + * @feed: The feed used for the video TS packets + * @buf: The buffer holding video TS packet. + * + * Return error code. + * + * The function assumes it receives buffer with single TS packet + * of the relevant PID. 
+ * If the output buffer is full while assembly, the function drops + * the packet and does not write them to the output buffer. + * Scrambled packets are bypassed. + */ +int mpq_dmx_process_video_packet(struct dvb_demux_feed *feed, const u8 *buf); + +/** + * mpq_dmx_process_pcr_packet - Extract PCR/STC pairs from + * a 192 bytes packet. + * + * @feed: The feed used for the PCR TS packets + * @buf: The buffer holding pcr/stc packet. + * + * Return error code. + * + * The function assumes it receives buffer with single TS packet + * of the relevant PID, and that it has 4 bytes + * suffix as extra timestamp in the following format: + * + * Byte3: TSIF flags + * Byte0-2: TTS, 0..2^24-1 at 105.47 Khz (27*10^6/256). + * + * The function callbacks dmxdev after extraction of the pcr/stc + * pair. + */ +int mpq_dmx_process_pcr_packet(struct dvb_demux_feed *feed, const u8 *buf); + +/** + * mpq_dmx_extract_pcr_and_dci() - Extract the PCR field and discontinuity + * indicator from a TS packet buffer. + * + * @buf: TS packet buffer + * @pcr: returned PCR value + * @dci: returned discontinuity indicator + * + * Returns 1 if PCR was extracted, 0 otherwise. + */ +int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci); + +/** + * mpq_dmx_init_debugfs_entries - + * Extend dvb-demux debugfs with mpq related entries (HW statistics and secure + * demux log level). + * + * @mpq_demux: The mpq_demux device to initialize. + */ +void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux); + +/** + * mpq_dmx_update_hw_statistics - + * Update dvb-demux debugfs with HW notification statistics. + * + * @mpq_demux: The mpq_demux device to update. + */ +void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux); + +/** + * mpq_dmx_set_cipher_ops - Handles setting of cipher operations + * + * @feed: The feed to set its cipher operations + * @cipher_ops: Cipher operations to be set + * + * This common function handles only the case when working with + * secure-demux. When working with secure demux a single decrypt cipher + * operation is allowed. + * + * Return error code + */ +int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed, + struct dmx_cipher_operations *cipher_ops); + +/** + * mpq_dmx_convert_tts - Convert timestamp attached by HW to each TS + * packet to 27MHz. + * + * @feed: The feed with TTS attached + * @timestamp: Buffer holding the timestamp attached by the HW + * @timestampIn27Mhz: Timestamp result in 27MHz + * + * Return error code + */ +void mpq_dmx_convert_tts(struct dvb_demux_feed *feed, + const u8 timestamp[TIMESTAMP_LEN], + u64 *timestampIn27Mhz); + +/** + * mpq_sdmx_open_session - Handle the details of opening a new secure demux + * session for the specified mpq demux instance. Multiple calls to this + * is allowed, reference counting is managed to open it only when needed. + * + * @mpq_demux: mpq demux instance + * + * Return error code + */ +int mpq_sdmx_open_session(struct mpq_demux *mpq_demux); + +/** + * mpq_sdmx_close_session - Closes secure demux session. The session + * is closed only if reference counter of the session reaches 0. + * + * @mpq_demux: mpq demux instance + * + * Return error code + */ +int mpq_sdmx_close_session(struct mpq_demux *mpq_demux); + +/** + * mpq_dmx_init_mpq_feed - Initialize an mpq feed object + * The function allocates mpq_feed object and saves in the dvb_demux_feed + * priv field. 
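+ * The feed is expected to be released later via mpq_dmx_terminate_feed().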
+ * + * @feed: A dvb demux level feed parent object + * + * Return error code + */ +int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed); + +/** + * mpq_dmx_terminate_feed - Destroy an mpq feed object + * + * @feed: A dvb demux level feed parent object + * + * Return error code + */ +int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed); + +/** + * mpq_dmx_init_video_feed() - Initializes video related data structures + * + * @mpq_feed: mpq_feed object to initialize + * + * Return error code + */ +int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed); + +/** + * mpq_dmx_terminate_video_feed() - Release video related feed resources + * + * @mpq_feed: mpq_feed object to terminate + * + * Return error code + */ +int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed); + +/** + * mpq_dmx_write - demux write() function implementation. + * + * A wrapper function used for writing new data into the demux via DVR. + * It checks where new data should actually go, the secure demux or the normal + * dvb demux software demux. + * + * @demux: demux interface + * @buf: input buffer + * @count: number of data bytes in input buffer + * + * Return number of bytes processed or error code + */ +int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count); + +/** + * mpq_sdmx_process - Perform demuxing process on the specified input buffer + * in the secure demux instance + * + * @mpq_demux: mpq demux instance + * @input: input buffer descriptor + * @fill_count: number of data bytes in input buffer that can be read + * @read_offset: offset in buffer for reading + * @tsp_size: size of single TS packet + * + * Return number of bytes read or error code + */ +int mpq_sdmx_process(struct mpq_demux *mpq_demux, + struct sdmx_buff_descr *input, + u32 fill_count, + u32 read_offset, + size_t tsp_size); + +/** + * mpq_sdmx_loaded - Returns 1 if secure demux application is loaded, + * 0 otherwise. This function should be used to determine whether or not + * processing should take place in the SDMX. + */ +int mpq_sdmx_is_loaded(void); + +/** + * mpq_dmx_oob_command - Handles OOB command from dvb-demux. + * + * OOB marker commands trigger callback to the dmxdev. + * Handling of EOS command may trigger current (last on stream) PES/Frame to + * be reported, in addition to callback to the dmxdev. + * In case secure demux is active for the feed, EOS command is passed to the + * secure demux for handling. + * + * @feed: dvb demux feed object + * @cmd: oob command data + * + * returns 0 on success or error + */ +int mpq_dmx_oob_command(struct dvb_demux_feed *feed, + struct dmx_oob_command *cmd); + +/** + * mpq_dmx_peer_rec_feed() - For a recording filter with multiple feeds objects + * search for a feed object that shares the same filter as the specified feed + * object, and return it. + * This can be used to test whether the specified feed object is the first feed + * allocate for the recording filter - return value is NULL. + * + * @feed: dvb demux feed object + * + * Return the dvb_demux_feed sharing the same filter's buffer or NULL if no + * such is found. 
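+ * A NULL return therefore also indicates that the specified feed is the
+ * first feed allocated for its recording filter.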
+ */ +struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed); + +/** + * mpq_dmx_decoder_eos_cmd() - Report EOS event to the mpq_streambuffer + * + * @mpq_feed: Audio/Video mpq_feed object for notification + * @feed_type: Feed type( Audio or Video ) + * + * Return error code + */ +int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed, int feed_type); + +/** + * mpq_dmx_parse_mandatory_pes_header() - Parse non-optional PES header fields + * from TS packet buffer and save results in the feed object. + * + * @feed: Video dvb demux feed object + * @feed_data: Structure where results will be saved + * @pes_header: Saved PES header + * @buf: Input buffer containing TS packet with the PES header + * @ts_payload_offset: Offset in 'buf' where payload begins + * @bytes_avail: Length of actual payload + * + * Return error code + */ +int mpq_dmx_parse_mandatory_pes_header( + struct dvb_demux_feed *feed, + struct mpq_video_feed_info *feed_data, + struct pes_packet_header *pes_header, + const u8 *buf, + u32 *ts_payload_offset, + int *bytes_avail); + +/** + * mpq_dmx_parse_remaining_pes_header() - Parse optional PES header fields + * from TS packet buffer and save results in the feed object. + * This function depends on mpq_dmx_parse_mandatory_pes_header being called + * first for state to be valid. + * + * @feed: Video dvb demux feed object + * @feed_data: Structure where results will be saved + * @pes_header: Saved PES header + * @buf: Input buffer containing TS packet with the PES header + * @ts_payload_offset: Offset in 'buf' where payload begins + * @bytes_avail: Length of actual payload + * + * Return error code + */ +int mpq_dmx_parse_remaining_pes_header( + struct dvb_demux_feed *feed, + struct mpq_video_feed_info *feed_data, + struct pes_packet_header *pes_header, + const u8 *buf, + u32 *ts_payload_offset, + int *bytes_avail); + +/** + * mpq_dmx_flush_stream_buffer() - Flush video stream buffer object of the + * specific video feed, both meta-data packets and data. 
+ * + * @feed: dvb demux video feed object + * + * Return error code + */ +int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed); + +/** + * mpq_dmx_save_pts_dts() - Save the current PTS/DTS data + * + * @feed_data: Video feed structure where PTS/DTS is saved + */ +static inline void mpq_dmx_save_pts_dts(struct mpq_video_feed_info *feed_data) +{ + if (feed_data->new_info_exists) { + feed_data->saved_pts_dts_info.pts_exist = + feed_data->new_pts_dts_info.pts_exist; + feed_data->saved_pts_dts_info.pts = + feed_data->new_pts_dts_info.pts; + feed_data->saved_pts_dts_info.dts_exist = + feed_data->new_pts_dts_info.dts_exist; + feed_data->saved_pts_dts_info.dts = + feed_data->new_pts_dts_info.dts; + + feed_data->new_info_exists = 0; + feed_data->saved_info_used = 0; + } +} + +/** + * mpq_dmx_write_pts_dts() - Write out the saved PTS/DTS data and mark as used + * + * @feed_data: Video feed structure where PTS/DTS was saved + * @info: PTS/DTS structure to write to + */ +static inline void mpq_dmx_write_pts_dts(struct mpq_video_feed_info *feed_data, + struct dmx_pts_dts_info *info) +{ + if (!feed_data->saved_info_used) { + info->pts_exist = feed_data->saved_pts_dts_info.pts_exist; + info->pts = feed_data->saved_pts_dts_info.pts; + info->dts_exist = feed_data->saved_pts_dts_info.dts_exist; + info->dts = feed_data->saved_pts_dts_info.dts; + + feed_data->saved_info_used = 1; + } else { + info->pts_exist = 0; + info->dts_exist = 0; + } +} + +/* + * mpq_dmx_calc_time_delta - + * Calculate delta in msec between two time snapshots. + * + * @curr_time: value of current time + * @prev_time: value of previous time + * + * Return time-delta in msec + */ +static inline u32 mpq_dmx_calc_time_delta(ktime_t curr_time, ktime_t prev_time) +{ + s64 delta_time_ms = ktime_ms_delta(curr_time, prev_time); + + return (u32)delta_time_ms; +} + +void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed); + +/* Return the common module parameter tsif_mode */ +int mpq_dmx_get_param_tsif_mode(void); + +/* Return the common module parameter clock_inv */ +int mpq_dmx_get_param_clock_inv(void); + +/* Return the common module parameter mpq_sdmx_scramble_odd */ +int mpq_dmx_get_param_scramble_odd(void); + +/* Return the common module parameter mpq_sdmx_scramble_even */ +int mpq_dmx_get_param_scramble_even(void); + +/* Return the common module parameter mpq_sdmx_scramble_default_discard */ +int mpq_dmx_get_param_scramble_default_discard(void); + +/* APIs for Audio stream buffers interface -- Added for broadcase use case */ +/* + * The Audio/Video drivers (or consumers) require the stream_buffer information + * for consuming packet headers and compressed AV data from the + * ring buffer filled by demux driver which is the producer + */ +struct mpq_streambuffer *consumer_audio_streambuffer(int dmx_ts_pes_audio); +struct mpq_streambuffer *consumer_video_streambuffer(int dmx_ts_pes_video); + +int mpq_dmx_init_audio_feed(struct mpq_feed *mpq_feed); + +int mpq_dmx_terminate_audio_feed(struct mpq_feed *mpq_feed); + +int mpq_dmx_parse_remaining_audio_pes_header( + struct dvb_demux_feed *feed, + struct mpq_audio_feed_info *feed_data, + struct pes_packet_header *pes_header, + const u8 *buf, + u32 *ts_payload_offset, + int *bytes_avail); + +static inline void mpq_dmx_save_audio_pts_dts( + struct mpq_audio_feed_info *feed_data) +{ + if (feed_data->new_info_exists) { + feed_data->saved_pts_dts_info.pts_exist = + feed_data->new_pts_dts_info.pts_exist; + feed_data->saved_pts_dts_info.pts = + feed_data->new_pts_dts_info.pts; + 
feed_data->saved_pts_dts_info.dts_exist = + feed_data->new_pts_dts_info.dts_exist; + feed_data->saved_pts_dts_info.dts = + feed_data->new_pts_dts_info.dts; + + feed_data->new_info_exists = 0; + feed_data->saved_info_used = 0; + } +} + +/* + * mpq_dmx_process_audio_packet - Assemble Audio PES data and output to + * stream buffer connected to decoder. + */ +int mpq_dmx_process_audio_packet(struct dvb_demux_feed *feed, const u8 *buf); + +static inline void mpq_dmx_write_audio_pts_dts( + struct mpq_audio_feed_info *feed_data, + struct dmx_pts_dts_info *info) +{ + if (!feed_data->saved_info_used) { + info->pts_exist = feed_data->saved_pts_dts_info.pts_exist; + info->pts = feed_data->saved_pts_dts_info.pts; + info->dts_exist = feed_data->saved_pts_dts_info.dts_exist; + info->dts = feed_data->saved_pts_dts_info.dts; + + feed_data->saved_info_used = 1; + } else { + info->pts_exist = 0; + info->dts_exist = 0; + } +} + +#endif /* _MPQ_DMX_PLUGIN_COMMON_H */ diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c new file mode 100644 index 000000000000..16e1ba4cea36 --- /dev/null +++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c @@ -0,0 +1,280 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include "mpq_dvb_debug.h" +#include "mpq_dmx_plugin_common.h" + + +static int mpq_sw_dmx_start_filtering(struct dvb_demux_feed *feed) +{ + int ret = -EINVAL; + struct mpq_demux *mpq_demux = feed->demux->priv; + + MPQ_DVB_DBG_PRINT("%s(pid=%d) executed\n", __func__, feed->pid); + + if (mpq_demux == NULL) { + MPQ_DVB_ERR_PRINT("%s: invalid mpq_demux handle\n", __func__); + goto out; + } + + if (mpq_demux->source < DMX_SOURCE_DVR0) { + MPQ_DVB_ERR_PRINT("%s: only DVR source is supported (%d)\n", + __func__, mpq_demux->source); + goto out; + } + + /* + * Always feed sections/PES starting from a new one and + * do not partial transfer data from older one + */ + feed->pusi_seen = 0; + + ret = mpq_dmx_init_mpq_feed(feed); + if (ret) + MPQ_DVB_ERR_PRINT("%s: mpq_dmx_init_mpq_feed failed(%d)\n", + __func__, ret); +out: + return ret; +} + +static int mpq_sw_dmx_stop_filtering(struct dvb_demux_feed *feed) +{ + int ret; + + MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid); + + ret = mpq_dmx_terminate_feed(feed); + if (ret) + MPQ_DVB_ERR_PRINT("%s: mpq_dmx_terminate_feed failed(%d)\n", + __func__, ret); + + return ret; +} + +static int mpq_sw_dmx_write_to_decoder(struct dvb_demux_feed *feed, + const u8 *buf, size_t len) +{ + /* + * It is assumed that this function is called once for each + * TS packet of the relevant feed. 
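+ * The buffer may carry a trailing packet timestamp (TIMESTAMP_LEN bytes)
+ * in addition to the 188-byte TS packet, hence lengths of up to
+ * TS_PACKET_SIZE + TIMESTAMP_LEN are expected.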
+ */ + if (len > (TIMESTAMP_LEN + TS_PACKET_SIZE)) + MPQ_DVB_DBG_PRINT( + "%s: warnning - len larger than one packet\n", + __func__); + + if (dvb_dmx_is_video_feed(feed)) + return mpq_dmx_process_video_packet(feed, buf); + + if (dvb_dmx_is_pcr_feed(feed)) + return mpq_dmx_process_pcr_packet(feed, buf); + + return 0; +} + +static int mpq_sw_dmx_set_source(struct dmx_demux *demux, + const dmx_source_t *src) +{ + int ret = -EINVAL; + + if (demux == NULL || demux->priv == NULL || src == NULL) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + goto out; + } + + if (*src >= DMX_SOURCE_DVR0 && *src <= DMX_SOURCE_DVR3) { + ret = mpq_dmx_set_source(demux, src); + if (ret) + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_set_source(%d) failed, ret=%d\n", + __func__, *src, ret); + } else { + MPQ_DVB_ERR_PRINT("%s: not a DVR source\n", __func__); + } + +out: + return ret; +} + +static int mpq_sw_dmx_get_caps(struct dmx_demux *demux, struct dmx_caps *caps) +{ + struct dvb_demux *dvb_demux = demux->priv; + + if (dvb_demux == NULL || caps == NULL) { + MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__); + return -EINVAL; + } + + caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA | + DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING | + DMX_CAP_AUTO_BUFFER_FLUSH; + caps->recording_max_video_pids_indexed = 0; + caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES; + caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES; + caps->num_pid_filters = MPQ_MAX_DMX_FILES; + caps->num_section_filters = dvb_demux->filternum; + caps->num_section_filters_per_pid = dvb_demux->filternum; + caps->section_filter_length = DMX_FILTER_SIZE; + caps->num_demod_inputs = 0; + caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES; + caps->max_bitrate = 192; + caps->demod_input_max_bitrate = 96; + caps->memory_input_max_bitrate = 96; + caps->num_cipher_ops = 1; + + /* No STC support */ + caps->max_stc = 0; + + /* Buffer requirements */ + caps->section.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->section.max_buffer_num = 1; + caps->section.max_size = 0xFFFFFFFF; + caps->section.size_alignment = 0; + caps->pes.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->pes.max_buffer_num = 1; + caps->pes.max_size = 0xFFFFFFFF; + caps->pes.size_alignment = 0; + caps->recording_188_tsp.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->recording_188_tsp.max_buffer_num = 1; + caps->recording_188_tsp.max_size = 0xFFFFFFFF; + caps->recording_188_tsp.size_alignment = 0; + caps->recording_192_tsp.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->recording_192_tsp.max_buffer_num = 1; + caps->recording_192_tsp.max_size = 0xFFFFFFFF; + caps->recording_192_tsp.size_alignment = 0; + caps->playback_188_tsp.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->playback_188_tsp.max_buffer_num = 1; + caps->playback_188_tsp.max_size = 0xFFFFFFFF; + caps->playback_188_tsp.size_alignment = 188; + caps->playback_192_tsp.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->playback_192_tsp.max_buffer_num = 1; + caps->playback_192_tsp.max_size = 0xFFFFFFFF; + caps->playback_192_tsp.size_alignment = 192; + caps->decoder.flags = + DMX_BUFFER_SECURED_IF_DECRYPTED | + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + 
DMX_BUFFER_LINEAR_GROUP_SUPPORT | + DMX_BUFFER_CACHED; + caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM; + caps->decoder.max_size = 0xFFFFFFFF; + caps->decoder.size_alignment = SZ_4K; + + return 0; +} + +static int mpq_sw_dmx_init(struct dvb_adapter *mpq_adapter, + struct mpq_demux *mpq_demux) +{ + int ret; + struct dvb_demux *dvb_demux = &mpq_demux->demux; + + /* Set the kernel-demux object capabilities */ + mpq_demux->demux.dmx.capabilities = + DMX_TS_FILTERING | + DMX_PES_FILTERING | + DMX_SECTION_FILTERING | + DMX_MEMORY_BASED_FILTERING | + DMX_CRC_CHECKING | + DMX_TS_DESCRAMBLING; + + mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED; + + /* Set dvb-demux "virtual" function pointers */ + dvb_demux->priv = (void *)mpq_demux; + dvb_demux->filternum = MPQ_MAX_DMX_FILES; + dvb_demux->feednum = MPQ_MAX_DMX_FILES; + dvb_demux->start_feed = mpq_sw_dmx_start_filtering; + dvb_demux->stop_feed = mpq_sw_dmx_stop_filtering; + dvb_demux->write_to_decoder = mpq_sw_dmx_write_to_decoder; + dvb_demux->decoder_fullness_init = mpq_dmx_decoder_fullness_init; + dvb_demux->decoder_fullness_wait = mpq_dmx_decoder_fullness_wait; + dvb_demux->decoder_fullness_abort = mpq_dmx_decoder_fullness_abort; + dvb_demux->decoder_buffer_status = mpq_dmx_decoder_buffer_status; + dvb_demux->reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer; + dvb_demux->set_cipher_op = mpq_dmx_set_cipher_ops; + dvb_demux->oob_command = mpq_dmx_oob_command; + dvb_demux->convert_ts = mpq_dmx_convert_tts; + dvb_demux->flush_decoder_buffer = NULL; + + /* Initialize dvb_demux object */ + ret = dvb_dmx_init(dvb_demux); + if (ret) { + MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed, ret=%d\n", + __func__, ret); + goto init_failed; + } + + /* Now initialize the dmx-dev object */ + mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES; + mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx; + mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX; + + mpq_demux->dmxdev.demux->set_source = mpq_sw_dmx_set_source; + mpq_demux->dmxdev.demux->get_stc = NULL; + mpq_demux->dmxdev.demux->get_caps = mpq_sw_dmx_get_caps; + mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer; + mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer; + mpq_demux->dmxdev.demux->write = mpq_dmx_write; + ret = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter); + if (ret) { + MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed, ret=%d\n", + __func__, ret); + goto init_failed_dmx_release; + } + + /* Extend dvb-demux debugfs with mpq demux statistics. */ + mpq_dmx_init_debugfs_entries(mpq_demux); + + return 0; + +init_failed_dmx_release: + dvb_dmx_release(dvb_demux); +init_failed: + return ret; +} + +static int __init mpq_dmx_sw_plugin_init(void) +{ + return mpq_dmx_plugin_init(mpq_sw_dmx_init); +} + +static void __exit mpq_dmx_sw_plugin_exit(void) +{ + mpq_dmx_plugin_exit(); +} + + +module_init(mpq_dmx_sw_plugin_init); +module_exit(mpq_dmx_sw_plugin_exit); + +MODULE_DESCRIPTION("Qualcomm Technologies Inc. demux software plugin"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c new file mode 100644 index 000000000000..da7eccec14af --- /dev/null +++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c @@ -0,0 +1,1984 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include "mpq_dvb_debug.h" +#include "mpq_dmx_plugin_common.h" + +#define TSIF_COUNT 2 + +/* Max number of PID filters */ +#define TSPP_MAX_PID_FILTER_NUM 128 + +/* Max number of user-defined HW PID filters */ +#define TSPP_MAX_HW_PID_FILTER_NUM 15 + +/* HW index of the last entry in the TSPP HW filter table */ +#define TSPP_LAST_HW_FILTER_INDEX 15 + +/* Number of filters required to accept all packets except NULL packets */ +#define TSPP_BLOCK_NULLS_FILTERS_NUM 13 + +/* Max number of section filters */ +#define TSPP_MAX_SECTION_FILTER_NUM 128 + +/* For each TSIF we use a single pipe holding the data after PID filtering */ +#define TSPP_CHANNEL 0 + +/* the channel_id set to TSPP driver based on TSIF number and channel type */ +#define TSPP_CHANNEL_ID(tsif, ch) ((tsif << 1) + ch) +#define TSPP_GET_TSIF_NUM(ch_id) (ch_id >> 1) + +/* mask that set to care for all bits in pid filter */ +#define TSPP_PID_MASK 0x1FFF + +/* dvb-demux defines pid 0x2000 as full capture pid */ +#define TSPP_PASS_THROUGH_PID 0x2000 + +/* NULL packets pid */ +#define TSPP_NULL_PACKETS_PID 0x1FFF + +#define TSPP_RAW_TTS_SIZE 192 +#define TSPP_RAW_SIZE 188 + +#define MAX_BAM_DESCRIPTOR_SIZE (32 * 1024 - 1) + +#define MAX_BAM_DESCRIPTOR_COUNT (8 * 1024 - 2) + +#define TSPP_BUFFER_SIZE (500 * 1024) /* 500KB */ + +#define TSPP_DEFAULT_DESCRIPTOR_SIZE (TSPP_RAW_TTS_SIZE) + +#define TSPP_BUFFER_COUNT(buffer_size) \ + ((buffer_size) / tspp_desc_size) + +/* When TSPP notifies demux that new packets are received. + * Using max descriptor size (170 packets). + * Assuming 20MBit/sec stream, with 170 packets + * per descriptor there would be about 82 descriptors, + * Meaning about 82 notifications per second. + */ +#define TSPP_NOTIFICATION_SIZE(desc_size) \ + (MAX_BAM_DESCRIPTOR_SIZE / (desc_size)) + +/* Channel timeout in msec */ +#define TSPP_CHANNEL_TIMEOUT 100 + +enum mem_buffer_allocation_mode { + MPQ_DMX_TSPP_INTERNAL_ALLOC = 0, + MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC = 1 +}; + +/* module parameters for load time configuration */ +static int allocation_mode = MPQ_DMX_TSPP_INTERNAL_ALLOC; +static int tspp_out_buffer_size = TSPP_BUFFER_SIZE; +static int tspp_desc_size = TSPP_DEFAULT_DESCRIPTOR_SIZE; +static int tspp_notification_size = + TSPP_NOTIFICATION_SIZE(TSPP_DEFAULT_DESCRIPTOR_SIZE); +static int tspp_channel_timeout = TSPP_CHANNEL_TIMEOUT; +static int tspp_out_ion_heap = ION_QSECOM_HEAP_ID; + +module_param(allocation_mode, int, 0644); +module_param(tspp_out_buffer_size, int, 0644); +module_param(tspp_desc_size, int, 0644); +module_param(tspp_notification_size, int, 0644); +module_param(tspp_channel_timeout, int, 0644); +module_param(tspp_out_ion_heap, int, 0644); + +/* The following structure hold singleton information + * required for dmx implementation on top of TSPP. + */ +static struct +{ + /* Information for each TSIF input processing */ + struct { + /* + * TSPP pipe holding all TS packets after PID filtering. + * The following is reference count for number of feeds + * allocated on that pipe. 
+ */ + int channel_ref; + + /* Counter for data notifications on the pipe */ + atomic_t data_cnt; + + /* flag to indicate control operation is in progress */ + atomic_t control_op; + + /* ION handle used for TSPP data buffer allocation */ + struct ion_handle *ch_mem_heap_handle; + + /* TSPP data buffer heap virtual base address */ + void *ch_mem_heap_virt_base; + + /* TSPP data buffer heap physical base address */ + ion_phys_addr_t ch_mem_heap_phys_base; + + /* Buffer allocation index */ + int buff_index; + + /* Number of buffers */ + u32 buffer_count; + + /* + * Array holding the IDs of the TSPP buffer descriptors in the + * current aggregate, in order to release these descriptors at + * the end of processing. + */ + int *aggregate_ids; + + /* + * Holds PIDs of allocated filters along with + * how many feeds are opened on the same PID. For + * TSPP HW filters, holds also the filter table index. + * When pid == -1, the entry is free. + */ + struct { + int pid; + int ref_count; + int hw_index; + } filters[TSPP_MAX_PID_FILTER_NUM]; + + /* Indicates available/allocated filter table indexes */ + int hw_indexes[TSPP_MAX_HW_PID_FILTER_NUM]; + + /* Number of currently allocated PID filters */ + u16 current_filter_count; + + /* + * Flag to indicate whether the user added a filter to accept + * NULL packets (PID = 0x1FFF) + */ + int pass_nulls_flag; + + /* + * Flag to indicate whether the user added a filter to accept + * all packets (PID = 0x2000) + */ + int pass_all_flag; + + /* + * Flag to indicate whether the filter that accepts + * all packets has already been added and is + * currently enabled + */ + int accept_all_filter_exists_flag; + + /* Thread processing TS packets from TSPP */ + struct task_struct *thread; + wait_queue_head_t wait_queue; + + /* TSIF alias */ + char name[TSIF_NAME_LENGTH]; + + /* Pointer to the demux connected to this TSIF */ + struct mpq_demux *mpq_demux; + + /* Mutex protecting the data-structure */ + struct mutex mutex; + } tsif[TSIF_COUNT]; + + /* ION client used for TSPP data buffer allocation */ + struct ion_client *ion_client; +} mpq_dmx_tspp_info; + +static void *tspp_mem_allocator(int channel_id, u32 size, + phys_addr_t *phys_base, void *user) +{ + void *virt_addr = NULL; + int i = TSPP_GET_TSIF_NUM(channel_id); + + if (mpq_dmx_tspp_info.tsif[i].buff_index == + mpq_dmx_tspp_info.tsif[i].buffer_count) + return NULL; + + virt_addr = + (mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base + + (mpq_dmx_tspp_info.tsif[i].buff_index * size)); + + *phys_base = + (mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base + + (mpq_dmx_tspp_info.tsif[i].buff_index * size)); + + mpq_dmx_tspp_info.tsif[i].buff_index++; + + return virt_addr; +} + +static void tspp_mem_free(int channel_id, u32 size, + void *virt_base, phys_addr_t phys_base, void *user) +{ + int i = TSPP_GET_TSIF_NUM(channel_id); + + /* + * actual buffer heap free is done in mpq_dmx_tspp_plugin_exit(). + * we update index here, so if this function is called repetitively + * for all the buffers, then afterwards tspp_mem_allocator() + * can be called again. + * Note: it would be incorrect to call tspp_mem_allocator() + * a few times, then call tspp_mem_free(), then call + * tspp_mem_allocator() again. + */ + if (mpq_dmx_tspp_info.tsif[i].buff_index > 0) + mpq_dmx_tspp_info.tsif[i].buff_index--; +} + +/** + * Returns a free HW filter index that can be used. 
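+ * The returned index is used as the filter priority in the TSPP HW
+ * filter table.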
+ * + * @tsif: The TSIF to allocate filter from + * + * Return HW filter index or -ENOMEM if no filters available + */ +static int mpq_tspp_allocate_hw_filter_index(int tsif) +{ + int i; + + for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) { + if (mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] == 0) { + mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] = 1; + return i; + } + } + + return -ENOMEM; +} + +/** + * Releases a HW filter index for future reuse. + * + * @tsif: The TSIF from which the filter should be released + * @hw_index: The HW index to release + * + */ +static inline void mpq_tspp_release_hw_filter_index(int tsif, int hw_index) +{ + if ((hw_index >= 0) && (hw_index < TSPP_MAX_HW_PID_FILTER_NUM)) + mpq_dmx_tspp_info.tsif[tsif].hw_indexes[hw_index] = 0; +} + + +/** + * Returns a free filter slot that can be used. + * + * @tsif: The TSIF to allocate filter from + * + * Return filter index or -ENOMEM if no filters available + */ +static int mpq_tspp_get_free_filter_slot(int tsif) +{ + int slot; + + for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++) + if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1) + return slot; + + return -ENOMEM; +} + +/** + * Returns filter index of specific pid. + * + * @tsif: The TSIF to which the pid is allocated + * @pid: The pid to search for + * + * Return filter index or -1 if no filter available + */ +static int mpq_tspp_get_filter_slot(int tsif, int pid) +{ + int slot; + + for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++) + if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == pid) + return slot; + + return -EINVAL; +} + +/** + * mpq_dmx_tspp_swfilter_desc - helper function + * + * Takes a tspp buffer descriptor and send it to the SW filter for demuxing, + * one TS packet at a time. + * + * @mpq_demux - mpq demux object + * @tspp_data_desc - tspp buffer descriptor + */ +static inline void mpq_dmx_tspp_swfilter_desc(struct mpq_demux *mpq_demux, + const struct tspp_data_descriptor *tspp_data_desc) +{ + u32 notif_size; + int i; + + notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE; + for (i = 0; i < notif_size; i++) + dvb_dmx_swfilter_packet(&mpq_demux->demux, + ((u8 *)tspp_data_desc->virt_base) + + i * TSPP_RAW_TTS_SIZE, + ((u8 *)tspp_data_desc->virt_base) + + i * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE); +} + +/** + * Demux TS packets from TSPP by secure-demux. + * The function assumes the buffer is physically contiguous + * and that TSPP descriptors are continuous in memory. 
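+ * All pending descriptors are aggregated into a single call to
+ * mpq_sdmx_process() and are released back to TSPP once processing
+ * is done.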
+ * + * @tsif: The TSIF interface to process its packets + * @channel_id: the TSPP output pipe with the TS packets + */ +static void mpq_dmx_tspp_aggregated_process(int tsif, int channel_id) +{ + const struct tspp_data_descriptor *tspp_data_desc; + struct mpq_demux *mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux; + struct sdmx_buff_descr input; + size_t aggregate_len = 0; + size_t aggregate_count = 0; + phys_addr_t buff_start_addr_phys; + phys_addr_t buff_current_addr_phys = 0; + u32 notif_size; + int i; + + while ((tspp_data_desc = tspp_get_buffer(0, channel_id)) != NULL) { + if (aggregate_count == 0) + buff_current_addr_phys = tspp_data_desc->phys_base; + notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE; + mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[aggregate_count] = + tspp_data_desc->id; + aggregate_len += tspp_data_desc->size; + aggregate_count++; + mpq_demux->hw_notification_size += notif_size; + + /* Let SW filter process only if it might be relevant */ + if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds) + mpq_dmx_tspp_swfilter_desc(mpq_demux, tspp_data_desc); + + } + + if (!aggregate_count) + return; + + buff_start_addr_phys = + mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base; + + input.base_addr = (u64)buff_start_addr_phys; + input.size = mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size; + + if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) { + MPQ_DVB_DBG_PRINT( + "%s: SDMX Processing %zu descriptors: %zu bytes at start address 0x%llx, read offset %d\n", + __func__, aggregate_count, aggregate_len, + input.base_addr, + (int)(buff_current_addr_phys - buff_start_addr_phys)); + + mpq_sdmx_process(mpq_demux, &input, aggregate_len, + buff_current_addr_phys - buff_start_addr_phys, + TSPP_RAW_TTS_SIZE); + } + + for (i = 0; i < aggregate_count; i++) + tspp_release_buffer(0, channel_id, + mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[i]); +} + + +/** + * Demux thread function handling data from specific TSIF. 
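+ * The thread waits for data notifications from TSPP and routes the
+ * received buffers either to the aggregated secure-demux path or to
+ * the regular SW filter, depending on the allocation mode and whether
+ * the secure demux application is loaded.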
+ * + * @arg: TSIF number + */ +static int mpq_dmx_tspp_thread(void *arg) +{ + int tsif = (int)(uintptr_t)arg; + struct mpq_demux *mpq_demux; + const struct tspp_data_descriptor *tspp_data_desc; + atomic_t *data_cnt; + u32 notif_size; + int channel_id; + int ref_count; + int ret; + + do { + ret = wait_event_interruptible( + mpq_dmx_tspp_info.tsif[tsif].wait_queue, + (atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) && + !atomic_read(&mpq_dmx_tspp_info.tsif[tsif].control_op)) + || kthread_should_stop()); + + if ((ret < 0) || kthread_should_stop()) { + MPQ_DVB_ERR_PRINT("%s: exit\n", __func__); + break; + } + + /* Lock against the TSPP filters data-structure */ + if (mutex_lock_interruptible( + &mpq_dmx_tspp_info.tsif[tsif].mutex)) + return -ERESTARTSYS; + + channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL); + + ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref; + data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt; + + /* Make sure channel is still active */ + if (ref_count == 0) { + mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex); + continue; + } + + atomic_dec(data_cnt); + + mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux; + mpq_demux->hw_notification_size = 0; + + if (allocation_mode != MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC && + mpq_sdmx_is_loaded()) + pr_err_once( + "%s: TSPP Allocation mode does not support secure demux.\n", + __func__); + + if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC && + mpq_sdmx_is_loaded()) { + mpq_dmx_tspp_aggregated_process(tsif, channel_id); + } else { + /* + * Go through all filled descriptors + * and perform demuxing on them + */ + do { + if (atomic_read(&mpq_dmx_tspp_info.tsif[tsif]. + control_op)) { + /* restore for next iteration */ + atomic_inc(data_cnt); + break; + } + tspp_data_desc = tspp_get_buffer(0, channel_id); + if (!tspp_data_desc) + break; + + notif_size = tspp_data_desc->size / + TSPP_RAW_TTS_SIZE; + mpq_demux->hw_notification_size += notif_size; + + mpq_dmx_tspp_swfilter_desc(mpq_demux, + tspp_data_desc); + /* + * Notify TSPP that the buffer + * is no longer needed + */ + tspp_release_buffer(0, channel_id, + tspp_data_desc->id); + } while (1); + } + + if (mpq_demux->hw_notification_size && + (mpq_demux->hw_notification_size < + mpq_demux->hw_notification_min_size)) + mpq_demux->hw_notification_min_size = + mpq_demux->hw_notification_size; + + mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex); + } while (1); + + return 0; +} + +/** + * Callback function from TSPP when new data is ready. + * + * @channel_id: Channel with new TS packets + * @user: user-data holding TSIF number + */ +static void mpq_tspp_callback(int channel_id, void *user) +{ + int tsif = (int)(uintptr_t)user; + struct mpq_demux *mpq_demux; + + /* Save statistics on TSPP notifications */ + mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux; + mpq_dmx_update_hw_statistics(mpq_demux); + + atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].data_cnt); + wake_up(&mpq_dmx_tspp_info.tsif[tsif].wait_queue); +} + +/** + * Free memory of channel output of specific TSIF. + * + * @tsif: The TSIF id to which memory should be freed. + */ +static void mpq_dmx_channel_mem_free(int tsif) +{ + MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif); + + mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base = 0; + + if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) { + if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif]. + ch_mem_heap_virt_base)) + ion_unmap_kernel(mpq_dmx_tspp_info.ion_client, + mpq_dmx_tspp_info.tsif[tsif]. 
+ ch_mem_heap_handle); + + ion_free(mpq_dmx_tspp_info.ion_client, + mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle); + } + + mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base = NULL; + mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle = NULL; +} + +/** + * Allocate memory for channel output of specific TSIF. + * + * @tsif: The TSIF id to which memory should be allocated. + * + * Return error status + */ +static int mpq_dmx_channel_mem_alloc(int tsif) +{ + int result; + size_t len; + + MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif); + + mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle = + ion_alloc(mpq_dmx_tspp_info.ion_client, + (mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size), + SZ_4K, + ION_HEAP(tspp_out_ion_heap), + 0); /* non-cached */ + + if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) { + MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n", __func__); + mpq_dmx_channel_mem_free(tsif); + return -ENOMEM; + } + + /* save virtual base address of heap */ + mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base = + ion_map_kernel(mpq_dmx_tspp_info.ion_client, + mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle); + if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif]. + ch_mem_heap_virt_base)) { + MPQ_DVB_ERR_PRINT("%s: ion_map_kernel() failed\n", __func__); + mpq_dmx_channel_mem_free(tsif); + return -ENOMEM; + } + + /* save physical base address of heap */ + result = ion_phys(mpq_dmx_tspp_info.ion_client, + mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle, + &(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base), &len); + if (result < 0) { + MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n", __func__); + mpq_dmx_channel_mem_free(tsif); + return -ENOMEM; + } + + return 0; +} + +/** + * Add a filter to accept all packets as the last entry + * of the TSPP HW filter table. + * + * @channel_id: Channel ID number. + * @source: TSPP source. + * + * Return error status + */ +static int mpq_tspp_add_accept_all_filter(int channel_id, + enum tspp_source source) +{ + struct tspp_filter tspp_filter; + int tsif = TSPP_GET_TSIF_NUM(channel_id); + int ret; + + MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n", + __func__, channel_id, source); + + if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag) { + MPQ_DVB_DBG_PRINT("%s: accept all filter already exists\n", + __func__); + return 0; + } + + /* This filter will be the last entry in the table */ + tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX; + /* Pass all pids - set mask to 0 */ + tspp_filter.pid = 0; + tspp_filter.mask = 0; + /* + * Include TTS in RAW packets, if you change this to + * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE + * accordingly. + */ + tspp_filter.mode = TSPP_MODE_RAW; + tspp_filter.source = source; + tspp_filter.decrypt = 0; + + ret = tspp_add_filter(0, channel_id, &tspp_filter); + if (!ret) { + mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 1; + MPQ_DVB_DBG_PRINT( + "%s: accept all filter added successfully\n", + __func__); + } + + return ret; +} + +/** + * Remove the filter that accepts all packets from the last entry + * of the TSPP HW filter table. + * + * @channel_id: Channel ID number. + * @source: TSPP source. 
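+ *
+ * Only the entry at TSPP_LAST_HW_FILTER_INDEX is touched; the call is a
+ * no-op when no accept-all filter is currently installed.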
+ * + * Return error status + */ +static int mpq_tspp_remove_accept_all_filter(int channel_id, + enum tspp_source source) +{ + struct tspp_filter tspp_filter; + int tsif = TSPP_GET_TSIF_NUM(channel_id); + int ret; + + MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n", + __func__, channel_id, source); + + if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag == 0) { + MPQ_DVB_DBG_PRINT("%s: accept all filter doesn't exist\n", + __func__); + return 0; + } + + tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX; + + ret = tspp_remove_filter(0, channel_id, &tspp_filter); + if (!ret) { + mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 0; + MPQ_DVB_DBG_PRINT( + "%s: accept all filter removed successfully\n", + __func__); + } + + return ret; +} + +/** + * Add filters designed to accept all packets except NULL packets, i.e. + * packets with PID = 0x1FFF. + * This function is called after user-defined filters were removed, + * so it assumes that the first 13 HW filters in the TSPP filter + * table are free for use. + * + * @channel_id: Channel ID number. + * @source: TSPP source. + * + * Return 0 on success, -1 otherwise + */ +static int mpq_tspp_add_null_blocking_filters(int channel_id, + enum tspp_source source) +{ + struct tspp_filter tspp_filter; + int ret = 0; + int i, j; + u16 full_pid_mask = 0x1FFF; + u8 mask_shift; + u8 pid_shift; + int tsif = TSPP_GET_TSIF_NUM(channel_id); + + MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n", + __func__, channel_id, source); + + /* + * Add a total of 13 filters that will accept packets with + * every PID other than 0x1FFF, which is the NULL PID. + * + * Filter 0: accept all PIDs with bit 12 clear, i.e. + * PID = 0x0000 .. 0x0FFF (4096 PIDs in total): + * Mask = 0x1000, PID = 0x0000. + * + * Filter 12: Accept PID 0x1FFE: + * Mask = 0x1FFF, PID = 0x1FFE. + * + * In general: For N = 0 .. 12, + * Filter : accept all PIDs with MSBits set and bit clear. + * Filter Mask = N+1 MSBits set, others clear. + * Filter PID = MSBits set, others clear. + */ + + /* + * Include TTS in RAW packets, if you change this to + * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE + * accordingly. + */ + tspp_filter.mode = TSPP_MODE_RAW; + tspp_filter.source = source; + tspp_filter.decrypt = 0; + + for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) { + tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif); + if (tspp_filter.priority != i) { + MPQ_DVB_ERR_PRINT( + "%s: got unexpected HW index %d, expected %d\n", + __func__, tspp_filter.priority, i); + ret = -1; + break; + } + mask_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - 1 - i); + pid_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - i); + tspp_filter.mask = + ((full_pid_mask >> mask_shift) << mask_shift); + tspp_filter.pid = ((full_pid_mask >> pid_shift) << pid_shift); + + if (tspp_add_filter(0, channel_id, &tspp_filter)) { + ret = -1; + break; + } + } + + if (ret) { + /* cleanup on failure */ + for (j = 0; j < i; j++) { + tspp_filter.priority = j; + mpq_tspp_release_hw_filter_index(tsif, j); + tspp_remove_filter(0, channel_id, &tspp_filter); + } + } else { + MPQ_DVB_DBG_PRINT( + "%s: NULL blocking filters added successfully\n", + __func__); + } + + return ret; +} + +/** + * Remove filters designed to accept all packets except NULL packets, i.e. + * packets with PID = 0x1FFF. + * + * @channel_id: Channel ID number. + * + * @source: TSPP source. 
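+ *
+ * All TSPP_BLOCK_NULLS_FILTERS_NUM entries added by
+ * mpq_tspp_add_null_blocking_filters() are removed and their HW filter
+ * indexes are released.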
+ * + * Return 0 on success, -1 otherwise + */ +static int mpq_tspp_remove_null_blocking_filters(int channel_id, + enum tspp_source source) +{ + struct tspp_filter tspp_filter; + int tsif = TSPP_GET_TSIF_NUM(channel_id); + int ret = 0; + int i; + + MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n", + __func__, channel_id, source); + + for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) { + tspp_filter.priority = i; + if (tspp_remove_filter(0, channel_id, &tspp_filter)) { + MPQ_DVB_ERR_PRINT("%s: failed to remove filter %d\n", + __func__, i); + ret = -1; + } + + mpq_tspp_release_hw_filter_index(tsif, i); + } + + return ret; +} + +/** + * Add all current user-defined filters (up to 15) as HW filters + * + * @channel_id: Channel ID number. + * + * @source: TSPP source. + * + * Return 0 on success, -1 otherwise + */ +static int mpq_tspp_add_all_user_filters(int channel_id, + enum tspp_source source) +{ + struct tspp_filter tspp_filter; + int tsif = TSPP_GET_TSIF_NUM(channel_id); + int slot; + u16 added_count = 0; + u16 total_filters_count = 0; + + MPQ_DVB_DBG_PRINT("%s: executed\n", __func__); + + /* + * Include TTS in RAW packets, if you change this to + * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE + * accordingly. + */ + tspp_filter.mode = TSPP_MODE_RAW; + tspp_filter.source = source; + tspp_filter.decrypt = 0; + + for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++) { + if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1) + continue; + + /* + * count total number of user filters to verify that it is + * exactly TSPP_MAX_HW_PID_FILTER_NUM as expected. + */ + total_filters_count++; + + if (added_count > TSPP_MAX_HW_PID_FILTER_NUM) + continue; + + tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif); + + if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == + TSPP_PASS_THROUGH_PID) { + /* pass all pids */ + tspp_filter.pid = 0; + tspp_filter.mask = 0; + } else { + tspp_filter.pid = + mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid; + tspp_filter.mask = TSPP_PID_MASK; + } + + MPQ_DVB_DBG_PRINT( + "%s: adding HW filter, PID = %d, mask = 0x%X, index = %d\n", + __func__, tspp_filter.pid, tspp_filter.mask, + tspp_filter.priority); + + if (!tspp_add_filter(0, channel_id, &tspp_filter)) { + mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = + tspp_filter.priority; + added_count++; + } else { + MPQ_DVB_ERR_PRINT("%s: tspp_add_filter failed\n", + __func__); + } + } + + if ((added_count != TSPP_MAX_HW_PID_FILTER_NUM) || + (added_count != total_filters_count)) + return -EINVAL; + + return 0; +} + +/** + * Remove all user-defined HW filters + * + * @channel_id: Channel ID number. + * + * @source: TSPP source. + * + * Return 0 on success, -1 otherwise + */ +static int mpq_tspp_remove_all_user_filters(int channel_id, + enum tspp_source source) +{ + struct tspp_filter tspp_filter; + int ret = 0; + int tsif = TSPP_GET_TSIF_NUM(channel_id); + int i; + + MPQ_DVB_DBG_PRINT("%s: executed\n", __func__); + + for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) { + tspp_filter.priority = i; + MPQ_DVB_DBG_PRINT("%s: Removing HW filter %d\n", + __func__, tspp_filter.priority); + if (tspp_remove_filter(0, channel_id, &tspp_filter)) + ret = -1; + + mpq_tspp_release_hw_filter_index(tsif, i); + mpq_dmx_tspp_info.tsif[tsif].filters[i].hw_index = -1; + } + + return ret; +} + +/** + * Configure TSPP channel to filter the PID of new feed. 
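+ *
+ * While fewer than TSPP_MAX_HW_PID_FILTER_NUM PIDs are active, each PID
+ * gets its own TSPP HW filter. Once that limit is crossed, the per-PID HW
+ * filters are replaced with NULL-blocking filters (plus an accept-all
+ * filter when needed) and PID filtering moves to software.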
+ * + * @feed: The feed to configure the channel with + * + * Return error status + * + * The function checks if the new PID can be added to an already + * allocated channel, if not, a new channel is allocated and configured. + */ +static int mpq_tspp_dmx_add_channel(struct dvb_demux_feed *feed) +{ + struct mpq_demux *mpq_demux = feed->demux->priv; + struct tspp_select_source tspp_source; + struct tspp_filter tspp_filter; + int tsif; + int tsif_mode = mpq_dmx_get_param_tsif_mode(); + int ret = 0; + int slot; + int channel_id; + int *channel_ref_count; + u32 buffer_size; + int restore_user_filters = 0; + int remove_accept_all_filter = 0; + int remove_null_blocking_filters = 0; + size_t agg_size; + + tspp_source.clk_inverse = mpq_dmx_get_param_clock_inv(); + tspp_source.data_inverse = 0; + tspp_source.sync_inverse = 0; + tspp_source.enable_inverse = 0; + + MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid); + + switch (tsif_mode) { + case 1: + tspp_source.mode = TSPP_TSIF_MODE_1; + break; + case 2: + tspp_source.mode = TSPP_TSIF_MODE_2; + break; + default: + tspp_source.mode = TSPP_TSIF_MODE_LOOPBACK; + break; + } + + /* determine the TSIF we are reading from */ + if (mpq_demux->source == DMX_SOURCE_FRONT0) { + tsif = 0; + tspp_source.source = TSPP_SOURCE_TSIF0; + } else if (mpq_demux->source == DMX_SOURCE_FRONT1) { + tsif = 1; + tspp_source.source = TSPP_SOURCE_TSIF1; + } else { + /* invalid source */ + MPQ_DVB_ERR_PRINT( + "%s: invalid input source (%d)\n", + __func__, + mpq_demux->source); + + return -EINVAL; + } + + atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op); + if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) { + atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op); + return -ERESTARTSYS; + } + + /* + * It is possible that this PID was already requested before. + * Can happen if we play and record same PES or PCR + * piggypacked on video packet. + */ + slot = mpq_tspp_get_filter_slot(tsif, feed->pid); + if (slot >= 0) { + /* PID already configured */ + mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++; + goto out; + } + + + channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL); + channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref; + + /* + * Recalculate 'tspp_notification_size' and buffer count in case + * 'tspp_desc_size' or 'tspp_out_buffer_size' parameters have changed. 
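+	 * The resulting descriptor count is clamped to
+	 * MAX_BAM_DESCRIPTOR_COUNT.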
+ */ + buffer_size = tspp_desc_size; + tspp_notification_size = TSPP_NOTIFICATION_SIZE(tspp_desc_size); + mpq_dmx_tspp_info.tsif[tsif].buffer_count = + TSPP_BUFFER_COUNT(tspp_out_buffer_size); + if (mpq_dmx_tspp_info.tsif[tsif].buffer_count > + MAX_BAM_DESCRIPTOR_COUNT) + mpq_dmx_tspp_info.tsif[tsif].buffer_count = + MAX_BAM_DESCRIPTOR_COUNT; + + /* check if required TSPP pipe is already allocated or not */ + if (*channel_ref_count == 0) { + if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) { + agg_size = mpq_dmx_tspp_info.tsif[tsif].buffer_count * + sizeof(int); + mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = + vzalloc(agg_size); + if (!mpq_dmx_tspp_info.tsif[tsif].aggregate_ids) { + MPQ_DVB_ERR_PRINT( + "%s: Failed to allocate memory for buffer descriptors aggregation\n", + __func__); + ret = -ENOMEM; + goto out; + } + + ret = mpq_dmx_channel_mem_alloc(tsif); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_channel_mem_alloc(%d) failed (%d)\n", + __func__, + channel_id, + ret); + + goto add_channel_failed; + } + } + + ret = tspp_open_channel(0, channel_id); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: tspp_open_channel(%d) failed (%d)\n", + __func__, + channel_id, + ret); + + goto add_channel_failed; + } + + /* set TSPP source */ + ret = tspp_open_stream(0, channel_id, &tspp_source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: tspp_select_source(%d,%d) failed (%d)\n", + __func__, + channel_id, + tspp_source.source, + ret); + + goto add_channel_close_ch; + } + + /* register notification on TS packets */ + tspp_register_notification(0, + channel_id, + mpq_tspp_callback, + (void *)(uintptr_t)tsif, + tspp_channel_timeout); + + /* + * Register allocator and provide allocation function + * that allocates from contiguous memory so that we can have + * big notification size, smallest descriptor, and still provide + * TZ with single big buffer based on notification size. 
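+		 * In contiguous mode the descriptors are expected to be carved
+		 * out of the ION region set up in mpq_dmx_channel_mem_alloc(),
+		 * so the whole output area can be handed to the secure demux as
+		 * a single physical buffer (see
+		 * mpq_dmx_tspp_aggregated_process()).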
+ */ + if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) { + ret = tspp_allocate_buffers(0, channel_id, + mpq_dmx_tspp_info.tsif[tsif].buffer_count, + buffer_size, tspp_notification_size, + tspp_mem_allocator, tspp_mem_free, NULL); + } else { + ret = tspp_allocate_buffers(0, channel_id, + mpq_dmx_tspp_info.tsif[tsif].buffer_count, + buffer_size, tspp_notification_size, + NULL, NULL, NULL); + } + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: tspp_allocate_buffers(%d) failed (%d)\n", + __func__, + channel_id, + ret); + + goto add_channel_unregister_notif; + } + + mpq_dmx_tspp_info.tsif[tsif].mpq_demux = mpq_demux; + } + + /* add new PID to the existing pipe */ + slot = mpq_tspp_get_free_filter_slot(tsif); + if (slot < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_get_free_filter_slot(%d) failed\n", + __func__, tsif); + + goto add_channel_unregister_notif; + } + + if (feed->pid == TSPP_PASS_THROUGH_PID) + mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1; + else if (feed->pid == TSPP_NULL_PACKETS_PID) + mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1; + + mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid; + mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++; + + tspp_filter.priority = -1; + + if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count < + TSPP_MAX_HW_PID_FILTER_NUM) { + /* HW filtering mode */ + tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif); + if (tspp_filter.priority < 0) + goto add_channel_free_filter_slot; + + if (feed->pid == TSPP_PASS_THROUGH_PID) { + /* pass all pids */ + tspp_filter.pid = 0; + tspp_filter.mask = 0; + } else { + tspp_filter.pid = feed->pid; + tspp_filter.mask = TSPP_PID_MASK; + } + + /* + * Include TTS in RAW packets, if you change this to + * TSPP_MODE_RAW_NO_SUFFIX you must also change + * TSPP_RAW_TTS_SIZE accordingly. 
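+		 * RAW mode with the TTS suffix produces packets of
+		 * TSPP_RAW_TTS_SIZE bytes (a 188-byte TS packet plus a 4-byte
+		 * time stamp), matching the 192-byte alignment reported in
+		 * mpq_tspp_dmx_get_caps().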
+ */ + tspp_filter.mode = TSPP_MODE_RAW; + tspp_filter.source = tspp_source.source; + tspp_filter.decrypt = 0; + ret = tspp_add_filter(0, channel_id, &tspp_filter); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: tspp_add_filter(%d) failed (%d)\n", + __func__, + channel_id, + ret); + + goto add_channel_free_filter_slot; + } + mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = + tspp_filter.priority; + + MPQ_DVB_DBG_PRINT( + "%s: HW filtering mode: added TSPP HW filter, PID = %d, mask = 0x%X, index = %d\n", + __func__, tspp_filter.pid, tspp_filter.mask, + tspp_filter.priority); + } else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count == + TSPP_MAX_HW_PID_FILTER_NUM) { + /* Crossing the threshold - from HW to SW filtering mode */ + + /* Add a temporary filter to accept all packets */ + ret = mpq_tspp_add_accept_all_filter(channel_id, + tspp_source.source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n", + __func__, channel_id, tspp_source.source); + + goto add_channel_free_filter_slot; + } + + /* Remove all existing user filters */ + ret = mpq_tspp_remove_all_user_filters(channel_id, + tspp_source.source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_remove_all_user_filters(%d, %d) failed\n", + __func__, channel_id, tspp_source.source); + + restore_user_filters = 1; + remove_accept_all_filter = 1; + + goto add_channel_free_filter_slot; + } + + /* Add HW filters to block NULL packets */ + ret = mpq_tspp_add_null_blocking_filters(channel_id, + tspp_source.source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_add_null_blocking_filters(%d, %d) failed\n", + __func__, channel_id, tspp_source.source); + + restore_user_filters = 1; + remove_accept_all_filter = 1; + + goto add_channel_free_filter_slot; + } + + /* Remove filters that accepts all packets, if necessary */ + if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) && + (mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) { + + ret = mpq_tspp_remove_accept_all_filter(channel_id, + tspp_source.source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n", + __func__, channel_id, + tspp_source.source); + + remove_null_blocking_filters = 1; + restore_user_filters = 1; + remove_accept_all_filter = 1; + + goto add_channel_free_filter_slot; + } + } + } else { + /* Already working in SW filtering mode */ + if (mpq_dmx_tspp_info.tsif[tsif].pass_all_flag || + mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag) { + + ret = mpq_tspp_add_accept_all_filter(channel_id, + tspp_source.source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n", + __func__, channel_id, + tspp_source.source); + + goto add_channel_free_filter_slot; + } + } + } + + (*channel_ref_count)++; + mpq_dmx_tspp_info.tsif[tsif].current_filter_count++; + + MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n", + __func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count); + + goto out; + +add_channel_free_filter_slot: + /* restore internal database state */ + mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1; + mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--; + + /* release HW index if we allocated one */ + if (tspp_filter.priority >= 0) { + mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1; + mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority); + } + + /* restore HW filter table state if necessary */ + if (remove_null_blocking_filters) + mpq_tspp_remove_null_blocking_filters(channel_id, + 
tspp_source.source); + + if (restore_user_filters) + mpq_tspp_add_all_user_filters(channel_id, tspp_source.source); + + if (remove_accept_all_filter) + mpq_tspp_remove_accept_all_filter(channel_id, + tspp_source.source); + + /* restore flags. we can only get here if we changed the flags. */ + if (feed->pid == TSPP_PASS_THROUGH_PID) + mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0; + else if (feed->pid == TSPP_NULL_PACKETS_PID) + mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0; + +add_channel_unregister_notif: + if (*channel_ref_count == 0) { + tspp_unregister_notification(0, channel_id); + tspp_close_stream(0, channel_id); + } +add_channel_close_ch: + if (*channel_ref_count == 0) + tspp_close_channel(0, channel_id); +add_channel_failed: + if (*channel_ref_count == 0) + if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) { + vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids); + mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL; + mpq_dmx_channel_mem_free(tsif); + } + +out: + mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex); + atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op); + return ret; +} + +/** + * Removes filter from TSPP. + * + * @feed: The feed to remove + * + * Return error status + * + * The function checks if this is the only PID allocated within + * the channel, if so, the channel is closed as well. + */ +static int mpq_tspp_dmx_remove_channel(struct dvb_demux_feed *feed) +{ + int tsif; + int ret = 0; + int channel_id; + int slot; + atomic_t *data_cnt; + int *channel_ref_count; + enum tspp_source tspp_source; + struct tspp_filter tspp_filter; + struct mpq_demux *mpq_demux = feed->demux->priv; + int restore_null_blocking_filters = 0; + int remove_accept_all_filter = 0; + int remove_user_filters = 0; + int accept_all_filter_existed = 0; + + MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid); + + /* determine the TSIF we are reading from */ + if (mpq_demux->source == DMX_SOURCE_FRONT0) { + tsif = 0; + tspp_source = TSPP_SOURCE_TSIF0; + } else if (mpq_demux->source == DMX_SOURCE_FRONT1) { + tsif = 1; + tspp_source = TSPP_SOURCE_TSIF1; + } else { + /* invalid source */ + MPQ_DVB_ERR_PRINT( + "%s: invalid input source (%d)\n", + __func__, + mpq_demux->source); + + return -EINVAL; + } + + atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op); + if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) { + atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op); + return -ERESTARTSYS; + } + + channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL); + channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref; + data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt; + + /* check if required TSPP pipe is already allocated or not */ + if (*channel_ref_count == 0) { + /* invalid feed provided as the channel is not allocated */ + MPQ_DVB_ERR_PRINT( + "%s: invalid feed (%d)\n", + __func__, + channel_id); + + ret = -EINVAL; + goto out; + } + + slot = mpq_tspp_get_filter_slot(tsif, feed->pid); + + if (slot < 0) { + /* invalid feed provided as it has no filter allocated */ + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_get_filter_slot failed (%d,%d)\n", + __func__, + feed->pid, + tsif); + + ret = -EINVAL; + goto out; + } + + /* since filter was found, ref_count > 0 so it's ok to decrement it */ + mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--; + + if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count) { + /* + * there are still references to this pid, do not + * remove the filter yet + */ + goto out; + } + + if (feed->pid == TSPP_PASS_THROUGH_PID) + 
mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0; + else if (feed->pid == TSPP_NULL_PACKETS_PID) + mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0; + + mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1; + + if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <= + TSPP_MAX_HW_PID_FILTER_NUM) { + /* staying in HW filtering mode */ + tspp_filter.priority = + mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index; + ret = tspp_remove_filter(0, channel_id, &tspp_filter); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: tspp_remove_filter failed (%d,%d)\n", + __func__, + channel_id, + tspp_filter.priority); + + goto remove_channel_failed_restore_count; + } + mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority); + mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1; + + MPQ_DVB_DBG_PRINT( + "%s: HW filtering mode: Removed TSPP HW filter, PID = %d, index = %d\n", + __func__, feed->pid, tspp_filter.priority); + } else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count == + (TSPP_MAX_HW_PID_FILTER_NUM + 1)) { + /* Crossing the threshold - from SW to HW filtering mode */ + + accept_all_filter_existed = + mpq_dmx_tspp_info.tsif[tsif]. + accept_all_filter_exists_flag; + + /* Add a temporary filter to accept all packets */ + ret = mpq_tspp_add_accept_all_filter(channel_id, + tspp_source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n", + __func__, channel_id, tspp_source); + + goto remove_channel_failed_restore_count; + } + + ret = mpq_tspp_remove_null_blocking_filters(channel_id, + tspp_source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_remove_null_blocking_filters(%d, %d) failed\n", + __func__, channel_id, tspp_source); + + restore_null_blocking_filters = 1; + if (!accept_all_filter_existed) + remove_accept_all_filter = 1; + + goto remove_channel_failed_restore_count; + } + + ret = mpq_tspp_add_all_user_filters(channel_id, + tspp_source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_add_all_user_filters(%d, %d) failed\n", + __func__, channel_id, tspp_source); + + remove_user_filters = 1; + restore_null_blocking_filters = 1; + if (!accept_all_filter_existed) + remove_accept_all_filter = 1; + + goto remove_channel_failed_restore_count; + } + + ret = mpq_tspp_remove_accept_all_filter(channel_id, + tspp_source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n", + __func__, channel_id, tspp_source); + + remove_user_filters = 1; + restore_null_blocking_filters = 1; + if (!accept_all_filter_existed) + remove_accept_all_filter = 1; + + goto remove_channel_failed_restore_count; + } + } else { + /* staying in SW filtering mode */ + if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) && + (mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) { + + ret = mpq_tspp_remove_accept_all_filter(channel_id, + tspp_source); + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n", + __func__, channel_id, + tspp_source); + + goto remove_channel_failed_restore_count; + } + } + } + + mpq_dmx_tspp_info.tsif[tsif].current_filter_count--; + (*channel_ref_count)--; + + MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n", + __func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count); + + if (*channel_ref_count == 0) { + /* channel is not used any more, release it */ + tspp_unregister_notification(0, channel_id); + tspp_close_stream(0, channel_id); + tspp_close_channel(0, channel_id); + atomic_set(data_cnt, 0); + + if (allocation_mode == 
MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) { + vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids); + mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL; + mpq_dmx_channel_mem_free(tsif); + } + } + + goto out; + +remove_channel_failed_restore_count: + /* restore internal database state */ + mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid; + mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++; + + if (remove_user_filters) + mpq_tspp_remove_all_user_filters(channel_id, tspp_source); + + if (restore_null_blocking_filters) + mpq_tspp_add_null_blocking_filters(channel_id, tspp_source); + + if (remove_accept_all_filter) + mpq_tspp_remove_accept_all_filter(channel_id, tspp_source); + + /* restore flags. we can only get here if we changed the flags. */ + if (feed->pid == TSPP_PASS_THROUGH_PID) + mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1; + else if (feed->pid == TSPP_NULL_PACKETS_PID) + mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1; + +out: + mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex); + atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op); + return ret; +} + +static int mpq_tspp_dmx_start_filtering(struct dvb_demux_feed *feed) +{ + int ret; + struct mpq_demux *mpq_demux = feed->demux->priv; + + MPQ_DVB_DBG_PRINT( + "%s(pid=%d) executed\n", + __func__, + feed->pid); + + if (mpq_demux == NULL) { + MPQ_DVB_ERR_PRINT( + "%s: invalid mpq_demux handle\n", + __func__); + + return -EINVAL; + } + + if (mpq_demux->source < DMX_SOURCE_DVR0) { + /* source from TSPP, need to configure tspp pipe */ + ret = mpq_tspp_dmx_add_channel(feed); + + if (ret < 0) { + MPQ_DVB_DBG_PRINT( + "%s: mpq_tspp_dmx_add_channel failed(%d)\n", + __func__, + ret); + return ret; + } + } + + /* + * Always feed sections/PES starting from a new one and + * do not partial transfer data from older one + */ + feed->pusi_seen = 0; + + ret = mpq_dmx_init_mpq_feed(feed); + if (ret) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_init_mpq_feed failed(%d)\n", + __func__, + ret); + if (mpq_demux->source < DMX_SOURCE_DVR0) + mpq_tspp_dmx_remove_channel(feed); + + return ret; + } + + return 0; +} + +static int mpq_tspp_dmx_stop_filtering(struct dvb_demux_feed *feed) +{ + int ret = 0; + struct mpq_demux *mpq_demux = feed->demux->priv; + + MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid); + + mpq_dmx_terminate_feed(feed); + + if (mpq_demux->source < DMX_SOURCE_DVR0) { + /* source from TSPP, need to configure tspp pipe */ + ret = mpq_tspp_dmx_remove_channel(feed); + } + + return ret; +} + +static int mpq_tspp_dmx_write_to_decoder( + struct dvb_demux_feed *feed, + const u8 *buf, + size_t len) +{ + /* + * It is assumed that this function is called once for each + * TS packet of the relevant feed. 
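+	 * The packet is forwarded to the video, audio or PCR handler
+	 * according to the feed type; other feed types are ignored here.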
+ */ + if (len > TSPP_RAW_TTS_SIZE) + MPQ_DVB_DBG_PRINT( + "%s: warnning - len larger than one packet\n", + __func__); + + if (dvb_dmx_is_video_feed(feed)) + return mpq_dmx_process_video_packet(feed, buf); + + if (dvb_dmx_is_audio_feed(feed)) + return mpq_dmx_process_audio_packet(feed, buf); + + if (dvb_dmx_is_pcr_feed(feed)) + return mpq_dmx_process_pcr_packet(feed, buf); + + return 0; +} + +/** + * Returns demux capabilities of TSPPv1 plugin + * + * @demux: demux device + * @caps: Returned capbabilities + * + * Return error code + */ +static int mpq_tspp_dmx_get_caps(struct dmx_demux *demux, + struct dmx_caps *caps) +{ + struct dvb_demux *dvb_demux = demux->priv; + + if ((dvb_demux == NULL) || (caps == NULL)) { + MPQ_DVB_ERR_PRINT( + "%s: invalid parameters\n", + __func__); + + return -EINVAL; + } + + caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA | + DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING | + DMX_CAP_AUDIO_DECODER_DATA | DMX_CAP_AUTO_BUFFER_FLUSH; + caps->recording_max_video_pids_indexed = 0; + caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES; + caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES; + caps->num_pid_filters = TSPP_MAX_PID_FILTER_NUM; + caps->num_section_filters = dvb_demux->filternum; + caps->num_section_filters_per_pid = dvb_demux->filternum; + caps->section_filter_length = DMX_FILTER_SIZE; + caps->num_demod_inputs = TSIF_COUNT; + caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES; + caps->max_bitrate = 192; + caps->demod_input_max_bitrate = 96; + caps->memory_input_max_bitrate = 96; + caps->num_cipher_ops = 1; + + /* TSIF reports 3 bytes STC at unit of 27MHz/256 */ + caps->max_stc = (u64)0xFFFFFF * 256; + + /* Buffer requirements */ + caps->section.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->section.max_buffer_num = 1; + caps->section.max_size = 0xFFFFFFFF; + caps->section.size_alignment = 0; + caps->pes.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->pes.max_buffer_num = 1; + caps->pes.max_size = 0xFFFFFFFF; + caps->pes.size_alignment = 0; + caps->recording_188_tsp.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->recording_188_tsp.max_buffer_num = 1; + caps->recording_188_tsp.max_size = 0xFFFFFFFF; + caps->recording_188_tsp.size_alignment = 0; + caps->recording_192_tsp.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->recording_192_tsp.max_buffer_num = 1; + caps->recording_192_tsp.max_size = 0xFFFFFFFF; + caps->recording_192_tsp.size_alignment = 0; + caps->playback_188_tsp.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->playback_188_tsp.max_buffer_num = 1; + caps->playback_188_tsp.max_size = 0xFFFFFFFF; + caps->playback_188_tsp.size_alignment = 188; + caps->playback_192_tsp.flags = + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_CACHED; + caps->playback_192_tsp.max_buffer_num = 1; + caps->playback_192_tsp.max_size = 0xFFFFFFFF; + caps->playback_192_tsp.size_alignment = 192; + caps->decoder.flags = + DMX_BUFFER_SECURED_IF_DECRYPTED | + DMX_BUFFER_EXTERNAL_SUPPORT | + DMX_BUFFER_INTERNAL_SUPPORT | + DMX_BUFFER_LINEAR_GROUP_SUPPORT | + DMX_BUFFER_CACHED; + caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM; + caps->decoder.max_size = 0xFFFFFFFF; + caps->decoder.size_alignment = SZ_4K; + + return 0; +} + + +/** + * Reads TSIF STC from TSPP + 
* + * @demux: demux device + * @num: STC number. 0 for TSIF0 and 1 for TSIF1. + * @stc: STC value + * @base: divisor to get 90KHz value + * + * Return error code + */ +static int mpq_tspp_dmx_get_stc(struct dmx_demux *demux, unsigned int num, + u64 *stc, unsigned int *base) +{ + enum tspp_source source; + u32 tcr_counter; + u64 avtimer_stc = 0; + int tts_source = 0; + + if (!demux || !stc || !base) + return -EINVAL; + + if (num == 0) + source = TSPP_SOURCE_TSIF0; + else if (num == 1) + source = TSPP_SOURCE_TSIF1; + else + return -EINVAL; + + if (tspp_get_tts_source(0, &tts_source) < 0) + tts_source = TSIF_TTS_TCR; + + if (tts_source != TSIF_TTS_LPASS_TIMER) { + tspp_get_ref_clk_counter(0, source, &tcr_counter); + *stc = ((u64)tcr_counter) * 256; /* conversion to 27MHz */ + *base = 300; /* divisor to get 90KHz clock from stc value */ + } else { + if (tspp_get_lpass_time_counter(0, source, &avtimer_stc) < 0) + return -EINVAL; + *stc = avtimer_stc; + } + return 0; +} + +static int mpq_tspp_dmx_init( + struct dvb_adapter *mpq_adapter, + struct mpq_demux *mpq_demux) +{ + int result; + + MPQ_DVB_DBG_PRINT("%s executed\n", __func__); + + mpq_dmx_tspp_info.ion_client = mpq_demux->ion_client; + + /* Set the kernel-demux object capabilities */ + mpq_demux->demux.dmx.capabilities = + DMX_TS_FILTERING | + DMX_PES_FILTERING | + DMX_SECTION_FILTERING | + DMX_MEMORY_BASED_FILTERING | + DMX_CRC_CHECKING | + DMX_TS_DESCRAMBLING; + + mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED; + + /* Set dvb-demux "virtual" function pointers */ + mpq_demux->demux.priv = (void *)mpq_demux; + mpq_demux->demux.filternum = TSPP_MAX_SECTION_FILTER_NUM; + mpq_demux->demux.feednum = MPQ_MAX_DMX_FILES; + mpq_demux->demux.start_feed = mpq_tspp_dmx_start_filtering; + mpq_demux->demux.stop_feed = mpq_tspp_dmx_stop_filtering; + mpq_demux->demux.write_to_decoder = mpq_tspp_dmx_write_to_decoder; + mpq_demux->demux.decoder_fullness_init = mpq_dmx_decoder_fullness_init; + mpq_demux->demux.decoder_fullness_wait = mpq_dmx_decoder_fullness_wait; + mpq_demux->demux.decoder_fullness_abort = + mpq_dmx_decoder_fullness_abort; + mpq_demux->demux.decoder_buffer_status = mpq_dmx_decoder_buffer_status; + mpq_demux->demux.reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer; + mpq_demux->demux.set_cipher_op = mpq_dmx_set_cipher_ops; + mpq_demux->demux.oob_command = mpq_dmx_oob_command; + mpq_demux->demux.convert_ts = mpq_dmx_convert_tts; + mpq_demux->demux.flush_decoder_buffer = NULL; + + /* Initialize dvb_demux object */ + result = dvb_dmx_init(&mpq_demux->demux); + if (result < 0) { + MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed\n", __func__); + goto init_failed; + } + + /* Now initailize the dmx-dev object */ + mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES; + mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx; + mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX; + + mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source; + mpq_demux->dmxdev.demux->get_stc = mpq_tspp_dmx_get_stc; + mpq_demux->dmxdev.demux->get_caps = mpq_tspp_dmx_get_caps; + mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer; + mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer; + mpq_demux->dmxdev.demux->write = mpq_dmx_write; + result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter); + if (result < 0) { + MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed (errno=%d)\n", + __func__, + result); + goto init_failed_dmx_release; + } + + /* Extend dvb-demux debugfs with TSPP statistics. 
*/ + mpq_dmx_init_debugfs_entries(mpq_demux); + + /* Get the TSIF TTS info */ + if (tspp_get_tts_source(0, &mpq_demux->ts_packet_timestamp_source) < 0) + mpq_demux->ts_packet_timestamp_source = TSIF_TTS_TCR; + + return 0; + +init_failed_dmx_release: + dvb_dmx_release(&mpq_demux->demux); +init_failed: + return result; +} + +static int __init mpq_dmx_tspp_plugin_init(void) +{ + int i; + int j; + int ret; + + MPQ_DVB_DBG_PRINT("%s executed\n", __func__); + + for (i = 0; i < TSIF_COUNT; i++) { + mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL; + mpq_dmx_tspp_info.tsif[i].channel_ref = 0; + mpq_dmx_tspp_info.tsif[i].buff_index = 0; + mpq_dmx_tspp_info.tsif[i].ch_mem_heap_handle = NULL; + mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base = NULL; + mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base = 0; + atomic_set(&mpq_dmx_tspp_info.tsif[i].data_cnt, 0); + atomic_set(&mpq_dmx_tspp_info.tsif[i].control_op, 0); + + for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) { + mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1; + mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0; + mpq_dmx_tspp_info.tsif[i].filters[j].hw_index = -1; + } + + for (j = 0; j < TSPP_MAX_HW_PID_FILTER_NUM; j++) + mpq_dmx_tspp_info.tsif[i].hw_indexes[j] = 0; + + mpq_dmx_tspp_info.tsif[i].current_filter_count = 0; + mpq_dmx_tspp_info.tsif[i].pass_nulls_flag = 0; + mpq_dmx_tspp_info.tsif[i].pass_all_flag = 0; + mpq_dmx_tspp_info.tsif[i].accept_all_filter_exists_flag = 0; + + snprintf(mpq_dmx_tspp_info.tsif[i].name, + TSIF_NAME_LENGTH, + "dmx_tsif%d", + i); + + init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue); + mpq_dmx_tspp_info.tsif[i].thread = + kthread_run( + mpq_dmx_tspp_thread, (void *)(uintptr_t)i, + mpq_dmx_tspp_info.tsif[i].name); + + if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) { + for (j = 0; j < i; j++) { + kthread_stop(mpq_dmx_tspp_info.tsif[j].thread); + mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex); + } + + MPQ_DVB_ERR_PRINT( + "%s: kthread_run failed\n", + __func__); + + return -ENOMEM; + } + + mutex_init(&mpq_dmx_tspp_info.tsif[i].mutex); + } + + ret = mpq_dmx_plugin_init(mpq_tspp_dmx_init); + + if (ret < 0) { + MPQ_DVB_ERR_PRINT( + "%s: mpq_dmx_plugin_init failed (errno=%d)\n", + __func__, + ret); + + for (i = 0; i < TSIF_COUNT; i++) { + kthread_stop(mpq_dmx_tspp_info.tsif[i].thread); + mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex); + } + } + + return ret; +} + +static void __exit mpq_dmx_tspp_plugin_exit(void) +{ + int i; + + MPQ_DVB_DBG_PRINT("%s executed\n", __func__); + + for (i = 0; i < TSIF_COUNT; i++) { + mutex_lock(&mpq_dmx_tspp_info.tsif[i].mutex); + + /* + * Note: tspp_close_channel will also free the TSPP buffers + * even if we allocated them ourselves, + * using our free function. + */ + if (mpq_dmx_tspp_info.tsif[i].channel_ref) { + tspp_unregister_notification(0, + TSPP_CHANNEL_ID(i, TSPP_CHANNEL)); + tspp_close_channel(0, + TSPP_CHANNEL_ID(i, TSPP_CHANNEL)); + + if (allocation_mode == + MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) { + vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids); + mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL; + mpq_dmx_channel_mem_free(i); + } + } + + mutex_unlock(&mpq_dmx_tspp_info.tsif[i].mutex); + kthread_stop(mpq_dmx_tspp_info.tsif[i].thread); + mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex); + } + + mpq_dmx_plugin_exit(); +} + + +module_init(mpq_dmx_tspp_plugin_init); +module_exit(mpq_dmx_tspp_plugin_exit); + +MODULE_DESCRIPTION("Qualcomm Technologies Inc. 
demux TSPP version 1 HW Plugin"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c new file mode 100644 index 000000000000..860c36566b92 --- /dev/null +++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c @@ -0,0 +1,1023 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include "qseecom_kernel.h" +#include "mpq_sdmx.h" + +static struct qseecom_handle *sdmx_qseecom_handles[SDMX_MAX_SESSIONS]; +static struct mutex sdmx_lock[SDMX_MAX_SESSIONS]; + +#define QSEECOM_SBUFF_SIZE SZ_128K + +enum sdmx_cmd_id { + SDMX_OPEN_SESSION_CMD, + SDMX_CLOSE_SESSION_CMD, + SDMX_SET_SESSION_CFG_CMD, + SDMX_ADD_FILTER_CMD, + SDMX_REMOVE_FILTER_CMD, + SDMX_SET_KL_IDX_CMD, + SDMX_ADD_RAW_PID_CMD, + SDMX_REMOVE_RAW_PID_CMD, + SDMX_PROCESS_CMD, + SDMX_GET_DBG_COUNTERS_CMD, + SDMX_RESET_DBG_COUNTERS_CMD, + SDMX_GET_VERSION_CMD, + SDMX_INVALIDATE_KL_CMD, + SDMX_SET_LOG_LEVEL_CMD +}; + +#pragma pack(push, sdmx, 1) + +struct sdmx_proc_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; + u8 flags; + struct sdmx_buff_descr in_buf_descr; + u32 inp_fill_cnt; + u32 in_rd_offset; + u32 num_filters; + struct sdmx_filter_status filters_status[]; +}; + +struct sdmx_proc_rsp { + enum sdmx_status ret; + u32 inp_fill_cnt; + u32 in_rd_offset; + u32 err_indicators; + u32 status_indicators; +}; + +struct sdmx_open_ses_req { + enum sdmx_cmd_id cmd_id; +}; + +struct sdmx_open_ses_rsp { + enum sdmx_status ret; + u32 session_handle; +}; + +struct sdmx_close_ses_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; +}; + +struct sdmx_close_ses_rsp { + enum sdmx_status ret; +}; + +struct sdmx_ses_cfg_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; + enum sdmx_proc_mode process_mode; + enum sdmx_inp_mode input_mode; + enum sdmx_pkt_format packet_len; + u8 odd_scramble_bits; + u8 even_scramble_bits; +}; + +struct sdmx_ses_cfg_rsp { + enum sdmx_status ret; +}; + +struct sdmx_set_kl_ind_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; + u32 pid; + u32 kl_index; +}; + +struct sdmx_set_kl_ind_rsp { + enum sdmx_status ret; +}; + +struct sdmx_add_filt_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; + u32 pid; + enum sdmx_filter filter_type; + struct sdmx_buff_descr meta_data_buf; + enum sdmx_buf_mode buffer_mode; + enum sdmx_raw_out_format ts_out_format; + u32 flags; + u32 num_data_bufs; + struct sdmx_data_buff_descr data_bufs[]; +}; + +struct sdmx_add_filt_rsp { + enum sdmx_status ret; + u32 filter_handle; +}; + +struct sdmx_rem_filt_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; + u32 filter_handle; +}; + +struct sdmx_rem_filt_rsp { + enum sdmx_status ret; +}; + +struct sdmx_add_raw_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; + u32 filter_handle; + u32 pid; +}; + +struct sdmx_add_raw_rsp { + enum sdmx_status ret; +}; + +struct sdmx_rem_raw_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; + u32 filter_handle; + u32 pid; +}; + +struct sdmx_rem_raw_rsp { + enum sdmx_status 
ret; +}; + +struct sdmx_get_counters_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; + u32 num_filters; +}; + +struct sdmx_get_counters_rsp { + enum sdmx_status ret; + struct sdmx_session_dbg_counters session_counters; + u32 num_filters; + struct sdmx_filter_dbg_counters filter_counters[]; +}; + +struct sdmx_rst_counters_req { + enum sdmx_cmd_id cmd_id; + u32 session_handle; +}; + +struct sdmx_rst_counters_rsp { + enum sdmx_status ret; +}; + +struct sdmx_get_version_req { + enum sdmx_cmd_id cmd_id; +}; + +struct sdmx_get_version_rsp { + enum sdmx_status ret; + int32_t version; +}; + +struct sdmx_set_log_level_req { + enum sdmx_cmd_id cmd_id; + enum sdmx_log_level level; + u32 session_handle; +}; + +struct sdmx_set_log_level_rsp { + enum sdmx_status ret; +}; + +#pragma pack(pop, sdmx) + +static int get_cmd_rsp_buffers(int handle_index, + void **cmd, + int *cmd_len, + void **rsp, + int *rsp_len) +{ + if (*cmd_len & QSEECOM_ALIGN_MASK) + *cmd_len = QSEECOM_ALIGN(*cmd_len); + + if (*rsp_len & QSEECOM_ALIGN_MASK) + *rsp_len = QSEECOM_ALIGN(*rsp_len); + + if ((*rsp_len + *cmd_len) > QSEECOM_SBUFF_SIZE) { + pr_err("%s: shared buffer too small to hold cmd=%d and rsp=%d\n", + __func__, *cmd_len, *rsp_len); + return SDMX_STATUS_OUT_OF_MEM; + } + + *cmd = sdmx_qseecom_handles[handle_index]->sbuf; + *rsp = sdmx_qseecom_handles[handle_index]->sbuf + *cmd_len; + return SDMX_SUCCESS; +} + +/* + * Returns version of secure-demux app. + * + * @session_handle: Returned instance handle. Must not be NULL. + * Return error code + */ +int sdmx_get_version(int session_handle, int32_t *version) +{ + int res, cmd_len, rsp_len; + struct sdmx_get_version_req *cmd; + struct sdmx_get_version_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) || + (version == NULL)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_get_version_req); + rsp_len = sizeof(struct sdmx_get_version_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_GET_VERSION_CMD; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + ret = rsp->ret; + *version = rsp->version; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; + +} +EXPORT_SYMBOL(sdmx_get_version); + +/* + * Initializes a new secure demux instance and returns a handle of the instance. + * + * @session_handle: handle of a secure demux instance to get its version. + * Return the version if successful or an error code. 
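+ *
+ * On success the QSEECom "securemm" app is started for the session, the
+ * new session handle is written to @session_handle and the app's status
+ * code is returned.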
+ */ +int sdmx_open_session(int *session_handle) +{ + int res, cmd_len, rsp_len; + enum sdmx_status ret, version_ret; + struct sdmx_open_ses_req *cmd; + struct sdmx_open_ses_rsp *rsp; + struct qseecom_handle *qseecom_handle = NULL; + int32_t version; + + /* Input validation */ + if (session_handle == NULL) + return SDMX_STATUS_GENERAL_FAILURE; + + /* Start the TZ app */ + res = qseecom_start_app(&qseecom_handle, "securemm", + QSEECOM_SBUFF_SIZE); + + if (res < 0) + return SDMX_STATUS_GENERAL_FAILURE; + + cmd_len = sizeof(struct sdmx_open_ses_req); + rsp_len = sizeof(struct sdmx_open_ses_rsp); + + /* Get command and response buffers */ + cmd = (struct sdmx_open_ses_req *)qseecom_handle->sbuf; + + if (cmd_len & QSEECOM_ALIGN_MASK) + cmd_len = QSEECOM_ALIGN(cmd_len); + + rsp = (struct sdmx_open_ses_rsp *)qseecom_handle->sbuf + cmd_len; + + if (rsp_len & QSEECOM_ALIGN_MASK) + rsp_len = QSEECOM_ALIGN(rsp_len); + + /* Will be later overridden by SDMX response */ + *session_handle = SDMX_INVALID_SESSION_HANDLE; + + /* Populate command struct */ + cmd->cmd_id = SDMX_OPEN_SESSION_CMD; + + /* Issue QSEECom command */ + res = qseecom_send_command(qseecom_handle, (void *)cmd, cmd_len, + (void *)rsp, rsp_len); + + if (res < 0) { + qseecom_shutdown_app(&qseecom_handle); + return SDMX_STATUS_GENERAL_FAILURE; + } + + /* Parse response struct */ + *session_handle = rsp->session_handle; + + /* Initialize handle and mutex */ + sdmx_qseecom_handles[*session_handle] = qseecom_handle; + mutex_init(&sdmx_lock[*session_handle]); + ret = rsp->ret; + + /* Get and print the app version */ + version_ret = sdmx_get_version(*session_handle, &version); + if (version_ret == SDMX_SUCCESS) + pr_info("TZ SDMX version is %x.%x\n", version >> 8, + version & 0xFF); + else + pr_err("Error reading TZ SDMX version\n"); + + return ret; +} +EXPORT_SYMBOL(sdmx_open_session); + +/* + * Closes a secure demux instance. + * + * @session_handle: handle of a secure demux instance to close. + * Return error code + */ +int sdmx_close_session(int session_handle) +{ + int res, cmd_len, rsp_len; + struct sdmx_close_ses_req *cmd; + struct sdmx_close_ses_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_close_ses_req); + rsp_len = sizeof(struct sdmx_close_ses_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_CLOSE_SESSION_CMD; + cmd->session_handle = session_handle; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + ret = rsp->ret; + + /* Shutdown the TZ app (or at least free the current handle) */ + res = qseecom_shutdown_app(&sdmx_qseecom_handles[session_handle]); + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + sdmx_qseecom_handles[session_handle] = NULL; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_close_session); + +/* + * Configures an open secure demux instance. 
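+ * The settings are sent to the secure demux TZ app over QSEECom and apply
+ * to the session as a whole.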
+ * + * @session_handle: secure demux instance + * @proc_mode: Defines secure demux's behavior in case of output + * buffer overflow. + * @inp_mode: Defines the input encryption settings. + * @pkt_format: TS packet length in input buffer. + * @odd_scramble_bits: Value of the scramble bits indicating the ODD key. + * @even_scramble_bits: Value of the scramble bits indicating the EVEN key. + * Return error code + */ +int sdmx_set_session_cfg(int session_handle, + enum sdmx_proc_mode proc_mode, + enum sdmx_inp_mode inp_mode, + enum sdmx_pkt_format pkt_format, + u8 odd_scramble_bits, + u8 even_scramble_bits) +{ + int res, cmd_len, rsp_len; + struct sdmx_ses_cfg_req *cmd; + struct sdmx_ses_cfg_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_ses_cfg_req); + rsp_len = sizeof(struct sdmx_ses_cfg_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_SET_SESSION_CFG_CMD; + cmd->session_handle = session_handle; + cmd->process_mode = proc_mode; + cmd->input_mode = inp_mode; + cmd->packet_len = pkt_format; + cmd->odd_scramble_bits = odd_scramble_bits; + cmd->even_scramble_bits = even_scramble_bits; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + ret = rsp->ret; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_set_session_cfg); + +/* + * Creates a new secure demux filter and returns a filter handle + * + * @session_handle: secure demux instance + * @pid: pid to filter + * @filter_type: type of filtering + * @meta_data_buf: meta data buffer descriptor + * @data_buf_mode: data buffer mode (ring/linear) + * @num_data_bufs: number of data buffers (use 1 for a ring buffer) + * @data_bufs: data buffers descriptors array + * @filter_handle: returned filter handle + * @ts_out_format: output format for raw filters + * @flags: optional flags for filter + * (currently only clear section CRC verification is supported) + * + * Return error code + */ +int sdmx_add_filter(int session_handle, + u16 pid, + enum sdmx_filter filterype, + struct sdmx_buff_descr *meta_data_buf, + enum sdmx_buf_mode d_buf_mode, + u32 num_data_bufs, + struct sdmx_data_buff_descr *data_bufs, + int *filter_handle, + enum sdmx_raw_out_format ts_out_format, + u32 flags) +{ + int res, cmd_len, rsp_len; + struct sdmx_add_filt_req *cmd; + struct sdmx_add_filt_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) || + (filter_handle == NULL)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_add_filt_req) + + num_data_bufs * sizeof(struct sdmx_data_buff_descr); + rsp_len = sizeof(struct sdmx_add_filt_rsp); + + /* Will be later overridden by SDMX response */ + *filter_handle = SDMX_INVALID_FILTER_HANDLE; + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ 
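+	/*
+	 * The request ends with a flexible array of data buffer descriptors;
+	 * cmd_len above was sized to hold num_data_bufs such entries.
+	 */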
+ cmd->cmd_id = SDMX_ADD_FILTER_CMD; + cmd->session_handle = session_handle; + cmd->pid = (u32)pid; + cmd->filter_type = filterype; + cmd->ts_out_format = ts_out_format; + cmd->flags = flags; + if (meta_data_buf != NULL) + memcpy(&(cmd->meta_data_buf), meta_data_buf, + sizeof(struct sdmx_buff_descr)); + else + memset(&(cmd->meta_data_buf), 0, sizeof(cmd->meta_data_buf)); + + cmd->buffer_mode = d_buf_mode; + cmd->num_data_bufs = num_data_bufs; + memcpy(cmd->data_bufs, data_bufs, + num_data_bufs * sizeof(struct sdmx_data_buff_descr)); + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + /* Parse response struct */ + *filter_handle = rsp->filter_handle; + ret = rsp->ret; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_add_filter); + +/* + * Removes a secure demux filter + * + * @session_handle: secure demux instance + * @filter_handle: filter handle to remove + * + * Return error code + */ +int sdmx_remove_filter(int session_handle, int filter_handle) +{ + int res, cmd_len, rsp_len; + struct sdmx_rem_filt_req *cmd; + struct sdmx_rem_filt_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_rem_filt_req); + rsp_len = sizeof(struct sdmx_rem_filt_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_REMOVE_FILTER_CMD; + cmd->session_handle = session_handle; + cmd->filter_handle = filter_handle; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + ret = rsp->ret; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_remove_filter); + +/* + * Associates a key ladder index for the specified pid + * + * @session_handle: secure demux instance + * @pid: pid + * @key_ladder_index: key ladder index to associate to the pid + * + * Return error code + * + * Note: if pid already has some key ladder index associated, it will be + * overridden. 
+ */ +int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index) +{ + int res, cmd_len, rsp_len; + struct sdmx_set_kl_ind_req *cmd; + struct sdmx_set_kl_ind_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_set_kl_ind_req); + rsp_len = sizeof(struct sdmx_set_kl_ind_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_SET_KL_IDX_CMD; + cmd->session_handle = session_handle; + cmd->pid = (u32)pid; + cmd->kl_index = key_ladder_index; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + ret = rsp->ret; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_set_kl_ind); + +/* + * Adds the specified pid to an existing raw (recording) filter + * + * @session_handle: secure demux instance + * @filter_handle: raw filter handle + * @pid: pid + * + * Return error code + */ +int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid) +{ + int res, cmd_len, rsp_len; + struct sdmx_add_raw_req *cmd; + struct sdmx_add_raw_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_add_raw_req); + rsp_len = sizeof(struct sdmx_add_raw_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_ADD_RAW_PID_CMD; + cmd->session_handle = session_handle; + cmd->filter_handle = filter_handle; + cmd->pid = (u32)pid; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + ret = rsp->ret; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_add_raw_pid); + +/* + * Removes the specified pid from a raw (recording) filter + * + * @session_handle: secure demux instance + * @filter_handle: raw filter handle + * @pid: pid + * + * Return error code + */ +int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid) +{ + int res, cmd_len, rsp_len; + struct sdmx_rem_raw_req *cmd; + struct sdmx_rem_raw_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_rem_raw_req); + rsp_len = sizeof(struct sdmx_rem_raw_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_REMOVE_RAW_PID_CMD; + cmd->session_handle = session_handle; + cmd->filter_handle = filter_handle; + 
cmd->pid = (u32)pid; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + ret = rsp->ret; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_remove_raw_pid); + +/* + * Call secure demux to perform processing on the specified input buffer + * + * @session_handle: secure demux instance + * @flags: input flags. Currently only EOS marking is supported. + * @input_buf_desc: input buffer descriptor + * @input_fill_count: number of bytes available in input buffer + * @input_read_offset: offset inside input buffer where data starts + * @error_indicators: returned general error indicators + * @status_indicators: returned general status indicators + * @num_filters: number of filters in filter status array + * @filter_status: filter status descriptor array + * + * Return error code + */ +int sdmx_process(int session_handle, u8 flags, + struct sdmx_buff_descr *input_buf_desc, + u32 *input_fill_count, + u32 *input_read_offset, + u32 *error_indicators, + u32 *status_indicators, + u32 num_filters, + struct sdmx_filter_status *filter_status) +{ + int res, cmd_len, rsp_len; + struct sdmx_proc_req *cmd; + struct sdmx_proc_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) || + (input_buf_desc == NULL) || + (input_fill_count == NULL) || (input_read_offset == NULL) || + (error_indicators == NULL) || (status_indicators == NULL) || + (filter_status == NULL)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_proc_req) + + num_filters * sizeof(struct sdmx_filter_status); + rsp_len = sizeof(struct sdmx_proc_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_PROCESS_CMD; + cmd->session_handle = session_handle; + cmd->flags = flags; + cmd->in_buf_descr.base_addr = input_buf_desc->base_addr; + cmd->in_buf_descr.size = input_buf_desc->size; + cmd->inp_fill_cnt = *input_fill_count; + cmd->in_rd_offset = *input_read_offset; + cmd->num_filters = num_filters; + memcpy(cmd->filters_status, filter_status, + num_filters * sizeof(struct sdmx_filter_status)); + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + /* Parse response struct */ + *input_fill_count = rsp->inp_fill_cnt; + *input_read_offset = rsp->in_rd_offset; + *error_indicators = rsp->err_indicators; + *status_indicators = rsp->status_indicators; + memcpy(filter_status, cmd->filters_status, + num_filters * sizeof(struct sdmx_filter_status)); + ret = rsp->ret; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_process); + +/* + * Returns session-level & filter-level debug counters + * + * @session_handle: secure demux instance + * @session_counters: returned session-level debug counters + * @num_filters: returned number of filters reported in filter_counters + * @filter_counters: returned filter-level debug counters array + * + * Return error code + */ +int 
sdmx_get_dbg_counters(int session_handle, + struct sdmx_session_dbg_counters *session_counters, + u32 *num_filters, + struct sdmx_filter_dbg_counters *filter_counters) +{ + int res, cmd_len, rsp_len; + struct sdmx_get_counters_req *cmd; + struct sdmx_get_counters_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) || + (session_counters == NULL) || (num_filters == NULL) || + (filter_counters == NULL)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_get_counters_req); + rsp_len = sizeof(struct sdmx_get_counters_rsp) + + *num_filters * sizeof(struct sdmx_filter_dbg_counters); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_GET_DBG_COUNTERS_CMD; + cmd->session_handle = session_handle; + cmd->num_filters = *num_filters; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + /* Parse response struct */ + *session_counters = rsp->session_counters; + *num_filters = rsp->num_filters; + memcpy(filter_counters, rsp->filter_counters, + *num_filters * sizeof(struct sdmx_filter_dbg_counters)); + ret = rsp->ret; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_get_dbg_counters); + +/* + * Reset debug counters + * + * @session_handle: secure demux instance + * + * Return error code + */ +int sdmx_reset_dbg_counters(int session_handle) +{ + int res, cmd_len, rsp_len; + struct sdmx_rst_counters_req *cmd; + struct sdmx_rst_counters_rsp *rsp; + enum sdmx_status ret; + + if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS)) + return SDMX_STATUS_INVALID_INPUT_PARAMS; + + cmd_len = sizeof(struct sdmx_rst_counters_req); + rsp_len = sizeof(struct sdmx_rst_counters_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if (ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_RESET_DBG_COUNTERS_CMD; + cmd->session_handle = session_handle; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + + ret = rsp->ret; +out: + mutex_unlock(&sdmx_lock[session_handle]); + + return ret; +} +EXPORT_SYMBOL(sdmx_reset_dbg_counters); + +/* + * Set debug log verbosity level + * + * @session_handle: secure demux instance + * @level: requested log level + * + * Return error code + */ +int sdmx_set_log_level(int session_handle, enum sdmx_log_level level) +{ + int res, cmd_len, rsp_len; + struct sdmx_set_log_level_req *cmd; + struct sdmx_set_log_level_rsp *rsp; + enum sdmx_status ret; + + cmd_len = sizeof(struct sdmx_set_log_level_req); + rsp_len = sizeof(struct sdmx_set_log_level_rsp); + + /* Lock shared memory */ + mutex_lock(&sdmx_lock[session_handle]); + + /* Get command and response buffers */ + ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len, + (void **)&rsp, &rsp_len); + if 
(ret) + goto out; + + /* Populate command struct */ + cmd->cmd_id = SDMX_SET_LOG_LEVEL_CMD; + cmd->session_handle = session_handle; + cmd->level = level; + + /* Issue QSEECom command */ + res = qseecom_send_command(sdmx_qseecom_handles[session_handle], + (void *)cmd, cmd_len, (void *)rsp, rsp_len); + if (res < 0) { + mutex_unlock(&sdmx_lock[session_handle]); + return SDMX_STATUS_GENERAL_FAILURE; + } + ret = rsp->ret; +out: + /* Unlock */ + mutex_unlock(&sdmx_lock[session_handle]); + return ret; +} diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h new file mode 100644 index 000000000000..9be26ae565aa --- /dev/null +++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h @@ -0,0 +1,368 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _MPQ_SDMX_H +#define _MPQ_SDMX_H + +#include + +/* Constant declarations */ +#define SDMX_MAX_SESSIONS (4) +#define SDMX_LOOPBACK_PID (0x2000) + +#define SDMX_MAX_PHYSICAL_CHUNKS (256) + +/* Filter-level error indicators */ +#define SDMX_FILTER_SUCCESS (0) +#define SDMX_FILTER_ERR_MD_BUF_FULL BIT(0) +#define SDMX_FILTER_ERR_D_BUF_FULL BIT(1) +#define SDMX_FILTER_ERR_D_LIN_BUFS_FULL BIT(2) +#define SDMX_FILTER_ERR_INVALID_SCRAMBLE_BITS BIT(3) +#define SDMX_FILTER_ERR_KL_IND_NOT_SET BIT(4) +#define SDMX_FILTER_ERR_CAS_DECRYPT_ERROR BIT(5) +#define SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL BIT(6) +#define SDMX_FILTER_ERR_SEC_INTERNAL_MALLOC_FAIL BIT(7) +#define SDMX_FILTER_ERR_SEC_LEN_INVALID BIT(8) +#define SDMX_FILTER_ERR_SEC_PUSI_PTR_INVALID BIT(9) +#define SDMX_FILTER_ERR_TS_SYNC_BYTE_INVALID BIT(10) +#define SDMX_FILTER_ERR_TS_TRANSPORT_ERR BIT(11) +#define SDMX_FILTER_ERR_CONT_CNT_INVALID BIT(12) +#define SDMX_FILTER_ERR_CONT_CNT_DUPLICATE BIT(13) +#define SDMX_FILTER_ERR_INVALID_PES_HDR BIT(14) +#define SDMX_FILTER_ERR_INVALID_PES_LEN BIT(15) +#define SDMX_FILTER_ERR_INVALID_PES_ENCRYPTION BIT(16) +#define SDMX_FILTER_ERR_SECURITY_FAULT BIT(17) +#define SDMX_FILTER_ERR_IN_NS_BUFFER BIT(18) + +/* Filter-level status indicators */ +#define SDMX_FILTER_STATUS_EOS BIT(0) +#define SDMX_FILTER_STATUS_WR_PTR_CHANGED BIT(1) + +/* Filter-level flags */ +#define SDMX_FILTER_FLAG_VERIFY_SECTION_CRC BIT(0) + +#define SDMX_INVALID_SESSION_HANDLE (-1) +#define SDMX_INVALID_FILTER_HANDLE (-1) + +/* Input flags */ +#define SDMX_INPUT_FLAG_EOS BIT(0) +#define SDMX_INPUT_FLAG_DBG_ENABLE BIT(1) + + +enum sdmx_buf_mode { + SDMX_RING_BUF, + SDMX_LINEAR_GROUP_BUF, +}; + +enum sdmx_proc_mode { + SDMX_PUSH_MODE, + SDMX_PULL_MODE, +}; + +enum sdmx_inp_mode { + SDMX_PKT_ENC_MODE, + SDMX_BULK_ENC_MODE, + SDMX_CLEAR_MODE, +}; + +enum sdmx_pkt_format { + SDMX_188_BYTE_PKT = 188, + SDMX_192_BYTE_PKT = 192, + SDMX_195_BYTE_PKT = 195, +}; + +enum sdmx_log_level { + SDMX_LOG_NO_PRINT, + SDMX_LOG_MSG_ERROR, + SDMX_LOG_DEBUG, + SDMX_LOG_VERBOSE +}; + +enum sdmx_status { + SDMX_SUCCESS = 0, + SDMX_STATUS_GENERAL_FAILURE = -1, + SDMX_STATUS_MAX_OPEN_SESSIONS_REACHED = -2, + SDMX_STATUS_INVALID_SESSION_HANDLE = -3, + 
SDMX_STATUS_INVALID_INPUT_PARAMS = -4, + SDMX_STATUS_UNSUPPORTED_MODE = -5, + SDMX_STATUS_INVALID_PID = -6, + SDMX_STATUS_OUT_OF_MEM = -7, + SDMX_STATUS_FILTER_EXISTS = -8, + SDMX_STATUS_INVALID_FILTER_HANDLE = -9, + SDMX_STATUS_MAX_RAW_PIDS_REACHED = -10, + SDMX_STATUS_SINGLE_PID_RAW_FILTER = -11, + SDMX_STATUS_INP_BUF_INVALID_PARAMS = -12, + SDMX_STATUS_INVALID_FILTER_CFG = -13, + SDMX_STATUS_STALLED_IN_PULL_MODE = -14, + SDMX_STATUS_SECURITY_FAULT = -15, + SDMX_STATUS_NS_BUFFER_ERROR = -16, +}; + +enum sdmx_filter { + SDMX_PES_FILTER, /* Other PES */ + SDMX_SEPARATED_PES_FILTER, /* Separated PES (for decoder) */ + SDMX_SECTION_FILTER, /* Section */ + SDMX_PCR_FILTER, /* PCR */ + SDMX_RAW_FILTER, /* Recording */ +}; + +enum sdmx_raw_out_format { + SDMX_188_OUTPUT, + SDMX_192_HEAD_OUTPUT, + SDMX_192_TAIL_OUTPUT +}; + +#pragma pack(push, sdmx, 1) + +struct sdmx_session_dbg_counters { + /* Total number of TS-packets input to SDMX. */ + u32 ts_pkt_in; + + /* Total number of TS-packets filtered out by SDMX. */ + u32 ts_pkt_out; +}; + +struct sdmx_filter_dbg_counters { + int filter_handle; + + /* Number of TS-packets filtered. */ + u32 ts_pkt_count; + + /* Number of TS-packets with adaptation field only (no payload). */ + u32 ts_pkt_no_payload; + + /* Number of TS-packets with the discontinuity indicator set. */ + u32 ts_pkt_discont; + + /* Number of duplicate TS-packets detected. */ + u32 ts_pkt_dup; + + /* Number of packets not decrypted because the key wasn't ready. */ + u32 ts_pkt_key_not_ready; +}; + +struct sdmx_pes_counters { + /* Number of TS packets with the TEI flag set */ + u32 transport_err_count; + + /* Number of TS packets with continuity counter errors */ + u32 continuity_err_count; + + /* Number of TS packets composing this PES frame */ + u32 pes_ts_count; + + /* Number of TS packets dropped due to full buffer */ + u32 drop_count; +}; + +struct sdmx_buff_descr { + /* Physical address where buffer starts */ + u64 base_addr; + + /* Size of buffer */ + u32 size; +}; + +struct sdmx_data_buff_descr { + /* Physical chunks of the buffer */ + struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS]; + + /* Length of buffer */ + u32 length; +}; + +/* + * Data payload residing in the data buffers is described using this meta-data + * header. The meta data header specifies where the payload is located in the + * data buffer and how big it is. + * The meta data header optionally carries additional relevant meta data + * immediately following the meta-data header. + */ +struct sdmx_metadata_header { + /* + * Payload start offset inside data buffer. In case data is managed + * as a linear buffer group, this specifies buffer index. + */ + u32 payload_start; + + /* Payload length */ + u32 payload_length; + + /* Number of meta data bytes immediately following this header */ + u32 metadata_length; +}; + + +struct sdmx_filter_status { + /* Secure demux filter handle */ + int filter_handle; + + /* + * Number of pending bytes in filter's output data buffer. + * For linear buffer mode, this is number of buffers pending. + */ + u32 data_fill_count; + + /* + * Offset in data buffer for next data payload to be written. + * For linear buffer mode, this is a buffer index. 
+ */ + u32 data_write_offset; + + /* Number of pending bytes in filter's output meta data buffer */ + u32 metadata_fill_count; + + /* Offset in meta data buffer for next metadata header to be written */ + u32 metadata_write_offset; + + /* Errors (bitmap) reported by secure demux for this filter */ + u32 error_indicators; + + /* General status (bitmap) reported by secure demux for this filter */ + u32 status_indicators; +}; +#pragma pack(pop, sdmx) + +#ifdef CONFIG_QSEECOM + +int sdmx_open_session(int *session_handle); + +int sdmx_close_session(int session_handle); + +int sdmx_get_version(int session_handle, int32_t *version); + +int sdmx_set_session_cfg(int session_handle, enum sdmx_proc_mode proc_mode, + enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format, + u8 odd_scramble_bits, u8 even_scramble_bits); + +int sdmx_add_filter(int session_handle, u16 pid, enum sdmx_filter filter_type, + struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode, + u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs, + int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags); + +int sdmx_remove_filter(int session_handle, int filter_handle); + +int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index); + +int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid); + +int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid); + +int sdmx_process(int session_handle, u8 flags, + struct sdmx_buff_descr *input_buf_desc, + u32 *input_fill_count, u32 *input_read_offset, + u32 *error_indicators, + u32 *status_indicators, + u32 num_filters, + struct sdmx_filter_status *filter_status); + +int sdmx_get_dbg_counters(int session_handle, + struct sdmx_session_dbg_counters *session_counters, + u32 *num_filters, + struct sdmx_filter_dbg_counters *filter_counters); + +int sdmx_reset_dbg_counters(int session_handle); + +int sdmx_set_log_level(int session_handle, enum sdmx_log_level level); + +#else + +static inline int sdmx_open_session(int *session_handle) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_close_session(int session_handle) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_get_version(int session_handle, int32_t *version) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_set_session_cfg(int session_handle, + enum sdmx_proc_mode proc_mode, + enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format, + u8 odd_scramble_bits, u8 even_scramble_bits) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_add_filter(int session_handle, u16 pid, + enum sdmx_filter filter_type, + struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode, + u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs, + int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_remove_filter(int session_handle, int filter_handle) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_set_kl_ind(int session_handle, u16 pid, + u32 key_ladder_index) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_add_raw_pid(int session_handle, int filter_handle, + u16 pid) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_remove_raw_pid(int session_handle, int filter_handle, + u16 pid) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_process(int session_handle, u8 flags, + struct sdmx_buff_descr *input_buf_desc, + u32 
*input_fill_count, u32 *input_read_offset, + u32 *error_indicators, + u32 *status_indicators, + u32 num_filters, + struct sdmx_filter_status *filter_status) +{ + *status_indicators = 0; + *error_indicators = 0; + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_get_dbg_counters(int session_handle, + struct sdmx_session_dbg_counters *session_counters, + u32 *num_filters, + struct sdmx_filter_dbg_counters *filter_counters) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_reset_dbg_counters(int session_handle) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +static inline int sdmx_set_log_level(int session_handle, + enum sdmx_log_level level) +{ + return SDMX_STATUS_GENERAL_FAILURE; +} + +#endif + +#endif /* _MPQ_SDMX_H */ diff --git a/drivers/media/platform/msm/dvb/include/mpq_adapter.h b/drivers/media/platform/msm/dvb/include/mpq_adapter.h new file mode 100644 index 000000000000..c55a5aa1ae32 --- /dev/null +++ b/drivers/media/platform/msm/dvb/include/mpq_adapter.h @@ -0,0 +1,222 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _MPQ_ADAPTER_H +#define _MPQ_ADAPTER_H + +#include "dvbdev.h" +#include "dvb_demux.h" +#include "mpq_stream_buffer.h" + + + +/** IDs of interfaces holding stream-buffers */ +enum mpq_adapter_stream_if { + /** Interface holding stream-buffer for video0 stream */ + MPQ_ADAPTER_VIDEO0_STREAM_IF = 0, + + /** Interface holding stream-buffer for video1 stream */ + MPQ_ADAPTER_VIDEO1_STREAM_IF = 1, + + /** Interface holding stream-buffer for video2 stream */ + MPQ_ADAPTER_VIDEO2_STREAM_IF = 2, + + /** Interface holding stream-buffer for video3 stream */ + MPQ_ADAPTER_VIDEO3_STREAM_IF = 3, + + /** Interface holding stream-buffer for audio0 stream */ + MPQ_ADAPTER_AUDIO0_STREAM_IF = 4, + + /** Interface holding stream-buffer for audio1 stream */ + MPQ_ADAPTER_AUDIO1_STREAM_IF = 5, + + /** Interface holding stream-buffer for audio2 stream */ + MPQ_ADAPTER_AUDIO2_STREAM_IF = 6, + + /** Interface holding stream-buffer for audio3 stream */ + MPQ_ADAPTER_AUDIO3_STREAM_IF = 7, + + /** Maximum number of interfaces holding stream-buffers */ + MPQ_ADAPTER_MAX_NUM_OF_INTERFACES, +}; + +enum dmx_packet_type { + DMX_PES_PACKET, + DMX_FRAMING_INFO_PACKET, + DMX_EOS_PACKET, + DMX_MARKER_PACKET +}; + +struct dmx_pts_dts_info { + /** Indication whether PTS exist */ + int pts_exist; + + /** Indication whether DTS exist */ + int dts_exist; + + /** PTS value associated with the PES data if any */ + u64 pts; + + /** DTS value associated with the PES data if any */ + u64 dts; +}; + +struct dmx_framing_packet_info { + /** framing pattern type, one of DMX_IDX_* definitions */ + u64 pattern_type; + + /** PTS/DTS information */ + struct dmx_pts_dts_info pts_dts_info; + + /** STC value attached to first TS packet holding the pattern */ + u64 stc; + + /* + * Number of TS packets with Transport Error Indicator (TEI) + * found while constructing the frame. 
+ */ + __u32 transport_error_indicator_counter; + + /* Number of continuity errors found while constructing the frame */ + __u32 continuity_error_counter; + + /* + * Number of dropped bytes due to insufficient buffer space, + * since last reported frame. + */ + __u32 ts_dropped_bytes; + + /* Total number of TS packets holding the frame */ + __u32 ts_packets_num; +}; + +struct dmx_pes_packet_info { + /** PTS/DTS information */ + struct dmx_pts_dts_info pts_dts_info; + + /** STC value attached to first TS packet holding the PES */ + u64 stc; +}; + +struct dmx_marker_info { + /* marker id */ + u64 id; +}; + +/** The meta-data used for video interface */ +struct mpq_adapter_video_meta_data { + /** meta-data packet type */ + enum dmx_packet_type packet_type; + + /** packet-type specific information */ + union { + struct dmx_framing_packet_info framing; + struct dmx_pes_packet_info pes; + struct dmx_marker_info marker; + } info; +} __packed; + +/** The meta-data used for audio interface */ +struct mpq_adapter_audio_meta_data { + /** meta-data packet type */ + enum dmx_packet_type packet_type; + + /** packet-type specific information */ + union { + struct dmx_pes_packet_info pes; + struct dmx_marker_info marker; + } info; +} __packed; + +/** Callback function to notify on registrations of specific interfaces */ +typedef void (*mpq_adapter_stream_if_callback)( + enum mpq_adapter_stream_if interface_id, + void *user_param); + + +/** + * mpq_adapter_get - Returns pointer to Qualcomm Technologies Inc. DVB adapter + * + * Return dvb adapter or NULL if not exist. + */ +struct dvb_adapter *mpq_adapter_get(void); + + +/** + * mpq_adapter_register_stream_if - Register a stream interface. + * + * @interface_id: The interface id + * @stream_buffer: The buffer used for the interface + * + * Return error status + * + * Stream interface used to connect between two units in tunneling + * mode using mpq_streambuffer implementation. + * The producer of the interface should register the new interface, + * consumer may get the interface using mpq_adapter_get_stream_if. + * + * Note that the function holds a pointer to this interface, + * stream_buffer pointer assumed to be valid as long as interface + * is active. + */ +int mpq_adapter_register_stream_if( + enum mpq_adapter_stream_if interface_id, + struct mpq_streambuffer *stream_buffer); + + +/** + * mpq_adapter_unregister_stream_if - Un-register a stream interface. + * + * @interface_id: The interface id + * + * Return error status + */ +int mpq_adapter_unregister_stream_if( + enum mpq_adapter_stream_if interface_id); + + +/** + * mpq_adapter_get_stream_if - Get buffer used for a stream interface. + * + * @interface_id: The interface id + * @stream_buffer: The returned stream buffer + * + * Return error status + */ +int mpq_adapter_get_stream_if( + enum mpq_adapter_stream_if interface_id, + struct mpq_streambuffer **stream_buffer); + + +/** + * mpq_adapter_notify_stream_if - Register notification + * to be triggered when a stream interface is registered. + * + * @interface_id: The interface id + * @callback: The callback to be triggered when the interface is registered + * @user_param: A parameter that is passed back to the callback function + * when triggered. + * + * Return error status + * + * Producer may use this to register notification when desired + * interface registered in the system and query its information + * afterwards using mpq_adapter_get_stream_if. 
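+ * For illustration only, registration could look roughly as follows (the
+ * 'video_if_ready' callback name below is hypothetical, not part of this API):
+ *
+ *	static void video_if_ready(enum mpq_adapter_stream_if id, void *user)
+ *	{
+ *		struct mpq_streambuffer *sb;
+ *
+ *		if (!mpq_adapter_get_stream_if(id, &sb))
+ *			pr_debug("stream interface %d ready\n", id);
+ *	}
+ *
+ *	mpq_adapter_notify_stream_if(MPQ_ADAPTER_VIDEO0_STREAM_IF,
+ *		video_if_ready, NULL);
+ *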
+ * To remove the callback, this function should be called with NULL + * value in callback parameter. + */ +int mpq_adapter_notify_stream_if( + enum mpq_adapter_stream_if interface_id, + mpq_adapter_stream_if_callback callback, + void *user_param); + +#endif /* _MPQ_ADAPTER_H */ diff --git a/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h new file mode 100644 index 000000000000..6550ddd76d8c --- /dev/null +++ b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h @@ -0,0 +1,41 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _MPQ_DVB_DEBUG_H +#define _MPQ_DVB_DEBUG_H + +/* Enable this line if you want to output debug printouts */ +#define MPG_DVB_DEBUG_ENABLE + +#undef MPQ_DVB_DBG_PRINT /* undef it, just in case */ + +#ifdef MPG_DVB_DEBUG_ENABLE +#define MPQ_DVB_ERR_PRINT(fmt, args...) pr_err(fmt, ## args) +#define MPQ_DVB_WARN_PRINT(fmt, args...) pr_warn(fmt, ## args) +#define MPQ_DVB_NOTICE_PRINT(fmt, args...) pr_notice(fmt, ## args) +#define MPQ_DVB_DBG_PRINT(fmt, args...) pr_debug(fmt, ## args) +#else /* MPG_DVB_DEBUG_ENABLE */ +#define MPQ_DVB_ERR_PRINT(fmt, args...) +#define MPQ_DVB_WARN_PRINT(fmt, args...) +#define MPQ_DVB_NOTICE_PRINT(fmt, args...) +#define MPQ_DVB_DBG_PRINT(fmt, args...) +#endif /* MPG_DVB_DEBUG_ENABLE */ + + +/* + * The following can be used to disable specific printout + * by adding a letter to the end of MPQ_DVB_DBG_PRINT + */ +#undef MPQ_DVB_DBG_PRINTT +#define MPQ_DVB_DBG_PRINTT(fmt, args...) + +#endif /* _MPQ_DVB_DEBUG_H */ diff --git a/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h new file mode 100644 index 000000000000..62404513007a --- /dev/null +++ b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h @@ -0,0 +1,494 @@ +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _MPQ_STREAM_BUFFER_H +#define _MPQ_STREAM_BUFFER_H + +#include "dvb_ringbuffer.h" + +/** + * DOC: MPQ Stream Buffer + * + * A stream buffer implementation is used to transfer data between two units + * such as demux and decoders. The implementation relies on dvb_ringbuffer + * implementation. Refer to dvb_ringbuffer.h for details. + * + * The implementation uses two dvb_ringbuffers, one to pass the + * raw-data (PES payload for example) and the other to pass + * meta-data (information from PES header for example). + * + * The meta-data uses dvb_ringbuffer packet interface. 
Each meta-data
+ * packet points to the data buffer, and includes the offset to the data in the
+ * buffer, the size of raw-data described by the meta-data packet, and also the
+ * size of the user's own parameters, if any are required.
+ *
+ * Data can be managed in two ways: ring-buffer & linear buffers, as specified
+ * in initialization when calling the mpq_streambuffer_init function.
+ * For managing data as a ring buffer exactly 1 data buffer descriptor must be
+ * specified in initialization. For this mode, dvb_ringbuffer is used "as-is".
+ * For managing data in several linear buffers, an array of buffer descriptors
+ * must be passed.
+ * For both modes, data descriptor(s) must remain valid throughout the life
+ * span of the mpq_streambuffer object.
+ * Apart from initialization, the API remains the same for both modes.
+ *
+ * Contrary to the dvb_ringbuffer implementation, this API makes sure there's
+ * enough data to read/write when making read/write operations.
+ * Users interested in flushing/resetting a specific buffer, checking for bytes
+ * ready or for space available for writing should use the respective services
+ * in dvb_ringbuffer (dvb_ringbuffer_avail, dvb_ringbuffer_free,
+ * dvb_ringbuffer_reset, dvb_ringbuffer_flush,
+ * dvb_ringbuffer_flush_spinlock_wakeup).
+ *
+ * Concurrency protection is handled in the same manner as in
+ * the dvb_ringbuffer implementation.
+ *
+ * Typical call flow from producer:
+ *
+ * - Start writing the raw-data of a new packet; the following call is
+ * repeated until the end of data of the specific packet
+ *
+ * mpq_streambuffer_data_write(...)
+ *
+ * - Now write a new packet describing the new available raw-data
+ * mpq_streambuffer_pkt_write(...)
+ *
+ * For linear buffer mode, writing a new packet with data size > 0 causes the
+ * current buffer to be marked as pending for reading, and triggers moving to
+ * the next available buffer, which shall now be the current write buffer.
+ *
+ * Typical call flow from consumer:
+ *
+ * - Poll for the next available packet:
+ * mpq_streambuffer_pkt_next(&streambuff, -1, &len)
+ *
+ * In a different approach, the consumer can wait on an event for new data and
+ * then call mpq_streambuffer_pkt_next; waiting for data can be done as follows:
+ *
+ * wait_event_interruptible(
+ * streambuff->packet_data.queue,
+ * !dvb_ringbuffer_empty(&streambuff->packet_data) ||
+ * (streambuff->packet_data.error != 0));
+ *
+ * - Get the new packet information:
+ * mpq_streambuffer_pkt_read(..)
+ *
+ * - Read the raw-data of the new packet. Here you can use two methods:
+ *
+ * 1. Read the data to a user supplied buffer:
+ * mpq_streambuffer_data_read()
+ *
+ * In this case a memory copy is done, the read pointer is updated in the raw
+ * data buffer, and the amount of raw-data is provided as part of the
+ * packet's information. User should then call mpq_streambuffer_pkt_dispose
+ * with dispose_data set to 0 as the raw-data was already disposed.
+ * Note that a secure buffer cannot be accessed directly and an error will
+ * occur.
+ *
+ * 2. Access the data directly using the raw-data address. The address
+ * of the raw data is provided as part of the packet's information. User
+ * should then call mpq_streambuffer_pkt_dispose with dispose_data set
+ * to 1 to dispose the packet along with its raw-data.
+ *
+ * - Disposal of packets:
+ * mpq_streambuffer_pkt_dispose(...)
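+ *
+ * For illustration only, a condensed consumer-side sketch combining the steps
+ * above could look as follows (error handling omitted; the local names 'sb',
+ * 'pkt' and 'payload' are hypothetical and not part of this API):
+ *
+ *	ssize_t idx;
+ *	size_t len;
+ *	struct mpq_streambuffer_packet_header pkt;
+ *	u8 payload[256];
+ *
+ *	idx = mpq_streambuffer_pkt_next(sb, -1, &len);
+ *	if (idx >= 0) {
+ *		mpq_streambuffer_pkt_read(sb, idx, &pkt, NULL);
+ *		mpq_streambuffer_data_read(sb, payload,
+ *			min_t(size_t, pkt.raw_data_len, sizeof(payload)));
+ *		mpq_streambuffer_pkt_dispose(sb, idx, 0);
+ *	}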
+ *
+ * For linear buffer mode, disposing of a packet with data size > 0,
+ * regardless of the 'dispose_data' parameter, causes the current buffer's
+ * data to be disposed and marked as free for writing, and triggers moving to
+ * the next available buffer, which shall now be the current read buffer.
+ */
+
+struct mpq_streambuffer;
+struct mpq_streambuffer_packet_header;
+
+typedef void (*mpq_streambuffer_dispose_cb) (
+ struct mpq_streambuffer *sbuff,
+ u32 offset,
+ size_t len,
+ void *user_data);
+
+enum mpq_streambuffer_mode {
+ MPQ_STREAMBUFFER_BUFFER_MODE_RING,
+ MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR
+};
+
+/**
+ * struct mpq_streambuffer - mpq stream buffer representation
+ *
+ * @raw_data: The buffer used to hold raw-data, or linear buffer descriptors
+ * @packet_data: The buffer used to hold the meta-data
+ * @buffers: array of buffer descriptor(s) holding buffer initial & dynamic
+ * buffer information
+ * @mode: mpq_streambuffer buffer management work mode - Ring-buffer or Linear
+ * buffers
+ * @buffers_num: number of data buffers to manage
+ * @pending_buffers_count: for linear buffer management, counts the number of
+ * buffers that are pending to be read
+ * @cb: data-dispose callback registered with
+ * mpq_streambuffer_register_data_dispose
+ * @cb_user_data: user data passed back to the dispose callback
+ */
+struct mpq_streambuffer {
+ struct dvb_ringbuffer raw_data;
+ struct dvb_ringbuffer packet_data;
+ struct mpq_streambuffer_buffer_desc *buffers;
+ enum mpq_streambuffer_mode mode;
+ u32 buffers_num;
+ u32 pending_buffers_count;
+ mpq_streambuffer_dispose_cb cb;
+ void *cb_user_data;
+};
+
+/**
+ * struct mpq_streambuffer_buffer_desc - data buffer descriptor
+ * @handle: ION handle's file descriptor of buffer
+ * @base: kernel mapped address to start of buffer.
+ * Can be NULL for secured buffers
+ * @size: size of buffer
+ * @read_ptr: initial read pointer value (should normally be 0)
+ * @write_ptr: initial write pointer value (should normally be 0)
+ */
+struct mpq_streambuffer_buffer_desc {
+ int handle;
+ void *base;
+ u32 size;
+ u32 read_ptr;
+ u32 write_ptr;
+};
+
+/**
+ * struct mpq_streambuffer_packet_header - packet header saved in packet buffer
+ * @user_data_len: length of private user (meta) data
+ * @raw_data_handle: ION handle's file descriptor of raw-data buffer
+ * @raw_data_offset: offset of raw-data from start of buffer (0 for linear)
+ * @raw_data_len: size of raw-data in the raw-data buffer (can be 0)
+ *
+ * The packet structure that is saved in each packet-buffer:
+ * user_data_len
+ * raw_data_handle
+ * raw_data_offset
+ * raw_data_len
+ * private user-data bytes
+ */
+struct mpq_streambuffer_packet_header {
+ u32 user_data_len;
+ int raw_data_handle;
+ u32 raw_data_offset;
+ u32 raw_data_len;
+} __packed;
+
+/**
+ * mpq_streambuffer_init - Initialize a new stream buffer
+ *
+ * @sbuff: The buffer to initialize
+ * @mode: Buffer management mode, ring buffer or linear buffers
+ * @data_buffers: array of data buffer descriptor(s).
+ * Data descriptor(s) must remain valid throughout the life
+ * span of the mpq_streambuffer object
+ * @data_buff_num: number of data buffers in the array
+ * @packet_buff: The buffer holding meta-data
+ * @packet_buff_size: Size of meta-data buffer
+ *
+ * Return Error status, -EINVAL if any of the arguments are invalid
+ *
+ * Note:
+ * for data_buff_num > 1, the mpq_streambuffer object manages these buffers as
+ * a separate set of linear buffers. A linear buffer cannot wrap-around and one
+ * can only write as many data bytes as the buffer's size. Data will not be
+ * written to the next free buffer.
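+ *
+ * A minimal ring-buffer-mode initialization sketch, for illustration only
+ * (the 'sb', 'data_mem' and 'meta_mem' names and the sizes used below are
+ * hypothetical, not mandated by this API):
+ *
+ *	struct mpq_streambuffer sb;
+ *	struct mpq_streambuffer_buffer_desc desc = {
+ *		.handle = -1,		/* no ION handle in this sketch */
+ *		.base = data_mem,	/* kernel mapping of the data buffer */
+ *		.size = 188 * 1024,
+ *		.read_ptr = 0,
+ *		.write_ptr = 0,
+ *	};
+ *
+ *	mpq_streambuffer_init(&sb, MPQ_STREAMBUFFER_BUFFER_MODE_RING,
+ *		&desc, 1, meta_mem, 4096);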
+ */
+int mpq_streambuffer_init(
+ struct mpq_streambuffer *sbuff,
+ enum mpq_streambuffer_mode mode,
+ struct mpq_streambuffer_buffer_desc *data_buffers,
+ u32 data_buff_num,
+ void *packet_buff,
+ size_t packet_buff_size);
+
+/**
+ * mpq_streambuffer_terminate - Terminate stream buffer
+ *
+ * @sbuff: The buffer to terminate
+ *
+ * The function sets the buffers' error flags to -ENODEV
+ * and wakes up any threads waiting on the buffer queues.
+ * Threads waiting on the buffer queues should check if
+ * the error was set.
+ */
+void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_pkt_next - Returns index of the next available packet.
+ *
+ * @sbuff: The stream buffer
+ * @idx: Previous packet index or -1 to return the index of the first
+ * available packet.
+ * @pktlen: The length of the ready packet
+ *
+ * Return index to the packet-buffer, -1 if buffer is empty
+ *
+ * After getting the index, the user of this function can either
+ * access the packet buffer directly using the returned index
+ * or read the data back from the buffer using mpq_streambuffer_pkt_read
+ */
+ssize_t mpq_streambuffer_pkt_next(
+ struct mpq_streambuffer *sbuff,
+ ssize_t idx, size_t *pktlen);
+
+/**
+ * mpq_streambuffer_pkt_read - Reads out the packet from the provided index.
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be read
+ * @packet: The read packet's header
+ * @user_data: The read private user data
+ *
+ * Return The actual number of bytes read, -EINVAL if the packet is
+ * already disposed or the packet-data is invalid.
+ *
+ * The packet is not disposed after this function is called; to dispose of it
+ * along with the raw-data it points to, use mpq_streambuffer_pkt_dispose.
+ * If there is no private user-data, the user-data pointer can be NULL.
+ * The caller of this function must make sure that the private user-data
+ * buffer has enough space for the private user-data length.
+ */
+ssize_t mpq_streambuffer_pkt_read(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data);
+
+/**
+ * mpq_streambuffer_pkt_dispose - Disposes a packet from the packet buffer
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be disposed
+ * @dispose_data: Indicates whether to update the read pointer inside the
+ * raw-data buffer for the respective data pointed to by the packet.
+ *
+ * Return error status, -EINVAL if the packet-data is invalid
+ *
+ * The function updates the read pointer inside the raw-data buffer
+ * for the respective data pointed to by the packet if dispose_data is set.
+ */
+int mpq_streambuffer_pkt_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ int dispose_data);
+
+/**
+ * mpq_streambuffer_pkt_write - Write a new packet to the packet buffer.
+ *
+ * @sbuff: The stream buffer
+ * @packet: The packet header to write
+ * @user_data: The private user-data to be written
+ *
+ * Return error status, -ENOSPC if there's no space to write the packet
+ */
+int mpq_streambuffer_pkt_write(
+ struct mpq_streambuffer *sbuff,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data);
+
+/**
+ * mpq_streambuffer_data_write - Write data to raw-data buffer
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer holding the data to be written
+ * @len: The length of the data buffer
+ *
+ * Return The actual number of bytes written, or -ENOSPC if there is
+ * no space to write the data
+ */
+ssize_t mpq_streambuffer_data_write(
+ struct mpq_streambuffer *sbuff,
+ const u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_write_deposit - Advances the raw-buffer write pointer.
+ * Assumes the raw-data was written by the user directly
+ *
+ * @sbuff: The stream buffer
+ * @len: The length of the raw-data that was already written
+ *
+ * Return error status
+ */
+int mpq_streambuffer_data_write_deposit(
+ struct mpq_streambuffer *sbuff,
+ size_t len);
+
+/**
+ * mpq_streambuffer_data_read - Reads out raw-data to the provided buffer.
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer to read the raw-data to
+ * @len: The length of the buffer that will hold the raw-data
+ *
+ * Return The actual number of bytes read or error code
+ *
+ * This function copies the data from the ring-buffer to the
+ * provided buf parameter. The user can save the extra copy by accessing
+ * the data pointer directly and reading from it, then updating the
+ * read pointer by the amount of data that was read using
+ * mpq_streambuffer_data_read_dispose.
+ */
+ssize_t mpq_streambuffer_data_read(
+ struct mpq_streambuffer *sbuff,
+ u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_user
+ *
+ * Same as mpq_streambuffer_data_read except data can be copied to a user-space
+ * buffer.
+ */
+ssize_t mpq_streambuffer_data_read_user(
+ struct mpq_streambuffer *sbuff,
+ u8 __user *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_dispose - Advances the raw-buffer read pointer.
+ * Assumes the raw-data was read by the user directly.
+ *
+ * @sbuff: The stream buffer
+ * @len: The length of the raw-data to be disposed
+ *
+ * Return error status, -EINVAL if there is not enough data in the buffer to
+ * be disposed
+ *
+ * The user can instead dispose a packet along with the data in the
+ * raw-data buffer using mpq_streambuffer_pkt_dispose.
+ */
+int mpq_streambuffer_data_read_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t len);
+/**
+ * mpq_streambuffer_get_buffer_handle - Returns the current linear buffer
+ * ION handle.
+ * @sbuff: The stream buffer
+ * @read_buffer: specifies if a read buffer handle is requested (when set),
+ * or a write buffer handle is requested.
+ * For linear buffer mode read & write buffers may be different
+ * buffers. For ring buffer mode, the same (single) buffer handle
+ * is returned.
+ * @handle: returned buffer handle
+ *
+ * Return error status
+ * -EINVAL if arguments are invalid.
+ * -EPERM if the stream buffer specified was not initialized with linear
+ * support.
+ */
+int mpq_streambuffer_get_buffer_handle(
+ struct mpq_streambuffer *sbuff,
+ int read_buffer,
+ int *handle);
+
+/**
+ * mpq_streambuffer_data_free - Returns number of free bytes in data buffer.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this returns the number of free bytes in
+ * the current write buffer only.
+ */
+ssize_t mpq_streambuffer_data_free(
+ struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_data_avail - Returns number of bytes in data buffer that
+ * can be read.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this returns the number of data bytes in
+ * the current read buffer only.
+ */
+ssize_t mpq_streambuffer_data_avail(
+ struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_register_data_dispose - Registers a callback to notify on
+ * data disposal events.
+ * @sbuff: The stream buffer object
+ * @cb_func: user callback function
+ * @user_data: user data to be passed to the callback function.
+ *
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_register_data_dispose(
+ struct mpq_streambuffer *sbuff,
+ mpq_streambuffer_dispose_cb cb_func,
+ void *user_data);
+
+/**
+ * mpq_streambuffer_get_data_rw_offset - returns read/write offsets of the
+ * current data buffer.
+ * @sbuff: The stream buffer object
+ * @read_offset: returned read offset
+ * @write_offset: returned write offset
+ *
+ * Note: read offset or write offset may be NULL if not required.
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_get_data_rw_offset(
+ struct mpq_streambuffer *sbuff,
+ u32 *read_offset,
+ u32 *write_offset);
+
+/**
+ * mpq_streambuffer_metadata_free - returns number of free bytes in the meta
+ * data buffer, or error status.
+ * @sbuff: the stream buffer object
+ */
+ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_flush - flush both pending packets and data in buffer
+ *
+ * @sbuff: the stream buffer object
+ *
+ * Returns error status
+ */
+int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff);
+
+/*
+ * ------------------------------------------------------
+ * Consumer or AV Decoder Stream Interface to Ring Buffer
+ * ------------------------------------------------------
+ * Producer is Demux Driver
+ * ------------------------
+ *
+ * call from Audio/Video Decoder Driver to find Audio/Video
+ * streambuffer AV handles, "DMX_PES_AUDIO0 through 3" or
+ * "DMX_PES_VIDEO0 through 3" interfaces corresponding to 4 programs.
+ */
+
+/* call from Audio/Video Decoder Driver via POLLING to consume
+ * Headers and Compressed data from ring buffer using streambuffer handle.
+ * hdrdata[] and cdata[] buffers have to be malloc'd by the consumer
+ *
+ * --------------------------
+ * Consumer Calling Sequence
+ * --------------------------
+ * Find the streambuffer corresponding to a DMX TS PES stream instance.
+ * 1. consumer_audio_streambuffer() or consumer_video_streambuffer()
+ * Process the packet headers if required.
+ * 2. mpq_read_new_packet_hdr_data()
+ * Process the compressed data by forwarding to AV decoder.
+ * 3. mpq_read_new_packet_compressed_data()
+ * Dispose the packet.
+ * 4. mpq_dispose_new_packet_read()
+ *
+ * The Audio/Video drivers (or consumers) require the stream_buffer information
+ * for consuming packet headers and compressed AV data from the
+ * ring buffer filled by the demux driver, which is the producer.
+ */
+
+#endif /* _MPQ_STREAM_BUFFER_H */
diff --git a/include/linux/qcom_tspp.h b/include/linux/qcom_tspp.h
new file mode 100644
index 000000000000..1b34c389d7f0
--- /dev/null
+++ b/include/linux/qcom_tspp.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _MSM_TSPP_H_ +#define _MSM_TSPP_H_ + +struct tspp_data_descriptor { + void *virt_base; /* logical address of the actual data */ + phys_addr_t phys_base; /* physical address of the actual data */ + u32 size; /* size of buffer in bytes */ + int id; /* unique identifier */ + void *user; /* user-defined data */ +}; + +enum tspp_key_parity { + TSPP_KEY_PARITY_EVEN, + TSPP_KEY_PARITY_ODD +}; + +struct tspp_key { + enum tspp_key_parity parity; + int lsb; + int msb; +}; + +enum tspp_source { + TSPP_SOURCE_TSIF0, + TSPP_SOURCE_TSIF1, + TSPP_SOURCE_MEM, + TSPP_SOURCE_NONE = -1 +}; + +enum tspp_mode { + TSPP_MODE_DISABLED, + TSPP_MODE_PES, + TSPP_MODE_RAW, + TSPP_MODE_RAW_NO_SUFFIX +}; + +enum tspp_tsif_mode { + TSPP_TSIF_MODE_LOOPBACK, /* loopback mode */ + TSPP_TSIF_MODE_1, /* without sync */ + TSPP_TSIF_MODE_2 /* with sync signal */ +}; + +struct tspp_filter { + int pid; + int mask; + enum tspp_mode mode; + unsigned int priority; /* 0 - 15 */ + int decrypt; + enum tspp_source source; +}; + +struct tspp_select_source { + enum tspp_source source; + enum tspp_tsif_mode mode; + int clk_inverse; + int data_inverse; + int sync_inverse; + int enable_inverse; +}; + +enum tsif_tts_source { + TSIF_TTS_TCR = 0, /* Time stamps from TCR counter */ + TSIF_TTS_LPASS_TIMER /* Time stamps from AV/Qtimer Timer */ +}; + +typedef void (tspp_notifier)(int channel_id, void *user); +typedef void* (tspp_allocator)(int channel_id, u32 size, + phys_addr_t *phys_base, void *user); +typedef void (tspp_memfree)(int channel_id, u32 size, + void *virt_base, phys_addr_t phys_base, void *user); + +/* Kernel API functions */ +int tspp_open_stream(u32 dev, u32 channel_id, + struct tspp_select_source *source); +int tspp_close_stream(u32 dev, u32 channel_id); +int tspp_open_channel(u32 dev, u32 channel_id); +int tspp_close_channel(u32 dev, u32 channel_id); +int tspp_get_ref_clk_counter(u32 dev, + enum tspp_source source, u32 *tcr_counter); +int tspp_add_filter(u32 dev, u32 channel_id, struct tspp_filter *filter); +int tspp_remove_filter(u32 dev, u32 channel_id, struct tspp_filter *filter); +int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key); +int tspp_register_notification(u32 dev, u32 channel_id, tspp_notifier *notify, + void *data, u32 timer_ms); +int tspp_unregister_notification(u32 dev, u32 channel_id); +const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id); +int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id); +int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count, + u32 size, u32 int_freq, tspp_allocator *alloc, + tspp_memfree *memfree, void *user); + +int tspp_get_tts_source(u32 dev, int *tts_source); +int tspp_get_lpass_time_counter(u32 dev, enum tspp_source source, + u64 *lpass_time_counter); + +#endif /* _MSM_TSPP_H_ */ -- GitLab From ba29a3e82c501bb7b9947d88d8d0824d89b4669c Mon Sep 17 00:00:00 2001 From: Udaya Bhaskara Reddy Mallavarapu Date: Mon, 12 Jun 2017 14:57:05 +0530 Subject: [PATCH 364/786] Migrate mpq demux driver from kernel 4.4 to 4.9 This change migrates all the relevant files and 
updates made to the dvb/demux framework, required for mpq demux driver. The snapshot is taken as of msm-4.4, 'commit a74dd0fdc772 ("mmc: core: retune after un-gating the clocks")' In addition, introduce a few code changes to reduce checkpatch warnings, typos and other style issues. Change-Id: Ifb50d730ff8bd245f5f96cd880dfe5c9a0a75fd2 Signed-off-by: Udaya Bhaskara Reddy Mallavarapu --- drivers/media/dvb-core/demux.h | 224 +- drivers/media/dvb-core/dmxdev.c | 4060 +++++++++++++++++++++-- drivers/media/dvb-core/dmxdev.h | 137 +- drivers/media/dvb-core/dvb_demux.c | 2836 ++++++++++++++-- drivers/media/dvb-core/dvb_demux.h | 297 ++ drivers/media/dvb-core/dvb_net.c | 2 +- drivers/media/dvb-core/dvb_ringbuffer.c | 69 +- drivers/media/dvb-core/dvb_ringbuffer.h | 35 +- drivers/media/tuners/xc5000.c | 5 +- include/uapi/linux/dvb/dmx.h | 728 ++++ 10 files changed, 7821 insertions(+), 572 deletions(-) diff --git a/drivers/media/dvb-core/demux.h b/drivers/media/dvb-core/demux.h index aeda2b64931c..fbe0165e5a03 100644 --- a/drivers/media/dvb-core/demux.h +++ b/drivers/media/dvb-core/demux.h @@ -40,6 +40,8 @@ * Common definitions */ +#define DMX_EVENT_QUEUE_SIZE 500 /* number of events */ + /* * DMX_MAX_FILTER_SIZE: Maximum length (in bytes) of a section/PES filter. */ @@ -60,6 +62,104 @@ #define DMX_MAX_SECFEED_SIZE (DMX_MAX_SECTION_SIZE + 188) #endif +/* + * enum dmx_success: Success codes for the Demux Callback API. + */ +enum dmx_success { + DMX_OK = 0, /* Received Ok */ + DMX_OK_PES_END, /* Received OK, data reached end of PES packet */ + DMX_OK_PCR, /* Received OK, data with new PCR/STC pair */ + DMX_OK_EOS, /* Received OK, reached End-of-Stream (EOS) */ + DMX_OK_MARKER, /* Received OK, reached a data Marker */ + DMX_LENGTH_ERROR, /* Incorrect length */ + DMX_OVERRUN_ERROR, /* Receiver ring buffer overrun */ + DMX_CRC_ERROR, /* Incorrect CRC */ + DMX_FRAME_ERROR, /* Frame alignment error */ + DMX_FIFO_ERROR, /* Receiver FIFO overrun */ + DMX_MISSED_ERROR, /* Receiver missed packet */ + DMX_OK_DECODER_BUF, /* Received OK, new ES data in decoder buffer */ + DMX_OK_IDX, /* Received OK, new index event */ + DMX_OK_SCRAMBLING_STATUS, /* Received OK, new scrambling status */ +}; + + +/* + * struct dmx_data_ready: Parameters for event notification callback. + * Event notification notifies demux device that data is written + * and available in the device's output buffer or provides + * notification on errors and other events. In the latter case + * data_length is zero. + */ +struct dmx_data_ready { + enum dmx_success status; + + /* + * data_length may be 0 in case of DMX_OK_PES_END or DMX_OK_EOS + * and in non-DMX_OK_XXX events. In DMX_OK_PES_END, + * data_length is for data coming after the end of PES. + */ + int data_length; + + union { + struct { + int start_gap; + int actual_length; + int disc_indicator_set; + int pes_length_mismatch; + u64 stc; + u32 tei_counter; + u32 cont_err_counter; + u32 ts_packets_num; + } pes_end; + + struct { + u64 pcr; + u64 stc; + int disc_indicator_set; + } pcr; + + struct { + int handle; + int cookie; + u32 offset; + u32 len; + int pts_exists; + u64 pts; + int dts_exists; + u64 dts; + u32 tei_counter; + u32 cont_err_counter; + u32 ts_packets_num; + u32 ts_dropped_bytes; + u64 stc; + } buf; + + struct { + u64 id; + } marker; + + struct dmx_index_event_info idx_event; + struct dmx_scrambling_status_event_info scrambling_bits; + }; +}; + +/* + * struct data_buffer: Parameters of buffer allocated by + * demux device for input/output. 
Can be used to directly map the + * demux-device buffer to HW output if HW supports it. + */ +struct data_buffer { + /* dvb_ringbuffer managed by demux-device */ + const struct dvb_ringbuffer *ringbuff; + + + /* + * Private handle returned by kernel demux when + * map_buffer is called in case external buffer + * is used. NULL if buffer is allocated internally. + */ + void *priv_handle; +}; /* * TS packet reception */ @@ -95,10 +195,18 @@ enum ts_filter_type { * Using this API, the client can set the filtering properties to start/stop * filtering TS packets on a particular TS feed. */ +struct dmx_ts_feed; + +typedef int (*dmx_ts_data_ready_cb)( + struct dmx_ts_feed *source, + struct dmx_data_ready *dmx_data_ready); + struct dmx_ts_feed { int is_filtering; struct dmx_demux *parent; + struct data_buffer buffer; void *priv; + struct dmx_decoder_buffers *decoder_buffers; int (*set)(struct dmx_ts_feed *feed, u16 pid, int type, @@ -107,6 +215,34 @@ struct dmx_ts_feed { ktime_t timeout); int (*start_filtering)(struct dmx_ts_feed *feed); int (*stop_filtering)(struct dmx_ts_feed *feed); + int (*set_video_codec)(struct dmx_ts_feed *feed, + enum dmx_video_codec video_codec); + int (*set_idx_params)(struct dmx_ts_feed *feed, + struct dmx_indexing_params *idx_params); + int (*get_decoder_buff_status)( + struct dmx_ts_feed *feed, + struct dmx_buffer_status *dmx_buffer_status); + int (*reuse_decoder_buffer)( + struct dmx_ts_feed *feed, + int cookie); + int (*data_ready_cb)(struct dmx_ts_feed *feed, + dmx_ts_data_ready_cb callback); + int (*notify_data_read)(struct dmx_ts_feed *feed, + u32 bytes_num); + int (*set_tsp_out_format)(struct dmx_ts_feed *feed, + enum dmx_tsp_format_t tsp_format); + int (*set_secure_mode)(struct dmx_ts_feed *feed, + struct dmx_secure_mode *sec_mode); + int (*set_cipher_ops)(struct dmx_ts_feed *feed, + struct dmx_cipher_operations *cipher_ops); + int (*oob_command)(struct dmx_ts_feed *feed, + struct dmx_oob_command *cmd); + int (*ts_insertion_init)(struct dmx_ts_feed *feed); + int (*ts_insertion_terminate)(struct dmx_ts_feed *feed); + int (*ts_insertion_insert_buffer)(struct dmx_ts_feed *feed, + char *data, size_t size); + int (*get_scrambling_bits)(struct dmx_ts_feed *feed, u8 *value); + int (*flush_buffer)(struct dmx_ts_feed *feed, size_t length); }; /* @@ -131,14 +267,21 @@ struct dmx_ts_feed { * corresponding bits are compared. The filter only accepts sections that are * equal to filter_value in all the tested bit positions. 
*/ + +struct dmx_section_feed; struct dmx_section_filter { u8 filter_value[DMX_MAX_FILTER_SIZE]; u8 filter_mask[DMX_MAX_FILTER_SIZE]; u8 filter_mode[DMX_MAX_FILTER_SIZE]; struct dmx_section_feed *parent; /* Back-pointer */ + struct data_buffer buffer; void *priv; /* Pointer to private data of the API client */ }; +typedef int (*dmx_section_data_ready_cb)( + struct dmx_section_filter *source, + struct dmx_data_ready *dmx_data_ready); + /** * struct dmx_section_feed - Structure that contains a section feed filter * @@ -189,8 +332,24 @@ struct dmx_section_feed { struct dmx_section_filter *filter); int (*start_filtering)(struct dmx_section_feed *feed); int (*stop_filtering)(struct dmx_section_feed *feed); + int (*data_ready_cb)(struct dmx_section_feed *feed, + dmx_section_data_ready_cb callback); + int (*notify_data_read)(struct dmx_section_filter *filter, + u32 bytes_num); + int (*set_secure_mode)(struct dmx_section_feed *feed, + struct dmx_secure_mode *sec_mode); + int (*set_cipher_ops)(struct dmx_section_feed *feed, + struct dmx_cipher_operations *cipher_ops); + int (*oob_command)(struct dmx_section_feed *feed, + struct dmx_oob_command *cmd); + int (*get_scrambling_bits)(struct dmx_section_feed *feed, u8 *value); + int (*flush_buffer)(struct dmx_section_feed *feed, size_t length); }; +/* + * Callback functions + */ + /** * typedef dmx_ts_cb - DVB demux TS filter callback function prototype * @@ -295,9 +454,19 @@ typedef int (*dmx_section_cb)(const u8 *buffer1, size_t buffer2_len, struct dmx_section_filter *source); -/* - * DVB Front-End - */ +typedef int (*dmx_ts_fullness) ( + struct dmx_ts_feed *source, + int required_space, + int wait); + +typedef int (*dmx_section_fullness) ( + struct dmx_section_filter *source, + int required_space, + int wait); + +/*--------------------------------------------------------------------------*/ +/* DVB Front-End */ +/*--------------------------------------------------------------------------*/ /** * enum dmx_frontend_source - Used to identify the type of frontend @@ -312,6 +481,13 @@ typedef int (*dmx_section_cb)(const u8 *buffer1, enum dmx_frontend_source { DMX_MEMORY_FE, DMX_FRONTEND_0, + DMX_FRONTEND_1, + DMX_FRONTEND_2, + DMX_FRONTEND_3, + DMX_STREAM_0, /* external stream input, e.g. LVDS */ + DMX_STREAM_1, + DMX_STREAM_2, + DMX_STREAM_3 }; /** @@ -345,14 +521,24 @@ struct dmx_frontend { */ enum dmx_demux_caps { DMX_TS_FILTERING = 1, + DMX_PES_FILTERING = 2, DMX_SECTION_FILTERING = 4, DMX_MEMORY_BASED_FILTERING = 8, + DMX_CRC_CHECKING = 16, + DMX_TS_DESCRAMBLING = 32 }; /* * Demux resource type identifier. */ +/* + * DMX_FE_ENTRY(): Casts elements in the list of registered + * front-ends from the generic type struct list_head + * to the type * struct dmx_frontend. 
+ * + */ + /** * DMX_FE_ENTRY - Casts elements in the list of registered * front-ends from the generic type struct list_head @@ -557,6 +743,10 @@ struct dmx_demux { enum dmx_demux_caps capabilities; struct dmx_frontend *frontend; void *priv; + struct data_buffer dvr_input; /* DVR input buffer */ + int dvr_input_protected; + struct dentry *debugfs_demux_dir; /* debugfs dir */ + int (*open)(struct dmx_demux *demux); int (*close)(struct dmx_demux *demux); int (*write)(struct dmx_demux *demux, const char __user *buf, @@ -582,15 +772,31 @@ struct dmx_demux { int (*get_pes_pids)(struct dmx_demux *demux, u16 *pids); - /* private: */ + int (*get_caps)(struct dmx_demux *demux, struct dmx_caps *caps); + + int (*set_source)(struct dmx_demux *demux, const dmx_source_t *src); + + int (*set_tsp_format)(struct dmx_demux *demux, + enum dmx_tsp_format_t tsp_format); + + int (*set_playback_mode)(struct dmx_demux *demux, + enum dmx_playback_mode_t mode, + dmx_ts_fullness ts_fullness_callback, + dmx_section_fullness sec_fullness_callback); + + int (*write_cancel)(struct dmx_demux *demux); - /* - * Only used at av7110, to read some data from firmware. - * As this was never documented, we have no clue about what's - * there, and its usage on other drivers aren't encouraged. - */ int (*get_stc)(struct dmx_demux *demux, unsigned int num, u64 *stc, unsigned int *base); + + int (*map_buffer)(struct dmx_demux *demux, + struct dmx_buffer *dmx_buffer, + void **priv_handle, void **mem); + + int (*unmap_buffer)(struct dmx_demux *demux, + void *priv_handle); + + int (*get_tsp_size)(struct dmx_demux *demux); }; #endif /* #ifndef __DEMUX_H */ diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c index 7b67e1dd97fd..e868f92ce30c 100644 --- a/drivers/media/dvb-core/dmxdev.c +++ b/drivers/media/dvb-core/dmxdev.c @@ -28,15 +28,74 @@ #include #include #include -#include +#include +#include +#include +#include +#include #include "dmxdev.h" -static int debug; +static int overflow_auto_flush = 1; +module_param(overflow_auto_flush, int, 0644); +MODULE_PARM_DESC(overflow_auto_flush, + "Automatically flush buffer on overflow (default: on)"); -module_param(debug, int, 0644); -MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); +#define DMX_DEFAULT_DECODER_BUFFER_SIZE (32768) -#define dprintk if (debug) printk +static inline int dvb_dmxdev_verify_buffer_size(u32 size, u32 max_size, + u32 size_align) +{ + if (size_align) + return size <= max_size && !(size % size_align); + else + return size <= max_size; +} + +static int dvb_filter_verify_buffer_size(struct dmxdev_filter *filter) +{ + struct dmx_caps caps; + size_t size = filter->buffer.size; + + /* + * For backward compatibility, if no demux capabilities can + * be retrieved assume size is ok. + * Decoder filter buffer size is verified when decoder buffer is set. + */ + if (filter->dev->demux->get_caps) { + filter->dev->demux->get_caps(filter->dev->demux, &caps); + + if (filter->type == DMXDEV_TYPE_SEC) + return dvb_dmxdev_verify_buffer_size( + size, + caps.section.max_size, + caps.section.size_alignment); + + if (filter->params.pes.output == DMX_OUT_TAP) + return dvb_dmxdev_verify_buffer_size( + size, + caps.pes.max_size, + caps.pes.size_alignment); + + size = (filter->params.pes.output == DMX_OUT_TS_TAP) ? 
+ filter->dev->dvr_buffer.size : size; + + if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP || + filter->params.pes.output == DMX_OUT_TS_TAP) { + if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188) + return dvb_dmxdev_verify_buffer_size( + size, + caps.recording_188_tsp.max_size, + caps.recording_188_tsp.size_alignment); + + return dvb_dmxdev_verify_buffer_size( + size, + caps.recording_192_tsp.max_size, + caps.recording_192_tsp.size_alignment); + } + } + + return 1; +} static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf, const u8 *src, size_t len) @@ -50,16 +109,400 @@ static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf, free = dvb_ringbuffer_free(buf); if (len > free) { - dprintk("dmxdev: buffer overflow\n"); + pr_debug("dmxdev: buffer overflow\n"); return -EOVERFLOW; } return dvb_ringbuffer_write(buf, src, len); } -static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src, - int non_blocking, char __user *buf, - size_t count, loff_t *ppos) +static inline void dvb_dmxdev_notify_data_read(struct dmxdev_filter *filter, + int bytes_read) +{ + if (!filter) + return; + + if (filter->type == DMXDEV_TYPE_SEC) { + if (filter->feed.sec.feed->notify_data_read) + filter->feed.sec.feed->notify_data_read( + filter->filter.sec, + bytes_read); + } else { + struct dmxdev_feed *feed; + + /* + * All feeds of same demux-handle share the same output + * buffer, it is enough to notify on the buffer status + * on one of the feeds + */ + feed = list_first_entry(&filter->feed.ts, + struct dmxdev_feed, next); + + if (feed->ts->notify_data_read) + feed->ts->notify_data_read( + feed->ts, + bytes_read); + } +} + +static inline u32 dvb_dmxdev_advance_event_idx(u32 index) +{ + index++; + if (index >= DMX_EVENT_QUEUE_SIZE) + index = 0; + + return index; +} + +static inline int dvb_dmxdev_events_is_full(struct dmxdev_events_queue *events) +{ + int new_write_index; + + new_write_index = dvb_dmxdev_advance_event_idx(events->write_index); + if (new_write_index == events->read_index) + return 1; + + return 0; + +} + +static inline void dvb_dmxdev_flush_events(struct dmxdev_events_queue *events) +{ + events->read_index = 0; + events->write_index = 0; + events->notified_index = 0; + events->bytes_read_no_event = 0; + events->current_event_data_size = 0; + events->wakeup_events_counter = 0; +} + +static inline void dvb_dmxdev_flush_output(struct dvb_ringbuffer *buffer, + struct dmxdev_events_queue *events) +{ + dvb_dmxdev_flush_events(events); + dvb_ringbuffer_flush(buffer); +} + +static int dvb_dmxdev_update_pes_event(struct dmx_filter_event *event, + int bytes_read) +{ + int start_delta; + + if (event->params.pes.total_length <= bytes_read) + return event->params.pes.total_length; + + /* + * only part of the data relevant to this event was read. + * Update the event's information to reflect the new state. 
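+	 * For illustration only (hypothetical numbers): with base_offset=0,
+	 * start_offset=4, actual_length=180 and total_length=184, reading
+	 * 10 bytes leaves total_length=174; since 10 > start_delta (4),
+	 * start_offset moves to 10, actual_length drops to 174 and
+	 * base_offset is realigned to start_offset. Reading only 2 bytes
+	 * would instead just advance base_offset to 2 (total_length 182).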
+ */ + event->params.pes.total_length -= bytes_read; + + start_delta = event->params.pes.start_offset - + event->params.pes.base_offset; + + if (bytes_read <= start_delta) { + event->params.pes.base_offset += + bytes_read; + } else { + start_delta = + bytes_read - start_delta; + + event->params.pes.start_offset += start_delta; + event->params.pes.actual_length -= start_delta; + + event->params.pes.base_offset = + event->params.pes.start_offset; + } + + return 0; +} + +static int dvb_dmxdev_update_section_event(struct dmx_filter_event *event, + int bytes_read) +{ + int start_delta; + + if (event->params.section.total_length <= bytes_read) + return event->params.section.total_length; + + /* + * only part of the data relevant to this event was read. + * Update the event's information to reflect the new state. + */ + + event->params.section.total_length -= bytes_read; + + start_delta = event->params.section.start_offset - + event->params.section.base_offset; + + if (bytes_read <= start_delta) { + event->params.section.base_offset += + bytes_read; + } else { + start_delta = + bytes_read - start_delta; + + event->params.section.start_offset += start_delta; + event->params.section.actual_length -= start_delta; + + event->params.section.base_offset = + event->params.section.start_offset; + } + + return 0; +} + +static int dvb_dmxdev_update_rec_event(struct dmx_filter_event *event, + int bytes_read) +{ + if (event->params.recording_chunk.size <= bytes_read) + return event->params.recording_chunk.size; + + /* + * only part of the data relevant to this event was read. + * Update the event's information to reflect the new state. + */ + event->params.recording_chunk.size -= bytes_read; + event->params.recording_chunk.offset += bytes_read; + + return 0; +} + +static int dvb_dmxdev_add_event(struct dmxdev_events_queue *events, + struct dmx_filter_event *event) +{ + int res; + int new_write_index; + int data_event; + + /* Check if the event is disabled */ + if (events->event_mask.disable_mask & event->type) + return 0; + + /* Check if we are adding an event that user already read its data */ + if (events->bytes_read_no_event) { + data_event = 1; + + if (event->type == DMX_EVENT_NEW_PES) + res = dvb_dmxdev_update_pes_event(event, + events->bytes_read_no_event); + else if (event->type == DMX_EVENT_NEW_SECTION) + res = dvb_dmxdev_update_section_event(event, + events->bytes_read_no_event); + else if (event->type == DMX_EVENT_NEW_REC_CHUNK) + res = dvb_dmxdev_update_rec_event(event, + events->bytes_read_no_event); + else + data_event = 0; + + if (data_event) { + if (res) { + /* + * Data relevant to this event was fully + * consumed already, discard event. 
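+				 * res holds the event's total length here; it
+				 * is subtracted from bytes_read_no_event (the
+				 * bytes the reader consumed ahead of any
+				 * event) and the event is never queued, since
+				 * it would only describe data that was
+				 * already read.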
+ */ + events->bytes_read_no_event -= res; + return 0; + } + events->bytes_read_no_event = 0; + } else { + /* + * data was read beyond the non-data event, + * making it not relevant anymore + */ + return 0; + } + } + + new_write_index = dvb_dmxdev_advance_event_idx(events->write_index); + if (new_write_index == events->read_index) { + pr_err("dmxdev: events overflow\n"); + return -EOVERFLOW; + } + + events->queue[events->write_index] = *event; + events->write_index = new_write_index; + + if (!(events->event_mask.no_wakeup_mask & event->type)) + events->wakeup_events_counter++; + + return 0; +} + +static int dvb_dmxdev_remove_event(struct dmxdev_events_queue *events, + struct dmx_filter_event *event) +{ + if (events->notified_index == events->write_index) + return -ENODATA; + + *event = events->queue[events->notified_index]; + + events->notified_index = + dvb_dmxdev_advance_event_idx(events->notified_index); + + if (!(events->event_mask.no_wakeup_mask & event->type)) + events->wakeup_events_counter--; + + return 0; +} + +static int dvb_dmxdev_update_events(struct dmxdev_events_queue *events, + int bytes_read) +{ + struct dmx_filter_event *event; + int res; + int data_event; + + /* + * If data events are not enabled on this filter, + * there's nothing to update. + */ + if (events->data_read_event_masked) + return 0; + + /* + * Go through all events that were notified and + * remove them from the events queue if their respective + * data was read. + */ + while ((events->read_index != events->notified_index) && + (bytes_read)) { + event = events->queue + events->read_index; + + data_event = 1; + + if (event->type == DMX_EVENT_NEW_PES) + res = dvb_dmxdev_update_pes_event(event, bytes_read); + else if (event->type == DMX_EVENT_NEW_SECTION) + res = dvb_dmxdev_update_section_event(event, + bytes_read); + else if (event->type == DMX_EVENT_NEW_REC_CHUNK) + res = dvb_dmxdev_update_rec_event(event, bytes_read); + else + data_event = 0; + + if (data_event) { + if (res) { + /* + * Data relevant to this event was + * fully consumed, remove it from the queue. + */ + bytes_read -= res; + events->read_index = + dvb_dmxdev_advance_event_idx( + events->read_index); + } else { + bytes_read = 0; + } + } else { + /* + * non-data event was already notified, + * no need to keep it + */ + events->read_index = dvb_dmxdev_advance_event_idx( + events->read_index); + } + } + + if (!bytes_read) + return 0; + + /* + * If we reached here it means: + * bytes_read != 0 + * events->read_index == events->notified_index + * Check if there are pending events in the queue + * which the user didn't read while their relevant data + * was read. + */ + while ((events->notified_index != events->write_index) && + (bytes_read)) { + event = events->queue + events->notified_index; + + data_event = 1; + + if (event->type == DMX_EVENT_NEW_PES) + res = dvb_dmxdev_update_pes_event(event, bytes_read); + else if (event->type == DMX_EVENT_NEW_SECTION) + res = dvb_dmxdev_update_section_event(event, + bytes_read); + else if (event->type == DMX_EVENT_NEW_REC_CHUNK) + res = dvb_dmxdev_update_rec_event(event, bytes_read); + else + data_event = 0; + + if (data_event) { + if (res) { + /* + * Data relevant to this event was + * fully consumed, remove it from the queue. 
+ */ + bytes_read -= res; + events->notified_index = + dvb_dmxdev_advance_event_idx( + events->notified_index); + if (!(events->event_mask.no_wakeup_mask & + event->type)) + events->wakeup_events_counter--; + } else { + bytes_read = 0; + } + } else { + if (bytes_read) + /* + * data was read beyond the non-data event, + * making it not relevant anymore + */ + events->notified_index = + dvb_dmxdev_advance_event_idx( + events->notified_index); + if (!(events->event_mask.no_wakeup_mask & + event->type)) + events->wakeup_events_counter--; + } + + events->read_index = events->notified_index; + } + + /* + * Check if data was read without having a respective + * event in the events-queue + */ + if (bytes_read) + events->bytes_read_no_event += bytes_read; + + return 0; +} + +static inline int dvb_dmxdev_check_data(struct dmxdev_filter *filter, + struct dvb_ringbuffer *src) +{ + int data_status_change; + + if (filter) + if (mutex_lock_interruptible(&filter->mutex)) + return -ERESTARTSYS; + + if (!src->data || + !dvb_ringbuffer_empty(src) || + src->error || + (filter && + (filter->state != DMXDEV_STATE_GO) && + (filter->state != DMXDEV_STATE_DONE))) + data_status_change = 1; + else + data_status_change = 0; + + if (filter) + mutex_unlock(&filter->mutex); + + return data_status_change; +} + +static ssize_t dvb_dmxdev_buffer_read(struct dmxdev_filter *filter, + struct dvb_ringbuffer *src, + int non_blocking, char __user *buf, + size_t count, loff_t *ppos) { size_t todo; ssize_t avail; @@ -70,7 +513,7 @@ static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src, if (src->error) { ret = src->error; - dvb_ringbuffer_flush(src); + src->error = 0; return ret; } @@ -80,15 +523,35 @@ static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src, break; } + if (filter) { + if ((filter->state == DMXDEV_STATE_DONE) && + dvb_ringbuffer_empty(src)) + break; + + mutex_unlock(&filter->mutex); + } + ret = wait_event_interruptible(src->queue, - !dvb_ringbuffer_empty(src) || - (src->error != 0)); + dvb_dmxdev_check_data(filter, src)); + + if (filter) { + if (mutex_lock_interruptible(&filter->mutex)) + return -ERESTARTSYS; + + if ((filter->state != DMXDEV_STATE_GO) && + (filter->state != DMXDEV_STATE_DONE)) + return -ENODEV; + } + if (ret < 0) break; + if (!src->data) + return 0; + if (src->error) { ret = src->error; - dvb_ringbuffer_flush(src); + src->error = 0; break; } @@ -103,6 +566,9 @@ static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src, buf += ret; } + if (count - todo) /* some data was read? */ + wake_up_all(&src->queue); + return (count - todo) ? 
(count - todo) : ret; } @@ -120,13 +586,238 @@ static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type) return NULL; } +static void dvb_dvr_oob_cmd(struct dmxdev *dmxdev, struct dmx_oob_command *cmd) +{ + int i; + struct dmxdev_filter *filter; + struct dmxdev_feed *feed; + + for (i = 0; i < dmxdev->filternum; i++) { + filter = &dmxdev->filter[i]; + if (!filter || filter->state != DMXDEV_STATE_GO) + continue; + + switch (filter->type) { + case DMXDEV_TYPE_SEC: + filter->feed.sec.feed->oob_command( + filter->feed.sec.feed, cmd); + break; + case DMXDEV_TYPE_PES: + feed = list_first_entry(&filter->feed.ts, + struct dmxdev_feed, next); + feed->ts->oob_command(feed->ts, cmd); + break; + case DMXDEV_TYPE_NONE: + break; + default: + break; + } + } +} + +static int dvb_dvr_feed_cmd(struct dmxdev *dmxdev, struct dvr_command *dvr_cmd) +{ + int ret = 0; + size_t todo; + int bytes_written = 0; + size_t split; + size_t tsp_size; + u8 *data_start; + struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer; + + todo = dvr_cmd->cmd.data_feed_count; + + if (dmxdev->demux->get_tsp_size) + tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux); + else + tsp_size = 188; + + while (todo >= tsp_size) { + /* wait for input */ + ret = wait_event_interruptible( + src->queue, + (dvb_ringbuffer_avail(src) >= tsp_size) || + dmxdev->dvr_in_exit || src->error); + + if (ret < 0) + break; + + spin_lock(&dmxdev->dvr_in_lock); + + if (dmxdev->exit || dmxdev->dvr_in_exit) { + spin_unlock(&dmxdev->dvr_in_lock); + ret = -ENODEV; + break; + } + + if (src->error) { + spin_unlock(&dmxdev->dvr_in_lock); + wake_up_all(&src->queue); + ret = -EINVAL; + break; + } + + dmxdev->dvr_processing_input = 1; + + split = (src->pread + todo > src->size) ? + src->size - src->pread : 0; + + /* + * In DVR PULL mode, write might block. + * Lock on DVR buffer is released before calling to + * write, if DVR was released meanwhile, dvr_in_exit is + * prompted. Lock is acquired when updating the read pointer + * again to preserve read/write pointers consistency. + * + * In protected input mode, DVR input buffer is not mapped + * to kernel memory. Underlying demux implementation + * should trigger HW to read from DVR input buffer + * based on current read offset. + */ + if (split > 0) { + data_start = (dmxdev->demux->dvr_input_protected) ? + NULL : (src->data + src->pread); + + spin_unlock(&dmxdev->dvr_in_lock); + ret = dmxdev->demux->write(dmxdev->demux, + data_start, + split); + + if (ret < 0) { + pr_err("dmxdev: dvr write error %d\n", ret); + continue; + } + + if (dmxdev->dvr_in_exit) { + ret = -ENODEV; + break; + } + + spin_lock(&dmxdev->dvr_in_lock); + + todo -= ret; + bytes_written += ret; + DVB_RINGBUFFER_SKIP(src, ret); + if (ret < split) { + dmxdev->dvr_processing_input = 0; + spin_unlock(&dmxdev->dvr_in_lock); + wake_up_all(&src->queue); + continue; + } + } + + data_start = (dmxdev->demux->dvr_input_protected) ? 
+ NULL : (src->data + src->pread); + + spin_unlock(&dmxdev->dvr_in_lock); + ret = dmxdev->demux->write(dmxdev->demux, + data_start, todo); + + if (ret < 0) { + pr_err("dmxdev: dvr write error %d\n", ret); + continue; + } + + if (dmxdev->dvr_in_exit) { + ret = -ENODEV; + break; + } + + spin_lock(&dmxdev->dvr_in_lock); + + todo -= ret; + bytes_written += ret; + DVB_RINGBUFFER_SKIP(src, ret); + dmxdev->dvr_processing_input = 0; + spin_unlock(&dmxdev->dvr_in_lock); + + wake_up_all(&src->queue); + } + + if (ret < 0) + return ret; + + return bytes_written; +} + +static int dvr_input_thread_entry(void *arg) +{ + struct dmxdev *dmxdev = arg; + struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer; + struct dvr_command dvr_cmd; + int leftover = 0; + int ret; + + while (1) { + /* wait for input */ + ret = wait_event_interruptible( + cmdbuf->queue, + (!cmdbuf->data) || + (dvb_ringbuffer_avail(cmdbuf) >= sizeof(dvr_cmd)) || + (dmxdev->dvr_in_exit)); + + if (ret < 0) + break; + + spin_lock(&dmxdev->dvr_in_lock); + + if (!cmdbuf->data || dmxdev->exit || dmxdev->dvr_in_exit) { + spin_unlock(&dmxdev->dvr_in_lock); + break; + } + + dvb_ringbuffer_read(cmdbuf, (u8 *)&dvr_cmd, sizeof(dvr_cmd)); + + spin_unlock(&dmxdev->dvr_in_lock); + + if (dvr_cmd.type == DVR_DATA_FEED_CMD) { + dvr_cmd.cmd.data_feed_count += leftover; + + ret = dvb_dvr_feed_cmd(dmxdev, &dvr_cmd); + if (ret < 0) { + pr_debug("%s: DVR data feed failed, ret=%d\n", + __func__, ret); + continue; + } + + leftover = dvr_cmd.cmd.data_feed_count - ret; + } else { + /* + * For EOS, try to process leftover data in the input + * buffer. + */ + if (dvr_cmd.cmd.oobcmd.type == DMX_OOB_CMD_EOS) { + struct dvr_command feed_cmd; + + feed_cmd.type = DVR_DATA_FEED_CMD; + feed_cmd.cmd.data_feed_count = + dvb_ringbuffer_avail( + &dmxdev->dvr_input_buffer); + dvb_dvr_feed_cmd(dmxdev, &feed_cmd); + } + + dvb_dvr_oob_cmd(dmxdev, &dvr_cmd.cmd.oobcmd); + } + } + + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_INTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + + return 0; +} + static int dvb_dvr_open(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; struct dmx_frontend *front; + void *mem; - dprintk("function : %s\n", __func__); + pr_debug("function : %s(%X)\n", __func__, (file->f_flags & O_ACCMODE)); if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; @@ -144,21 +835,28 @@ static int dvb_dvr_open(struct inode *inode, struct file *file) } if ((file->f_flags & O_ACCMODE) == O_RDONLY) { - void *mem; if (!dvbdev->readers) { mutex_unlock(&dmxdev->mutex); return -EBUSY; } - mem = vmalloc(DVR_BUFFER_SIZE); + mem = vmalloc_user(DVR_BUFFER_SIZE); if (!mem) { mutex_unlock(&dmxdev->mutex); return -ENOMEM; } dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); - dvbdev->readers--; - } + dvb_dmxdev_flush_events(&dmxdev->dvr_output_events); + dmxdev->dvr_output_events.event_mask.disable_mask = 0; + dmxdev->dvr_output_events.event_mask.no_wakeup_mask = 0; + dmxdev->dvr_output_events.event_mask.wakeup_threshold = 1; + dmxdev->dvr_feeds_count = 0; + dmxdev->dvr_buffer_mode = DMX_BUFFER_MODE_INTERNAL; + dmxdev->dvr_priv_buff_handle = NULL; - if ((file->f_flags & O_ACCMODE) == O_WRONLY) { + dvbdev->readers--; + } else if (!dvbdev->writers) { + dmxdev->dvr_in_exit = 0; + dmxdev->dvr_processing_input = 0; dmxdev->dvr_orig_fe = dmxdev->demux->frontend; if (!dmxdev->demux->write) { @@ -172,9 +870,51 @@ static int 
dvb_dvr_open(struct inode *inode, struct file *file) mutex_unlock(&dmxdev->mutex); return -EINVAL; } + + mem = vmalloc_user(DVR_BUFFER_SIZE); + if (!mem) { + mutex_unlock(&dmxdev->mutex); + return -ENOMEM; + } + dmxdev->demux->disconnect_frontend(dmxdev->demux); dmxdev->demux->connect_frontend(dmxdev->demux, front); + dmxdev->dvr_input_buffer_mode = DMX_BUFFER_MODE_INTERNAL; + + dvb_ringbuffer_init(&dmxdev->dvr_input_buffer, + mem, + DVR_BUFFER_SIZE); + + dmxdev->demux->dvr_input.priv_handle = NULL; + dmxdev->demux->dvr_input.ringbuff = &dmxdev->dvr_input_buffer; + dmxdev->demux->dvr_input_protected = 0; + mem = vmalloc(DVR_CMDS_BUFFER_SIZE); + if (!mem) { + vfree(dmxdev->dvr_input_buffer.data); + dmxdev->dvr_input_buffer.data = NULL; + mutex_unlock(&dmxdev->mutex); + return -ENOMEM; + } + dvb_ringbuffer_init(&dmxdev->dvr_cmd_buffer, mem, + DVR_CMDS_BUFFER_SIZE); + dvbdev->writers--; + + dmxdev->dvr_input_thread = + kthread_run( + dvr_input_thread_entry, + (void *)dmxdev, + "dvr_input"); + + if (IS_ERR(dmxdev->dvr_input_thread)) { + vfree(dmxdev->dvr_input_buffer.data); + vfree(dmxdev->dvr_cmd_buffer.data); + dmxdev->dvr_input_buffer.data = NULL; + dmxdev->dvr_cmd_buffer.data = NULL; + mutex_unlock(&dmxdev->mutex); + return -ENOMEM; + } } + dvbdev->users++; mutex_unlock(&dmxdev->mutex); return 0; @@ -187,11 +927,6 @@ static int dvb_dvr_release(struct inode *inode, struct file *file) mutex_lock(&dmxdev->mutex); - if ((file->f_flags & O_ACCMODE) == O_WRONLY) { - dmxdev->demux->disconnect_frontend(dmxdev->demux); - dmxdev->demux->connect_frontend(dmxdev->demux, - dmxdev->dvr_orig_fe); - } if ((file->f_flags & O_ACCMODE) == O_RDONLY) { dvbdev->readers++; if (dmxdev->dvr_buffer.data) { @@ -200,31 +935,123 @@ static int dvb_dvr_release(struct inode *inode, struct file *file) spin_lock_irq(&dmxdev->lock); dmxdev->dvr_buffer.data = NULL; spin_unlock_irq(&dmxdev->lock); - vfree(mem); + wake_up_all(&dmxdev->dvr_buffer.queue); + + if (dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_INTERNAL) + vfree(mem); } - } - /* TODO */ - dvbdev->users--; - if (dvbdev->users == 1 && dmxdev->exit == 1) { - mutex_unlock(&dmxdev->mutex); - wake_up(&dvbdev->wait_queue); - } else - mutex_unlock(&dmxdev->mutex); - return 0; -} + if ((dmxdev->dvr_buffer_mode == DMX_BUFFER_MODE_EXTERNAL) && + dmxdev->dvr_priv_buff_handle) { + dmxdev->demux->unmap_buffer(dmxdev->demux, + dmxdev->dvr_priv_buff_handle); + dmxdev->dvr_priv_buff_handle = NULL; + } + } else { + int i; + + spin_lock(&dmxdev->dvr_in_lock); + dmxdev->dvr_in_exit = 1; + spin_unlock(&dmxdev->dvr_in_lock); + + wake_up_all(&dmxdev->dvr_cmd_buffer.queue); + + /* + * There might be dmx filters reading now from DVR + * device, in PULL mode, they might be also stalled + * on output, signal to them that DVR is exiting. + */ + if (dmxdev->playback_mode == DMX_PB_MODE_PULL) { + wake_up_all(&dmxdev->dvr_buffer.queue); + + for (i = 0; i < dmxdev->filternum; i++) + if (dmxdev->filter[i].state == DMXDEV_STATE_GO) + wake_up_all( + &dmxdev->filter[i].buffer.queue); + } -static ssize_t dvb_dvr_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) + /* notify kernel demux that we are canceling */ + if (dmxdev->demux->write_cancel) + dmxdev->demux->write_cancel(dmxdev->demux); + + /* + * Now stop dvr-input thread so that no one + * would process data from dvr input buffer any more + * before it gets freed. 
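+		 * kthread_stop() only returns once the thread has exited, so
+		 * past this point nothing else references dvr_input_buffer or
+		 * dvr_cmd_buffer and they can safely be detached and freed.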
+ */ + kthread_stop(dmxdev->dvr_input_thread); + + dvbdev->writers++; + dmxdev->demux->disconnect_frontend(dmxdev->demux); + dmxdev->demux->connect_frontend(dmxdev->demux, + dmxdev->dvr_orig_fe); + + if (dmxdev->dvr_input_buffer.data) { + void *mem = dmxdev->dvr_input_buffer.data; + /* + * Ensure all the operations on the DVR input buffer + * are completed before it gets freed. + */ + mb(); + spin_lock_irq(&dmxdev->dvr_in_lock); + dmxdev->dvr_input_buffer.data = NULL; + spin_unlock_irq(&dmxdev->dvr_in_lock); + + if (dmxdev->dvr_input_buffer_mode == + DMX_BUFFER_MODE_INTERNAL) + vfree(mem); + } + + if ((dmxdev->dvr_input_buffer_mode == + DMX_BUFFER_MODE_EXTERNAL) && + (dmxdev->demux->dvr_input.priv_handle)) { + if (!dmxdev->demux->dvr_input_protected) + dmxdev->demux->unmap_buffer(dmxdev->demux, + dmxdev->demux->dvr_input.priv_handle); + dmxdev->demux->dvr_input.priv_handle = NULL; + } + + if (dmxdev->dvr_cmd_buffer.data) { + void *mem = dmxdev->dvr_cmd_buffer.data; + /* + * Ensure all the operations on the DVR command buffer + * are completed before it gets freed. + */ + mb(); + spin_lock_irq(&dmxdev->dvr_in_lock); + dmxdev->dvr_cmd_buffer.data = NULL; + spin_unlock_irq(&dmxdev->dvr_in_lock); + vfree(mem); + } + } + /* TODO */ + dvbdev->users--; + if (dvbdev->users == 1 && dmxdev->exit == 1) { + fops_put(file->f_op); + file->f_op = NULL; + mutex_unlock(&dmxdev->mutex); + wake_up(&dvbdev->wait_queue); + } else + mutex_unlock(&dmxdev->mutex); + + return 0; +} + + +static int dvb_dvr_mmap(struct file *filp, struct vm_area_struct *vma) { - struct dvb_device *dvbdev = file->private_data; + struct dvb_device *dvbdev = filp->private_data; struct dmxdev *dmxdev = dvbdev->priv; + struct dvb_ringbuffer *buffer; + enum dmx_buffer_mode buffer_mode; + int vma_size; + int buffer_size; int ret; - if (!dmxdev->demux->write) - return -EOPNOTSUPP; - if ((file->f_flags & O_ACCMODE) != O_WRONLY) + if (((filp->f_flags & O_ACCMODE) == O_RDONLY) && + (vma->vm_flags & VM_WRITE)) return -EINVAL; + if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; @@ -232,188 +1059,2030 @@ static ssize_t dvb_dvr_write(struct file *file, const char __user *buf, mutex_unlock(&dmxdev->mutex); return -ENODEV; } - ret = dmxdev->demux->write(dmxdev->demux, buf, count); + + if ((filp->f_flags & O_ACCMODE) == O_RDONLY) { + buffer = &dmxdev->dvr_buffer; + buffer_mode = dmxdev->dvr_buffer_mode; + } else { + buffer = &dmxdev->dvr_input_buffer; + buffer_mode = dmxdev->dvr_input_buffer_mode; + } + + if (buffer_mode == DMX_BUFFER_MODE_EXTERNAL) { + mutex_unlock(&dmxdev->mutex); + return -EINVAL; + } + + vma_size = vma->vm_end - vma->vm_start; + + /* Make sure requested mapping is not larger than buffer size */ + buffer_size = buffer->size + (PAGE_SIZE-1); + buffer_size = buffer_size & ~(PAGE_SIZE-1); + + if (vma_size != buffer_size) { + mutex_unlock(&dmxdev->mutex); + return -EINVAL; + } + + ret = remap_vmalloc_range(vma, buffer->data, 0); + if (ret) { + mutex_unlock(&dmxdev->mutex); + return ret; + } + + vma->vm_flags |= VM_DONTDUMP; + vma->vm_flags |= VM_DONTEXPAND; + mutex_unlock(&dmxdev->mutex); return ret; } +static void dvb_dvr_queue_data_feed(struct dmxdev *dmxdev, size_t count) +{ + struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer; + struct dvr_command *dvr_cmd; + int last_dvr_cmd; + + spin_lock(&dmxdev->dvr_in_lock); + + /* Peek at the last DVR command queued, try to coalesce FEED commands */ + if (dvb_ringbuffer_avail(cmdbuf) >= sizeof(*dvr_cmd)) { + last_dvr_cmd = cmdbuf->pwrite - sizeof(*dvr_cmd); + if 
(last_dvr_cmd < 0) + last_dvr_cmd += cmdbuf->size; + + dvr_cmd = (struct dvr_command *)&cmdbuf->data[last_dvr_cmd]; + if (dvr_cmd->type == DVR_DATA_FEED_CMD) { + dvr_cmd->cmd.data_feed_count += count; + spin_unlock(&dmxdev->dvr_in_lock); + return; + } + } + + /* + * We assume command buffer is large enough so that overflow should not + * happen. Overflow to the command buffer means data previously written + * to the input buffer is 'orphan' - does not have a matching FEED + * command. Issue a warning if this ever happens. + * Orphan data might still be processed if EOS is issued. + */ + if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd)) { + pr_err("%s: DVR command buffer overflow\n", __func__); + spin_unlock(&dmxdev->dvr_in_lock); + return; + } + + dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite]; + dvr_cmd->type = DVR_DATA_FEED_CMD; + dvr_cmd->cmd.data_feed_count = count; + DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd)); + spin_unlock(&dmxdev->dvr_in_lock); + + wake_up_all(&cmdbuf->queue); +} + +static int dvb_dvr_external_input_only(struct dmxdev *dmxdev) +{ + struct dmx_caps caps; + int is_external_only; + int flags; + size_t tsp_size; + + if (dmxdev->demux->get_tsp_size) + tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux); + else + tsp_size = 188; + + /* + * For backward compatibility, default assumes that + * external only buffers are not supported. + */ + flags = 0; + if (dmxdev->demux->get_caps) { + dmxdev->demux->get_caps(dmxdev->demux, &caps); + + if (tsp_size == 188) + flags = caps.playback_188_tsp.flags; + else + flags = caps.playback_192_tsp.flags; + } + + if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) && + (flags & DMX_BUFFER_EXTERNAL_SUPPORT)) + is_external_only = 1; + else + is_external_only = 0; + + return is_external_only; +} + +static int dvb_dvr_verify_buffer_size(struct dmxdev *dmxdev, + unsigned int f_flags, + unsigned long size) +{ + struct dmx_caps caps; + int tsp_size; + + if (!dmxdev->demux->get_caps) + return 1; + + if (dmxdev->demux->get_tsp_size) + tsp_size = dmxdev->demux->get_tsp_size(dmxdev->demux); + else + tsp_size = 188; + + dmxdev->demux->get_caps(dmxdev->demux, &caps); + if ((f_flags & O_ACCMODE) == O_RDONLY) + return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size, + caps.recording_188_tsp.max_size, + caps.recording_188_tsp.size_alignment)) || + (tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size, + caps.recording_192_tsp.max_size, + caps.recording_192_tsp.size_alignment)); + + return (tsp_size == 188 && dvb_dmxdev_verify_buffer_size(size, + caps.playback_188_tsp.max_size, + caps.playback_188_tsp.size_alignment)) || + (tsp_size == 192 && dvb_dmxdev_verify_buffer_size(size, + caps.playback_192_tsp.max_size, + caps.playback_192_tsp.size_alignment)); +} + +static ssize_t dvb_dvr_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct dvb_device *dvbdev = file->private_data; + struct dmxdev *dmxdev = dvbdev->priv; + struct dvb_ringbuffer *src = &dmxdev->dvr_input_buffer; + struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer; + int ret; + size_t todo; + ssize_t free_space; + + if (!dmxdev->demux->write) + return -EOPNOTSUPP; + + if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags, src->size) || + ((file->f_flags & O_ACCMODE) == O_RDONLY) || + !src->data || !cmdbuf->data || + (dvb_dvr_external_input_only(dmxdev) && + (dmxdev->dvr_input_buffer_mode == DMX_BUFFER_MODE_INTERNAL))) + return -EINVAL; + + if ((file->f_flags & O_NONBLOCK) && + (dvb_ringbuffer_free(src) == 0)) + return -EWOULDBLOCK; + + 
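+	/*
+	 * Producer side of the DVR input path: the loop below waits for free
+	 * space in dvr_input_buffer, copies what fits from user space with
+	 * dvb_ringbuffer_write_user() and announces the new bytes through
+	 * dvb_dvr_queue_data_feed(). That queues a DVR_DATA_FEED_CMD for the
+	 * dvr_input thread (back-to-back feed commands are coalesced), and
+	 * the thread's dvb_dvr_feed_cmd() hands the data to demux->write()
+	 * once whole TS packets are available.
+	 */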
ret = 0; + for (todo = count; todo > 0; todo -= ret) { + ret = wait_event_interruptible(src->queue, + (dvb_ringbuffer_free(src)) || + !src->data || !cmdbuf->data || + (src->error != 0) || dmxdev->dvr_in_exit); + + if (ret < 0) + return ret; + + if (mutex_lock_interruptible(&dmxdev->mutex)) + return -ERESTARTSYS; + + if ((!src->data) || (!cmdbuf->data)) { + mutex_unlock(&dmxdev->mutex); + return 0; + } + + if (dmxdev->exit || dmxdev->dvr_in_exit) { + mutex_unlock(&dmxdev->mutex); + return -ENODEV; + } + + if (src->error) { + ret = src->error; + dvb_ringbuffer_flush(src); + mutex_unlock(&dmxdev->mutex); + wake_up_all(&src->queue); + return ret; + } + + free_space = dvb_ringbuffer_free(src); + + if (free_space > todo) + free_space = todo; + + ret = dvb_ringbuffer_write_user(src, buf, free_space); + + if (ret < 0) { + mutex_unlock(&dmxdev->mutex); + return ret; + } + + buf += ret; + + dvb_dvr_queue_data_feed(dmxdev, ret); + + mutex_unlock(&dmxdev->mutex); + } + + return (count - todo) ? (count - todo) : ret; +} + +static int dvb_dmxdev_flush_data(struct dmxdev_filter *filter, size_t length) +{ + int ret = 0; + unsigned long flags; + + struct dvb_ringbuffer *buffer = &filter->buffer; + struct dmxdev_events_queue *events = &filter->events; + + if (filter->type == DMXDEV_TYPE_PES && + filter->params.pes.output == DMX_OUT_TS_TAP) { + buffer = &filter->dev->dvr_buffer; + events = &filter->dev->dvr_output_events; + } + + /* + * Drop 'length' pending data bytes from the ringbuffer and update + * event queue accordingly, similarly to dvb_dmxdev_release_data(). + */ + spin_lock_irqsave(&filter->dev->lock, flags); + DVB_RINGBUFFER_SKIP(buffer, length); + buffer->error = 0; + dvb_dmxdev_flush_events(events); + events->current_event_start_offset = buffer->pwrite; + spin_unlock_irqrestore(&filter->dev->lock, flags); + + if (filter->type == DMXDEV_TYPE_PES) { + struct dmxdev_feed *feed; + + feed = list_first_entry(&filter->feed.ts, + struct dmxdev_feed, next); + + if (feed->ts->flush_buffer) + return feed->ts->flush_buffer(feed->ts, length); + } else if (filter->type == DMXDEV_TYPE_SEC && + filter->feed.sec.feed->flush_buffer) { + return filter->feed.sec.feed->flush_buffer( + filter->feed.sec.feed, length); + } + + return ret; +} + +static inline void dvb_dmxdev_auto_flush_buffer(struct dmxdev_filter *filter, + struct dvb_ringbuffer *buf) +{ + size_t flush_len; + + /* + * When buffer overflowed, demux-dev marked the buffer in + * error state. If auto-flush is enabled discard current + * pending data in buffer. 
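+	 * The flush drops all bytes currently pending in the ring buffer and
+	 * resets the event queue (see dvb_dmxdev_flush_data()); the behaviour
+	 * is gated by the overflow_auto_flush module parameter, which
+	 * defaults to enabled.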
+ */ + if (overflow_auto_flush) { + flush_len = dvb_ringbuffer_avail(buf); + dvb_dmxdev_flush_data(filter, flush_len); + } +} + static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { + ssize_t res; struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; + unsigned long flags; if (dmxdev->exit) return -ENODEV; - return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer, - file->f_flags & O_NONBLOCK, - buf, count, ppos); + if (!dvb_dvr_verify_buffer_size(dmxdev, file->f_flags, + dmxdev->dvr_buffer.size)) + return -EINVAL; + + res = dvb_dmxdev_buffer_read(NULL, &dmxdev->dvr_buffer, + file->f_flags & O_NONBLOCK, + buf, count, ppos); + + if (res > 0) { + dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, res); + spin_lock_irqsave(&dmxdev->lock, flags); + dvb_dmxdev_update_events(&dmxdev->dvr_output_events, res); + spin_unlock_irqrestore(&dmxdev->lock, flags); + + /* + * in PULL mode, we might be stalling on + * event queue, so need to wake-up waiters + */ + if (dmxdev->playback_mode == DMX_PB_MODE_PULL) + wake_up_all(&dmxdev->dvr_buffer.queue); + } else if (res == -EOVERFLOW) { + dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed, + &dmxdev->dvr_buffer); + } + + return res; +} + +/* + * dvb_dvr_push_oob_cmd + * + * Note: this function assume dmxdev->mutex was taken, so command buffer cannot + * be released during its operation. + */ +static int dvb_dvr_push_oob_cmd(struct dmxdev *dmxdev, unsigned int f_flags, + struct dmx_oob_command *cmd) +{ + struct dvb_ringbuffer *cmdbuf = &dmxdev->dvr_cmd_buffer; + struct dvr_command *dvr_cmd; + + if ((f_flags & O_ACCMODE) == O_RDONLY || + dmxdev->source < DMX_SOURCE_DVR0) + return -EPERM; + + if (dvb_ringbuffer_free(cmdbuf) < sizeof(*dvr_cmd)) + return -ENOMEM; + + dvr_cmd = (struct dvr_command *)&cmdbuf->data[cmdbuf->pwrite]; + dvr_cmd->type = DVR_OOB_CMD; + dvr_cmd->cmd.oobcmd = *cmd; + DVB_RINGBUFFER_PUSH(cmdbuf, sizeof(*dvr_cmd)); + wake_up_all(&cmdbuf->queue); + + return 0; +} + +static int dvb_dvr_flush_buffer(struct dmxdev *dmxdev, unsigned int f_flags) +{ + size_t flush_len; + int ret; + + if ((f_flags & O_ACCMODE) != O_RDONLY) + return -EINVAL; + + flush_len = dvb_ringbuffer_avail(&dmxdev->dvr_buffer); + ret = dvb_dmxdev_flush_data(dmxdev->dvr_feed, flush_len); + + return ret; } static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev, - unsigned long size) + unsigned int f_flags, + unsigned long size) { - struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer; + struct dvb_ringbuffer *buf; void *newmem; void *oldmem; - - dprintk("function : %s\n", __func__); + spinlock_t *lock; + enum dmx_buffer_mode buffer_mode; + + pr_debug("function : %s\n", __func__); + + if ((f_flags & O_ACCMODE) == O_RDONLY) { + buf = &dmxdev->dvr_buffer; + lock = &dmxdev->lock; + buffer_mode = dmxdev->dvr_buffer_mode; + } else { + buf = &dmxdev->dvr_input_buffer; + lock = &dmxdev->dvr_in_lock; + buffer_mode = dmxdev->dvr_input_buffer_mode; + } if (buf->size == size) return 0; - if (!size) + if (!size || (buffer_mode == DMX_BUFFER_MODE_EXTERNAL)) return -EINVAL; - newmem = vmalloc(size); + newmem = vmalloc_user(size); if (!newmem) return -ENOMEM; oldmem = buf->data; - spin_lock_irq(&dmxdev->lock); + spin_lock_irq(lock); + + if (((f_flags & O_ACCMODE) != O_RDONLY) && + (dmxdev->dvr_processing_input)) { + spin_unlock_irq(lock); + vfree(oldmem); + return -EBUSY; + } + buf->data = newmem; buf->size = size; /* reset and not flush in case the buffer shrinks */ dvb_ringbuffer_reset(buf); - spin_unlock_irq(&dmxdev->lock); + + 
spin_unlock_irq(lock); vfree(oldmem); return 0; } -static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter - *dmxdevfilter, int state) +static int dvb_dvr_set_buffer_mode(struct dmxdev *dmxdev, + unsigned int f_flags, enum dmx_buffer_mode mode) { - spin_lock_irq(&dmxdevfilter->dev->lock); - dmxdevfilter->state = state; - spin_unlock_irq(&dmxdevfilter->dev->lock); + struct dvb_ringbuffer *buf; + spinlock_t *lock; + enum dmx_buffer_mode *buffer_mode; + void **buff_handle; + void *oldmem; + int *is_protected; + + if ((mode != DMX_BUFFER_MODE_INTERNAL) && + (mode != DMX_BUFFER_MODE_EXTERNAL)) + return -EINVAL; + + if ((mode == DMX_BUFFER_MODE_EXTERNAL) && + (!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer)) + return -EINVAL; + + if ((f_flags & O_ACCMODE) == O_RDONLY) { + buf = &dmxdev->dvr_buffer; + lock = &dmxdev->lock; + buffer_mode = &dmxdev->dvr_buffer_mode; + buff_handle = &dmxdev->dvr_priv_buff_handle; + is_protected = NULL; + } else { + buf = &dmxdev->dvr_input_buffer; + lock = &dmxdev->dvr_in_lock; + buffer_mode = &dmxdev->dvr_input_buffer_mode; + buff_handle = &dmxdev->demux->dvr_input.priv_handle; + is_protected = &dmxdev->demux->dvr_input_protected; + } + + if (mode == *buffer_mode) + return 0; + + oldmem = buf->data; + spin_lock_irq(lock); + buf->data = NULL; + spin_unlock_irq(lock); + + *buffer_mode = mode; + + if (mode == DMX_BUFFER_MODE_INTERNAL) { + /* switched from external to internal */ + if (*buff_handle) { + dmxdev->demux->unmap_buffer(dmxdev->demux, + *buff_handle); + *buff_handle = NULL; + } + + if (is_protected) + *is_protected = 0; + + /* set default internal buffer */ + dvb_dvr_set_buffer_size(dmxdev, f_flags, DVR_BUFFER_SIZE); + } else if (oldmem) { + /* switched from internal to external */ + vfree(oldmem); + } + + return 0; } -static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter, - unsigned long size) +static int dvb_dvr_set_buffer(struct dmxdev *dmxdev, + unsigned int f_flags, struct dmx_buffer *dmx_buffer) { - struct dvb_ringbuffer *buf = &dmxdevfilter->buffer; + struct dvb_ringbuffer *buf; + spinlock_t *lock; + enum dmx_buffer_mode buffer_mode; + void **buff_handle; void *newmem; void *oldmem; + int *is_protected; + struct dmx_caps caps; - if (buf->size == size) - return 0; - if (!size) + if (dmxdev->demux->get_caps) + dmxdev->demux->get_caps(dmxdev->demux, &caps); + else + caps.caps = 0; + + if ((f_flags & O_ACCMODE) == O_RDONLY) { + buf = &dmxdev->dvr_buffer; + lock = &dmxdev->lock; + buffer_mode = dmxdev->dvr_buffer_mode; + buff_handle = &dmxdev->dvr_priv_buff_handle; + is_protected = NULL; + } else { + buf = &dmxdev->dvr_input_buffer; + lock = &dmxdev->dvr_in_lock; + buffer_mode = dmxdev->dvr_input_buffer_mode; + buff_handle = &dmxdev->demux->dvr_input.priv_handle; + is_protected = &dmxdev->demux->dvr_input_protected; + if (!(caps.caps & DMX_CAP_SECURED_INPUT_PLAYBACK) && + dmx_buffer->is_protected) + return -EINVAL; + } + + if (!dmx_buffer->size || + (buffer_mode == DMX_BUFFER_MODE_INTERNAL)) return -EINVAL; - if (dmxdevfilter->state >= DMXDEV_STATE_GO) - return -EBUSY; - newmem = vmalloc(size); - if (!newmem) - return -ENOMEM; + oldmem = *buff_handle; - oldmem = buf->data; + /* + * Protected buffer is relevant only for DVR input buffer + * when DVR device is opened for write. In such case, + * buffer is mapped only if the buffer is not protected one. 
+ */ + if (!is_protected || !dmx_buffer->is_protected) { + if (dmxdev->demux->map_buffer(dmxdev->demux, dmx_buffer, + buff_handle, &newmem)) + return -ENOMEM; + } else { + newmem = NULL; + *buff_handle = NULL; + } - spin_lock_irq(&dmxdevfilter->dev->lock); + spin_lock_irq(lock); buf->data = newmem; - buf->size = size; - - /* reset and not flush in case the buffer shrinks */ + buf->size = dmx_buffer->size; + if (is_protected) + *is_protected = dmx_buffer->is_protected; dvb_ringbuffer_reset(buf); - spin_unlock_irq(&dmxdevfilter->dev->lock); + spin_unlock_irq(lock); - vfree(oldmem); + if (oldmem) + dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem); return 0; } -static void dvb_dmxdev_filter_timeout(unsigned long data) +static int dvb_dvr_get_event(struct dmxdev *dmxdev, + unsigned int f_flags, + struct dmx_filter_event *event) { - struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data; + int res = 0; - dmxdevfilter->buffer.error = -ETIMEDOUT; - spin_lock_irq(&dmxdevfilter->dev->lock); - dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT; - spin_unlock_irq(&dmxdevfilter->dev->lock); - wake_up(&dmxdevfilter->buffer.queue); + if (!((f_flags & O_ACCMODE) == O_RDONLY)) + return -EINVAL; + + spin_lock_irq(&dmxdev->lock); + + if (dmxdev->dvr_buffer.error == -EOVERFLOW) { + event->type = DMX_EVENT_BUFFER_OVERFLOW; + dmxdev->dvr_buffer.error = 0; + } else { + res = dvb_dmxdev_remove_event(&dmxdev->dvr_output_events, + event); + if (res) { + spin_unlock_irq(&dmxdev->lock); + return res; + } + } + + spin_unlock_irq(&dmxdev->lock); + + if (event->type == DMX_EVENT_BUFFER_OVERFLOW) + dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed, + &dmxdev->dvr_buffer); + + /* + * in PULL mode, we might be stalling on + * event queue, so need to wake-up waiters + */ + if (dmxdev->playback_mode == DMX_PB_MODE_PULL) + wake_up_all(&dmxdev->dvr_buffer.queue); + + return res; } -static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter) +static int dvb_dvr_get_buffer_status(struct dmxdev *dmxdev, + unsigned int f_flags, + struct dmx_buffer_status *dmx_buffer_status) { - struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec; + struct dvb_ringbuffer *buf; + spinlock_t *lock; + + if ((f_flags & O_ACCMODE) == O_RDONLY) { + buf = &dmxdev->dvr_buffer; + lock = &dmxdev->lock; + } else { + buf = &dmxdev->dvr_input_buffer; + lock = &dmxdev->dvr_in_lock; + } + + spin_lock_irq(lock); + + dmx_buffer_status->error = buf->error; + dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf); + dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf); + dmx_buffer_status->read_offset = buf->pread; + dmx_buffer_status->write_offset = buf->pwrite; + dmx_buffer_status->size = buf->size; + buf->error = 0; + + spin_unlock_irq(lock); + + if (dmx_buffer_status->error == -EOVERFLOW) + dvb_dmxdev_auto_flush_buffer(dmxdev->dvr_feed, buf); + + return 0; +} + +static int dvb_dvr_release_data(struct dmxdev *dmxdev, + unsigned int f_flags, + u32 bytes_count) +{ + ssize_t buff_fullness; + + if (!((f_flags & O_ACCMODE) == O_RDONLY)) + return -EINVAL; + + if (!bytes_count) + return 0; + + buff_fullness = dvb_ringbuffer_avail(&dmxdev->dvr_buffer); + + if (bytes_count > buff_fullness) + return -EINVAL; + + DVB_RINGBUFFER_SKIP(&dmxdev->dvr_buffer, bytes_count); + + dvb_dmxdev_notify_data_read(dmxdev->dvr_feed, bytes_count); + spin_lock_irq(&dmxdev->lock); + dvb_dmxdev_update_events(&dmxdev->dvr_output_events, bytes_count); + spin_unlock_irq(&dmxdev->lock); + + wake_up_all(&dmxdev->dvr_buffer.queue); + return 0; +} + +/* + * 
dvb_dvr_feed_data - Notify new data in DVR input buffer + * + * @dmxdev - demux device instance + * @f_flags - demux device file flag (access mode) + * @bytes_count - how many bytes were written to the input buffer + * + * Note: this function assume dmxdev->mutex was taken, so buffer cannot + * be released during its operation. + */ +static int dvb_dvr_feed_data(struct dmxdev *dmxdev, + unsigned int f_flags, + u32 bytes_count) +{ + ssize_t free_space; + struct dvb_ringbuffer *buffer = &dmxdev->dvr_input_buffer; + + if ((f_flags & O_ACCMODE) == O_RDONLY) + return -EINVAL; + + if (!bytes_count) + return 0; + + free_space = dvb_ringbuffer_free(buffer); + + if (bytes_count > free_space) + return -EINVAL; + + DVB_RINGBUFFER_PUSH(buffer, bytes_count); + + dvb_dvr_queue_data_feed(dmxdev, bytes_count); + + return 0; +} + +static inline void dvb_dmxdev_filter_state_set(struct dmxdev_filter + *dmxdevfilter, int state) +{ + spin_lock_irq(&dmxdevfilter->dev->lock); + dmxdevfilter->state = state; + spin_unlock_irq(&dmxdevfilter->dev->lock); +} + +static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter, + unsigned long size) +{ + struct dvb_ringbuffer *buf = &dmxdevfilter->buffer; + void *newmem; + void *oldmem; + + if (buf->size == size) + return 0; + if (!size || + (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL)) + return -EINVAL; + if (dmxdevfilter->state >= DMXDEV_STATE_GO) + return -EBUSY; + + newmem = vmalloc_user(size); + if (!newmem) + return -ENOMEM; + + oldmem = buf->data; + + spin_lock_irq(&dmxdevfilter->dev->lock); + buf->data = newmem; + buf->size = size; + + /* reset and not flush in case the buffer shrinks */ + dvb_ringbuffer_reset(buf); + spin_unlock_irq(&dmxdevfilter->dev->lock); + + vfree(oldmem); + + return 0; +} + +static int dvb_dmxdev_set_buffer_mode(struct dmxdev_filter *dmxdevfilter, + enum dmx_buffer_mode mode) +{ + struct dvb_ringbuffer *buf = &dmxdevfilter->buffer; + struct dmxdev *dmxdev = dmxdevfilter->dev; + void *oldmem; + + if (dmxdevfilter->state >= DMXDEV_STATE_GO) + return -EBUSY; + + if ((mode != DMX_BUFFER_MODE_INTERNAL) && + (mode != DMX_BUFFER_MODE_EXTERNAL)) + return -EINVAL; + + if ((mode == DMX_BUFFER_MODE_EXTERNAL) && + (!dmxdev->demux->map_buffer || !dmxdev->demux->unmap_buffer)) + return -EINVAL; + + if (mode == dmxdevfilter->buffer_mode) + return 0; + + oldmem = buf->data; + spin_lock_irq(&dmxdevfilter->dev->lock); + buf->data = NULL; + spin_unlock_irq(&dmxdevfilter->dev->lock); + + dmxdevfilter->buffer_mode = mode; + + if (mode == DMX_BUFFER_MODE_INTERNAL) { + /* switched from external to internal */ + if (dmxdevfilter->priv_buff_handle) { + dmxdev->demux->unmap_buffer(dmxdev->demux, + dmxdevfilter->priv_buff_handle); + dmxdevfilter->priv_buff_handle = NULL; + } + } else if (oldmem) { + /* switched from internal to external */ + vfree(oldmem); + } + + return 0; +} + +static int dvb_dmxdev_set_buffer(struct dmxdev_filter *dmxdevfilter, + struct dmx_buffer *buffer) +{ + struct dvb_ringbuffer *buf = &dmxdevfilter->buffer; + struct dmxdev *dmxdev = dmxdevfilter->dev; + void *newmem; + void *oldmem; + + if (dmxdevfilter->state >= DMXDEV_STATE_GO) + return -EBUSY; + + if ((!buffer->size) || + (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL)) + return -EINVAL; + + oldmem = dmxdevfilter->priv_buff_handle; + if (dmxdev->demux->map_buffer(dmxdev->demux, buffer, + &dmxdevfilter->priv_buff_handle, &newmem)) + return -ENOMEM; + + spin_lock_irq(&dmxdevfilter->dev->lock); + buf->data = newmem; + buf->size = buffer->size; + 
dvb_ringbuffer_reset(buf); + spin_unlock_irq(&dmxdevfilter->dev->lock); + + if (oldmem) + dmxdev->demux->unmap_buffer(dmxdev->demux, oldmem); + + return 0; +} + +static int dvb_dmxdev_set_tsp_out_format(struct dmxdev_filter *dmxdevfilter, + enum dmx_tsp_format_t dmx_tsp_format) +{ + if (dmxdevfilter->state >= DMXDEV_STATE_GO) + return -EBUSY; + + if ((dmx_tsp_format > DMX_TSP_FORMAT_192_HEAD) || + (dmx_tsp_format < DMX_TSP_FORMAT_188)) + return -EINVAL; + + dmxdevfilter->dmx_tsp_format = dmx_tsp_format; + + return 0; +} + +static int dvb_dmxdev_set_decoder_buffer_size( + struct dmxdev_filter *dmxdevfilter, + unsigned long size) +{ + struct dmx_caps caps; + struct dmx_demux *demux = dmxdevfilter->dev->demux; + + if (demux->get_caps) { + demux->get_caps(demux, &caps); + if (!dvb_dmxdev_verify_buffer_size(size, caps.decoder.max_size, + caps.decoder.size_alignment)) + return -EINVAL; + } + + if (size == 0) + return -EINVAL; + + if (dmxdevfilter->decoder_buffers.buffers_size == size) + return 0; + + if (dmxdevfilter->state >= DMXDEV_STATE_GO) + return -EBUSY; + + /* + * In case decoder buffers were already set before to some external + * buffers, setting the decoder buffer size alone implies transition + * to internal buffer mode. + */ + dmxdevfilter->decoder_buffers.buffers_size = size; + dmxdevfilter->decoder_buffers.buffers_num = 0; + dmxdevfilter->decoder_buffers.is_linear = 0; + return 0; +} + +static int dvb_dmxdev_set_source(struct dmxdev_filter *dmxdevfilter, + dmx_source_t *source) +{ + int ret = 0; + struct dmxdev *dev; + + if (dmxdevfilter->state == DMXDEV_STATE_GO) + return -EBUSY; + + dev = dmxdevfilter->dev; + if (dev->demux->set_source) + ret = dev->demux->set_source(dev->demux, source); + + if (!ret) + dev->source = *source; + + return ret; +} + +static int dvb_dmxdev_reuse_decoder_buf(struct dmxdev_filter *dmxdevfilter, + int cookie) +{ + struct dmxdev_feed *feed; + + if (dmxdevfilter->state != DMXDEV_STATE_GO || + (dmxdevfilter->type != DMXDEV_TYPE_PES) || + (dmxdevfilter->params.pes.output != DMX_OUT_DECODER) || + (dmxdevfilter->events.event_mask.disable_mask & + DMX_EVENT_NEW_ES_DATA)) + return -EPERM; + + /* Only one feed should be in the list in case of decoder */ + feed = list_first_entry(&dmxdevfilter->feed.ts, + struct dmxdev_feed, next); + if (feed && feed->ts && feed->ts->reuse_decoder_buffer) + return feed->ts->reuse_decoder_buffer(feed->ts, cookie); + + return -ENODEV; +} + +static int dvb_dmxdev_set_event_mask(struct dmxdev_filter *dmxdevfilter, + struct dmx_events_mask *event_mask) +{ + if (!event_mask || + (event_mask->wakeup_threshold >= DMX_EVENT_QUEUE_SIZE)) + return -EINVAL; + + if (dmxdevfilter->state == DMXDEV_STATE_GO) + return -EBUSY; + + /* + * Overflow event is not allowed to be masked. + * This is because if overflow occurs, demux stops outputting data + * until user is notified. If user is using events to read the data, + * the overflow event must be always enabled or otherwise we would + * never recover from overflow state. 
+ */ + event_mask->disable_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW; + event_mask->no_wakeup_mask &= ~(u32)DMX_EVENT_BUFFER_OVERFLOW; + + dmxdevfilter->events.event_mask = *event_mask; + + return 0; +} + +static int dvb_dmxdev_get_event_mask(struct dmxdev_filter *dmxdevfilter, + struct dmx_events_mask *event_mask) +{ + if (!event_mask) + return -EINVAL; + + *event_mask = dmxdevfilter->events.event_mask; + + return 0; +} + +static int dvb_dmxdev_set_indexing_params(struct dmxdev_filter *dmxdevfilter, + struct dmx_indexing_params *idx_params) +{ + int found_pid; + struct dmxdev_feed *feed; + struct dmxdev_feed *ts_feed = NULL; + struct dmx_caps caps; + int ret = 0; + + if (!dmxdevfilter->dev->demux->get_caps) + return -EINVAL; + + dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps); + + if (!idx_params || + !(caps.caps & DMX_CAP_VIDEO_INDEXING) || + (dmxdevfilter->state < DMXDEV_STATE_SET) || + (dmxdevfilter->type != DMXDEV_TYPE_PES) || + ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) && + (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP))) + return -EINVAL; + + if (idx_params->enable && !idx_params->types) + return -EINVAL; + + found_pid = 0; + list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) { + if (feed->pid == idx_params->pid) { + found_pid = 1; + ts_feed = feed; + ts_feed->idx_params = *idx_params; + if ((dmxdevfilter->state == DMXDEV_STATE_GO) && + ts_feed->ts->set_idx_params) + ret = ts_feed->ts->set_idx_params( + ts_feed->ts, idx_params); + break; + } + } + + if (!found_pid) + return -EINVAL; + + return ret; +} + +static int dvb_dmxdev_get_scrambling_bits(struct dmxdev_filter *filter, + struct dmx_scrambling_bits *scrambling_bits) +{ + struct dmxdev_feed *feed; + + if (!scrambling_bits || + (filter->state != DMXDEV_STATE_GO)) + return -EINVAL; + + if (filter->type == DMXDEV_TYPE_SEC) { + if (filter->feed.sec.feed->get_scrambling_bits) + return filter->feed.sec.feed->get_scrambling_bits( + filter->feed.sec.feed, + &scrambling_bits->value); + return -EINVAL; + } + + list_for_each_entry(feed, &filter->feed.ts, next) { + if (feed->pid == scrambling_bits->pid) { + if (feed->ts->get_scrambling_bits) + return feed->ts->get_scrambling_bits(feed->ts, + &scrambling_bits->value); + return -EINVAL; + } + } + + return -EINVAL; +} + +static void dvb_dmxdev_ts_insertion_work(struct work_struct *worker) +{ + struct ts_insertion_buffer *ts_buffer = + container_of(to_delayed_work(worker), + struct ts_insertion_buffer, dwork); + struct dmxdev_feed *feed; + size_t free_bytes; + struct dmx_ts_feed *ts; + + mutex_lock(&ts_buffer->dmxdevfilter->mutex); + + if (ts_buffer->abort || + (ts_buffer->dmxdevfilter->state != DMXDEV_STATE_GO)) { + mutex_unlock(&ts_buffer->dmxdevfilter->mutex); + return; + } + + feed = list_first_entry(&ts_buffer->dmxdevfilter->feed.ts, + struct dmxdev_feed, next); + ts = feed->ts; + free_bytes = dvb_ringbuffer_free(&ts_buffer->dmxdevfilter->buffer); + + mutex_unlock(&ts_buffer->dmxdevfilter->mutex); + + if (ts_buffer->size < free_bytes) + ts->ts_insertion_insert_buffer(ts, + ts_buffer->buffer, ts_buffer->size); + + if (ts_buffer->repetition_time && !ts_buffer->abort) + schedule_delayed_work(&ts_buffer->dwork, + msecs_to_jiffies(ts_buffer->repetition_time)); +} + +static void dvb_dmxdev_queue_ts_insertion( + struct ts_insertion_buffer *ts_buffer) +{ + size_t tsp_size; + + if (ts_buffer->dmxdevfilter->dmx_tsp_format == DMX_TSP_FORMAT_188) + tsp_size = 188; + else + tsp_size = 192; + + if (ts_buffer->size % tsp_size) { + pr_err("%s: Wrong buffer 
alignment, size=%zu, tsp_size=%zu\n", + __func__, ts_buffer->size, tsp_size); + return; + } + + ts_buffer->abort = 0; + schedule_delayed_work(&ts_buffer->dwork, 0); +} + +static void dvb_dmxdev_cancel_ts_insertion( + struct ts_insertion_buffer *ts_buffer) +{ + /* + * This function assumes it is called while mutex + * of demux filter is taken. Since work in workqueue + * captures the filter's mutex to protect against the DB, + * mutex needs to be released before waiting for the work + * to get finished otherwise work in workqueue will + * never be finished. + */ + if (!mutex_is_locked(&ts_buffer->dmxdevfilter->mutex)) { + pr_err("%s: mutex is not locked!\n", __func__); + return; + } + + ts_buffer->abort = 1; + + mutex_unlock(&ts_buffer->dmxdevfilter->mutex); + cancel_delayed_work_sync(&ts_buffer->dwork); + mutex_lock(&ts_buffer->dmxdevfilter->mutex); +} + +static int dvb_dmxdev_set_ts_insertion(struct dmxdev_filter *dmxdevfilter, + struct dmx_set_ts_insertion *params) +{ + int ret = 0; + int first_buffer; + struct dmxdev_feed *feed; + struct ts_insertion_buffer *ts_buffer; + struct dmx_caps caps; + + if (!dmxdevfilter->dev->demux->get_caps) + return -EINVAL; + + dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps); + + if (!params || + !params->size || + !(caps.caps & DMX_CAP_TS_INSERTION) || + (dmxdevfilter->state < DMXDEV_STATE_SET) || + (dmxdevfilter->type != DMXDEV_TYPE_PES) || + ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) && + (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP))) + return -EINVAL; + + ts_buffer = vmalloc(sizeof(struct ts_insertion_buffer)); + if (!ts_buffer) + return -ENOMEM; + + ts_buffer->buffer = vmalloc(params->size); + if (!ts_buffer->buffer) { + vfree(ts_buffer); + return -ENOMEM; + } + + if (copy_from_user(ts_buffer->buffer, + params->ts_packets, params->size)) { + vfree(ts_buffer->buffer); + vfree(ts_buffer); + return -EFAULT; + } + + if (params->repetition_time && + params->repetition_time < DMX_MIN_INSERTION_REPETITION_TIME) + params->repetition_time = DMX_MIN_INSERTION_REPETITION_TIME; + + ts_buffer->size = params->size; + ts_buffer->identifier = params->identifier; + ts_buffer->repetition_time = params->repetition_time; + ts_buffer->dmxdevfilter = dmxdevfilter; + INIT_DELAYED_WORK(&ts_buffer->dwork, dvb_dmxdev_ts_insertion_work); + + first_buffer = list_empty(&dmxdevfilter->insertion_buffers); + list_add_tail(&ts_buffer->next, &dmxdevfilter->insertion_buffers); + + if (dmxdevfilter->state != DMXDEV_STATE_GO) + return 0; + + feed = list_first_entry(&dmxdevfilter->feed.ts, + struct dmxdev_feed, next); + + if (first_buffer && feed->ts->ts_insertion_init) + ret = feed->ts->ts_insertion_init(feed->ts); + + if (!ret) { + dvb_dmxdev_queue_ts_insertion(ts_buffer); + } else { + list_del(&ts_buffer->next); + vfree(ts_buffer->buffer); + vfree(ts_buffer); + } + + return ret; +} + +static int dvb_dmxdev_abort_ts_insertion(struct dmxdev_filter *dmxdevfilter, + struct dmx_abort_ts_insertion *params) +{ + int ret = 0; + int found_buffer; + struct dmxdev_feed *feed; + struct ts_insertion_buffer *ts_buffer, *tmp; + struct dmx_caps caps; + + if (!dmxdevfilter->dev->demux->get_caps) + return -EINVAL; + + dmxdevfilter->dev->demux->get_caps(dmxdevfilter->dev->demux, &caps); + + if (!params || + !(caps.caps & DMX_CAP_TS_INSERTION) || + (dmxdevfilter->state < DMXDEV_STATE_SET) || + (dmxdevfilter->type != DMXDEV_TYPE_PES) || + ((dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) && + (dmxdevfilter->params.pes.output != DMX_OUT_TSDEMUX_TAP))) + return 
-EINVAL; + + found_buffer = 0; + list_for_each_entry_safe(ts_buffer, tmp, + &dmxdevfilter->insertion_buffers, next) { + if (ts_buffer->identifier == params->identifier) { + list_del(&ts_buffer->next); + found_buffer = 1; + break; + } + } + + if (!found_buffer) + return -EINVAL; + + if (dmxdevfilter->state == DMXDEV_STATE_GO) { + dvb_dmxdev_cancel_ts_insertion(ts_buffer); + if (list_empty(&dmxdevfilter->insertion_buffers)) { + feed = list_first_entry(&dmxdevfilter->feed.ts, + struct dmxdev_feed, next); + if (feed->ts->ts_insertion_terminate) + ret = feed->ts->ts_insertion_terminate( + feed->ts); + } + } + + vfree(ts_buffer->buffer); + vfree(ts_buffer); + + return ret; +} + +static int dvb_dmxdev_ts_fullness_callback(struct dmx_ts_feed *filter, + int required_space, int wait) +{ + struct dmxdev_filter *dmxdevfilter = filter->priv; + struct dvb_ringbuffer *src; + struct dmxdev_events_queue *events; + int ret; + + if (!dmxdevfilter) { + pr_err("%s: NULL demux filter object!\n", __func__); + return -ENODEV; + } + + if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) { + src = &dmxdevfilter->buffer; + events = &dmxdevfilter->events; + } else { + src = &dmxdevfilter->dev->dvr_buffer; + events = &dmxdevfilter->dev->dvr_output_events; + } + + do { + ret = 0; + + if (dmxdevfilter->dev->dvr_in_exit) + return -ENODEV; + + spin_lock(&dmxdevfilter->dev->lock); + + if ((!src->data) || + (dmxdevfilter->state != DMXDEV_STATE_GO)) + ret = -EINVAL; + else if (src->error) + ret = src->error; + + if (ret) { + spin_unlock(&dmxdevfilter->dev->lock); + return ret; + } + + if ((required_space <= dvb_ringbuffer_free(src)) && + (!dvb_dmxdev_events_is_full(events))) { + spin_unlock(&dmxdevfilter->dev->lock); + return 0; + } + + spin_unlock(&dmxdevfilter->dev->lock); + + if (!wait) + return -ENOSPC; + + ret = wait_event_interruptible(src->queue, + (!src->data) || + ((dvb_ringbuffer_free(src) >= required_space) && + (!dvb_dmxdev_events_is_full(events))) || + (src->error != 0) || + (dmxdevfilter->state != DMXDEV_STATE_GO) || + dmxdevfilter->dev->dvr_in_exit); + + if (ret < 0) + return ret; + } while (1); +} + +static int dvb_dmxdev_sec_fullness_callback( + struct dmx_section_filter *filter, + int required_space, int wait) +{ + struct dmxdev_filter *dmxdevfilter = filter->priv; + struct dvb_ringbuffer *src; + struct dmxdev_events_queue *events; + int ret; + + if (!dmxdevfilter) { + pr_err("%s: NULL demux filter object!\n", __func__); + return -ENODEV; + } + + src = &dmxdevfilter->buffer; + events = &dmxdevfilter->events; + + do { + ret = 0; + + if (dmxdevfilter->dev->dvr_in_exit) + return -ENODEV; + + spin_lock(&dmxdevfilter->dev->lock); + + if ((!src->data) || + (dmxdevfilter->state != DMXDEV_STATE_GO)) + ret = -EINVAL; + else if (src->error) + ret = src->error; + + if (ret) { + spin_unlock(&dmxdevfilter->dev->lock); + return ret; + } + + if ((required_space <= dvb_ringbuffer_free(src)) && + (!dvb_dmxdev_events_is_full(events))) { + spin_unlock(&dmxdevfilter->dev->lock); + return 0; + } + + spin_unlock(&dmxdevfilter->dev->lock); + + if (!wait) + return -ENOSPC; + + ret = wait_event_interruptible(src->queue, + (!src->data) || + ((dvb_ringbuffer_free(src) >= required_space) && + (!dvb_dmxdev_events_is_full(events))) || + (src->error != 0) || + (dmxdevfilter->state != DMXDEV_STATE_GO) || + dmxdevfilter->dev->dvr_in_exit); + + if (ret < 0) + return ret; + } while (1); +} + +static int dvb_dmxdev_set_playback_mode(struct dmxdev_filter *dmxdevfilter, + enum dmx_playback_mode_t playback_mode) +{ + struct dmxdev *dmxdev = 
dmxdevfilter->dev; + struct dmx_caps caps; + + if (dmxdev->demux->get_caps) + dmxdev->demux->get_caps(dmxdev->demux, &caps); + else + caps.caps = 0; + + if ((playback_mode != DMX_PB_MODE_PUSH) && + (playback_mode != DMX_PB_MODE_PULL)) + return -EINVAL; + + if (dmxdev->demux->set_playback_mode == NULL) + return -EINVAL; + + if (((dmxdev->source < DMX_SOURCE_DVR0) || + !(caps.caps & DMX_CAP_PULL_MODE)) && + (playback_mode == DMX_PB_MODE_PULL)) + return -EPERM; + + if (dmxdevfilter->state == DMXDEV_STATE_GO) + return -EBUSY; + + dmxdev->playback_mode = playback_mode; + + return dmxdev->demux->set_playback_mode( + dmxdev->demux, + dmxdev->playback_mode, + dvb_dmxdev_ts_fullness_callback, + dvb_dmxdev_sec_fullness_callback); +} + +static int dvb_dmxdev_flush_buffer(struct dmxdev_filter *filter) +{ + size_t flush_len; + int ret; + + if (filter->state != DMXDEV_STATE_GO) + return -EINVAL; + + flush_len = dvb_ringbuffer_avail(&filter->buffer); + ret = dvb_dmxdev_flush_data(filter, flush_len); + + return ret; +} + +static int dvb_dmxdev_get_buffer_status( + struct dmxdev_filter *dmxdevfilter, + struct dmx_buffer_status *dmx_buffer_status) +{ + struct dvb_ringbuffer *buf = &dmxdevfilter->buffer; + + /* + * Note: Taking the dmxdevfilter->dev->lock spinlock is required only + * when getting the status of the Demux-userspace data ringbuffer . + * In case we are getting the status of a decoder buffer, taking this + * spinlock is not required and in fact might lead to a deadlock. + */ + if ((dmxdevfilter->type == DMXDEV_TYPE_PES) && + (dmxdevfilter->params.pes.output == DMX_OUT_DECODER)) { + struct dmxdev_feed *feed; + int ret; + + /* Only one feed should be in the list in case of decoder */ + feed = list_first_entry(&dmxdevfilter->feed.ts, + struct dmxdev_feed, next); + + /* Ask for status of decoder's buffer from underlying HW */ + if (feed->ts->get_decoder_buff_status) + ret = feed->ts->get_decoder_buff_status( + feed->ts, + dmx_buffer_status); + else + ret = -ENODEV; + + return ret; + } + + spin_lock_irq(&dmxdevfilter->dev->lock); + + if (!buf->data) { + spin_unlock_irq(&dmxdevfilter->dev->lock); + return -EINVAL; + } + + dmx_buffer_status->error = buf->error; + dmx_buffer_status->fullness = dvb_ringbuffer_avail(buf); + dmx_buffer_status->free_bytes = dvb_ringbuffer_free(buf); + dmx_buffer_status->read_offset = buf->pread; + dmx_buffer_status->write_offset = buf->pwrite; + dmx_buffer_status->size = buf->size; + buf->error = 0; + + spin_unlock_irq(&dmxdevfilter->dev->lock); + + if (dmx_buffer_status->error == -EOVERFLOW) + dvb_dmxdev_auto_flush_buffer(dmxdevfilter, buf); + + return 0; +} + +static int dvb_dmxdev_release_data(struct dmxdev_filter *dmxdevfilter, + u32 bytes_count) +{ + ssize_t buff_fullness; + + if (!dmxdevfilter->buffer.data) + return -EINVAL; + + if (!bytes_count) + return 0; + + buff_fullness = dvb_ringbuffer_avail(&dmxdevfilter->buffer); + + if (bytes_count > buff_fullness) + return -EINVAL; + + DVB_RINGBUFFER_SKIP(&dmxdevfilter->buffer, bytes_count); + + dvb_dmxdev_notify_data_read(dmxdevfilter, bytes_count); + spin_lock_irq(&dmxdevfilter->dev->lock); + dvb_dmxdev_update_events(&dmxdevfilter->events, bytes_count); + spin_unlock_irq(&dmxdevfilter->dev->lock); + + wake_up_all(&dmxdevfilter->buffer.queue); + + return 0; +} + +static int dvb_dmxdev_get_event(struct dmxdev_filter *dmxdevfilter, + struct dmx_filter_event *event) +{ + int res = 0; + + spin_lock_irq(&dmxdevfilter->dev->lock); + + /* Check first for filter overflow */ + if (dmxdevfilter->buffer.error == -EOVERFLOW) { + 
event->type = DMX_EVENT_BUFFER_OVERFLOW; + } else { + res = dvb_dmxdev_remove_event(&dmxdevfilter->events, event); + if (res) { + spin_unlock_irq(&dmxdevfilter->dev->lock); + return res; + } + } + + /* clear buffer error now that user was notified */ + if (event->type == DMX_EVENT_BUFFER_OVERFLOW || + event->type == DMX_EVENT_SECTION_TIMEOUT) + dmxdevfilter->buffer.error = 0; + + spin_unlock_irq(&dmxdevfilter->dev->lock); + + if (event->type == DMX_EVENT_BUFFER_OVERFLOW) + dvb_dmxdev_auto_flush_buffer(dmxdevfilter, + &dmxdevfilter->buffer); + + spin_lock_irq(&dmxdevfilter->dev->lock); + + /* + * If no-data events are enabled on this filter, + * the events can be removed from the queue when + * user gets them. + * For filters with data events enabled, the event is removed + * from the queue only when the respective data is read. + */ + if (event->type != DMX_EVENT_BUFFER_OVERFLOW && + dmxdevfilter->events.data_read_event_masked) + dmxdevfilter->events.read_index = + dvb_dmxdev_advance_event_idx( + dmxdevfilter->events.read_index); + + spin_unlock_irq(&dmxdevfilter->dev->lock); + + /* + * in PULL mode, we might be stalling on + * event queue, so need to wake-up waiters + */ + if (dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL) + wake_up_all(&dmxdevfilter->buffer.queue); + + return res; +} + +static void dvb_dmxdev_filter_timeout(unsigned long data) +{ + struct dmxdev_filter *dmxdevfilter = (struct dmxdev_filter *)data; + struct dmx_filter_event event; + + dmxdevfilter->buffer.error = -ETIMEDOUT; + spin_lock_irq(&dmxdevfilter->dev->lock); + dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT; + event.type = DMX_EVENT_SECTION_TIMEOUT; + dvb_dmxdev_add_event(&dmxdevfilter->events, &event); + spin_unlock_irq(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); +} + +static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter) +{ + struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec; + + del_timer(&dmxdevfilter->timer); + if (para->timeout) { + dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout; + dmxdevfilter->timer.data = (unsigned long)dmxdevfilter; + dmxdevfilter->timer.expires = + jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000; + add_timer(&dmxdevfilter->timer); + } +} + +static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, + const u8 *buffer2, size_t buffer2_len, + struct dmx_section_filter *filter) +{ + struct dmxdev_filter *dmxdevfilter = filter->priv; + struct dmx_filter_event event; + ssize_t free; + + + if (!dmxdevfilter) { + pr_err("%s: null filter.\n", __func__); + return -EINVAL; + } + + spin_lock(&dmxdevfilter->dev->lock); + + if (dmxdevfilter->buffer.error || + dmxdevfilter->state != DMXDEV_STATE_GO || + dmxdevfilter->eos_state) { + spin_unlock(&dmxdevfilter->dev->lock); + return 0; + } + + /* Discard section data if event cannot be notified */ + if (!(dmxdevfilter->events.event_mask.disable_mask & + DMX_EVENT_NEW_SECTION) && + dvb_dmxdev_events_is_full(&dmxdevfilter->events)) { + spin_unlock(&dmxdevfilter->dev->lock); + return 0; + } + + if ((buffer1_len + buffer2_len) == 0) { + if (buffer1 == NULL && buffer2 == NULL) { + /* Section was dropped due to CRC error */ + event.type = DMX_EVENT_SECTION_CRC_ERROR; + dvb_dmxdev_add_event(&dmxdevfilter->events, &event); + + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); + } else { + spin_unlock(&dmxdevfilter->dev->lock); + } + + return 0; + } + + event.params.section.base_offset = dmxdevfilter->buffer.pwrite; + 
event.params.section.start_offset = dmxdevfilter->buffer.pwrite; + + del_timer(&dmxdevfilter->timer); + + /* Verify output buffer has sufficient space, or report overflow */ + free = dvb_ringbuffer_free(&dmxdevfilter->buffer); + if (free < (buffer1_len + buffer2_len)) { + pr_debug("%s: section filter overflow (pid=%u)\n", + __func__, dmxdevfilter->params.sec.pid); + dmxdevfilter->buffer.error = -EOVERFLOW; + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); + return 0; + } + + dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1, buffer1_len); + dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, buffer2_len); + + event.type = DMX_EVENT_NEW_SECTION; + event.params.section.total_length = buffer1_len + buffer2_len; + event.params.section.actual_length = + event.params.section.total_length; + + dvb_dmxdev_add_event(&dmxdevfilter->events, &event); + + if (dmxdevfilter->params.sec.flags & DMX_ONESHOT) + dmxdevfilter->state = DMXDEV_STATE_DONE; + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); + return 0; +} + +static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, + const u8 *buffer2, size_t buffer2_len, + struct dmx_ts_feed *feed) +{ + struct dmxdev_filter *dmxdevfilter = feed->priv; + struct dvb_ringbuffer *buffer; + struct dmxdev_events_queue *events; + struct dmx_filter_event event; + ssize_t free; + + if (!dmxdevfilter) { + pr_err("%s: null filter (feed->is_filtering=%d)\n", + __func__, feed->is_filtering); + return -EINVAL; + } + spin_lock(&dmxdevfilter->dev->lock); + + if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER || + dmxdevfilter->state != DMXDEV_STATE_GO || + dmxdevfilter->eos_state) { + spin_unlock(&dmxdevfilter->dev->lock); + return 0; + } + + if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) { + buffer = &dmxdevfilter->buffer; + events = &dmxdevfilter->events; + } else { + buffer = &dmxdevfilter->dev->dvr_buffer; + events = &dmxdevfilter->dev->dvr_output_events; + } + + if (buffer->error) { + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); + return buffer->error; + } + + if (!events->current_event_data_size) + events->current_event_start_offset = buffer->pwrite; + + /* Verify output buffer has sufficient space, or report overflow */ + free = dvb_ringbuffer_free(buffer); + if (free < (buffer1_len + buffer2_len)) { + pr_debug("%s: buffer overflow error, pid=%u\n", + __func__, dmxdevfilter->params.pes.pid); + buffer->error = -EOVERFLOW; + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); + + return -EOVERFLOW; + } + + if (buffer1_len + buffer2_len) { + dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len); + dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len); + + events->current_event_data_size += (buffer1_len + buffer2_len); + + if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP || + dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) + && events->current_event_data_size >= + dmxdevfilter->params.pes.rec_chunk_size) { + event.type = DMX_EVENT_NEW_REC_CHUNK; + event.params.recording_chunk.offset = + events->current_event_start_offset; + event.params.recording_chunk.size = + events->current_event_data_size; + + dvb_dmxdev_add_event(events, &event); + events->current_event_data_size = 0; + } + } + + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); + return 0; +} + +static int dvb_dmxdev_section_event_cb(struct dmx_section_filter *filter, + struct dmx_data_ready *dmx_data_ready) +{ + int res = 0; + struct 
dmxdev_filter *dmxdevfilter = filter->priv; + struct dmx_filter_event event; + ssize_t free; + + if (!dmxdevfilter) { + pr_err("%s: null filter. event type=%d (length=%d) will be discarded\n", + __func__, dmx_data_ready->status, + dmx_data_ready->data_length); + return -EINVAL; + } + + spin_lock(&dmxdevfilter->dev->lock); + + if (dmxdevfilter->buffer.error == -ETIMEDOUT || + dmxdevfilter->state != DMXDEV_STATE_GO || + dmxdevfilter->eos_state) { + spin_unlock(&dmxdevfilter->dev->lock); + return 0; + } + + if (dmx_data_ready->data_length == 0) { + if (dmx_data_ready->status == DMX_CRC_ERROR) { + /* Section was dropped due to CRC error */ + event.type = DMX_EVENT_SECTION_CRC_ERROR; + dvb_dmxdev_add_event(&dmxdevfilter->events, &event); + + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); + } else if (dmx_data_ready->status == DMX_OK_EOS) { + event.type = DMX_EVENT_EOS; + dvb_dmxdev_add_event(&dmxdevfilter->events, &event); + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); + } else if (dmx_data_ready->status == DMX_OK_MARKER) { + event.type = DMX_EVENT_MARKER; + event.params.marker.id = dmx_data_ready->marker.id; + dvb_dmxdev_add_event(&dmxdevfilter->events, &event); + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); + } else if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) { + event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE; + event.params.scrambling_status = + dmx_data_ready->scrambling_bits; + dvb_dmxdev_add_event(&dmxdevfilter->events, &event); + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); + } else if (dmx_data_ready->status == DMX_OVERRUN_ERROR) { + pr_debug("dmxdev: section filter overflow (pid=%u)\n", + dmxdevfilter->params.sec.pid); + /* Set buffer error to notify user overflow occurred */ + dmxdevfilter->buffer.error = -EOVERFLOW; + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); + } else { + spin_unlock(&dmxdevfilter->dev->lock); + } + return 0; + } + + event.type = DMX_EVENT_NEW_SECTION; + event.params.section.base_offset = dmxdevfilter->buffer.pwrite; + event.params.section.start_offset = dmxdevfilter->buffer.pwrite; + event.params.section.total_length = dmx_data_ready->data_length; + event.params.section.actual_length = dmx_data_ready->data_length; + + if (dmx_data_ready->status == DMX_MISSED_ERROR) + event.params.section.flags = DMX_FILTER_CC_ERROR; + else + event.params.section.flags = 0; + + free = dvb_ringbuffer_free(&dmxdevfilter->buffer); + if (free < dmx_data_ready->data_length) { + pr_err("%s: invalid data length: data_length=%d > free=%zd\n", + __func__, dmx_data_ready->data_length, free); + } else { + res = dvb_dmxdev_add_event(&dmxdevfilter->events, &event); + DVB_RINGBUFFER_PUSH(&dmxdevfilter->buffer, + dmx_data_ready->data_length); + } + + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&dmxdevfilter->buffer.queue); + + return res; +} + +static int dvb_dmxdev_ts_event_cb(struct dmx_ts_feed *feed, + struct dmx_data_ready *dmx_data_ready) +{ + struct dmxdev_filter *dmxdevfilter = feed->priv; + struct dvb_ringbuffer *buffer; + struct dmxdev_events_queue *events; + struct dmx_filter_event event; + ssize_t free; + + if (!dmxdevfilter) { + pr_err("%s: null filter (feed->is_filtering=%d) event type=%d (length=%d) will be discarded\n", + __func__, feed->is_filtering, + dmx_data_ready->status, + dmx_data_ready->data_length); + return -EINVAL; + } + + spin_lock(&dmxdevfilter->dev->lock); + + 
if (dmxdevfilter->state != DMXDEV_STATE_GO || + dmxdevfilter->eos_state) { + spin_unlock(&dmxdevfilter->dev->lock); + return 0; + } + + if (dmxdevfilter->params.pes.output != DMX_OUT_TS_TAP) { + buffer = &dmxdevfilter->buffer; + events = &dmxdevfilter->events; + } else { + buffer = &dmxdevfilter->dev->dvr_buffer; + events = &dmxdevfilter->dev->dvr_output_events; + } + + if (!buffer->error && dmx_data_ready->status == DMX_OVERRUN_ERROR) { + pr_debug("dmxdev: %s filter buffer overflow (pid=%u)\n", + dmxdevfilter->params.pes.output == DMX_OUT_DECODER ? + "decoder" : "", + dmxdevfilter->params.pes.pid); + /* Set buffer error to notify user overflow occurred */ + buffer->error = -EOVERFLOW; + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); + return 0; + } + + if (dmx_data_ready->status == DMX_OK_EOS) { + /* Report partial recording chunk */ + if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP || + dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) + && events->current_event_data_size) { + event.type = DMX_EVENT_NEW_REC_CHUNK; + event.params.recording_chunk.offset = + events->current_event_start_offset; + event.params.recording_chunk.size = + events->current_event_data_size; + events->current_event_start_offset = + (events->current_event_start_offset + + events->current_event_data_size) % + buffer->size; + events->current_event_data_size = 0; + dvb_dmxdev_add_event(events, &event); + } + + dmxdevfilter->eos_state = 1; + pr_debug("dmxdev: DMX_OK_EOS - entering EOS state\n"); + event.type = DMX_EVENT_EOS; + dvb_dmxdev_add_event(events, &event); + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); + return 0; + } - del_timer(&dmxdevfilter->timer); - if (para->timeout) { - dmxdevfilter->timer.function = dvb_dmxdev_filter_timeout; - dmxdevfilter->timer.data = (unsigned long)dmxdevfilter; - dmxdevfilter->timer.expires = - jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000; - add_timer(&dmxdevfilter->timer); + if (dmx_data_ready->status == DMX_OK_MARKER) { + pr_debug("dmxdev: DMX_OK_MARKER - id=%llu\n", + dmx_data_ready->marker.id); + event.type = DMX_EVENT_MARKER; + event.params.marker.id = dmx_data_ready->marker.id; + dvb_dmxdev_add_event(events, &event); + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); + return 0; } -} -static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, - const u8 *buffer2, size_t buffer2_len, - struct dmx_section_filter *filter) -{ - struct dmxdev_filter *dmxdevfilter = filter->priv; - int ret; + if (dmx_data_ready->status == DMX_OK_PCR) { + pr_debug("dmxdev: event callback DMX_OK_PCR\n"); + event.type = DMX_EVENT_NEW_PCR; + event.params.pcr.pcr = dmx_data_ready->pcr.pcr; + event.params.pcr.stc = dmx_data_ready->pcr.stc; + if (dmx_data_ready->pcr.disc_indicator_set) + event.params.pcr.flags = + DMX_FILTER_DISCONTINUITY_INDICATOR; + else + event.params.pcr.flags = 0; - if (dmxdevfilter->buffer.error) { - wake_up(&dmxdevfilter->buffer.queue); + dvb_dmxdev_add_event(events, &event); + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); return 0; } - spin_lock(&dmxdevfilter->dev->lock); - if (dmxdevfilter->state != DMXDEV_STATE_GO) { + + if (dmx_data_ready->status == DMX_OK_IDX) { + pr_debug("dmxdev: event callback DMX_OK_IDX\n"); + event.type = DMX_EVENT_NEW_INDEX_ENTRY; + event.params.index = dmx_data_ready->idx_event; + + dvb_dmxdev_add_event(events, &event); spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); return 0; } - 
del_timer(&dmxdevfilter->timer); - dprintk("dmxdev: section callback %*ph\n", 6, buffer1); - ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1, - buffer1_len); - if (ret == buffer1_len) { - ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, - buffer2_len); + + if (dmx_data_ready->status == DMX_OK_SCRAMBLING_STATUS) { + event.type = DMX_EVENT_SCRAMBLING_STATUS_CHANGE; + event.params.scrambling_status = + dmx_data_ready->scrambling_bits; + dvb_dmxdev_add_event(events, &event); + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); + return 0; } - if (ret < 0) - dmxdevfilter->buffer.error = ret; - if (dmxdevfilter->params.sec.flags & DMX_ONESHOT) - dmxdevfilter->state = DMXDEV_STATE_DONE; - spin_unlock(&dmxdevfilter->dev->lock); - wake_up(&dmxdevfilter->buffer.queue); - return 0; -} -static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, - const u8 *buffer2, size_t buffer2_len, - struct dmx_ts_feed *feed) -{ - struct dmxdev_filter *dmxdevfilter = feed->priv; - struct dvb_ringbuffer *buffer; - int ret; + if (dmx_data_ready->status == DMX_OK_DECODER_BUF) { + event.type = DMX_EVENT_NEW_ES_DATA; + event.params.es_data.buf_handle = dmx_data_ready->buf.handle; + event.params.es_data.cookie = dmx_data_ready->buf.cookie; + event.params.es_data.offset = dmx_data_ready->buf.offset; + event.params.es_data.data_len = dmx_data_ready->buf.len; + event.params.es_data.pts_valid = dmx_data_ready->buf.pts_exists; + event.params.es_data.pts = dmx_data_ready->buf.pts; + event.params.es_data.dts_valid = dmx_data_ready->buf.dts_exists; + event.params.es_data.dts = dmx_data_ready->buf.dts; + event.params.es_data.stc = dmx_data_ready->buf.stc; + event.params.es_data.transport_error_indicator_counter = + dmx_data_ready->buf.tei_counter; + event.params.es_data.continuity_error_counter = + dmx_data_ready->buf.cont_err_counter; + event.params.es_data.ts_packets_num = + dmx_data_ready->buf.ts_packets_num; + event.params.es_data.ts_dropped_bytes = + dmx_data_ready->buf.ts_dropped_bytes; + dvb_dmxdev_add_event(events, &event); + spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); + return 0; + } - spin_lock(&dmxdevfilter->dev->lock); if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) { spin_unlock(&dmxdevfilter->dev->lock); + wake_up_all(&buffer->queue); return 0; } - if (dmxdevfilter->params.pes.output == DMX_OUT_TAP - || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) - buffer = &dmxdevfilter->buffer; - else - buffer = &dmxdevfilter->dev->dvr_buffer; - if (buffer->error) { + free = dvb_ringbuffer_free(buffer); + if (free < dmx_data_ready->data_length) { + pr_err("%s: invalid data length: data_length=%d > free=%zd\n", + __func__, dmx_data_ready->data_length, free); + spin_unlock(&dmxdevfilter->dev->lock); - wake_up(&buffer->queue); + wake_up_all(&buffer->queue); return 0; } - ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len); - if (ret == buffer1_len) - ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len); - if (ret < 0) - buffer->error = ret; + + if (dmxdevfilter->params.pes.output == DMX_OUT_TAP) { + if (dmx_data_ready->status == DMX_OK && + !events->current_event_data_size) { + events->current_event_start_offset = buffer->pwrite; + } else if (dmx_data_ready->status == DMX_OK_PES_END) { + event.type = DMX_EVENT_NEW_PES; + + event.params.pes.base_offset = + events->current_event_start_offset; + event.params.pes.start_offset = + (events->current_event_start_offset + + dmx_data_ready->pes_end.start_gap) % + 
buffer->size; + + event.params.pes.actual_length = + dmx_data_ready->pes_end.actual_length; + event.params.pes.total_length = + events->current_event_data_size; + + event.params.pes.flags = 0; + if (dmx_data_ready->pes_end.disc_indicator_set) + event.params.pes.flags |= + DMX_FILTER_DISCONTINUITY_INDICATOR; + if (dmx_data_ready->pes_end.pes_length_mismatch) + event.params.pes.flags |= + DMX_FILTER_PES_LENGTH_ERROR; + + event.params.pes.stc = dmx_data_ready->pes_end.stc; + event.params.pes.transport_error_indicator_counter = + dmx_data_ready->pes_end.tei_counter; + event.params.pes.continuity_error_counter = + dmx_data_ready->pes_end.cont_err_counter; + event.params.pes.ts_packets_num = + dmx_data_ready->pes_end.ts_packets_num; + + /* Do not report zero length PES */ + if (event.params.pes.total_length) + dvb_dmxdev_add_event(events, &event); + + events->current_event_data_size = 0; + } + } else if (!events->current_event_data_size) { + events->current_event_start_offset = buffer->pwrite; + } + + events->current_event_data_size += dmx_data_ready->data_length; + DVB_RINGBUFFER_PUSH(buffer, dmx_data_ready->data_length); + + if ((dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) || + (dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP)) { + while (events->current_event_data_size >= + dmxdevfilter->params.pes.rec_chunk_size) { + event.type = DMX_EVENT_NEW_REC_CHUNK; + event.params.recording_chunk.offset = + events->current_event_start_offset; + event.params.recording_chunk.size = + dmxdevfilter->params.pes.rec_chunk_size; + events->current_event_data_size = + events->current_event_data_size - + dmxdevfilter->params.pes.rec_chunk_size; + events->current_event_start_offset = + (events->current_event_start_offset + + dmxdevfilter->params.pes.rec_chunk_size) % + buffer->size; + + dvb_dmxdev_add_event(events, &event); + } + } spin_unlock(&dmxdevfilter->dev->lock); - wake_up(&buffer->queue); + wake_up_all(&buffer->queue); return 0; } @@ -427,11 +3096,18 @@ static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter) switch (dmxdevfilter->type) { case DMXDEV_TYPE_SEC: del_timer(&dmxdevfilter->timer); - dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec); + dmxdevfilter->feed.sec.feed->stop_filtering( + dmxdevfilter->feed.sec.feed); break; case DMXDEV_TYPE_PES: - list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) + list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) { + if (dmxdevfilter->params.pes.output == DMX_OUT_TS_TAP) { + dmxdevfilter->dev->dvr_feeds_count--; + if (!dmxdevfilter->dev->dvr_feeds_count) + dmxdevfilter->dev->dvr_feed = NULL; + } feed->ts->stop_filtering(feed->ts); + } break; default: return -EINVAL; @@ -449,7 +3125,8 @@ static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter) switch (filter->type) { case DMXDEV_TYPE_SEC: - return filter->feed.sec->start_filtering(filter->feed.sec); + return filter->feed.sec.feed->start_filtering( + filter->feed.sec.feed); case DMXDEV_TYPE_PES: list_for_each_entry(feed, &filter->feed.ts, next) { ret = feed->ts->start_filtering(feed->ts); @@ -483,7 +3160,7 @@ static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter) } filter->dev->demux->release_section_feed(dmxdev->demux, - filter->feed.sec); + filter->feed.sec.feed); return 0; } @@ -492,25 +3169,38 @@ static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter) { struct dmxdev_feed *feed; struct dmx_demux *demux; + struct ts_insertion_buffer *ts_buffer; if (dmxdevfilter->state < DMXDEV_STATE_GO) return 0; switch (dmxdevfilter->type) { case 
DMXDEV_TYPE_SEC: - if (!dmxdevfilter->feed.sec) + if (!dmxdevfilter->feed.sec.feed) break; dvb_dmxdev_feed_stop(dmxdevfilter); if (dmxdevfilter->filter.sec) - dmxdevfilter->feed.sec-> - release_filter(dmxdevfilter->feed.sec, + dmxdevfilter->feed.sec.feed-> + release_filter(dmxdevfilter->feed.sec.feed, dmxdevfilter->filter.sec); dvb_dmxdev_feed_restart(dmxdevfilter); - dmxdevfilter->feed.sec = NULL; + dmxdevfilter->feed.sec.feed = NULL; break; case DMXDEV_TYPE_PES: dvb_dmxdev_feed_stop(dmxdevfilter); demux = dmxdevfilter->dev->demux; + + if (!list_empty(&dmxdevfilter->insertion_buffers)) { + feed = list_first_entry(&dmxdevfilter->feed.ts, + struct dmxdev_feed, next); + + list_for_each_entry(ts_buffer, + &dmxdevfilter->insertion_buffers, next) + dvb_dmxdev_cancel_ts_insertion(ts_buffer); + if (feed->ts->ts_insertion_terminate) + feed->ts->ts_insertion_terminate(feed->ts); + } + list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) { demux->release_ts_feed(demux, feed->ts); feed->ts = NULL; @@ -522,7 +3212,13 @@ static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter) return -EINVAL; } - dvb_ringbuffer_flush(&dmxdevfilter->buffer); + spin_lock_irq(&dmxdevfilter->dev->lock); + dvb_dmxdev_flush_output(&dmxdevfilter->buffer, &dmxdevfilter->events); + dvb_ringbuffer_reset(&dmxdevfilter->buffer); + spin_unlock_irq(&dmxdevfilter->dev->lock); + + wake_up_all(&dmxdevfilter->buffer.queue); + return 0; } @@ -589,12 +3285,76 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev, tsfeed = feed->ts; tsfeed->priv = filter; - ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, 32768, timeout); + if (filter->params.pes.output == DMX_OUT_TS_TAP) { + tsfeed->buffer.ringbuff = &dmxdev->dvr_buffer; + tsfeed->buffer.priv_handle = dmxdev->dvr_priv_buff_handle; + if (!dmxdev->dvr_feeds_count) + dmxdev->dvr_feed = filter; + dmxdev->dvr_feeds_count++; + } else if (filter->params.pes.output == DMX_OUT_DECODER) { + tsfeed->buffer.ringbuff = &filter->buffer; + tsfeed->decoder_buffers = &filter->decoder_buffers; + tsfeed->buffer.priv_handle = filter->priv_buff_handle; + } else { + tsfeed->buffer.ringbuff = &filter->buffer; + tsfeed->buffer.priv_handle = filter->priv_buff_handle; + } + + if (tsfeed->data_ready_cb) { + ret = tsfeed->data_ready_cb(tsfeed, dvb_dmxdev_ts_event_cb); + + if (ret < 0) { + dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed); + return ret; + } + } + + ret = tsfeed->set(tsfeed, feed->pid, + ts_type, ts_pes, + filter->decoder_buffers.buffers_size, + timeout); if (ret < 0) { dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed); return ret; } + if (tsfeed->set_tsp_out_format) + tsfeed->set_tsp_out_format(tsfeed, filter->dmx_tsp_format); + + if (tsfeed->set_secure_mode) + tsfeed->set_secure_mode(tsfeed, &filter->sec_mode); + + if (tsfeed->set_cipher_ops) + tsfeed->set_cipher_ops(tsfeed, &feed->cipher_ops); + + if ((para->pes_type == DMX_PES_VIDEO0) || + (para->pes_type == DMX_PES_VIDEO1) || + (para->pes_type == DMX_PES_VIDEO2) || + (para->pes_type == DMX_PES_VIDEO3)) { + if (tsfeed->set_video_codec) { + ret = tsfeed->set_video_codec(tsfeed, + para->video_codec); + + if (ret < 0) { + dmxdev->demux->release_ts_feed(dmxdev->demux, + tsfeed); + return ret; + } + } + } + + if ((filter->params.pes.output == DMX_OUT_TS_TAP) || + (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP)) + if (tsfeed->set_idx_params) { + ret = tsfeed->set_idx_params( + tsfeed, &feed->idx_params); + if (ret) { + dmxdev->demux->release_ts_feed(dmxdev->demux, + tsfeed); + return ret; + } + } + ret = 
tsfeed->start_filtering(tsfeed); if (ret < 0) { dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed); @@ -604,12 +3364,50 @@ static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev, return 0; } +static int dvb_filter_external_buffer_only(struct dmxdev *dmxdev, + struct dmxdev_filter *filter) +{ + struct dmx_caps caps; + int is_external_only; + int flags; + + /* + * For backward compatibility, default assumes that + * external only buffers are not supported. + */ + flags = 0; + if (dmxdev->demux->get_caps) { + dmxdev->demux->get_caps(dmxdev->demux, &caps); + + if (filter->type == DMXDEV_TYPE_SEC) + flags = caps.section.flags; + else if (filter->params.pes.output == DMX_OUT_DECODER) + /* For decoder filters dmxdev buffer is not required */ + flags = 0; + else if (filter->params.pes.output == DMX_OUT_TAP) + flags = caps.pes.flags; + else if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188) + flags = caps.recording_188_tsp.flags; + else + flags = caps.recording_192_tsp.flags; + } + + if (!(flags & DMX_BUFFER_INTERNAL_SUPPORT) && + (flags & DMX_BUFFER_EXTERNAL_SUPPORT)) + is_external_only = 1; + else + is_external_only = 0; + + return is_external_only; +} + static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter) { struct dmxdev *dmxdev = filter->dev; struct dmxdev_feed *feed; void *mem; int ret, i; + size_t tsp_size; if (filter->state < DMXDEV_STATE_SET) return -EINVAL; @@ -617,34 +3415,64 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter) if (filter->state >= DMXDEV_STATE_GO) dvb_dmxdev_filter_stop(filter); + if (!dvb_filter_verify_buffer_size(filter)) + return -EINVAL; + if (!filter->buffer.data) { - mem = vmalloc(filter->buffer.size); + /* + * dmxdev buffer in decoder filters is not really used + * to exchange data with applications. Decoder buffers + * can be set using DMX_SET_DECODER_BUFFER, which + * would not update the filter->buffer.data at all. + * Therefore we should not treat this filter as + * other regular filters and should not fail here + * even if user sets the buffer in deocder + * filter as external buffer. 
+ */ + if (filter->type == DMXDEV_TYPE_PES && + (filter->params.pes.output == DMX_OUT_DECODER || + filter->params.pes.output == DMX_OUT_TS_TAP)) + filter->buffer_mode = DMX_BUFFER_MODE_INTERNAL; + + if (!(filter->type == DMXDEV_TYPE_PES && + filter->params.pes.output == DMX_OUT_TS_TAP) && + (filter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL || + dvb_filter_external_buffer_only(dmxdev, filter))) + return -ENOMEM; + + mem = vmalloc_user(filter->buffer.size); if (!mem) return -ENOMEM; spin_lock_irq(&filter->dev->lock); filter->buffer.data = mem; spin_unlock_irq(&filter->dev->lock); + } else if ((filter->buffer_mode == DMX_BUFFER_MODE_INTERNAL) && + dvb_filter_external_buffer_only(dmxdev, filter)) { + return -ENOMEM; } - dvb_ringbuffer_flush(&filter->buffer); + filter->eos_state = 0; + + spin_lock_irq(&filter->dev->lock); + dvb_dmxdev_flush_output(&filter->buffer, &filter->events); + spin_unlock_irq(&filter->dev->lock); switch (filter->type) { case DMXDEV_TYPE_SEC: { struct dmx_sct_filter_params *para = &filter->params.sec; struct dmx_section_filter **secfilter = &filter->filter.sec; - struct dmx_section_feed **secfeed = &filter->feed.sec; + struct dmx_section_feed **secfeed = &filter->feed.sec.feed; *secfilter = NULL; *secfeed = NULL; - /* find active filter/feed with same PID */ for (i = 0; i < dmxdev->filternum; i++) { if (dmxdev->filter[i].state >= DMXDEV_STATE_GO && dmxdev->filter[i].type == DMXDEV_TYPE_SEC && dmxdev->filter[i].params.sec.pid == para->pid) { - *secfeed = dmxdev->filter[i].feed.sec; + *secfeed = dmxdev->filter[i].feed.sec.feed; break; } } @@ -652,22 +3480,44 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter) /* if no feed found, try to allocate new one */ if (!*secfeed) { ret = dmxdev->demux->allocate_section_feed(dmxdev->demux, - secfeed, - dvb_dmxdev_section_callback); + secfeed, + dvb_dmxdev_section_callback); if (ret < 0) { - printk("DVB (%s): could not alloc feed\n", + pr_err("DVB (%s): could not alloc feed\n", __func__); return ret; } + if ((*secfeed)->data_ready_cb) { + ret = (*secfeed)->data_ready_cb( + *secfeed, + dvb_dmxdev_section_event_cb); + + if (ret < 0) { + pr_err( + "DVB (%s): could not set event cb\n", + __func__); + dvb_dmxdev_feed_restart(filter); + return ret; + } + } + ret = (*secfeed)->set(*secfeed, para->pid, 32768, (para->flags & DMX_CHECK_CRC) ? 
1 : 0); if (ret < 0) { - printk("DVB (%s): could not set feed\n", - __func__); + pr_err("DVB (%s): could not set feed\n", + __func__); dvb_dmxdev_feed_restart(filter); return ret; } + + if ((*secfeed)->set_secure_mode) + (*secfeed)->set_secure_mode(*secfeed, + &filter->sec_mode); + + if ((*secfeed)->set_cipher_ops) + (*secfeed)->set_cipher_ops(*secfeed, + &filter->feed.sec.cipher_ops); } else { dvb_dmxdev_feed_stop(filter); } @@ -675,12 +3525,14 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter) ret = (*secfeed)->allocate_filter(*secfeed, secfilter); if (ret < 0) { dvb_dmxdev_feed_restart(filter); - filter->feed.sec->start_filtering(*secfeed); - dprintk("could not get filter\n"); + filter->feed.sec.feed->start_filtering(*secfeed); + pr_debug("could not get filter\n"); return ret; } (*secfilter)->priv = filter; + (*secfilter)->buffer.ringbuff = &filter->buffer; + (*secfilter)->buffer.priv_handle = filter->priv_buff_handle; memcpy(&((*secfilter)->filter_value[3]), &(para->filter.filter[1]), DMX_FILTER_SIZE - 1); @@ -696,8 +3548,12 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter) (*secfilter)->filter_mask[2] = 0; filter->todo = 0; + filter->events.data_read_event_masked = + filter->events.event_mask.disable_mask & + DMX_EVENT_NEW_SECTION; - ret = filter->feed.sec->start_filtering(filter->feed.sec); + ret = filter->feed.sec.feed->start_filtering( + filter->feed.sec.feed); if (ret < 0) return ret; @@ -705,19 +3561,93 @@ static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter) break; } case DMXDEV_TYPE_PES: + if (filter->params.pes.rec_chunk_size < + DMX_REC_BUFF_CHUNK_MIN_SIZE) + filter->params.pes.rec_chunk_size = + DMX_REC_BUFF_CHUNK_MIN_SIZE; + + if (filter->params.pes.rec_chunk_size >= + filter->buffer.size) + filter->params.pes.rec_chunk_size = + filter->buffer.size >> 2; + + /* Align rec-chunk based on output format */ + if (filter->dmx_tsp_format == DMX_TSP_FORMAT_188) + tsp_size = 188; + else + tsp_size = 192; + + filter->params.pes.rec_chunk_size /= tsp_size; + filter->params.pes.rec_chunk_size *= tsp_size; + + if (filter->params.pes.output == DMX_OUT_TS_TAP) + dmxdev->dvr_output_events.data_read_event_masked = + dmxdev->dvr_output_events.event_mask.disable_mask & + DMX_EVENT_NEW_REC_CHUNK; + else if (filter->params.pes.output == DMX_OUT_TSDEMUX_TAP) + filter->events.data_read_event_masked = + filter->events.event_mask.disable_mask & + DMX_EVENT_NEW_REC_CHUNK; + else if (filter->params.pes.output == DMX_OUT_TAP) + filter->events.data_read_event_masked = + filter->events.event_mask.disable_mask & + DMX_EVENT_NEW_PES; + else + filter->events.data_read_event_masked = 1; + + ret = 0; list_for_each_entry(feed, &filter->feed.ts, next) { ret = dvb_dmxdev_start_feed(dmxdev, filter, feed); - if (ret < 0) { - dvb_dmxdev_filter_stop(filter); - return ret; + if (ret) + break; + } + + if (!ret) + break; + + /* cleanup feeds that were started before the failure */ + list_for_each_entry(feed, &filter->feed.ts, next) { + if (!feed->ts) + continue; + feed->ts->stop_filtering(feed->ts); + dmxdev->demux->release_ts_feed(dmxdev->demux, feed->ts); + feed->ts = NULL; + + if (filter->params.pes.output == DMX_OUT_TS_TAP) { + filter->dev->dvr_feeds_count--; + if (!filter->dev->dvr_feeds_count) + filter->dev->dvr_feed = NULL; } } - break; + return ret; + default: return -EINVAL; } dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO); + + if ((filter->type == DMXDEV_TYPE_PES) && + !list_empty(&filter->insertion_buffers)) { + struct ts_insertion_buffer *ts_buffer; + + 
feed = list_first_entry(&filter->feed.ts, + struct dmxdev_feed, next); + + ret = 0; + if (feed->ts->ts_insertion_init) + ret = feed->ts->ts_insertion_init(feed->ts); + if (!ret) { + list_for_each_entry(ts_buffer, + &filter->insertion_buffers, next) + dvb_dmxdev_queue_ts_insertion( + ts_buffer); + } else { + pr_err("%s: ts_insertion_init failed, err %d\n", + __func__, ret); + } + } + return 0; } @@ -747,11 +3677,28 @@ static int dvb_demux_open(struct inode *inode, struct file *file) mutex_init(&dmxdevfilter->mutex); file->private_data = dmxdevfilter; + memset(&dmxdevfilter->decoder_buffers, + 0, + sizeof(dmxdevfilter->decoder_buffers)); + dmxdevfilter->decoder_buffers.buffers_size = + DMX_DEFAULT_DECODER_BUFFER_SIZE; + dmxdevfilter->buffer_mode = DMX_BUFFER_MODE_INTERNAL; + dmxdevfilter->priv_buff_handle = NULL; dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); + dvb_dmxdev_flush_events(&dmxdevfilter->events); + dmxdevfilter->events.event_mask.disable_mask = DMX_EVENT_NEW_ES_DATA; + dmxdevfilter->events.event_mask.no_wakeup_mask = 0; + dmxdevfilter->events.event_mask.wakeup_threshold = 1; + dmxdevfilter->type = DMXDEV_TYPE_NONE; dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); init_timer(&dmxdevfilter->timer); + dmxdevfilter->sec_mode.is_secured = 0; + + INIT_LIST_HEAD(&dmxdevfilter->insertion_buffers); + + dmxdevfilter->dmx_tsp_format = DMX_TSP_FORMAT_188; dvbdev->users++; mutex_unlock(&dmxdev->mutex); @@ -761,23 +3708,40 @@ static int dvb_demux_open(struct inode *inode, struct file *file) static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev, struct dmxdev_filter *dmxdevfilter) { + struct ts_insertion_buffer *ts_buffer, *tmp; + mutex_lock(&dmxdev->mutex); mutex_lock(&dmxdevfilter->mutex); dvb_dmxdev_filter_stop(dmxdevfilter); dvb_dmxdev_filter_reset(dmxdevfilter); + list_for_each_entry_safe(ts_buffer, tmp, + &dmxdevfilter->insertion_buffers, next) { + list_del(&ts_buffer->next); + vfree(ts_buffer->buffer); + vfree(ts_buffer); + } + if (dmxdevfilter->buffer.data) { void *mem = dmxdevfilter->buffer.data; spin_lock_irq(&dmxdev->lock); dmxdevfilter->buffer.data = NULL; spin_unlock_irq(&dmxdev->lock); - vfree(mem); + if (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_INTERNAL) + vfree(mem); + } + + if ((dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL) && + dmxdevfilter->priv_buff_handle) { + dmxdev->demux->unmap_buffer(dmxdev->demux, + dmxdevfilter->priv_buff_handle); + dmxdevfilter->priv_buff_handle = NULL; } dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE); - wake_up(&dmxdevfilter->buffer.queue); + wake_up_all(&dmxdevfilter->buffer.queue); mutex_unlock(&dmxdevfilter->mutex); mutex_unlock(&dmxdev->mutex); return 0; @@ -795,6 +3759,7 @@ static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev, struct dmxdev_filter *filter, u16 pid) { struct dmxdev_feed *feed; + int ret = 0; if ((filter->type != DMXDEV_TYPE_PES) || (filter->state < DMXDEV_STATE_SET)) @@ -810,28 +3775,45 @@ static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev, return -ENOMEM; feed->pid = pid; - list_add(&feed->next, &filter->feed.ts); + feed->cipher_ops.operations_count = 0; + feed->idx_params.enable = 0; if (filter->state >= DMXDEV_STATE_GO) - return dvb_dmxdev_start_feed(dmxdev, filter, feed); + ret = dvb_dmxdev_start_feed(dmxdev, filter, feed); - return 0; + if (!ret) + list_add(&feed->next, &filter->feed.ts); + else + kfree(feed); + + return ret; } static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev, struct dmxdev_filter *filter, u16 pid) { + int feed_count; struct dmxdev_feed *feed, 
*tmp; if ((filter->type != DMXDEV_TYPE_PES) || (filter->state < DMXDEV_STATE_SET)) return -EINVAL; + feed_count = 0; + list_for_each_entry(tmp, &filter->feed.ts, next) + feed_count++; + + if (feed_count <= 1) + return -EINVAL; + list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) { - if ((feed->pid == pid) && (feed->ts != NULL)) { - feed->ts->stop_filtering(feed->ts); - filter->dev->demux->release_ts_feed(filter->dev->demux, - feed->ts); + if (feed->pid == pid) { + if (feed->ts != NULL) { + feed->ts->stop_filtering(feed->ts); + filter->dev->demux->release_ts_feed( + filter->dev->demux, + feed->ts); + } list_del(&feed->next); kfree(feed); } @@ -844,7 +3826,7 @@ static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev, struct dmxdev_filter *dmxdevfilter, struct dmx_sct_filter_params *params) { - dprintk("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n", + pr_debug("function : %s, PID=0x%04x, flags=%02x, timeout=%d\n", __func__, params->pid, params->flags, params->timeout); dvb_dmxdev_filter_stop(dmxdevfilter); @@ -853,6 +3835,7 @@ static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev, memcpy(&dmxdevfilter->params.sec, params, sizeof(struct dmx_sct_filter_params)); invert_mode(&dmxdevfilter->params.sec.filter); + dmxdevfilter->feed.sec.cipher_ops.operations_count = 0; dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); if (params->flags & DMX_IMMEDIATE_START) @@ -861,6 +3844,99 @@ static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev, return 0; } +static int dvb_dmxdev_set_secure_mode( + struct dmxdev *dmxdev, + struct dmxdev_filter *filter, + struct dmx_secure_mode *sec_mode) +{ + if (!dmxdev || !filter || !sec_mode) + return -EINVAL; + + if (filter->state == DMXDEV_STATE_GO) { + pr_err("%s: invalid filter state\n", __func__); + return -EBUSY; + } + + pr_debug("%s: secure=%d\n", __func__, sec_mode->is_secured); + + filter->sec_mode = *sec_mode; + + return 0; +} + +static int dvb_dmxdev_set_cipher(struct dmxdev *dmxdev, + struct dmxdev_filter *filter, + struct dmx_cipher_operations *cipher_ops) +{ + struct dmxdev_feed *feed; + struct dmxdev_feed *ts_feed = NULL; + struct dmxdev_sec_feed *sec_feed = NULL; + struct dmx_caps caps; + + if (!dmxdev || !dmxdev->demux->get_caps) + return -EINVAL; + + dmxdev->demux->get_caps(dmxdev->demux, &caps); + + if (!filter || !cipher_ops || + (cipher_ops->operations_count > caps.num_cipher_ops) || + (cipher_ops->operations_count > + DMX_MAX_CIPHER_OPERATIONS_COUNT)) + return -EINVAL; + + pr_debug("%s: pid=%d, operations=%d\n", __func__, + cipher_ops->pid, cipher_ops->operations_count); + + if (filter->state < DMXDEV_STATE_SET || + filter->state > DMXDEV_STATE_GO) { + pr_err("%s: invalid filter state\n", __func__); + return -EPERM; + } + + if (!filter->sec_mode.is_secured && cipher_ops->operations_count) { + pr_err("%s: secure mode must be enabled to set cipher ops\n", + __func__); + return -EPERM; + } + + switch (filter->type) { + case DMXDEV_TYPE_PES: + list_for_each_entry(feed, &filter->feed.ts, next) { + if (feed->pid == cipher_ops->pid) { + ts_feed = feed; + ts_feed->cipher_ops = *cipher_ops; + if (filter->state == DMXDEV_STATE_GO && + ts_feed->ts->set_cipher_ops) + ts_feed->ts->set_cipher_ops( + ts_feed->ts, cipher_ops); + break; + } + } + break; + case DMXDEV_TYPE_SEC: + if (filter->params.sec.pid == cipher_ops->pid) { + sec_feed = &filter->feed.sec; + sec_feed->cipher_ops = *cipher_ops; + if (filter->state == DMXDEV_STATE_GO && + sec_feed->feed->set_cipher_ops) + sec_feed->feed->set_cipher_ops(sec_feed->feed, + cipher_ops); + } + 
break; + + default: + return -EINVAL; + } + + if (!ts_feed && !sec_feed) { + pr_err("%s: pid %d is undefined for this filter\n", + __func__, cipher_ops->pid); + return -EINVAL; + } + + return 0; +} + static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, struct dmxdev_filter *dmxdevfilter, struct dmx_pes_filter_params *params) @@ -891,6 +3967,55 @@ static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, return 0; } +static int dvb_dmxdev_set_decoder_buffer(struct dmxdev *dmxdev, + struct dmxdev_filter *filter, + struct dmx_decoder_buffers *buffs) +{ + int i; + struct dmx_decoder_buffers *dec_buffs; + struct dmx_caps caps; + + if (!dmxdev || !filter || !buffs) + return -EINVAL; + + dec_buffs = &filter->decoder_buffers; + if (!dmxdev->demux->get_caps) + return -EINVAL; + + dmxdev->demux->get_caps(dmxdev->demux, &caps); + if (!dvb_dmxdev_verify_buffer_size(buffs->buffers_size, + caps.decoder.max_size, caps.decoder.size_alignment)) + return -EINVAL; + + if ((buffs->buffers_size == 0) || + (buffs->is_linear && + ((buffs->buffers_num <= 1) || + (buffs->buffers_num > DMX_MAX_DECODER_BUFFER_NUM)))) + return -EINVAL; + + if (buffs->buffers_num == 0) { + /* Internal mode - linear buffers not supported in this mode */ + if (!(caps.decoder.flags & DMX_BUFFER_INTERNAL_SUPPORT) || + buffs->is_linear) + return -EINVAL; + } else { + /* External buffer(s) mode */ + if ((!(caps.decoder.flags & DMX_BUFFER_LINEAR_GROUP_SUPPORT) && + buffs->buffers_num > 1) || + !(caps.decoder.flags & DMX_BUFFER_EXTERNAL_SUPPORT) || + buffs->buffers_num > caps.decoder.max_buffer_num) + return -EINVAL; + + dec_buffs->is_linear = buffs->is_linear; + dec_buffs->buffers_num = buffs->buffers_num; + dec_buffs->buffers_size = buffs->buffers_size; + for (i = 0; i < dec_buffs->buffers_num; i++) + dec_buffs->handles[i] = buffs->handles[i]; + } + + return 0; +} + static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil, struct file *file, char __user *buf, size_t count, loff_t *ppos) @@ -902,7 +4027,7 @@ static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil, hcount = 3 + dfil->todo; if (hcount > count) hcount = count; - result = dvb_dmxdev_buffer_read(&dfil->buffer, + result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer, file->f_flags & O_NONBLOCK, buf, hcount, ppos); if (result < 0) { @@ -923,7 +4048,7 @@ static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil, } if (count > dfil->todo) count = dfil->todo; - result = dvb_dmxdev_buffer_read(&dfil->buffer, + result = dvb_dmxdev_buffer_read(dfil, &dfil->buffer, file->f_flags & O_NONBLOCK, buf, count, ppos); if (result < 0) @@ -942,12 +4067,36 @@ dvb_demux_read(struct file *file, char __user *buf, size_t count, if (mutex_lock_interruptible(&dmxdevfilter->mutex)) return -ERESTARTSYS; + if (dmxdevfilter->eos_state && + dvb_ringbuffer_empty(&dmxdevfilter->buffer)) { + mutex_unlock(&dmxdevfilter->mutex); + return 0; + } + if (dmxdevfilter->type == DMXDEV_TYPE_SEC) ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos); else - ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer, - file->f_flags & O_NONBLOCK, - buf, count, ppos); + ret = dvb_dmxdev_buffer_read(dmxdevfilter, + &dmxdevfilter->buffer, + file->f_flags & O_NONBLOCK, + buf, count, ppos); + + if (ret > 0) { + dvb_dmxdev_notify_data_read(dmxdevfilter, ret); + spin_lock_irq(&dmxdevfilter->dev->lock); + dvb_dmxdev_update_events(&dmxdevfilter->events, ret); + spin_unlock_irq(&dmxdevfilter->dev->lock); + + /* + * in PULL mode, we might be stalling on + * event queue, so need to wake-up waiters + */ + if 
(dmxdevfilter->dev->playback_mode == DMX_PB_MODE_PULL) + wake_up_all(&dmxdevfilter->buffer.queue); + } else if (ret == -EOVERFLOW) { + dvb_dmxdev_auto_flush_buffer(dmxdevfilter, + &dmxdevfilter->buffer); + } mutex_unlock(&dmxdevfilter->mutex); return ret; @@ -977,39 +4126,76 @@ static int dvb_demux_do_ioctl(struct file *file, mutex_unlock(&dmxdevfilter->mutex); break; - case DMX_STOP: + case DMX_STOP: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_filter_stop(dmxdevfilter); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_FILTER: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_PES_FILTER: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_BUFFER_SIZE: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_BUFFER_MODE: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } - ret = dvb_dmxdev_filter_stop(dmxdevfilter); + ret = dvb_dmxdev_set_buffer_mode(dmxdevfilter, + *(enum dmx_buffer_mode *)parg); mutex_unlock(&dmxdevfilter->mutex); break; - case DMX_SET_FILTER: + case DMX_SET_BUFFER: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } - ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg); + ret = dvb_dmxdev_set_buffer(dmxdevfilter, parg); mutex_unlock(&dmxdevfilter->mutex); break; - case DMX_SET_PES_FILTER: + case DMX_GET_BUFFER_STATUS: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } - ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg); + ret = dvb_dmxdev_get_buffer_status(dmxdevfilter, parg); mutex_unlock(&dmxdevfilter->mutex); break; - case DMX_SET_BUFFER_SIZE: + case DMX_RELEASE_DATA: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } - ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg); + ret = dvb_dmxdev_release_data(dmxdevfilter, arg); mutex_unlock(&dmxdevfilter->mutex); break; @@ -1021,9 +4207,6 @@ static int dvb_demux_do_ioctl(struct file *file, dmxdev->demux->get_pes_pids(dmxdev->demux, parg); break; -#if 0 - /* Not used upstream and never documented */ - case DMX_GET_CAPS: if (!dmxdev->demux->get_caps) { ret = -EINVAL; @@ -1033,13 +4216,65 @@ static int dvb_demux_do_ioctl(struct file *file, break; case DMX_SET_SOURCE: - if (!dmxdev->demux->set_source) { + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_set_source(dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_TS_PACKET_FORMAT: + if (!dmxdev->demux->set_tsp_format) { ret = -EINVAL; break; } - ret = dmxdev->demux->set_source(dmxdev->demux, parg); + + if (dmxdevfilter->state >= DMXDEV_STATE_GO) { + ret = -EBUSY; + break; + } + ret = dmxdev->demux->set_tsp_format( + dmxdev->demux, + *(enum dmx_tsp_format_t *)parg); + break; + + 
case DMX_SET_TS_OUT_FORMAT: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + + ret = dvb_dmxdev_set_tsp_out_format(dmxdevfilter, + *(enum dmx_tsp_format_t *)parg); + + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_DECODER_BUFFER_SIZE: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + + ret = dvb_dmxdev_set_decoder_buffer_size(dmxdevfilter, arg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_PLAYBACK_MODE: + ret = dvb_dmxdev_set_playback_mode( + dmxdevfilter, + *(enum dmx_playback_mode_t *)parg); + break; + + case DMX_GET_EVENT: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_get_event(dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); break; -#endif case DMX_GET_STC: if (!dmxdev->demux->get_stc) { @@ -1070,8 +4305,109 @@ static int dvb_demux_do_ioctl(struct file *file, mutex_unlock(&dmxdevfilter->mutex); break; + case DMX_SET_DECODER_BUFFER: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + ret = -ERESTARTSYS; + break; + } + ret = dvb_dmxdev_set_decoder_buffer(dmxdev, dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_SECURE_MODE: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + ret = -ERESTARTSYS; + break; + } + ret = dvb_dmxdev_set_secure_mode(dmxdev, dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_CIPHER: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + ret = -ERESTARTSYS; + break; + } + ret = dvb_dmxdev_set_cipher(dmxdev, dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_REUSE_DECODER_BUFFER: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_reuse_decoder_buf(dmxdevfilter, arg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_EVENTS_MASK: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_set_event_mask(dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_GET_EVENTS_MASK: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_get_event_mask(dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_INDEXING_PARAMS: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_set_indexing_params(dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_SET_TS_INSERTION: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_set_ts_insertion(dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_ABORT_TS_INSERTION: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_abort_ts_insertion(dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case DMX_GET_SCRAMBLING_BITS: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_get_scrambling_bits(dmxdevfilter, parg); + mutex_unlock(&dmxdevfilter->mutex); + break; + + case 
DMX_FLUSH_BUFFER: + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + ret = dvb_dmxdev_flush_buffer(dmxdevfilter); + mutex_unlock(&dmxdevfilter->mutex); + break; + default: - ret = -EINVAL; + pr_err("%s: unknown ioctl code (0x%x)\n", + __func__, cmd); + ret = -ENOIOCTLCMD; break; } mutex_unlock(&dmxdev->mutex); @@ -1084,13 +4420,78 @@ static long dvb_demux_ioctl(struct file *file, unsigned int cmd, return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl); } +#ifdef CONFIG_COMPAT + +struct dmx_set_ts_insertion32 { + __u32 identifier; + __u32 repetition_time; + compat_uptr_t ts_packets; + compat_size_t size; +}; + +static long dmx_set_ts_insertion32_wrapper(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int ret; + struct dmx_set_ts_insertion32 dmx_ts_insert32; + struct dmx_set_ts_insertion dmx_ts_insert; + + ret = copy_from_user(&dmx_ts_insert32, (void __user *)arg, + sizeof(dmx_ts_insert32)); + if (ret) { + pr_err( + "%s: copy dmx_set_ts_insertion32 from user failed, ret=%d\n", + __func__, ret); + return -EFAULT; + } + + memset(&dmx_ts_insert, 0, sizeof(dmx_ts_insert)); + dmx_ts_insert.identifier = dmx_ts_insert32.identifier; + dmx_ts_insert.repetition_time = dmx_ts_insert32.repetition_time; + dmx_ts_insert.ts_packets = compat_ptr(dmx_ts_insert32.ts_packets); + dmx_ts_insert.size = dmx_ts_insert32.size; + + ret = dvb_demux_do_ioctl(file, DMX_SET_TS_INSERTION, &dmx_ts_insert); + + return ret; +} + +#define DMX_SET_TS_INSERTION32 _IOW('o', 70, struct dmx_set_ts_insertion32) + +/* + * compat ioctl is called whenever compatibility is required, i.e when a 32bit + * process calls an ioctl for a 64bit kernel. + */ +static long dvb_demux_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + long ret = 0; + + switch (cmd) { + case DMX_SET_TS_INSERTION32: + ret = dmx_set_ts_insertion32_wrapper(file, cmd, arg); + break; + case DMX_SET_TS_INSERTION: + pr_err("%s: 64bit ioctl code (0x%lx) used by 32bit userspace\n", + __func__, DMX_SET_TS_INSERTION); + ret = -ENOIOCTLCMD; + break; + default: + /* use regular ioctl */ + ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl); + } + + return ret; +} +#endif + static unsigned int dvb_demux_poll(struct file *file, poll_table *wait) { struct dmxdev_filter *dmxdevfilter = file->private_data; unsigned int mask = 0; - if ((!dmxdevfilter) || dmxdevfilter->dev->exit) - return POLLERR; + if (!dmxdevfilter) + return -EINVAL; poll_wait(file, &dmxdevfilter->buffer.queue, wait); @@ -1099,20 +4500,80 @@ static unsigned int dvb_demux_poll(struct file *file, poll_table *wait) dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT) return 0; - if (dmxdevfilter->buffer.error) - mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR); + if (dmxdevfilter->buffer.error) { + mask |= (POLLIN | POLLRDNORM | POLLERR); + if (dmxdevfilter->buffer.error == -EOVERFLOW) + mask |= POLLPRI; + } if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer)) - mask |= (POLLIN | POLLRDNORM | POLLPRI); + mask |= (POLLIN | POLLRDNORM); + + if (dmxdevfilter->events.wakeup_events_counter >= + dmxdevfilter->events.event_mask.wakeup_threshold) + mask |= POLLPRI; return mask; } +static int dvb_demux_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct dmxdev_filter *dmxdevfilter = filp->private_data; + struct dmxdev *dmxdev = dmxdevfilter->dev; + int ret; + int vma_size; + int buffer_size; + + vma_size = vma->vm_end - vma->vm_start; + + if (vma->vm_flags & VM_WRITE) + return -EINVAL; + + if 
(mutex_lock_interruptible(&dmxdev->mutex)) + return -ERESTARTSYS; + + if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { + mutex_unlock(&dmxdev->mutex); + return -ERESTARTSYS; + } + + if ((!dmxdevfilter->buffer.data) || + (dmxdevfilter->buffer_mode == DMX_BUFFER_MODE_EXTERNAL)) { + mutex_unlock(&dmxdevfilter->mutex); + mutex_unlock(&dmxdev->mutex); + return -EINVAL; + } + + /* Make sure requested mapping is not larger than buffer size */ + buffer_size = dmxdevfilter->buffer.size + (PAGE_SIZE-1); + buffer_size = buffer_size & ~(PAGE_SIZE-1); + + if (vma_size != buffer_size) { + mutex_unlock(&dmxdevfilter->mutex); + mutex_unlock(&dmxdev->mutex); + return -EINVAL; + } + + ret = remap_vmalloc_range(vma, dmxdevfilter->buffer.data, 0); + if (ret) { + mutex_unlock(&dmxdevfilter->mutex); + mutex_unlock(&dmxdev->mutex); + return ret; + } + + vma->vm_flags |= VM_DONTDUMP; + vma->vm_flags |= VM_DONTEXPAND; + + mutex_unlock(&dmxdevfilter->mutex); + mutex_unlock(&dmxdev->mutex); + + return 0; +} + static int dvb_demux_release(struct inode *inode, struct file *file) { struct dmxdev_filter *dmxdevfilter = file->private_data; struct dmxdev *dmxdev = dmxdevfilter->dev; - int ret; ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter); @@ -1120,6 +4581,8 @@ static int dvb_demux_release(struct inode *inode, struct file *file) mutex_lock(&dmxdev->mutex); dmxdev->dvbdev->users--; if(dmxdev->dvbdev->users==1 && dmxdev->exit==1) { + fops_put(file->f_op); + file->f_op = NULL; mutex_unlock(&dmxdev->mutex); wake_up(&dmxdev->dvbdev->wait_queue); } else @@ -1136,6 +4599,10 @@ static const struct file_operations dvb_demux_fops = { .release = dvb_demux_release, .poll = dvb_demux_poll, .llseek = default_llseek, + .mmap = dvb_demux_mmap, +#ifdef CONFIG_COMPAT + .compat_ioctl = dvb_demux_compat_ioctl, +#endif }; static const struct dvb_device dvbdev_demux = { @@ -1161,11 +4628,44 @@ static int dvb_dvr_do_ioctl(struct file *file, switch (cmd) { case DMX_SET_BUFFER_SIZE: - ret = dvb_dvr_set_buffer_size(dmxdev, arg); + ret = dvb_dvr_set_buffer_size(dmxdev, file->f_flags, arg); + break; + + case DMX_SET_BUFFER_MODE: + ret = dvb_dvr_set_buffer_mode(dmxdev, file->f_flags, + *(enum dmx_buffer_mode *)parg); + break; + + case DMX_SET_BUFFER: + ret = dvb_dvr_set_buffer(dmxdev, file->f_flags, parg); + break; + + case DMX_GET_BUFFER_STATUS: + ret = dvb_dvr_get_buffer_status(dmxdev, file->f_flags, parg); + break; + + case DMX_RELEASE_DATA: + ret = dvb_dvr_release_data(dmxdev, file->f_flags, arg); + break; + + case DMX_FEED_DATA: + ret = dvb_dvr_feed_data(dmxdev, file->f_flags, arg); + break; + + case DMX_GET_EVENT: + ret = dvb_dvr_get_event(dmxdev, file->f_flags, parg); + break; + + case DMX_PUSH_OOB_COMMAND: + ret = dvb_dvr_push_oob_cmd(dmxdev, file->f_flags, parg); + break; + + case DMX_FLUSH_BUFFER: + ret = dvb_dvr_flush_buffer(dmxdev, file->f_flags); break; default: - ret = -EINVAL; + ret = -ENOIOCTLCMD; break; } mutex_unlock(&dmxdev->mutex); @@ -1173,10 +4673,18 @@ static int dvb_dvr_do_ioctl(struct file *file, } static long dvb_dvr_ioctl(struct file *file, - unsigned int cmd, unsigned long arg) + unsigned int cmd, unsigned long arg) +{ + return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl); +} + +#ifdef CONFIG_COMPAT +static long dvb_dvr_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl); } +#endif static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait) { @@ -1184,21 +4692,31 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table 
*wait) struct dmxdev *dmxdev = dvbdev->priv; unsigned int mask = 0; - dprintk("function : %s\n", __func__); - - if (dmxdev->exit) - return POLLERR; - - poll_wait(file, &dmxdev->dvr_buffer.queue, wait); + pr_debug("function : %s\n", __func__); if ((file->f_flags & O_ACCMODE) == O_RDONLY) { - if (dmxdev->dvr_buffer.error) - mask |= (POLLIN | POLLRDNORM | POLLPRI | POLLERR); + poll_wait(file, &dmxdev->dvr_buffer.queue, wait); + + if (dmxdev->dvr_buffer.error) { + mask |= (POLLIN | POLLRDNORM | POLLERR); + if (dmxdev->dvr_buffer.error == -EOVERFLOW) + mask |= POLLPRI; + } if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer)) - mask |= (POLLIN | POLLRDNORM | POLLPRI); - } else - mask |= (POLLOUT | POLLWRNORM | POLLPRI); + mask |= (POLLIN | POLLRDNORM); + + if (dmxdev->dvr_output_events.wakeup_events_counter >= + dmxdev->dvr_output_events.event_mask.wakeup_threshold) + mask |= POLLPRI; + } else { + poll_wait(file, &dmxdev->dvr_input_buffer.queue, wait); + if (dmxdev->dvr_input_buffer.error) + mask |= (POLLOUT | POLLRDNORM | POLLPRI | POLLERR); + + if (dvb_ringbuffer_free(&dmxdev->dvr_input_buffer)) + mask |= (POLLOUT | POLLRDNORM | POLLPRI); + } return mask; } @@ -1207,7 +4725,11 @@ static const struct file_operations dvb_dvr_fops = { .owner = THIS_MODULE, .read = dvb_dvr_read, .write = dvb_dvr_write, + .mmap = dvb_dvr_mmap, .unlocked_ioctl = dvb_dvr_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = dvb_dvr_compat_ioctl, +#endif .open = dvb_dvr_open, .release = dvb_dvr_release, .poll = dvb_dvr_poll, @@ -1223,9 +4745,94 @@ static const struct dvb_device dvbdev_dvr = { #endif .fops = &dvb_dvr_fops }; + + +/** + * debugfs service to print active filters information. + */ +static int dvb_dmxdev_dbgfs_print(struct seq_file *s, void *p) +{ + int i; + struct dmxdev *dmxdev = s->private; + struct dmxdev_filter *filter; + int active_count = 0; + struct dmx_buffer_status buffer_status; + struct dmx_scrambling_bits scrambling_bits; + static const char * const pes_feeds[] = {"DEC", "PES", "DVR", "REC"}; + int ret; + + if (!dmxdev) + return 0; + + for (i = 0; i < dmxdev->filternum; i++) { + filter = &dmxdev->filter[i]; + if (filter->state >= DMXDEV_STATE_GO) { + active_count++; + + seq_printf(s, "filter_%02d - ", i); + + if (filter->type == DMXDEV_TYPE_SEC) { + seq_puts(s, "type: SEC, "); + seq_printf(s, "PID %04d ", + filter->params.sec.pid); + scrambling_bits.pid = filter->params.sec.pid; + } else { + seq_printf(s, "type: %s, ", + pes_feeds[filter->params.pes.output]); + seq_printf(s, "PID: %04d ", + filter->params.pes.pid); + scrambling_bits.pid = filter->params.pes.pid; + } + + dvb_dmxdev_get_scrambling_bits(filter, + &scrambling_bits); + + if (filter->type == DMXDEV_TYPE_PES && + filter->params.pes.output == DMX_OUT_TS_TAP) + ret = dvb_dvr_get_buffer_status(dmxdev, + O_RDONLY, &buffer_status); + else + ret = dvb_dmxdev_get_buffer_status(filter, + &buffer_status); + if (!ret) { + seq_printf(s, "size: %08d, ", + buffer_status.size); + seq_printf(s, "fullness: %08d, ", + buffer_status.fullness); + seq_printf(s, "error: %d, ", + buffer_status.error); + } + + seq_printf(s, "scramble: %d, ", + scrambling_bits.value); + seq_printf(s, "secured: %d\n", + filter->sec_mode.is_secured); + } + } + + if (!active_count) + seq_puts(s, "No active filters\n"); + + return 0; +} + +static int dvb_dmxdev_dbgfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, dvb_dmxdev_dbgfs_print, inode->i_private); +} + +static const struct file_operations dbgfs_filters_fops = { + .open = dvb_dmxdev_dbgfs_open, + .read = 
seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter) { int i; + struct dmx_caps caps; if (dmxdev->demux->open(dmxdev->demux) < 0) return -EUSERS; @@ -1234,8 +4841,12 @@ int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter) if (!dmxdev->filter) return -ENOMEM; + dmxdev->playback_mode = DMX_PB_MODE_PUSH; + dmxdev->demux->dvr_input_protected = 0; + mutex_init(&dmxdev->mutex); spin_lock_init(&dmxdev->lock); + spin_lock_init(&dmxdev->dvr_in_lock); for (i = 0; i < dmxdev->filternum; i++) { dmxdev->filter[i].dev = dmxdev; dmxdev->filter[i].buffer.data = NULL; @@ -1244,11 +4855,24 @@ int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter) } dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev, - DVB_DEVICE_DEMUX, dmxdev->filternum); + DVB_DEVICE_DEMUX, 0); dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr, - dmxdev, DVB_DEVICE_DVR, dmxdev->filternum); + dmxdev, DVB_DEVICE_DVR, 0); dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192); + dvb_ringbuffer_init(&dmxdev->dvr_input_buffer, NULL, 8192); + + /* Disable auto buffer flushing if plugin does not allow it */ + if (dmxdev->demux->get_caps) { + dmxdev->demux->get_caps(dmxdev->demux, &caps); + if (!(caps.caps & DMX_CAP_AUTO_BUFFER_FLUSH)) + overflow_auto_flush = 0; + } + + if (dmxdev->demux->debugfs_demux_dir) + debugfs_create_file("filters", 0444, + dmxdev->demux->debugfs_demux_dir, dmxdev, + &dbgfs_filters_fops); return 0; } diff --git a/drivers/media/dvb-core/dmxdev.h b/drivers/media/dvb-core/dmxdev.h index 48c6cf92ab99..ad007f4fb9ac 100644 --- a/drivers/media/dvb-core/dmxdev.h +++ b/drivers/media/dvb-core/dmxdev.h @@ -33,7 +33,7 @@ #include #include #include - +#include #include #include "dvbdev.h" @@ -57,10 +57,87 @@ enum dmxdev_state { struct dmxdev_feed { u16 pid; + struct dmx_indexing_params idx_params; + struct dmx_cipher_operations cipher_ops; struct dmx_ts_feed *ts; struct list_head next; }; +struct dmxdev_sec_feed { + struct dmx_section_feed *feed; + struct dmx_cipher_operations cipher_ops; +}; + +struct dmxdev_events_queue { + /* + * indices used to manage events queue. + * read_index advanced when relevant data is read + * from the buffer. + * notified_index is the index from which next events + * are returned. + * read_index <= notified_index <= write_index + * + * If user reads the data without getting the respective + * event first, the read/notified indices are updated + * automatically to reflect the actual data that exist + * in the buffer. + */ + u32 read_index; + u32 write_index; + u32 notified_index; + + /* Bytes read by user without having respective event in the queue */ + u32 bytes_read_no_event; + + /* internal tracking of PES and recording events */ + u32 current_event_data_size; + u32 current_event_start_offset; + + /* current setting of the events masking */ + struct dmx_events_mask event_mask; + + /* + * indicates if an event used for data-reading from demux + * filter is enabled or not. These are events on which + * user may wait for before calling read() on the demux filter. 
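+	 * A filter's poll() wake-up is handled separately: dvb_demux_poll()
+	 * raises POLLPRI once wakeup_events_counter (below) reaches
+	 * event_mask.wakeup_threshold.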
+ */ + int data_read_event_masked; + + /* + * holds the current number of pending events in the + * events queue that are considered as a wake-up source + */ + u32 wakeup_events_counter; + + struct dmx_filter_event queue[DMX_EVENT_QUEUE_SIZE]; +}; + +#define DMX_MIN_INSERTION_REPETITION_TIME 25 /* in msec */ +struct ts_insertion_buffer { + /* work scheduled for insertion of this buffer */ + struct delayed_work dwork; + + struct list_head next; + + /* buffer holding TS packets for insertion */ + char *buffer; + + /* buffer size */ + size_t size; + + /* buffer ID from user */ + u32 identifier; + + /* repetition time for the buffer insertion */ + u32 repetition_time; + + /* the recording filter to which this buffer belongs */ + struct dmxdev_filter *dmxdevfilter; + + /* indication whether insertion should be aborted */ + int abort; +}; + struct dmxdev_filter { union { struct dmx_section_filter *sec; @@ -69,7 +146,7 @@ struct dmxdev_filter { union { /* list of TS and PES feeds (struct dmxdev_feed) */ struct list_head ts; - struct dmx_section_feed *sec; + struct dmxdev_sec_feed sec; } feed; union { @@ -77,19 +154,37 @@ struct dmxdev_filter { struct dmx_pes_filter_params pes; } params; + struct dmxdev_events_queue events; + enum dmxdev_type type; enum dmxdev_state state; struct dmxdev *dev; struct dvb_ringbuffer buffer; + void *priv_buff_handle; + enum dmx_buffer_mode buffer_mode; struct mutex mutex; + /* for recording output */ + enum dmx_tsp_format_t dmx_tsp_format; + u32 rec_chunk_size; + + /* list of buffers used for insertion (struct ts_insertion_buffer) */ + struct list_head insertion_buffers; + + /* End-of-stream indication has been received */ + int eos_state; + /* only for sections */ struct timer_list timer; int todo; u8 secheader[3]; -}; + struct dmx_secure_mode sec_mode; + + /* Decoder buffer(s) related */ + struct dmx_decoder_buffers decoder_buffers; +}; struct dmxdev { struct dvb_device *dvbdev; @@ -100,18 +195,52 @@ struct dmxdev { int filternum; int capabilities; +#define DMXDEV_CAP_DUPLEX 0x01 + + enum dmx_playback_mode_t playback_mode; + dmx_source_t source; unsigned int exit:1; -#define DMXDEV_CAP_DUPLEX 1 + unsigned int dvr_in_exit:1; + unsigned int dvr_processing_input:1; + struct dmx_frontend *dvr_orig_fe; struct dvb_ringbuffer dvr_buffer; + void *dvr_priv_buff_handle; + enum dmx_buffer_mode dvr_buffer_mode; + struct dmxdev_events_queue dvr_output_events; + struct dmxdev_filter *dvr_feed; + int dvr_feeds_count; + + struct dvb_ringbuffer dvr_input_buffer; + enum dmx_buffer_mode dvr_input_buffer_mode; + struct task_struct *dvr_input_thread; + /* DVR commands (data feed / OOB command) queue */ + struct dvb_ringbuffer dvr_cmd_buffer; + #define DVR_BUFFER_SIZE (10*188*1024) struct mutex mutex; spinlock_t lock; + spinlock_t dvr_in_lock; +}; + +enum dvr_cmd { + DVR_DATA_FEED_CMD, + DVR_OOB_CMD }; +struct dvr_command { + enum dvr_cmd type; + union { + struct dmx_oob_command oobcmd; + size_t data_feed_count; + } cmd; +}; + +#define DVR_CMDS_BUFFER_SIZE (sizeof(struct dvr_command)*500) + int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *); void dvb_dmxdev_release(struct dmxdev *dmxdev); diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c index a0cf7b0d03e8..474684f3d9fd 100644 --- a/drivers/media/dvb-core/dvb_demux.c +++ b/drivers/media/dvb-core/dvb_demux.c @@ -55,11 +55,151 @@ module_param(dvb_demux_feed_err_pkts, int, 0644); MODULE_PARM_DESC(dvb_demux_feed_err_pkts, "when set to 0, drop packets with the TEI bit set (1 by default)"); +/* 
counter advancing for each new dvb-demux device */ +static int dvb_demux_index; + +static int dvb_demux_performancecheck; +module_param(dvb_demux_performancecheck, int, 0644); +MODULE_PARM_DESC(dvb_demux_performancecheck, + "enable transport stream performance check, reported through debugfs"); + #define dprintk_tscheck(x...) do { \ if (dvb_demux_tscheck && printk_ratelimit()) \ printk(x); \ } while (0) +static const struct dvb_dmx_video_patterns mpeg2_seq_hdr = { + {0x00, 0x00, 0x01, 0xB3}, + {0xFF, 0xFF, 0xFF, 0xFF}, + 4, + DMX_IDX_MPEG_SEQ_HEADER +}; + +static const struct dvb_dmx_video_patterns mpeg2_gop = { + {0x00, 0x00, 0x01, 0xB8}, + {0xFF, 0xFF, 0xFF, 0xFF}, + 4, + DMX_IDX_MPEG_GOP +}; + +static const struct dvb_dmx_video_patterns mpeg2_iframe = { + {0x00, 0x00, 0x01, 0x00, 0x00, 0x08}, + {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, + 6, + DMX_IDX_MPEG_I_FRAME_START +}; + +static const struct dvb_dmx_video_patterns mpeg2_pframe = { + {0x00, 0x00, 0x01, 0x00, 0x00, 0x10}, + {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, + 6, + DMX_IDX_MPEG_P_FRAME_START +}; + +static const struct dvb_dmx_video_patterns mpeg2_bframe = { + {0x00, 0x00, 0x01, 0x00, 0x00, 0x18}, + {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x38}, + 6, + DMX_IDX_MPEG_B_FRAME_START +}; + +static const struct dvb_dmx_video_patterns h264_sps = { + {0x00, 0x00, 0x01, 0x07}, + {0xFF, 0xFF, 0xFF, 0x1F}, + 4, + DMX_IDX_H264_SPS +}; + +static const struct dvb_dmx_video_patterns h264_pps = { + {0x00, 0x00, 0x01, 0x08}, + {0xFF, 0xFF, 0xFF, 0x1F}, + 4, + DMX_IDX_H264_PPS +}; + +static const struct dvb_dmx_video_patterns h264_idr = { + {0x00, 0x00, 0x01, 0x05, 0x80}, + {0xFF, 0xFF, 0xFF, 0x1F, 0x80}, + 5, + DMX_IDX_H264_IDR_START +}; + +static const struct dvb_dmx_video_patterns h264_non_idr = { + {0x00, 0x00, 0x01, 0x01, 0x80}, + {0xFF, 0xFF, 0xFF, 0x1F, 0x80}, + 5, + DMX_IDX_H264_NON_IDR_START +}; + +/* + * Forbidden (1 bit) + NAL idc (2 bits) + NAL type (5 bits) + * I-Slice NAL idc = 3, NAL type = 5, 01100101 mask 0x7F + */ +static const struct dvb_dmx_video_patterns h264_idr_islice = { + {0x00, 0x00, 0x01, 0x65, 0x80}, + {0xFF, 0xFF, 0xFF, 0x7F, 0x80}, + 5, + DMX_IDX_H264_IDR_ISLICE_START +}; + +/* + * Forbidden (1 bit) + NAL idc (2 bits) + NAL type (5 bits) + * P-Slice NAL idc = 2, NAL type = 1, 01000001 mask 0x7F + */ +static const struct dvb_dmx_video_patterns h264_non_idr_pslice = { + {0x00, 0x00, 0x01, 0x41, 0x80}, + {0xFF, 0xFF, 0xFF, 0x7F, 0x80}, + 5, + DMX_IDX_H264_NON_IDR_PSLICE_START +}; + +/* + * Forbidden (1 bit) + NAL idc (2 bits) + NAL type (5 bits) + * B-Slice NAL idc = 0, NAL type = 1, 00000001 mask 0x7F + */ +static const struct dvb_dmx_video_patterns h264_non_idr_bslice = { + {0x00, 0x00, 0x01, 0x01, 0x80}, + {0xFF, 0xFF, 0xFF, 0x7F, 0x80}, + 5, + DMX_IDX_H264_NON_IDR_BSLICE_START +}; + +static const struct dvb_dmx_video_patterns h264_non_access_unit_del = { + {0x00, 0x00, 0x01, 0x09}, + {0xFF, 0xFF, 0xFF, 0x1F}, + 4, + DMX_IDX_H264_ACCESS_UNIT_DEL +}; + +static const struct dvb_dmx_video_patterns h264_non_sei = { + {0x00, 0x00, 0x01, 0x06}, + {0xFF, 0xFF, 0xFF, 0x1F}, + 4, + DMX_IDX_H264_SEI +}; + +static const struct dvb_dmx_video_patterns vc1_seq_hdr = { + {0x00, 0x00, 0x01, 0x0F}, + {0xFF, 0xFF, 0xFF, 0xFF}, + 4, + DMX_IDX_VC1_SEQ_HEADER +}; + +static const struct dvb_dmx_video_patterns vc1_entry_point = { + {0x00, 0x00, 0x01, 0x0E}, + {0xFF, 0xFF, 0xFF, 0xFF}, + 4, + DMX_IDX_VC1_ENTRY_POINT +}; + +static const struct dvb_dmx_video_patterns vc1_frame = { + {0x00, 0x00, 0x01, 0x0D}, + {0xFF, 0xFF, 0xFF, 0xFF}, + 4, + 
DMX_IDX_VC1_FRAME_START +}; + + /****************************************************************************** * static inlined helper functions ******************************************************************************/ @@ -69,9 +209,9 @@ static inline u16 section_length(const u8 *buf) return 3 + ((buf[1] & 0x0f) << 8) + buf[2]; } -static inline u16 ts_pid(const u8 *buf) +static inline u8 ts_scrambling_ctrl(const u8 *buf) { - return ((buf[1] & 0x1f) << 8) + buf[2]; + return (buf[3] >> 6) & 0x3; } static inline u8 payload(const u8 *tsp) @@ -100,37 +240,355 @@ static void dvb_dmx_memcopy(struct dvb_demux_feed *f, u8 *d, const u8 *s, memcpy(d, s, len); } +static u32 dvb_dmx_calc_time_delta(ktime_t past_time) +{ + ktime_t curr_time = ktime_get(); + s64 delta_time_us = ktime_us_delta(curr_time, past_time); + + return (u32)delta_time_us; +} + /****************************************************************************** * Software filter functions ******************************************************************************/ +/* + * Check if two patterns are identical, taking mask into consideration. + * @pattern1: the first byte pattern to compare. + * @pattern2: the second byte pattern to compare. + * @mask: the bit mask to use. + * @pattern_size: the length of both patterns and the mask, in bytes. + * + * Return: 1 if patterns match, 0 otherwise. + */ +static inline int dvb_dmx_patterns_match(const u8 *pattern1, const u8 *pattern2, + const u8 *mask, size_t pattern_size) +{ + int i; + + /* + * Assumption: it is OK to access pattern1, pattern2 and mask. + * This function performs no sanity checks to keep things fast. + */ + + for (i = 0; i < pattern_size; i++) + if ((pattern1[i] & mask[i]) != (pattern2[i] & mask[i])) + return 0; + + return 1; +} + +/* + * dvb_dmx_video_pattern_search - + * search for framing patterns in a given buffer. + * + * Optimized version: first search for a common substring, e.g. 0x00 0x00 0x01. + * If this string is found, go over all the given patterns (all must start + * with this string) and search for their ending in the buffer. + * + * Assumption: the patterns we look for do not spread over more than two + * buffers. + * + * @paterns: the full patterns information to look for. + * @patterns_num: the number of patterns to look for. + * @buf: the buffer to search. + * @buf_size: the size of the buffer to search. we search the entire buffer. + * @prefix_size_masks: a bit mask (per pattern) of possible prefix sizes to use + * when searching for a pattern that started at the last buffer. + * Updated in this function for use in the next lookup. + * @results: lookup results (offset, type, used_prefix_size) per found pattern, + * up to DVB_DMX_MAX_FOUND_PATTERNS. + * + * Return: + * Number of patterns found (up to DVB_DMX_MAX_FOUND_PATTERNS). + * 0 if pattern was not found. + * error value on failure. 
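+ *
+ * Typical use (sketch only; handle_entry() stands for whatever the caller
+ * does with each returned entry, everything else is defined in this file):
+ *
+ *	const struct dvb_dmx_video_patterns *pat[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+ *	struct dvb_dmx_video_patterns_results res;
+ *	int n, i;
+ *
+ *	pat[0] = dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+ *	n = dvb_dmx_video_pattern_search(pat, 1, buf, len,
+ *			&feed->prefix_size, &res);
+ *	for (i = 0; i < n; i++)
+ *		handle_entry(res.info[i].offset, res.info[i].type);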
+ */ +int dvb_dmx_video_pattern_search( + const struct dvb_dmx_video_patterns + *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM], + int patterns_num, + const u8 *buf, + size_t buf_size, + struct dvb_dmx_video_prefix_size_masks *prefix_size_masks, + struct dvb_dmx_video_patterns_results *results) +{ + int i, j; + unsigned int current_size; + u32 prefix; + int found = 0; + int start_offset = 0; + /* the starting common substring to look for */ + u8 string[] = {0x00, 0x00, 0x01}; + /* the mask for the starting string */ + u8 string_mask[] = {0xFF, 0xFF, 0xFF}; + /* the size of the starting string (in bytes) */ + size_t string_size = 3; + + if ((patterns == NULL) || (patterns_num <= 0) || (buf == NULL)) + return -EINVAL; + + memset(results, 0, sizeof(struct dvb_dmx_video_patterns_results)); + + /* + * handle prefix - disregard string, simply check all patterns, + * looking for a matching suffix at the very beginning of the buffer. + */ + for (j = 0; (j < patterns_num) && !found; j++) { + prefix = prefix_size_masks->size_mask[j]; + current_size = 32; + while (prefix) { + if (prefix & (0x1 << (current_size - 1))) { + /* + * check that we don't look further + * than buf_size boundary + */ + if ((int)(patterns[j]->size - current_size) > + buf_size) + break; + + if (dvb_dmx_patterns_match( + (patterns[j]->pattern + current_size), + buf, (patterns[j]->mask + current_size), + (patterns[j]->size - current_size))) { + + /* + * pattern found using prefix at the + * very beginning of the buffer, so + * offset is 0, but we already zeroed + * everything in the beginning of the + * function. that's why the next line + * is commented. + */ + /* results->info[found].offset = 0; */ + results->info[found].type = + patterns[j]->type; + results->info[found].used_prefix_size = + current_size; + found++; + /* + * save offset to start looking from + * in the buffer, to avoid reusing the + * data of a pattern we already found. + */ + start_offset = (patterns[j]->size - + current_size); + + if (found >= DVB_DMX_MAX_FOUND_PATTERNS) + goto next_prefix_lookup; + /* + * we don't want to search for the same + * pattern with several possible prefix + * sizes if we have already found it, + * so we break from the inner loop. + * since we incremented 'found', we + * will not search for additional + * patterns using a prefix - that would + * imply ambiguous patterns where one + * pattern can be included in another. + * the for loop will exit. + */ + break; + } + } + prefix &= ~(0x1 << (current_size - 1)); + current_size--; + } + } + + /* + * Search buffer for entire pattern, starting with the string. + * Note the external for loop does not execute if buf_size is + * smaller than string_size (the cast to int is required, since + * size_t is unsigned). + */ + for (i = start_offset; i < (int)(buf_size - string_size + 1); i++) { + if (dvb_dmx_patterns_match(string, (buf + i), string_mask, + string_size)) { + /* now search for patterns: */ + for (j = 0; j < patterns_num; j++) { + /* avoid overflow to next buffer */ + if ((i + patterns[j]->size) > buf_size) + continue; + + if (dvb_dmx_patterns_match( + (patterns[j]->pattern + string_size), + (buf + i + string_size), + (patterns[j]->mask + string_size), + (patterns[j]->size - string_size))) { + + results->info[found].offset = i; + results->info[found].type = + patterns[j]->type; + /* + * save offset to start next prefix + * lookup, to avoid reusing the data + * of any pattern we already found. 
+ */ + if ((i + patterns[j]->size) > + start_offset) + start_offset = (i + + patterns[j]->size); + /* + * did not use a prefix to find this + * pattern, but we zeroed everything + * in the beginning of the function. + * So no need to zero used_prefix_size + * for results->info[found] + */ + + found++; + if (found >= DVB_DMX_MAX_FOUND_PATTERNS) + goto next_prefix_lookup; + /* + * theoretically we don't have to break + * here, but we don't want to search + * for the other matching patterns on + * the very same same place in the + * buffer. That would mean the + * (pattern & mask) combinations are + * not unique. So we break from inner + * loop and move on to the next place + * in the buffer. + */ + break; + } + } + } + } + +next_prefix_lookup: + /* check for possible prefix sizes for the next buffer */ + for (j = 0; j < patterns_num; j++) { + prefix_size_masks->size_mask[j] = 0; + for (i = 1; i < patterns[j]->size; i++) { + /* + * avoid looking outside of the buffer + * or reusing previously used data. + */ + if (i > (buf_size - start_offset)) + break; + + if (dvb_dmx_patterns_match(patterns[j]->pattern, + (buf + buf_size - i), + patterns[j]->mask, i)) { + prefix_size_masks->size_mask[j] |= + (1 << (i - 1)); + } + } + } + + return found; +} +EXPORT_SYMBOL(dvb_dmx_video_pattern_search); + +/** + * dvb_dmx_notify_section_event() - Notify demux event for all filters of a + * specified section feed. + * + * @feed: dvb_demux_feed object + * @event: demux event to notify + * @should_lock: specifies whether the function should lock the demux + * + * Caller is responsible for locking the demux properly, either by doing the + * locking itself and setting 'should_lock' to 0, or have the function do it + * by setting 'should_lock' to 1. + */ +int dvb_dmx_notify_section_event(struct dvb_demux_feed *feed, + struct dmx_data_ready *event, int should_lock) +{ + struct dvb_demux_filter *f; + + if (feed == NULL || event == NULL || feed->type != DMX_TYPE_SEC) + return -EINVAL; + + if (!should_lock && !spin_is_locked(&feed->demux->lock)) + return -EINVAL; + + if (should_lock) + spin_lock(&feed->demux->lock); + + f = feed->filter; + while (f && feed->feed.sec.is_filtering) { + feed->data_ready_cb.sec(&f->filter, event); + f = f->next; + } + + if (should_lock) + spin_unlock(&feed->demux->lock); + + return 0; +} +EXPORT_SYMBOL(dvb_dmx_notify_section_event); + +static int dvb_dmx_check_pes_end(struct dvb_demux_feed *feed) +{ + struct dmx_data_ready data; + + if (!feed->pusi_seen) + return 0; + + data.status = DMX_OK_PES_END; + data.data_length = 0; + data.pes_end.start_gap = 0; + data.pes_end.actual_length = feed->peslen; + data.pes_end.disc_indicator_set = 0; + data.pes_end.pes_length_mismatch = 0; + data.pes_end.stc = 0; + data.pes_end.tei_counter = feed->pes_tei_counter; + data.pes_end.cont_err_counter = feed->pes_cont_err_counter; + data.pes_end.ts_packets_num = feed->pes_ts_packets_num; + + return feed->data_ready_cb.ts(&feed->feed.ts, &data); +} + static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed, const u8 *buf) { int count = payload(buf); int p; - //int ccok; - //u8 cc; + int ccok; + u8 cc; + int ret; if (count == 0) return -1; p = 188 - count; - /* cc = buf[3] & 0x0f; - ccok = ((feed->cc + 1) & 0x0f) == cc; + if (feed->first_cc) + ccok = 1; + else + ccok = ((feed->cc + 1) & 0x0f) == cc; + + feed->first_cc = 0; feed->cc = cc; - if (!ccok) - printk("missed packet!\n"); - */ - if (buf[1] & 0x40) // PUSI ? - feed->peslen = 0xfffa; + /* PUSI ? 
*/ + if (buf[1] & 0x40) { + dvb_dmx_check_pes_end(feed); + feed->pusi_seen = 1; + feed->peslen = 0; + feed->pes_tei_counter = 0; + feed->pes_cont_err_counter = 0; + feed->pes_ts_packets_num = 0; + } + + if (feed->pusi_seen == 0) + return 0; + + ret = feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts); - feed->peslen += count; + /* Verify TS packet was copied successfully */ + if (!ret) { + feed->pes_cont_err_counter += !ccok; + feed->pes_tei_counter += (buf[1] & 0x80) ? 1 : 0; + feed->pes_ts_packets_num++; + feed->peslen += count; + } - return feed->cb.ts(&buf[p], count, NULL, 0, &feed->feed.ts); + return ret; } static int dvb_dmx_swfilter_sectionfilter(struct dvb_demux_feed *feed, @@ -169,10 +627,28 @@ static inline int dvb_dmx_swfilter_section_feed(struct dvb_demux_feed *feed) return 0; if (sec->check_crc) { + ktime_t pre_crc_time = ktime_set(0, 0); + + if (dvb_demux_performancecheck) + pre_crc_time = ktime_get(); + section_syntax_indicator = ((sec->secbuf[1] & 0x80) != 0); if (section_syntax_indicator && - demux->check_crc32(feed, sec->secbuf, sec->seclen)) + demux->check_crc32(feed, sec->secbuf, sec->seclen)) { + if (dvb_demux_performancecheck) + demux->total_crc_time += + dvb_dmx_calc_time_delta(pre_crc_time); + + /* Notify on CRC error */ + feed->cb.sec(NULL, 0, NULL, 0, + &f->filter); + return -1; + } + + if (dvb_demux_performancecheck) + demux->total_crc_time += + dvb_dmx_calc_time_delta(pre_crc_time); } do { @@ -287,7 +763,7 @@ static int dvb_dmx_swfilter_section_copy_dump(struct dvb_demux_feed *feed, return 0; } -static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, +static int dvb_dmx_swfilter_section_one_packet(struct dvb_demux_feed *feed, const u8 *buf) { u8 p, count; @@ -302,7 +778,16 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, p = 188 - count; /* payload start */ cc = buf[3] & 0x0f; - ccok = ((feed->cc + 1) & 0x0f) == cc; + if (feed->first_cc) + ccok = 1; + else + ccok = ((feed->cc + 1) & 0x0f) == cc; + + /* discard TS packets holding sections with TEI bit set */ + if (buf[1] & 0x80) + return -EINVAL; + + feed->first_cc = 0; feed->cc = cc; if (buf[3] & 0x20) { @@ -356,200 +841,915 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, return 0; } -static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed, - const u8 *buf) +/* + * dvb_dmx_swfilter_section_packet - wrapper for section filtering of single + * TS packet. + * + * @feed: dvb demux feed + * @buf: buffer containing the TS packet + * @should_lock: specifies demux locking semantics: if not set, proper demux + * locking is expected to have been done by the caller. 
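+ *
+ * If 'should_lock' is 0, the function verifies via spin_is_locked() that
+ * demux->lock is indeed held and returns -EINVAL otherwise; if it is 1,
+ * the function takes and releases demux->lock itself around
+ * dvb_dmx_swfilter_section_one_packet().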
+ * + * Return error status + */ +int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, + const u8 *buf, int should_lock) { - switch (feed->type) { - case DMX_TYPE_TS: - if (!feed->feed.ts.is_filtering) - break; - if (feed->ts_type & TS_PACKET) { - if (feed->ts_type & TS_PAYLOAD_ONLY) - dvb_dmx_swfilter_payload(feed, buf); - else - feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); - } - if (feed->ts_type & TS_DECODER) - if (feed->demux->write_to_decoder) - feed->demux->write_to_decoder(feed, buf, 188); - break; - - case DMX_TYPE_SEC: - if (!feed->feed.sec.is_filtering) - break; - if (dvb_dmx_swfilter_section_packet(feed, buf) < 0) - feed->feed.sec.seclen = feed->feed.sec.secbufp = 0; - break; + int ret; - default: - break; + if (!should_lock && !spin_is_locked(&feed->demux->lock)) { + pr_err("%s: demux spinlock should have been locked\n", + __func__); + return -EINVAL; } -} -#define DVR_FEED(f) \ - (((f)->type == DMX_TYPE_TS) && \ - ((f)->feed.ts.is_filtering) && \ - (((f)->ts_type & (TS_PACKET | TS_DEMUX)) == TS_PACKET)) + if (should_lock) + spin_lock(&feed->demux->lock); -static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf) -{ - struct dvb_demux_feed *feed; - u16 pid = ts_pid(buf); - int dvr_done = 0; + ret = dvb_dmx_swfilter_section_one_packet(feed, buf); - if (dvb_demux_speedcheck) { - ktime_t cur_time; - u64 speed_bytes, speed_timedelta; + if (should_lock) + spin_unlock(&feed->demux->lock); - demux->speed_pkts_cnt++; + return ret; +} +EXPORT_SYMBOL(dvb_dmx_swfilter_section_packet); - /* show speed every SPEED_PKTS_INTERVAL packets */ - if (!(demux->speed_pkts_cnt % SPEED_PKTS_INTERVAL)) { - cur_time = ktime_get(); +static int dvb_demux_idx_event_sort(struct dmx_index_event_info *curr, + struct dmx_index_event_info *new) +{ + if (curr->match_tsp_num > new->match_tsp_num) + return 0; - if (ktime_to_ns(demux->speed_last_time) != 0) { - speed_bytes = (u64)demux->speed_pkts_cnt - * 188 * 8; - /* convert to 1024 basis */ - speed_bytes = 1000 * div64_u64(speed_bytes, - 1024); - speed_timedelta = ktime_ms_delta(cur_time, - demux->speed_last_time); - printk(KERN_INFO "TS speed %llu Kbits/sec \n", - div64_u64(speed_bytes, - speed_timedelta)); - } + if (curr->match_tsp_num < new->match_tsp_num) + return 1; + /* + * In case TSP numbers are equal, sort according to event type giving + * priority to PUSI events first, then RAI and finally framing events. + */ + if ((curr->type & DMX_IDX_RAI && new->type & DMX_IDX_PUSI) || + (!(curr->type & DMX_IDX_PUSI) && !(curr->type & DMX_IDX_RAI) && + new->type & (DMX_IDX_PUSI | DMX_IDX_RAI))) + return 0; - demux->speed_last_time = cur_time; - demux->speed_pkts_cnt = 0; - } + return 1; +} + +static int dvb_demux_save_idx_event(struct dvb_demux_feed *feed, + struct dmx_index_event_info *idx_event, + int traverse_from_tail) +{ + struct dmx_index_entry *idx_entry; + struct dmx_index_entry *curr_entry; + struct list_head *pos; + + /* get entry from free list */ + if (list_empty(&feed->rec_info->idx_info.free_list)) { + pr_err("%s: index free list is empty\n", __func__); + return -ENOMEM; } - if (buf[1] & 0x80) { - dprintk_tscheck("TEI detected. 
" - "PID=0x%x data1=0x%x\n", - pid, buf[1]); - /* data in this packet can't be trusted - drop it unless - * module option dvb_demux_feed_err_pkts is set */ - if (!dvb_demux_feed_err_pkts) - return; - } else /* if TEI bit is set, pid may be wrong- skip pkt counter */ - if (demux->cnt_storage && dvb_demux_tscheck) { - /* check pkt counter */ - if (pid < MAX_PID) { - if (buf[3] & 0x10) - demux->cnt_storage[pid] = - (demux->cnt_storage[pid] + 1) & 0xf; + idx_entry = list_first_entry(&feed->rec_info->idx_info.free_list, + struct dmx_index_entry, next); + list_del(&idx_entry->next); - if ((buf[3] & 0xf) != demux->cnt_storage[pid]) { - dprintk_tscheck("TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n", - pid, demux->cnt_storage[pid], - buf[3] & 0xf); - demux->cnt_storage[pid] = buf[3] & 0xf; - } + idx_entry->event = *idx_event; + + pos = &feed->rec_info->idx_info.ready_list; + if (traverse_from_tail) { + list_for_each_entry_reverse(curr_entry, + &feed->rec_info->idx_info.ready_list, next) { + if (dvb_demux_idx_event_sort(&curr_entry->event, + idx_event)) { + pos = &curr_entry->next; + break; } - /* end check */ } + } else { + list_for_each_entry(curr_entry, + &feed->rec_info->idx_info.ready_list, next) { + if (!dvb_demux_idx_event_sort(&curr_entry->event, + idx_event)) { + pos = &curr_entry->next; + break; + } + } + } - list_for_each_entry(feed, &demux->feed_list, list_head) { - if ((feed->pid != pid) && (feed->pid != 0x2000)) - continue; - - /* copy each packet only once to the dvr device, even - * if a PID is in multiple filters (e.g. video + PCR) */ - if ((DVR_FEED(feed)) && (dvr_done++)) - continue; + if (traverse_from_tail) + list_add(&idx_entry->next, pos); + else + list_add_tail(&idx_entry->next, pos); - if (feed->pid == pid) - dvb_dmx_swfilter_packet_type(feed, buf); - else if (feed->pid == 0x2000) - feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); - } + return 0; } -void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, - size_t count) +int dvb_demux_push_idx_event(struct dvb_demux_feed *feed, + struct dmx_index_event_info *idx_event, int should_lock) { - unsigned long flags; + int ret; - spin_lock_irqsave(&demux->lock, flags); + if (!should_lock && !spin_is_locked(&feed->demux->lock)) + return -EINVAL; - while (count--) { - if (buf[0] == 0x47) - dvb_dmx_swfilter_packet(demux, buf); - buf += 188; - } + if (should_lock) + spin_lock(&feed->demux->lock); + ret = dvb_demux_save_idx_event(feed, idx_event, 1); + if (should_lock) + spin_unlock(&feed->demux->lock); - spin_unlock_irqrestore(&demux->lock, flags); + return ret; } +EXPORT_SYMBOL(dvb_demux_push_idx_event); -EXPORT_SYMBOL(dvb_dmx_swfilter_packets); - -static inline int find_next_packet(const u8 *buf, int pos, size_t count, - const int pktsize) +static inline void dvb_dmx_notify_indexing(struct dvb_demux_feed *feed) { - int start = pos, lost; - - while (pos < count) { - if (buf[pos] == 0x47 || - (pktsize == 204 && buf[pos] == 0xB8)) - break; - pos++; - } - - lost = pos - start; - if (lost) { - /* This garbage is part of a valid packet? 
*/ - int backtrack = pos - pktsize; - if (backtrack >= 0 && (buf[backtrack] == 0x47 || - (pktsize == 204 && buf[backtrack] == 0xB8))) - return backtrack; + struct dmx_data_ready dmx_data_ready; + struct dmx_index_entry *curr_entry; + struct list_head *n, *pos; + + dmx_data_ready.status = DMX_OK_IDX; + + list_for_each_safe(pos, n, &feed->rec_info->idx_info.ready_list) { + curr_entry = list_entry(pos, struct dmx_index_entry, next); + + if ((feed->rec_info->idx_info.min_pattern_tsp_num == (u64)-1) || + (curr_entry->event.match_tsp_num <= + feed->rec_info->idx_info.min_pattern_tsp_num)) { + dmx_data_ready.idx_event = curr_entry->event; + feed->data_ready_cb.ts(&feed->feed.ts, &dmx_data_ready); + list_del(&curr_entry->next); + list_add_tail(&curr_entry->next, + &feed->rec_info->idx_info.free_list); + } } - - return pos; } -/* Filter all pktsize= 188 or 204 sized packets and skip garbage. */ -static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, - size_t count, const int pktsize) +void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed, int should_lock) { - int p = 0, i, j; - const u8 *q; - unsigned long flags; + if (!should_lock && !spin_is_locked(&feed->demux->lock)) + return; - spin_lock_irqsave(&demux->lock, flags); + if (should_lock) + spin_lock(&feed->demux->lock); + dvb_dmx_notify_indexing(feed); + if (should_lock) + spin_unlock(&feed->demux->lock); +} +EXPORT_SYMBOL(dvb_dmx_notify_idx_events); - if (demux->tsbufp) { /* tsbuf[0] is now 0x47. */ - i = demux->tsbufp; - j = pktsize - i; - if (count < j) { - memcpy(&demux->tsbuf[i], buf, count); - demux->tsbufp += count; - goto bailout; - } - memcpy(&demux->tsbuf[i], buf, j); - if (demux->tsbuf[0] == 0x47) /* double check */ - dvb_dmx_swfilter_packet(demux, demux->tsbuf); - demux->tsbufp = 0; - p += j; +static void dvb_dmx_process_pattern_result(struct dvb_demux_feed *feed, + struct dvb_dmx_video_patterns_results *patterns, int pattern, + u64 curr_stc, u64 prev_stc, + u64 curr_match_tsp, u64 prev_match_tsp, + u64 curr_pusi_tsp, u64 prev_pusi_tsp) +{ + int mpeg_frame_start; + int h264_frame_start; + int vc1_frame_start; + int seq_start; + u64 frame_end_in_seq; + struct dmx_index_event_info idx_event; + + idx_event.pid = feed->pid; + if (patterns->info[pattern].used_prefix_size) { + idx_event.match_tsp_num = prev_match_tsp; + idx_event.last_pusi_tsp_num = prev_pusi_tsp; + idx_event.stc = prev_stc; + } else { + idx_event.match_tsp_num = curr_match_tsp; + idx_event.last_pusi_tsp_num = curr_pusi_tsp; + idx_event.stc = curr_stc; } - while (1) { - p = find_next_packet(buf, p, count, pktsize); - if (p >= count) - break; - if (count - p < pktsize) - break; + /* notify on frame-end if needed */ + if (feed->prev_frame_valid) { + if (feed->prev_frame_type & DMX_IDX_MPEG_I_FRAME_START) { + idx_event.type = DMX_IDX_MPEG_I_FRAME_END; + frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END; + } else if (feed->prev_frame_type & DMX_IDX_MPEG_P_FRAME_START) { + idx_event.type = DMX_IDX_MPEG_P_FRAME_END; + frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END; + } else if (feed->prev_frame_type & DMX_IDX_MPEG_B_FRAME_START) { + idx_event.type = DMX_IDX_MPEG_B_FRAME_END; + frame_end_in_seq = DMX_IDX_MPEG_FIRST_SEQ_FRAME_END; + } else if (feed->prev_frame_type & DMX_IDX_H264_IDR_START) { + idx_event.type = DMX_IDX_H264_IDR_END; + frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END; + } else if (feed->prev_frame_type & DMX_IDX_H264_NON_IDR_START) { + idx_event.type = DMX_IDX_H264_NON_IDR_END; + frame_end_in_seq = 
DMX_IDX_H264_FIRST_SPS_FRAME_END; + } else if (feed->prev_frame_type & + DMX_IDX_H264_IDR_ISLICE_START) { + idx_event.type = DMX_IDX_H264_IDR_END; + frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END; + } else if (feed->prev_frame_type & + DMX_IDX_H264_NON_IDR_PSLICE_START) { + idx_event.type = DMX_IDX_H264_NON_IDR_END; + frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END; + } else if (feed->prev_frame_type & + DMX_IDX_H264_NON_IDR_BSLICE_START) { + idx_event.type = DMX_IDX_H264_NON_IDR_END; + frame_end_in_seq = DMX_IDX_H264_FIRST_SPS_FRAME_END; + } else { + idx_event.type = DMX_IDX_VC1_FRAME_END; + frame_end_in_seq = DMX_IDX_VC1_FIRST_SEQ_FRAME_END; + } - q = &buf[p]; + if (feed->idx_params.types & idx_event.type) + dvb_demux_save_idx_event(feed, &idx_event, 1); - if (pktsize == 204 && (*q == 0xB8)) { - memcpy(demux->tsbuf, q, 188); - demux->tsbuf[0] = 0x47; - q = demux->tsbuf; + if (feed->first_frame_in_seq_notified && + feed->idx_params.types & frame_end_in_seq) { + idx_event.type = frame_end_in_seq; + dvb_demux_save_idx_event(feed, &idx_event, 1); + feed->first_frame_in_seq_notified = 0; } - dvb_dmx_swfilter_packet(demux, q); - p += pktsize; + } + + seq_start = patterns->info[pattern].type & + (DMX_IDX_MPEG_SEQ_HEADER | DMX_IDX_H264_SPS | + DMX_IDX_VC1_SEQ_HEADER); + + /* did we find start of sequence/SPS? */ + if (seq_start) { + feed->first_frame_in_seq = 1; + feed->first_frame_in_seq_notified = 0; + feed->prev_frame_valid = 0; + idx_event.type = patterns->info[pattern].type; + if (feed->idx_params.types & idx_event.type) + dvb_demux_save_idx_event(feed, &idx_event, 1); + return; + } + + mpeg_frame_start = patterns->info[pattern].type & + (DMX_IDX_MPEG_I_FRAME_START | + DMX_IDX_MPEG_P_FRAME_START | + DMX_IDX_MPEG_B_FRAME_START); + + h264_frame_start = patterns->info[pattern].type & + (DMX_IDX_H264_IDR_START | DMX_IDX_H264_NON_IDR_START); + + vc1_frame_start = patterns->info[pattern].type & + DMX_IDX_VC1_FRAME_START; + + if (!mpeg_frame_start && !h264_frame_start && !vc1_frame_start) { + /* neither sequence nor frame, notify on the entry if needed */ + idx_event.type = patterns->info[pattern].type; + if (feed->idx_params.types & idx_event.type) + dvb_demux_save_idx_event(feed, &idx_event, 1); + feed->prev_frame_valid = 0; + return; + } + + /* notify on first frame in sequence/sps if needed */ + if (feed->first_frame_in_seq) { + feed->first_frame_in_seq = 0; + feed->first_frame_in_seq_notified = 1; + if (mpeg_frame_start) + idx_event.type = DMX_IDX_MPEG_FIRST_SEQ_FRAME_START; + else if (h264_frame_start) + idx_event.type = DMX_IDX_H264_FIRST_SPS_FRAME_START; + else + idx_event.type = DMX_IDX_VC1_FIRST_SEQ_FRAME_START; + + if (feed->idx_params.types & idx_event.type) + dvb_demux_save_idx_event(feed, &idx_event, 1); + } + + /* notify on frame start if needed */ + idx_event.type = patterns->info[pattern].type; + if (feed->idx_params.types & idx_event.type) + dvb_demux_save_idx_event(feed, &idx_event, 1); + + feed->prev_frame_valid = 1; + feed->prev_frame_type = patterns->info[pattern].type; +} + +void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed, + struct dvb_dmx_video_patterns_results *patterns, int pattern, + u64 curr_stc, u64 prev_stc, + u64 curr_match_tsp, u64 prev_match_tsp, + u64 curr_pusi_tsp, u64 prev_pusi_tsp) +{ + spin_lock(&feed->demux->lock); + dvb_dmx_process_pattern_result(feed, + patterns, pattern, + curr_stc, prev_stc, + curr_match_tsp, prev_match_tsp, + curr_pusi_tsp, prev_pusi_tsp); + spin_unlock(&feed->demux->lock); +} 
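+
+/*
+ * dvb_dmx_process_idx_pattern() is the exported, locked wrapper around
+ * dvb_dmx_process_pattern_result(); the internal software-demux path
+ * (dvb_dmx_index() below) calls dvb_dmx_process_pattern_result() directly,
+ * with demux->lock already held by its caller.
+ */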
+EXPORT_SYMBOL(dvb_dmx_process_idx_pattern); + +static void dvb_dmx_index(struct dvb_demux_feed *feed, + const u8 *buf, + const u8 timestamp[TIMESTAMP_LEN]) +{ + int i; + int p; + u64 stc; + int found_patterns; + int count = payload(buf); + u64 min_pattern_tsp_num; + struct dvb_demux_feed *tmp_feed; + struct dvb_demux *demux = feed->demux; + struct dmx_index_event_info idx_event; + struct dvb_dmx_video_patterns_results patterns; + + if (feed->demux->convert_ts) + feed->demux->convert_ts(feed, timestamp, &stc); + else + stc = 0; + + idx_event.pid = feed->pid; + idx_event.stc = stc; + idx_event.match_tsp_num = feed->rec_info->ts_output_count; + + /* PUSI ? */ + if (buf[1] & 0x40) { + feed->curr_pusi_tsp_num = feed->rec_info->ts_output_count; + if (feed->idx_params.types & DMX_IDX_PUSI) { + idx_event.type = DMX_IDX_PUSI; + idx_event.last_pusi_tsp_num = + feed->curr_pusi_tsp_num; + dvb_demux_save_idx_event(feed, &idx_event, 1); + } + } + + /* + * if we still did not encounter a TS packet with PUSI indication, + * we cannot report index entries yet as we need to provide + * the TS packet number with PUSI indication preceding the TS + * packet pointed by the reported index entry. + */ + if (feed->curr_pusi_tsp_num == (u64)-1) { + dvb_dmx_notify_indexing(feed); + return; + } + + if ((feed->idx_params.types & DMX_IDX_RAI) && /* index RAI? */ + (buf[3] & 0x20) && /* adaptation field exists? */ + (buf[4] > 0) && /* adaptation field len > 0 ? */ + (buf[5] & 0x40)) { /* RAI is set? */ + idx_event.type = DMX_IDX_RAI; + idx_event.last_pusi_tsp_num = + feed->curr_pusi_tsp_num; + dvb_demux_save_idx_event(feed, &idx_event, 1); + } + + /* + * if no pattern search is required, or the TS packet has no payload, + * pattern search is not executed. + */ + if (!feed->pattern_num || !count) { + dvb_dmx_notify_indexing(feed); + return; + } + + p = 188 - count; /* payload start */ + + found_patterns = + dvb_dmx_video_pattern_search(feed->patterns, + feed->pattern_num, &buf[p], count, + &feed->prefix_size, &patterns); + + for (i = 0; i < found_patterns; i++) + dvb_dmx_process_pattern_result(feed, &patterns, i, + stc, feed->prev_stc, + feed->rec_info->ts_output_count, feed->prev_tsp_num, + feed->curr_pusi_tsp_num, feed->prev_pusi_tsp_num); + + feed->prev_tsp_num = feed->rec_info->ts_output_count; + feed->prev_pusi_tsp_num = feed->curr_pusi_tsp_num; + feed->prev_stc = stc; + feed->last_pattern_tsp_num = feed->rec_info->ts_output_count; + + /* + * it is possible to have a TS packet that has a prefix of + * a video pattern but the video pattern is not identified yet + * until we get the next TS packet of that PID. When we get + * the next TS packet of that PID, pattern-search would + * detect that we have a new index entry that starts in the + * previous TS packet. + * In order to notify the user on index entries with match_tsp_num + * in ascending order, index events with match_tsp_num up to + * the last_pattern_tsp_num are notified now to the user, + * the rest can't be notified now as we might hit the above + * scenario and cause the events not to be notified with + * ascending order of match_tsp_num. + */ + if (feed->rec_info->idx_info.pattern_search_feeds_num == 1) { + /* + * optimization for case we have only one PID + * with video pattern search, in this case + * min_pattern_tsp_num is simply updated to the new + * TS packet number of the PID with pattern search. 
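+		 * With a single indexed PID every entry can be released
+		 * right away; the multi-PID path below must first walk all
+		 * pattern-search feeds to find the smallest
+		 * last_pattern_tsp_num.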
+ */ + feed->rec_info->idx_info.min_pattern_tsp_num = + feed->last_pattern_tsp_num; + dvb_dmx_notify_indexing(feed); + return; + } + + /* + * if we have more than one PID with pattern search, + * min_pattern_tsp_num needs to be updated now based on + * last_pattern_tsp_num of all PIDs with pattern search. + */ + min_pattern_tsp_num = (u64)-1; + i = feed->rec_info->idx_info.pattern_search_feeds_num; + list_for_each_entry(tmp_feed, &demux->feed_list, list_head) { + if ((tmp_feed->state != DMX_STATE_GO) || + (tmp_feed->type != DMX_TYPE_TS) || + (tmp_feed->feed.ts.buffer.ringbuff != + feed->feed.ts.buffer.ringbuff)) + continue; + + if ((tmp_feed->last_pattern_tsp_num != (u64)-1) && + ((min_pattern_tsp_num == (u64)-1) || + (tmp_feed->last_pattern_tsp_num < + min_pattern_tsp_num))) + min_pattern_tsp_num = tmp_feed->last_pattern_tsp_num; + + if (tmp_feed->pattern_num) { + i--; + if (i == 0) + break; + } + } + + feed->rec_info->idx_info.min_pattern_tsp_num = min_pattern_tsp_num; + + /* notify all index entries up to min_pattern_tsp_num */ + dvb_dmx_notify_indexing(feed); +} + +static inline void dvb_dmx_swfilter_output_packet( + struct dvb_demux_feed *feed, + const u8 *buf, + const u8 timestamp[TIMESTAMP_LEN]) +{ + /* + * if we output 192 packet with timestamp at head of packet, + * output the timestamp now before the 188 TS packet + */ + if (feed->tsp_out_format == DMX_TSP_FORMAT_192_HEAD) + feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL, + 0, &feed->feed.ts); + + feed->cb.ts(buf, 188, NULL, 0, &feed->feed.ts); + + /* + * if we output 192 packet with timestamp at tail of packet, + * output the timestamp now after the 188 TS packet + */ + if (feed->tsp_out_format == DMX_TSP_FORMAT_192_TAIL) + feed->cb.ts(timestamp, TIMESTAMP_LEN, NULL, + 0, &feed->feed.ts); + + if (feed->idx_params.enable) + dvb_dmx_index(feed, buf, timestamp); + + feed->rec_info->ts_output_count++; +} + +static inline void dvb_dmx_configure_decoder_fullness( + struct dvb_demux *demux, + int initialize) +{ + struct dvb_demux_feed *feed; + int j; + + for (j = 0; j < demux->feednum; j++) { + feed = &demux->feed[j]; + + if ((feed->state != DMX_STATE_GO) || + (feed->type != DMX_TYPE_TS) || + !(feed->ts_type & TS_DECODER)) + continue; + + if (initialize) { + if (demux->decoder_fullness_init) + demux->decoder_fullness_init(feed); + } else { + if (demux->decoder_fullness_abort) + demux->decoder_fullness_abort(feed); + } + } +} + +static inline int dvb_dmx_swfilter_buffer_check( + struct dvb_demux *demux, + u16 pid) +{ + int desired_space; + int ret; + struct dmx_ts_feed *ts; + struct dvb_demux_filter *f; + struct dvb_demux_feed *feed; + int was_locked; + int i, j; + + if (likely(spin_is_locked(&demux->lock))) + was_locked = 1; + else + was_locked = 0; + + /* + * Check that there's enough free space for data output. + * If there no space, wait for it (block). + * Since this function is called while spinlock + * is acquired, the lock should be released first. + * Once we get control back, lock is acquired back + * and checks that the filter is still valid. 
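+	 * This is the blocking side of DMX_PB_MODE_PULL flow control: the
+	 * buffer_ctrl.ts()/buffer_ctrl.sec() and decoder_fullness_wait()
+	 * callbacks below may sleep, so demux->lock is dropped around each
+	 * call and re-acquired afterwards.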
+ */ + for (j = 0; j < demux->feednum; j++) { + feed = &demux->feed[j]; + + if (demux->sw_filter_abort) + return -ENODEV; + + if ((feed->state != DMX_STATE_GO) || + ((feed->pid != pid) && (feed->pid != 0x2000))) + continue; + + if (feed->secure_mode.is_secured && + !dvb_dmx_is_rec_feed(feed)) + return 0; + + if (feed->type == DMX_TYPE_TS) { + desired_space = 192; /* upper bound */ + ts = &feed->feed.ts; + + if (feed->ts_type & TS_PACKET) { + if (likely(was_locked)) + spin_unlock(&demux->lock); + + ret = demux->buffer_ctrl.ts(ts, + desired_space, 1); + + if (likely(was_locked)) + spin_lock(&demux->lock); + + if (ret < 0) + continue; + } + + if (demux->sw_filter_abort) + return -ENODEV; + + if (!ts->is_filtering) + continue; + + if ((feed->ts_type & TS_DECODER) && + (demux->decoder_fullness_wait)) { + if (likely(was_locked)) + spin_unlock(&demux->lock); + + ret = demux->decoder_fullness_wait( + feed, + desired_space); + + if (likely(was_locked)) + spin_lock(&demux->lock); + + if (ret < 0) + continue; + } + + continue; + } + + /* else - section case */ + desired_space = feed->feed.sec.tsfeedp + 188; /* upper bound */ + for (i = 0; i < demux->filternum; i++) { + if (demux->sw_filter_abort) + return -EPERM; + + if (!feed->feed.sec.is_filtering) + continue; + + f = &demux->filter[i]; + if (f->feed != feed) + continue; + + if (likely(was_locked)) + spin_unlock(&demux->lock); + + ret = demux->buffer_ctrl.sec(&f->filter, + desired_space, 1); + + if (likely(was_locked)) + spin_lock(&demux->lock); + + if (ret < 0) + break; + } + } + + return 0; +} + +static inline void dvb_dmx_swfilter_packet_type(struct dvb_demux_feed *feed, + const u8 *buf, const u8 timestamp[TIMESTAMP_LEN]) +{ + u16 pid = ts_pid(buf); + u8 scrambling_bits = ts_scrambling_ctrl(buf); + struct dmx_data_ready dmx_data_ready; + + /* + * Notify on scrambling status change only when we move + * from clear (0) to non-clear and vise-versa + */ + if ((scrambling_bits && !feed->scrambling_bits) || + (!scrambling_bits && feed->scrambling_bits)) { + dmx_data_ready.status = DMX_OK_SCRAMBLING_STATUS; + dmx_data_ready.data_length = 0; + dmx_data_ready.scrambling_bits.pid = pid; + dmx_data_ready.scrambling_bits.old_value = + feed->scrambling_bits; + dmx_data_ready.scrambling_bits.new_value = scrambling_bits; + + if (feed->type == DMX_TYPE_SEC) + dvb_dmx_notify_section_event(feed, &dmx_data_ready, 0); + else if (feed->feed.ts.is_filtering) + feed->data_ready_cb.ts(&feed->feed.ts, &dmx_data_ready); + } + + feed->scrambling_bits = scrambling_bits; + + switch (feed->type) { + case DMX_TYPE_TS: + if (!feed->feed.ts.is_filtering) + break; + if (feed->ts_type & TS_PACKET) { + if (feed->ts_type & TS_PAYLOAD_ONLY) { + if (!feed->secure_mode.is_secured) + dvb_dmx_swfilter_payload(feed, buf); + } else { + dvb_dmx_swfilter_output_packet(feed, + buf, timestamp); + } + } + if ((feed->ts_type & TS_DECODER) && + !feed->secure_mode.is_secured) + if (feed->demux->write_to_decoder) + feed->demux->write_to_decoder(feed, buf, 188); + break; + + case DMX_TYPE_SEC: + if (!feed->feed.sec.is_filtering || + feed->secure_mode.is_secured) + break; + if (dvb_dmx_swfilter_section_one_packet(feed, buf) < 0) + feed->feed.sec.seclen = feed->feed.sec.secbufp = 0; + break; + + default: + break; + } +} + +#define DVR_FEED(f) \ + (((f)->type == DMX_TYPE_TS) && \ + ((f)->feed.ts.is_filtering) && \ + (((f)->ts_type & (TS_PACKET | TS_DEMUX)) == TS_PACKET)) + +static void dvb_dmx_swfilter_one_packet(struct dvb_demux *demux, const u8 *buf, + const u8 timestamp[TIMESTAMP_LEN]) +{ + struct 
dvb_demux_feed *feed; + u16 pid = ts_pid(buf); + int dvr_done = 0; + + if (dvb_demux_speedcheck) { + ktime_t cur_time; + u64 speed_bytes, speed_timedelta; + + demux->speed_pkts_cnt++; + + /* show speed every SPEED_PKTS_INTERVAL packets */ + if (!(demux->speed_pkts_cnt % SPEED_PKTS_INTERVAL)) { + cur_time = ktime_get(); + + if (ktime_to_ns(demux->speed_last_time) != 0) { + speed_bytes = (u64)demux->speed_pkts_cnt + * 188 * 8; + /* convert to 1024 basis */ + speed_bytes = 1000 * div64_u64(speed_bytes, + 1024); + speed_timedelta = ktime_ms_delta(cur_time, + demux->speed_last_time); + pr_info("TS speed %llu Kbits/sec\n", + div64_u64(speed_bytes, speed_timedelta)); + } + + demux->speed_last_time = cur_time; + demux->speed_pkts_cnt = 0; + } + } + + if (buf[1] & 0x80) { + dprintk_tscheck("TEI detected. PID=0x%x data1=0x%x\n", pid, + buf[1]); + /* + * data in this packet can't be trusted - drop it unless + * module option dvb_demux_feed_err_pkts is set + */ + if (!dvb_demux_feed_err_pkts) + return; + } else /* if TEI bit is set, pid may be wrong- skip pkt counter */ + if (demux->cnt_storage && dvb_demux_tscheck) { + /* check pkt counter */ + if (pid < MAX_PID) { + if (buf[3] & 0x10) + demux->cnt_storage[pid] = + (demux->cnt_storage[pid] + 1) & + 0xf; + + if ((buf[3] & 0xf) != demux->cnt_storage[pid]) { + dprintk_tscheck( + "TS packet counter mismatch. PID=0x%x expected 0x%x got 0x%x\n", + pid, demux->cnt_storage[pid], + buf[3] & 0xf); + demux->cnt_storage[pid] = buf[3] & 0xf; + } + } + /* end check */ + } + + if (demux->playback_mode == DMX_PB_MODE_PULL) + if (dvb_dmx_swfilter_buffer_check(demux, pid) < 0) + return; + + list_for_each_entry(feed, &demux->feed_list, list_head) { + if ((feed->pid != pid) && (feed->pid != 0x2000)) + continue; + + /* + * copy each packet only once to the dvr device, even + * if a PID is in multiple filters (e.g. video + PCR) + */ + if ((DVR_FEED(feed)) && (dvr_done++)) + continue; + + if (feed->pid == pid) + dvb_dmx_swfilter_packet_type(feed, buf, timestamp); + else if ((feed->pid == 0x2000) && + (feed->feed.ts.is_filtering)) + dvb_dmx_swfilter_output_packet(feed, buf, timestamp); + } +} + +void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf, + const u8 timestamp[TIMESTAMP_LEN]) +{ + spin_lock(&demux->lock); + dvb_dmx_swfilter_one_packet(demux, buf, timestamp); + spin_unlock(&demux->lock); +} +EXPORT_SYMBOL(dvb_dmx_swfilter_packet); + +void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf, + size_t count) +{ + ktime_t pre_time = ktime_set(0, 0); + u8 timestamp[TIMESTAMP_LEN] = {0}; + + if (dvb_demux_performancecheck) + pre_time = ktime_get(); + + spin_lock(&demux->lock); + + demux->sw_filter_abort = 0; + dvb_dmx_configure_decoder_fullness(demux, 1); + + while (count--) { + if (buf[0] == 0x47) + dvb_dmx_swfilter_one_packet(demux, buf, timestamp); + buf += 188; + } + + spin_unlock(&demux->lock); + + if (dvb_demux_performancecheck) + demux->total_process_time += dvb_dmx_calc_time_delta(pre_time); +} +EXPORT_SYMBOL(dvb_dmx_swfilter_packets); + +static inline int find_next_packet(const u8 *buf, int pos, size_t count, + const int pktsize, const int leadingbytes) +{ + int start = pos, lost; + + while (pos < count) { + if ((buf[pos] == 0x47 && !leadingbytes) || + (pktsize == 204 && buf[pos] == 0xB8) || + (pktsize == 192 && leadingbytes && + (pos+leadingbytes < count) && + buf[pos+leadingbytes] == 0x47)) + break; + pos++; + } + + lost = pos - start; + if (lost) { + /* This garbage is part of a valid packet? 
*/ + int backtrack = pos - pktsize; + + if (backtrack >= 0 && (buf[backtrack] == 0x47 || + (pktsize == 204 && buf[backtrack] == 0xB8) || + (pktsize == 192 && + buf[backtrack+leadingbytes] == 0x47))) + return backtrack; + } + + return pos; +} + +/* Filter all pktsize= 188 or 204 sized packets and skip garbage. */ +static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, + size_t count, const int pktsize, const int leadingbytes) +{ + int p = 0, i, j; + const u8 *q; + ktime_t pre_time; + u8 timestamp[TIMESTAMP_LEN]; + + if (dvb_demux_performancecheck) + pre_time = ktime_get(); + + spin_lock(&demux->lock); + + demux->sw_filter_abort = 0; + dvb_dmx_configure_decoder_fullness(demux, 1); + + if (demux->tsbufp) { /* tsbuf[0] is now 0x47. */ + i = demux->tsbufp; + j = pktsize - i; + if (count < j) { + memcpy(&demux->tsbuf[i], buf, count); + demux->tsbufp += count; + goto bailout; + } + memcpy(&demux->tsbuf[i], buf, j); + + if (pktsize == 192) { + if (leadingbytes) + memcpy(timestamp, &demux->tsbuf[p], + TIMESTAMP_LEN); + else + memcpy(timestamp, &demux->tsbuf[188], + TIMESTAMP_LEN); + } else { + memset(timestamp, 0, TIMESTAMP_LEN); + } + + if (pktsize == 192 && + leadingbytes && + demux->tsbuf[leadingbytes] == 0x47) /* double check */ + dvb_dmx_swfilter_one_packet(demux, + demux->tsbuf + TIMESTAMP_LEN, timestamp); + else if (demux->tsbuf[0] == 0x47) /* double check */ + dvb_dmx_swfilter_one_packet(demux, + demux->tsbuf, timestamp); + demux->tsbufp = 0; + p += j; + } + + while (1) { + p = find_next_packet(buf, p, count, pktsize, leadingbytes); + + if (demux->sw_filter_abort) + goto bailout; + + if (p >= count) + break; + if (count - p < pktsize) + break; + + q = &buf[p]; + + if (pktsize == 204 && (*q == 0xB8)) { + memcpy(demux->tsbuf, q, 188); + demux->tsbuf[0] = 0x47; + q = demux->tsbuf; + } + + if (pktsize == 192) { + if (leadingbytes) { + q = &buf[p+leadingbytes]; + memcpy(timestamp, &buf[p], TIMESTAMP_LEN); + } else { + memcpy(timestamp, &buf[p+188], TIMESTAMP_LEN); + } + } else { + memset(timestamp, 0, TIMESTAMP_LEN); + } + + dvb_dmx_swfilter_one_packet(demux, q, timestamp); + p += pktsize; } i = count - p; @@ -560,219 +1760,909 @@ static inline void _dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, demux->tsbuf[0] = 0x47; } -bailout: - spin_unlock_irqrestore(&demux->lock, flags); +bailout: + spin_unlock(&demux->lock); + + if (dvb_demux_performancecheck) + demux->total_process_time += dvb_dmx_calc_time_delta(pre_time); +} + +void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count) +{ + _dvb_dmx_swfilter(demux, buf, count, 188, 0); +} +EXPORT_SYMBOL(dvb_dmx_swfilter); + +void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count) +{ + _dvb_dmx_swfilter(demux, buf, count, 204, 0); +} +EXPORT_SYMBOL(dvb_dmx_swfilter_204); + +void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count) +{ + spin_lock(&demux->lock); + + demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts); + + spin_unlock(&demux->lock); +} +EXPORT_SYMBOL(dvb_dmx_swfilter_raw); + +void dvb_dmx_swfilter_format( + struct dvb_demux *demux, + const u8 *buf, + size_t count, + enum dmx_tsp_format_t tsp_format) +{ + switch (tsp_format) { + case DMX_TSP_FORMAT_188: + _dvb_dmx_swfilter(demux, buf, count, 188, 0); + break; + + case DMX_TSP_FORMAT_192_TAIL: + _dvb_dmx_swfilter(demux, buf, count, 192, 0); + break; + + case DMX_TSP_FORMAT_192_HEAD: + _dvb_dmx_swfilter(demux, buf, count, 192, TIMESTAMP_LEN); + break; + + case DMX_TSP_FORMAT_204: + 
_dvb_dmx_swfilter(demux, buf, count, 204, 0); + break; + + default: + pr_err("%s: invalid TS packet format (format=%d)\n", __func__, + tsp_format); + break; + } +} +EXPORT_SYMBOL(dvb_dmx_swfilter_format); + +static struct dvb_demux_filter *dvb_dmx_filter_alloc(struct dvb_demux *demux) +{ + int i; + + for (i = 0; i < demux->filternum; i++) + if (demux->filter[i].state == DMX_STATE_FREE) + break; + + if (i == demux->filternum) + return NULL; + + demux->filter[i].state = DMX_STATE_ALLOCATED; + + return &demux->filter[i]; +} + +static struct dvb_demux_feed *dvb_dmx_feed_alloc(struct dvb_demux *demux) +{ + int i; + + for (i = 0; i < demux->feednum; i++) + if (demux->feed[i].state == DMX_STATE_FREE) + break; + + if (i == demux->feednum) + return NULL; + + demux->feed[i].state = DMX_STATE_ALLOCATED; + + return &demux->feed[i]; +} + +const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern) +{ + switch (dmx_idx_pattern) { + case DMX_IDX_MPEG_SEQ_HEADER: + return &mpeg2_seq_hdr; + + case DMX_IDX_MPEG_GOP: + return &mpeg2_gop; + + case DMX_IDX_MPEG_I_FRAME_START: + return &mpeg2_iframe; + + case DMX_IDX_MPEG_P_FRAME_START: + return &mpeg2_pframe; + + case DMX_IDX_MPEG_B_FRAME_START: + return &mpeg2_bframe; + + case DMX_IDX_H264_SPS: + return &h264_sps; + + case DMX_IDX_H264_PPS: + return &h264_pps; + + case DMX_IDX_H264_IDR_START: + return &h264_idr; + + case DMX_IDX_H264_NON_IDR_START: + return &h264_non_idr; + + case DMX_IDX_H264_IDR_ISLICE_START: + return &h264_idr_islice; + + case DMX_IDX_H264_NON_IDR_PSLICE_START: + return &h264_non_idr_pslice; + + case DMX_IDX_H264_NON_IDR_BSLICE_START: + return &h264_non_idr_bslice; + + case DMX_IDX_H264_ACCESS_UNIT_DEL: + return &h264_non_access_unit_del; + + case DMX_IDX_H264_SEI: + return &h264_non_sei; + + case DMX_IDX_VC1_SEQ_HEADER: + return &vc1_seq_hdr; + + case DMX_IDX_VC1_ENTRY_POINT: + return &vc1_entry_point; + + case DMX_IDX_VC1_FRAME_START: + return &vc1_frame; + + default: + return NULL; + } +} +EXPORT_SYMBOL(dvb_dmx_get_pattern); + +static void dvb_dmx_init_idx_state(struct dvb_demux_feed *feed) +{ + feed->prev_tsp_num = (u64)-1; + feed->curr_pusi_tsp_num = (u64)-1; + feed->prev_pusi_tsp_num = (u64)-1; + feed->prev_frame_valid = 0; + feed->first_frame_in_seq = 0; + feed->first_frame_in_seq_notified = 0; + feed->last_pattern_tsp_num = (u64)-1; + feed->pattern_num = 0; + memset(&feed->prefix_size, 0, + sizeof(struct dvb_dmx_video_prefix_size_masks)); + + if (feed->idx_params.types & + (DMX_IDX_MPEG_SEQ_HEADER | + DMX_IDX_MPEG_FIRST_SEQ_FRAME_START | + DMX_IDX_MPEG_FIRST_SEQ_FRAME_END)) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER); + feed->pattern_num++; + } + + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & DMX_IDX_MPEG_GOP)) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP); + feed->pattern_num++; + } + + /* MPEG2 I-frame */ + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_MPEG_I_FRAME_START | DMX_IDX_MPEG_I_FRAME_END | + DMX_IDX_MPEG_P_FRAME_END | DMX_IDX_MPEG_B_FRAME_END | + DMX_IDX_MPEG_FIRST_SEQ_FRAME_START | + DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START); + feed->pattern_num++; + } + + /* MPEG2 P-frame */ + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_MPEG_P_FRAME_START | DMX_IDX_MPEG_P_FRAME_END | + DMX_IDX_MPEG_I_FRAME_END 
| DMX_IDX_MPEG_B_FRAME_END | + DMX_IDX_MPEG_FIRST_SEQ_FRAME_START | + DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START); + feed->pattern_num++; + } + + /* MPEG2 B-frame */ + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_MPEG_B_FRAME_START | DMX_IDX_MPEG_B_FRAME_END | + DMX_IDX_MPEG_I_FRAME_END | DMX_IDX_MPEG_P_FRAME_END | + DMX_IDX_MPEG_FIRST_SEQ_FRAME_START | + DMX_IDX_MPEG_FIRST_SEQ_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START); + feed->pattern_num++; + } + + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_H264_SPS | + DMX_IDX_H264_FIRST_SPS_FRAME_START | + DMX_IDX_H264_FIRST_SPS_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_H264_SPS); + feed->pattern_num++; + } + + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & DMX_IDX_H264_PPS)) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_H264_PPS); + feed->pattern_num++; + } + + /* H264 IDR */ + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_H264_IDR_START | DMX_IDX_H264_IDR_END | + DMX_IDX_H264_NON_IDR_END | + DMX_IDX_H264_FIRST_SPS_FRAME_START | + DMX_IDX_H264_FIRST_SPS_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START); + feed->pattern_num++; + } + + /* H264 non-IDR */ + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_H264_NON_IDR_START | DMX_IDX_H264_NON_IDR_END | + DMX_IDX_H264_IDR_END | + DMX_IDX_H264_FIRST_SPS_FRAME_START | + DMX_IDX_H264_FIRST_SPS_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START); + feed->pattern_num++; + } + + /* H264 IDR ISlice */ + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_H264_IDR_ISLICE_START | DMX_IDX_H264_IDR_END | + DMX_IDX_H264_NON_IDR_END | + DMX_IDX_H264_FIRST_SPS_FRAME_START | + DMX_IDX_H264_FIRST_SPS_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_H264_IDR_ISLICE_START); + feed->pattern_num++; + } + /* H264 non-IDR PSlice */ + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_H264_NON_IDR_PSLICE_START | DMX_IDX_H264_NON_IDR_END | + DMX_IDX_H264_IDR_END | + DMX_IDX_H264_FIRST_SPS_FRAME_START | + DMX_IDX_H264_FIRST_SPS_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_PSLICE_START); + feed->pattern_num++; + } + /* H264 non-IDR BSlice */ + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_H264_NON_IDR_BSLICE_START | DMX_IDX_H264_NON_IDR_END | + DMX_IDX_H264_IDR_END | + DMX_IDX_H264_FIRST_SPS_FRAME_START | + DMX_IDX_H264_FIRST_SPS_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_BSLICE_START); + feed->pattern_num++; + } + + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & DMX_IDX_H264_ACCESS_UNIT_DEL)) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_H264_ACCESS_UNIT_DEL); + feed->pattern_num++; + } + + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & DMX_IDX_H264_SEI)) { + feed->patterns[feed->pattern_num] = 
+ dvb_dmx_get_pattern(DMX_IDX_H264_SEI); + feed->pattern_num++; + } + + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_VC1_SEQ_HEADER | + DMX_IDX_VC1_FIRST_SEQ_FRAME_START | + DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER); + feed->pattern_num++; + } + + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & DMX_IDX_VC1_ENTRY_POINT)) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT); + feed->pattern_num++; + } + + /* VC1 frame */ + if ((feed->pattern_num < DVB_DMX_MAX_SEARCH_PATTERN_NUM) && + (feed->idx_params.types & + (DMX_IDX_VC1_FRAME_START | DMX_IDX_VC1_FRAME_END | + DMX_IDX_VC1_FIRST_SEQ_FRAME_START | + DMX_IDX_VC1_FIRST_SEQ_FRAME_END))) { + feed->patterns[feed->pattern_num] = + dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START); + feed->pattern_num++; + } + + if (feed->pattern_num) + feed->rec_info->idx_info.pattern_search_feeds_num++; +} + +static struct dvb_demux_rec_info *dvb_dmx_alloc_rec_info( + struct dmx_ts_feed *ts_feed) +{ + int i; + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dvb_demux *demux = feed->demux; + struct dvb_demux_rec_info *rec_info; + struct dvb_demux_feed *tmp_feed; + + /* check if this feed share recording buffer with other active feeds */ + list_for_each_entry(tmp_feed, &demux->feed_list, list_head) { + if ((tmp_feed->state == DMX_STATE_GO) && + (tmp_feed->type == DMX_TYPE_TS) && + (tmp_feed != feed) && + (tmp_feed->feed.ts.buffer.ringbuff == + ts_feed->buffer.ringbuff)) { + /* indexing information is shared between the feeds */ + tmp_feed->rec_info->ref_count++; + return tmp_feed->rec_info; + } + } + + /* Need to allocate a new indexing info */ + for (i = 0; i < demux->feednum; i++) + if (!demux->rec_info_pool[i].ref_count) + break; + + if (i == demux->feednum) + return NULL; + + rec_info = &demux->rec_info_pool[i]; + rec_info->ref_count++; + INIT_LIST_HEAD(&rec_info->idx_info.free_list); + INIT_LIST_HEAD(&rec_info->idx_info.ready_list); + + for (i = 0; i < DMX_IDX_EVENT_QUEUE_SIZE; i++) + list_add(&rec_info->idx_info.events[i].next, + &rec_info->idx_info.free_list); + + rec_info->ts_output_count = 0; + rec_info->idx_info.min_pattern_tsp_num = (u64)-1; + rec_info->idx_info.pattern_search_feeds_num = 0; + rec_info->idx_info.indexing_feeds_num = 0; + + return rec_info; +} + +static void dvb_dmx_free_rec_info(struct dmx_ts_feed *ts_feed) +{ + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + + if (!feed->rec_info || !feed->rec_info->ref_count) { + pr_err("%s: invalid idx info state\n", __func__); + return; + } + + feed->rec_info->ref_count--; +} + +static int dvb_demux_feed_find(struct dvb_demux_feed *feed) +{ + struct dvb_demux_feed *entry; + + list_for_each_entry(entry, &feed->demux->feed_list, list_head) + if (entry == feed) + return 1; + + return 0; +} + +static void dvb_demux_feed_add(struct dvb_demux_feed *feed) +{ + spin_lock_irq(&feed->demux->lock); + if (dvb_demux_feed_find(feed)) { + pr_err("%s: feed already in list (type=%x state=%x pid=%x)\n", + __func__, feed->type, feed->state, feed->pid); + goto out; + } + + list_add(&feed->list_head, &feed->demux->feed_list); +out: + spin_unlock_irq(&feed->demux->lock); +} + +static void dvb_demux_feed_del(struct dvb_demux_feed *feed) +{ + spin_lock_irq(&feed->demux->lock); + if (!(dvb_demux_feed_find(feed))) { + pr_err("%s: feed not in list (type=%x state=%x pid=%x)\n", + 
__func__, feed->type, feed->state, feed->pid); + goto out; + } + + list_del(&feed->list_head); +out: + spin_unlock_irq(&feed->demux->lock); +} + +static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type, + enum dmx_ts_pes pes_type, + size_t circular_buffer_size, ktime_t timeout) +{ + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dvb_demux *demux = feed->demux; + + if (pid > DMX_MAX_PID) + return -EINVAL; + + if (mutex_lock_interruptible(&demux->mutex)) + return -ERESTARTSYS; + + if (ts_type & TS_DECODER) { + if (pes_type >= DMX_PES_OTHER) { + mutex_unlock(&demux->mutex); + return -EINVAL; + } + + if (demux->pesfilter[pes_type] && + demux->pesfilter[pes_type] != feed) { + mutex_unlock(&demux->mutex); + return -EINVAL; + } + + demux->pesfilter[pes_type] = feed; + demux->pids[pes_type] = pid; + } + + dvb_demux_feed_add(feed); + + feed->pid = pid; + feed->buffer_size = circular_buffer_size; + feed->timeout = timeout; + feed->ts_type = ts_type; + feed->pes_type = pes_type; + + if (feed->buffer_size) { +#ifdef NOBUFS + feed->buffer = NULL; +#else + feed->buffer = vmalloc(feed->buffer_size); + if (!feed->buffer) { + mutex_unlock(&demux->mutex); + return -ENOMEM; + } +#endif + } + + feed->state = DMX_STATE_READY; + mutex_unlock(&demux->mutex); + + return 0; } -void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count) +static int dmx_ts_feed_start_filtering(struct dmx_ts_feed *ts_feed) { - _dvb_dmx_swfilter(demux, buf, count, 188); + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dvb_demux *demux = feed->demux; + int ret; + + if (mutex_lock_interruptible(&demux->mutex)) + return -ERESTARTSYS; + + if (feed->state != DMX_STATE_READY || feed->type != DMX_TYPE_TS) { + mutex_unlock(&demux->mutex); + return -EINVAL; + } + + if (!demux->start_feed) { + mutex_unlock(&demux->mutex); + return -ENODEV; + } + + feed->first_cc = 1; + feed->scrambling_bits = 0; + + if ((feed->ts_type & TS_PACKET) && + !(feed->ts_type & TS_PAYLOAD_ONLY)) { + feed->rec_info = dvb_dmx_alloc_rec_info(ts_feed); + if (!feed->rec_info) { + mutex_unlock(&demux->mutex); + return -ENOMEM; + } + if (feed->idx_params.enable) { + dvb_dmx_init_idx_state(feed); + feed->rec_info->idx_info.indexing_feeds_num++; + if (demux->set_indexing) + demux->set_indexing(feed); + } + } else { + feed->pattern_num = 0; + feed->rec_info = NULL; + } + + ret = demux->start_feed(feed); + if (ret < 0) { + if ((feed->ts_type & TS_PACKET) && + !(feed->ts_type & TS_PAYLOAD_ONLY)) { + dvb_dmx_free_rec_info(ts_feed); + feed->rec_info = NULL; + } + mutex_unlock(&demux->mutex); + return ret; + } + + spin_lock_irq(&demux->lock); + ts_feed->is_filtering = 1; + feed->state = DMX_STATE_GO; + spin_unlock_irq(&demux->lock); + mutex_unlock(&demux->mutex); + + return 0; } -EXPORT_SYMBOL(dvb_dmx_swfilter); -void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count) +static int dmx_ts_feed_stop_filtering(struct dmx_ts_feed *ts_feed) { - _dvb_dmx_swfilter(demux, buf, count, 204); + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dvb_demux *demux = feed->demux; + int ret; + + mutex_lock(&demux->mutex); + + if (feed->state < DMX_STATE_GO) { + mutex_unlock(&demux->mutex); + return -EINVAL; + } + + if (!demux->stop_feed) { + mutex_unlock(&demux->mutex); + return -ENODEV; + } + + ret = demux->stop_feed(feed); + + spin_lock_irq(&demux->lock); + ts_feed->is_filtering = 0; + feed->state = DMX_STATE_ALLOCATED; + spin_unlock_irq(&demux->lock); + + if 
(feed->rec_info) { + if (feed->pattern_num) + feed->rec_info->idx_info.pattern_search_feeds_num--; + if (feed->idx_params.enable) + feed->rec_info->idx_info.indexing_feeds_num--; + dvb_dmx_free_rec_info(ts_feed); + feed->rec_info = NULL; + } + + mutex_unlock(&demux->mutex); + + return ret; } -EXPORT_SYMBOL(dvb_dmx_swfilter_204); -void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count) +static int dmx_ts_feed_decoder_buff_status(struct dmx_ts_feed *ts_feed, + struct dmx_buffer_status *dmx_buffer_status) { - unsigned long flags; + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dvb_demux *demux = feed->demux; + int ret; - spin_lock_irqsave(&demux->lock, flags); + mutex_lock(&demux->mutex); - demux->feed->cb.ts(buf, count, NULL, 0, &demux->feed->feed.ts); + if (feed->state < DMX_STATE_GO) { + mutex_unlock(&demux->mutex); + return -EINVAL; + } + + if (!demux->decoder_buffer_status) { + mutex_unlock(&demux->mutex); + return -ENODEV; + } + + ret = demux->decoder_buffer_status(feed, dmx_buffer_status); + + mutex_unlock(&demux->mutex); - spin_unlock_irqrestore(&demux->lock, flags); + return ret; } -EXPORT_SYMBOL(dvb_dmx_swfilter_raw); -static struct dvb_demux_filter *dvb_dmx_filter_alloc(struct dvb_demux *demux) +static int dmx_ts_feed_reuse_decoder_buffer(struct dmx_ts_feed *ts_feed, + int cookie) { - int i; + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dvb_demux *demux = feed->demux; + int ret; - for (i = 0; i < demux->filternum; i++) - if (demux->filter[i].state == DMX_STATE_FREE) - break; + mutex_lock(&demux->mutex); - if (i == demux->filternum) - return NULL; + if (feed->state < DMX_STATE_GO) { + mutex_unlock(&demux->mutex); + return -EINVAL; + } - demux->filter[i].state = DMX_STATE_ALLOCATED; + if (!demux->reuse_decoder_buffer) { + mutex_unlock(&demux->mutex); + return -ENODEV; + } - return &demux->filter[i]; + ret = demux->reuse_decoder_buffer(feed, cookie); + + mutex_unlock(&demux->mutex); + + return ret; } -static struct dvb_demux_feed *dvb_dmx_feed_alloc(struct dvb_demux *demux) +static int dmx_ts_feed_data_ready_cb(struct dmx_ts_feed *feed, + dmx_ts_data_ready_cb callback) { - int i; + struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; + struct dvb_demux *dvbdmx = dvbdmxfeed->demux; - for (i = 0; i < demux->feednum; i++) - if (demux->feed[i].state == DMX_STATE_FREE) - break; + mutex_lock(&dvbdmx->mutex); - if (i == demux->feednum) - return NULL; + if (dvbdmxfeed->state == DMX_STATE_GO) { + mutex_unlock(&dvbdmx->mutex); + return -EINVAL; + } - demux->feed[i].state = DMX_STATE_ALLOCATED; + dvbdmxfeed->data_ready_cb.ts = callback; - return &demux->feed[i]; + mutex_unlock(&dvbdmx->mutex); + return 0; } -static int dvb_demux_feed_find(struct dvb_demux_feed *feed) +static int dmx_ts_set_secure_mode(struct dmx_ts_feed *feed, + struct dmx_secure_mode *secure_mode) { - struct dvb_demux_feed *entry; + struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; + struct dvb_demux *dvbdmx = dvbdmxfeed->demux; - list_for_each_entry(entry, &feed->demux->feed_list, list_head) - if (entry == feed) - return 1; + if (mutex_lock_interruptible(&dvbdmx->mutex)) + return -ERESTARTSYS; + + if (dvbdmxfeed->state == DMX_STATE_GO) { + mutex_unlock(&dvbdmx->mutex); + return -EBUSY; + } + dvbdmxfeed->secure_mode = *secure_mode; + mutex_unlock(&dvbdmx->mutex); return 0; } -static void dvb_demux_feed_add(struct dvb_demux_feed *feed) +static int dmx_ts_set_cipher_ops(struct dmx_ts_feed *feed, + struct 
dmx_cipher_operations *cipher_ops) { - spin_lock_irq(&feed->demux->lock); - if (dvb_demux_feed_find(feed)) { - printk(KERN_ERR "%s: feed already in list (type=%x state=%x pid=%x)\n", - __func__, feed->type, feed->state, feed->pid); - goto out; + struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; + struct dvb_demux *dvbdmx = dvbdmxfeed->demux; + int ret = 0; + + if (mutex_lock_interruptible(&dvbdmx->mutex)) + return -ERESTARTSYS; + + if ((dvbdmxfeed->state == DMX_STATE_GO) && + dvbdmx->set_cipher_op) + ret = dvbdmx->set_cipher_op(dvbdmxfeed, cipher_ops); + + if (!ret) + dvbdmxfeed->cipher_ops = *cipher_ops; + + mutex_unlock(&dvbdmx->mutex); + return ret; +} + +static int dmx_ts_set_video_codec( + struct dmx_ts_feed *ts_feed, + enum dmx_video_codec video_codec) +{ + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + + feed->video_codec = video_codec; + + return 0; +} + +static int dmx_ts_set_idx_params(struct dmx_ts_feed *ts_feed, + struct dmx_indexing_params *idx_params) +{ + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dvb_demux *dvbdmx = feed->demux; + int idx_enabled; + int ret = 0; + + mutex_lock(&dvbdmx->mutex); + + if ((feed->state == DMX_STATE_GO) && + !feed->rec_info) { + mutex_unlock(&dvbdmx->mutex); + return -EINVAL; } - list_add(&feed->list_head, &feed->demux->feed_list); -out: - spin_unlock_irq(&feed->demux->lock); + idx_enabled = feed->idx_params.enable; + feed->idx_params = *idx_params; + + if (feed->state == DMX_STATE_GO) { + spin_lock_irq(&dvbdmx->lock); + if (feed->pattern_num) + feed->rec_info->idx_info.pattern_search_feeds_num--; + if (idx_enabled && !idx_params->enable) + feed->rec_info->idx_info.indexing_feeds_num--; + if (!idx_enabled && idx_params->enable) + feed->rec_info->idx_info.indexing_feeds_num++; + dvb_dmx_init_idx_state(feed); + spin_unlock_irq(&dvbdmx->lock); + + if (dvbdmx->set_indexing) + ret = dvbdmx->set_indexing(feed); + } + + mutex_unlock(&dvbdmx->mutex); + + return ret; } -static void dvb_demux_feed_del(struct dvb_demux_feed *feed) +static int dvbdmx_ts_feed_oob_cmd(struct dmx_ts_feed *ts_feed, + struct dmx_oob_command *cmd) { - spin_lock_irq(&feed->demux->lock); - if (!(dvb_demux_feed_find(feed))) { - printk(KERN_ERR "%s: feed not in list (type=%x state=%x pid=%x)\n", - __func__, feed->type, feed->state, feed->pid); - goto out; + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dmx_data_ready data; + struct dvb_demux *dvbdmx = feed->demux; + int ret = 0; + int secure_non_rec = feed->secure_mode.is_secured && + !dvb_dmx_is_rec_feed(feed); + + mutex_lock(&dvbdmx->mutex); + + if (feed->state != DMX_STATE_GO) { + mutex_unlock(&dvbdmx->mutex); + return -EINVAL; } - list_del(&feed->list_head); -out: - spin_unlock_irq(&feed->demux->lock); + /* Decoder & non-recording secure feeds are handled by plug-in */ + if ((feed->ts_type & TS_DECODER) || secure_non_rec) { + if (feed->demux->oob_command) + ret = feed->demux->oob_command(feed, cmd); + } + + if (!(feed->ts_type & (TS_PAYLOAD_ONLY | TS_PACKET)) || + secure_non_rec) { + mutex_unlock(&dvbdmx->mutex); + return ret; + } + + data.data_length = 0; + + switch (cmd->type) { + case DMX_OOB_CMD_EOS: + if (feed->ts_type & TS_PAYLOAD_ONLY) + dvb_dmx_check_pes_end(feed); + + data.status = DMX_OK_EOS; + ret = feed->data_ready_cb.ts(&feed->feed.ts, &data); + break; + + case DMX_OOB_CMD_MARKER: + data.status = DMX_OK_MARKER; + data.marker.id = cmd->params.marker.id; + ret = feed->data_ready_cb.ts(&feed->feed.ts, &data); + break; + + 
default: + ret = -EINVAL; + break; + } + + mutex_unlock(&dvbdmx->mutex); + return ret; } -static int dmx_ts_feed_set(struct dmx_ts_feed *ts_feed, u16 pid, int ts_type, - enum dmx_ts_pes pes_type, - size_t circular_buffer_size, ktime_t timeout) +static int dvbdmx_ts_get_scrambling_bits(struct dmx_ts_feed *ts_feed, + u8 *value) { struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; struct dvb_demux *demux = feed->demux; - if (pid > DMX_MAX_PID) + spin_lock(&demux->lock); + + if (!ts_feed->is_filtering) { + spin_unlock(&demux->lock); return -EINVAL; + } - if (mutex_lock_interruptible(&demux->mutex)) - return -ERESTARTSYS; + *value = feed->scrambling_bits; + spin_unlock(&demux->lock); - if (ts_type & TS_DECODER) { - if (pes_type >= DMX_PES_OTHER) { - mutex_unlock(&demux->mutex); - return -EINVAL; - } + return 0; +} - if (demux->pesfilter[pes_type] && - demux->pesfilter[pes_type] != feed) { - mutex_unlock(&demux->mutex); - return -EINVAL; - } +static int dvbdmx_ts_insertion_insert_buffer(struct dmx_ts_feed *ts_feed, + char *data, size_t size) +{ + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; + struct dvb_demux *demux = feed->demux; - demux->pesfilter[pes_type] = feed; - demux->pids[pes_type] = pid; + spin_lock(&demux->lock); + if (!ts_feed->is_filtering) { + spin_unlock(&demux->lock); + return 0; } - dvb_demux_feed_add(feed); - - feed->pid = pid; - feed->buffer_size = circular_buffer_size; - feed->timeout = timeout; - feed->ts_type = ts_type; - feed->pes_type = pes_type; - - if (feed->buffer_size) { -#ifdef NOBUFS - feed->buffer = NULL; -#else - feed->buffer = vmalloc(feed->buffer_size); - if (!feed->buffer) { - mutex_unlock(&demux->mutex); - return -ENOMEM; - } -#endif - } + feed->cb.ts(data, size, NULL, 0, ts_feed); - feed->state = DMX_STATE_READY; - mutex_unlock(&demux->mutex); + spin_unlock(&demux->lock); return 0; } -static int dmx_ts_feed_start_filtering(struct dmx_ts_feed *ts_feed) +static int dmx_ts_set_tsp_out_format( + struct dmx_ts_feed *ts_feed, + enum dmx_tsp_format_t tsp_format) { struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; - struct dvb_demux *demux = feed->demux; - int ret; + struct dvb_demux *dvbdmx = feed->demux; - if (mutex_lock_interruptible(&demux->mutex)) - return -ERESTARTSYS; + mutex_lock(&dvbdmx->mutex); - if (feed->state != DMX_STATE_READY || feed->type != DMX_TYPE_TS) { - mutex_unlock(&demux->mutex); + if (feed->state == DMX_STATE_GO) { + mutex_unlock(&dvbdmx->mutex); return -EINVAL; } - if (!demux->start_feed) { - mutex_unlock(&demux->mutex); - return -ENODEV; - } + feed->tsp_out_format = tsp_format; + mutex_unlock(&dvbdmx->mutex); + return 0; +} - if ((ret = demux->start_feed(feed)) < 0) { - mutex_unlock(&demux->mutex); - return ret; - } +/** + * dvbdmx_ts_reset_pes_state() - Reset the current PES length and PES counters + * + * @feed: dvb demux feed object + */ +void dvbdmx_ts_reset_pes_state(struct dvb_demux_feed *feed) +{ + unsigned long flags; - spin_lock_irq(&demux->lock); - ts_feed->is_filtering = 1; - feed->state = DMX_STATE_GO; - spin_unlock_irq(&demux->lock); - mutex_unlock(&demux->mutex); + /* + * Reset PES state. + * PUSI seen indication is kept so we can get partial PES. 
+ */ + spin_lock_irqsave(&feed->demux->lock, flags); - return 0; + feed->peslen = 0; + feed->pes_tei_counter = 0; + feed->pes_cont_err_counter = 0; + feed->pes_ts_packets_num = 0; + + spin_unlock_irqrestore(&feed->demux->lock, flags); } +EXPORT_SYMBOL(dvbdmx_ts_reset_pes_state); -static int dmx_ts_feed_stop_filtering(struct dmx_ts_feed *ts_feed) +static int dvbdmx_ts_flush_buffer(struct dmx_ts_feed *ts_feed, size_t length) { struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed; struct dvb_demux *demux = feed->demux; - int ret; - - mutex_lock(&demux->mutex); + int ret = 0; - if (feed->state < DMX_STATE_GO) { - mutex_unlock(&demux->mutex); - return -EINVAL; - } + if (mutex_lock_interruptible(&demux->mutex)) + return -ERESTARTSYS; - if (!demux->stop_feed) { - mutex_unlock(&demux->mutex); - return -ENODEV; - } + dvbdmx_ts_reset_pes_state(feed); - ret = demux->stop_feed(feed); + if ((feed->ts_type & TS_DECODER) && demux->flush_decoder_buffer) + /* Call decoder specific flushing if one exists */ + ret = demux->flush_decoder_buffer(feed, length); - spin_lock_irq(&demux->lock); - ts_feed->is_filtering = 0; - feed->state = DMX_STATE_ALLOCATED; - spin_unlock_irq(&demux->lock); mutex_unlock(&demux->mutex); - return ret; } @@ -795,8 +2685,21 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx, feed->cb.ts = callback; feed->demux = demux; feed->pid = 0xffff; - feed->peslen = 0xfffa; + feed->peslen = 0; + feed->pes_tei_counter = 0; + feed->pes_ts_packets_num = 0; + feed->pes_cont_err_counter = 0; + feed->secure_mode.is_secured = 0; feed->buffer = NULL; + feed->tsp_out_format = DMX_TSP_FORMAT_188; + feed->idx_params.enable = 0; + + /* default behaviour - pass first PES data even if it is + * partial PES data from previous PES that we didn't receive its header. + * Override this to 0 in your start_feed function in order to handle + * first PES differently. 
+ */ + feed->pusi_seen = 1; (*ts_feed) = &feed->feed.ts; (*ts_feed)->parent = dmx; @@ -805,6 +2708,22 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx, (*ts_feed)->start_filtering = dmx_ts_feed_start_filtering; (*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering; (*ts_feed)->set = dmx_ts_feed_set; + (*ts_feed)->set_video_codec = dmx_ts_set_video_codec; + (*ts_feed)->set_idx_params = dmx_ts_set_idx_params; + (*ts_feed)->set_tsp_out_format = dmx_ts_set_tsp_out_format; + (*ts_feed)->get_decoder_buff_status = dmx_ts_feed_decoder_buff_status; + (*ts_feed)->reuse_decoder_buffer = dmx_ts_feed_reuse_decoder_buffer; + (*ts_feed)->data_ready_cb = dmx_ts_feed_data_ready_cb; + (*ts_feed)->notify_data_read = NULL; + (*ts_feed)->set_secure_mode = dmx_ts_set_secure_mode; + (*ts_feed)->set_cipher_ops = dmx_ts_set_cipher_ops; + (*ts_feed)->oob_command = dvbdmx_ts_feed_oob_cmd; + (*ts_feed)->get_scrambling_bits = dvbdmx_ts_get_scrambling_bits; + (*ts_feed)->ts_insertion_init = NULL; + (*ts_feed)->ts_insertion_terminate = NULL; + (*ts_feed)->ts_insertion_insert_buffer = + dvbdmx_ts_insertion_insert_buffer; + (*ts_feed)->flush_buffer = dvbdmx_ts_flush_buffer; if (!(feed->filter = dvb_dmx_filter_alloc(demux))) { feed->state = DMX_STATE_FREE; @@ -840,7 +2759,7 @@ static int dvbdmx_release_ts_feed(struct dmx_demux *dmx, feed->state = DMX_STATE_FREE; feed->filter->state = DMX_STATE_FREE; - + ts_feed->priv = NULL; dvb_demux_feed_del(feed); feed->pid = 0xffff; @@ -966,6 +2885,8 @@ static int dmx_section_feed_start_filtering(struct dmx_section_feed *feed) dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base; dvbdmxfeed->feed.sec.secbufp = 0; dvbdmxfeed->feed.sec.seclen = 0; + dvbdmxfeed->first_cc = 1; + dvbdmxfeed->scrambling_bits = 0; if (!dvbdmx->start_feed) { mutex_unlock(&dvbdmx->mutex); @@ -996,6 +2917,11 @@ static int dmx_section_feed_stop_filtering(struct dmx_section_feed *feed) mutex_lock(&dvbdmx->mutex); + if (dvbdmxfeed->state < DMX_STATE_GO) { + mutex_unlock(&dvbdmx->mutex); + return -EINVAL; + } + if (!dvbdmx->stop_feed) { mutex_unlock(&dvbdmx->mutex); return -ENODEV; @@ -1012,6 +2938,66 @@ static int dmx_section_feed_stop_filtering(struct dmx_section_feed *feed) return ret; } + +static int dmx_section_feed_data_ready_cb(struct dmx_section_feed *feed, + dmx_section_data_ready_cb callback) +{ + struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; + struct dvb_demux *dvbdmx = dvbdmxfeed->demux; + + mutex_lock(&dvbdmx->mutex); + + if (dvbdmxfeed->state == DMX_STATE_GO) { + mutex_unlock(&dvbdmx->mutex); + return -EINVAL; + } + + dvbdmxfeed->data_ready_cb.sec = callback; + + mutex_unlock(&dvbdmx->mutex); + return 0; +} + +static int dmx_section_set_secure_mode(struct dmx_section_feed *feed, + struct dmx_secure_mode *secure_mode) +{ + struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; + struct dvb_demux *dvbdmx = dvbdmxfeed->demux; + + mutex_lock(&dvbdmx->mutex); + + if (dvbdmxfeed->state == DMX_STATE_GO) { + mutex_unlock(&dvbdmx->mutex); + return -EBUSY; + } + + dvbdmxfeed->secure_mode = *secure_mode; + mutex_unlock(&dvbdmx->mutex); + return 0; +} + +static int dmx_section_set_cipher_ops(struct dmx_section_feed *feed, + struct dmx_cipher_operations *cipher_ops) +{ + struct dvb_demux_feed *dvbdmxfeed = (struct dvb_demux_feed *)feed; + struct dvb_demux *dvbdmx = dvbdmxfeed->demux; + int ret = 0; + + if (mutex_lock_interruptible(&dvbdmx->mutex)) + return -ERESTARTSYS; + + if ((dvbdmxfeed->state == DMX_STATE_GO) && + dvbdmx->set_cipher_op) { + ret = 
dvbdmx->set_cipher_op(dvbdmxfeed, cipher_ops); + } + + if (!ret) + dvbdmxfeed->cipher_ops = *cipher_ops; + + mutex_unlock(&dvbdmx->mutex); + return ret; +} + static int dmx_section_feed_release_filter(struct dmx_section_feed *feed, struct dmx_section_filter *filter) { @@ -1045,12 +3031,82 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed, f->next = f->next->next; } + filter->priv = NULL; dvbdmxfilter->state = DMX_STATE_FREE; spin_unlock_irq(&dvbdmx->lock); mutex_unlock(&dvbdmx->mutex); return 0; } +static int dvbdmx_section_feed_oob_cmd(struct dmx_section_feed *section_feed, + struct dmx_oob_command *cmd) +{ + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)section_feed; + struct dvb_demux *dvbdmx = feed->demux; + struct dmx_data_ready data; + int ret = 0; + + data.data_length = 0; + + mutex_lock(&dvbdmx->mutex); + + if (feed->state != DMX_STATE_GO) { + mutex_unlock(&dvbdmx->mutex); + return -EINVAL; + } + + /* Secure section feeds are handled by the plug-in */ + if (feed->secure_mode.is_secured) { + if (feed->demux->oob_command) + ret = feed->demux->oob_command(feed, cmd); + else + ret = 0; + + mutex_unlock(&dvbdmx->mutex); + return ret; + } + + switch (cmd->type) { + case DMX_OOB_CMD_EOS: + data.status = DMX_OK_EOS; + break; + + case DMX_OOB_CMD_MARKER: + data.status = DMX_OK_MARKER; + data.marker.id = cmd->params.marker.id; + break; + + default: + ret = -EINVAL; + break; + } + + if (!ret) + ret = dvb_dmx_notify_section_event(feed, &data, 1); + + mutex_unlock(&dvbdmx->mutex); + return ret; +} + +static int dvbdmx_section_get_scrambling_bits( + struct dmx_section_feed *section_feed, u8 *value) +{ + struct dvb_demux_feed *feed = (struct dvb_demux_feed *)section_feed; + struct dvb_demux *demux = feed->demux; + + spin_lock(&demux->lock); + + if (!section_feed->is_filtering) { + spin_unlock(&demux->lock); + return -EINVAL; + } + + *value = feed->scrambling_bits; + spin_unlock(&demux->lock); + + return 0; +} + static int dvbdmx_allocate_section_feed(struct dmx_demux *demux, struct dmx_section_feed **feed, dmx_section_cb callback) @@ -1070,11 +3126,14 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux, dvbdmxfeed->cb.sec = callback; dvbdmxfeed->demux = dvbdmx; dvbdmxfeed->pid = 0xffff; + dvbdmxfeed->secure_mode.is_secured = 0; + dvbdmxfeed->tsp_out_format = DMX_TSP_FORMAT_188; dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base; dvbdmxfeed->feed.sec.secbufp = dvbdmxfeed->feed.sec.seclen = 0; dvbdmxfeed->feed.sec.tsfeedp = 0; dvbdmxfeed->filter = NULL; dvbdmxfeed->buffer = NULL; + dvbdmxfeed->idx_params.enable = 0; (*feed) = &dvbdmxfeed->feed.sec; (*feed)->is_filtering = 0; @@ -1086,6 +3145,13 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux, (*feed)->start_filtering = dmx_section_feed_start_filtering; (*feed)->stop_filtering = dmx_section_feed_stop_filtering; (*feed)->release_filter = dmx_section_feed_release_filter; + (*feed)->data_ready_cb = dmx_section_feed_data_ready_cb; + (*feed)->notify_data_read = NULL; + (*feed)->set_secure_mode = dmx_section_set_secure_mode; + (*feed)->set_cipher_ops = dmx_section_set_cipher_ops; + (*feed)->oob_command = dvbdmx_section_feed_oob_cmd; + (*feed)->get_scrambling_bits = dvbdmx_section_get_scrambling_bits; + (*feed)->flush_buffer = NULL; mutex_unlock(&dvbdmx->mutex); return 0; @@ -1108,7 +3174,7 @@ static int dvbdmx_release_section_feed(struct dmx_demux *demux, dvbdmxfeed->buffer = NULL; #endif dvbdmxfeed->state = DMX_STATE_FREE; - + feed->priv = NULL; 
dvb_demux_feed_del(dvbdmxfeed); dvbdmxfeed->pid = 0xffff; @@ -1144,23 +3210,18 @@ static int dvbdmx_close(struct dmx_demux *demux) return 0; } -static int dvbdmx_write(struct dmx_demux *demux, const char __user *buf, size_t count) +static int dvbdmx_write(struct dmx_demux *demux, const char *buf, size_t count) { struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - void *p; - if ((!demux->frontend) || (demux->frontend->source != DMX_MEMORY_FE)) + if (!demux->frontend || !buf || demux->dvr_input_protected || + (demux->frontend->source != DMX_MEMORY_FE)) return -EINVAL; - - p = memdup_user(buf, count); - if (IS_ERR(p)) - return PTR_ERR(p); - if (mutex_lock_interruptible(&dvbdemux->mutex)) { - kfree(p); + if (mutex_lock_interruptible(&dvbdemux->mutex)) return -ERESTARTSYS; - } - dvb_dmx_swfilter(dvbdemux, p, count); - kfree(p); + + dvb_dmx_swfilter_format(dvbdemux, buf, count, dvbdemux->tsp_format); + mutex_unlock(&dvbdemux->mutex); if (signal_pending(current)) @@ -1168,6 +3229,40 @@ static int dvbdmx_write(struct dmx_demux *demux, const char __user *buf, size_t return count; } +static int dvbdmx_write_cancel(struct dmx_demux *demux) +{ + struct dvb_demux *dvbdmx = (struct dvb_demux *)demux; + + spin_lock_irq(&dvbdmx->lock); + + /* cancel any pending wait for decoder's buffers */ + dvbdmx->sw_filter_abort = 1; + dvbdmx->tsbufp = 0; + dvb_dmx_configure_decoder_fullness(dvbdmx, 0); + + spin_unlock_irq(&dvbdmx->lock); + + return 0; +} + +static int dvbdmx_set_playback_mode(struct dmx_demux *demux, + enum dmx_playback_mode_t mode, + dmx_ts_fullness ts_fullness_callback, + dmx_section_fullness sec_fullness_callback) +{ + struct dvb_demux *dvbdmx = (struct dvb_demux *)demux; + + mutex_lock(&dvbdmx->mutex); + + dvbdmx->playback_mode = mode; + dvbdmx->buffer_ctrl.ts = ts_fullness_callback; + dvbdmx->buffer_ctrl.sec = sec_fullness_callback; + + mutex_unlock(&dvbdmx->mutex); + + return 0; +} + static int dvbdmx_add_frontend(struct dmx_demux *demux, struct dmx_frontend *frontend) { @@ -1225,7 +3320,7 @@ static int dvbdmx_disconnect_frontend(struct dmx_demux *demux) struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; mutex_lock(&dvbdemux->mutex); - + dvbdemux->sw_filter_abort = 0; demux->frontend = NULL; mutex_unlock(&dvbdemux->mutex); return 0; @@ -1235,7 +3330,50 @@ static int dvbdmx_get_pes_pids(struct dmx_demux *demux, u16 * pids) { struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; - memcpy(pids, dvbdemux->pids, 5 * sizeof(u16)); + /* 4 Demux Instances each with group of 5 pids */ + memcpy(pids, dvbdemux->pids, DMX_PES_OTHER*sizeof(u16)); + return 0; +} + +static int dvbdmx_get_tsp_size(struct dmx_demux *demux) +{ + int tsp_size; + struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; + + mutex_lock(&dvbdemux->mutex); + tsp_size = dvbdemux->ts_packet_size; + mutex_unlock(&dvbdemux->mutex); + + return tsp_size; +} + +static int dvbdmx_set_tsp_format( + struct dmx_demux *demux, + enum dmx_tsp_format_t tsp_format) +{ + struct dvb_demux *dvbdemux = (struct dvb_demux *)demux; + + if ((tsp_format > DMX_TSP_FORMAT_204) || + (tsp_format < DMX_TSP_FORMAT_188)) + return -EINVAL; + + mutex_lock(&dvbdemux->mutex); + + dvbdemux->tsp_format = tsp_format; + switch (tsp_format) { + case DMX_TSP_FORMAT_188: + dvbdemux->ts_packet_size = 188; + break; + case DMX_TSP_FORMAT_192_TAIL: + case DMX_TSP_FORMAT_192_HEAD: + dvbdemux->ts_packet_size = 192; + break; + case DMX_TSP_FORMAT_204: + dvbdemux->ts_packet_size = 204; + break; + } + + mutex_unlock(&dvbdemux->mutex); return 0; } @@ -1257,13 +3395,50 @@ int 
dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->filter = NULL; return -ENOMEM; } + + dvbdemux->rec_info_pool = vmalloc(dvbdemux->feednum * + sizeof(struct dvb_demux_rec_info)); + if (!dvbdemux->rec_info_pool) { + vfree(dvbdemux->feed); + vfree(dvbdemux->filter); + dvbdemux->feed = NULL; + dvbdemux->filter = NULL; + return -ENOMEM; + } + + dvbdemux->sw_filter_abort = 0; + dvbdemux->total_process_time = 0; + dvbdemux->total_crc_time = 0; + snprintf(dvbdemux->alias, + MAX_DVB_DEMUX_NAME_LEN, + "demux%d", + dvb_demux_index++); + + dvbdemux->dmx.debugfs_demux_dir = + debugfs_create_dir(dvbdemux->alias, NULL); + + if (dvbdemux->dmx.debugfs_demux_dir != NULL) { + debugfs_create_u32( + "total_processing_time", 0664, + dvbdemux->dmx.debugfs_demux_dir, + &dvbdemux->total_process_time); + + debugfs_create_u32( + "total_crc_time", 0664, + dvbdemux->dmx.debugfs_demux_dir, + &dvbdemux->total_crc_time); + } + for (i = 0; i < dvbdemux->filternum; i++) { dvbdemux->filter[i].state = DMX_STATE_FREE; dvbdemux->filter[i].index = i; } + for (i = 0; i < dvbdemux->feednum; i++) { dvbdemux->feed[i].state = DMX_STATE_FREE; dvbdemux->feed[i].index = i; + + dvbdemux->rec_info_pool[i].ref_count = 0; } dvbdemux->cnt_storage = vmalloc(MAX_PID + 1); @@ -1283,6 +3458,9 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dvbdemux->recording = 0; dvbdemux->tsbufp = 0; + dvbdemux->tsp_format = DMX_TSP_FORMAT_188; + dvbdemux->ts_packet_size = 188; + if (!dvbdemux->check_crc32) dvbdemux->check_crc32 = dvb_dmx_crc32; @@ -1294,10 +3472,14 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dmx->open = dvbdmx_open; dmx->close = dvbdmx_close; dmx->write = dvbdmx_write; + dmx->write_cancel = dvbdmx_write_cancel; + dmx->set_playback_mode = dvbdmx_set_playback_mode; dmx->allocate_ts_feed = dvbdmx_allocate_ts_feed; dmx->release_ts_feed = dvbdmx_release_ts_feed; dmx->allocate_section_feed = dvbdmx_allocate_section_feed; dmx->release_section_feed = dvbdmx_release_section_feed; + dmx->map_buffer = NULL; + dmx->unmap_buffer = NULL; dmx->add_frontend = dvbdmx_add_frontend; dmx->remove_frontend = dvbdmx_remove_frontend; @@ -1306,6 +3488,9 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux) dmx->disconnect_frontend = dvbdmx_disconnect_frontend; dmx->get_pes_pids = dvbdmx_get_pes_pids; + dmx->set_tsp_format = dvbdmx_set_tsp_format; + dmx->get_tsp_size = dvbdmx_get_tsp_size; + mutex_init(&dvbdemux->mutex); spin_lock_init(&dvbdemux->lock); @@ -1316,9 +3501,14 @@ EXPORT_SYMBOL(dvb_dmx_init); void dvb_dmx_release(struct dvb_demux *dvbdemux) { + if (dvbdemux->dmx.debugfs_demux_dir != NULL) + debugfs_remove_recursive(dvbdemux->dmx.debugfs_demux_dir); + + dvb_demux_index--; vfree(dvbdemux->cnt_storage); vfree(dvbdemux->filter); vfree(dvbdemux->feed); + vfree(dvbdemux->rec_info_pool); } EXPORT_SYMBOL(dvb_dmx_release); diff --git a/drivers/media/dvb-core/dvb_demux.h b/drivers/media/dvb-core/dvb_demux.h index 5ed3cab4ad28..7ba053d2badf 100644 --- a/drivers/media/dvb-core/dvb_demux.h +++ b/drivers/media/dvb-core/dvb_demux.h @@ -27,6 +27,7 @@ #include #include #include +#include #include "demux.h" @@ -44,6 +45,8 @@ #define MAX_PID 0x1fff +#define TIMESTAMP_LEN 4 + #define SPEED_PKTS_INTERVAL 50000 struct dvb_demux_filter { @@ -64,6 +67,92 @@ struct dvb_demux_filter { #define DMX_FEED_ENTRY(pos) list_entry(pos, struct dvb_demux_feed, list_head) + +struct dmx_index_entry { + struct dmx_index_event_info event; + struct list_head next; +}; + +#define DMX_IDX_EVENT_QUEUE_SIZE DMX_EVENT_QUEUE_SIZE + +struct dvb_demux_rec_info { + /* Reference counter for number of 
feeds using this information */ + int ref_count; + + /* Counter for number of TS packets output to recording buffer */ + u64 ts_output_count; + + /* Indexing information */ + struct { + /* + * Minimum TS packet number encountered in recording filter + * among all feeds that search for video patterns + */ + u64 min_pattern_tsp_num; + + /* Number of indexing-enabled feeds */ + u8 indexing_feeds_num; + + /* Number of feeds with video pattern search request */ + u8 pattern_search_feeds_num; + + /* Index entries pool */ + struct dmx_index_entry events[DMX_IDX_EVENT_QUEUE_SIZE]; + + /* List of free entries that can be used for new index events */ + struct list_head free_list; + + /* List holding ready index entries not notified to user yet */ + struct list_head ready_list; + } idx_info; +}; + +#define DVB_DMX_MAX_PATTERN_LEN 6 +struct dvb_dmx_video_patterns { + /* the byte pattern to look for */ + u8 pattern[DVB_DMX_MAX_PATTERN_LEN]; + + /* the byte mask to use (same length as pattern) */ + u8 mask[DVB_DMX_MAX_PATTERN_LEN]; + + /* the length of the pattern, in bytes */ + size_t size; + + /* the type of the pattern. One of DMX_IDX_* definitions */ + u64 type; +}; + +#define DVB_DMX_MAX_FOUND_PATTERNS 20 +#define DVB_DMX_MAX_SEARCH_PATTERN_NUM 20 +struct dvb_dmx_video_prefix_size_masks { + /* + * a bit mask (per pattern) of possible prefix sizes to use + * when searching for a pattern that started in the previous TS packet. + * Updated by dvb_dmx_video_pattern_search for use in the next lookup. + */ + u32 size_mask[DVB_DMX_MAX_FOUND_PATTERNS]; +}; + +struct dvb_dmx_video_patterns_results { + struct { + /* + * The offset in the buffer where the pattern was found. + * If a pattern is found using a prefix (i.e. started on the + * previous buffer), offset is zero. + */ + u32 offset; + + /* + * The type of the pattern found. + * One of DMX_IDX_* definitions. 
+ */ + u64 type; + + /* The prefix size that was used to find this pattern */ + u32 used_prefix_size; + } info[DVB_DMX_MAX_FOUND_PATTERNS]; +}; + struct dvb_demux_feed { union { struct dmx_ts_feed ts; @@ -75,6 +164,11 @@ struct dvb_demux_feed { dmx_section_cb sec; } cb; + union { + dmx_ts_data_ready_cb ts; + dmx_section_data_ready_cb sec; + } data_ready_cb; + struct dvb_demux *demux; void *priv; int type; @@ -82,6 +176,9 @@ struct dvb_demux_feed { u16 pid; u8 *buffer; int buffer_size; + enum dmx_tsp_format_t tsp_out_format; + struct dmx_secure_mode secure_mode; + struct dmx_cipher_operations cipher_ops; ktime_t timeout; struct dvb_demux_filter *filter; @@ -90,12 +187,34 @@ struct dvb_demux_feed { enum dmx_ts_pes pes_type; int cc; + int first_cc; int pusi_seen; /* prevents feeding of garbage from previous section */ + u8 scrambling_bits; + + struct dvb_demux_rec_info *rec_info; + u64 prev_tsp_num; + u64 prev_stc; + u64 curr_pusi_tsp_num; + u64 prev_pusi_tsp_num; + int prev_frame_valid; + u64 prev_frame_type; + int first_frame_in_seq; + int first_frame_in_seq_notified; + u64 last_pattern_tsp_num; + int pattern_num; +const struct dvb_dmx_video_patterns *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM]; + struct dvb_dmx_video_prefix_size_masks prefix_size; u16 peslen; + u32 pes_tei_counter; + u32 pes_cont_err_counter; + u32 pes_ts_packets_num; struct list_head list_head; unsigned int index; /* a unique index for each feed (can be used as hardware pid filter index) */ + + enum dmx_video_codec video_codec; + struct dmx_indexing_params idx_params; }; struct dvb_demux { @@ -107,10 +226,27 @@ struct dvb_demux { int (*stop_feed)(struct dvb_demux_feed *feed); int (*write_to_decoder)(struct dvb_demux_feed *feed, const u8 *buf, size_t len); + int (*decoder_fullness_init)(struct dvb_demux_feed *feed); + int (*decoder_fullness_wait)(struct dvb_demux_feed *feed, + size_t required_space); + int (*decoder_fullness_abort)(struct dvb_demux_feed *feed); + int (*decoder_buffer_status)(struct dvb_demux_feed *feed, + struct dmx_buffer_status *dmx_buffer_status); + int (*reuse_decoder_buffer)(struct dvb_demux_feed *feed, + int cookie); + int (*set_cipher_op)(struct dvb_demux_feed *feed, + struct dmx_cipher_operations *cipher_ops); u32 (*check_crc32)(struct dvb_demux_feed *feed, const u8 *buf, size_t len); void (*memcopy)(struct dvb_demux_feed *feed, u8 *dst, const u8 *src, size_t len); + int (*oob_command)(struct dvb_demux_feed *feed, + struct dmx_oob_command *cmd); + void (*convert_ts)(struct dvb_demux_feed *feed, + const u8 timestamp[TIMESTAMP_LEN], + u64 *timestampIn27Mhz); + int (*set_indexing)(struct dvb_demux_feed *feed); + int (*flush_decoder_buffer)(struct dvb_demux_feed *feed, size_t length); int users; #define MAX_DVB_DEMUX_USERS 10 @@ -136,10 +272,35 @@ struct dvb_demux { ktime_t speed_last_time; /* for TS speed check */ uint32_t speed_pkts_cnt; /* for TS speed check */ + + enum dmx_tsp_format_t tsp_format; + size_t ts_packet_size; + + enum dmx_playback_mode_t playback_mode; + int sw_filter_abort; + + struct { + dmx_ts_fullness ts; + dmx_section_fullness sec; + } buffer_ctrl; + + struct dvb_demux_rec_info *rec_info_pool; + + /* + * the following is used for debugfs exposing info + * about dvb demux performance. 
+ */ +#define MAX_DVB_DEMUX_NAME_LEN 10 + char alias[MAX_DVB_DEMUX_NAME_LEN]; + + u32 total_process_time; + u32 total_crc_time; }; int dvb_dmx_init(struct dvb_demux *dvbdemux); void dvb_dmx_release(struct dvb_demux *dvbdemux); +int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed, const u8 *buf, + int should_lock); void dvb_dmx_swfilter_packets(struct dvb_demux *dvbdmx, const u8 *buf, size_t count); void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count); @@ -147,5 +308,141 @@ void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count); void dvb_dmx_swfilter_raw(struct dvb_demux *demux, const u8 *buf, size_t count); +void dvb_dmx_swfilter_format( + struct dvb_demux *demux, const u8 *buf, + size_t count, + enum dmx_tsp_format_t tsp_format); +void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf, + const u8 timestamp[TIMESTAMP_LEN]); +const struct dvb_dmx_video_patterns *dvb_dmx_get_pattern(u64 dmx_idx_pattern); +int dvb_dmx_video_pattern_search( + const struct dvb_dmx_video_patterns + *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM], + int patterns_num, + const u8 *buf, size_t buf_size, + struct dvb_dmx_video_prefix_size_masks *prefix_size_masks, + struct dvb_dmx_video_patterns_results *results); +int dvb_demux_push_idx_event(struct dvb_demux_feed *feed, + struct dmx_index_event_info *idx_event, int should_lock); +void dvb_dmx_process_idx_pattern(struct dvb_demux_feed *feed, + struct dvb_dmx_video_patterns_results *patterns, int pattern, + u64 curr_stc, u64 prev_stc, + u64 curr_match_tsp, u64 prev_match_tsp, + u64 curr_pusi_tsp, u64 prev_pusi_tsp); +void dvb_dmx_notify_idx_events(struct dvb_demux_feed *feed, int should_lock); +int dvb_dmx_notify_section_event(struct dvb_demux_feed *feed, + struct dmx_data_ready *event, int should_lock); +void dvbdmx_ts_reset_pes_state(struct dvb_demux_feed *feed); + +/** + * dvb_dmx_is_video_feed - Returns whether the PES feed + * is video one. + * + * @feed: The feed to be checked. + * + * Return 1 if feed is video feed, 0 otherwise. + */ +static inline int dvb_dmx_is_video_feed(struct dvb_demux_feed *feed) +{ + if (feed->type != DMX_TYPE_TS) + return 0; + + if (feed->ts_type & (~TS_DECODER)) + return 0; + + if ((feed->pes_type == DMX_PES_VIDEO0) || + (feed->pes_type == DMX_PES_VIDEO1) || + (feed->pes_type == DMX_PES_VIDEO2) || + (feed->pes_type == DMX_PES_VIDEO3)) + return 1; + + return 0; +} + +/** + * dvb_dmx_is_audio_feed - Returns whether the PES feed + * is audio one. + * + * @feed: The feed to be checked. + * + * Return 1 if feed is audio feed, 0 otherwise. + */ +static inline int dvb_dmx_is_audio_feed(struct dvb_demux_feed *feed) +{ + if (feed->type != DMX_TYPE_TS) + return 0; + + if (feed->ts_type & (~TS_DECODER)) + return 0; + + if ((feed->pes_type == DMX_PES_AUDIO0) || + (feed->pes_type == DMX_PES_AUDIO1) || + (feed->pes_type == DMX_PES_AUDIO2) || + (feed->pes_type == DMX_PES_AUDIO3)) + return 1; + + return 0; +} + +/** + * dvb_dmx_is_pcr_feed - Returns whether the PES feed + * is PCR one. + * + * @feed: The feed to be checked. + * + * Return 1 if feed is PCR feed, 0 otherwise. 
+ */ +static inline int dvb_dmx_is_pcr_feed(struct dvb_demux_feed *feed) +{ + if (feed->type != DMX_TYPE_TS) + return 0; + + if (feed->ts_type & (~TS_DECODER)) + return 0; + + if ((feed->pes_type == DMX_PES_PCR0) || + (feed->pes_type == DMX_PES_PCR1) || + (feed->pes_type == DMX_PES_PCR2) || + (feed->pes_type == DMX_PES_PCR3)) + return 1; + + return 0; +} + +/** + * dvb_dmx_is_sec_feed - Returns whether this is a section feed + * + * @feed: The feed to be checked. + * + * Return 1 if feed is a section feed, 0 otherwise. + */ +static inline int dvb_dmx_is_sec_feed(struct dvb_demux_feed *feed) +{ + return (feed->type == DMX_TYPE_SEC); +} + +/** + * dvb_dmx_is_rec_feed - Returns whether this is a recording feed + * + * @feed: The feed to be checked. + * + * Return 1 if feed is recording feed, 0 otherwise. + */ +static inline int dvb_dmx_is_rec_feed(struct dvb_demux_feed *feed) +{ + if (feed->type != DMX_TYPE_TS) + return 0; + + if (feed->ts_type & (TS_DECODER | TS_PAYLOAD_ONLY)) + return 0; + + return 1; +} + +static inline u16 ts_pid(const u8 *buf) +{ + return ((buf[1] & 0x1f) << 8) + buf[2]; +} + #endif /* _DVB_DEMUX_H_ */ diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c index 9914f69a4a02..efb7d5284946 100644 --- a/drivers/media/dvb-core/dvb_net.c +++ b/drivers/media/dvb-core/dvb_net.c @@ -997,7 +997,7 @@ static int dvb_net_feed_start(struct net_device *dev) netdev_dbg(dev, "start filtering\n"); priv->secfeed->start_filtering(priv->secfeed); } else if (priv->feedtype == DVB_NET_FEEDTYPE_ULE) { - ktime_t timeout = ns_to_ktime(10 * NSEC_PER_MSEC); + ktime_t timeout = ktime_set(0, 10*NSEC_PER_MSEC); // 10 msec /* we have payloads encapsulated in TS */ netdev_dbg(dev, "alloc tsfeed\n"); diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c index 7df7fb3738a0..d4514c1833f8 100644 --- a/drivers/media/dvb-core/dvb_ringbuffer.c +++ b/drivers/media/dvb-core/dvb_ringbuffer.c @@ -37,6 +37,8 @@ #define PKT_READY 0 #define PKT_DISPOSED 1 +#define PKT_PENDING 2 + void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len) @@ -209,18 +211,19 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t } ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, - const u8 __user *buf, size_t len) + const u8 __user *buf, size_t len) { - int status; size_t todo = len; size_t split; + ssize_t oldpwrite = rbuf->pwrite; - split = (rbuf->pwrite + len > rbuf->size) ? rbuf->size - rbuf->pwrite : 0; + split = (rbuf->pwrite + len > rbuf->size) ? 
+ rbuf->size - rbuf->pwrite : + 0; if (split > 0) { - status = copy_from_user(rbuf->data+rbuf->pwrite, buf, split); - if (status) - return len - todo; + if (copy_from_user(rbuf->data + rbuf->pwrite, buf, split)) + return -EFAULT; buf += split; todo -= split; /* smp_store_release() for write pointer update to ensure that @@ -230,9 +233,12 @@ ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, */ smp_store_release(&rbuf->pwrite, 0); } - status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo); - if (status) - return len - todo; + + if (copy_from_user(rbuf->data + rbuf->pwrite, buf, todo)) { + /* smp_store_release() for write pointer update */ + smp_store_release(&rbuf->pwrite, oldpwrite); + return -EFAULT; + } /* smp_store_release() for write pointer update, see above */ smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size); @@ -253,6 +259,31 @@ ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf, size_t le return status; } +ssize_t dvb_ringbuffer_pkt_start(struct dvb_ringbuffer *rbuf, size_t len) +{ + ssize_t oldpwrite = rbuf->pwrite; + + DVB_RINGBUFFER_WRITE_BYTE(rbuf, len >> 8); + DVB_RINGBUFFER_WRITE_BYTE(rbuf, len & 0xff); + DVB_RINGBUFFER_WRITE_BYTE(rbuf, PKT_PENDING); + + return oldpwrite; +} +EXPORT_SYMBOL(dvb_ringbuffer_pkt_start); + +int dvb_ringbuffer_pkt_close(struct dvb_ringbuffer *rbuf, ssize_t idx) +{ + idx = (idx + 2) % rbuf->size; + + if (rbuf->data[idx] != PKT_PENDING) + return -EINVAL; + + rbuf->data[idx] = PKT_READY; + + return 0; +} +EXPORT_SYMBOL(dvb_ringbuffer_pkt_close); + ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx, int offset, u8 __user *buf, size_t len) { @@ -260,6 +291,9 @@ ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx, size_t split; size_t pktlen; + if (DVB_RINGBUFFER_PEEK(rbuf, (idx+2)) != PKT_READY) + return -EINVAL; + pktlen = rbuf->data[idx] << 8; pktlen |= rbuf->data[(idx + 1) % rbuf->size]; if (offset > pktlen) return -EINVAL; @@ -280,6 +314,7 @@ ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx, return len; } +EXPORT_SYMBOL(dvb_ringbuffer_pkt_read_user); ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx, int offset, u8* buf, size_t len) @@ -288,6 +323,9 @@ ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx, size_t split; size_t pktlen; + if (rbuf->data[(idx + 2) % rbuf->size] != PKT_READY) + return -EINVAL; + pktlen = rbuf->data[idx] << 8; pktlen |= rbuf->data[(idx + 1) % rbuf->size]; if (offset > pktlen) return -EINVAL; @@ -305,6 +343,7 @@ ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx, memcpy(buf, rbuf->data+idx, todo); return len; } +EXPORT_SYMBOL(dvb_ringbuffer_pkt_read); void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx) { @@ -324,6 +363,7 @@ void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx) } } } +EXPORT_SYMBOL(dvb_ringbuffer_pkt_dispose); ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen) { @@ -339,7 +379,10 @@ ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size; } - consumed = (idx - rbuf->pread) % rbuf->size; + if (idx >= rbuf->pread) + consumed = idx - rbuf->pread; + else + consumed = rbuf->size - (rbuf->pread - idx); while((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) { @@ -352,6 +395,9 @@ ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer 
*rbuf, size_t idx, size_t*
 return idx;
 }
+ if (curpktstatus == PKT_PENDING)
+ return -EFAULT;
+
 consumed += curpktlen + DVB_RINGBUFFER_PKTHDRSIZE;
 idx = (idx + curpktlen + DVB_RINGBUFFER_PKTHDRSIZE) % rbuf->size;
 }
@@ -359,8 +405,7 @@ ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t*
 // no packets available
 return -1;
 }
-
-
+EXPORT_SYMBOL(dvb_ringbuffer_pkt_next);
 EXPORT_SYMBOL(dvb_ringbuffer_init);
 EXPORT_SYMBOL(dvb_ringbuffer_empty);
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.h b/drivers/media/dvb-core/dvb_ringbuffer.h
index bbe94873d44d..900630ed8f7f 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.h
+++ b/drivers/media/dvb-core/dvb_ringbuffer.h
@@ -124,6 +124,9 @@ extern void dvb_ringbuffer_flush_spinlock_wakeup(struct dvb_ringbuffer *rbuf);
 */
 #define DVB_RINGBUFFER_PEEK(rbuf, offs) \
 ((rbuf)->data[((rbuf)->pread + (offs)) % (rbuf)->size])
+#define DVB_RINGBUFFER_PUSH(rbuf, num) \
+ ((rbuf)->pwrite = (((rbuf)->pwrite+(num))%(rbuf)->size))
+
 /**
 * DVB_RINGBUFFER_SKIP - advance read ptr by @num bytes
@@ -274,7 +277,35 @@ extern void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx);
 * in bytes.
 * returns Packet index (if >=0), or -1 if no packets available.
 */
-extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf,
- size_t idx, size_t *pktlen);
+extern ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx,
+ size_t *pktlen);
+
+
+/**
+ * Start a new packet that will be written directly by the user to the packet
+ * buffer.
+ * The function only writes the header of the packet into the packet buffer,
+ * and the packet is in pending state (can't be read by the reader) until it is
+ * closed using dvb_ringbuffer_pkt_close. You must write the data into the
+ * packet buffer using dvb_ringbuffer_write followed by
+ * dvb_ringbuffer_pkt_close.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @len: Size of the packet's data
+ * returns Index of the packet's header that was started.
+ */
+extern ssize_t dvb_ringbuffer_pkt_start(struct dvb_ringbuffer *rbuf,
+ size_t len);
+
+/**
+ * Close a packet that was started using dvb_ringbuffer_pkt_start.
+ * The packet will be marked as ready to be read.
+ *
+ * @rbuf: Ringbuffer concerned.
+ * @idx: Packet index that was returned by dvb_ringbuffer_pkt_start + * returns error status, -EINVAL if the provided index is invalid + */ +extern int dvb_ringbuffer_pkt_close(struct dvb_ringbuffer *rbuf, ssize_t idx); + #endif /* _DVB_RINGBUFFER_H_ */ diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c index e6e5e90d8d95..b36594480d29 100644 --- a/drivers/media/tuners/xc5000.c +++ b/drivers/media/tuners/xc5000.c @@ -931,7 +931,7 @@ static void xc5000_config_tv(struct dvb_frontend *fe, static int xc5000_set_tv_freq(struct dvb_frontend *fe) { struct xc5000_priv *priv = fe->tuner_priv; - u16 pll_lock_status; + u16 pll_lock_status = 0; int ret; tune_channel: @@ -1040,7 +1040,6 @@ static int xc5000_set_radio_freq(struct dvb_frontend *fe) return 0; } - static int xc5000_set_params(struct dvb_frontend *fe) { struct xc5000_priv *priv = fe->tuner_priv; @@ -1133,7 +1132,7 @@ static int xc_load_fw_and_init_tuner(struct dvb_frontend *fe, int force) const struct xc5000_fw_cfg *desired_fw = xc5000_assign_firmware(priv->chip_id); const struct firmware *fw; int ret, i; - u16 pll_lock_status; + u16 pll_lock_status = 0; u16 fw_ck; cancel_delayed_work(&priv->timer_sleep); diff --git a/include/uapi/linux/dvb/dmx.h b/include/uapi/linux/dvb/dmx.h index 427e4899ed69..175534a26792 100644 --- a/include/uapi/linux/dvb/dmx.h +++ b/include/uapi/linux/dvb/dmx.h @@ -32,6 +32,11 @@ #define DMX_FILTER_SIZE 16 +/* Min recording chunk upon which event is generated */ +#define DMX_REC_BUFF_CHUNK_MIN_SIZE (100*188) + +#define DMX_MAX_DECODER_BUFFER_NUM (32) + enum dmx_output { DMX_OUT_DECODER, /* Streaming directly to decoder. */ @@ -108,6 +113,44 @@ struct dmx_sct_filter_params #define DMX_KERNEL_CLIENT 0x8000 }; +enum dmx_video_codec { + DMX_VIDEO_CODEC_MPEG2, + DMX_VIDEO_CODEC_H264, + DMX_VIDEO_CODEC_VC1 +}; + +/* Index entries types */ +#define DMX_IDX_RAI 0x00000001 +#define DMX_IDX_PUSI 0x00000002 +#define DMX_IDX_MPEG_SEQ_HEADER 0x00000004 +#define DMX_IDX_MPEG_GOP 0x00000008 +#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_START 0x00000010 +#define DMX_IDX_MPEG_FIRST_SEQ_FRAME_END 0x00000020 +#define DMX_IDX_MPEG_I_FRAME_START 0x00000040 +#define DMX_IDX_MPEG_I_FRAME_END 0x00000080 +#define DMX_IDX_MPEG_P_FRAME_START 0x00000100 +#define DMX_IDX_MPEG_P_FRAME_END 0x00000200 +#define DMX_IDX_MPEG_B_FRAME_START 0x00000400 +#define DMX_IDX_MPEG_B_FRAME_END 0x00000800 +#define DMX_IDX_H264_SPS 0x00001000 +#define DMX_IDX_H264_PPS 0x00002000 +#define DMX_IDX_H264_FIRST_SPS_FRAME_START 0x00004000 +#define DMX_IDX_H264_FIRST_SPS_FRAME_END 0x00008000 +#define DMX_IDX_H264_IDR_START 0x00010000 +#define DMX_IDX_H264_IDR_END 0x00020000 +#define DMX_IDX_H264_NON_IDR_START 0x00040000 +#define DMX_IDX_H264_NON_IDR_END 0x00080000 +#define DMX_IDX_VC1_SEQ_HEADER 0x00100000 +#define DMX_IDX_VC1_ENTRY_POINT 0x00200000 +#define DMX_IDX_VC1_FIRST_SEQ_FRAME_START 0x00400000 +#define DMX_IDX_VC1_FIRST_SEQ_FRAME_END 0x00800000 +#define DMX_IDX_VC1_FRAME_START 0x01000000 +#define DMX_IDX_VC1_FRAME_END 0x02000000 +#define DMX_IDX_H264_ACCESS_UNIT_DEL 0x04000000 +#define DMX_IDX_H264_SEI 0x08000000 +#define DMX_IDX_H264_IDR_ISLICE_START 0x10000000 +#define DMX_IDX_H264_NON_IDR_PSLICE_START 0x20000000 +#define DMX_IDX_H264_NON_IDR_BSLICE_START 0x40000000 struct dmx_pes_filter_params { @@ -116,11 +159,457 @@ struct dmx_pes_filter_params dmx_output_t output; dmx_pes_type_t pes_type; __u32 flags; + + /* + * The following configures when the event + * DMX_EVENT_NEW_REC_CHUNK will be triggered. 
+ * When new recorded data is received with size + * equal or larger than this value a new event + * will be triggered. This is relevant when + * output is DMX_OUT_TS_TAP or DMX_OUT_TSDEMUX_TAP, + * size must be at least DMX_REC_BUFF_CHUNK_MIN_SIZE + * and smaller than buffer size. + */ + __u32 rec_chunk_size; + + enum dmx_video_codec video_codec; +}; + +struct dmx_buffer_status { + /* size of buffer in bytes */ + unsigned int size; + + /* fullness of buffer in bytes */ + unsigned int fullness; + + /* + * How many bytes are free + * It's the same as: size-fullness-1 + */ + unsigned int free_bytes; + + /* read pointer offset in bytes */ + unsigned int read_offset; + + /* write pointer offset in bytes */ + unsigned int write_offset; + + /* non-zero if data error occurred */ + int error; +}; + +/* Events associated with each demux filter */ +enum dmx_event { + /* New PES packet is ready to be consumed */ + DMX_EVENT_NEW_PES = 0x00000001, + + /* New section is ready to be consumed */ + DMX_EVENT_NEW_SECTION = 0x00000002, + + /* New recording chunk is ready to be consumed */ + DMX_EVENT_NEW_REC_CHUNK = 0x00000004, + + /* New PCR value is ready */ + DMX_EVENT_NEW_PCR = 0x00000008, + + /* Overflow */ + DMX_EVENT_BUFFER_OVERFLOW = 0x00000010, + + /* Section was dropped due to CRC error */ + DMX_EVENT_SECTION_CRC_ERROR = 0x00000020, + + /* End-of-stream, no more data from this filter */ + DMX_EVENT_EOS = 0x00000040, + + /* New Elementary Stream data is ready */ + DMX_EVENT_NEW_ES_DATA = 0x00000080, + + /* Data markers */ + DMX_EVENT_MARKER = 0x00000100, + + /* New indexing entry is ready */ + DMX_EVENT_NEW_INDEX_ENTRY = 0x00000200, + + /* + * Section filter timer expired. This is notified + * when timeout is configured to section filter + * (dmx_sct_filter_params) and no sections were + * received for the given time. + */ + DMX_EVENT_SECTION_TIMEOUT = 0x00000400, + + /* Scrambling bits change between clear and scrambled */ + DMX_EVENT_SCRAMBLING_STATUS_CHANGE = 0x00000800 +}; + +enum dmx_oob_cmd { + /* End-of-stream, no more data from this filter */ + DMX_OOB_CMD_EOS, + + /* Data markers */ + DMX_OOB_CMD_MARKER, +}; + +/* Flags passed in filter events */ + +/* Continuity counter error was detected */ +#define DMX_FILTER_CC_ERROR 0x01 + +/* Discontinuity indicator was set */ +#define DMX_FILTER_DISCONTINUITY_INDICATOR 0x02 + +/* PES length in PES header is not correct */ +#define DMX_FILTER_PES_LENGTH_ERROR 0x04 + + +/* PES info associated with DMX_EVENT_NEW_PES event */ +struct dmx_pes_event_info { + /* Offset at which PES information starts */ + __u32 base_offset; + + /* + * Start offset at which PES data + * from the stream starts. + * Equal to base_offset if PES data + * starts from the beginning. + */ + __u32 start_offset; + + /* Total length holding the PES information */ + __u32 total_length; + + /* Actual length holding the PES data */ + __u32 actual_length; + + /* Local receiver timestamp in 27MHz */ + __u64 stc; + + /* Flags passed in filter events */ + __u32 flags; + + /* + * Number of TS packets with Transport Error Indicator (TEI) + * found while constructing the PES. 
+ */ + __u32 transport_error_indicator_counter; + + /* Number of continuity errors found while constructing the PES */ + __u32 continuity_error_counter; + + /* Total number of TS packets holding the PES */ + __u32 ts_packets_num; +}; + +/* Section info associated with DMX_EVENT_NEW_SECTION event */ +struct dmx_section_event_info { + /* Offset at which section information starts */ + __u32 base_offset; + + /* + * Start offset at which section data + * from the stream starts. + * Equal to base_offset if section data + * starts from the beginning. + */ + __u32 start_offset; + + /* Total length holding the section information */ + __u32 total_length; + + /* Actual length holding the section data */ + __u32 actual_length; + + /* Flags passed in filter events */ + __u32 flags; +}; + +/* Recording info associated with DMX_EVENT_NEW_REC_CHUNK event */ +struct dmx_rec_chunk_event_info { + /* Offset at which recording chunk starts */ + __u32 offset; + + /* Size of recording chunk in bytes */ + __u32 size; +}; + +/* PCR info associated with DMX_EVENT_NEW_PCR event */ +struct dmx_pcr_event_info { + /* Local timestamp in 27MHz + * when PCR packet was received + */ + __u64 stc; + + /* PCR value in 27MHz */ + __u64 pcr; + + /* Flags passed in filter events */ + __u32 flags; +}; + +/* + * Elementary stream data information associated + * with DMX_EVENT_NEW_ES_DATA event + */ +struct dmx_es_data_event_info { + /* Buffer user-space handle */ + int buf_handle; + + /* + * Cookie to provide when releasing the buffer + * using the DMX_RELEASE_DECODER_BUFFER ioctl command + */ + int cookie; + + /* Offset of data from the beginning of the buffer */ + __u32 offset; + + /* Length of data in buffer (in bytes) */ + __u32 data_len; + + /* Indication whether PTS value is valid */ + int pts_valid; + + /* PTS value associated with the buffer */ + __u64 pts; + + /* Indication whether DTS value is valid */ + int dts_valid; + + /* DTS value associated with the buffer */ + __u64 dts; + + /* STC value associated with the buffer in 27MHz */ + __u64 stc; + + /* + * Number of TS packets with Transport Error Indicator (TEI) set + * in the TS packet header since last reported event + */ + __u32 transport_error_indicator_counter; + + /* Number of continuity errors since last reported event */ + __u32 continuity_error_counter; + + /* Total number of TS packets processed since last reported event */ + __u32 ts_packets_num; + + /* + * Number of dropped bytes due to insufficient buffer space, + * since last reported event + */ + __u32 ts_dropped_bytes; +}; + +/* Marker details associated with DMX_EVENT_MARKER event */ +struct dmx_marker_event_info { + /* Marker id */ + __u64 id; +}; + +/* Indexing information associated with DMX_EVENT_NEW_INDEX_ENTRY event */ +struct dmx_index_event_info { + /* Index entry type, one of DMX_IDX_* */ + __u64 type; + + /* + * The PID the index entry belongs to. + * In case of recording filter, multiple PIDs may exist in the same + * filter through DMX_ADD_PID ioctl and each can be indexed separately. + */ + __u16 pid; + + /* + * The TS packet number in the recorded data at which + * the indexing event is found. + */ + __u64 match_tsp_num; + + /* + * The TS packet number in the recorded data preceding + * match_tsp_num and has PUSI set. 
+ */ + __u64 last_pusi_tsp_num; + + /* STC associated with match_tsp_num, in 27MHz */ + __u64 stc; +}; + +/* Scrambling information associated with DMX_EVENT_SCRAMBLING_STATUS_CHANGE */ +struct dmx_scrambling_status_event_info { + /* + * The PID which its scrambling bit status changed. + * In case of recording filter, multiple PIDs may exist in the same + * filter through DMX_ADD_PID ioctl, each may have + * different scrambling bits status. + */ + __u16 pid; + + /* old value of scrambling bits */ + __u8 old_value; + + /* new value of scrambling bits */ + __u8 new_value; +}; + +/* + * Filter's event returned through DMX_GET_EVENT. + * poll with POLLPRI would block until events are available. + */ +struct dmx_filter_event { + enum dmx_event type; + + union { + struct dmx_pes_event_info pes; + struct dmx_section_event_info section; + struct dmx_rec_chunk_event_info recording_chunk; + struct dmx_pcr_event_info pcr; + struct dmx_es_data_event_info es_data; + struct dmx_marker_event_info marker; + struct dmx_index_event_info index; + struct dmx_scrambling_status_event_info scrambling_status; + } params; +}; + +/* Filter's buffer requirement returned in dmx_caps */ +struct dmx_buffer_requirement { + /* Buffer size alignment, 0 means no special requirement */ + __u32 size_alignment; + + /* Maximum buffer size allowed */ + __u32 max_size; + + /* Maximum number of linear buffers handled by demux */ + __u32 max_buffer_num; + + /* Feature support bitmap as detailed below */ + __u32 flags; + +/* Buffer must be allocated as physically contiguous memory */ +#define DMX_BUFFER_CONTIGUOUS_MEM 0x1 + +/* If the filter's data is decrypted, the buffer should be secured one */ +#define DMX_BUFFER_SECURED_IF_DECRYPTED 0x2 + +/* Buffer can be allocated externally */ +#define DMX_BUFFER_EXTERNAL_SUPPORT 0x4 + +/* Buffer can be allocated internally */ +#define DMX_BUFFER_INTERNAL_SUPPORT 0x8 + +/* Filter output can be output to a linear buffer group */ +#define DMX_BUFFER_LINEAR_GROUP_SUPPORT 0x10 + +/* Buffer may be allocated as cached buffer */ +#define DMX_BUFFER_CACHED 0x20 +}; + +/* Out-of-band (OOB) command */ +struct dmx_oob_command { + enum dmx_oob_cmd type; + + union { + struct dmx_marker_event_info marker; + } params; }; typedef struct dmx_caps { __u32 caps; + +/* Indicates whether demux support playback from memory in pull mode */ +#define DMX_CAP_PULL_MODE 0x01 + +/* Indicates whether demux support indexing of recorded video stream */ +#define DMX_CAP_VIDEO_INDEXING 0x02 + +/* Indicates whether demux support sending data directly to video decoder */ +#define DMX_CAP_VIDEO_DECODER_DATA 0x04 + +/* Indicates whether demux support sending data directly to audio decoder */ +#define DMX_CAP_AUDIO_DECODER_DATA 0x08 + +/* Indicates whether demux support sending data directly to subtitle decoder */ +#define DMX_CAP_SUBTITLE_DECODER_DATA 0x10 + +/* Indicates whether TS insertion is supported */ +#define DMX_CAP_TS_INSERTION 0x20 + +/* Indicates whether playback from secured input is supported */ +#define DMX_CAP_SECURED_INPUT_PLAYBACK 0x40 + +/* Indicates whether automatic buffer flush upon overflow is allowed */ +#define DMX_CAP_AUTO_BUFFER_FLUSH 0x80 + + /* Number of decoders demux can output data to */ int num_decoders; + + /* Number of demux devices */ + int num_demux_devices; + + /* Max number of PID filters */ + int num_pid_filters; + + /* Max number of section filters */ + int num_section_filters; + + /* + * Max number of section filters using same PID, + * 0 if not supported + */ + int 
num_section_filters_per_pid; + + /* + * Length of section filter, not including section + * length field (2 bytes). + */ + int section_filter_length; + + /* Max number of demod based input */ + int num_demod_inputs; + + /* Max number of memory based input */ + int num_memory_inputs; + + /* Overall bitrate from all inputs concurrently. Mbit/sec */ + int max_bitrate; + + /* Max bitrate from single demod input. Mbit/sec */ + int demod_input_max_bitrate; + + /* Max bitrate from single memory input. Mbit/sec */ + int memory_input_max_bitrate; + + /* Max number of supported cipher operations per PID */ + int num_cipher_ops; + + /* Max possible value of STC reported by demux, in 27MHz */ + __u64 max_stc; + + /* + * For indexing support (DMX_CAP_VIDEO_INDEXING capability) this is + * the max number of video pids that can be indexed for a single + * recording filter. If 0, means there is not limitation. + */ + int recording_max_video_pids_indexed; + + struct dmx_buffer_requirement section; + + /* For PES not sent to decoder */ + struct dmx_buffer_requirement pes; + + /* For PES sent to decoder */ + struct dmx_buffer_requirement decoder; + + /* Recording buffer for recording of 188 bytes packets */ + struct dmx_buffer_requirement recording_188_tsp; + + /* Recording buffer for recording of 192 bytes packets */ + struct dmx_buffer_requirement recording_192_tsp; + + /* DVR input buffer for playback of 188 bytes packets */ + struct dmx_buffer_requirement playback_188_tsp; + + /* DVR input buffer for playback of 192 bytes packets */ + struct dmx_buffer_requirement playback_192_tsp; } dmx_caps_t; typedef enum dmx_source { @@ -134,12 +623,229 @@ typedef enum dmx_source { DMX_SOURCE_DVR3 } dmx_source_t; +enum dmx_tsp_format_t { + DMX_TSP_FORMAT_188 = 0, + DMX_TSP_FORMAT_192_TAIL, + DMX_TSP_FORMAT_192_HEAD, + DMX_TSP_FORMAT_204, +}; + +enum dmx_playback_mode_t { + /* + * In push mode, if one of output buffers + * is full, the buffer would overflow + * and demux continue processing incoming stream. + * This is the default mode. When playing from frontend, + * this is the only mode that is allowed. + */ + DMX_PB_MODE_PUSH = 0, + + /* + * In pull mode, if one of output buffers + * is full, demux stalls waiting for free space, + * this would cause DVR input buffer fullness + * to accumulate. + * This mode is possible only when playing + * from DVR. + */ + DMX_PB_MODE_PULL, +}; + struct dmx_stc { unsigned int num; /* input : which STC? 0..N */ unsigned int base; /* output: divisor for stc to get 90 kHz clock */ __u64 stc; /* output: stc in 'base'*90 kHz units */ }; +enum dmx_buffer_mode { + /* + * demux buffers are allocated internally + * by the demux driver. This is the default mode. + * DMX_SET_BUFFER_SIZE can be used to set the size of + * this buffer. + */ + DMX_BUFFER_MODE_INTERNAL, + + /* + * demux buffers are allocated externally and provided + * to demux through DMX_SET_BUFFER. + * When this mode is used DMX_SET_BUFFER_SIZE and + * mmap are prohibited. + */ + DMX_BUFFER_MODE_EXTERNAL, +}; + +struct dmx_buffer { + unsigned int size; + int handle; + + /* + * The following indication is relevant only when setting + * DVR input buffer. It indicates whether the input buffer + * being set is secured one or not. Secured (locked) buffers + * are required for playback from secured input. In such case + * write() syscall is not allowed. + */ + int is_protected; +}; + +struct dmx_decoder_buffers { + /* + * Specify if linear buffer support is requested. 
If set, buffers_num + * must be greater than 1 + */ + int is_linear; + + /* + * Specify number of external buffers allocated by user. + * If set to 0 means internal buffer allocation is requested + */ + __u32 buffers_num; + + /* Specify buffer size, either external or internal */ + __u32 buffers_size; + + /* Array of externally allocated buffer handles */ + int handles[DMX_MAX_DECODER_BUFFER_NUM]; +}; + +struct dmx_secure_mode { + /* + * Specifies whether the filter is secure or not. + * Filter should be set as secured if the filter's data *may* include + * encrypted data that would require decryption configured through + * DMX_SET_CIPHER ioctl. The setting may be done while + * filter is in idle state only. + */ + int is_secured; +}; + +struct dmx_cipher_operation { + /* Indication whether the operation is encryption or decryption */ + int encrypt; + + /* The ID of the key used for decryption or encryption */ + __u32 key_ladder_id; +}; + +#define DMX_MAX_CIPHER_OPERATIONS_COUNT 5 +struct dmx_cipher_operations { + /* + * The PID to perform the cipher operations on. + * In case of recording filter, multiple PIDs may exist in the same + * filter through DMX_ADD_PID ioctl, each may have different + * cipher operations. + */ + __u16 pid; + + /* Total number of operations */ + __u8 operations_count; + + /* + * Cipher operation to perform on the given PID. + * The operations are performed in the order they are given. + */ + struct dmx_cipher_operation operations[DMX_MAX_CIPHER_OPERATIONS_COUNT]; +}; + +struct dmx_events_mask { + /* + * Bitmask of events to be disabled (dmx_event). + * Disabled events will not be notified to the user. + * By default all events are enabled except for + * DMX_EVENT_NEW_ES_DATA. + * Overflow event can't be disabled. + */ + __u32 disable_mask; + + /* + * Bitmask of events that will not wake-up the user + * when user calls poll with POLLPRI flag. + * Events that are used as wake-up source should not be + * disabled in disable_mask or they would not be used + * as a wake-up source. + * By default all enabled events are set as wake-up events. + * Overflow event can't be disabled as a wake-up source. + */ + __u32 no_wakeup_mask; + + /* + * Number of ready wake-up events which will trigger + * a wake-up when user calls poll with POLLPRI flag. + * Default is set to 1. + */ + __u32 wakeup_threshold; +}; + +struct dmx_indexing_params { + /* + * PID to index. In case of recording filter, multiple PIDs + * may exist in the same filter through DMX_ADD_PID ioctl. + * It is assumed that the PID was already added using DMX_ADD_PID + * or an error will be reported. + */ + __u16 pid; + + /* enable or disable indexing, default is disabled */ + int enable; + + /* combination of DMX_IDX_* bits */ + __u64 types; +}; + +struct dmx_set_ts_insertion { + /* + * Unique identifier managed by the caller. + * This identifier can be used later to remove the + * insertion using DMX_ABORT_TS_INSERTION ioctl. + */ + __u32 identifier; + + /* + * Repetition time in msec, minimum allowed value is 25msec. + * 0 repetition time means one-shot insertion is done. + * Insertion done based on wall-clock. + */ + __u32 repetition_time; + + /* + * TS packets buffer to be inserted. + * The buffer is inserted as-is to the recording buffer + * without any modification. + * It is advised to set discontinuity flag in the very + * first TS packet in the buffer. + */ + const __u8 *ts_packets; + + /* + * Size in bytes of the TS packets buffer to be inserted. 
+ * Should be in multiples of 188 or 192 bytes + * depending on recording filter output format. + */ + size_t size; +}; + +struct dmx_abort_ts_insertion { + /* + * Identifier of the insertion buffer previously set + * using DMX_SET_TS_INSERTION. + */ + __u32 identifier; +}; + +struct dmx_scrambling_bits { + /* + * The PID to return its scrambling bit value. + * In case of recording filter, multiple PIDs may exist in the same + * filter through DMX_ADD_PID ioctl, each may have different + * scrambling bits status. + */ + __u16 pid; + + /* Current value of scrambling bits: 0, 1, 2 or 3 */ + __u8 value; +}; + #define DMX_START _IO('o', 41) #define DMX_STOP _IO('o', 42) #define DMX_SET_FILTER _IOW('o', 43, struct dmx_sct_filter_params) @@ -151,5 +857,27 @@ struct dmx_stc { #define DMX_GET_STC _IOWR('o', 50, struct dmx_stc) #define DMX_ADD_PID _IOW('o', 51, __u16) #define DMX_REMOVE_PID _IOW('o', 52, __u16) +#define DMX_SET_TS_PACKET_FORMAT _IOW('o', 53, enum dmx_tsp_format_t) +#define DMX_SET_TS_OUT_FORMAT _IOW('o', 54, enum dmx_tsp_format_t) +#define DMX_SET_DECODER_BUFFER_SIZE _IO('o', 55) +#define DMX_GET_BUFFER_STATUS _IOR('o', 56, struct dmx_buffer_status) +#define DMX_RELEASE_DATA _IO('o', 57) +#define DMX_FEED_DATA _IO('o', 58) +#define DMX_SET_PLAYBACK_MODE _IOW('o', 59, enum dmx_playback_mode_t) +#define DMX_GET_EVENT _IOR('o', 60, struct dmx_filter_event) +#define DMX_SET_BUFFER_MODE _IOW('o', 61, enum dmx_buffer_mode) +#define DMX_SET_BUFFER _IOW('o', 62, struct dmx_buffer) +#define DMX_SET_DECODER_BUFFER _IOW('o', 63, struct dmx_decoder_buffers) +#define DMX_REUSE_DECODER_BUFFER _IO('o', 64) +#define DMX_SET_SECURE_MODE _IOW('o', 65, struct dmx_secure_mode) +#define DMX_SET_EVENTS_MASK _IOW('o', 66, struct dmx_events_mask) +#define DMX_GET_EVENTS_MASK _IOR('o', 67, struct dmx_events_mask) +#define DMX_PUSH_OOB_COMMAND _IOW('o', 68, struct dmx_oob_command) +#define DMX_SET_INDEXING_PARAMS _IOW('o', 69, struct dmx_indexing_params) +#define DMX_SET_TS_INSERTION _IOW('o', 70, struct dmx_set_ts_insertion) +#define DMX_ABORT_TS_INSERTION _IOW('o', 71, struct dmx_abort_ts_insertion) +#define DMX_GET_SCRAMBLING_BITS _IOWR('o', 72, struct dmx_scrambling_bits) +#define DMX_SET_CIPHER _IOW('o', 73, struct dmx_cipher_operations) +#define DMX_FLUSH_BUFFER _IO('o', 74) #endif /* _UAPI_DVBDMX_H_ */ -- GitLab From baeebf09dd19dcddb95b74aa7627524bc4b3ebc0 Mon Sep 17 00:00:00 2001 From: Yida Wang Date: Wed, 3 May 2017 14:46:14 -0400 Subject: [PATCH 365/786] seemp: check array index range Check whether array index is within the bounds in seemp_logk_get_bit_from_vector(). 
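The old check is off by one: DIV_ROUND_UP() only rejects indexes whose
byte offset lies strictly beyond the mask, so the first out-of-range bit
still reads one byte past the end of the vector. A minimal user-space
sketch of the failing case, with MASK_BUFFER_SIZE assumed to be 8 here
purely for illustration:

	#include <stdio.h>

	#define MASK_BUFFER_SIZE 8			/* assumed mask length in bytes */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int index = MASK_BUFFER_SIZE * 8;	/* first invalid bit index */
		unsigned int byte_num = index / 8;		/* == MASK_BUFFER_SIZE, one past the end */

		/* old check: DIV_ROUND_UP(64, 8) == 8 is not > 8, so the access is allowed */
		printf("old check rejects: %d\n", DIV_ROUND_UP(index, 8) > MASK_BUFFER_SIZE);

		/* new check: byte_num == 8 is not a valid index into an 8-byte vector */
		printf("new check rejects: %d\n", byte_num >= MASK_BUFFER_SIZE);
		return 0;
	}

Comparing the byte index itself against MASK_BUFFER_SIZE closes that hole,
as done in the hunk below.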
Change-Id: Idccf75736582b2390540f4d7b3351c018937186a Signed-off-by: Yida Wang --- drivers/platform/msm/seemp_core/seemp_logk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/msm/seemp_core/seemp_logk.c b/drivers/platform/msm/seemp_core/seemp_logk.c index ce073edbc129..a528e16116fd 100644 --- a/drivers/platform/msm/seemp_core/seemp_logk.c +++ b/drivers/platform/msm/seemp_core/seemp_logk.c @@ -289,7 +289,7 @@ static bool seemp_logk_get_bit_from_vector(__u8 *pVec, __u32 index) unsigned int bit_num = index%8; unsigned char byte; - if (DIV_ROUND_UP(index, 8) > MASK_BUFFER_SIZE) + if (byte_num >= MASK_BUFFER_SIZE) return false; byte = pVec[byte_num]; -- GitLab From 527059f4bd449fb38bc2db7f16dfdee3ae7ea397 Mon Sep 17 00:00:00 2001 From: Rohit Kumar Date: Wed, 21 Jun 2017 15:17:04 +0530 Subject: [PATCH 366/786] ASoC: wcd_cpe: remove wcd9330 related code wcd9330 codec is not used anymore on newer MSM targets. Remove wcd9330 related codes from wcd_cpe driver. Change-Id: Ib252e3a1a19c55f53b34b4f52f565e47ef04a314 Signed-off-by: Rohit Kumar --- sound/soc/codecs/wcd_cpe_services.c | 289 ++-------------------------- 1 file changed, 12 insertions(+), 277 deletions(-) diff --git a/sound/soc/codecs/wcd_cpe_services.c b/sound/soc/codecs/wcd_cpe_services.c index 0028ebc08d5f..ad8962b6f1fe 100644 --- a/sound/soc/codecs/wcd_cpe_services.c +++ b/sound/soc/codecs/wcd_cpe_services.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include "wcd_cpe_services.h" #include "wcd_cmi_api.h" @@ -46,9 +45,6 @@ #define LISTEN_CTL_SPE_VAL 0x0 #define LISTEN_CTL_MSM_VAL 0x1 -#define TOMTOM_A_SVASS_SPE_INBOX(N) (TOMTOM_A_SVASS_SPE_INBOX_0 + (N)) -#define TOMTOM_A_SVASS_SPE_OUTBOX(N) (TOMTOM_A_SVASS_SPE_OUTBOX_0 + (N)) - #define WCD9335_CPE_SS_SPE_DRAM_OFFSET 0x48000 #define WCD9335_CPE_SS_SPE_DRAM_SIZE 0x34000 #define WCD9335_CPE_SS_SPE_IRAM_OFFSET 0x80000 @@ -316,8 +312,7 @@ static int cpe_register_write(u32 reg, u32 val) { int ret = 0; - if (reg != TOMTOM_A_SVASS_MEM_BANK && - reg != WCD9335_CPE_SS_MEM_BANK_0) + if (reg != WCD9335_CPE_SS_MEM_BANK_0) pr_debug("%s: reg = 0x%x, value = 0x%x\n", __func__, reg, val); @@ -2149,73 +2144,27 @@ enum cpe_svc_result cpe_svc_ftm_test(void *cpe_handle, u32 *status) static enum cpe_svc_result cpe_tgt_tomtom_boot(int debug_mode) { - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - - if (!debug_mode) - rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_WDOG_CFG, - 0x3F, 0x31); - else - pr_info("%s: CPE in debug mode, WDOG disabled\n", - __func__); - - rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL, - 0x02, 0x00); - rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL, - 0x0C, 0x04); - rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_CFG, - 0x01, 0x01); - - return rc; + return CPE_SVC_SUCCESS; } static u32 cpe_tgt_tomtom_is_cpar_init_done(void) { - u8 status = 0; - - cpe_register_read(TOMTOM_A_SVASS_STATUS, &status); - return status & 0x01; + return 0; } static u32 cpe_tgt_tomtom_is_active(void) { - u8 status = 0; - - cpe_register_read(TOMTOM_A_SVASS_STATUS, &status); - return status & 0x04; + return 0; } static enum cpe_svc_result cpe_tgt_tomtom_reset(void) { - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - - rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_WDOG_CFG, - 0x30, 0x00); - - rc = cpe_update_bits(TOMTOM_A_SVASS_CPAR_CFG, - 0x01, 0x00); - rc = cpe_update_bits(TOMTOM_A_MEM_LEAKAGE_CTL, - 0x07, 0x03); - rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL, - 0x08, 0x08); - rc = cpe_update_bits(TOMTOM_A_SVASS_CLKRST_CTL, - 0x02, 0x02); - return rc; + return CPE_SVC_SUCCESS; } 
enum cpe_svc_result cpe_tgt_tomtom_voicetx(bool enable) { - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - u8 val = 0; - - if (enable) - val = 0x02; - else - val = 0x00; - rc = cpe_update_bits(TOMTOM_A_SVASS_CFG, - 0x02, val); - val = 0; - cpe_register_read(TOMTOM_A_SVASS_CFG, &val); - return rc; + return CPE_SVC_SUCCESS; } enum cpe_svc_result cpe_svc_toggle_lab(void *cpe_handle, bool enable) @@ -2235,251 +2184,37 @@ enum cpe_svc_result cpe_svc_toggle_lab(void *cpe_handle, bool enable) static enum cpe_svc_result cpe_tgt_tomtom_read_mailbox(u8 *buffer, size_t size) { - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - u32 cnt = 0; - - if (size >= TOMTOM_A_SVASS_SPE_OUTBOX_SIZE) - size = TOMTOM_A_SVASS_SPE_OUTBOX_SIZE - 1; - for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) { - rc = cpe_register_read(TOMTOM_A_SVASS_SPE_OUTBOX(cnt), - &(buffer[cnt])); - } - return rc; + return CPE_SVC_SUCCESS; } static enum cpe_svc_result cpe_tgt_tomtom_write_mailbox(u8 *buffer, size_t size) { - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - u32 cnt = 0; - - if (size >= TOMTOM_A_SVASS_SPE_INBOX_SIZE) - size = TOMTOM_A_SVASS_SPE_INBOX_SIZE - 1; - for (cnt = 0; (cnt < size) && (rc == CPE_SVC_SUCCESS); cnt++) { - rc = cpe_register_write(TOMTOM_A_SVASS_SPE_INBOX(cnt), - buffer[cnt]); - } - - if (rc == CPE_SVC_SUCCESS) - rc = cpe_register_write(TOMTOM_A_SVASS_SPE_INBOX_TRG, 1); - - return rc; -} - -static enum cpe_svc_result cpe_get_mem_addr(struct cpe_info *t_info, - const struct cpe_svc_mem_segment *mem_seg, - u32 *addr, u8 *mem) -{ - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - u32 offset, mem_sz, address; - u8 mem_type; - - switch (mem_seg->type) { - - case CPE_SVC_DATA_MEM: - mem_type = MEM_ACCESS_DRAM_VAL; - offset = TOMTOM_A_SVASS_SPE_DRAM_OFFSET; - mem_sz = TOMTOM_A_SVASS_SPE_DRAM_SIZE; - break; - - case CPE_SVC_INSTRUCTION_MEM: - mem_type = MEM_ACCESS_IRAM_VAL; - offset = TOMTOM_A_SVASS_SPE_IRAM_OFFSET; - mem_sz = TOMTOM_A_SVASS_SPE_IRAM_SIZE; - break; - - default: - pr_err("%s: Invalid mem type = %u\n", - __func__, mem_seg->type); - return CPE_SVC_INVALID_HANDLE; - } - - if (mem_seg->cpe_addr < offset) { - pr_err("%s: Invalid addr %x for mem type %u\n", - __func__, mem_seg->cpe_addr, mem_type); - return CPE_SVC_INVALID_HANDLE; - } - - address = mem_seg->cpe_addr - offset; - if (address + mem_seg->size > mem_sz) { - pr_err("%s: wrong size %zu, start address %x, mem_type %u\n", - __func__, mem_seg->size, address, mem_type); - return CPE_SVC_INVALID_HANDLE; - } - - (*addr) = address; - (*mem) = mem_type; - - return rc; + return CPE_SVC_SUCCESS; } static enum cpe_svc_result cpe_tgt_tomtom_read_RAM(struct cpe_info *t_info, struct cpe_svc_mem_segment *mem_seg) { - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - u8 mem_reg_val = 0; - u32 cnt = 0; - bool autoinc; - u8 mem = MEM_ACCESS_NONE_VAL; - u32 addr = 0; - u32 ptr_update = true; - - if (!mem_seg) { - pr_err("%s: Invalid mem segment\n", - __func__); - return CPE_SVC_INVALID_HANDLE; - } - - rc = cpe_get_mem_addr(t_info, mem_seg, &addr, &mem); - - if (rc != CPE_SVC_SUCCESS) { - pr_err("%s: Cannot obtain address, mem_type %u\n", - __func__, mem_seg->type); - return rc; - } - - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0); - autoinc = cpe_register_read_autoinc_supported(); - if (autoinc) - mem_reg_val |= 0x04; - - mem_reg_val |= 0x08; - mem_reg_val |= mem; - - do { - if (!autoinc || ptr_update) { - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR0, - (addr & 0xFF)); - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR1, - ((addr >> 8) & 0xFF)); - rc = 
cpe_register_write(TOMTOM_A_SVASS_MEM_PTR2, - ((addr >> 16) & 0xFF)); - - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, - mem_reg_val); - - ptr_update = false; - } - rc = cpe_register_read(TOMTOM_A_SVASS_MEM_BANK, - &mem_seg->data[cnt]); - - if (!autoinc) - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0); - } while (++cnt < mem_seg->size); - - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0); - - return rc; + return CPE_SVC_SUCCESS; } static enum cpe_svc_result cpe_tgt_tomtom_write_RAM(struct cpe_info *t_info, const struct cpe_svc_mem_segment *mem_seg) { - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - u8 mem_reg_val = 0; - u8 mem = MEM_ACCESS_NONE_VAL; - u32 addr = 0; - u8 *temp_ptr = NULL; - u32 temp_size = 0; - bool autoinc; - - if (!mem_seg) { - pr_err("%s: Invalid mem segment\n", - __func__); - return CPE_SVC_INVALID_HANDLE; - } - - rc = cpe_get_mem_addr(t_info, mem_seg, &addr, &mem); - - if (rc != CPE_SVC_SUCCESS) { - pr_err("%s: Cannot obtain address, mem_type %u\n", - __func__, mem_seg->type); - return rc; - } - - autoinc = cpe_register_read_autoinc_supported(); - if (autoinc) - mem_reg_val |= 0x04; - mem_reg_val |= mem; - - rc = cpe_update_bits(TOMTOM_A_SVASS_MEM_CTL, - 0x0F, mem_reg_val); - - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR0, - (addr & 0xFF)); - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR1, - ((addr >> 8) & 0xFF)); - - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_PTR2, - ((addr >> 16) & 0xFF)); - - temp_size = 0; - temp_ptr = mem_seg->data; - - while (temp_size <= mem_seg->size) { - u32 to_write = (mem_seg->size >= temp_size+CHUNK_SIZE) - ? CHUNK_SIZE : (mem_seg->size-temp_size); - - if (t_info->state == CPE_STATE_OFFLINE) { - pr_err("%s: CPE is offline\n", __func__); - return CPE_SVC_FAILED; - } - - cpe_register_write_repeat(TOMTOM_A_SVASS_MEM_BANK, - temp_ptr, to_write); - temp_size += CHUNK_SIZE; - temp_ptr += CHUNK_SIZE; - } - - rc = cpe_register_write(TOMTOM_A_SVASS_MEM_CTL, 0); - return rc; + return CPE_SVC_SUCCESS; } static enum cpe_svc_result cpe_tgt_tomtom_route_notification( enum cpe_svc_module module, enum cpe_svc_route_dest dest) { - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - u8 ctl_reg_val = 0; - - switch (module) { - case CPE_SVC_LISTEN_PROC: - switch (dest) { - case CPE_SVC_EXTERNAL: - ctl_reg_val = LISTEN_CTL_MSM_VAL; - break; - case CPE_SVC_INTERNAL: - ctl_reg_val = LISTEN_CTL_SPE_VAL; - break; - default: - pr_err("%s: Invalid dest %d\n", - __func__, dest); - return CPE_SVC_FAILED; - } - - rc = cpe_update_bits(TOMTOM_A_SVASS_CFG, - 0x01, ctl_reg_val); - break; - default: - pr_err("%s: Invalid module %d\n", - __func__, module); - rc = CPE_SVC_FAILED; - break; - } - - return rc; + return CPE_SVC_SUCCESS; } static enum cpe_svc_result cpe_tgt_tomtom_set_debug_mode(u32 enable) { - enum cpe_svc_result rc = CPE_SVC_SUCCESS; - u8 dbg_reg_val = 0x00; - - if (enable) - dbg_reg_val = 0x08; - rc = cpe_update_bits(TOMTOM_A_SVASS_DEBUG, - 0x08, dbg_reg_val); - return rc; + return CPE_SVC_SUCCESS; } static const struct cpe_svc_hw_cfg *cpe_tgt_tomtom_get_cpe_info(void) -- GitLab From 34a92c7b9cbc48b472c23a3fe4e5e399e39a4ef0 Mon Sep 17 00:00:00 2001 From: Benjamin Chan Date: Wed, 28 Jun 2017 11:01:18 -0400 Subject: [PATCH 367/786] drm/msm: Fix naming for msm event notification Correct the naming typo of msm event notification function. 
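The rename only touches the function name; the calling convention is
unchanged. A condensed sketch of that pattern, mirroring the backlight
hunk below (names and values taken from sde_connector.c, shown only as
an illustration of the notification API):

	struct drm_event event;
	u32 brightness = bl_lvl;			/* event payload */

	event.type = DRM_EVENT_SYS_BACKLIGHT;
	event.length = sizeof(u32);
	msm_mode_object_event_notify(&c_conn->base.base, c_conn->base.dev,
			&event, (u8 *)&brightness);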
Change-Id: I57cece3fbbab6ef850360e79b09fb6f118005cb7 Signed-off-by: Benjamin Chan --- drivers/gpu/drm/msm/msm_drv.c | 2 +- drivers/gpu/drm/msm/msm_drv.h | 2 +- drivers/gpu/drm/msm/sde/sde_color_processing.c | 2 +- drivers/gpu/drm/msm/sde/sde_connector.c | 2 +- drivers/gpu/drm/msm/sde/sde_crtc.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f7d5d02d473c..020135ccdcee 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1384,7 +1384,7 @@ static int msm_ioctl_deregister_event(struct drm_device *dev, void *data, return ret; } -void msm_mode_object_event_nofity(struct drm_mode_object *obj, +void msm_mode_object_event_notify(struct drm_mode_object *obj, struct drm_device *dev, struct drm_event *event, u8 *payload) { struct msm_drm_private *priv = NULL; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 86fec8b36269..03a969a5a16e 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -732,7 +732,7 @@ enum msm_dsi_encoder_id { * @event: event that needs to be notified. * @payload: payload for the event. */ -void msm_mode_object_event_nofity(struct drm_mode_object *obj, +void msm_mode_object_event_notify(struct drm_mode_object *obj, struct drm_device *dev, struct drm_event *event, u8 *payload); #ifdef CONFIG_DRM_MSM_DSI void __init msm_dsi_register(void); diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c index b4103028e0e1..fa130240583d 100644 --- a/drivers/gpu/drm/msm/sde/sde_color_processing.c +++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c @@ -1357,7 +1357,7 @@ static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg) hw_dspp->ops.ad_read_intr_resp(hw_dspp, AD4_BACKLIGHT, &bl); event.length = sizeof(u32); event.type = DRM_EVENT_AD_BACKLIGHT; - msm_mode_object_event_nofity(&crtc_drm->base, crtc_drm->dev, + msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev, &event, (u8 *)&bl); } diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 4ede27121627..2970b280814f 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -83,7 +83,7 @@ static int sde_backlight_device_update_status(struct backlight_device *bd) if (c_conn->ops.set_backlight) { event.type = DRM_EVENT_SYS_BACKLIGHT; event.length = sizeof(u32); - msm_mode_object_event_nofity(&c_conn->base.base, + msm_mode_object_event_notify(&c_conn->base.base, c_conn->base.dev, &event, (u8 *)&brightness); c_conn->ops.set_backlight(c_conn->display, bl_lvl); } diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 0f9d73931fb3..35739d58ffc2 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -2319,7 +2319,7 @@ static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable) _sde_crtc_vblank_enable_nolock(sde_crtc, !enable); sde_crtc->suspend = enable; - msm_mode_object_event_nofity(&crtc->base, crtc->dev, &event, + msm_mode_object_event_notify(&crtc->base, crtc->dev, &event, (u8 *)&power_on); mutex_unlock(&sde_crtc->crtc_lock); } -- GitLab From 07f413609a710f9e3341b5c6609332f22577d42c Mon Sep 17 00:00:00 2001 From: Vara Reddy Date: Tue, 20 Jun 2017 19:54:06 -0700 Subject: [PATCH 368/786] ARM: dts: msm: add support to 4k DSC command mode on sdm845 Add support for 4k DSC command mode on CDP, MTP and QRD 4k platforms. 
Change-Id: Idd4d38168a6e56edd85ac7c0dbd43312dbd8016f Signed-off-by: Vara Reddy --- .../boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi | 2 +- arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts | 11 +++++++++++ arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts | 11 +++++++++++ arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi | 12 ++++++++++++ 4 files changed, 35 insertions(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi index 061f1d9d900e..6534cdc9f414 100644 --- a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi +++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi @@ -40,7 +40,7 @@ qcom,mdss-dsi-lane-3-state; qcom,mdss-dsi-dma-trigger = "trigger_sw"; qcom,mdss-dsi-mdp-trigger = "none"; - qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>; + qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>; qcom,mdss-dsi-te-pin-select = <1>; qcom,mdss-dsi-wr-mem-start = <0x2c>; qcom,mdss-dsi-wr-mem-continue = <0x3c>; diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts index 4b7a68078767..1427af9d1b64 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-cdp.dts @@ -41,6 +41,17 @@ qcom,platform-reset-gpio = <&tlmm 6 0>; }; +&dsi_sharp_4k_dsc_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; + qcom,panel-mode-gpio = <&tlmm 52 0>; + qcom,platform-te-gpio = <&tlmm 10 0>; + qcom,platform-reset-gpio = <&tlmm 6 0>; +}; + &dsi_sharp_4k_dsc_video_display { qcom,dsi-display-active; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts index 67c3bcdc6099..474c6a59d316 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-4k-panel-mtp.dts @@ -41,6 +41,17 @@ qcom,platform-reset-gpio = <&tlmm 6 0>; }; +&dsi_sharp_4k_dsc_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; + qcom,panel-mode-gpio = <&tlmm 52 0>; + qcom,platform-te-gpio = <&tlmm 10 0>; + qcom,platform-reset-gpio = <&tlmm 6 0>; +}; + &dsi_sharp_4k_dsc_video_display { qcom,dsi-display-active; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi index f14293b904c6..6f7bba1b6401 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi @@ -256,6 +256,18 @@ qcom,mdss-dsi-panel-orientation = "180"; }; +&dsi_sharp_4k_dsc_cmd { + qcom,panel-supply-entries = <&dsi_panel_pwr_supply>; + qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled"; + qcom,mdss-dsi-bl-min-level = <1>; + qcom,mdss-dsi-bl-max-level = <4095>; + qcom,mdss-dsi-mode-sel-gpio-state = "dual_port"; + qcom,panel-mode-gpio = <&tlmm 52 0>; + qcom,platform-te-gpio = <&tlmm 10 0>; + qcom,platform-reset-gpio = <&tlmm 6 0>; + qcom,mdss-dsi-panel-orientation = "180"; +}; + &dsi_sharp_4k_dsc_video_display { qcom,dsi-display-active; }; -- GitLab From e221a5ca5e1a70a65106da4f88e3adfa298cc307 Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Mon, 1 May 2017 15:08:23 -0400 Subject: [PATCH 369/786] drm/msm: add crash 
analysis for important debug registers Add hooks to read and parse important SDE hardware debug registers at the time of doing a debug register dump to provide a first pass of analysis in the logs. Change-Id: I06b54d29dd0cda610f095f57d5cf3069eb9e9dba Signed-off-by: Lloyd Atkinson --- drivers/gpu/drm/msm/sde_dbg.c | 207 ++++++++++++++++++++-------------- 1 file changed, 124 insertions(+), 83 deletions(-) diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c index bcd3eaa187ae..b058bdd258b8 100644 --- a/drivers/gpu/drm/msm/sde_dbg.c +++ b/drivers/gpu/drm/msm/sde_dbg.c @@ -116,6 +116,7 @@ struct sde_debug_bus_entry { u32 wr_addr; u32 block_id; u32 test_id; + void (*analyzer)(struct sde_debug_bus_entry *entry, u32 val); }; struct vbif_debug_bus_entry { @@ -183,6 +184,43 @@ static struct sde_dbg_base { /* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */ struct sde_dbg_evtlog *sde_dbg_base_evtlog; +static void _sde_debug_bus_xbar_dump(struct sde_debug_bus_entry *entry, + u32 val) +{ + dev_err(sde_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n", + entry->wr_addr, entry->block_id, entry->test_id, val); +} + +static void _sde_debug_bus_lm_dump(struct sde_debug_bus_entry *entry, + u32 val) +{ + if (!(val & 0xFFF000)) + return; + + dev_err(sde_dbg_base.dev, "lm 0x%x %d %d 0x%x\n", + entry->wr_addr, entry->block_id, entry->test_id, val); +} + +static void _sde_debug_bus_ppb0_dump(struct sde_debug_bus_entry *entry, + u32 val) +{ + if (!(val & BIT(15))) + return; + + dev_err(sde_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n", + entry->wr_addr, entry->block_id, entry->test_id, val); +} + +static void _sde_debug_bus_ppb1_dump(struct sde_debug_bus_entry *entry, + u32 val) +{ + if (!(val & BIT(15))) + return; + + dev_err(sde_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n", + entry->wr_addr, entry->block_id, entry->test_id, val); +} + static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { /* Unpack 0 sspp 0*/ @@ -662,16 +700,16 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 20, 3 }, /* ppb_0 */ - { DBGBUS_DSPP, 31, 0 }, - { DBGBUS_DSPP, 33, 0 }, - { DBGBUS_DSPP, 35, 0 }, - { DBGBUS_DSPP, 42, 0 }, + { DBGBUS_DSPP, 31, 0, _sde_debug_bus_ppb0_dump }, + { DBGBUS_DSPP, 33, 0, _sde_debug_bus_ppb0_dump }, + { DBGBUS_DSPP, 35, 0, _sde_debug_bus_ppb0_dump }, + { DBGBUS_DSPP, 42, 0, _sde_debug_bus_ppb0_dump }, /* ppb_1 */ - { DBGBUS_DSPP, 32, 0 }, - { DBGBUS_DSPP, 34, 0 }, - { DBGBUS_DSPP, 36, 0 }, - { DBGBUS_DSPP, 43, 0 }, + { DBGBUS_DSPP, 32, 0, _sde_debug_bus_ppb1_dump }, + { DBGBUS_DSPP, 34, 0, _sde_debug_bus_ppb1_dump }, + { DBGBUS_DSPP, 36, 0, _sde_debug_bus_ppb1_dump }, + { DBGBUS_DSPP, 43, 0, _sde_debug_bus_ppb1_dump }, /* lm_lut */ { DBGBUS_DSPP, 109, 0 }, @@ -686,7 +724,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_PERIPH, 74, 0 }, /* crossbar */ - { DBGBUS_DSPP, 0, 0}, + { DBGBUS_DSPP, 0, 0, _sde_debug_bus_xbar_dump }, /* rotator */ { DBGBUS_DSPP, 9, 0}, @@ -700,7 +738,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 63, 4}, { DBGBUS_DSPP, 63, 5}, { DBGBUS_DSPP, 63, 6}, - { DBGBUS_DSPP, 63, 7}, + { DBGBUS_DSPP, 63, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 64, 0}, { DBGBUS_DSPP, 64, 1}, @@ -709,7 +747,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 64, 4}, { DBGBUS_DSPP, 64, 5}, { DBGBUS_DSPP, 64, 6}, - { DBGBUS_DSPP, 64, 7}, + { DBGBUS_DSPP, 64, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 65, 0}, { DBGBUS_DSPP, 65, 1}, @@ -718,7 +756,7 @@ static struct sde_debug_bus_entry 
dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 65, 4}, { DBGBUS_DSPP, 65, 5}, { DBGBUS_DSPP, 65, 6}, - { DBGBUS_DSPP, 65, 7}, + { DBGBUS_DSPP, 65, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 66, 0}, { DBGBUS_DSPP, 66, 1}, @@ -727,7 +765,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 66, 4}, { DBGBUS_DSPP, 66, 5}, { DBGBUS_DSPP, 66, 6}, - { DBGBUS_DSPP, 66, 7}, + { DBGBUS_DSPP, 66, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 67, 0}, { DBGBUS_DSPP, 67, 1}, @@ -736,7 +774,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 67, 4}, { DBGBUS_DSPP, 67, 5}, { DBGBUS_DSPP, 67, 6}, - { DBGBUS_DSPP, 67, 7}, + { DBGBUS_DSPP, 67, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 68, 0}, { DBGBUS_DSPP, 68, 1}, @@ -745,7 +783,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 68, 4}, { DBGBUS_DSPP, 68, 5}, { DBGBUS_DSPP, 68, 6}, - { DBGBUS_DSPP, 68, 7}, + { DBGBUS_DSPP, 68, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 69, 0}, { DBGBUS_DSPP, 69, 1}, @@ -754,7 +792,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 69, 4}, { DBGBUS_DSPP, 69, 5}, { DBGBUS_DSPP, 69, 6}, - { DBGBUS_DSPP, 69, 7}, + { DBGBUS_DSPP, 69, 7, _sde_debug_bus_lm_dump }, /* LM1 */ { DBGBUS_DSPP, 70, 0}, @@ -764,7 +802,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 70, 4}, { DBGBUS_DSPP, 70, 5}, { DBGBUS_DSPP, 70, 6}, - { DBGBUS_DSPP, 70, 7}, + { DBGBUS_DSPP, 70, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 71, 0}, { DBGBUS_DSPP, 71, 1}, @@ -773,7 +811,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 71, 4}, { DBGBUS_DSPP, 71, 5}, { DBGBUS_DSPP, 71, 6}, - { DBGBUS_DSPP, 71, 7}, + { DBGBUS_DSPP, 71, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 72, 0}, { DBGBUS_DSPP, 72, 1}, @@ -782,7 +820,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 72, 4}, { DBGBUS_DSPP, 72, 5}, { DBGBUS_DSPP, 72, 6}, - { DBGBUS_DSPP, 72, 7}, + { DBGBUS_DSPP, 72, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 73, 0}, { DBGBUS_DSPP, 73, 1}, @@ -791,7 +829,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 73, 4}, { DBGBUS_DSPP, 73, 5}, { DBGBUS_DSPP, 73, 6}, - { DBGBUS_DSPP, 73, 7}, + { DBGBUS_DSPP, 73, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 74, 0}, { DBGBUS_DSPP, 74, 1}, @@ -800,7 +838,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 74, 4}, { DBGBUS_DSPP, 74, 5}, { DBGBUS_DSPP, 74, 6}, - { DBGBUS_DSPP, 74, 7}, + { DBGBUS_DSPP, 74, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 75, 0}, { DBGBUS_DSPP, 75, 1}, @@ -809,7 +847,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 75, 4}, { DBGBUS_DSPP, 75, 5}, { DBGBUS_DSPP, 75, 6}, - { DBGBUS_DSPP, 75, 7}, + { DBGBUS_DSPP, 75, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 76, 0}, { DBGBUS_DSPP, 76, 1}, @@ -818,7 +856,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 76, 4}, { DBGBUS_DSPP, 76, 5}, { DBGBUS_DSPP, 76, 6}, - { DBGBUS_DSPP, 76, 7}, + { DBGBUS_DSPP, 76, 7, _sde_debug_bus_lm_dump }, /* LM2 */ { DBGBUS_DSPP, 77, 0}, @@ -828,7 +866,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 77, 4}, { DBGBUS_DSPP, 77, 5}, { DBGBUS_DSPP, 77, 6}, - { DBGBUS_DSPP, 77, 7}, + { DBGBUS_DSPP, 77, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 78, 0}, { DBGBUS_DSPP, 78, 1}, @@ -837,7 +875,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 78, 4}, { DBGBUS_DSPP, 78, 5}, { DBGBUS_DSPP, 78, 6}, - { DBGBUS_DSPP, 
78, 7}, + { DBGBUS_DSPP, 78, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 79, 0}, { DBGBUS_DSPP, 79, 1}, @@ -846,7 +884,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 79, 4}, { DBGBUS_DSPP, 79, 5}, { DBGBUS_DSPP, 79, 6}, - { DBGBUS_DSPP, 79, 7}, + { DBGBUS_DSPP, 79, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 80, 0}, { DBGBUS_DSPP, 80, 1}, @@ -855,7 +893,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 80, 4}, { DBGBUS_DSPP, 80, 5}, { DBGBUS_DSPP, 80, 6}, - { DBGBUS_DSPP, 80, 7}, + { DBGBUS_DSPP, 80, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 81, 0}, { DBGBUS_DSPP, 81, 1}, @@ -864,7 +902,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 81, 4}, { DBGBUS_DSPP, 81, 5}, { DBGBUS_DSPP, 81, 6}, - { DBGBUS_DSPP, 81, 7}, + { DBGBUS_DSPP, 81, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 82, 0}, { DBGBUS_DSPP, 82, 1}, @@ -873,7 +911,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 82, 4}, { DBGBUS_DSPP, 82, 5}, { DBGBUS_DSPP, 82, 6}, - { DBGBUS_DSPP, 82, 7}, + { DBGBUS_DSPP, 82, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 83, 0}, { DBGBUS_DSPP, 83, 1}, @@ -882,7 +920,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_8998[] = { { DBGBUS_DSPP, 83, 4}, { DBGBUS_DSPP, 83, 5}, { DBGBUS_DSPP, 83, 6}, - { DBGBUS_DSPP, 83, 7}, + { DBGBUS_DSPP, 83, 7, _sde_debug_bus_lm_dump }, /* csc */ { DBGBUS_SSPP0, 7, 0}, @@ -1386,16 +1424,16 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 20, 3 }, /* ppb_0 */ - { DBGBUS_DSPP, 31, 0 }, - { DBGBUS_DSPP, 33, 0 }, - { DBGBUS_DSPP, 35, 0 }, - { DBGBUS_DSPP, 42, 0 }, + { DBGBUS_DSPP, 31, 0, _sde_debug_bus_ppb0_dump }, + { DBGBUS_DSPP, 33, 0, _sde_debug_bus_ppb0_dump }, + { DBGBUS_DSPP, 35, 0, _sde_debug_bus_ppb0_dump }, + { DBGBUS_DSPP, 42, 0, _sde_debug_bus_ppb0_dump }, /* ppb_1 */ - { DBGBUS_DSPP, 32, 0 }, - { DBGBUS_DSPP, 34, 0 }, - { DBGBUS_DSPP, 36, 0 }, - { DBGBUS_DSPP, 43, 0 }, + { DBGBUS_DSPP, 32, 0, _sde_debug_bus_ppb1_dump }, + { DBGBUS_DSPP, 34, 0, _sde_debug_bus_ppb1_dump }, + { DBGBUS_DSPP, 36, 0, _sde_debug_bus_ppb1_dump }, + { DBGBUS_DSPP, 43, 0, _sde_debug_bus_ppb1_dump }, /* lm_lut */ { DBGBUS_DSPP, 109, 0 }, @@ -1403,7 +1441,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 103, 0 }, /* crossbar */ - { DBGBUS_DSPP, 0, 0}, + { DBGBUS_DSPP, 0, 0, _sde_debug_bus_xbar_dump }, /* rotator */ { DBGBUS_DSPP, 9, 0}, @@ -1416,7 +1454,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 63, 4}, { DBGBUS_DSPP, 63, 5}, { DBGBUS_DSPP, 63, 6}, - { DBGBUS_DSPP, 63, 7}, + { DBGBUS_DSPP, 63, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 64, 1}, { DBGBUS_DSPP, 64, 2}, @@ -1424,7 +1462,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 64, 4}, { DBGBUS_DSPP, 64, 5}, { DBGBUS_DSPP, 64, 6}, - { DBGBUS_DSPP, 64, 7}, + { DBGBUS_DSPP, 64, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 65, 1}, { DBGBUS_DSPP, 65, 2}, @@ -1432,7 +1470,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 65, 4}, { DBGBUS_DSPP, 65, 5}, { DBGBUS_DSPP, 65, 6}, - { DBGBUS_DSPP, 65, 7}, + { DBGBUS_DSPP, 65, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 66, 1}, { DBGBUS_DSPP, 66, 2}, @@ -1440,7 +1478,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 66, 4}, { DBGBUS_DSPP, 66, 5}, { DBGBUS_DSPP, 66, 6}, - { DBGBUS_DSPP, 66, 7}, + { DBGBUS_DSPP, 66, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 67, 1}, { DBGBUS_DSPP, 67, 2}, @@ -1448,7 +1486,7 
@@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 67, 4}, { DBGBUS_DSPP, 67, 5}, { DBGBUS_DSPP, 67, 6}, - { DBGBUS_DSPP, 67, 7}, + { DBGBUS_DSPP, 67, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 68, 1}, { DBGBUS_DSPP, 68, 2}, @@ -1456,7 +1494,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 68, 4}, { DBGBUS_DSPP, 68, 5}, { DBGBUS_DSPP, 68, 6}, - { DBGBUS_DSPP, 68, 7}, + { DBGBUS_DSPP, 68, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 69, 1}, { DBGBUS_DSPP, 69, 2}, @@ -1464,7 +1502,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 69, 4}, { DBGBUS_DSPP, 69, 5}, { DBGBUS_DSPP, 69, 6}, - { DBGBUS_DSPP, 69, 7}, + { DBGBUS_DSPP, 69, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 84, 1}, { DBGBUS_DSPP, 84, 2}, @@ -1472,7 +1510,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 84, 4}, { DBGBUS_DSPP, 84, 5}, { DBGBUS_DSPP, 84, 6}, - { DBGBUS_DSPP, 84, 7}, + { DBGBUS_DSPP, 84, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 85, 1}, @@ -1481,7 +1519,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 85, 4}, { DBGBUS_DSPP, 85, 5}, { DBGBUS_DSPP, 85, 6}, - { DBGBUS_DSPP, 85, 7}, + { DBGBUS_DSPP, 85, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 86, 1}, @@ -1490,7 +1528,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 86, 4}, { DBGBUS_DSPP, 86, 5}, { DBGBUS_DSPP, 86, 6}, - { DBGBUS_DSPP, 86, 7}, + { DBGBUS_DSPP, 86, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 87, 1}, @@ -1499,7 +1537,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 87, 4}, { DBGBUS_DSPP, 87, 5}, { DBGBUS_DSPP, 87, 6}, - { DBGBUS_DSPP, 87, 7}, + { DBGBUS_DSPP, 87, 7, _sde_debug_bus_lm_dump }, /* LM1 */ { DBGBUS_DSPP, 70, 1}, @@ -1508,7 +1546,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 70, 4}, { DBGBUS_DSPP, 70, 5}, { DBGBUS_DSPP, 70, 6}, - { DBGBUS_DSPP, 70, 7}, + { DBGBUS_DSPP, 70, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 71, 1}, { DBGBUS_DSPP, 71, 2}, @@ -1516,7 +1554,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 71, 4}, { DBGBUS_DSPP, 71, 5}, { DBGBUS_DSPP, 71, 6}, - { DBGBUS_DSPP, 71, 7}, + { DBGBUS_DSPP, 71, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 72, 1}, { DBGBUS_DSPP, 72, 2}, @@ -1524,7 +1562,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 72, 4}, { DBGBUS_DSPP, 72, 5}, { DBGBUS_DSPP, 72, 6}, - { DBGBUS_DSPP, 72, 7}, + { DBGBUS_DSPP, 72, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 73, 1}, { DBGBUS_DSPP, 73, 2}, @@ -1532,7 +1570,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 73, 4}, { DBGBUS_DSPP, 73, 5}, { DBGBUS_DSPP, 73, 6}, - { DBGBUS_DSPP, 73, 7}, + { DBGBUS_DSPP, 73, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 74, 1}, { DBGBUS_DSPP, 74, 2}, @@ -1540,7 +1578,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 74, 4}, { DBGBUS_DSPP, 74, 5}, { DBGBUS_DSPP, 74, 6}, - { DBGBUS_DSPP, 74, 7}, + { DBGBUS_DSPP, 74, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 75, 1}, { DBGBUS_DSPP, 75, 2}, @@ -1548,7 +1586,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 75, 4}, { DBGBUS_DSPP, 75, 5}, { DBGBUS_DSPP, 75, 6}, - { DBGBUS_DSPP, 75, 7}, + { DBGBUS_DSPP, 75, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 76, 1}, { DBGBUS_DSPP, 76, 2}, @@ -1556,7 +1594,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 76, 4}, { DBGBUS_DSPP, 76, 5}, { 
DBGBUS_DSPP, 76, 6}, - { DBGBUS_DSPP, 76, 7}, + { DBGBUS_DSPP, 76, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 88, 1}, { DBGBUS_DSPP, 88, 2}, @@ -1564,7 +1602,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 88, 4}, { DBGBUS_DSPP, 88, 5}, { DBGBUS_DSPP, 88, 6}, - { DBGBUS_DSPP, 88, 7}, + { DBGBUS_DSPP, 88, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 89, 1}, { DBGBUS_DSPP, 89, 2}, @@ -1572,7 +1610,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 89, 4}, { DBGBUS_DSPP, 89, 5}, { DBGBUS_DSPP, 89, 6}, - { DBGBUS_DSPP, 89, 7}, + { DBGBUS_DSPP, 89, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 90, 1}, { DBGBUS_DSPP, 90, 2}, @@ -1580,7 +1618,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 90, 4}, { DBGBUS_DSPP, 90, 5}, { DBGBUS_DSPP, 90, 6}, - { DBGBUS_DSPP, 90, 7}, + { DBGBUS_DSPP, 90, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 91, 1}, { DBGBUS_DSPP, 91, 2}, @@ -1588,7 +1626,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 91, 4}, { DBGBUS_DSPP, 91, 5}, { DBGBUS_DSPP, 91, 6}, - { DBGBUS_DSPP, 91, 7}, + { DBGBUS_DSPP, 91, 7, _sde_debug_bus_lm_dump }, /* LM2 */ { DBGBUS_DSPP, 77, 0}, @@ -1598,7 +1636,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 77, 4}, { DBGBUS_DSPP, 77, 5}, { DBGBUS_DSPP, 77, 6}, - { DBGBUS_DSPP, 77, 7}, + { DBGBUS_DSPP, 77, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 78, 0}, { DBGBUS_DSPP, 78, 1}, @@ -1607,7 +1645,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 78, 4}, { DBGBUS_DSPP, 78, 5}, { DBGBUS_DSPP, 78, 6}, - { DBGBUS_DSPP, 78, 7}, + { DBGBUS_DSPP, 78, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 79, 0}, { DBGBUS_DSPP, 79, 1}, @@ -1616,7 +1654,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 79, 4}, { DBGBUS_DSPP, 79, 5}, { DBGBUS_DSPP, 79, 6}, - { DBGBUS_DSPP, 79, 7}, + { DBGBUS_DSPP, 79, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 80, 0}, { DBGBUS_DSPP, 80, 1}, @@ -1625,7 +1663,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 80, 4}, { DBGBUS_DSPP, 80, 5}, { DBGBUS_DSPP, 80, 6}, - { DBGBUS_DSPP, 80, 7}, + { DBGBUS_DSPP, 80, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 81, 0}, { DBGBUS_DSPP, 81, 1}, @@ -1634,7 +1672,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 81, 4}, { DBGBUS_DSPP, 81, 5}, { DBGBUS_DSPP, 81, 6}, - { DBGBUS_DSPP, 81, 7}, + { DBGBUS_DSPP, 81, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 82, 0}, { DBGBUS_DSPP, 82, 1}, @@ -1643,7 +1681,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 82, 4}, { DBGBUS_DSPP, 82, 5}, { DBGBUS_DSPP, 82, 6}, - { DBGBUS_DSPP, 82, 7}, + { DBGBUS_DSPP, 82, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 83, 0}, { DBGBUS_DSPP, 83, 1}, @@ -1652,7 +1690,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 83, 4}, { DBGBUS_DSPP, 83, 5}, { DBGBUS_DSPP, 83, 6}, - { DBGBUS_DSPP, 83, 7}, + { DBGBUS_DSPP, 83, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 92, 1}, { DBGBUS_DSPP, 92, 2}, @@ -1660,7 +1698,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 92, 4}, { DBGBUS_DSPP, 92, 5}, { DBGBUS_DSPP, 92, 6}, - { DBGBUS_DSPP, 92, 7}, + { DBGBUS_DSPP, 92, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 93, 1}, { DBGBUS_DSPP, 93, 2}, @@ -1668,7 +1706,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 93, 4}, { DBGBUS_DSPP, 93, 5}, { DBGBUS_DSPP, 93, 6}, - { DBGBUS_DSPP, 93, 
7}, + { DBGBUS_DSPP, 93, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 94, 1}, { DBGBUS_DSPP, 94, 2}, @@ -1676,7 +1714,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 94, 4}, { DBGBUS_DSPP, 94, 5}, { DBGBUS_DSPP, 94, 6}, - { DBGBUS_DSPP, 94, 7}, + { DBGBUS_DSPP, 94, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 95, 1}, { DBGBUS_DSPP, 95, 2}, @@ -1684,7 +1722,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 95, 4}, { DBGBUS_DSPP, 95, 5}, { DBGBUS_DSPP, 95, 6}, - { DBGBUS_DSPP, 95, 7}, + { DBGBUS_DSPP, 95, 7, _sde_debug_bus_lm_dump }, /* LM5 */ { DBGBUS_DSPP, 110, 1}, @@ -1693,7 +1731,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 110, 4}, { DBGBUS_DSPP, 110, 5}, { DBGBUS_DSPP, 110, 6}, - { DBGBUS_DSPP, 110, 7}, + { DBGBUS_DSPP, 110, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 111, 1}, { DBGBUS_DSPP, 111, 2}, @@ -1701,7 +1739,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 111, 4}, { DBGBUS_DSPP, 111, 5}, { DBGBUS_DSPP, 111, 6}, - { DBGBUS_DSPP, 111, 7}, + { DBGBUS_DSPP, 111, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 112, 1}, { DBGBUS_DSPP, 112, 2}, @@ -1709,7 +1747,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 112, 4}, { DBGBUS_DSPP, 112, 5}, { DBGBUS_DSPP, 112, 6}, - { DBGBUS_DSPP, 112, 7}, + { DBGBUS_DSPP, 112, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 113, 1}, { DBGBUS_DSPP, 113, 2}, @@ -1717,7 +1755,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 113, 4}, { DBGBUS_DSPP, 113, 5}, { DBGBUS_DSPP, 113, 6}, - { DBGBUS_DSPP, 113, 7}, + { DBGBUS_DSPP, 113, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 114, 1}, { DBGBUS_DSPP, 114, 2}, @@ -1725,7 +1763,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 114, 4}, { DBGBUS_DSPP, 114, 5}, { DBGBUS_DSPP, 114, 6}, - { DBGBUS_DSPP, 114, 7}, + { DBGBUS_DSPP, 114, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 115, 1}, { DBGBUS_DSPP, 115, 2}, @@ -1733,7 +1771,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 115, 4}, { DBGBUS_DSPP, 115, 5}, { DBGBUS_DSPP, 115, 6}, - { DBGBUS_DSPP, 115, 7}, + { DBGBUS_DSPP, 115, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 116, 1}, { DBGBUS_DSPP, 116, 2}, @@ -1741,7 +1779,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 116, 4}, { DBGBUS_DSPP, 116, 5}, { DBGBUS_DSPP, 116, 6}, - { DBGBUS_DSPP, 116, 7}, + { DBGBUS_DSPP, 116, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 117, 1}, { DBGBUS_DSPP, 117, 2}, @@ -1749,7 +1787,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 117, 4}, { DBGBUS_DSPP, 117, 5}, { DBGBUS_DSPP, 117, 6}, - { DBGBUS_DSPP, 117, 7}, + { DBGBUS_DSPP, 117, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 118, 1}, { DBGBUS_DSPP, 118, 2}, @@ -1757,7 +1795,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 118, 4}, { DBGBUS_DSPP, 118, 5}, { DBGBUS_DSPP, 118, 6}, - { DBGBUS_DSPP, 118, 7}, + { DBGBUS_DSPP, 118, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 119, 1}, { DBGBUS_DSPP, 119, 2}, @@ -1765,7 +1803,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 119, 4}, { DBGBUS_DSPP, 119, 5}, { DBGBUS_DSPP, 119, 6}, - { DBGBUS_DSPP, 119, 7}, + { DBGBUS_DSPP, 119, 7, _sde_debug_bus_lm_dump }, { DBGBUS_DSPP, 120, 1}, { DBGBUS_DSPP, 120, 2}, @@ -1773,7 +1811,7 @@ static struct sde_debug_bus_entry dbg_bus_sde_sdm845[] = { { DBGBUS_DSPP, 120, 4}, { DBGBUS_DSPP, 120, 5}, { DBGBUS_DSPP, 
120, 6}, - { DBGBUS_DSPP, 120, 7}, + { DBGBUS_DSPP, 120, 7, _sde_debug_bus_lm_dump }, /* csc */ { DBGBUS_SSPP0, 7, 0}, @@ -2276,6 +2314,9 @@ static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus) dump_addr[i*4 + 3] = status; } + if (head->analyzer) + head->analyzer(head, status); + /* Disable debug bus once we are done */ writel_relaxed(0, mem_base + head->wr_addr); -- GitLab From a375c88225a0e92dba91567469d55178cd041ef0 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Mon, 13 Feb 2017 10:14:10 -0700 Subject: [PATCH 370/786] drm/msm: Remove the 'len' parameter from mmu map/unmap functions Nobody uses this now that we are entirely scatter/gather enabled. Remove it to avoid having to calculate the length unnecessarily. CRs-Fixed: 2050484 Change-Id: Ic0dedbad020998e1c8fd5d526789f73beb0d5755 Signed-off-by: Jordan Crouse Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/msm_gem.c | 3 +-- drivers/gpu/drm/msm/msm_iommu.c | 4 ++-- drivers/gpu/drm/msm/msm_mmu.h | 5 ++--- drivers/gpu/drm/msm/msm_smmu.c | 4 ++-- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index acd7af5ebe9f..655631fe00a9 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -305,8 +305,7 @@ put_iova(struct drm_gem_object *obj) obj->import_attach->dmabuf, DMA_BIDIRECTIONAL); else - mmu->funcs->unmap(mmu, offset, msm_obj->sgt, - obj->size); + mmu->funcs->unmap(mmu, offset, msm_obj->sgt); msm_obj->domain[id].iova = 0; } } diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 3a294d0da3a0..bc9877c583d7 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -46,7 +46,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, } static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, - struct sg_table *sgt, unsigned len, int prot) + struct sg_table *sgt, int prot) { struct msm_iommu *iommu = to_msm_iommu(mmu); struct iommu_domain *domain = iommu->domain; @@ -85,7 +85,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, } static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, - struct sg_table *sgt, unsigned len) + struct sg_table *sgt) { struct msm_iommu *iommu = to_msm_iommu(mmu); struct iommu_domain *domain = iommu->domain; diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index fbf7e7b677e0..0f161f53d8a4 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -35,9 +35,8 @@ struct msm_mmu_funcs { int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt); void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt); int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, - unsigned int len, int prot); - int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, - unsigned int len); + int prot); + int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt); int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt, enum dma_data_direction dir); void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt, diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c index 7fbcff435d56..e3d2e34663e9 100644 --- a/drivers/gpu/drm/msm/msm_smmu.c +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -109,7 +109,7 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char * const *names, } static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova, - struct sg_table *sgt, unsigned int len, int 
prot) + struct sg_table *sgt, int prot) { struct msm_smmu *smmu = to_msm_smmu(mmu); struct msm_smmu_client *client = msm_smmu_to_client(smmu); @@ -177,7 +177,7 @@ static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt, } static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova, - struct sg_table *sgt, unsigned int len) + struct sg_table *sgt) { struct msm_smmu *smmu = to_msm_smmu(mmu); struct msm_smmu_client *client = msm_smmu_to_client(smmu); -- GitLab From e22a2fb8a148f56bf90cb2f38f88e5b082dd8ee5 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 13 Feb 2017 10:14:11 -0700 Subject: [PATCH 371/786] drm/msm: support multiple address spaces We can have various combinations of 64b and 32b address space, ie. 64b CPU but 32b display and gpu, or 64b CPU and GPU but 32b display. So best to decouple the device iova's from mmap offset. CRs-Fixed: 2050484 Change-Id: Ic0dedbad2b36b535df3e8fb2ddddc20add592cea Signed-off-by: Rob Clark Git-commit: 22877bcbdacd50d076f9b2f829e6a3753aa9821f Git-repo: https://github.com/freedreno/kernel-msm.git [jcrouse@codeaurora.org: Fix merge conflicts, remove mdp5 due to large infrastructure changes, compile fixes] Signed-off-by: Jordan Crouse [kabhijit@codeaurora.org: Fix merge conflicts, add support for SDE, compile fixes] Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/Makefile | 1 + drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2 +- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 2 +- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 2 +- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 37 ++++++++--- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 2 +- drivers/gpu/drm/msm/msm_drv.c | 32 ++------- drivers/gpu/drm/msm/msm_drv.h | 35 ++++++++-- drivers/gpu/drm/msm/msm_gem.c | 44 +++---------- drivers/gpu/drm/msm/msm_gem.h | 18 ++++- drivers/gpu/drm/msm/msm_gem_vma.c | 87 +++++++++++++++++++++++++ drivers/gpu/drm/msm/msm_gpu.c | 21 +++--- drivers/gpu/drm/msm/msm_gpu.h | 2 +- 13 files changed, 191 insertions(+), 94 deletions(-) create mode 100644 drivers/gpu/drm/msm/msm_gem_vma.c diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 9ded82578a8c..1ac5c6c1ca62 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -166,6 +166,7 @@ msm_drm-$(CONFIG_DRM_MSM) += \ msm_gem_prime.o \ msm_gem_submit.o \ msm_gem_shrinker.o \ + msm_gem_vma.o \ msm_gpu.o \ msm_iommu.o \ msm_smmu.o \ diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index fd266ed963b6..156abf00c0e2 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -583,7 +583,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) #endif } - if (!gpu->mmu) { + if (!gpu->aspace) { /* TODO we think it is possible to configure the GPU to * restrict access to VRAM carveout. But the required * registers are unknown. For now just bail out and diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index d0d3c7baa8fe..2dc94122a959 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -672,7 +672,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) #endif } - if (!gpu->mmu) { + if (!gpu->aspace) { /* TODO we think it is possible to configure the GPU to * restrict access to VRAM carveout. But the required * registers are unknown. 
For now just bail out and diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index f386f463278d..b468d2a2cdeb 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -381,7 +381,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return ret; } - mmu = gpu->mmu; + mmu = gpu->aspace->mmu; if (mmu) { ret = mmu->funcs->attach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 571a91ee9607..fc61c39fbc2d 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -17,6 +17,7 @@ #include "msm_drv.h" +#include "msm_gem.h" #include "msm_mmu.h" #include "mdp4_kms.h" @@ -151,9 +152,22 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate, } } -static const char * const iommu_ports[] = { - "mdp_port0_cb0", "mdp_port1_cb0", -}; +static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) +{ + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + struct msm_drm_private *priv = mdp4_kms->dev->dev_private; + unsigned int i; + struct msm_gem_address_space *aspace = mdp4_kms->aspace; + + for (i = 0; i < priv->num_crtcs; i++) + mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file); + + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_destroy(aspace); + } +} static void mdp4_destroy(struct msm_kms *kms) { @@ -442,6 +456,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) struct msm_kms *kms = NULL; struct msm_mmu *mmu; int irq, ret; + struct msm_gem_address_space *aspace; mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); if (!mdp4_kms) { @@ -531,12 +546,16 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) mdelay(16); if (config->iommu) { - mmu = msm_iommu_new(&pdev->dev, config->iommu); - if (IS_ERR(mmu)) { - ret = PTR_ERR(mmu); + aspace = msm_gem_address_space_create(&pdev->dev, + config->iommu, "mdp4"); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); goto fail; } - ret = mmu->funcs->attach(mmu, iommu_ports, + + mdp4_kms->aspace = aspace; + + ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) goto fail; @@ -545,10 +564,10 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) } else { dev_info(dev->dev, "no iommu, fallback to phys " "contig buffers for scanout\n"); - mmu = NULL; + aspace = NULL; } - mdp4_kms->id = msm_register_mmu(dev, mmu); + mdp4_kms->id = msm_register_address_space(dev, aspace); if (mdp4_kms->id < 0) { ret = mdp4_kms->id; dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index ddfabdef9f0c..1fe35b23038c 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -43,7 +43,7 @@ struct mdp4_kms { struct clk *pclk; struct clk *lut_clk; struct clk *axi_clk; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; struct mdp_irq error_handler; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index f7d5d02d473c..e89afce515fa 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -140,42 +140,20 @@ static const struct drm_mode_config_funcs mode_config_funcs = { .atomic_commit = msm_atomic_commit, }; -int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu) +int 
msm_register_address_space(struct drm_device *dev, + struct msm_gem_address_space *aspace) { struct msm_drm_private *priv = dev->dev_private; - int idx = priv->num_mmus++; + int idx = priv->num_aspaces++; - if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus))) + if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace))) return -EINVAL; - priv->mmus[idx] = mmu; + priv->aspace[idx] = aspace; return idx; } -void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu) -{ - struct msm_drm_private *priv = dev->dev_private; - int idx; - - if (priv->num_mmus <= 0) { - dev_err(dev->dev, "invalid num mmus %d\n", priv->num_mmus); - return; - } - - idx = priv->num_mmus - 1; - - /* only support reverse-order deallocation */ - if (priv->mmus[idx] != mmu) { - dev_err(dev->dev, "unexpected mmu at idx %d\n", idx); - return; - } - - --priv->num_mmus; - priv->mmus[idx] = 0; -} - - #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING static bool reglog = false; MODULE_PARM_DESC(reglog, "Enable register read/write logging"); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 86fec8b36269..31aadaf5aed5 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -61,6 +61,8 @@ struct msm_perf_state; struct msm_gem_submit; struct msm_fence_context; struct msm_fence_cb; +struct msm_gem_address_space; +struct msm_gem_vma; #define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */ #define MAX_CRTCS 8 @@ -528,9 +530,13 @@ struct msm_drm_private { uint32_t pending_crtcs; wait_queue_head_t pending_crtcs_event; - /* registered MMUs: */ - unsigned int num_mmus; - struct msm_mmu *mmus[NUM_DOMAINS]; + /* Registered address spaces.. currently this is fixed per # of + * iommu's. Ie. one for display block and one for gpu block. + * Eventually, to do per-process gpu pagetables, we'll want one + * of these per-process. 
+ */ + unsigned int num_aspaces; + struct msm_gem_address_space *aspace[NUM_DOMAINS]; unsigned int num_planes; struct drm_plane *planes[MAX_PLANES]; @@ -635,10 +641,27 @@ static inline bool msm_is_suspend_blocked(struct drm_device *dev) int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); -int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); -void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu); - void msm_gem_submit_free(struct msm_gem_submit *submit); +static inline int msm_register_mmu(struct drm_device *dev, + struct msm_mmu *mmu) { + return -ENODEV; +} +static inline void msm_unregister_mmu(struct drm_device *dev, + struct msm_mmu *mmu) { +} +int msm_register_address_space(struct drm_device *dev, + struct msm_gem_address_space *aspace); + +void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt); +int msm_gem_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, int npages); + +void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace); +struct msm_gem_address_space * +msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, + const char *name); + int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 655631fe00a9..015174705321 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -296,18 +296,8 @@ put_iova(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { - struct msm_mmu *mmu = priv->mmus[id]; - if (mmu && msm_obj->domain[id].iova) { - uint32_t offset = msm_obj->domain[id].iova; - - if (obj->import_attach && mmu->funcs->unmap_dma_buf) - mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt, - obj->import_attach->dmabuf, - DMA_BIDIRECTIONAL); - else - mmu->funcs->unmap(mmu, offset, msm_obj->sgt); - msm_obj->domain[id].iova = 0; - } + msm_gem_unmap_vma(priv->aspace[id], + &msm_obj->domain[id], msm_obj->sgt); } } @@ -332,23 +322,9 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, return PTR_ERR(pages); if (iommu_present(&platform_bus_type)) { - struct msm_mmu *mmu = priv->mmus[id]; - - if (WARN_ON(!mmu)) - return -EINVAL; - - if (obj->import_attach && mmu->funcs->map_dma_buf) { - ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt, - obj->import_attach->dmabuf, - DMA_BIDIRECTIONAL, - msm_obj->flags); - if (ret) { - DRM_ERROR("Unable to map dma buf\n"); - return ret; - } - } - msm_obj->domain[id].iova = - sg_dma_address(msm_obj->sgt->sgl); + ret = msm_gem_map_vma(priv->aspace[id], + &msm_obj->domain[id], + msm_obj->sgt, obj->size >> PAGE_SHIFT); } else { WARN_ONCE(1, "physical address being used\n"); msm_obj->domain[id].iova = physaddr(obj); @@ -665,6 +641,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) } seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n", + msm_obj->flags, is_active(msm_obj) ? 
'A' : 'I', obj->name, obj->refcount.refcount.counter, off, msm_obj->vaddr, obj->size, madv); @@ -775,7 +752,6 @@ static int msm_gem_new_impl(struct drm_device *dev, { struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj; - unsigned sz; bool use_vram = false; switch (flags & MSM_BO_CACHE_MASK) { @@ -797,16 +773,12 @@ static int msm_gem_new_impl(struct drm_device *dev, if (WARN_ON(use_vram && !priv->vram.size)) return -EINVAL; - sz = sizeof(*msm_obj); - if (use_vram) - sz += sizeof(struct drm_mm_node); - - msm_obj = kzalloc(sz, GFP_KERNEL); + msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL); if (!msm_obj) return -ENOMEM; if (use_vram) - msm_obj->vram_node = (void *)&msm_obj[1]; + msm_obj->vram_node = &msm_obj->domain[0].node; msm_obj->flags = flags; msm_obj->madv = MSM_MADV_WILLNEED; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 19c7726a9361..beedf1e979d0 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -25,6 +25,20 @@ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ #define MSM_BO_KEEPATTRS 0x20000000 /* keep h/w bus attributes */ +struct msm_gem_address_space { + const char *name; + /* NOTE: mm managed at the page level, size is in # of pages + * and position mm_node->start is in # of pages: + */ + struct drm_mm mm; + struct msm_mmu *mmu; +}; + +struct msm_gem_vma { + struct drm_mm_node node; + uint64_t iova; +}; + struct msm_gem_object { struct drm_gem_object base; @@ -62,9 +76,7 @@ struct msm_gem_object { struct sg_table *sgt; void *vaddr; - struct { - dma_addr_t iova; - } domain[NUM_DOMAINS]; + struct msm_gem_vma domain[NUM_DOMAINS]; /* normally (resv == &_resv) except for imported bo's */ struct reservation_object *resv; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c new file mode 100644 index 000000000000..97e159b8b75c --- /dev/null +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2016 Red Hat + * Author: Rob Clark + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include "msm_drv.h" +#include "msm_gem.h" +#include "msm_mmu.h" + +void +msm_gem_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt) +{ + if (!vma->iova) + return; + + if (aspace->mmu) + aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt); + + drm_mm_remove_node(&vma->node); + + vma->iova = 0; +} + +int +msm_gem_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, int npages) +{ + int ret; + + if (WARN_ON(drm_mm_node_allocated(&vma->node))) + return 0; + + ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages, + 0, DRM_MM_SEARCH_DEFAULT); + if (ret) + return ret; + + vma->iova = vma->node.start << PAGE_SHIFT; + + if (aspace->mmu) + ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, + IOMMU_READ | IOMMU_WRITE); + + return ret; +} + +void +msm_gem_address_space_destroy(struct msm_gem_address_space *aspace) +{ + drm_mm_takedown(&aspace->mm); + if (aspace->mmu) + aspace->mmu->funcs->destroy(aspace->mmu); + kfree(aspace); +} + +struct msm_gem_address_space * +msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, + const char *name) +{ + struct msm_gem_address_space *aspace; + + aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); + if (!aspace) + return ERR_PTR(-ENOMEM); + + aspace->name = name; + aspace->mmu = msm_iommu_new(dev, domain); + + drm_mm_init(&aspace->mm, + (domain->geometry.aperture_start >> PAGE_SHIFT), + (domain->geometry.aperture_end >> PAGE_SHIFT) - 1); + + return aspace; +} diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 5bb09838b5ae..ded4226311cb 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -656,12 +656,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, */ iommu = iommu_domain_alloc(&platform_bus_type); if (iommu) { + /* TODO 32b vs 64b address space.. 
*/ + iommu->geometry.aperture_start = 0x1000; + iommu->geometry.aperture_end = 0xffffffff; + dev_info(drm->dev, "%s: using IOMMU\n", name); - gpu->mmu = msm_iommu_new(&pdev->dev, iommu); - if (IS_ERR(gpu->mmu)) { - ret = PTR_ERR(gpu->mmu); + gpu->aspace = msm_gem_address_space_create(&pdev->dev, + iommu, "gpu"); + if (IS_ERR(gpu->aspace)) { + ret = PTR_ERR(gpu->aspace); dev_err(drm->dev, "failed to init iommu: %d\n", ret); - gpu->mmu = NULL; + gpu->aspace = NULL; iommu_domain_free(iommu); goto fail; } @@ -669,7 +674,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, } else { dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); } - gpu->id = msm_register_mmu(drm, gpu->mmu); + gpu->id = msm_register_address_space(drm, gpu->aspace); /* Create ringbuffer: */ @@ -705,9 +710,9 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_ringbuffer_destroy(gpu->rb); } - if (gpu->mmu) - gpu->mmu->funcs->destroy(gpu->mmu); - if (gpu->fctx) msm_fence_context_free(gpu->fctx); + + if (gpu->aspace) + msm_gem_address_space_destroy(gpu->aspace); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index d61d98a6e047..c6bf5d6ebc20 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -98,7 +98,7 @@ struct msm_gpu { void __iomem *mmio; int irq; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; int id; /* Power Control: */ -- GitLab From 12bf362f67338808bb6cb1d769303d63979e544b Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Mon, 13 Feb 2017 10:14:11 -0700 Subject: [PATCH 372/786] drm/msm: Support different SMMU backends for address spaces SDE and the GPU have different requirements for the SMMU backends - the SDE generates its own iova addresses and needs special support for DMA buffers and the GPU does its own IOMMU operations. Add a shim layer to aspace to break out the address generation and call the appropriate SMMU functions. There is probably consolidation that can be done, but for now this is the best way to deal with the two use cases. 
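
To make the new interface easier to follow at a glance, here is a condensed
sketch of the call flow once this and the previous patch are applied. It uses
only functions introduced in those two patches; variables such as pdev, iommu,
mmu, ddev, priv, msm_obj and obj stand in for the caller's own probe/GEM state
and error handling is trimmed, so treat it as an illustration of the intended
usage rather than code lifted from the diff.

    /* GPU / legacy display: iovas come from a drm_mm range allocator
     * layered over an iommu_domain.
     */
    gpu->aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");

    /* SDE (and mdp5): the SMMU client maps the sg_table itself and the
     * iova is simply sg_dma_address() of the mapped table.
     */
    aspace = msm_gem_smmu_address_space_create(&pdev->dev, mmu, "sde");

    /* Either way the rest of the driver sees one interface; the priv
     * argument carries the dma_buf for imported objects, or NULL.
     */
    id = msm_register_address_space(ddev, aspace);

    ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
                          msm_obj->sgt, get_dmabuf_ptr(obj),
                          msm_obj->flags);
    /* ... scanout or GPU access through msm_obj->domain[id].iova ... */
    msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id],
                      msm_obj->sgt, get_dmabuf_ptr(obj));

    msm_gem_address_space_destroy(aspace);

The msm_gem_aspace_ops indirection (map/unmap/destroy) is what lets the
sde_kms.c, mdp5_kms.c and msm_gpu.c hunks below share these entry points
without knowing which backend sits underneath.
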
CRs-Fixed: 2050484 Change-Id: Ied6d0632c3420f8a5f61dbd80d6bc8330eb83010 Signed-off-by: Jordan Crouse Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 27 +++- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 33 +++-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 2 +- drivers/gpu/drm/msm/msm_drv.h | 22 +-- drivers/gpu/drm/msm/msm_gem.c | 18 ++- drivers/gpu/drm/msm/msm_gem.h | 16 +- drivers/gpu/drm/msm/msm_gem_vma.c | 185 ++++++++++++++++++++---- drivers/gpu/drm/msm/msm_mmu.h | 2 - drivers/gpu/drm/msm/sde/sde_kms.c | 32 ++-- drivers/gpu/drm/msm/sde/sde_kms.h | 3 +- 10 files changed, 266 insertions(+), 74 deletions(-) diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index fc61c39fbc2d..80b49a1f88a0 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -171,6 +171,9 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) static void mdp4_destroy(struct msm_kms *kms) { + struct device *dev = mdp4_kms->dev->dev; + struct msm_gem_address_space *aspace = mdp4_kms->aspace; + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); struct device *dev = mdp4_kms->dev->dev; struct msm_mmu *mmu = mdp4_kms->mmu; @@ -187,6 +190,12 @@ static void mdp4_destroy(struct msm_kms *kms) if (mdp4_kms->rpm_enabled) pm_runtime_disable(dev); + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_destroy(aspace); + } + kfree(mdp4_kms); } @@ -454,7 +463,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) struct mdp4_platform_config *config = mdp4_get_config(pdev); struct mdp4_kms *mdp4_kms; struct msm_kms *kms = NULL; - struct msm_mmu *mmu; int irq, ret; struct msm_gem_address_space *aspace; @@ -546,8 +554,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) mdelay(16); if (config->iommu) { + struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu); + + if (IS_ERR(mmu)) { + ret = PTR_ERR(mmu); + goto fail; + } + aspace = msm_gem_address_space_create(&pdev->dev, - config->iommu, "mdp4"); + mmu, "mdp4", 0x1000, 0xffffffff); if (IS_ERR(aspace)) { ret = PTR_ERR(aspace); goto fail; @@ -618,5 +633,13 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev) config.max_clk = 266667000; config.iommu = iommu_domain_alloc(&platform_bus_type); +#else + if (cpu_is_apq8064()) + config.max_clk = 266667000; + else + config.max_clk = 200000000; + + config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN); +#endif return &config; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index 4f204ff936f0..f022967475b3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved. 
* Copyright (C) 2013 Red Hat * Author: Rob Clark * @@ -19,6 +19,7 @@ #include #include "msm_drv.h" +#include "msm_gem.h" #include "msm_mmu.h" #include "mdp5_kms.h" @@ -117,11 +118,12 @@ static int mdp5_set_split_display(struct msm_kms *kms, static void mdp5_kms_destroy(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct msm_mmu *mmu = mdp5_kms->mmu; + struct msm_gem_address_space *aspace = mdp5_kms->aspace; - if (mmu) { - mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); - mmu->funcs->destroy(mmu); + if (aspace) { + aspace->mmu->funcs->detach(aspace->mmu, + iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_destroy(aspace); } } @@ -564,8 +566,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) struct mdp5_kms *mdp5_kms; struct mdp5_cfg *config; struct msm_kms *kms; - struct msm_mmu *mmu; int irq, i, ret; + struct msm_gem_address_space *aspace; /* priv->kms would have been populated by the MDP5 driver */ kms = priv->kms; @@ -606,7 +608,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) mdelay(16); if (config->platform.iommu) { - mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); + struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, + config->platform.iommu); if (IS_ERR(mmu)) { ret = PTR_ERR(mmu); dev_err(&pdev->dev, "failed to init iommu: %d\n", ret); @@ -614,7 +617,16 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) goto fail; } - ret = mmu->funcs->attach(mmu, iommu_ports, + aspace = msm_gem_smmu_address_space_create(&pdev->dev, + mmu, "mdp5"); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); + goto fail; + } + + mdp5_kms->aspace = aspace; + + ret = mmu->funcs->attach(aspace->mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) { dev_err(&pdev->dev, "failed to attach iommu: %d\n", @@ -625,11 +637,10 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) } else { dev_info(&pdev->dev, "no iommu, fallback to phys contig buffers for scanout\n"); - mmu = NULL; + aspace = NULL; } - mdp5_kms->mmu = mmu; - mdp5_kms->id = msm_register_mmu(dev, mmu); + mdp5_kms->id = msm_register_address_space(dev, aspace); if (mdp5_kms->id < 0) { ret = mdp5_kms->id; dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 03738927be10..623ac07c1970 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -39,7 +39,7 @@ struct mdp5_kms { /* mapper-id used to request GEM buffer mapped for scanout: */ int id; - struct msm_mmu *mmu; + struct msm_gem_address_space *aspace; struct mdp5_smp *smp; struct mdp5_ctl_manager *ctlm; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 31aadaf5aed5..aba7c49451c4 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -642,26 +642,26 @@ int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); void msm_gem_submit_free(struct msm_gem_submit *submit); -static inline int msm_register_mmu(struct drm_device *dev, - struct msm_mmu *mmu) { - return -ENODEV; -} -static inline void msm_unregister_mmu(struct drm_device *dev, - struct msm_mmu *mmu) { -} int msm_register_address_space(struct drm_device *dev, struct msm_gem_address_space *aspace); - void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt); + struct msm_gem_vma *vma, struct sg_table *sgt, + void *priv); int msm_gem_map_vma(struct 
msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt, int npages); - + struct msm_gem_vma *vma, struct sg_table *sgt, + void *priv, unsigned int flags); void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace); + +/* For GPU and legacy display */ struct msm_gem_address_space * msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, const char *name); +/* For SDE display */ +struct msm_gem_address_space * +msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu, + const char *name); + int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 015174705321..43e2a266f8b8 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -26,6 +26,11 @@ #include "msm_gpu.h" #include "msm_mmu.h" +static void *get_dmabuf_ptr(struct drm_gem_object *obj) +{ + return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL; +} + static dma_addr_t physaddr(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -296,8 +301,8 @@ put_iova(struct drm_gem_object *obj) WARN_ON(!mutex_is_locked(&dev->struct_mutex)); for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { - msm_gem_unmap_vma(priv->aspace[id], - &msm_obj->domain[id], msm_obj->sgt); + msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id], + msm_obj->sgt, get_dmabuf_ptr(obj)); } } @@ -323,10 +328,10 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, if (iommu_present(&platform_bus_type)) { ret = msm_gem_map_vma(priv->aspace[id], - &msm_obj->domain[id], - msm_obj->sgt, obj->size >> PAGE_SHIFT); + &msm_obj->domain[id], msm_obj->sgt, + get_dmabuf_ptr(obj), + msm_obj->flags); } else { - WARN_ONCE(1, "physical address being used\n"); msm_obj->domain[id].iova = physaddr(obj); } } @@ -697,7 +702,8 @@ void msm_gem_free_object(struct drm_gem_object *obj) if (obj->import_attach) { if (msm_obj->vaddr) - dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); + dma_buf_vunmap(obj->import_attach->dmabuf, + msm_obj->vaddr); /* Don't drop the pages for imported dmabuf, as they are not * ours, just free the array we allocated: diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index beedf1e979d0..b176c11197f6 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -25,16 +25,24 @@ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ #define MSM_BO_KEEPATTRS 0x20000000 /* keep h/w bus attributes */ +struct msm_gem_aspace_ops { + int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *, + struct sg_table *sgt, void *priv, unsigned int flags); + + void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *, + struct sg_table *sgt, void *priv); + + void (*destroy)(struct msm_gem_address_space *); +}; + struct msm_gem_address_space { const char *name; - /* NOTE: mm managed at the page level, size is in # of pages - * and position mm_node->start is in # of pages: - */ - struct drm_mm mm; struct msm_mmu *mmu; + const struct msm_gem_aspace_ops *ops; }; struct msm_gem_vma { + /* Node used by the GPU address space, but not the SDE address space */ struct drm_mm_node node; uint64_t iova; }; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 97e159b8b75c..8e56871bdef3 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -19,9 +19,88 @@ #include "msm_gem.h" 
#include "msm_mmu.h" -void -msm_gem_unmap_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt) +/* SDE address space operations */ +static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, + void *priv) +{ + struct dma_buf *buf = priv; + + if (buf) + aspace->mmu->funcs->unmap_dma_buf(aspace->mmu, + sgt, buf, DMA_BIDIRECTIONAL); + else + aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt, + DMA_BIDIRECTIONAL); + + vma->iova = 0; +} + + +static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, + void *priv, unsigned int flags) +{ + struct dma_buf *buf = priv; + int ret; + + if (buf) + ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf, + DMA_BIDIRECTIONAL, flags); + else + ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt, + DMA_BIDIRECTIONAL); + + if (!ret) + vma->iova = sg_dma_address(sgt->sgl); + + return ret; +} + +static void smmu_aspace_destroy(struct msm_gem_address_space *aspace) +{ + aspace->mmu->funcs->destroy(aspace->mmu); +} + + +static const struct msm_gem_aspace_ops smmu_aspace_ops = { + .map = smmu_aspace_map_vma, + .unmap = smmu_aspace_unmap_vma, + .destroy = smmu_aspace_destroy +}; + +struct msm_gem_address_space * +msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu, + const char *name) +{ + struct msm_gem_address_space *aspace; + + if (!mmu) + return ERR_PTR(-EINVAL); + + aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); + if (!aspace) + return ERR_PTR(-ENOMEM); + + aspace->name = name; + aspace->mmu = mmu; + aspace->ops = &smmu_aspace_ops; + + return aspace; +} + +/* GPU address space operations */ +struct msm_iommu_aspace { + struct msm_gem_address_space base; + struct drm_mm mm; +}; + +#define to_iommu_aspace(aspace) \ + ((struct msm_iommu_aspace *) \ + container_of(aspace, struct msm_iommu_aspace, base)) + +static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, void *priv) { if (!vma->iova) return; @@ -34,16 +113,22 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace, vma->iova = 0; } -int -msm_gem_map_vma(struct msm_gem_address_space *aspace, - struct msm_gem_vma *vma, struct sg_table *sgt, int npages) +static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, + void *priv, unsigned int flags) { - int ret; + struct msm_iommu_aspace *local = to_iommu_aspace(aspace); + size_t size = 0; + struct scatterlist *sg; + int ret = 0, i; if (WARN_ON(drm_mm_node_allocated(&vma->node))) return 0; - ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages, + for_each_sg(sgt->sgl, sg, sgt->nents, i) + size += sg->length + sg->offset; + + ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT, 0, DRM_MM_SEARCH_DEFAULT); if (ret) return ret; @@ -51,37 +136,85 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, vma->iova = vma->node.start << PAGE_SHIFT; if (aspace->mmu) - ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, - IOMMU_READ | IOMMU_WRITE); + ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, + sgt, IOMMU_READ | IOMMU_WRITE); return ret; } -void -msm_gem_address_space_destroy(struct msm_gem_address_space *aspace) +static void iommu_aspace_destroy(struct msm_gem_address_space *aspace) { - drm_mm_takedown(&aspace->mm); - if (aspace->mmu) - aspace->mmu->funcs->destroy(aspace->mmu); - kfree(aspace); + struct msm_iommu_aspace *local = 
to_iommu_aspace(aspace); + + drm_mm_takedown(&local->mm); + aspace->mmu->funcs->destroy(aspace->mmu); +} + +static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = { + .map = iommu_aspace_map_vma, + .unmap = iommu_aspace_unmap_vma, + .destroy = iommu_aspace_destroy, +}; + +static struct msm_gem_address_space * +msm_gem_address_space_new(struct msm_mmu *mmu, const char *name, + uint64_t start, uint64_t end) +{ + struct msm_iommu_aspace *local; + + if (!mmu) + return ERR_PTR(-EINVAL); + + local = kzalloc(sizeof(*local), GFP_KERNEL); + if (!local) + return ERR_PTR(-ENOMEM); + + drm_mm_init(&local->mm, (start >> PAGE_SHIFT), + (end >> PAGE_SHIFT) - 1); + + local->base.name = name; + local->base.mmu = mmu; + local->base.ops = &msm_iommu_aspace_ops; + + return &local->base; +} + +int msm_gem_map_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, + void *priv, unsigned int flags) +{ + if (aspace && aspace->ops->map) + return aspace->ops->map(aspace, vma, sgt, priv, flags); + + return -EINVAL; +} + +void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, + struct msm_gem_vma *vma, struct sg_table *sgt, void *priv) +{ + if (aspace && aspace->ops->unmap) + aspace->ops->unmap(aspace, vma, sgt, priv); } struct msm_gem_address_space * msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain, const char *name) { - struct msm_gem_address_space *aspace; + struct msm_mmu *mmu = msm_iommu_new(dev, domain); - aspace = kzalloc(sizeof(*aspace), GFP_KERNEL); - if (!aspace) - return ERR_PTR(-ENOMEM); + if (IS_ERR(mmu)) + return (struct msm_gem_address_space *) mmu; - aspace->name = name; - aspace->mmu = msm_iommu_new(dev, domain); + return msm_gem_address_space_new(mmu, name, + domain->geometry.aperture_start, + domain->geometry.aperture_end); +} - drm_mm_init(&aspace->mm, - (domain->geometry.aperture_start >> PAGE_SHIFT), - (domain->geometry.aperture_end >> PAGE_SHIFT) - 1); +void +msm_gem_address_space_destroy(struct msm_gem_address_space *aspace) +{ + if (aspace && aspace->ops->destroy) + aspace->ops->destroy(aspace); - return aspace; + kfree(aspace); } diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 0f161f53d8a4..dc7e5a6521e9 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -21,7 +21,6 @@ #include struct msm_mmu; -struct msm_gpu; enum msm_mmu_domain_type { MSM_SMMU_DOMAIN_UNSECURE, @@ -61,7 +60,6 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, } struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); -struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu); struct msm_mmu *msm_smmu_new(struct device *dev, enum msm_mmu_domain_type domain); diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 26125d8d9e5d..7d8670607005 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1405,15 +1405,15 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms) int i; for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) { - if (!sde_kms->mmu[i]) + mmu = sde_kms->aspace[i]->mmu; + + if (!mmu) continue; - mmu = sde_kms->mmu[i]; - msm_unregister_mmu(sde_kms->dev, mmu); mmu->funcs->detach(mmu, (const char **)iommu_ports, ARRAY_SIZE(iommu_ports)); - mmu->funcs->destroy(mmu); - sde_kms->mmu[i] = 0; + msm_gem_address_space_destroy(sde_kms->aspace[i]); + sde_kms->mmu_id[i] = 0; } @@ -1426,6 +1426,8 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) 
int i, ret; for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) { + struct msm_gem_address_space *aspace; + mmu = msm_smmu_new(sde_kms->dev->dev, i); if (IS_ERR(mmu)) { ret = PTR_ERR(mmu); @@ -1434,25 +1436,35 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) continue; } + aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev, + mmu, "sde"); + if (IS_ERR(aspace)) { + ret = PTR_ERR(aspace); + mmu->funcs->destroy(mmu); + goto fail; + } + + sde_kms->aspace[i] = aspace; + ret = mmu->funcs->attach(mmu, (const char **)iommu_ports, ARRAY_SIZE(iommu_ports)); if (ret) { SDE_ERROR("failed to attach iommu %d: %d\n", i, ret); - mmu->funcs->destroy(mmu); - continue; + msm_gem_address_space_destroy(aspace); + goto fail; } - sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu); + sde_kms->mmu_id[i] = msm_register_address_space(sde_kms->dev, + aspace); if (sde_kms->mmu_id[i] < 0) { ret = sde_kms->mmu_id[i]; SDE_ERROR("failed to register sde iommu %d: %d\n", i, ret); mmu->funcs->detach(mmu, (const char **)iommu_ports, ARRAY_SIZE(iommu_ports)); + msm_gem_address_space_destroy(aspace); goto fail; } - - sde_kms->mmu[i] = mmu; } return 0; diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index 5894fe2af53b..0c5c286167af 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -24,6 +24,7 @@ #include "msm_drv.h" #include "msm_kms.h" #include "msm_mmu.h" +#include "msm_gem.h" #include "sde_dbg.h" #include "sde_hw_catalog.h" #include "sde_hw_ctl.h" @@ -158,7 +159,7 @@ struct sde_kms { int core_rev; struct sde_mdss_cfg *catalog; - struct msm_mmu *mmu[MSM_SMMU_DOMAIN_MAX]; + struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX]; int mmu_id[MSM_SMMU_DOMAIN_MAX]; struct sde_power_client *core_client; -- GitLab From 6b6b3a5966df91fc17365e34886f0f77fc020232 Mon Sep 17 00:00:00 2001 From: Shrey Vijay Date: Wed, 21 Jun 2017 15:06:03 +0530 Subject: [PATCH 373/786] ARM: dts: msm: Add slimbus instance entries for SDM670 Add Audio and QCA slimbus instance entries. Entries are disabled by default. Need to enable along with slave instances. 
Signed-off-by: Shrey Vijay Change-Id: I24220e9af890c41725b074a3aa4dea38de18794e --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index bb5217ef0838..5f7ff6178222 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -546,6 +546,30 @@ #reset-cells = <1>; }; + slim_aud: slim@62dc0000 { + cell-index = <1>; + compatible = "qcom,slim-ngd"; + reg = <0x62dc0000 0x2c000>, + <0x62d84000 0x2a000>; + reg-names = "slimbus_physical", "slimbus_bam_physical"; + interrupts = <0 163 0>, <0 164 0>; + interrupt-names = "slimbus_irq", "slimbus_bam_irq"; + qcom,apps-ch-pipes = <0x780000>; + qcom,ea-pc = <0x290>; + status = "disabled"; + }; + + slim_qca: slim@62e40000 { + cell-index = <3>; + compatible = "qcom,slim-ngd"; + reg = <0x62e40000 0x2c000>, + <0x62e04000 0x20000>; + reg-names = "slimbus_physical", "slimbus_bam_physical"; + interrupts = <0 291 0>, <0 292 0>; + interrupt-names = "slimbus_irq", "slimbus_bam_irq"; + status = "disabled"; + }; + wdog: qcom,wdt@17980000{ compatible = "qcom,msm-watchdog"; reg = <0x17980000 0x1000>; -- GitLab From 3b48e5e02ceba56b6168d787efebb0211a228a0d Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Thu, 4 May 2017 16:55:50 -0600 Subject: [PATCH 374/786] thermal: regulator_cooling: Add AOP based regulator cooling device QTI chipsets need to place a voltage floor restriction at low temperature as per the device operating spec. In order to place the floor voltage restriction for regulators, AOP has to be notified with when the low temperature reaches. The new regulator cooling device will use the QMP messaging interface to place a voltage floor restriction. Change-Id: I0a3b878b356bab55ee1bb6458ea5c4016996a194 Signed-off-by: Ram Chandrasekar --- .../bindings/thermal/qti-rpmh-reg-cdev.txt | 44 ++++ drivers/thermal/qcom/Kconfig | 9 + drivers/thermal/qcom/Makefile | 1 + drivers/thermal/qcom/regulator_cooling.c | 224 ++++++++++++++++++ 4 files changed, 278 insertions(+) create mode 100644 Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt create mode 100644 drivers/thermal/qcom/regulator_cooling.c diff --git a/Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt b/Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt new file mode 100644 index 000000000000..b7734adc5082 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/qti-rpmh-reg-cdev.txt @@ -0,0 +1,44 @@ +RPMh regulator cooling device. + +The RPMh regulator cooling device, will be used to place a voltage floor +restriction on a rail. This cooling device will use a QMP AOP mail box to send +the message to apply and clear voltage floor restriction. + +The cooling device node should be a child of the regulator devicetree node, +which it is trying to place the floor restriction. + +Properties: + +- compatible: + Usage: required + Value type: + Definition: shall be "qcom,rpmh-reg-cdev" + +- qcom,reg-resource-name: + Usage: required + Value type: + Definition: The regulator resource name to be used for communicating + with RPMh. This value should be any of the below + resource name, + cx -> For CX rail, + mx -> For MX rail, + ebi -> For EBI rail. + +- mboxes: + Usage: required + Value type: + Definition: A phandle to the QMP AOP mail box, that needs to be used + for sending the floor restriction message. + +- #cooling-cells: Must be 2. Please refer to + for more + details. 
+ +Example: + + vdd_cx: rpmh-cx-regulator-cdev { + compatible = "qcom,rpmh-reg-cdev"; + mboxes = <&qmp_aop 0>; + qcom,reg-resource-name = "cx"; + #cooling-cells = <2>; + }; diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig index f6e1b86e5b63..38d5b932976f 100644 --- a/drivers/thermal/qcom/Kconfig +++ b/drivers/thermal/qcom/Kconfig @@ -40,3 +40,12 @@ config QTI_VIRTUAL_SENSOR The virtual sensor information includes the underlying thermal sensors to query for temperature and the aggregation logic to determine the virtual sensor temperature. + +config QTI_REG_COOLING_DEVICE + bool "QTI Regulator cooling device" + depends on THERMAL_OF && MSM_QMP + help + This enables the Regulator cooling device. This cooling device + will be used by QTI chipset to place a floor voltage restriction at + low temperatures. The regulator cooling device will message the AOP + using mail box to establish the floor voltage. diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile index 885938041140..5aae359c0c5a 100644 --- a/drivers/thermal/qcom/Makefile +++ b/drivers/thermal/qcom/Makefile @@ -3,3 +3,4 @@ qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o +obj-$(CONFIG_QTI_REG_COOLING_DEVICE) += regulator_cooling.o diff --git a/drivers/thermal/qcom/regulator_cooling.c b/drivers/thermal/qcom/regulator_cooling.c new file mode 100644 index 000000000000..3cbf19825c4d --- /dev/null +++ b/drivers/thermal/qcom/regulator_cooling.c @@ -0,0 +1,224 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include + +#define REG_CDEV_DRIVER "reg-cooling-device" +#define REG_MSG_FORMAT "{class:volt_flr, event:zero_temp, res:%s, value:%s}" +#define REG_CDEV_MAX_STATE 1 +#define MBOX_TOUT_MS 1000 +#define REG_MSG_MAX_LEN 100 + +struct reg_cooling_device { + struct thermal_cooling_device *cdev; + unsigned int min_state; + const char *resource_name; + struct mbox_chan *qmp_chan; + struct mbox_client *client; +}; + +struct aop_msg { + uint32_t len; + void *msg; +}; + +enum regulator_rail_type { + REG_COOLING_CX, + REG_COOLING_MX, + REG_COOLING_EBI, + REG_COOLING_NR, +}; + +static char *regulator_rail[REG_COOLING_NR] = { + "cx", + "mx", + "ebi", +}; + +static int aop_send_msg(struct reg_cooling_device *reg_dev, int min_state) +{ + char msg_buf[REG_MSG_MAX_LEN] = {0}; + int ret = 0; + struct aop_msg msg; + + if (!reg_dev->qmp_chan) { + pr_err("mbox not initialized for resource:%s\n", + reg_dev->resource_name); + return -EINVAL; + } + + ret = snprintf(msg_buf, REG_MSG_MAX_LEN, REG_MSG_FORMAT, + reg_dev->resource_name, + (min_state == REG_CDEV_MAX_STATE) ? 
"off" : "on"); + if (ret >= REG_MSG_MAX_LEN) { + pr_err("Message too long for resource:%s\n", + reg_dev->resource_name); + return -E2BIG; + } + msg.len = REG_MSG_MAX_LEN; + msg.msg = msg_buf; + ret = mbox_send_message(reg_dev->qmp_chan, &msg); + + return (ret < 0) ? ret : 0; +} + +static int reg_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = REG_CDEV_MAX_STATE; + return 0; +} + +static int reg_get_min_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct reg_cooling_device *reg_dev = cdev->devdata; + + *state = reg_dev->min_state; + return 0; +} + +static int reg_send_min_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + struct reg_cooling_device *reg_dev = cdev->devdata; + int ret = 0; + + if (state > REG_CDEV_MAX_STATE) + state = REG_CDEV_MAX_STATE; + + if (reg_dev->min_state == state) + return ret; + + ret = aop_send_msg(reg_dev, state); + if (ret) { + pr_err("regulator:%s switching to floor %lu error. err:%d\n", + reg_dev->resource_name, state, ret); + } else { + pr_debug("regulator:%s switched to %lu from %d\n", + reg_dev->resource_name, state, reg_dev->min_state); + reg_dev->min_state = state; + } + + return ret; +} + +static int reg_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = 0; + return 0; +} + +static int reg_send_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + return 0; +} + +static struct thermal_cooling_device_ops reg_dev_ops = { + .get_max_state = reg_get_max_state, + .get_cur_state = reg_get_cur_state, + .set_cur_state = reg_send_cur_state, + .set_min_state = reg_send_min_state, + .get_min_state = reg_get_min_state, +}; + +static int reg_init_mbox(struct platform_device *pdev, + struct reg_cooling_device *reg_dev) +{ + reg_dev->client = devm_kzalloc(&pdev->dev, sizeof(*reg_dev->client), + GFP_KERNEL); + if (!reg_dev->client) + return -ENOMEM; + + reg_dev->client->dev = &pdev->dev; + reg_dev->client->tx_block = true; + reg_dev->client->tx_tout = MBOX_TOUT_MS; + reg_dev->client->knows_txdone = false; + + reg_dev->qmp_chan = mbox_request_channel(reg_dev->client, 0); + if (IS_ERR(reg_dev->qmp_chan)) { + dev_err(&pdev->dev, "Mbox request failed. err:%ld\n", + PTR_ERR(reg_dev->qmp_chan)); + return PTR_ERR(reg_dev->qmp_chan); + } + + return 0; +} + +static int reg_dev_probe(struct platform_device *pdev) +{ + int ret = 0, idx = 0; + struct reg_cooling_device *reg_dev = NULL; + + reg_dev = devm_kzalloc(&pdev->dev, sizeof(*reg_dev), GFP_KERNEL); + if (!reg_dev) + return -ENOMEM; + + ret = reg_init_mbox(pdev, reg_dev); + if (ret) + return ret; + + ret = of_property_read_string(pdev->dev.of_node, + "qcom,reg-resource-name", + ®_dev->resource_name); + if (ret) { + dev_err(&pdev->dev, "Error reading resource name. 
err:%d\n", + ret); + goto mbox_free; + } + + for (idx = 0; idx < REG_COOLING_NR; idx++) { + if (!strcmp(reg_dev->resource_name, regulator_rail[idx])) + break; + } + if (idx == REG_COOLING_NR) { + dev_err(&pdev->dev, "Invalid regulator resource name:%s\n", + reg_dev->resource_name); + ret = -EINVAL; + goto mbox_free; + } + reg_dev->min_state = REG_CDEV_MAX_STATE; + reg_dev->cdev = thermal_of_cooling_device_register( + pdev->dev.of_node, + (char *)reg_dev->resource_name, + reg_dev, ®_dev_ops); + if (IS_ERR(reg_dev->cdev)) + goto mbox_free; + + return ret; + +mbox_free: + mbox_free_channel(reg_dev->qmp_chan); + + return ret; +} + +static const struct of_device_id reg_dev_of_match[] = { + {.compatible = "qcom,rpmh-reg-cdev", }, + {} +}; + +static struct platform_driver reg_dev_driver = { + .driver = { + .name = REG_CDEV_DRIVER, + .of_match_table = reg_dev_of_match, + }, + .probe = reg_dev_probe, +}; +builtin_platform_driver(reg_dev_driver); -- GitLab From cfab38422d83b60c138c235dc4841994a7466453 Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Tue, 9 May 2017 12:57:02 -0600 Subject: [PATCH 375/786] defconfig: Enable regulator cooling device Enable regulator cooling device to place voltage floor restriction at low temperatures. Change-Id: Ic143d48f81e6175186b36d166cbf45b0c8b2ae03 Signed-off-by: Ram Chandrasekar --- arch/arm64/configs/sdm845-perf_defconfig | 1 + arch/arm64/configs/sdm845_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index c69e0153fea4..7bc8d90890d9 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -325,6 +325,7 @@ CONFIG_THERMAL_TSENS=y CONFIG_MSM_BCL_PERIPHERAL_CTL=y CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_REG_COOLING_DEVICE=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_WCD934X_CODEC=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 2da9d07b6b70..70b685e79a8a 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -334,6 +334,7 @@ CONFIG_THERMAL_TSENS=y CONFIG_MSM_BCL_PERIPHERAL_CTL=y CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_QTI_REG_COOLING_DEVICE=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_WCD934X_CODEC=y -- GitLab From 641d4412329447e8af02874696025a88d82725a9 Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Thu, 18 May 2017 13:53:06 -0600 Subject: [PATCH 376/786] regulator: rpmh-regulator: initiate child device probe There can be child devices for a rpmh-regulator resource, which have to be probed after the rpmh-regulator resource probes successfully. Call of_platform_populate to initiate the probing of child devices. Change-Id: If45613e2e15a2179a62c1eb94a915407d80aff6d Signed-off-by: Ram Chandrasekar --- drivers/regulator/rpmh-regulator.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c index 4f5f86c4c72e..1ba892698be4 100644 --- a/drivers/regulator/rpmh-regulator.c +++ b/drivers/regulator/rpmh-regulator.c @@ -1187,6 +1187,9 @@ static int rpmh_regulator_allocate_vreg(struct rpmh_aggr_vreg *aggr_vreg) aggr_vreg->vreg_count = 0; for_each_available_child_of_node(aggr_vreg->dev->of_node, node) { + /* Skip child nodes handled by other drivers. 
*/ + if (of_find_property(node, "compatible", NULL)) + continue; aggr_vreg->vreg_count++; } @@ -1202,6 +1205,10 @@ static int rpmh_regulator_allocate_vreg(struct rpmh_aggr_vreg *aggr_vreg) i = 0; for_each_available_child_of_node(aggr_vreg->dev->of_node, node) { + /* Skip child nodes handled by other drivers. */ + if (of_find_property(node, "compatible", NULL)) + continue; + aggr_vreg->vreg[i].of_node = node; aggr_vreg->vreg[i].aggr_vreg = aggr_vreg; @@ -1623,6 +1630,7 @@ static int rpmh_regulator_probe(struct platform_device *pdev) mutex_unlock(&aggr_vreg->lock); } + of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); platform_set_drvdata(pdev, aggr_vreg); aggr_vreg_debug(aggr_vreg, "successfully probed; addr=0x%05X, type=%s\n", -- GitLab From 46ab29850d12cd012e0bba86ff822c05518d1e21 Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Wed, 21 Jun 2017 12:36:44 -0600 Subject: [PATCH 377/786] ARM: dts: msm: Add regulator cooling device for SDM845 Add regulator cooling device for CX, MX and EBI regulator for SDM845. The cooling device will be used to place voltage floor restriction. Change-Id: I66192d04b156a629baa3893db2c534468391f84b Signed-off-by: Ram Chandrasekar --- .../arm64/boot/dts/qcom/sdm845-regulator.dtsi | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi index 7befe3bdecbd..0b149ac9ca08 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi @@ -622,6 +622,13 @@ regulator-min-microvolt = ; regulator-max-microvolt = ; }; + + ebi_cdev: regulator-cdev { + compatible = "qcom,rpmh-reg-cdev"; + mboxes = <&qmp_aop 0>; + qcom,reg-resource-name = "ebi"; + #cooling-cells = <2>; + }; }; rpmh-regulator-smpa2 { @@ -718,6 +725,13 @@ regulator-max-microvolt = ; qcom,min-dropout-voltage-level = <(-1)>; }; + + cx_cdev: regulator-cdev { + compatible = "qcom,rpmh-reg-cdev"; + mboxes = <&qmp_aop 0>; + qcom,reg-resource-name = "cx"; + #cooling-cells = <2>; + }; }; rpmh-regulator-ldoa1 { @@ -786,6 +800,13 @@ regulator-min-microvolt = ; regulator-max-microvolt = ; }; + + mx_cdev: regulator-cdev { + compatible = "qcom,rpmh-reg-cdev"; + mboxes = <&qmp_aop 0>; + qcom,reg-resource-name = "mx"; + #cooling-cells = <2>; + }; }; rpmh-regulator-ldoa5 { -- GitLab From 3ec09c023bf61d9d59404af4cddae64d9b9e8f05 Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Tue, 27 Jun 2017 11:03:15 -0600 Subject: [PATCH 378/786] ARM: dts: msm: Add regulator floor mitigation config for sdm845 Add the regulator floor mitigation configuration for the vdd restriction monitoring thermal zones in SDM845. 
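Before the device tree changes below, it helps to see how this builds on the previous two patches: the rpmh-regulator probe now skips any child node that carries its own compatible string and then calls of_platform_populate(), which creates a platform device for each regulator-cdev child so the "qcom,rpmh-reg-cdev" cooling device driver can bind to it; the thermal zones added in this patch then point their cooling-maps at those cooling devices. A minimal, illustrative sketch of that parent-probe pattern (not the driver source) follows.

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/printk.h>

static int example_rpmh_parent_probe(struct platform_device *pdev)
{
	struct device_node *node;
	int vreg_count = 0;

	for_each_available_child_of_node(pdev->dev.of_node, node) {
		/* A child with its own compatible belongs to another driver. */
		if (of_find_property(node, "compatible", NULL))
			continue;
		vreg_count++;		/* plain regulator sub-node */
	}
	pr_debug("found %d regulator sub-nodes\n", vreg_count);

	/* ... allocate and register 'vreg_count' regulators here ... */

	/* Hand the remaining children (the regulator-cdev nodes) to the bus. */
	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
}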
Change-Id: I6bd1a878fd2e163a63dd12476ea35a345f509cac Signed-off-by: Ram Chandrasekar --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 1472 +++++++++++++++----------- 1 file changed, 863 insertions(+), 609 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 7ea200e5e1a1..4fbc875fb1a7 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -3351,615 +3351,6 @@ }; }; - aoss0-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 0>; - tracks-low; - trips { - aoss0_trip: aoss0-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&aoss0_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&aoss0_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&aoss0_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - cpu0-silver-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 1>; - tracks-low; - trips { - cpu0_trip: cpu0-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&cpu0_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&cpu0_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&cpu0_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - cpu1-silver-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 2>; - tracks-low; - trips { - cpu1_trip: cpu1-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&cpu1_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&cpu1_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&cpu1_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - cpu2-silver-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 3>; - tracks-low; - trips { - cpu2_trip: cpu2-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&cpu2_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&cpu2_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&cpu2_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - cpu3-silver-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 4>; - tracks-low; - trips { - cpu3_trip: cpu3-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&cpu3_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&cpu3_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&cpu3_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - kryo-l3-0-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 5>; - tracks-low; - trips { - l3_0_trip: l3-0-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&l3_0_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = 
<&l3_0_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&l3_0_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - kryo-l3-1-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 6>; - tracks-low; - trips { - l3_1_trip: l3-1-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&l3_1_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&l3_1_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&l3_1_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - cpu0-gold-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 7>; - tracks-low; - trips { - cpug0_trip: cpug0-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&cpug0_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&cpug0_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&cpug0_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - cpu1-gold-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 8>; - tracks-low; - trips { - cpug1_trip: cpug1-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&cpug1_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&cpug1_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&cpug1_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - cpu2-gold-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 9>; - tracks-low; - trips { - cpug2_trip: cpug2-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&cpug2_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&cpug2_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&cpug2_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - cpu3-gold-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 10>; - tracks-low; - trips { - cpug3_trip: cpug3-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&cpug3_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&cpug3_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&cpug3_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - gpu0-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 11>; - tracks-low; - trips { - gpu0_trip_l: gpu0-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&gpu0_trip_l>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&gpu0_trip_l>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&gpu0_trip_l>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - gpu1-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens0 12>; - tracks-low; - 
trips { - gpu1_trip_l: gpu1-trip_l { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&gpu1_trip_l>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&gpu1_trip_l>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&gpu1_trip_l>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - aoss1-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens1 0>; - tracks-low; - trips { - aoss1_trip: aoss1-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&aoss1_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&aoss1_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&aoss1_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - mdm-dsp-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens1 1>; - tracks-low; - trips { - dsp_trip: dsp-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&dsp_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&dsp_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&dsp_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - ddr-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens1 2>; - tracks-low; - trips { - ddr_trip: ddr-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&ddr_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&ddr_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&ddr_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - wlan-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens1 3>; - tracks-low; - trips { - wlan_trip: wlan-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&wlan_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&wlan_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&wlan_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - compute-hvx-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens1 4>; - tracks-low; - trips { - hvx_trip: hvx-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&hvx_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&hvx_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&hvx_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - camera-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens1 5>; - tracks-low; - trips { - camera_trip: camera-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&camera_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&camera_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&camera_trip>; - 
cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - mmss-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens1 6>; - tracks-low; - trips { - mmss_trip: mmss-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&mmss_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&mmss_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&mmss_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - - mdm-core-lowf { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-governor = "low_limits_floor"; - thermal-sensors = <&tsens1 7>; - tracks-low; - trips { - mdm_trip: mdm-trip { - temperature = <5000>; - hysteresis = <5000>; - type = "passive"; - }; - }; - cooling-maps { - cpu0_vdd_cdev { - trip = <&mdm_trip>; - cooling-device = <&CPU0 4 4>; - }; - cpu4_vdd_cdev { - trip = <&mdm_trip>; - cooling-device = <&CPU4 9 9>; - }; - gpu_vdd_cdev { - trip = <&mdm_trip>; - cooling-device = <&msm_gpu 1 1>; - }; - }; - }; - lmh-dcvs-01 { polling-delay-passive = <0>; polling-delay = <0>; @@ -4345,3 +3736,866 @@ }; }; }; + +&thermal_zones { + aoss0-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 0>; + tracks-low; + trips { + aoss0_trip: aoss0-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + cpu0-silver-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 1>; + tracks-low; + trips { + cpu0_trip: cpu0-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + cpu1-silver-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 2>; + tracks-low; + trips { + cpu1_trip: cpu1-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&ebi_cdev 0 0>; 
+ }; + }; + }; + + cpu2-silver-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 3>; + tracks-low; + trips { + cpu2_trip: cpu2-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + cpu3-silver-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 4>; + tracks-low; + trips { + cpu3_trip: cpu3-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + kryo-l3-0-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 5>; + tracks-low; + trips { + l3_0_trip: l3-0-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + kryo-l3-1-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 6>; + tracks-low; + trips { + l3_1_trip: l3-1-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + cpu0-gold-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 7>; + tracks-low; + trips { + cpug0_trip: cpug0-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&cpug0_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&cpug0_trip>; + 
cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&cpug0_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&cpug0_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpug0_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&cpug0_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + cpu1-gold-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 8>; + tracks-low; + trips { + cpug1_trip: cpug1-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + cpu2-gold-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 9>; + tracks-low; + trips { + cpug2_trip: cpug2-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + cpu3-gold-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 10>; + tracks-low; + trips { + cpug3_trip: cpug3-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + gpu0-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 11>; + tracks-low; + trips { + gpu0_trip_l: gpu0-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + gpu1-lowf { + polling-delay-passive = 
<0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens0 12>; + tracks-low; + trips { + gpu1_trip_l: gpu1-trip_l { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + aoss1-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 0>; + tracks-low; + trips { + aoss1_trip: aoss1-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + mdm-dsp-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 1>; + tracks-low; + trips { + dsp_trip: dsp-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + ddr-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 2>; + tracks-low; + trips { + ddr_trip: ddr-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + wlan-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 3>; + tracks-low; + trips { + wlan_trip: wlan-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&wlan_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&wlan_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&wlan_trip>; + 
cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&wlan_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&wlan_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&wlan_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + compute-hvx-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 4>; + tracks-low; + trips { + hvx_trip: hvx-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + camera-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 5>; + tracks-low; + trips { + camera_trip: camera-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + mmss-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 6>; + tracks-low; + trips { + mmss_trip: mmss-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; + + mdm-core-lowf { + polling-delay-passive = <0>; + polling-delay = <0>; + thermal-governor = "low_limits_floor"; + thermal-sensors = <&tsens1 7>; + tracks-low; + trips { + mdm_trip: mdm-trip { + temperature = <5000>; + hysteresis = <5000>; + type = "passive"; + }; + }; + cooling-maps { + cpu0_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&CPU0 4 4>; + }; + cpu4_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&CPU4 9 9>; + }; + gpu_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&msm_gpu 1 1>; + }; + cx_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&cx_cdev 0 0>; + }; + mx_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&mx_cdev 0 0>; + }; + ebi_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&ebi_cdev 0 0>; + }; + }; + }; +}; -- GitLab From 884259c4e58c45f848ecbcb9ddc386337d8fc36c Mon Sep 17 00:00:00 2001 From: Maheshwar Ajja Date: Wed, 28 Jun 2017 11:22:54 -0700 Subject: [PATCH 
379/786] msm: vidc: Fix lock issue in streamon failure queued_list lock is acquired before calling vb2_stream so don't acquire the lock within vb2_streamon function if driver streamon call failed. CRs-Fixed: 2064048 Change-Id: I8266def07d3056969943f209c63f638ea94de519 Signed-off-by: Maheshwar Ajja --- drivers/media/platform/msm/vidc/msm_vidc.c | 47 +++++++++++++--------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 2ca3e8d513f0..2c4a390dcbf8 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -918,24 +918,6 @@ static inline int start_streaming(struct msm_vidc_inst *inst) } fail_start: - if (rc) { - struct msm_vidc_buffer *temp, *next; - - mutex_lock(&inst->registeredbufs.lock); - list_for_each_entry_safe(temp, next, - &inst->registeredbufs.list, list) { - struct vb2_buffer *vb; - - print_vidc_buffer(VIDC_ERR, "return buf", inst, temp); - vb = msm_comm_get_vb_using_vidc_buffer(inst, temp); - if (vb) - vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED); - msm_comm_unmap_vidc_buffer(inst, temp); - list_del(&temp->list); - kfree(temp); - } - mutex_unlock(&inst->registeredbufs.lock); - } return rc; } @@ -987,6 +969,35 @@ static int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count) } stream_start_failed: + if (rc) { + struct msm_vidc_buffer *temp, *next; + struct vb2_buffer *vb; + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, + list) { + if (temp->vvb.vb2_buf.type != q->type) + continue; + /* + * queued_list lock is already acquired before + * vb2_stream so no need to acquire it again. + */ + list_for_each_entry(vb, &q->queued_list, queued_entry) { + if (msm_comm_compare_vb2_planes(inst, temp, + vb)) { + print_vb2_buffer(VIDC_ERR, "return vb", + inst, vb); + vb2_buffer_done(vb, + VB2_BUF_STATE_QUEUED); + break; + } + } + msm_comm_unmap_vidc_buffer(inst, temp); + list_del(&temp->list); + kfree(temp); + } + mutex_unlock(&inst->registeredbufs.lock); + } return rc; } -- GitLab From ac52d887ffe73ffe53aba2082a2835a9fe64ab8d Mon Sep 17 00:00:00 2001 From: Harry Yang Date: Wed, 31 May 2017 22:45:47 -0700 Subject: [PATCH 380/786] qcom: smb1355: Support connector_temp_health property SMB1355's aux therm input is connected to the connector thermistor. Expose it via parallel psy connector_temp_health property. 
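As a usage note, not part of the patch: once the parallel power supply is registered, any other kernel consumer can read the new property through the standard power-supply API. In the sketch below the psy name "parallel" and the helper itself are assumptions for illustration.

#include <linux/errno.h>
#include <linux/power_supply.h>

/* Hypothetical consumer: returns a POWER_SUPPLY_HEALTH_* value or -errno. */
static int example_read_connector_health(void)
{
	union power_supply_propval val = { 0, };
	struct power_supply *psy;
	int rc;

	psy = power_supply_get_by_name("parallel");	/* assumed psy name */
	if (!psy)
		return -ENODEV;

	rc = power_supply_get_property(psy,
			POWER_SUPPLY_PROP_CONNECTOR_HEALTH, &val);
	power_supply_put(psy);

	return rc ? rc : val.intval;	/* e.g. POWER_SUPPLY_HEALTH_OVERHEAT */
}

Writing it this way keeps the reference counting balanced: power_supply_get_by_name() takes a reference that power_supply_put() drops.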
Change-Id: I93d7769472629f983d0b3016574c105328ed8549 Signed-off-by: Harry Yang --- drivers/power/supply/qcom/smb1355-charger.c | 36 +++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c index d5fff74459cb..50e5cde7ee27 100644 --- a/drivers/power/supply/qcom/smb1355-charger.c +++ b/drivers/power/supply/qcom/smb1355-charger.c @@ -61,6 +61,15 @@ #define CHGR_BATTOV_CFG_REG (CHGR_BASE + 0x70) #define BATTOV_SETTING_MASK GENMASK(7, 0) +#define TEMP_COMP_STATUS_REG (MISC_BASE + 0x07) +#define SKIN_TEMP_RST_HOT_BIT BIT(6) +#define SKIN_TEMP_UB_HOT_BIT BIT(5) +#define SKIN_TEMP_LB_HOT_BIT BIT(4) +#define DIE_TEMP_TSD_HOT_BIT BIT(3) +#define DIE_TEMP_RST_HOT_BIT BIT(2) +#define DIE_TEMP_UB_HOT_BIT BIT(1) +#define DIE_TEMP_LB_HOT_BIT BIT(0) + #define BARK_BITE_WDOG_PET_REG (MISC_BASE + 0x43) #define BARK_BITE_WDOG_PET_BIT BIT(0) @@ -260,6 +269,7 @@ static enum power_supply_property smb1355_parallel_props[] = { POWER_SUPPLY_PROP_VOLTAGE_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_CONNECTOR_HEALTH, }; static int smb1355_get_prop_batt_charge_type(struct smb1355 *chip, @@ -305,6 +315,29 @@ static int smb1355_get_parallel_charging(struct smb1355 *chip, int *disabled) return 0; } +static int smb1355_get_prop_connector_health(struct smb1355 *chip) +{ + u8 temp; + int rc; + + rc = smb1355_read(chip, TEMP_COMP_STATUS_REG, &temp); + if (rc < 0) { + pr_err("Couldn't read comp stat reg rc = %d\n", rc); + return POWER_SUPPLY_HEALTH_UNKNOWN; + } + + if (temp & SKIN_TEMP_RST_HOT_BIT) + return POWER_SUPPLY_HEALTH_OVERHEAT; + + if (temp & SKIN_TEMP_UB_HOT_BIT) + return POWER_SUPPLY_HEALTH_HOT; + + if (temp & SKIN_TEMP_LB_HOT_BIT) + return POWER_SUPPLY_HEALTH_WARM; + + return POWER_SUPPLY_HEALTH_COOL; +} + static int smb1355_parallel_get_prop(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval *val) @@ -344,6 +377,9 @@ static int smb1355_parallel_get_prop(struct power_supply *psy, case POWER_SUPPLY_PROP_PARALLEL_MODE: val->intval = POWER_SUPPLY_PL_USBMID_USBMID; break; + case POWER_SUPPLY_PROP_CONNECTOR_HEALTH: + val->intval = smb1355_get_prop_connector_health(chip); + break; default: pr_err_ratelimited("parallel psy get prop %d not supported\n", prop); -- GitLab From 4200a9b58a5081c16554f8449d4626723b77ce74 Mon Sep 17 00:00:00 2001 From: Harry Yang Date: Wed, 21 Jun 2017 17:44:59 -0700 Subject: [PATCH 381/786] qcom: smb1355: Add charger temp properties in parallel psy Expose CHARGER_TEMP AND CHARGER_TEMP_MAX properties from RRADC readings. 
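One detail the diff below only shows implicitly: the IIO channels are requested lazily at property-read time and re-requested while the provider returns -EPROBE_DEFER, so the charger probe does not need to be ordered after the RRADC driver. A stripped-down sketch of that pattern, with placeholder names, follows.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/iio/consumer.h>

struct example_chip {
	struct device *dev;
	struct iio_channel *temp_chan;	/* cached; may briefly hold -EPROBE_DEFER */
};

static int example_read_charger_temp(struct example_chip *chip, int *temp)
{
	int rc, raw;

	/* Re-request the channel while the RRADC provider is still deferring. */
	if (!chip->temp_chan || PTR_ERR(chip->temp_chan) == -EPROBE_DEFER)
		chip->temp_chan = devm_iio_channel_get(chip->dev,
						       "charger_temp");
	if (IS_ERR(chip->temp_chan))
		return PTR_ERR(chip->temp_chan);

	rc = iio_read_channel_processed(chip->temp_chan, &raw);
	if (rc < 0)
		return rc;

	*temp = raw / 100;	/* same scaling the patch applies */
	return 0;
}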
Change-Id: I4c9582d888cd02798fced268ed52406cf81a32d6 Signed-off-by: Harry Yang --- drivers/power/supply/qcom/smb1355-charger.c | 52 +++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c index 50e5cde7ee27..b2c005954eb4 100644 --- a/drivers/power/supply/qcom/smb1355-charger.c +++ b/drivers/power/supply/qcom/smb1355-charger.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -124,12 +125,18 @@ struct smb_irq_info { int irq; }; +struct smb_iio { + struct iio_channel *temp_chan; + struct iio_channel *temp_max_chan; +}; + struct smb1355 { struct device *dev; char *name; struct regmap *regmap; struct smb_params param; + struct smb_iio iio; struct mutex write_lock; @@ -266,9 +273,12 @@ static enum power_supply_property smb1355_parallel_props[] = { POWER_SUPPLY_PROP_CHARGING_ENABLED, POWER_SUPPLY_PROP_PIN_ENABLED, POWER_SUPPLY_PROP_INPUT_SUSPEND, + POWER_SUPPLY_PROP_CHARGER_TEMP, + POWER_SUPPLY_PROP_CHARGER_TEMP_MAX, POWER_SUPPLY_PROP_VOLTAGE_MAX, POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_PARALLEL_MODE, POWER_SUPPLY_PROP_CONNECTOR_HEALTH, }; @@ -338,6 +348,42 @@ static int smb1355_get_prop_connector_health(struct smb1355 *chip) return POWER_SUPPLY_HEALTH_COOL; } + +static int smb1355_get_prop_charger_temp(struct smb1355 *chip, + union power_supply_propval *val) +{ + int rc; + + if (!chip->iio.temp_chan || + PTR_ERR(chip->iio.temp_chan) == -EPROBE_DEFER) + chip->iio.temp_chan = devm_iio_channel_get(chip->dev, + "charger_temp"); + + if (IS_ERR(chip->iio.temp_chan)) + return PTR_ERR(chip->iio.temp_chan); + + rc = iio_read_channel_processed(chip->iio.temp_chan, &val->intval); + val->intval /= 100; + return rc; +} + +static int smb1355_get_prop_charger_temp_max(struct smb1355 *chip, + union power_supply_propval *val) +{ + int rc; + + if (!chip->iio.temp_max_chan || + PTR_ERR(chip->iio.temp_max_chan) == -EPROBE_DEFER) + chip->iio.temp_max_chan = devm_iio_channel_get(chip->dev, + "charger_temp_max"); + if (IS_ERR(chip->iio.temp_max_chan)) + return PTR_ERR(chip->iio.temp_max_chan); + + rc = iio_read_channel_processed(chip->iio.temp_max_chan, &val->intval); + val->intval /= 100; + return rc; +} + static int smb1355_parallel_get_prop(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval *val) @@ -360,6 +406,12 @@ static int smb1355_parallel_get_prop(struct power_supply *psy, if (rc >= 0) val->intval = !(stat & DISABLE_CHARGING_BIT); break; + case POWER_SUPPLY_PROP_CHARGER_TEMP: + rc = smb1355_get_prop_charger_temp(chip, val); + break; + case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX: + rc = smb1355_get_prop_charger_temp_max(chip, val); + break; case POWER_SUPPLY_PROP_INPUT_SUSPEND: rc = smb1355_get_parallel_charging(chip, &val->intval); break; -- GitLab From 60c891af849c190cdf61d7397ab9cd087080a600 Mon Sep 17 00:00:00 2001 From: Harry Yang Date: Wed, 21 Jun 2017 16:35:04 -0700 Subject: [PATCH 382/786] ARM: dts: msm: specify RRADC temperature channels for smb1355 In SDM845 platforms with SMB1355 configuration, PMI8998 skin temp sensor is placed close to SMB1355 die, hence RRADC skin temp channels can be used for SMB1355 die temp reading. 
Change-Id: If417618986f199f5ad46d5ba8344b48522264b82 Signed-off-by: Harry Yang --- arch/arm64/boot/dts/qcom/smb1355.dtsi | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/smb1355.dtsi b/arch/arm64/boot/dts/qcom/smb1355.dtsi index 33c5e97c451b..999d87abaade 100644 --- a/arch/arm64/boot/dts/qcom/smb1355.dtsi +++ b/arch/arm64/boot/dts/qcom/smb1355.dtsi @@ -39,6 +39,11 @@ interrupt-parent = <&smb1355>; status = "disabled"; + io-channels = <&pmi8998_rradc 2>, + <&pmi8998_rradc 12>; + io-channel-names = "charger_temp", + "charger_temp_max"; + qcom,chgr@1000 { reg = <0x1000 0x100>; interrupts = <0x10 0x1 IRQ_TYPE_EDGE_RISING>; -- GitLab From dab74a7aa84119be815821735d8c9c49d95052fc Mon Sep 17 00:00:00 2001 From: David Dai Date: Wed, 21 Jun 2017 16:30:24 -0700 Subject: [PATCH 383/786] ARM: dts: msm: Add usb QoS clock references for sdm845 Allows configuration of USB3_1/USB3_0 QoS on the NOC by enabling needed clocks for access. Change-Id: Ia1d511e7635fff63bc42872085a2137d8bec3538 Signed-off-by: David Dai --- arch/arm64/boot/dts/qcom/sdm845-bus.dtsi | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi index b33b525a30ff..3ce5611dff6b 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-bus.dtsi @@ -680,7 +680,14 @@ qcom,qport = <10>; qcom,connections = <&slv_qns_a2noc_snoc>; qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; qcom,prio = <2>; + qcom,node-qos-clks { + clocks = + <&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>; + clock-names = + "clk-usb3-prim-axi-no-rate"; + }; }; mas_xm_usb3_1: mas-xm-usb3-1 { @@ -691,7 +698,14 @@ qcom,qport = <11>; qcom,connections = <&slv_qns_a2noc_snoc>; qcom,bus-dev = <&fab_aggre2_noc>; + qcom,ap-owned; qcom,prio = <2>; + qcom,node-qos-clks { + clocks = + <&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>; + clock-names = + "clk-usb3-sec-axi-no-rate"; + }; }; mas_qxm_camnoc_hf0_uncomp: mas-qxm-camnoc-hf0-uncomp { -- GitLab From db90fa1d1df6167145885ce79a22384002204d13 Mon Sep 17 00:00:00 2001 From: Gopikrishnaiah Anandan Date: Tue, 9 May 2017 17:56:08 -0700 Subject: [PATCH 384/786] drm: msm: sde: last command support for reg dma For each control path last command needs to be sent to reg dma hardware engine to de-assert it's busy signal which indicates that current frame can be displayed. Change adds support to send the last command before the control is flushed to ensure. 
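(That is, the LAST_CMD write is queued on the control path so the engine's busy signal de-asserts before the CTL flush takes effect and the frame is picked up.) The call site stays simple because the reg-DMA ops table is pre-filled with no-op defaults and only swapped for real handlers when a supported reg-DMA version is present. A generic, hedged sketch of that ops-table-with-defaults pattern; names are illustrative, not the driver's.

#include <linux/printk.h>
#include <linux/types.h>

struct demo_reg_dma_ops {
	int (*last_command)(int queue);
};

/* No-op default: targets without a reg-DMA block simply fall through. */
static int demo_default_last_command(int queue)
{
	return 0;
}

static int demo_v1_last_command(int queue)
{
	pr_debug("queue LAST_CMD on q%d and wait for busy to de-assert\n", queue);
	return 0;
}

static struct demo_reg_dma_ops demo_ops = {
	.last_command = demo_default_last_command,
};

static void demo_init(bool have_reg_dma_v1)
{
	if (have_reg_dma_v1)
		demo_ops.last_command = demo_v1_last_command;
}

static void demo_trigger_flush(void)
{
	/* Safe to call unconditionally: real handler or no-op default runs. */
	demo_ops.last_command(0);
	/* ... then write the CTL flush register ... */
}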
Change-Id: I8219d0130bad5e9032a791e357a284ae55bcc852 Signed-off-by: Gopikrishnaiah Anandan --- .../gpu/drm/msm/sde/sde_color_processing.c | 24 +----- drivers/gpu/drm/msm/sde/sde_crtc.c | 1 + drivers/gpu/drm/msm/sde/sde_hw_ctl.c | 6 ++ drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c | 86 +++++++++++++++++++ drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h | 4 + .../msm/sde/sde_hw_reg_dma_v1_color_proc.c | 3 - drivers/gpu/drm/msm/sde/sde_kms.c | 2 + drivers/gpu/drm/msm/sde/sde_reg_dma.c | 32 ++++++- drivers/gpu/drm/msm/sde/sde_reg_dma.h | 7 ++ 9 files changed, 141 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c index b4103028e0e1..9dcd5b78e473 100644 --- a/drivers/gpu/drm/msm/sde/sde_color_processing.c +++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c @@ -526,7 +526,7 @@ static void sde_cp_crtc_install_enum_property(struct drm_crtc *crtc, } static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node, - struct sde_crtc *sde_crtc, u32 last_feature) + struct sde_crtc *sde_crtc) { struct sde_hw_cp_cfg hw_cfg; struct sde_hw_mixer *hw_lm; @@ -541,16 +541,13 @@ static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node, hw_cfg.num_of_mixers = sde_crtc->num_mixers; hw_cfg.displayh = sde_crtc->base.mode.hdisplay; hw_cfg.displayv = sde_crtc->base.mode.vdisplay; + hw_cfg.last_feature = 0; for (i = 0; i < num_mixers && !ret; i++) { hw_lm = sde_crtc->mixers[i].hw_lm; hw_dspp = sde_crtc->mixers[i].hw_dspp; hw_cfg.ctl = sde_crtc->mixers[i].hw_ctl; hw_cfg.mixer_info = hw_lm; - if (i == num_mixers - 1) - hw_cfg.last_feature = last_feature; - else - hw_cfg.last_feature = 0; switch (prop_node->feature) { case SDE_CP_CRTC_DSPP_VLUT: if (!hw_dspp || !hw_dspp->ops.setup_vlut) { @@ -724,7 +721,6 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc) struct sde_hw_ctl *ctl; uint32_t flush_mask = 0; u32 num_mixers = 0, i = 0; - u32 num_of_features; if (!crtc || !crtc->dev) { DRM_ERROR("invalid crtc %pK dev %pK\n", crtc, @@ -757,15 +753,9 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc) set_dspp_flush = true; } - num_of_features = 0; - list_for_each_entry(prop_node, &sde_crtc->dirty_list, dirty_list) - num_of_features++; - list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list, dirty_list) { - num_of_features--; - sde_cp_crtc_setfeature(prop_node, sde_crtc, - (num_of_features == 0)); + sde_cp_crtc_setfeature(prop_node, sde_crtc); /* Set the flush flag to true */ if (prop_node->is_dspp_feature) set_dspp_flush = true; @@ -773,16 +763,10 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc) set_lm_flush = true; } - num_of_features = 0; - list_for_each_entry(prop_node, &sde_crtc->ad_dirty, dirty_list) - num_of_features++; - list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_dirty, dirty_list) { - num_of_features--; set_dspp_flush = true; - sde_cp_crtc_setfeature(prop_node, sde_crtc, - (num_of_features == 0)); + sde_cp_crtc_setfeature(prop_node, sde_crtc); } for (i = 0; i < num_mixers; i++) { diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index f2c7a505614a..85e80d8eb056 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -2488,6 +2488,7 @@ static void sde_crtc_handle_power_event(u32 event_type, void *arg) sde_plane_set_revalidate(plane, true); drm_modeset_unlock_crtc(crtc); + sde_cp_crtc_suspend(crtc); } mutex_unlock(&sde_crtc->crtc_lock); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c 
b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c index ba5508647bbe..0b3432b8baac 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c @@ -15,6 +15,7 @@ #include "sde_hw_ctl.h" #include "sde_dbg.h" #include "sde_kms.h" +#include "sde_reg_dma.h" #define CTL_LAYER(lm) \ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004)) @@ -111,6 +112,11 @@ static u32 sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx) static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx) { + struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops(); + + if (ops && ops->last_command) + ops->last_command(ctx, DMA_CTL_QUEUE0); + SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask); } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c index 678c84a1a4b0..dbd435b1441e 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c @@ -49,6 +49,7 @@ (cfg)->dma_buf->index) #define REG_DMA_DECODE_SEL 0x180AC060 +#define REG_DMA_LAST_CMD 0x180AC004 #define SINGLE_REG_WRITE_OPCODE (BIT(28)) #define REL_ADDR_OPCODE (BIT(27)) #define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29)) @@ -58,6 +59,7 @@ #define WRAP_MIN_SIZE 2 #define WRAP_MAX_SIZE (BIT(4) - 1) #define MAX_DWORDS_SZ (BIT(14) - 1) +#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128) typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg); @@ -93,17 +95,20 @@ static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg); static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg); static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg); static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg); +static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg); static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg); static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg); static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg); static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg); static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg); +static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg); static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf); static int check_support_v1(enum sde_reg_dma_features feature, enum sde_reg_dma_blk blk, bool *is_supported); static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg); static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg); static int reset_v1(struct sde_hw_ctl *ctl); +static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q); static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size); static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf); @@ -123,6 +128,8 @@ static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = { [REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg, }; +static struct sde_reg_dma_buffer *last_cmd_buf; + static void get_decode_sel(unsigned long blk, u32 *decode_sel) { int i = 0; @@ -474,6 +481,11 @@ int init_v1(struct sde_hw_reg_dma *cfg) return -EINVAL; reg_dma = cfg; + if (!last_cmd_buf) { + last_cmd_buf = alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ); + if (IS_ERR_OR_NULL(last_cmd_buf)) + return -EINVAL; + } reg_dma->ops.check_support = check_support_v1; reg_dma->ops.setup_payload = setup_payload_v1; reg_dma->ops.kick_off = kick_off_v1; @@ -481,6 +493,7 @@ int init_v1(struct sde_hw_reg_dma *cfg) reg_dma->ops.alloc_reg_dma_buf = 
alloc_reg_dma_buf_v1; reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1; reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1; + reg_dma->ops.last_command = last_cmd_v1; reg_dma_ctl_queue_off[CTL_0] = REG_DMA_CTL0_QUEUE_0_CMD0_OFF; for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++) @@ -648,3 +661,76 @@ static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf) lut_buf->next_op_allowed = DECODE_SEL_OP; return 0; } + +static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg) +{ + u32 remain_len, write_len; + + remain_len = BUFFER_SPACE_LEFT(cfg); + write_len = sizeof(u32); + if (remain_len < write_len) { + DRM_ERROR("buffer is full sz %d needs %d bytes\n", + remain_len, write_len); + return -EINVAL; + } + return 0; +} + +static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg) +{ + u32 *loc = NULL; + + loc = (u32 *)((u8 *)cfg->dma_buf->vaddr + + cfg->dma_buf->index); + loc[0] = REG_DMA_LAST_CMD; + loc[1] = BIT(0); + cfg->dma_buf->index = sizeof(u32) * 2; + cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP; + cfg->dma_buf->next_op_allowed = REG_WRITE_OP; + + return 0; +} + +static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q) +{ + struct sde_reg_dma_setup_ops_cfg cfg; + struct sde_reg_dma_kickoff_cfg kick_off; + + if (!last_cmd_buf || !ctl || q >= DMA_CTL_QUEUE_MAX) { + DRM_ERROR("invalid param buf %pK ctl %pK q %d\n", last_cmd_buf, + ctl, q); + return -EINVAL; + } + + cfg.dma_buf = last_cmd_buf; + reset_reg_dma_buffer_v1(last_cmd_buf); + if (validate_last_cmd(&cfg)) { + DRM_ERROR("validate buf failed\n"); + return -EINVAL; + } + + if (write_last_cmd(&cfg)) { + DRM_ERROR("write buf failed\n"); + return -EINVAL; + } + + kick_off.ctl = ctl; + kick_off.queue_select = q; + kick_off.trigger_mode = WRITE_IMMEDIATE; + kick_off.last_command = 1; + kick_off.op = REG_DMA_WRITE; + kick_off.dma_buf = last_cmd_buf; + if (kick_off_v1(&kick_off)) { + DRM_ERROR("kick off last cmd failed\n"); + return -EINVAL; + } + + return 0; +} + +void deinit_v1(void) +{ + if (last_cmd_buf) + dealloc_reg_dma_v1(last_cmd_buf); + last_cmd_buf = NULL; +} diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h index 8e37d380e3d9..4f9ab4ee2a19 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.h @@ -20,4 +20,8 @@ */ int init_v1(struct sde_hw_reg_dma *reg_dma); +/** + * deinit_v1() - free up any resources allocated during the v1 reg dma init + */ +void deinit_v1(void); #endif /* _SDE_HW_REG_DMA_V1_H */ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c index 0dcbb7e5e52e..285ef119c0dc 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1_color_proc.c @@ -408,7 +408,6 @@ static void dspp_3d_gamutv4_off(struct sde_hw_dspp *ctx, void *cfg) REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx], REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE); - kick_off.last_command = hw_cfg->last_feature; rc = dma_ops->kick_off(&kick_off); if (rc) DRM_ERROR("failed to kick off ret %d\n", rc); @@ -505,7 +504,6 @@ void reg_dmav1_setup_dspp_3d_gamutv4(struct sde_hw_dspp *ctx, void *cfg) REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx], REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE); - kick_off.last_command = hw_cfg->last_feature; rc = dma_ops->kick_off(&kick_off); if (rc) DRM_ERROR("failed to kick off ret 
%d\n", rc); @@ -598,7 +596,6 @@ void reg_dmav1_setup_dspp_gcv18(struct sde_hw_dspp *ctx, void *cfg) REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GC][ctx->idx], REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE); - kick_off.last_command = hw_cfg->last_feature; rc = dma_ops->kick_off(&kick_off); if (rc) { DRM_ERROR("failed to kick off ret %d\n", rc); diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 4c820c64db6d..426aa6ddfeeb 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1336,6 +1336,8 @@ static void _sde_kms_hw_destroy(struct sde_kms *sde_kms, if (sde_kms->mmio) msm_iounmap(pdev, sde_kms->mmio); sde_kms->mmio = NULL; + + sde_reg_dma_deinit(); } static void sde_kms_destroy(struct msm_kms *kms) diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.c b/drivers/gpu/drm/msm/sde/sde_reg_dma.c index cc115c550023..cc87aeb5c55c 100644 --- a/drivers/gpu/drm/msm/sde/sde_reg_dma.c +++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.c @@ -62,10 +62,17 @@ static int default_buf_reset_reg_dma(struct sde_reg_dma_buffer *lut_buf) return -EINVAL; } +static int default_last_command(struct sde_hw_ctl *ctl, + enum sde_reg_dma_queue q) +{ + return 0; +} + static struct sde_hw_reg_dma reg_dma = { .ops = {default_check_support, default_setup_payload, default_kick_off, default_reset, default_alloc_reg_dma_buf, - default_dealloc_reg_dma, default_buf_reset_reg_dma}, + default_dealloc_reg_dma, default_buf_reset_reg_dma, + default_last_command}, }; int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m, @@ -103,3 +110,26 @@ struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void) { return ®_dma.ops; } + +void sde_reg_dma_deinit(void) +{ + struct sde_hw_reg_dma op = { + .ops = {default_check_support, default_setup_payload, + default_kick_off, default_reset, default_alloc_reg_dma_buf, + default_dealloc_reg_dma, default_buf_reset_reg_dma, + default_last_command}, + }; + + if (!reg_dma.drm_dev || !reg_dma.caps) + return; + + switch (reg_dma.caps->version) { + case 1: + deinit_v1(); + break; + default: + break; + } + memset(®_dma, 0, sizeof(reg_dma)); + memcpy(®_dma.ops, &op.ops, sizeof(op.ops)); +} diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.h b/drivers/gpu/drm/msm/sde/sde_reg_dma.h index c8e464d35e7a..70d995a3fc2b 100644 --- a/drivers/gpu/drm/msm/sde/sde_reg_dma.h +++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.h @@ -251,6 +251,7 @@ struct sde_reg_dma_kickoff_cfg { * @alloc_reg_dma_buf: allocate reg dma buffer * @dealloc_reg_dma: de-allocate reg dma buffer * @reset_reg_dma_buf: reset the buffer to init state + * @last_command: notify control that last command is queued */ struct sde_hw_reg_dma_ops { int (*check_support)(enum sde_reg_dma_features feature, @@ -262,6 +263,7 @@ struct sde_hw_reg_dma_ops { struct sde_reg_dma_buffer* (*alloc_reg_dma_buf)(u32 size); int (*dealloc_reg_dma)(struct sde_reg_dma_buffer *lut_buf); int (*reset_reg_dma_buf)(struct sde_reg_dma_buffer *buf); + int (*last_command)(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q); }; /** @@ -298,4 +300,9 @@ int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m, * who call this api. 
*/ struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void); + +/** + * sde_reg_dma_deinit() - de-initialize the reg dma + */ +void sde_reg_dma_deinit(void); #endif /* _SDE_REG_DMA_H */ -- GitLab From f4b1582cd11213333c191dabec1b6c9072b41169 Mon Sep 17 00:00:00 2001 From: Praneeth Paladugu Date: Tue, 27 Jun 2017 15:39:24 -0700 Subject: [PATCH 385/786] msm: vidc: Fix buffer count related issues This change fixes various buffer counts in driver by properly updating them based on state. CRs-Fixed: 2066658 Change-Id: Iec37d67b88f8a6d06776a8cfba7d9812662d5325 Signed-off-by: Praneeth Paladugu --- drivers/media/platform/msm/vidc/msm_vidc.c | 68 ++++++++----------- .../media/platform/msm/vidc/msm_vidc_common.c | 9 +-- .../media/platform/msm/vidc/msm_vidc_common.h | 1 + 3 files changed, 34 insertions(+), 44 deletions(-) diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 2ca3e8d513f0..14703c854fe5 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -709,8 +709,8 @@ static int msm_vidc_queue_setup(struct vb2_queue *q, sizes[i] = inst->bufq[OUTPUT_PORT].plane_sizes[i]; bufreq->buffer_count_actual = *num_buffers; - rc = set_buffer_count(inst, bufreq->buffer_count_actual, - *num_buffers, HAL_BUFFER_INPUT); + rc = set_buffer_count(inst, bufreq->buffer_count_min_host, + bufreq->buffer_count_min_host, HAL_BUFFER_INPUT); } break; @@ -743,8 +743,8 @@ static int msm_vidc_queue_setup(struct vb2_queue *q, sizes[i] = inst->bufq[CAPTURE_PORT].plane_sizes[i]; bufreq->buffer_count_actual = *num_buffers; - rc = set_buffer_count(inst, bufreq->buffer_count_actual, - *num_buffers, buffer_type); + rc = set_buffer_count(inst, bufreq->buffer_count_min_host, + bufreq->buffer_count_min_host, buffer_type); } break; default: @@ -1223,29 +1223,6 @@ static int msm_vidc_op_s_ctrl(struct v4l2_ctrl *ctrl) return rc; } -static int set_actual_buffer_count(struct msm_vidc_inst *inst, - int count, enum hal_buffer type) -{ - int rc = 0; - struct hfi_device *hdev; - struct hal_buffer_count_actual buf_count; - - hdev = inst->core->device; - - buf_count.buffer_type = type; - buf_count.buffer_count_min_host = count; - buf_count.buffer_count_actual = count; - rc = call_hfi_op(hdev, session_set_property, - inst->session, HAL_PARAM_BUFFER_COUNT_ACTUAL, - &buf_count); - if (rc) - dprintk(VIDC_ERR, - "Failed to set actual count %d for buffer type %d\n", - count, type); - return rc; -} - - static int msm_vidc_get_count(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) { @@ -1270,13 +1247,20 @@ static int msm_vidc_get_count(struct msm_vidc_inst *inst, "Buffer count Host changed from %d to %d\n", bufreq->buffer_count_min_host, ctrl->val); - bufreq->buffer_count_min_host = ctrl->val; + bufreq->buffer_count_actual = + bufreq->buffer_count_min = + bufreq->buffer_count_min_host = + ctrl->val; } else { ctrl->val = bufreq->buffer_count_min_host; } - rc = set_actual_buffer_count(inst, - bufreq->buffer_count_min_host, + rc = set_buffer_count(inst, + bufreq->buffer_count_min_host, + bufreq->buffer_count_actual, HAL_BUFFER_INPUT); + + msm_vidc_update_host_buff_counts(inst); + ctrl->val = bufreq->buffer_count_min_host; return rc; } else if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) { @@ -1297,31 +1281,37 @@ static int msm_vidc_get_count(struct msm_vidc_inst *inst, return 0; } - - if (inst->in_reconfig) { - ctrl->val = bufreq->buffer_count_min; - } if (inst->session_type == MSM_VIDC_DECODER && !inst->in_reconfig && inst->state < 
MSM_VIDC_LOAD_RESOURCES_DONE) { dprintk(VIDC_DBG, "Clients updates Buffer count from %d to %d\n", bufreq->buffer_count_min_host, ctrl->val); - bufreq->buffer_count_min_host = ctrl->val; + bufreq->buffer_count_actual = + bufreq->buffer_count_min = + bufreq->buffer_count_min_host = + ctrl->val; } if (ctrl->val > bufreq->buffer_count_min_host) { dprintk(VIDC_DBG, "Buffer count Host changed from %d to %d\n", bufreq->buffer_count_min_host, ctrl->val); - bufreq->buffer_count_min_host = ctrl->val; + bufreq->buffer_count_actual = + bufreq->buffer_count_min = + bufreq->buffer_count_min_host = + ctrl->val; } else { ctrl->val = bufreq->buffer_count_min_host; } - rc = set_actual_buffer_count(inst, - bufreq->buffer_count_min_host, + rc = set_buffer_count(inst, + bufreq->buffer_count_min_host, + bufreq->buffer_count_actual, HAL_BUFFER_OUTPUT); + msm_vidc_update_host_buff_counts(inst); + ctrl->val = bufreq->buffer_count_min_host; + return rc; } return -EINVAL; @@ -1367,6 +1357,8 @@ static int try_get_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) break; case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: + if (inst->in_reconfig) + msm_vidc_update_host_buff_counts(inst); buffer_type = msm_comm_get_hal_output_buffer(inst); bufreq = get_buff_req_buffer(inst, buffer_type); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index ac69ab855920..b66e9ffb90eb 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -36,7 +36,6 @@ #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY #define MAX_SUPPORTED_INSTANCES 16 -static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst); const char *const mpeg_video_vidc_extradata[] = { "Extradata none", @@ -1604,10 +1603,8 @@ static void handle_event_change(enum hal_command_response cmd, void *data) return; } bufreq->buffer_count_min = event_notify->capture_buf_count; - } - msm_vidc_update_host_buff_counts(inst); mutex_unlock(&inst->lock); if (event == V4L2_EVENT_SEQ_CHANGED_INSUFFICIENT) { @@ -4044,7 +4041,7 @@ int msm_comm_qbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf) return rc; } -static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst) +int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst) { int extra_buffers; struct hal_buffer_requirements *bufreq; @@ -4143,8 +4140,8 @@ int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst) req.buffer_count_min, req.buffer_size); } } - - rc = msm_vidc_update_host_buff_counts(inst); + if (inst->session_type == MSM_VIDC_ENCODER) + rc = msm_vidc_update_host_buff_counts(inst); dprintk(VIDC_DBG, "Buffer requirements host adjusted:\n"); dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n", diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h index 5c653f5c1e49..bfd89156b783 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h @@ -61,6 +61,7 @@ int msm_comm_release_output_buffers(struct msm_vidc_inst *inst, void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst); int msm_comm_force_cleanup(struct msm_vidc_inst *inst); int msm_comm_suspend(int core_id); +int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst); enum hal_extradata_id msm_comm_get_hal_extradata_index( enum v4l2_mpeg_vidc_extradata index); struct hal_buffer_requirements *get_buff_req_buffer( -- GitLab From 
aa84fae095e6debbfff3652bf2e0fab2365d7135 Mon Sep 17 00:00:00 2001 From: Chinmay Sawarkar Date: Tue, 27 Jun 2017 12:39:51 -0700 Subject: [PATCH 386/786] msm: vidc: Update extradata buffer count calculations During buffer requirements call, output extradata buffer count should be updated to match output buffer count. Same applies for output2 buffers. CRs-Fixed: 2066658 Change-Id: I8c72ae0185a85284f9c94644c1c68a06ab01703f Signed-off-by: Chinmay Sawarkar --- .../media/platform/msm/vidc/msm_vidc_common.c | 29 ++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index ac69ab855920..b5bab31005e0 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -4075,7 +4075,6 @@ static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst) } /* For DPB buffers, no need to add Extra buffers */ - bufreq->buffer_count_min_host = bufreq->buffer_count_actual = bufreq->buffer_count_min; @@ -4093,6 +4092,20 @@ static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst) bufreq->buffer_count_min_host = bufreq->buffer_count_actual = bufreq->buffer_count_min + extra_buffers; + + bufreq = get_buff_req_buffer(inst, + HAL_BUFFER_EXTRADATA_OUTPUT2); + if (!bufreq) { + dprintk(VIDC_DBG, + "No buffer requirements : %x\n", + HAL_BUFFER_EXTRADATA_OUTPUT2); + } else { + if (bufreq->buffer_count_min) { + bufreq->buffer_count_min_host = + bufreq->buffer_count_actual = + bufreq->buffer_count_min + extra_buffers; + } + } } else { bufreq = get_buff_req_buffer(inst, @@ -4109,6 +4122,20 @@ static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst) bufreq->buffer_count_min_host = bufreq->buffer_count_actual = bufreq->buffer_count_min + extra_buffers; + + bufreq = get_buff_req_buffer(inst, + HAL_BUFFER_EXTRADATA_OUTPUT); + if (!bufreq) { + dprintk(VIDC_DBG, + "No buffer requirements : %x\n", + HAL_BUFFER_EXTRADATA_OUTPUT); + } else { + if (bufreq->buffer_count_min) { + bufreq->buffer_count_min_host = + bufreq->buffer_count_actual = + bufreq->buffer_count_min + extra_buffers; + } + } } return 0; -- GitLab From dc0b965aa203d76ea71e827aec0a721e29a064a4 Mon Sep 17 00:00:00 2001 From: Alex Sarraf Date: Thu, 18 May 2017 15:34:12 -0700 Subject: [PATCH 387/786] input: misc: hbtp_input: Add sysfs for suspend/resume Add display_pwr sysfs to trigger suspend resume event. 
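For reference, the node is created under the kernel kobject, so it appears as /sys/kernel/hbtp/display_pwr; writing a non-zero decimal value emits a KOBJ_ONLINE uevent on the input device and writing zero emits KOBJ_OFFLINE. A minimal userspace sketch of the intended usage (the helper name is illustrative, not part of this patch):

    #include <fcntl.h>
    #include <unistd.h>

    /* tell the hbtp driver that the display power state changed */
    static int hbtp_notify_display_pwr(int on)
    {
            int fd = open("/sys/kernel/hbtp/display_pwr", O_WRONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = write(fd, on ? "1" : "0", 1);
            close(fd);
            return (n == 1) ? 0 : -1;
    }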
Change-Id: I0a4c1dfdc072cfcd5380b753ae42e67d42ed2d33 Signed-off-by: Alex Sarraf --- drivers/input/misc/hbtp_input.c | 44 +++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/drivers/input/misc/hbtp_input.c b/drivers/input/misc/hbtp_input.c index fe7cc70c504c..c9ea89d884dd 100644 --- a/drivers/input/misc/hbtp_input.c +++ b/drivers/input/misc/hbtp_input.c @@ -87,6 +87,7 @@ struct hbtp_data { u32 power_on_delay; u32 power_off_delay; bool manage_pin_ctrl; + struct kobject *sysfs_kobject; }; static struct hbtp_data *hbtp; @@ -1350,6 +1351,39 @@ static struct platform_driver hbtp_pdev_driver = { }, }; +static ssize_t hbtp_display_pwr_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + u32 status; + ssize_t ret; + char *envp[2] = {HBTP_EVENT_TYPE_DISPLAY, NULL}; + + mutex_lock(&hbtp->mutex); + ret = kstrtou32(buf, 10, &status); + if (ret) { + pr_err("hbtp: ret error: %zd\n", ret); + return ret; + } + if (!hbtp || !hbtp->input_dev) { + pr_err("hbtp: hbtp or hbtp->input_dev not ready!\n"); + return ret; + } + if (status) { + pr_debug("hbtp: display power on!\n"); + kobject_uevent_env(&hbtp->input_dev->dev.kobj, + KOBJ_ONLINE, envp); + } else { + pr_debug("hbtp: display power off!\n"); + kobject_uevent_env(&hbtp->input_dev->dev.kobj, + KOBJ_OFFLINE, envp); + } + mutex_unlock(&hbtp->mutex); + return count; +} + +static struct kobj_attribute hbtp_display_attribute = + __ATTR(display_pwr, 0660, NULL, hbtp_display_pwr_store); + static int __init hbtp_init(void) { int error; @@ -1382,6 +1416,16 @@ static int __init hbtp_init(void) goto err_platform_drv_reg; } + hbtp->sysfs_kobject = kobject_create_and_add("hbtp", kernel_kobj); + if (!hbtp->sysfs_kobject) + pr_err("%s: Could not create sysfs kobject\n", __func__); + else { + error = sysfs_create_file(hbtp->sysfs_kobject, + &hbtp_display_attribute.attr); + if (error) + pr_err("failed to create the display_pwr sysfs\n"); + } + return 0; err_platform_drv_reg: -- GitLab From 98250b93ba26623fb36a36a49494e36129ce0fff Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Wed, 10 May 2017 15:33:46 -0700 Subject: [PATCH 388/786] soc: qcom: sleepstate: Increase notifier priority Remote processors may have race conditions if the notification that the local processor is suspending comes late. This change increases the priority of our suspend notifier block. CRs-Fixed: 2045408 Change-Id: Ifb398c1fcf584a5dd6d53fc47ca66d01718c74b5 Signed-off-by: Chris Lew --- drivers/soc/qcom/smp2p_sleepstate.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c index 44192ff367d0..5b0129e644b1 100644 --- a/drivers/soc/qcom/smp2p_sleepstate.c +++ b/drivers/soc/qcom/smp2p_sleepstate.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -46,6 +46,7 @@ static int sleepstate_pm_notifier(struct notifier_block *nb, static struct notifier_block sleepstate_pm_nb = { .notifier_call = sleepstate_pm_notifier, + .priority = INT_MAX, }; static int smp2p_sleepstate_probe(struct platform_device *pdev) -- GitLab From 6c1fed3a5d6a67423cac25321ea70c0363a9600e Mon Sep 17 00:00:00 2001 From: Bhalchandra Gajare Date: Tue, 27 Jun 2017 13:04:06 -0700 Subject: [PATCH 389/786] ASoC: wcd-dsp-mgr: add support to get ops from components Add support in the wcd dsp manager driver to provide interface to components to get operations from other components. This can be used when components need to communicate amongst themselves without intervention from manager driver. Change-Id: Idddb60f0c8d6179046879bc358c840936c61299f Signed-off-by: Bhalchandra Gajare --- include/sound/wcd-dsp-mgr.h | 9 ++++++++- sound/soc/codecs/wcd-dsp-mgr.c | 20 ++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h index 2beb9b38a46a..c1169862662f 100644 --- a/include/sound/wcd-dsp-mgr.h +++ b/include/sound/wcd-dsp-mgr.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -63,6 +63,9 @@ enum wdsp_event_type { /* Suspend/Resume related */ WDSP_EVENT_SUSPEND, WDSP_EVENT_RESUME, + + /* Misc */ + WDSP_EVENT_GET_DEVOPS }; enum wdsp_signal { @@ -109,6 +112,8 @@ struct wdsp_err_signal_arg { * their own ops to manager driver * @get_dev_for_cmpnt: components can use this to get handle * to struct device * of any other component + * @get_devops_for_cmpnt: components can use this to get ops + * from other related components. * @signal_handler: callback to notify manager driver that signal * has occurred. 
Cannot be called from interrupt * context as this can sleep @@ -126,6 +131,8 @@ struct wdsp_mgr_ops { struct wdsp_cmpnt_ops *ops); struct device *(*get_dev_for_cmpnt)(struct device *wdsp_dev, enum wdsp_cmpnt_type type); + int (*get_devops_for_cmpnt)(struct device *wdsp_dev, + enum wdsp_cmpnt_type type, void *data); int (*signal_handler)(struct device *wdsp_dev, enum wdsp_signal signal, void *arg); int (*vote_for_dsp)(struct device *wdsp_dev, bool vote); diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c index 93c2fd192190..17b8e795b44c 100644 --- a/sound/soc/codecs/wcd-dsp-mgr.c +++ b/sound/soc/codecs/wcd-dsp-mgr.c @@ -610,6 +610,25 @@ static struct device *wdsp_get_dev_for_cmpnt(struct device *wdsp_dev, return cmpnt->cdev; } +static int wdsp_get_devops_for_cmpnt(struct device *wdsp_dev, + enum wdsp_cmpnt_type type, + void *data) +{ + struct wdsp_mgr_priv *wdsp; + int ret = 0; + + if (!wdsp_dev || type >= WDSP_CMPNT_TYPE_MAX) + return -EINVAL; + + wdsp = dev_get_drvdata(wdsp_dev); + ret = wdsp_unicast_event(wdsp, type, + WDSP_EVENT_GET_DEVOPS, data); + if (ret) + WDSP_ERR(wdsp, "get_dev_ops failed for cmpnt type %d", + type); + return ret; +} + static void wdsp_collect_ramdumps(struct wdsp_mgr_priv *wdsp) { struct wdsp_img_section img_section; @@ -941,6 +960,7 @@ static int wdsp_resume(struct device *wdsp_dev) static struct wdsp_mgr_ops wdsp_ops = { .register_cmpnt_ops = wdsp_register_cmpnt_ops, .get_dev_for_cmpnt = wdsp_get_dev_for_cmpnt, + .get_devops_for_cmpnt = wdsp_get_devops_for_cmpnt, .signal_handler = wdsp_signal_handler, .vote_for_dsp = wdsp_vote_for_dsp, .suspend = wdsp_suspend, -- GitLab From 38d189784e68c8870e214b12d34742c4dba252b8 Mon Sep 17 00:00:00 2001 From: Bhalchandra Gajare Date: Tue, 27 Jun 2017 13:46:38 -0700 Subject: [PATCH 390/786] ASoC: wcd-spi: handle WDSP_EVENT_GETOPS in the event handler Add functionality to handle the WDSP_EVENT_GETOPS event and provide the required operations to the manager driver. This is helpful for manager or other components to call into the operations to request functionality from this component. Change-Id: I49232e11cbfca89f6ff8ac711c12ae6a39dcbd00 Signed-off-by: Bhalchandra Gajare --- include/sound/wcd-spi.h | 8 +++++++- sound/soc/codecs/wcd-spi.c | 15 +++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/include/sound/wcd-spi.h b/include/sound/wcd-spi.h index 1fff58d727a1..a9c336177cb5 100644 --- a/include/sound/wcd-spi.h +++ b/include/sound/wcd-spi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -54,4 +54,10 @@ int wcd_spi_data_read(struct spi_device *spi, struct wcd_spi_msg *msg) #endif /* End of CONFIG_SND_SOC_WCD_SPI */ +struct wcd_spi_ops { + struct spi_device *spi_dev; + int (*read_dev)(struct spi_device *spi, struct wcd_spi_msg *msg); + int (*write_dev)(struct spi_device *spi, struct wcd_spi_msg *msg); +}; + #endif /* End of __WCD_SPI_H__ */ diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c index a08b5984cf08..6c01dafb42bc 100644 --- a/sound/soc/codecs/wcd-spi.c +++ b/sound/soc/codecs/wcd-spi.c @@ -925,6 +925,7 @@ static int wdsp_spi_event_handler(struct device *dev, void *priv_data, { struct spi_device *spi = to_spi_device(dev); struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi); + struct wcd_spi_ops *spi_ops; int ret = 0; dev_dbg(&spi->dev, "%s: event type %d\n", @@ -979,6 +980,20 @@ static int wdsp_spi_event_handler(struct device *dev, void *priv_data, ret = wcd_spi_wait_for_resume(wcd_spi); break; + case WDSP_EVENT_GET_DEVOPS: + if (!data) { + dev_err(&spi->dev, "%s: invalid data\n", + __func__); + ret = -EINVAL; + break; + } + + spi_ops = (struct wcd_spi_ops *) data; + spi_ops->spi_dev = spi; + spi_ops->read_dev = wcd_spi_data_read; + spi_ops->write_dev = wcd_spi_data_write; + break; + default: dev_dbg(&spi->dev, "%s: Unhandled event %d\n", __func__, event); -- GitLab From bb58f021a91f06f66007e5cea6ccf8cdf6f430f1 Mon Sep 17 00:00:00 2001 From: Chris Lew Date: Tue, 27 Jun 2017 16:54:11 -0700 Subject: [PATCH 391/786] soc: qcom: glink_spi_xprt: Change to spi function pointer interface Use function pointers provided by the wcd spi driver instead of using the exposed read and write functions. This change helps remove the static dependency on the wcd spi driver. CRs-Fixed: 2068106 Change-Id: I61121dd83079767f73a412e40f9f4ac7198fb210 Signed-off-by: Chris Lew --- drivers/soc/qcom/glink_spi_xprt.c | 33 +++++++++++++++---------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c index e02c07a99f1c..c44aa93a04c7 100644 --- a/drivers/soc/qcom/glink_spi_xprt.c +++ b/drivers/soc/qcom/glink_spi_xprt.c @@ -111,7 +111,7 @@ struct glink_cmpnt { * @xprt_cfg: The transport configuration for the glink core * assocaited with this edge. * @subsys_name: Name of the remote subsystem in the edge. - * @spi_dev: Pointer to the connectingSPI Device. + * @spi_ops: Function pointers for ops provided by spi. * @fifo_size: Size of the FIFO at the remote end. * @tx_fifo_start: Base Address of the TX FIFO. * @tx_fifo_end: End Address of the TX FIFO. 
@@ -147,7 +147,7 @@ struct edge_info { struct glink_transport_if xprt_if; struct glink_core_transport_cfg xprt_cfg; char subsys_name[GLINK_NAME_SIZE]; - struct spi_device *spi_dev; + struct wcd_spi_ops spi_ops; uint32_t fifo_size; uint32_t tx_fifo_start; @@ -286,11 +286,14 @@ static int glink_spi_xprt_rx_data(struct edge_info *einfo, void *src, { struct wcd_spi_msg spi_msg; + if (unlikely(!einfo->spi_ops.read_dev)) + return -EINVAL; + memset(&spi_msg, 0, sizeof(spi_msg)); spi_msg.data = dst; spi_msg.remote_addr = (uint32_t)(size_t)src; spi_msg.len = (size_t)size; - return wcd_spi_data_read(einfo->spi_dev, &spi_msg); + return einfo->spi_ops.read_dev(einfo->spi_ops.spi_dev, &spi_msg); } /** @@ -310,11 +313,14 @@ static int glink_spi_xprt_tx_data(struct edge_info *einfo, void *src, { struct wcd_spi_msg spi_msg; + if (unlikely(!einfo->spi_ops.write_dev)) + return -EINVAL; + memset(&spi_msg, 0, sizeof(spi_msg)); spi_msg.data = src; spi_msg.remote_addr = (uint32_t)(size_t)dst; spi_msg.len = (size_t)size; - return wcd_spi_data_write(einfo->spi_dev, &spi_msg); + return einfo->spi_ops.write_dev(einfo->spi_ops.spi_dev, &spi_msg); } /** @@ -1796,27 +1802,20 @@ static int glink_wdsp_cmpnt_event_handler(struct device *dev, { struct edge_info *einfo = dev_get_drvdata(dev); struct glink_cmpnt *cmpnt = &einfo->cmpnt; - struct device *sdev; - struct spi_device *spi_dev; + int rc = -EINVAL; switch (event) { case WDSP_EVENT_PRE_BOOTUP: if (cmpnt && cmpnt->master_dev && cmpnt->master_ops && - cmpnt->master_ops->get_dev_for_cmpnt) - sdev = cmpnt->master_ops->get_dev_for_cmpnt( - cmpnt->master_dev, WDSP_CMPNT_TRANSPORT); - else - sdev = NULL; + cmpnt->master_ops->get_devops_for_cmpnt) + rc = cmpnt->master_ops->get_devops_for_cmpnt( + cmpnt->master_dev, WDSP_CMPNT_TRANSPORT, + &einfo->spi_ops); - if (!sdev) { + if (rc) dev_err(dev, "%s: Failed to get transport device\n", __func__); - break; - } - - spi_dev = to_spi_device(sdev); - einfo->spi_dev = spi_dev; break; case WDSP_EVENT_POST_BOOTUP: einfo->in_ssr = false; -- GitLab From 71c6cc9caa2d152389c8307f8c7e4396af79afad Mon Sep 17 00:00:00 2001 From: Veera Sundaram Sankaran Date: Tue, 27 Jun 2017 20:49:55 -0700 Subject: [PATCH 392/786] drm/msm/sde: remove release fence signal from frame done Move back the release fence signaling to complete commit for cmd mode panels. As it is causing unbalanced prepare and signal output fence because few prepare fence calls are not followed by a kickoff. 
Change-Id: Ib1cef5883c66306119c7f7f98af7b2b72017e1c7 Signed-off-by: Veera Sundaram Sankaran --- drivers/gpu/drm/msm/sde/sde_crtc.c | 35 +++--------------------------- 1 file changed, 3 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 0f9d73931fb3..fc8deca40ce5 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -1510,7 +1510,6 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; struct sde_kms *sde_kms; - struct drm_encoder *encoder; unsigned long flags; if (!work) { @@ -1564,26 +1563,8 @@ static void sde_crtc_frame_event_work(struct kthread_work *work) } if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE || - (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)) { - bool signal_fence = true; - - drm_for_each_encoder(encoder, crtc->dev) { - if (encoder->crtc != crtc) - continue; - - signal_fence &= - sde_encoder_is_cmd_mode(encoder); - } - - /* signal release fence only for cmd mode panels here */ - if (signal_fence) { - sde_fence_signal(&sde_crtc->output_fence, 0); - SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event, - SDE_EVTLOG_FUNC_CASE4); - } - + (fevent->event & SDE_ENCODER_FRAME_EVENT_ERROR)) complete_all(&sde_crtc->frame_done_comp); - } if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE) sde_core_perf_crtc_update(crtc, 0, false); @@ -1648,9 +1629,7 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc, { struct sde_crtc *sde_crtc; struct sde_crtc_state *cstate; - struct drm_encoder *encoder; int i; - bool signal_fence = true; if (!crtc || !crtc->state) { SDE_ERROR("invalid crtc\n"); @@ -1661,16 +1640,8 @@ void sde_crtc_complete_commit(struct drm_crtc *crtc, cstate = to_sde_crtc_state(crtc->state); SDE_EVT32_VERBOSE(DRMID(crtc)); - drm_for_each_encoder(encoder, crtc->dev) { - if (encoder->crtc != crtc) - continue; - - signal_fence &= !sde_encoder_is_cmd_mode(encoder); - } - - /* signal release fence for non-cmd mode panels */ - if (signal_fence) - sde_fence_signal(&sde_crtc->output_fence, 0); + /* signal release fence */ + sde_fence_signal(&sde_crtc->output_fence, 0); /* signal retire fence */ for (i = 0; i < cstate->num_connectors; ++i) -- GitLab From d6681c1b183969aa5f1fdcb9ac94be5a938b819f Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Tue, 27 Jun 2017 13:44:59 -0700 Subject: [PATCH 393/786] drivers: cpuidle: lpm-levels: Export symbols used by audio Add export symbol to symbols used by audio WCD. WCD is built as separate module and equires symbols called from WCD to be exported. 
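Without the EXPORT_SYMBOL, a codec driver built as a loadable module fails symbol resolution at modpost/insmod time even though the declaration is visible in the header. A hedged sketch of the kind of call site this enables (the module below is illustrative only and the header path is an assumption):

    #include <linux/module.h>
    #include <soc/qcom/pm.h>        /* assumed location of the declaration */

    static int __init example_audio_module_init(void)
    {
            /* budget wakeups against the reported deep idle latency */
            s32 latency = msm_cpuidle_get_deep_idle_latency();

            pr_info("deep idle latency: %d\n", latency);
            return 0;
    }
    module_init(example_audio_module_init);

    MODULE_LICENSE("GPL v2");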
CRs-Fixed: 2068871 Change-Id: I43ba8b2ed5c99416ede99213e625a7bcb868bb86 Signed-off-by: Karthikeyan Mani --- drivers/cpuidle/lpm-levels.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index e890192ad011..7536aa9ba320 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -133,10 +133,16 @@ module_param_named(print_parsed_dt, print_parsed_dt, bool, 0664); static bool sleep_disabled; module_param_named(sleep_disabled, sleep_disabled, bool, 0664); +/** + * msm_cpuidle_get_deep_idle_latency - Get deep idle latency value + * + * Returns an s32 latency value + */ s32 msm_cpuidle_get_deep_idle_latency(void) { return 10; } +EXPORT_SYMBOL(msm_cpuidle_get_deep_idle_latency); void lpm_suspend_wake_time(uint64_t wakeup_time) { -- GitLab From aefa88fa7be8b0a275e18518c7dce36d27ebba8c Mon Sep 17 00:00:00 2001 From: Phani Kumar Uppalapati Date: Mon, 26 Jun 2017 18:44:32 -0700 Subject: [PATCH 394/786] ARM: dts: msm: Add audio device tree overlays for SDM845 Add support for device tree overlays for audio. Move all WCD and WSA related device tree nodes to corresponding overlay dtsi file and add the dtsi file in corresponding dts overlay file. CRs-Fixed: 2068887 Change-Id: I74332a490ec7c791b39870e97500b324e6bf870d Signed-off-by: Phani Kumar Uppalapati --- .../boot/dts/qcom/sdm845-audio-overlay.dtsi | 182 ++++++++++++++++++ arch/arm64/boot/dts/qcom/sdm845-audio.dtsi | 163 +--------------- .../dts/qcom/sdm845-cdp-audio-overlay.dtsi | 21 ++ .../boot/dts/qcom/sdm845-cdp-overlay.dts | 1 + arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi | 4 - .../boot/dts/qcom/sdm845-mtp-overlay.dts | 1 + .../dts/qcom/sdm845-qrd-audio-overlay.dtsi | 72 +++++++ .../boot/dts/qcom/sdm845-qrd-overlay.dts | 1 + arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi | 61 ------ 9 files changed, 279 insertions(+), 227 deletions(-) create mode 100644 arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi create mode 100644 arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi create mode 100644 arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi new file mode 100644 index 000000000000..920830256b69 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdm845-audio-overlay.dtsi @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sdm845-wcd.dtsi" +#include "msm-wsa881x.dtsi" +#include + +&snd_934x { + qcom,audio-routing = + "AIF4 VI", "MCLK", + "RX_BIAS", "MCLK", + "MADINPUT", "MCLK", + "hifi amp", "LINEOUT1", + "hifi amp", "LINEOUT2", + "AMIC2", "MIC BIAS2", + "MIC BIAS2", "Headset Mic", + "AMIC3", "MIC BIAS2", + "MIC BIAS2", "ANCRight Headset Mic", + "AMIC4", "MIC BIAS2", + "MIC BIAS2", "ANCLeft Headset Mic", + "AMIC5", "MIC BIAS3", + "MIC BIAS3", "Handset Mic", + "DMIC0", "MIC BIAS1", + "MIC BIAS1", "Digital Mic0", + "DMIC1", "MIC BIAS1", + "MIC BIAS1", "Digital Mic1", + "DMIC2", "MIC BIAS3", + "MIC BIAS3", "Digital Mic2", + "DMIC3", "MIC BIAS3", + "MIC BIAS3", "Digital Mic3", + "DMIC4", "MIC BIAS4", + "MIC BIAS4", "Digital Mic4", + "DMIC5", "MIC BIAS4", + "MIC BIAS4", "Digital Mic5", + "SpkrLeft IN", "SPK1 OUT", + "SpkrRight IN", "SPK2 OUT"; + + qcom,msm-mbhc-hphl-swh = <1>; + qcom,msm-mbhc-gnd-swh = <1>; + qcom,hph-en0-gpio = <&tavil_hph_en0>; + qcom,hph-en1-gpio = <&tavil_hph_en1>; + qcom,tavil-mclk-clk-freq = <9600000>; + + asoc-codec = <&stub_codec>; + asoc-codec-names = "msm-stub-codec.1"; + + qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>; + qcom,usbc-analog-en2-gpio = <&tlmm 51 0>; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&wcd_usbc_analog_en2_active>; + pinctrl-1 = <&wcd_usbc_analog_en2_idle>; + + qcom,wsa-max-devs = <2>; + qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>, + <&wsa881x_0213>, <&wsa881x_0214>; + qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", + "SpkrLeft", "SpkrRight"; +}; + +&soc { + wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@49 { + compatible = "qcom,msm-cdc-pinctrl"; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&wcd_usbc_analog_en1_active>; + pinctrl-1 = <&wcd_usbc_analog_en1_idle>; + }; + + wcd9xxx_intc: wcd9xxx-irq { + status = "ok"; + compatible = "qcom,wcd9xxx-irq"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tlmm>; + qcom,gpio-connect = <&tlmm 54 0>; + pinctrl-names = "default"; + pinctrl-0 = <&wcd_intr_default>; + }; + + clock_audio_lnbb: audio_ext_clk_lnbb { + status = "ok"; + compatible = "qcom,audio-ref-clk"; + clock-names = "osr_clk"; + clocks = <&clock_rpmh RPMH_LN_BB_CLK2>; + qcom,node_has_rpm_clock; + #clock-cells = <1>; + }; + + wcd_rst_gpio: msm_cdc_pinctrl@64 { + compatible = "qcom,msm-cdc-pinctrl"; + qcom,cdc-rst-n-gpio = <&tlmm 64 0>; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&cdc_reset_active>; + pinctrl-1 = <&cdc_reset_sleep>; + }; + + qocm,wcd-dsp-glink { + compatible = "qcom,wcd-dsp-glink"; + }; + + qcom,wcd-dsp-mgr { + compatible = "qcom,wcd-dsp-mgr"; + qcom,wdsp-components = <&wcd934x_cdc 0>, + <&wcd_spi_0 1>, + <&glink_spi_xprt_wdsp 2>; + qcom,img-filename = "cpe_9340"; + }; +}; + +&slim_aud { + wcd934x_cdc: tavil_codec { + compatible = "qcom,tavil-slim-pgd"; + elemental-addr = [00 01 50 02 17 02]; + + interrupt-parent = <&wcd9xxx_intc>; + interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 + 17 18 19 20 21 22 23 24 25 26 27 28 29 + 30 31>; + + qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>; + + clock-names = "wcd_clk"; + clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>; + + cdc-vdd-buck-supply = <&pm8998_s4>; + qcom,cdc-vdd-buck-voltage = <1800000 1800000>; + qcom,cdc-vdd-buck-current = <650000>; + + cdc-buck-sido-supply = <&pm8998_s4>; + qcom,cdc-buck-sido-voltage = <1800000 1800000>; + qcom,cdc-buck-sido-current = <250000>; + + cdc-vdd-tx-h-supply = <&pm8998_s4>; + qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>; + qcom,cdc-vdd-tx-h-current = <25000>; + + 
cdc-vdd-rx-h-supply = <&pm8998_s4>; + qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>; + qcom,cdc-vdd-rx-h-current = <25000>; + + cdc-vddpx-1-supply = <&pm8998_s4>; + qcom,cdc-vddpx-1-voltage = <1800000 1800000>; + qcom,cdc-vddpx-1-current = <10000>; + + qcom,cdc-static-supplies = "cdc-vdd-buck", + "cdc-buck-sido", + "cdc-vdd-tx-h", + "cdc-vdd-rx-h", + "cdc-vddpx-1"; + + qcom,cdc-micbias1-mv = <1800>; + qcom,cdc-micbias2-mv = <1800>; + qcom,cdc-micbias3-mv = <1800>; + qcom,cdc-micbias4-mv = <1800>; + + qcom,cdc-mclk-clk-rate = <9600000>; + qcom,cdc-slim-ifd = "tavil-slim-ifd"; + qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02]; + qcom,cdc-dmic-sample-rate = <4800000>; + qcom,cdc-mad-dmic-rate = <600000>; + + qcom,wdsp-cmpnt-dev-name = "tavil_codec"; + + wcd_spi_0: wcd_spi { + compatible = "qcom,wcd-spi-v2"; + qcom,master-bus-num = <0>; + qcom,chip-select = <0>; + qcom,max-frequency = <9600000>; + qcom,mem-base-addr = <0x100000>; + }; + + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi index ad046e955093..dd82ad74ff16 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-audio.dtsi @@ -12,9 +12,6 @@ */ #include "msm-audio-lpass.dtsi" -#include "sdm845-wcd.dtsi" -#include "msm-wsa881x.dtsi" -#include &msm_audio_ion { iommus = <&apps_smmu 0x1821 0x0>; @@ -31,7 +28,7 @@ qcom,clk-mult = <10>; }; - sound-tavil { + snd_934x: sound-tavil { compatible = "qcom,sdm845-asoc-snd-tavil"; qcom,model = "sdm845-tavil-snd-card"; qcom,wcn-btfm; @@ -48,43 +45,6 @@ "lpaif_tert_mode_muxsel", "lpaif_quat_mode_muxsel"; - qcom,audio-routing = - "AIF4 VI", "MCLK", - "RX_BIAS", "MCLK", - "MADINPUT", "MCLK", - "hifi amp", "LINEOUT1", - "hifi amp", "LINEOUT2", - "AMIC2", "MIC BIAS2", - "MIC BIAS2", "Headset Mic", - "AMIC3", "MIC BIAS2", - "MIC BIAS2", "ANCRight Headset Mic", - "AMIC4", "MIC BIAS2", - "MIC BIAS2", "ANCLeft Headset Mic", - "AMIC5", "MIC BIAS3", - "MIC BIAS3", "Handset Mic", - "DMIC0", "MIC BIAS1", - "MIC BIAS1", "Digital Mic0", - "DMIC1", "MIC BIAS1", - "MIC BIAS1", "Digital Mic1", - "DMIC2", "MIC BIAS3", - "MIC BIAS3", "Digital Mic2", - "DMIC3", "MIC BIAS3", - "MIC BIAS3", "Digital Mic3", - "DMIC4", "MIC BIAS4", - "MIC BIAS4", "Digital Mic4", - "DMIC5", "MIC BIAS4", - "MIC BIAS4", "Digital Mic5", - "SpkrLeft IN", "SPK1 OUT", - "SpkrRight IN", "SPK2 OUT"; - - qcom,msm-mbhc-hphl-swh = <1>; - qcom,msm-mbhc-gnd-swh = <1>; - qcom,hph-en0-gpio = <&tavil_hph_en0>; - qcom,hph-en1-gpio = <&tavil_hph_en1>; - qcom,tavil-mclk-clk-freq = <9600000>; - - qcom,usbc-analog-en1-gpio = <&wcd_usbc_analog_en1_gpio>; - asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>, <&loopback>, <&compress>, <&hostless>, <&afe>, <&lsm>, <&routing>, <&compr>, @@ -136,65 +96,6 @@ "msm-dai-q6-tdm.36880", "msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913"; - asoc-codec = <&stub_codec>; - asoc-codec-names = "msm-stub-codec.1"; - qcom,wsa-max-devs = <2>; - qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>, - <&wsa881x_0213>, <&wsa881x_0214>; - qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrRight", - "SpkrLeft", "SpkrRight"; - - qcom,usbc-analog-en2-gpio = <&tlmm 51 0>; - pinctrl-names = "aud_active", "aud_sleep"; - pinctrl-0 = <&wcd_usbc_analog_en2_active>; - pinctrl-1 = <&wcd_usbc_analog_en2_idle>; - }; - - wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl@49 { - compatible = "qcom,msm-cdc-pinctrl"; - pinctrl-names = "aud_active", "aud_sleep"; - pinctrl-0 = 
<&wcd_usbc_analog_en1_active>; - pinctrl-1 = <&wcd_usbc_analog_en1_idle>; - }; - - wcd9xxx_intc: wcd9xxx-irq { - status = "ok"; - compatible = "qcom,wcd9xxx-irq"; - interrupt-controller; - #interrupt-cells = <1>; - interrupt-parent = <&tlmm>; - qcom,gpio-connect = <&tlmm 54 0>; - pinctrl-names = "default"; - pinctrl-0 = <&wcd_intr_default>; - }; - - clock_audio_lnbb: audio_ext_clk_lnbb { - status = "ok"; - compatible = "qcom,audio-ref-clk"; - clock-names = "osr_clk"; - clocks = <&clock_rpmh RPMH_LN_BB_CLK2>; - qcom,node_has_rpm_clock; - #clock-cells = <1>; - }; - - wcd_rst_gpio: msm_cdc_pinctrl@64 { - compatible = "qcom,msm-cdc-pinctrl"; - qcom,cdc-rst-n-gpio = <&tlmm 64 0>; - pinctrl-names = "aud_active", "aud_sleep"; - pinctrl-0 = <&cdc_reset_active>; - pinctrl-1 = <&cdc_reset_sleep>; - }; - - qocm,wcd-dsp-glink { - compatible = "qcom,wcd-dsp-glink"; - }; - - qcom,wcd-dsp-mgr { - compatible = "qcom,wcd-dsp-mgr"; - qcom,wdsp-components = <&wcd934x_cdc 0>, - <&wcd_spi_0 1>, - <&glink_spi_xprt_wdsp 2>; - qcom,img-filename = "cpe_9340"; }; }; @@ -203,66 +104,4 @@ compatible = "qcom,msm-dai-slim"; elemental-addr = [ff ff ff fe 17 02]; }; - - wcd934x_cdc: tavil_codec { - compatible = "qcom,tavil-slim-pgd"; - elemental-addr = [00 01 50 02 17 02]; - - interrupt-parent = <&wcd9xxx_intc>; - interrupts = <0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 - 17 18 19 20 21 22 23 24 25 26 27 28 29 - 30 31>; - - qcom,wcd-rst-gpio-node = <&wcd_rst_gpio>; - - clock-names = "wcd_clk"; - clocks = <&clock_audio_lnbb AUDIO_PMIC_LNBB_CLK>; - - cdc-vdd-buck-supply = <&pm8998_s4>; - qcom,cdc-vdd-buck-voltage = <1800000 1800000>; - qcom,cdc-vdd-buck-current = <650000>; - - cdc-buck-sido-supply = <&pm8998_s4>; - qcom,cdc-buck-sido-voltage = <1800000 1800000>; - qcom,cdc-buck-sido-current = <250000>; - - cdc-vdd-tx-h-supply = <&pm8998_s4>; - qcom,cdc-vdd-tx-h-voltage = <1800000 1800000>; - qcom,cdc-vdd-tx-h-current = <25000>; - - cdc-vdd-rx-h-supply = <&pm8998_s4>; - qcom,cdc-vdd-rx-h-voltage = <1800000 1800000>; - qcom,cdc-vdd-rx-h-current = <25000>; - - cdc-vddpx-1-supply = <&pm8998_s4>; - qcom,cdc-vddpx-1-voltage = <1800000 1800000>; - qcom,cdc-vddpx-1-current = <10000>; - - qcom,cdc-static-supplies = "cdc-vdd-buck", - "cdc-buck-sido", - "cdc-vdd-tx-h", - "cdc-vdd-rx-h", - "cdc-vddpx-1"; - - qcom,cdc-micbias1-mv = <1800>; - qcom,cdc-micbias2-mv = <1800>; - qcom,cdc-micbias3-mv = <1800>; - qcom,cdc-micbias4-mv = <1800>; - - qcom,cdc-mclk-clk-rate = <9600000>; - qcom,cdc-slim-ifd = "tavil-slim-ifd"; - qcom,cdc-slim-ifd-elemental-addr = [00 00 50 02 17 02]; - qcom,cdc-dmic-sample-rate = <4800000>; - qcom,cdc-mad-dmic-rate = <600000>; - - qcom,wdsp-cmpnt-dev-name = "tavil_codec"; - - wcd_spi_0: wcd_spi { - compatible = "qcom,wcd-spi-v2"; - qcom,master-bus-num = <0>; - qcom,chip-select = <0>; - qcom,max-frequency = <9600000>; - qcom,mem-base-addr = <0x100000>; - }; - }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi new file mode 100644 index 000000000000..68f2e51fea20 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-audio-overlay.dtsi @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include "sdm845-audio-overlay.dtsi" + +&soc { + sound-tavil { + qcom,us-euro-gpios = <&tavil_us_euro_sw>; + }; +}; + diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts index 7d7c9cf532d9..4747c993a2f0 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-cdp-overlay.dts @@ -19,6 +19,7 @@ #include #include "sdm845-cdp.dtsi" +#include "sdm845-cdp-audio-overlay.dtsi" / { model = "Qualcomm Technologies, Inc. SDM845 v1 CDP"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi index 0430ea4cddc7..f9b0b777048e 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi @@ -37,10 +37,6 @@ }; &soc { - sound-tavil { - qcom,us-euro-gpios = <&tavil_us_euro_sw>; - }; - gpio_keys { compatible = "gpio-keys"; label = "gpio-keys"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts index e299744f57e4..52c0f05302c8 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp-overlay.dts @@ -19,6 +19,7 @@ #include #include "sdm845-mtp.dtsi" +#include "sdm845-audio-overlay.dtsi" / { model = "Qualcomm Technologies, Inc. SDM845 v1 MTP"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi new file mode 100644 index 000000000000..2ee903103be8 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdm845-qrd-audio-overlay.dtsi @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include "sdm845-audio-overlay.dtsi" + +&soc { + sound-tavil { + qcom,wsa-max-devs = <1>; + qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>; + qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight"; + + qcom,msm-mbhc-usbc-audio-supported = <1>; + + qcom,usbc-analog-en2-gpio = <&tlmm 51 0>; + pinctrl-names = "aud_active", "aud_sleep"; + pinctrl-0 = <&wcd_usbc_analog_en2_active>; + pinctrl-1 = <&wcd_usbc_analog_en2_idle>; + }; +}; + +&wcd934x_cdc { + wcd_pinctrl@5 { + us_euro_sw_wcd_active { + mux { + pins = "gpio1"; + }; + + config { + pins = "gpio1"; + /delete-property/ output-high; + bias-high-impedance; + }; + }; + + us_euro_sw_wcd_sleep { + mux { + pins = "gpio1"; + }; + + config { + pins = "gpio1"; + /delete-property/ output-low; + bias-high-impedance; + }; + }; + }; + + swr_master { + wsa881x@20170211 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x20170211>; + qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>; + }; + + wsa881x@21170213 { + compatible = "qcom,wsa881x"; + reg = <0x00 0x21170213>; + qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>; + }; + }; +}; + diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts index 6243fec06329..5729d7676b0d 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-qrd-overlay.dts @@ -19,6 +19,7 @@ #include #include "sdm845-qrd.dtsi" +#include "sdm845-qrd-audio-overlay.dtsi" / { model = "Qualcomm Technologies, Inc. SDM845 v1 QRD"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi index 405e5459c287..561eb230abff 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi @@ -94,67 +94,6 @@ #cooling-cells = <2>; }; -&soc { - sound-tavil { - qcom,wsa-max-devs = <1>; - qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>; - qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight"; - - qcom,msm-mbhc-usbc-audio-supported = <1>; - - qcom,usbc-analog-en2-gpio = <&tlmm 51 0>; - pinctrl-names = "aud_active", "aud_sleep"; - pinctrl-0 = <&wcd_usbc_analog_en2_active>; - pinctrl-1 = <&wcd_usbc_analog_en2_idle>; - }; -}; - -&wcd934x_cdc { - wcd: wcd_pinctrl@5 { - us_euro_sw_wcd_active: us_euro_sw_wcd_active { - mux { - pins = "gpio1"; - }; - - config { - pins = "gpio1"; - /delete-property/ output-high; - bias-high-impedance; - }; - }; - - us_euro_sw_wcd_sleep: us_euro_sw_wcd_sleep { - mux { - pins = "gpio1"; - }; - - config { - pins = "gpio1"; - /delete-property/ output-low; - bias-high-impedance; - }; - }; - }; -}; - -&slim_aud { - tavil_codec { - swr_master { - wsa881x_0211: wsa881x@20170211 { - compatible = "qcom,wsa881x"; - reg = <0x00 0x20170211>; - qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>; - }; - - wsa881x_0213: wsa881x@21170213 { - compatible = "qcom,wsa881x"; - reg = <0x00 0x21170213>; - qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>; - }; - }; - }; -}; - &ufsphy_mem { compatible = "qcom,ufs-phy-qmp-v3"; -- GitLab From 9d63f3bc59cb852ad8aa17ce181654d82257d006 Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Tue, 27 Jun 2017 14:23:26 -0700 Subject: [PATCH 395/786] ASoC: msm: export symbols used by WCD Add export symbol to symbols used by audio WCD. WCD is built as separate module and requires symbols called from WCD to be exported. 
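These are the same qdsp6v2 entry points the sound card code already uses when built in; exporting them lets the module loader resolve them when the WCD/machine code is built as a .ko. An illustrative (not actual) call pattern from such a module, using the signatures added in this patch; the header paths and the wrapper name are assumptions:

    #include <linux/errno.h>
    #include <sound/q6afe-v2.h>     /* afe_set_lpass_clock_v2(), struct afe_clk_set; path assumed */
    #include "q6core.h"             /* q6core_is_adsp_ready(); path assumed */

    static int example_enable_afe_clock(u16 port_id, struct afe_clk_set *cfg)
    {
            /* do not touch AFE until the ADSP reports ready */
            if (!q6core_is_adsp_ready())
                    return -ENODEV;

            return afe_set_lpass_clock_v2(port_id, cfg);
    }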
CRs-Fixed: 2068871 Change-Id: Ic784d8671ae03775f1db5dee11d2152e5fc445ce Signed-off-by: Karthikeyan Mani --- sound/soc/msm/qdsp6v2/audio_cal_utils.c | 61 +++++++++++++++++++++++++ sound/soc/msm/qdsp6v2/q6afe.c | 20 ++++++++ sound/soc/msm/qdsp6v2/q6core.c | 7 ++- 3 files changed, 87 insertions(+), 1 deletion(-) diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c index f5c6d6f434bd..7e69a7fe28f5 100644 --- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c +++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c @@ -356,6 +356,15 @@ static struct cal_type_data *create_cal_type_data( return cal_type; } +/** + * cal_utils_create_cal_types + * + * @num_cal_types: number of types + * @cal_type: pointer to the cal types pointer + * @info: pointer to info + * + * Returns 0 on success, EINVAL otherwise + */ int cal_utils_create_cal_types(int num_cal_types, struct cal_type_data **cal_type, struct cal_type_info *info) @@ -411,6 +420,7 @@ int cal_utils_create_cal_types(int num_cal_types, done: return ret; } +EXPORT_SYMBOL(cal_utils_create_cal_types); static void delete_cal_block(struct cal_block_data *cal_block) { @@ -497,6 +507,13 @@ void cal_utils_destroy_cal_types(int num_cal_types, return; } +/** + * cal_utils_get_only_cal_block + * + * @cal_type: pointer to the cal type + * + * Returns cal_block structure + */ struct cal_block_data *cal_utils_get_only_cal_block( struct cal_type_data *cal_type) { @@ -516,7 +533,16 @@ struct cal_block_data *cal_utils_get_only_cal_block( done: return cal_block; } +EXPORT_SYMBOL(cal_utils_get_only_cal_block); +/** + * cal_utils_get_only_cal_block + * + * @cal_block: pointer to cal block struct + * @user_data: pointer to user data + * + * Returns true on match + */ bool cal_utils_match_buf_num(struct cal_block_data *cal_block, void *user_data) { @@ -528,6 +554,7 @@ bool cal_utils_match_buf_num(struct cal_block_data *cal_block, return ret; } +EXPORT_SYMBOL(cal_utils_match_buf_num); static struct cal_block_data *get_matching_cal_block( struct cal_type_data *cal_type, @@ -759,6 +786,17 @@ static int unmap_memory(struct cal_type_data *cal_type, return ret; } +/** + * cal_utils_alloc_cal + * + * @data_size: size of data to allocate + * @data: data pointer + * @cal_type: pointer to the cal type + * @client_info_size: client info size + * @client_info: pointer to client info + * + * Returns 0 on success, appropriate error code otherwise + */ int cal_utils_alloc_cal(size_t data_size, void *data, struct cal_type_data *cal_type, size_t client_info_size, void *client_info) @@ -827,7 +865,17 @@ int cal_utils_alloc_cal(size_t data_size, void *data, done: return ret; } +EXPORT_SYMBOL(cal_utils_alloc_cal); +/** + * cal_utils_dealloc_cal + * + * @data_size: size of data to allocate + * @data: data pointer + * @cal_type: pointer to the cal type + * + * Returns 0 on success, appropriate error code otherwise + */ int cal_utils_dealloc_cal(size_t data_size, void *data, struct cal_type_data *cal_type) { @@ -887,7 +935,19 @@ int cal_utils_dealloc_cal(size_t data_size, void *data, done: return ret; } +EXPORT_SYMBOL(cal_utils_dealloc_cal); +/** + * cal_utils_set_cal + * + * @data_size: size of data to allocate + * @data: data pointer + * @cal_type: pointer to the cal type + * @client_info_size: client info size + * @client_info: pointer to client info + * + * Returns 0 on success, appropriate error code otherwise + */ int cal_utils_set_cal(size_t data_size, void *data, struct cal_type_data *cal_type, size_t client_info_size, void *client_info) @@ -967,3 +1027,4 @@ int 
cal_utils_set_cal(size_t data_size, void *data, done: return ret; } +EXPORT_SYMBOL(cal_utils_set_cal); diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c index ebb8effb5f72..e1ce947cba48 100644 --- a/sound/soc/msm/qdsp6v2/q6afe.c +++ b/sound/soc/msm/qdsp6v2/q6afe.c @@ -2125,6 +2125,7 @@ int afe_set_config(enum afe_config_type config_type, void *config_data, int arg) return ret; } +EXPORT_SYMBOL(afe_set_config); /* * afe_clear_config - If SSR happens ADSP loses AFE configs, let AFE driver know @@ -2135,6 +2136,7 @@ void afe_clear_config(enum afe_config_type config) { clear_bit(config, &afe_configured_cmd); } +EXPORT_SYMBOL(afe_clear_config); bool afe_has_config(enum afe_config_type config) { @@ -5749,6 +5751,14 @@ int afe_set_lpass_clock(u16 port_id, struct afe_clk_cfg *cfg) return ret; } +/** + * afe_set_lpass_clk_cfg - Set AFE clk config + * + * @index: port index + * @cfg: pointer to clk set struct + * + * Returns 0 on success, appropriate error code otherwise + */ int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg) { struct afe_lpass_clk_config_command_v2 clk_cfg; @@ -5829,7 +5839,16 @@ int afe_set_lpass_clk_cfg(int index, struct afe_clk_set *cfg) mutex_unlock(&this_afe.afe_cmd_lock); return ret; } +EXPORT_SYMBOL(afe_set_lpass_clk_cfg); +/** + * afe_set_lpass_clock_v2 - Enable AFE lpass clock + * + * @port_id: AFE port id + * @cfg: pointer to clk set struct + * + * Returns 0 on success, appropriate error code otherwise + */ int afe_set_lpass_clock_v2(u16 port_id, struct afe_clk_set *cfg) { int index = 0; @@ -5855,6 +5874,7 @@ int afe_set_lpass_clock_v2(u16 port_id, struct afe_clk_set *cfg) return ret; } +EXPORT_SYMBOL(afe_set_lpass_clock_v2); int afe_set_lpass_internal_digital_codec_clock(u16 port_id, struct afe_digital_clk_cfg *cfg) diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c index f6675a2b9f94..3aaaa35e535b 100644 --- a/sound/soc/msm/qdsp6v2/q6core.c +++ b/sound/soc/msm/qdsp6v2/q6core.c @@ -381,6 +381,11 @@ uint32_t core_set_dolby_manufacturer_id(int manufacturer_id) return rc; } +/** + * q6core_is_adsp_ready - check adsp ready status + * + * Returns true if adsp is ready otherwise returns false + */ bool q6core_is_adsp_ready(void) { int rc = 0; @@ -419,7 +424,7 @@ bool q6core_is_adsp_ready(void) mutex_unlock(&(q6core_lcl.cmd_lock)); return ret; } - +EXPORT_SYMBOL(q6core_is_adsp_ready); static int q6core_map_memory_regions(phys_addr_t *buf_add, uint32_t mempool_id, uint32_t *bufsz, uint32_t bufcnt, uint32_t *map_handle) -- GitLab From 1948319feac2bef84c4b69c4996ff8c320e308b7 Mon Sep 17 00:00:00 2001 From: Phani Kumar Uppalapati Date: Mon, 26 Jun 2017 18:53:55 -0700 Subject: [PATCH 396/786] ASoC: msm: Add loadable module support for machine driver Add support to compile machine driver as a loadable module. Add new entry in kconfig for machine driver tristate independent selection. Export any qdsp6v2 functions used by machine driver. 
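The practical effect of the new tristate entries is that the board/machine driver can be flagged =m while the DSP-facing pieces stay built in, along these lines (illustrative config fragment, not a shipped defconfig):

    CONFIG_SND_SOC_SDM845=y
    CONFIG_SND_SOC_MACHINE_SDM845=m

With the machine driver symbol set to m, the machine driver is built as a loadable module and inserted at runtime instead of being linked into the kernel image.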
Change-Id: Ib3cf75c634e37b808566b9f76ef2156fbe9a78be Signed-off-by: Phani Kumar Uppalapati Signed-off-by: Karthikeyan Mani --- sound/soc/msm/Kconfig | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig index 185857493432..9b3e5118508c 100644 --- a/sound/soc/msm/Kconfig +++ b/sound/soc/msm/Kconfig @@ -192,8 +192,17 @@ config SND_SOC_MSM8996 the machine driver and the corresponding DAI-links -config SND_SOC_MSM8998 +config SND_SOC_MACHINE_MSM8998 tristate "SoC Machine driver for MSM8998 boards" + + help + To enable the machine driver and the + corresponding DAI-links on MSM8998. + All platform specific audio modules are + enabled here. + +config SND_SOC_MSM8998 + tristate "Sound SoC drivers to interface with DSP" depends on ARCH_QCOM select SND_SOC_COMPRESS select SND_SOC_QDSP6V2 @@ -235,6 +244,15 @@ config SND_SOC_660 the machine driver and the corresponding DAI-links +config SND_SOC_MACHINE_SDM845 + tristate "SoC Machine driver for SDM845 boards" + + help + To enable the machine driver and the + corresponding DAI-links on SDM845. + All platform specific audio modules are + enabled here. + config SND_SOC_SDM845 tristate "SoC Machine driver for SDM845 boards" depends on ARCH_QCOM -- GitLab From 59861173a7d215374240ab71d2b8314c505a3f37 Mon Sep 17 00:00:00 2001 From: Phani Kumar Uppalapati Date: Mon, 26 Jun 2017 20:00:38 -0700 Subject: [PATCH 397/786] defconfig: Add loadable module machine driver config Add config entry for the loadable module machine driver for SDM845 target. CRs-Fixed: 2068879 Change-Id: I0e99f524019f51c20e0fe0706133b5aa4a9ed17f Signed-off-by: Phani Kumar Uppalapati --- arch/arm64/configs/sdm845-perf_defconfig | 1 + arch/arm64/configs/sdm845_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index b2cfe11304e3..ec546ec6b4fc 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -365,6 +365,7 @@ CONFIG_SND=y CONFIG_SND_USB_AUDIO=y CONFIG_SND_USB_AUDIO_QMI=y CONFIG_SND_SOC=y +CONFIG_SND_SOC_MACHINE_SDM845=y CONFIG_SND_SOC_SDM845=y CONFIG_UHID=y CONFIG_HID_APPLE=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 43a8b581cdc6..5c97db25266b 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -372,6 +372,7 @@ CONFIG_SND=y CONFIG_SND_USB_AUDIO=y CONFIG_SND_USB_AUDIO_QMI=y CONFIG_SND_SOC=y +CONFIG_SND_SOC_MACHINE_SDM845=y CONFIG_SND_SOC_SDM845=y CONFIG_UHID=y CONFIG_HID_APPLE=y -- GitLab From 5b1f0d1bdf2908269181ccd67bcfcdb19a589d3b Mon Sep 17 00:00:00 2001 From: Phani Kumar Uppalapati Date: Mon, 26 Jun 2017 19:06:46 -0700 Subject: [PATCH 398/786] ASoC: wsa881x: Add loadable module support for WSA Add support to compile WSA as a loadable module. Move WSA kconfig tristate selection under the machine driver loadable module kconfig. 
Change-Id: If80208c85af44c06d951dbcd764d03f6511c1da8 Signed-off-by: Phani Kumar Uppalapati --- sound/soc/codecs/Kconfig | 6 +++++- sound/soc/msm/Kconfig | 4 ++-- sound/soc/msm/Makefile | 4 ++-- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 158b1ee2bab8..08fb16887cb2 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -900,10 +900,14 @@ config SND_SOC_WCD934X_MBHC select SND_SOC_WCD_MBHC select SND_SOC_WCD_MBHC_ADC +config REGMAP_SWR + tristate + default y + config SND_SOC_WSA881X tristate + depends on REGMAP_SWR select MSM_CDC_PINCTRL - select REGMAP_SWR config SND_SOC_WSA881X_ANALOG tristate diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig index 9b3e5118508c..4a0ce40f5241 100644 --- a/sound/soc/msm/Kconfig +++ b/sound/soc/msm/Kconfig @@ -194,6 +194,7 @@ config SND_SOC_MSM8996 config SND_SOC_MACHINE_MSM8998 tristate "SoC Machine driver for MSM8998 boards" + select SND_SOC_WSA881X help To enable the machine driver and the @@ -216,7 +217,6 @@ config SND_SOC_MSM8998 select MSM_QDSP6V2_CODECS select SND_SOC_WCD9335 select SND_SOC_WCD934X - select SND_SOC_WSA881X select SND_SOC_MSM_HDMI_CODEC_RX select DTS_SRS_TM select QTI_PP @@ -246,6 +246,7 @@ config SND_SOC_660 config SND_SOC_MACHINE_SDM845 tristate "SoC Machine driver for SDM845 boards" + select SND_SOC_WSA881X help To enable the machine driver and the @@ -267,7 +268,6 @@ config SND_SOC_SDM845 select MSM_QDSP6_NOTIFIER select MSM_QDSP6V2_CODECS select SND_SOC_WCD934X - select SND_SOC_WSA881X select DTS_SRS_TM select QTI_PP select MSM_ULTRASOUND diff --git a/sound/soc/msm/Makefile b/sound/soc/msm/Makefile index 5105cd9e38f0..caf884322d9e 100644 --- a/sound/soc/msm/Makefile +++ b/sound/soc/msm/Makefile @@ -18,7 +18,7 @@ obj-$(CONFIG_SND_SOC_MSM8996) += snd-soc-msm8996.o # for MSM8998 sound card driver snd-soc-msm8998-objs := msm8998.o -obj-$(CONFIG_SND_SOC_MSM8998) += snd-soc-msm8998.o +obj-$(CONFIG_SND_SOC_MACHINE_MSM8998) += snd-soc-msm8998.o # for SDM660 sound card driver snd-soc-sdm660-common-objs := sdm660-common.o @@ -36,4 +36,4 @@ obj-$(CONFIG_SND_SOC_EXT_CODEC) += snd-soc-ext-codec.o # for SDM845 sound card driver snd-soc-sdm845-objs := sdm845.o -obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o +obj-$(CONFIG_SND_SOC_MACHINE_SDM845) += snd-soc-sdm845.o -- GitLab From 9c866bb6c6540c3f64c64b4dcdf97debfcfc4df3 Mon Sep 17 00:00:00 2001 From: Vicky Wallace Date: Fri, 5 May 2017 12:21:28 -0700 Subject: [PATCH 399/786] clk: qcom: Add support to round the frequency to kHz The divider round closest flag is designed for the rounding to the Hz of the requested frequency. Certain clock such as graphic clock on SDM845 needs the divider to round to the kHz. This change support this special round rate. 
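To make the rounding difference concrete (numbers purely illustrative): for a 500 MHz request, a new divider candidate that lands 1400 Hz off and the current best that lands 900 Hz off both reduce to a 1 kHz error under the new comparison (DIV_ROUND_CLOSEST(1400, 1000) == DIV_ROUND_CLOSEST(900, 1000) == 1), so the sub-kilohertz difference no longer switches dividers; only a candidate that gets within roughly half a kilohertz (for example 400 Hz, which rounds to 0 kHz) is treated as better. With the existing CLK_DIVIDER_ROUND_CLOSEST behaviour the 900 Hz error would always win over the 1400 Hz one.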
CRs-Fixed: 2048646 Change-Id: I1d5950faea30f94593321509dcf647af1b3fa57f Signed-off-by: Vicky Wallace --- drivers/clk/clk-divider.c | 3 +++ include/linux/clk-provider.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 3d6754e74f69..bb7c8623a29b 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -258,6 +258,9 @@ static bool _is_best_div(unsigned long rate, unsigned long now, { if (flags & CLK_DIVIDER_ROUND_CLOSEST) return abs(rate - now) < abs(rate - best); + else if (flags & CLK_DIVIDER_ROUND_KHZ) + return (DIV_ROUND_CLOSEST(abs(rate - now), 1000) + < DIV_ROUND_CLOSEST(abs(rate - best), 1000)); return now <= rate && now > best; } diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 8ee110a04992..a52b65a14f53 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -494,6 +494,7 @@ struct clk_divider { #define CLK_DIVIDER_ROUND_CLOSEST BIT(4) #define CLK_DIVIDER_READ_ONLY BIT(5) #define CLK_DIVIDER_MAX_AT_ZERO BIT(6) +#define CLK_DIVIDER_ROUND_KHZ BIT(7) extern const struct clk_ops clk_divider_ops; extern const struct clk_ops clk_divider_ro_ops; -- GitLab From eea1836b7c71047a9ed2c88315459377338138f2 Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Tue, 27 Jun 2017 18:09:46 -0700 Subject: [PATCH 400/786] drivers: mfd: Add support for wcd9xxx core module Move Codec reset functions from codec module so that it can be can be compiled separately to support module option. CRs-fixed: 2068879 Change-Id: I2d83c3216d42150610478f2bf71e348aa7984f4c Signed-off-by: Karthikeyan Mani --- drivers/mfd/Kconfig | 19 +- drivers/mfd/Makefile | 15 +- drivers/mfd/msm-cdc-pinctrl.c | 12 +- drivers/mfd/wcd9xxx-core-init.c | 55 +++ drivers/mfd/wcd9xxx-core.c | 6 +- drivers/mfd/wcd9xxx-irq.c | 48 ++- drivers/mfd/wcd9xxx-regmap.h | 23 +- drivers/mfd/wcd9xxx-rst.c | 443 ++++++++++++++++++++++ drivers/mfd/wcd9xxx-slimslave.c | 14 +- include/linux/mfd/msm-cdc-pinctrl.h | 12 +- include/linux/mfd/wcd9335/irq.h | 55 +++ include/linux/mfd/wcd934x/irq.h | 56 +++ include/linux/mfd/wcd9xxx/core.h | 3 + include/linux/mfd/wcd9xxx/wcd9xxx-irq.h | 2 + include/linux/mfd/wcd9xxx/wcd9xxx-utils.h | 106 +----- 15 files changed, 714 insertions(+), 155 deletions(-) create mode 100644 drivers/mfd/wcd9xxx-core-init.c create mode 100644 drivers/mfd/wcd9xxx-rst.c create mode 100644 include/linux/mfd/wcd9335/irq.h create mode 100644 include/linux/mfd/wcd934x/irq.h diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 505f99d962aa..dd6dbdace556 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -8,7 +8,7 @@ menu "Multifunction device drivers" config MFD_CORE tristate select IRQ_DOMAIN - default n + default y config MFD_CS5535 tristate "AMD CS5535 and CS5536 southbridge core functions" @@ -1636,11 +1636,16 @@ config MSM_CDC_SUPPLY power supply enable or disable. This driver acts as interface between codec and regulator framework. -config WCD9XXX_CODEC_UTIL - tristate "WCD9XXX Codec Utils" - select MFD_CORE +config WCD9XXX_CODEC_CORE + tristate "WCD9XXX Codec Core" + select SLIMBUS + select SOUNDWIRE_WCD_CTRL + select MSM_CDC_SUPPLY + select MSM_CDC_PINCTRL + select REGMAP_ALLOW_WRITE_DEBUGFS + select PINCTRL_WCD help - WCD9XXX Util driver provides APIs for WCD drivers to reset, + WCD9XXX Core driver provides APIs for WCD drivers to reset, suspend/resume, regmap bus callback functions and read/write functions. This driver also hides the underlying bus related functionalities. 
@@ -1649,8 +1654,6 @@ config WCD9335_CODEC tristate "WCD9335 Codec" select SLIMBUS select SOUNDWIRE_WCD_CTRL - select MFD_CORE - select WCD9XXX_CODEC_UTIL select MSM_CDC_SUPPLY select MSM_CDC_PINCTRL select REGMAP_ALLOW_WRITE_DEBUGFS @@ -1665,8 +1668,6 @@ config WCD934X_CODEC tristate "WCD934X Codec" depends on SLIMBUS select SOUNDWIRE_WCD_CTRL - select MFD_CORE - select WCD9XXX_CODEC_UTIL select MSM_CDC_SUPPLY select MSM_CDC_PINCTRL select REGMAP_ALLOW_WRITE_DEBUGFS diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 8e54d3253650..d276fa9d6265 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -205,14 +205,13 @@ obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o obj-$(CONFIG_MFD_DLN2) += dln2.o obj-$(CONFIG_MFD_RT5033) += rt5033.o obj-$(CONFIG_MFD_SKY81452) += sky81452.o -obj-$(CONFIG_MSM_CDC_PINCTRL) += msm-cdc-pinctrl.o -obj-$(CONFIG_MSM_CDC_SUPPLY) += msm-cdc-supply.o -obj-$(CONFIG_WCD9XXX_CODEC_UTIL) += wcd9xxx-utils.o -obj-$(CONFIG_WCD9335_CODEC) += wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\ - wcd9335-regmap.o wcd9335-tables.o -obj-$(CONFIG_WCD934X_CODEC) += wcd9xxx-core.o wcd9xxx-irq.o wcd9xxx-slimslave.o\ - wcd934x-regmap.o wcd934x-tables.o - +wcd-core-objs := wcd9xxx-rst.o wcd9xxx-core-init.o \ + wcd9xxx-core.o wcd9xxx-irq.o \ + wcd9xxx-slimslave.o wcd9xxx-utils.o \ + wcd934x-regmap.o wcd934x-tables.o \ + wcd9335-regmap.o wcd9335-tables.o \ + msm-cdc-pinctrl.o msm-cdc-supply.o +obj-$(CONFIG_WCD934X_CODEC) += wcd-core.o intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o intel-soc-pmic-$(CONFIG_INTEL_PMC_IPC) += intel_soc_pmic_bxtwc.o obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o diff --git a/drivers/mfd/msm-cdc-pinctrl.c b/drivers/mfd/msm-cdc-pinctrl.c index 9622256a280d..859a75f93bb5 100644 --- a/drivers/mfd/msm-cdc-pinctrl.c +++ b/drivers/mfd/msm-cdc-pinctrl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -239,7 +239,15 @@ static struct platform_driver msm_cdc_pinctrl_driver = { .probe = msm_cdc_pinctrl_probe, .remove = msm_cdc_pinctrl_remove, }; -module_platform_driver(msm_cdc_pinctrl_driver); +int msm_cdc_pinctrl_drv_init(void) +{ + return platform_driver_register(&msm_cdc_pinctrl_driver); +} + +void msm_cdc_pinctrl_drv_exit(void) +{ + platform_driver_unregister(&msm_cdc_pinctrl_driver); +} MODULE_DESCRIPTION("MSM CODEC pin control platform driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/wcd9xxx-core-init.c b/drivers/mfd/wcd9xxx-core-init.c new file mode 100644 index 000000000000..7f933990682d --- /dev/null +++ b/drivers/mfd/wcd9xxx-core-init.c @@ -0,0 +1,55 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include + +#define NUM_DRIVERS_REG_RET 3 + +static int __init wcd9xxx_core_init(void) +{ + int ret[NUM_DRIVERS_REG_RET] = {0}; + int i = 0; + + ret[0] = msm_cdc_pinctrl_drv_init(); + if (ret[0]) + pr_err("%s: Failed init pinctrl drv: %d\n", __func__, ret[0]); + + ret[1] = wcd9xxx_irq_drv_init(); + if (ret[1]) + pr_err("%s: Failed init irq drv: %d\n", __func__, ret[1]); + + ret[2] = wcd9xxx_init(); + if (ret[2]) + pr_err("%s: Failed wcd core drv: %d\n", __func__, ret[2]); + + for (i = 0; i < NUM_DRIVERS_REG_RET; i++) { + if (ret[i]) + return ret[i]; + } + + return 0; +} +module_init(wcd9xxx_core_init); + +static void __exit wcd9xxx_core_exit(void) +{ + wcd9xxx_exit(); + wcd9xxx_irq_drv_exit(); + msm_cdc_pinctrl_drv_exit(); +} +module_exit(wcd9xxx_core_exit); + +MODULE_DESCRIPTION("WCD9XXX CODEC core init driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c index d143536cbdaa..b373acb11005 100644 --- a/drivers/mfd/wcd9xxx-core.c +++ b/drivers/mfd/wcd9xxx-core.c @@ -1665,7 +1665,7 @@ static struct i2c_driver wcd9335_i2c_driver = { .remove = wcd9xxx_i2c_remove, }; -static int __init wcd9xxx_init(void) +int wcd9xxx_init(void) { int ret[NUM_WCD9XXX_REG_RET] = {0}; int i = 0; @@ -1699,9 +1699,8 @@ static int __init wcd9xxx_init(void) return 0; } -module_init(wcd9xxx_init); -static void __exit wcd9xxx_exit(void) +void wcd9xxx_exit(void) { wcd9xxx_set_intf_type(WCD9XXX_INTERFACE_TYPE_PROBING); @@ -1710,7 +1709,6 @@ static void __exit wcd9xxx_exit(void) i2c_del_driver(&wcd9335_i2c_driver); slim_driver_unregister(&wcd_slim_driver); } -module_exit(wcd9xxx_exit); MODULE_DESCRIPTION("Codec core driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/wcd9xxx-irq.c b/drivers/mfd/wcd9xxx-irq.c index 0502e39dd721..092f44632e1b 100644 --- a/drivers/mfd/wcd9xxx-irq.c +++ b/drivers/mfd/wcd9xxx-irq.c @@ -406,30 +406,63 @@ static irqreturn_t wcd9xxx_irq_thread(int irq, void *data) return IRQ_NONE; } +/** + * wcd9xxx_free_irq + * + * @wcd9xxx_res: pointer to core resource + * irq: irq number + * @data: data pointer + * + */ void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq, void *data) { free_irq(phyirq_to_virq(wcd9xxx_res, irq), data); } +EXPORT_SYMBOL(wcd9xxx_free_irq); +/** + * wcd9xxx_enable_irq + * + * @wcd9xxx_res: pointer to core resource + * irq: irq number + * + */ void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq) { if (wcd9xxx_res->irq) enable_irq(phyirq_to_virq(wcd9xxx_res, irq)); } +EXPORT_SYMBOL(wcd9xxx_enable_irq); +/** + * wcd9xxx_disable_irq + * + * @wcd9xxx_res: pointer to core resource + * irq: irq number + * + */ void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq) { if (wcd9xxx_res->irq) disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq)); } +EXPORT_SYMBOL(wcd9xxx_disable_irq); +/** + * wcd9xxx_disable_irq_sync + * + * @wcd9xxx_res: pointer to core resource + * irq: irq number + * + */ void wcd9xxx_disable_irq_sync( struct wcd9xxx_core_resource *wcd9xxx_res, int irq) { if (wcd9xxx_res->irq) disable_irq(phyirq_to_virq(wcd9xxx_res, irq)); } +EXPORT_SYMBOL(wcd9xxx_disable_irq_sync); static int wcd9xxx_irq_setup_downstream_irq( struct wcd9xxx_core_resource *wcd9xxx_res) @@ -470,6 +503,13 @@ static int wcd9xxx_irq_setup_downstream_irq( return 0; } +/** + * wcd9xxx_irq_init + * + * @wcd9xxx_res: pointer to core resource + * + * Returns 0 on success, appropriate error code otherwise + */ int wcd9xxx_irq_init(struct 
wcd9xxx_core_resource *wcd9xxx_res) { int i, ret; @@ -568,6 +608,7 @@ int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res) mutex_destroy(&wcd9xxx_res->nested_irq_lock); return ret; } +EXPORT_SYMBOL(wcd9xxx_irq_init); int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq, irq_handler_t handler, @@ -580,6 +621,7 @@ int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res, return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING, name, data); } +EXPORT_SYMBOL(wcd9xxx_request_irq); void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res) { @@ -799,15 +841,13 @@ static struct platform_driver wcd9xxx_irq_driver = { }, }; -static int wcd9xxx_irq_drv_init(void) +int wcd9xxx_irq_drv_init(void) { return platform_driver_register(&wcd9xxx_irq_driver); } -subsys_initcall(wcd9xxx_irq_drv_init); -static void wcd9xxx_irq_drv_exit(void) +void wcd9xxx_irq_drv_exit(void) { platform_driver_unregister(&wcd9xxx_irq_driver); } -module_exit(wcd9xxx_irq_drv_exit); #endif /* CONFIG_OF */ diff --git a/drivers/mfd/wcd9xxx-regmap.h b/drivers/mfd/wcd9xxx-regmap.h index 6db8fc55acae..f44e8b1cf532 100644 --- a/drivers/mfd/wcd9xxx-regmap.h +++ b/drivers/mfd/wcd9xxx-regmap.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -19,42 +19,25 @@ typedef int (*regmap_patch_fptr)(struct regmap *, int); -#ifdef CONFIG_WCD934X_CODEC extern struct regmap_config wcd934x_regmap_config; extern int wcd934x_regmap_register_patch(struct regmap *regmap, int version); -#endif -#ifdef CONFIG_WCD9335_CODEC extern struct regmap_config wcd9335_regmap_config; extern int wcd9335_regmap_register_patch(struct regmap *regmap, int version); -#endif - -#ifdef CONFIG_WCD9330_CODEC -extern struct regmap_config wcd9330_regmap_config; -#endif static inline struct regmap_config *wcd9xxx_get_regmap_config(int type) { struct regmap_config *regmap_config; switch (type) { -#ifdef CONFIG_WCD934X_CODEC case WCD934X: regmap_config = &wcd934x_regmap_config; break; -#endif -#ifdef CONFIG_WCD9335_CODEC case WCD9335: regmap_config = &wcd9335_regmap_config; break; -#endif -#ifdef CONFIG_WCD9330_CODEC - case WCD9330: - regmap_config = &wcd9330_regmap_config; - break; -#endif default: regmap_config = NULL; break; @@ -68,16 +51,12 @@ static inline regmap_patch_fptr wcd9xxx_get_regmap_reg_patch(int type) regmap_patch_fptr apply_patch; switch (type) { -#ifdef CONFIG_WCD9335_CODEC case WCD9335: apply_patch = wcd9335_regmap_register_patch; break; -#endif -#ifdef CONFIG_WCD934X_CODEC case WCD934X: apply_patch = wcd934x_regmap_register_patch; break; -#endif default: apply_patch = NULL; break; diff --git a/drivers/mfd/wcd9xxx-rst.c b/drivers/mfd/wcd9xxx-rst.c new file mode 100644 index 000000000000..c8e0b348254a --- /dev/null +++ b/drivers/mfd/wcd9xxx-rst.c @@ -0,0 +1,443 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* wcd9335 interrupt table */ +static const struct intr_data wcd9335_intr_table[] = { + {WCD9XXX_IRQ_SLIMBUS, false}, + {WCD9335_IRQ_MBHC_SW_DET, true}, + {WCD9335_IRQ_MBHC_BUTTON_PRESS_DET, true}, + {WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET, true}, + {WCD9335_IRQ_MBHC_ELECT_INS_REM_DET, true}, + {WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true}, + {WCD9335_IRQ_FLL_LOCK_LOSS, false}, + {WCD9335_IRQ_HPH_PA_CNPL_COMPLETE, false}, + {WCD9335_IRQ_HPH_PA_CNPR_COMPLETE, false}, + {WCD9335_IRQ_EAR_PA_CNP_COMPLETE, false}, + {WCD9335_IRQ_LINE_PA1_CNP_COMPLETE, false}, + {WCD9335_IRQ_LINE_PA2_CNP_COMPLETE, false}, + {WCD9335_IRQ_LINE_PA3_CNP_COMPLETE, false}, + {WCD9335_IRQ_LINE_PA4_CNP_COMPLETE, false}, + {WCD9335_IRQ_HPH_PA_OCPL_FAULT, false}, + {WCD9335_IRQ_HPH_PA_OCPR_FAULT, false}, + {WCD9335_IRQ_EAR_PA_OCP_FAULT, false}, + {WCD9335_IRQ_SOUNDWIRE, false}, + {WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE, false}, + {WCD9335_IRQ_RCO_ERROR, false}, + {WCD9335_IRQ_SVA_ERROR, false}, + {WCD9335_IRQ_MAD_AUDIO, false}, + {WCD9335_IRQ_MAD_BEACON, false}, + {WCD9335_IRQ_SVA_OUTBOX1, true}, + {WCD9335_IRQ_SVA_OUTBOX2, true}, + {WCD9335_IRQ_MAD_ULTRASOUND, false}, + {WCD9335_IRQ_VBAT_ATTACK, false}, + {WCD9335_IRQ_VBAT_RESTORE, false}, +}; + +static const struct intr_data wcd934x_intr_table[] = { + {WCD9XXX_IRQ_SLIMBUS, false}, + {WCD934X_IRQ_MBHC_SW_DET, true}, + {WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, true}, + {WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, true}, + {WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, true}, + {WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true}, + {WCD934X_IRQ_MISC, false}, + {WCD934X_IRQ_HPH_PA_CNPL_COMPLETE, false}, + {WCD934X_IRQ_HPH_PA_CNPR_COMPLETE, false}, + {WCD934X_IRQ_EAR_PA_CNP_COMPLETE, false}, + {WCD934X_IRQ_LINE_PA1_CNP_COMPLETE, false}, + {WCD934X_IRQ_LINE_PA2_CNP_COMPLETE, false}, + {WCD934X_IRQ_SLNQ_ANALOG_ERROR, false}, + {WCD934X_IRQ_RESERVED_3, false}, + {WCD934X_IRQ_HPH_PA_OCPL_FAULT, false}, + {WCD934X_IRQ_HPH_PA_OCPR_FAULT, false}, + {WCD934X_IRQ_EAR_PA_OCP_FAULT, false}, + {WCD934X_IRQ_SOUNDWIRE, false}, + {WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE, false}, + {WCD934X_IRQ_RCO_ERROR, false}, + {WCD934X_IRQ_CPE_ERROR, false}, + {WCD934X_IRQ_MAD_AUDIO, false}, + {WCD934X_IRQ_MAD_BEACON, false}, + {WCD934X_IRQ_CPE1_INTR, true}, + {WCD934X_IRQ_RESERVED_4, false}, + {WCD934X_IRQ_MAD_ULTRASOUND, false}, + {WCD934X_IRQ_VBAT_ATTACK, false}, + {WCD934X_IRQ_VBAT_RESTORE, false}, +}; + +/* + * wcd9335_bring_down: Bringdown WCD Codec + * + * @wcd9xxx: Pointer to wcd9xxx structure + * + * Returns 0 for success or negative error code for failure + */ +static int wcd9335_bring_down(struct wcd9xxx *wcd9xxx) +{ + if (!wcd9xxx || !wcd9xxx->regmap) + return -EINVAL; + + regmap_write(wcd9xxx->regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, + 0x04); + + return 0; +} + +/* + * wcd9335_bring_up: Bringup WCD Codec + * + * @wcd9xxx: Pointer to the wcd9xxx structure + * + * Returns 0 for success or negative error code for failure + */ +static int wcd9335_bring_up(struct wcd9xxx *wcd9xxx) +{ + int ret = 0; + int val, byte0; + struct regmap *wcd_regmap; + + if (!wcd9xxx) + return -EINVAL; + + if (!wcd9xxx->regmap) { + dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n", + __func__); + return -EINVAL; + } + wcd_regmap = wcd9xxx->regmap; + + regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, &val); + regmap_read(wcd_regmap, 
WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, &byte0); + + if ((val < 0) || (byte0 < 0)) { + dev_err(wcd9xxx->dev, "%s: tasha codec version detection fail!\n", + __func__); + return -EINVAL; + } + if ((val & 0x80) && (byte0 == 0x0)) { + dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.1\n", + __func__); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01); + regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC); + regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, + 0x5); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, + 0x7); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, + 0x3); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3); + } else if (byte0 == 0x1) { + dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v2.0\n", + __func__); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01); + regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_TEST_2, 0x00); + regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_8, 0x6F); + regmap_write(wcd_regmap, WCD9335_BIAS_VBG_FINE_ADJ, 0x65); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, + 0x5); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, + 0x7); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, + 0x3); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3); + } else if ((byte0 == 0) && (!(val & 0x80))) { + dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.0\n", + __func__); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01); + regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC); + regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, + 0x3); + regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3); + } else { + dev_err(wcd9xxx->dev, "%s: tasha codec version unknown\n", + __func__); + ret = -EINVAL; + } + + return ret; +} + +/* + * wcd9335_get_cdc_info: Get codec specific information + * + * @wcd9xxx: pointer to wcd9xxx structure + * @wcd_type: pointer to wcd9xxx_codec_type structure + * + * Returns 0 for success or negative error code for failure + */ +static int wcd9335_get_cdc_info(struct wcd9xxx *wcd9xxx, + struct wcd9xxx_codec_type *wcd_type) +{ + u16 id_minor, id_major; + struct regmap *wcd_regmap; + int rc, val, version = 0; + + if (!wcd9xxx || !wcd_type) + return -EINVAL; + + if (!wcd9xxx->regmap) { + dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n", + __func__); + return -EINVAL; + } + wcd_regmap = wcd9xxx->regmap; + + rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, + (u8 *)&id_minor, sizeof(u16)); + if (rc) + return -EINVAL; + + rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2, + (u8 *)&id_major, sizeof(u16)); + if (rc) + return -EINVAL; + + dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n", + __func__, id_major, id_minor); + + /* Version detection */ + if (id_major == TASHA_MAJOR) { + regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, + &val); + version = ((u8)val & 0x80) >> 7; + } else if (id_major == TASHA2P0_MAJOR) + version = 2; + else + dev_err(wcd9xxx->dev, "%s: wcd9335 version unknown (major 0x%x, minor 0x%x)\n", + __func__, id_major, id_minor); + + /* Fill codec type info */ + wcd_type->id_major = id_major; + wcd_type->id_minor = id_minor; + wcd_type->num_irqs = WCD9335_NUM_IRQS; + wcd_type->version = version; + wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1; + 
wcd_type->i2c_chip_status = 0x01; + wcd_type->intr_tbl = wcd9335_intr_table; + wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9335_intr_table); + + wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] = + WCD9335_INTR_PIN1_STATUS0; + wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] = + WCD9335_INTR_PIN1_CLEAR0; + wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] = + WCD9335_INTR_PIN1_MASK0; + wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] = + WCD9335_INTR_LEVEL0; + wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] = + WCD9335_INTR_CLR_COMMIT; + + return rc; +} + +/* + * wcd934x_bring_down: Bringdown WCD Codec + * + * @wcd9xxx: Pointer to wcd9xxx structure + * + * Returns 0 for success or negative error code for failure + */ +static int wcd934x_bring_down(struct wcd9xxx *wcd9xxx) +{ + if (!wcd9xxx || !wcd9xxx->regmap) + return -EINVAL; + + regmap_write(wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, + 0x04); + + return 0; +} + +/* + * wcd934x_bring_up: Bringup WCD Codec + * + * @wcd9xxx: Pointer to the wcd9xxx structure + * + * Returns 0 for success or negative error code for failure + */ +static int wcd934x_bring_up(struct wcd9xxx *wcd9xxx) +{ + struct regmap *wcd_regmap; + + if (!wcd9xxx) + return -EINVAL; + + if (!wcd9xxx->regmap) { + dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n", + __func__); + return -EINVAL; + } + wcd_regmap = wcd9xxx->regmap; + + regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01); + regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19); + regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15); + /* Add 1msec delay for VOUT to settle */ + usleep_range(1000, 1100); + regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5); + regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7); + regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3); + regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7); + regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3); + + return 0; +} + +/* + * wcd934x_get_cdc_info: Get codec specific information + * + * @wcd9xxx: pointer to wcd9xxx structure + * @wcd_type: pointer to wcd9xxx_codec_type structure + * + * Returns 0 for success or negative error code for failure + */ +static int wcd934x_get_cdc_info(struct wcd9xxx *wcd9xxx, + struct wcd9xxx_codec_type *wcd_type) +{ + u16 id_minor, id_major; + struct regmap *wcd_regmap; + int rc, version = -1; + + if (!wcd9xxx || !wcd_type) + return -EINVAL; + + if (!wcd9xxx->regmap) { + dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null\n", __func__); + return -EINVAL; + } + wcd_regmap = wcd9xxx->regmap; + + rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0, + (u8 *)&id_minor, sizeof(u16)); + if (rc) + return -EINVAL; + + rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2, + (u8 *)&id_major, sizeof(u16)); + if (rc) + return -EINVAL; + + dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n", + __func__, id_major, id_minor); + + if (id_major != TAVIL_MAJOR) + goto version_unknown; + + /* + * As fine version info cannot be retrieved before tavil probe. + * Assign coarse versions for possible future use before tavil probe. 
+ */ + if (id_minor == cpu_to_le16(0)) + version = TAVIL_VERSION_1_0; + else if (id_minor == cpu_to_le16(0x01)) + version = TAVIL_VERSION_1_1; + +version_unknown: + if (version < 0) + dev_err(wcd9xxx->dev, "%s: wcd934x version unknown\n", + __func__); + + /* Fill codec type info */ + wcd_type->id_major = id_major; + wcd_type->id_minor = id_minor; + wcd_type->num_irqs = WCD934X_NUM_IRQS; + wcd_type->version = version; + wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1; + wcd_type->i2c_chip_status = 0x01; + wcd_type->intr_tbl = wcd934x_intr_table; + wcd_type->intr_tbl_size = ARRAY_SIZE(wcd934x_intr_table); + + wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] = + WCD934X_INTR_PIN1_STATUS0; + wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] = + WCD934X_INTR_PIN1_CLEAR0; + wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] = + WCD934X_INTR_PIN1_MASK0; + wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] = + WCD934X_INTR_LEVEL0; + wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] = + WCD934X_INTR_CLR_COMMIT; + + return rc; +} + +codec_bringdown_fn wcd9xxx_bringdown_fn(int type) +{ + codec_bringdown_fn cdc_bdown_fn; + + switch (type) { + case WCD934X: + cdc_bdown_fn = wcd934x_bring_down; + break; + case WCD9335: + cdc_bdown_fn = wcd9335_bring_down; + break; + default: + cdc_bdown_fn = NULL; + break; + } + + return cdc_bdown_fn; +} + +codec_bringup_fn wcd9xxx_bringup_fn(int type) +{ + codec_bringup_fn cdc_bup_fn; + + switch (type) { + case WCD934X: + cdc_bup_fn = wcd934x_bring_up; + break; + case WCD9335: + cdc_bup_fn = wcd9335_bring_up; + break; + default: + cdc_bup_fn = NULL; + break; + } + + return cdc_bup_fn; +} + +codec_type_fn wcd9xxx_get_codec_info_fn(int type) +{ + codec_type_fn cdc_type_fn; + + switch (type) { + case WCD934X: + cdc_type_fn = wcd934x_get_cdc_info; + break; + case WCD9335: + cdc_type_fn = wcd9335_get_cdc_info; + break; + default: + cdc_type_fn = NULL; + break; + } + + return cdc_type_fn; +} + diff --git a/drivers/mfd/wcd9xxx-slimslave.c b/drivers/mfd/wcd9xxx-slimslave.c index 8bf1404b44be..a99ad5a2f9c8 100644 --- a/drivers/mfd/wcd9xxx-slimslave.c +++ b/drivers/mfd/wcd9xxx-slimslave.c @@ -47,7 +47,18 @@ static int wcd9xxx_configure_ports(struct wcd9xxx *wcd9xxx) return 0; } - +/** + * wcd9xxx_init_slimslave + * + * @wcd9xxx: pointer to wcd9xxx struct + * @wcd9xxx_pgd_la: pgd_la value + * @tx_num: tx number + * @rx_num: rx number + * @tx_slot: pointer to tx slot + * @rx_slot: pointer to rx slot + * + * Returns 0 on success, appropriate error code otherwise + */ int wcd9xxx_init_slimslave(struct wcd9xxx *wcd9xxx, u8 wcd9xxx_pgd_la, unsigned int tx_num, unsigned int *tx_slot, unsigned int rx_num, unsigned int *rx_slot) @@ -117,6 +128,7 @@ int wcd9xxx_init_slimslave(struct wcd9xxx *wcd9xxx, u8 wcd9xxx_pgd_la, err: return ret; } +EXPORT_SYMBOL(wcd9xxx_init_slimslave); int wcd9xxx_deinit_slimslave(struct wcd9xxx *wcd9xxx) { diff --git a/include/linux/mfd/msm-cdc-pinctrl.h b/include/linux/mfd/msm-cdc-pinctrl.h index 14b18fe46cc4..7eabefb80e19 100644 --- a/include/linux/mfd/msm-cdc-pinctrl.h +++ b/include/linux/mfd/msm-cdc-pinctrl.h @@ -16,11 +16,13 @@ #include #include -#ifdef CONFIG_MSM_CDC_PINCTRL +#if IS_ENABLED(CONFIG_MSM_CDC_PINCTRL) extern int msm_cdc_pinctrl_select_sleep_state(struct device_node *np); extern int msm_cdc_pinctrl_select_active_state(struct device_node *np); extern bool msm_cdc_pinctrl_get_state(struct device_node *np); extern int msm_cdc_get_gpio_state(struct device_node *np); +int msm_cdc_pinctrl_drv_init(void); +void msm_cdc_pinctrl_drv_exit(void); #else int 
msm_cdc_pinctrl_select_sleep_state(struct device_node *np) @@ -35,7 +37,13 @@ int msm_cdc_get_gpio_state(struct device_node *np) { return 0; } -# +int msm_cdc_pinctrl_drv_init(void) +{ + return 0; +} +void msm_cdc_pinctrl_drv_exit(void) +{ +} #endif #endif diff --git a/include/linux/mfd/wcd9335/irq.h b/include/linux/mfd/wcd9335/irq.h new file mode 100644 index 000000000000..c666d3144359 --- /dev/null +++ b/include/linux/mfd/wcd9335/irq.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __WCD9335_IRQ_H_ +#define __WCD9335_IRQ_H_ + +enum { + /* INTR_REG 0 */ + WCD9335_IRQ_FLL_LOCK_LOSS = 1, + WCD9335_IRQ_HPH_PA_OCPL_FAULT, + WCD9335_IRQ_HPH_PA_OCPR_FAULT, + WCD9335_IRQ_EAR_PA_OCP_FAULT, + WCD9335_IRQ_HPH_PA_CNPL_COMPLETE, + WCD9335_IRQ_HPH_PA_CNPR_COMPLETE, + WCD9335_IRQ_EAR_PA_CNP_COMPLETE, + /* INTR_REG 1 */ + WCD9335_IRQ_MBHC_SW_DET, + WCD9335_IRQ_MBHC_ELECT_INS_REM_DET, + WCD9335_IRQ_MBHC_BUTTON_PRESS_DET, + WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET, + WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET, + WCD9335_IRQ_RESERVED_0, + WCD9335_IRQ_RESERVED_1, + WCD9335_IRQ_RESERVED_2, + /* INTR_REG 2 */ + WCD9335_IRQ_LINE_PA1_CNP_COMPLETE, + WCD9335_IRQ_LINE_PA2_CNP_COMPLETE, + WCD9335_IRQ_LINE_PA3_CNP_COMPLETE, + WCD9335_IRQ_LINE_PA4_CNP_COMPLETE, + WCD9335_IRQ_SOUNDWIRE, + WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE, + WCD9335_IRQ_RCO_ERROR, + WCD9335_IRQ_SVA_ERROR, + /* INTR_REG 3 */ + WCD9335_IRQ_MAD_AUDIO, + WCD9335_IRQ_MAD_BEACON, + WCD9335_IRQ_MAD_ULTRASOUND, + WCD9335_IRQ_VBAT_ATTACK, + WCD9335_IRQ_VBAT_RESTORE, + WCD9335_IRQ_SVA_OUTBOX1, + WCD9335_IRQ_SVA_OUTBOX2, + WCD9335_NUM_IRQS, +}; + +#endif diff --git a/include/linux/mfd/wcd934x/irq.h b/include/linux/mfd/wcd934x/irq.h new file mode 100644 index 000000000000..1a18be376eb1 --- /dev/null +++ b/include/linux/mfd/wcd934x/irq.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __WCD934X_IRQ_H_ +#define __WCD934X_IRQ_H_ + +enum { + /* INTR_REG 0 */ + WCD934X_IRQ_MISC = 1, + WCD934X_IRQ_HPH_PA_OCPL_FAULT, + WCD934X_IRQ_HPH_PA_OCPR_FAULT, + WCD934X_IRQ_EAR_PA_OCP_FAULT, + WCD934X_IRQ_HPH_PA_CNPL_COMPLETE, + WCD934X_IRQ_HPH_PA_CNPR_COMPLETE, + WCD934X_IRQ_EAR_PA_CNP_COMPLETE, + /* INTR_REG 1 */ + WCD934X_IRQ_MBHC_SW_DET, + WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, + WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, + WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, + WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, + WCD934X_IRQ_RESERVED_0, + WCD934X_IRQ_RESERVED_1, + WCD934X_IRQ_RESERVED_2, + /* INTR_REG 2 */ + WCD934X_IRQ_LINE_PA1_CNP_COMPLETE, + WCD934X_IRQ_LINE_PA2_CNP_COMPLETE, + WCD934X_IRQ_SLNQ_ANALOG_ERROR, + WCD934X_IRQ_RESERVED_3, + WCD934X_IRQ_SOUNDWIRE, + WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE, + WCD934X_IRQ_RCO_ERROR, + WCD934X_IRQ_CPE_ERROR, + /* INTR_REG 3 */ + WCD934X_IRQ_MAD_AUDIO, + WCD934X_IRQ_MAD_BEACON, + WCD934X_IRQ_MAD_ULTRASOUND, + WCD934X_IRQ_VBAT_ATTACK, + WCD934X_IRQ_VBAT_RESTORE, + WCD934X_IRQ_CPE1_INTR, + WCD934X_IRQ_RESERVED_4, + WCD934X_IRQ_SLNQ_DIGITAL, + WCD934X_NUM_IRQS, +}; + +#endif diff --git a/include/linux/mfd/wcd9xxx/core.h b/include/linux/mfd/wcd9xxx/core.h index c6c8d244e2c7..b4c1be40ff31 100644 --- a/include/linux/mfd/wcd9xxx/core.h +++ b/include/linux/mfd/wcd9xxx/core.h @@ -434,4 +434,7 @@ static inline int __init wcd9xxx_irq_of_init(struct device_node *node, { return 0; } + +int wcd9xxx_init(void); +void wcd9xxx_exit(void); #endif diff --git a/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h b/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h index 1e428a1e8b26..99ce60383cc2 100644 --- a/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h +++ b/include/linux/mfd/wcd9xxx/wcd9xxx-irq.h @@ -32,4 +32,6 @@ void wcd9xxx_disable_irq_sync(struct wcd9xxx_core_resource *wcd9xxx_res, int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res); void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res); +int wcd9xxx_irq_drv_init(void); +void wcd9xxx_irq_drv_exit(void); #endif diff --git a/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h b/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h index d0ac0ac17587..7a13dd19e8c0 100644 --- a/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h +++ b/include/linux/mfd/wcd9xxx/wcd9xxx-utils.h @@ -33,108 +33,8 @@ typedef int (*codec_bringdown_fn)(struct wcd9xxx *); typedef int (*codec_type_fn)(struct wcd9xxx *, struct wcd9xxx_codec_type *); -#ifdef CONFIG_WCD934X_CODEC -extern int wcd934x_bringup(struct wcd9xxx *wcd9xxx); -extern int wcd934x_bringdown(struct wcd9xxx *wcd9xxx); -extern int wcd934x_get_codec_info(struct wcd9xxx *wcd9xxx, - struct wcd9xxx_codec_type *wcd_type); -#endif - -#ifdef CONFIG_WCD9335_CODEC -extern int wcd9335_bringup(struct wcd9xxx *wcd9xxx); -extern int wcd9335_bringdown(struct wcd9xxx *wcd9xxx); -extern int wcd9335_get_codec_info(struct wcd9xxx *wcd9xxx, - struct wcd9xxx_codec_type *wcd_type); -#endif - -#ifdef CONFIG_WCD9330_CODEC -extern int wcd9330_bringup(struct wcd9xxx *wcd9xxx); -extern int wcd9330_bringdown(struct wcd9xxx *wcd9xxx); -extern int wcd9330_get_codec_info(struct wcd9xxx *wcd9xxx, - struct wcd9xxx_codec_type *wcd_type); -#endif - -static inline codec_bringdown_fn wcd9xxx_bringdown_fn(int type) -{ - codec_bringdown_fn cdc_bdown_fn; - - switch (type) { -#ifdef CONFIG_WCD934X_CODEC - case WCD934X: - cdc_bdown_fn = wcd934x_bringdown; - break; -#endif -#ifdef CONFIG_WCD9335_CODEC - case WCD9335: - cdc_bdown_fn = wcd9335_bringdown; - break; -#endif -#ifdef CONFIG_WCD9330_CODEC - case WCD9330: - cdc_bdown_fn = wcd9330_bringdown; - 
break; -#endif - default: - cdc_bdown_fn = NULL; - break; - } - - return cdc_bdown_fn; -} - -static inline codec_bringup_fn wcd9xxx_bringup_fn(int type) -{ - codec_bringup_fn cdc_bup_fn; - - switch (type) { -#ifdef CONFIG_WCD934X_CODEC - case WCD934X: - cdc_bup_fn = wcd934x_bringup; - break; -#endif -#ifdef CONFIG_WCD9335_CODEC - case WCD9335: - cdc_bup_fn = wcd9335_bringup; - break; -#endif -#ifdef CONFIG_WCD9330_CODEC - case WCD9330: - cdc_bup_fn = wcd9330_bringup; - break; -#endif - default: - cdc_bup_fn = NULL; - break; - } - - return cdc_bup_fn; -} - -static inline codec_type_fn wcd9xxx_get_codec_info_fn(int type) -{ - codec_type_fn cdc_type_fn; - - switch (type) { -#ifdef CONFIG_WCD934X_CODEC - case WCD934X: - cdc_type_fn = wcd934x_get_codec_info; - break; -#endif -#ifdef CONFIG_WCD9335_CODEC - case WCD9335: - cdc_type_fn = wcd9335_get_codec_info; - break; -#endif -#ifdef CONFIG_WCD9330_CODEC - case WCD9330: - cdc_type_fn = wcd9330_get_codec_info; - break; -#endif - default: - cdc_type_fn = NULL; - break; - } +codec_bringdown_fn wcd9xxx_bringdown_fn(int type); +codec_bringup_fn wcd9xxx_bringup_fn(int type); +codec_type_fn wcd9xxx_get_codec_info_fn(int type); - return cdc_type_fn; -} #endif -- GitLab From 0e051f17bd048d96eb533f1b8f436f3b27c6109b Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 3 May 2017 11:13:46 +0800 Subject: [PATCH 401/786] clk: sunxi-ng: a31: Correct lcd1-ch1 clock register offset commit 38b8f823864707eb1cf331d2247608c419ed388c upstream. The register offset for the lcd1-ch1 clock was incorrectly pointing to the lcd0-ch1 clock. This resulted in the lcd0-ch1 clock being disabled when the clk core disables unused clocks. This then stops the simplefb HDMI output path. Reported-by: Bob Ham Fixes: c6e6c96d8fa6 ("clk: sunxi-ng: Add A31/A31s clocks") Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard Signed-off-by: Greg Kroah-Hartman --- drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 8ca07fe8d3f3..0cca3601d99e 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -556,7 +556,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(lcd0_ch1_clk, "lcd0-ch1", lcd_ch1_parents, 0x12c, 0, 4, 24, 3, BIT(31), CLK_SET_RATE_PARENT); static SUNXI_CCU_M_WITH_MUX_GATE(lcd1_ch1_clk, "lcd1-ch1", lcd_ch1_parents, - 0x12c, 0, 4, 24, 3, BIT(31), + 0x130, 0, 4, 24, 3, BIT(31), CLK_SET_RATE_PARENT); static const char * const csi_sclk_parents[] = { "pll-video0", "pll-video1", -- GitLab From e5c49c1703ae906a8f41f58aabd5258a06fc9e6d Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 18 May 2017 17:28:47 +0200 Subject: [PATCH 402/786] xen/blkback: fix disconnect while I/Os in flight MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 46464411307746e6297a034a9983a22c9dfc5a0c upstream. Today disconnecting xen-blkback is broken in case there are still I/Os in flight: xen_blkif_disconnect() will bail out early without releasing all resources in the hope it will be called again when the last request has terminated. This, however, won't happen as xen_blkif_free() won't be called on termination of the last running request: xen_blkif_put() won't decrement the blkif refcnt to 0 as xen_blkif_disconnect() didn't finish before thus some xen_blkif_put() calls in xen_blkif_disconnect() didn't happen. 
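To make the circular dependency easier to see, here is a minimal user-space model of the accounting described above (ring count and reference numbers are made up; the helper name only mirrors the driver's):

#include <stdio.h>

static int refcnt;

static void blkif_put(void)
{
	if (--refcnt == 0)
		printf("refcnt hit 0 -> xen_blkif_free() would run\n");
	else
		printf("refcnt stuck at %d -> blkif is never freed\n", refcnt);
}

int main(void)
{
	int nr_rings = 4;	/* made-up ring count */

	/* xen_blkif_alloc_rings() used to take one reference per ring ... */
	refcnt = nr_rings;
	/* ... plus a reference held on behalf of the request still in flight. */
	refcnt += 1;

	/*
	 * xen_blkif_disconnect() with I/O in flight: it bails out with
	 * -EBUSY before dropping the per-ring references it was supposed
	 * to release on teardown.
	 */
	printf("disconnect: -EBUSY, %d per-ring refs still held\n", nr_rings);

	/*
	 * The last in-flight request now completes and drops only its own
	 * reference; nothing ever drops the per-ring ones, so the count
	 * cannot reach zero and neither xen_blkif_free() nor a retried
	 * disconnect ever happens.
	 */
	blkif_put();
	return 0;
}

The change below therefore stops using xen_blkif_get()/xen_blkif_put() for per-ring accounting and tracks ring setup with a plain ring->active flag instead.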
To solve this deadlock xen_blkif_disconnect() and xen_blkif_alloc_rings() shouldn't use xen_blkif_put() and xen_blkif_get() but use some other way to do their accounting of resources. This at once fixes another error in xen_blkif_disconnect(): when it returned early with -EBUSY for another ring than 0 it would call xen_blkif_put() again for already handled rings on a subsequent call. This will lead to inconsistencies in the refcnt handling. Signed-off-by: Juergen Gross Tested-by: Steven Haigh Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Greg Kroah-Hartman --- drivers/block/xen-blkback/common.h | 1 + drivers/block/xen-blkback/xenbus.c | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index dea61f6ab8cb..638597b17a38 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -281,6 +281,7 @@ struct xen_blkif_ring { wait_queue_head_t wq; atomic_t inflight; + bool active; /* One thread per blkif ring. */ struct task_struct *xenblkd; unsigned int waiting_reqs; diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 3cc6d1d86f1e..9b69fe410c08 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -159,7 +159,7 @@ static int xen_blkif_alloc_rings(struct xen_blkif *blkif) init_waitqueue_head(&ring->shutdown_wq); ring->blkif = blkif; ring->st_print = jiffies; - xen_blkif_get(blkif); + ring->active = true; } return 0; @@ -249,6 +249,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) struct xen_blkif_ring *ring = &blkif->rings[r]; unsigned int i = 0; + if (!ring->active) + continue; + if (ring->xenblkd) { kthread_stop(ring->xenblkd); wake_up(&ring->shutdown_wq); @@ -296,7 +299,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) BUG_ON(ring->free_pages_num != 0); BUG_ON(ring->persistent_gnt_c != 0); WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages)); - xen_blkif_put(blkif); + ring->active = false; } blkif->nr_ring_pages = 0; /* -- GitLab From 4ae2cb91a6365a6472fad7f04785cc0420ea5ada Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Tue, 13 Jun 2017 16:28:27 -0400 Subject: [PATCH 403/786] xen-blkback: don't leak stack data via response ring commit 089bc0143f489bd3a4578bdff5f4ca68fb26f341 upstream. Rather than constructing a local structure instance on the stack, fill the fields directly on the shared ring, just like other backends do. Build on the fact that all response structure flavors are actually identical (the old code did make this assumption too). This is XSA-216. 
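The disclosure itself is easy to reproduce in isolation; a minimal user-space sketch (the struct merely mirrors the response shape, the values and output are made up):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/*
 * Same shape as a blkif response: an 8-byte id, a 1-byte operation and a
 * 2-byte status leave padding holes (offsets 9 and 12-15 on a typical
 * 64-bit ABI) that member assignment never writes.
 */
struct resp {
	uint64_t id;
	uint8_t  operation;
	int16_t  status;
};

int main(void)
{
	unsigned char ring_slot[sizeof(struct resp)];
	struct resp r;				/* uninitialized stack memory */

	r.id = 0x1234;
	r.operation = 1;
	r.status = 0;
	memcpy(ring_slot, &r, sizeof(r));	/* old style: copies the padding too */

	for (size_t i = 0; i < sizeof(r); i++)
		printf("%02x ", ring_slot[i]);	/* padding bytes may show stack garbage */
	printf("\n");
	return 0;
}

Because member assignment leaves the padding holes untouched, memcpy()ing the whole struct publishes whatever the stack happened to contain; writing the three fields directly on the ring, as the hunks below do, never moves host stack bytes into guest-visible memory.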
Signed-off-by: Jan Beulich Reviewed-by: Konrad Rzeszutek Wilk Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Greg Kroah-Hartman --- drivers/block/xen-blkback/blkback.c | 23 ++++++++++++----------- drivers/block/xen-blkback/common.h | 25 +++++-------------------- 2 files changed, 17 insertions(+), 31 deletions(-) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 4a80ee752597..c42202d63567 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -1436,34 +1436,35 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring, static void make_response(struct xen_blkif_ring *ring, u64 id, unsigned short op, int st) { - struct blkif_response resp; + struct blkif_response *resp; unsigned long flags; union blkif_back_rings *blk_rings; int notify; - resp.id = id; - resp.operation = op; - resp.status = st; - spin_lock_irqsave(&ring->blk_ring_lock, flags); blk_rings = &ring->blk_rings; /* Place on the response ring for the relevant domain. */ switch (ring->blkif->blk_protocol) { case BLKIF_PROTOCOL_NATIVE: - memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->native, + blk_rings->native.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_32: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_32, + blk_rings->x86_32.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_64: - memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), - &resp, sizeof(resp)); + resp = RING_GET_RESPONSE(&blk_rings->x86_64, + blk_rings->x86_64.rsp_prod_pvt); break; default: BUG(); } + + resp->id = id; + resp->operation = op; + resp->status = st; + blk_rings->common.rsp_prod_pvt++; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); spin_unlock_irqrestore(&ring->blk_ring_lock, flags); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 638597b17a38..ecb35fe8ca8d 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -75,9 +75,8 @@ extern unsigned int xenblk_max_queues; struct blkif_common_request { char dummy; }; -struct blkif_common_response { - char dummy; -}; + +/* i386 protocol version */ struct blkif_x86_32_request_rw { uint8_t nr_segments; /* number of segments */ @@ -129,14 +128,6 @@ struct blkif_x86_32_request { } u; } __attribute__((__packed__)); -/* i386 protocol version */ -#pragma pack(push, 4) -struct blkif_x86_32_response { - uint64_t id; /* copied from request */ - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? */ -}; -#pragma pack(pop) /* x86_64 protocol version */ struct blkif_x86_64_request_rw { @@ -193,18 +184,12 @@ struct blkif_x86_64_request { } u; } __attribute__((__packed__)); -struct blkif_x86_64_response { - uint64_t __attribute__((__aligned__(8))) id; - uint8_t operation; /* copied from request */ - int16_t status; /* BLKIF_RSP_??? 
*/ -}; - DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, - struct blkif_common_response); + struct blkif_response); DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, - struct blkif_x86_32_response); + struct blkif_response __packed); DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, - struct blkif_x86_64_response); + struct blkif_response); union blkif_back_rings { struct blkif_back_ring native; -- GitLab From 8c9c55a0f5764337ff4429ae6e15c3b6a45d1124 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sun, 11 Jun 2017 16:08:21 +0900 Subject: [PATCH 404/786] ALSA: firewire-lib: Fix stall of process context at packet error commit 4a9bfafc64f44ef83de4e00ca1b57352af6cd8c2 upstream. At Linux v3.5, packet processing can be done in process context of ALSA PCM application as well as software IRQ context for OHCI 1394. Below is an example of the callgraph (some calls are omitted). ioctl(2) with e.g. HWSYNC (sound/core/pcm_native.c) ->snd_pcm_common_ioctl1() ->snd_pcm_hwsync() ->snd_pcm_stream_lock_irq (sound/core/pcm_lib.c) ->snd_pcm_update_hw_ptr() ->snd_pcm_udpate_hw_ptr0() ->struct snd_pcm_ops.pointer() (sound/firewire/*) = Each handler on drivers in ALSA firewire stack (sound/firewire/amdtp-stream.c) ->amdtp_stream_pcm_pointer() (drivers/firewire/core-iso.c) ->fw_iso_context_flush_completions() ->struct fw_card_driver.flush_iso_completion() (drivers/firewire/ohci.c) = flush_iso_completions() ->struct fw_iso_context.callback.sc (sound/firewire/amdtp-stream.c) = in_stream_callback() or out_stream_callback() ->... ->snd_pcm_stream_unlock_irq When packet queueing error occurs or detecting invalid packets in 'in_stream_callback()' or 'out_stream_callback()', 'snd_pcm_stop_xrun()' is called on local CPU with disabled IRQ. (sound/firewire/amdtp-stream.c) in_stream_callback() or out_stream_callback() ->amdtp_stream_pcm_abort() ->snd_pcm_stop_xrun() ->snd_pcm_stream_lock_irqsave() ->snd_pcm_stop() ->snd_pcm_stream_unlock_irqrestore() The process is stalled on the CPU due to attempt to acquire recursive lock. [ 562.630853] INFO: rcu_sched detected stalls on CPUs/tasks: [ 562.630861] 2-...: (1 GPs behind) idle=37d/140000000000000/0 softirq=38323/38323 fqs=7140 [ 562.630862] (detected by 3, t=15002 jiffies, g=21036, c=21035, q=5933) [ 562.630866] Task dump for CPU 2: [ 562.630867] alsa-source-OXF R running task 0 6619 1 0x00000008 [ 562.630870] Call Trace: [ 562.630876] ? vt_console_print+0x79/0x3e0 [ 562.630880] ? msg_print_text+0x9d/0x100 [ 562.630883] ? up+0x32/0x50 [ 562.630885] ? irq_work_queue+0x8d/0xa0 [ 562.630886] ? console_unlock+0x2b6/0x4b0 [ 562.630888] ? vprintk_emit+0x312/0x4a0 [ 562.630892] ? dev_vprintk_emit+0xbf/0x230 [ 562.630895] ? do_sys_poll+0x37a/0x550 [ 562.630897] ? dev_printk_emit+0x4e/0x70 [ 562.630900] ? __dev_printk+0x3c/0x80 [ 562.630903] ? _raw_spin_lock+0x20/0x30 [ 562.630909] ? snd_pcm_stream_lock+0x31/0x50 [snd_pcm] [ 562.630914] ? _snd_pcm_stream_lock_irqsave+0x2e/0x40 [snd_pcm] [ 562.630918] ? snd_pcm_stop_xrun+0x16/0x70 [snd_pcm] [ 562.630922] ? in_stream_callback+0x3e6/0x450 [snd_firewire_lib] [ 562.630925] ? handle_ir_packet_per_buffer+0x8e/0x1a0 [firewire_ohci] [ 562.630928] ? ohci_flush_iso_completions+0xa3/0x130 [firewire_ohci] [ 562.630932] ? fw_iso_context_flush_completions+0x15/0x20 [firewire_core] [ 562.630935] ? amdtp_stream_pcm_pointer+0x2d/0x40 [snd_firewire_lib] [ 562.630938] ? pcm_capture_pointer+0x19/0x20 [snd_oxfw] [ 562.630943] ? snd_pcm_update_hw_ptr0+0x47/0x3d0 [snd_pcm] [ 562.630945] ? 
poll_select_copy_remaining+0x150/0x150 [ 562.630947] ? poll_select_copy_remaining+0x150/0x150 [ 562.630952] ? snd_pcm_update_hw_ptr+0x10/0x20 [snd_pcm] [ 562.630956] ? snd_pcm_hwsync+0x45/0xb0 [snd_pcm] [ 562.630960] ? snd_pcm_common_ioctl1+0x1ff/0xc90 [snd_pcm] [ 562.630962] ? futex_wake+0x90/0x170 [ 562.630966] ? snd_pcm_capture_ioctl1+0x136/0x260 [snd_pcm] [ 562.630970] ? snd_pcm_capture_ioctl+0x27/0x40 [snd_pcm] [ 562.630972] ? do_vfs_ioctl+0xa3/0x610 [ 562.630974] ? vfs_read+0x11b/0x130 [ 562.630976] ? SyS_ioctl+0x79/0x90 [ 562.630978] ? entry_SYSCALL_64_fastpath+0x1e/0xad This commit fixes the above bug. This assumes two cases: 1. Any error is detected in software IRQ context of OHCI 1394 context. In this case, PCM substream should be aborted in packet handler. On the other hand, it should not be done in any process context. TO distinguish these two context, use 'in_interrupt()' macro. 2. Any error is detect in process context of ALSA PCM application. In this case, PCM substream should not be aborted in packet handler because PCM substream lock is acquired. The task to abort PCM substream should be done in ALSA PCM core. For this purpose, SNDRV_PCM_POS_XRUN is returned at 'struct snd_pcm_ops.pointer()'. Suggested-by: Clemens Ladisch Fixes: e9148dddc3c7("ALSA: firewire-lib: flush completed packets when reading PCM position") Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/firewire/amdtp-stream.c | 8 ++++++-- sound/firewire/amdtp-stream.h | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index 00060c4a9deb..9741757436be 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c @@ -606,7 +606,9 @@ static void out_stream_callback(struct fw_iso_context *context, u32 tstamp, cycle = increment_cycle_count(cycle, 1); if (handle_out_packet(s, cycle, i) < 0) { s->packet_index = -1; - amdtp_stream_pcm_abort(s); + if (in_interrupt()) + amdtp_stream_pcm_abort(s); + WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); return; } } @@ -658,7 +660,9 @@ static void in_stream_callback(struct fw_iso_context *context, u32 tstamp, /* Queueing error or detecting invalid payload. */ if (i < packets) { s->packet_index = -1; - amdtp_stream_pcm_abort(s); + if (in_interrupt()) + amdtp_stream_pcm_abort(s); + WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); return; } diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h index c1bc7fad056e..f7c054bc9d92 100644 --- a/sound/firewire/amdtp-stream.h +++ b/sound/firewire/amdtp-stream.h @@ -124,7 +124,7 @@ struct amdtp_stream { /* For a PCM substream processing. */ struct snd_pcm_substream *pcm; struct tasklet_struct period_tasklet; - unsigned int pcm_buffer_pointer; + snd_pcm_uframes_t pcm_buffer_pointer; unsigned int pcm_period_pointer; /* To wait for first packet. */ -- GitLab From 552a14a572a21cac91d190b38ac1a8aba9730f35 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 14 Jun 2017 16:20:32 +0200 Subject: [PATCH 405/786] ALSA: pcm: Don't treat NULL chmap as a fatal error commit 2deaeaf102d692cb6f764123b1df7aa118a8e97c upstream. The standard PCM chmap helper callbacks treat the NULL info->chmap as a fatal error and spews the kernel warning with stack trace when CONFIG_SND_DEBUG is on. This was OK, originally it was supposed to be always static and non-NULL. 
But, as the recent addition of Intel LPE audio driver shows, the chmap content may vary dynamically, and it can be even NULL when disconnected. The user still sees the kernel warning unnecessarily. For clearing such a confusion, this patch simply removes the snd_BUG_ON() in each place, just returns an error without warning. Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/pcm_lib.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index bb1261591a1f..7f0598b32f13 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -2491,7 +2491,7 @@ static int pcm_chmap_ctl_get(struct snd_kcontrol *kcontrol, struct snd_pcm_substream *substream; const struct snd_pcm_chmap_elem *map; - if (snd_BUG_ON(!info->chmap)) + if (!info->chmap) return -EINVAL; substream = snd_pcm_chmap_substream(info, idx); if (!substream) @@ -2523,7 +2523,7 @@ static int pcm_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int __user *dst; int c, count = 0; - if (snd_BUG_ON(!info->chmap)) + if (!info->chmap) return -EINVAL; if (size < 8) return -ENOMEM; -- GitLab From 3d6848e491df6abbf5fb5b1fabb7a5df2e2b8f4f Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 23 Jun 2017 15:08:57 -0700 Subject: [PATCH 406/786] fs/exec.c: account for argv/envp pointers commit 98da7d08850fb8bdeb395d6368ed15753304aa0c upstream. When limiting the argv/envp strings during exec to 1/4 of the stack limit, the storage of the pointers to the strings was not included. This means that an exec with huge numbers of tiny strings could eat 1/4 of the stack limit in strings and then additional space would be later used by the pointers to the strings. For example, on 32-bit with a 8MB stack rlimit, an exec with 1677721 single-byte strings would consume less than 2MB of stack, the max (8MB / 4) amount allowed, but the pointers to the strings would consume the remaining additional stack space (1677721 * 4 == 6710884). The result (1677721 + 6710884 == 8388605) would exhaust stack space entirely. Controlling this stack exhaustion could result in pathological behavior in setuid binaries (CVE-2017-1000365). [akpm@linux-foundation.org: additional commenting from Kees] Fixes: b6a2fea39318 ("mm: variable length argument support") Link: http://lkml.kernel.org/r/20170622001720.GA32173@beast Signed-off-by: Kees Cook Acked-by: Rik van Riel Acked-by: Michal Hocko Cc: Alexander Viro Cc: Qualys Security Advisory Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/exec.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index 67e86571685a..91441402d706 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -215,8 +215,26 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, if (write) { unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; + unsigned long ptr_size; struct rlimit *rlim; + /* + * Since the stack will hold pointers to the strings, we + * must account for them as well. + * + * The size calculation is the entire vma while each arg page is + * built, so each time we get here it's calculating how far it + * is currently (rather than each call being just the newly + * added size from the arg page). As a result, we need to + * always add the entire size of the pointers, so that on the + * last call to get_arg_page() we'll actually have the entire + * correct size. 
+ */ + ptr_size = (bprm->argc + bprm->envc) * sizeof(void *); + if (ptr_size > ULONG_MAX - size) + goto fail; + size += ptr_size; + acct_arg_size(bprm, size / PAGE_SIZE); /* @@ -234,13 +252,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, * to work from. */ rlim = current->signal->rlim; - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) { - put_page(page); - return NULL; - } + if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) + goto fail; } return page; + +fail: + put_page(page); + return NULL; } static void put_arg_page(struct page *page) -- GitLab From 4b660fcbc64ebd8a9a0acd12f15e3dc1ab0480b2 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Thu, 15 Jun 2017 19:16:48 +0530 Subject: [PATCH 407/786] powerpc/perf: Fix oops when kthread execs user process commit bf05fc25f268cd62f147f368fe65ad3e5b04fe9f upstream. When a kthread calls call_usermodehelper() the steps are: 1. allocate current->mm 2. load_elf_binary() 3. populate current->thread.regs While doing this, interrupts are not disabled. If there is a perf interrupt in the middle of this process (i.e. step 1 has completed but not yet reached to step 3) and if perf tries to read userspace regs, kernel oops with following log: Unable to handle kernel paging request for data at address 0x00000000 Faulting instruction address: 0xc0000000000da0fc ... Call Trace: perf_output_sample_regs+0x6c/0xd0 perf_output_sample+0x4e4/0x830 perf_event_output_forward+0x64/0x90 __perf_event_overflow+0x8c/0x1e0 record_and_restart+0x220/0x5c0 perf_event_interrupt+0x2d8/0x4d0 performance_monitor_exception+0x54/0x70 performance_monitor_common+0x158/0x160 --- interrupt: f01 at avtab_search_node+0x150/0x1a0 LR = avtab_search_node+0x100/0x1a0 ... load_elf_binary+0x6e8/0x15a0 search_binary_handler+0xe8/0x290 do_execveat_common.isra.14+0x5f4/0x840 call_usermodehelper_exec_async+0x170/0x210 ret_from_kernel_thread+0x5c/0x7c Fix it by setting abi to PERF_SAMPLE_REGS_ABI_NONE when userspace pt_regs are not set. Fixes: ed4a4ef85cf5 ("powerpc/perf: Add support for sampling interrupt register state") Signed-off-by: Ravi Bangoria Acked-by: Naveen N. Rao Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/perf/perf_regs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index d24a8a3668fa..28ae8bd6228e 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -100,5 +100,6 @@ void perf_get_regs_user(struct perf_regs *regs_user, struct pt_regs *regs_user_copy) { regs_user->regs = task_pt_regs(current); - regs_user->abi = perf_reg_abi(current); + regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) : + PERF_SAMPLE_REGS_ABI_NONE; } -- GitLab From bc6eecff3d9594b2d5b9f41d4b480dba720e00ca Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 23 Jun 2017 15:08:43 -0700 Subject: [PATCH 408/786] autofs: sanity check status reported with AUTOFS_DEV_IOCTL_FAIL commit 9fa4eb8e490a28de40964b1b0e583d8db4c7e57c upstream. If a positive status is passed with the AUTOFS_DEV_IOCTL_FAIL ioctl, autofs4_d_automount() will return ERR_PTR(status) with that status to follow_automount(), which will then dereference an invalid pointer. So treat a positive status the same as zero, and map to ENOENT. See comment in systemd src/core/automount.c::automount_send_ready(). 
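A small user-space sketch shows why a positive status is dangerous here (the macros are simplified copies of the kernel's error-pointer helpers; the value 7 is arbitrary):

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *err = ERR_PTR(-2);	/* -ENOENT: recognised as an error pointer */
	void *bad = ERR_PTR(7);		/* positive "status" passed from user space */

	printf("IS_ERR(ERR_PTR(-ENOENT)) = %d\n", (int)IS_ERR(err));	/* 1 */
	printf("IS_ERR(ERR_PTR(7))       = %d\n", (int)IS_ERR(bad));	/* 0 */

	/*
	 * A zero from IS_ERR() makes follow_automount() treat 0x7 as a real
	 * pointer and dereference it; mapping any status > 0 to -ENOENT,
	 * as the hunk below does, avoids that.
	 */
	return 0;
}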
Link: http://lkml.kernel.org/r/871sqwczx5.fsf@notabene.neil.brown.name Signed-off-by: NeilBrown Cc: Ian Kent Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/autofs4/dev-ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index fc09eb77ddf3..ffc69dd28928 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -345,7 +345,7 @@ static int autofs_dev_ioctl_fail(struct file *fp, int status; token = (autofs_wqt_t) param->fail.token; - status = param->fail.status ? param->fail.status : -ENOENT; + status = param->fail.status < 0 ? param->fail.status : -ENOENT; return autofs4_wait_release(sbi, token, status); } -- GitLab From 7c679fe729c258f09f169d9e3f8551b6e91d97e2 Mon Sep 17 00:00:00 2001 From: Ilya Matveychikov Date: Fri, 23 Jun 2017 15:08:49 -0700 Subject: [PATCH 409/786] lib/cmdline.c: fix get_options() overflow while parsing ranges commit a91e0f680bcd9e10c253ae8b62462a38bd48f09f upstream. When using get_options() it's possible to specify a range of numbers, like 1-100500. The problem is that it doesn't track array size while calling internally to get_range() which iterates over the range and fills the memory with numbers. Link: http://lkml.kernel.org/r/2613C75C-B04D-4BFF-82A6-12F97BA0F620@gmail.com Signed-off-by: Ilya V. Matveychikov Cc: Jonathan Corbet Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- lib/cmdline.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/cmdline.c b/lib/cmdline.c index 8f13cf73c2ec..79069d7938ea 100644 --- a/lib/cmdline.c +++ b/lib/cmdline.c @@ -22,14 +22,14 @@ * the values[M, M+1, ..., N] into the ints array in get_options. */ -static int get_range(char **str, int *pint) +static int get_range(char **str, int *pint, int n) { int x, inc_counter, upper_range; (*str)++; upper_range = simple_strtol((*str), NULL, 0); inc_counter = upper_range - *pint; - for (x = *pint; x < upper_range; x++) + for (x = *pint; n && x < upper_range; x++, n--) *pint++ = x; return inc_counter; } @@ -96,7 +96,7 @@ char *get_options(const char *str, int nints, int *ints) break; if (res == 3) { int range_nums; - range_nums = get_range((char **)&str, ints + i); + range_nums = get_range((char **)&str, ints + i, nints - i); if (range_nums < 0) break; /* -- GitLab From 5220378bd91c081d2371070cedccffcb7008f1bf Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 19 Jun 2017 07:26:09 -0700 Subject: [PATCH 410/786] perf/x86/intel: Add 1G DTLB load/store miss support for SKL commit fb3a5055cd7098f8d1dd0cd38d7172211113255f upstream. Current DTLB load/store miss events (0x608/0x649) only counts 4K,2M and 4M page size. Need to extend the events to support any page size (4K/2M/4M/1G). 
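As a rough illustration of where the extra umask bit comes from (the per-page-size bit values are an assumption based on the public Skylake event list, not something stated in this patch):

#include <stdio.h>

/*
 * In perf's cache-event tables the low byte is the event select (0x08 for
 * loads, 0x49 for stores) and the next byte is the umask.  Assumed umask
 * bits: 0x02 = completed 4K walks, 0x04 = completed 2M/4M walks,
 * 0x08 = completed 1G walks.
 */
#define UMASK_WALK_4K		0x02
#define UMASK_WALK_2M_4M	0x04
#define UMASK_WALK_1G		0x08

int main(void)
{
	unsigned int evsel = 0x08;	/* DTLB_LOAD_MISSES */
	unsigned int old = ((UMASK_WALK_4K | UMASK_WALK_2M_4M) << 8) | evsel;
	unsigned int all = ((UMASK_WALK_4K | UMASK_WALK_2M_4M | UMASK_WALK_1G) << 8) | evsel;

	printf("old encoding: 0x%x (1G walks not counted)\n", old);	/* 0x608 */
	printf("new encoding: 0x%x (all completed walks)\n", all);	/* 0xe08 */
	return 0;
}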
The complete DTLB load/store miss events are: DTLB_LOAD_MISSES.WALK_COMPLETED 0xe08 DTLB_STORE_MISSES.WALK_COMPLETED 0xe49 Signed-off-by: Kan Liang Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: eranian@google.com Link: http://lkml.kernel.org/r/20170619142609.11058-1-kan.liang@intel.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index cb8522290e6a..3bdb917716b1 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -431,11 +431,11 @@ static __initconst const u64 skl_hw_cache_event_ids [ C(DTLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_INST_RETIRED.ALL_LOADS */ - [ C(RESULT_MISS) ] = 0x608, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ + [ C(RESULT_MISS) ] = 0xe08, /* DTLB_LOAD_MISSES.WALK_COMPLETED */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_INST_RETIRED.ALL_STORES */ - [ C(RESULT_MISS) ] = 0x649, /* DTLB_STORE_MISSES.WALK_COMPLETED */ + [ C(RESULT_MISS) ] = 0xe49, /* DTLB_STORE_MISSES.WALK_COMPLETED */ }, [ C(OP_PREFETCH) ] = { [ C(RESULT_ACCESS) ] = 0x0, -- GitLab From df3a787b3a71ec9dffddad9704e28a809fa9fdc7 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 19 Jun 2017 08:02:28 +0200 Subject: [PATCH 411/786] KVM: s390: gaccess: fix real-space designation asce handling for gmap shadows commit addb63c18a0d52a9ce2611d039f981f7b6148d2b upstream. For real-space designation asces the asce origin part is only a token. The asce token origin must not be used to generate an effective address for storage references. This however is erroneously done within kvm_s390_shadow_tables(). Furthermore within the same function the wrong parts of virtual addresses are used to generate a corresponding real address (e.g. the region second index is used as region first index). Both of the above can result in incorrect address translations. Only for real space designations with a token origin of zero and addresses below one megabyte the translation was correct. Furthermore replace a "!asce.r" statement with a "!*fake" statement to make it more obvious that a specific condition has nothing to do with the architecture, but with the fake handling of real space designations. 
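A rough sketch of the address split this relies on (field positions are an assumption from the architecture description, not taken from this patch):

#include <stdio.h>
#include <stdint.h>

/*
 * Model of the s390 virtual-address layout: four 11-bit table indexes
 * (region-first/second/third, segment) above the page and byte index.
 */
static unsigned int rfx(uint64_t va) { return (va >> 53) & 0x7ff; }
static unsigned int rsx(uint64_t va) { return (va >> 42) & 0x7ff; }
static unsigned int rtx(uint64_t va) { return (va >> 31) & 0x7ff; }
static unsigned int sx(uint64_t va)  { return (va >> 20) & 0x7ff; }

int main(void)
{
	uint64_t va = 0x0123456789abcdefULL;

	/*
	 * With a real-space designation the table "origin" is only a token,
	 * so the fake table entry at each level must be rebuilt from that
	 * level's own index.  The old code shifted the next level's index
	 * (e.g. rsx << 53 instead of rfx << 53) and so pointed at the wrong
	 * 8PB/4TB/2GB/1MB block.
	 */
	printf("region-first  block: %#llx\n", (unsigned long long)rfx(va) << 53);
	printf("region-second block: %#llx\n", (unsigned long long)rsx(va) << 42);
	printf("region-third  block: %#llx\n", (unsigned long long)rtx(va) << 31);
	printf("segment       block: %#llx\n", (unsigned long long)sx(va)  << 20);
	return 0;
}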
Fixes: 3218f7094b6b ("s390/mm: support real-space for gmap shadows") Cc: David Hildenbrand Signed-off-by: Heiko Carstens Reviewed-by: Martin Schwidefsky Signed-off-by: Christian Borntraeger Signed-off-by: Greg Kroah-Hartman --- arch/s390/kvm/gaccess.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c index 4aa8a7e2a1da..f5d79840f4e0 100644 --- a/arch/s390/kvm/gaccess.c +++ b/arch/s390/kvm/gaccess.c @@ -972,11 +972,12 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, ptr = asce.origin * 4096; if (asce.r) { *fake = 1; + ptr = 0; asce.dt = ASCE_TYPE_REGION1; } switch (asce.dt) { case ASCE_TYPE_REGION1: - if (vaddr.rfx01 > asce.tl && !asce.r) + if (vaddr.rfx01 > asce.tl && !*fake) return PGM_REGION_FIRST_TRANS; break; case ASCE_TYPE_REGION2: @@ -1004,8 +1005,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, union region1_table_entry rfte; if (*fake) { - /* offset in 16EB guest memory block */ - ptr = ptr + ((unsigned long) vaddr.rsx << 53UL); + ptr += (unsigned long) vaddr.rfx << 53; rfte.val = ptr; goto shadow_r2t; } @@ -1031,8 +1031,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, union region2_table_entry rste; if (*fake) { - /* offset in 8PB guest memory block */ - ptr = ptr + ((unsigned long) vaddr.rtx << 42UL); + ptr += (unsigned long) vaddr.rsx << 42; rste.val = ptr; goto shadow_r3t; } @@ -1059,8 +1058,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, union region3_table_entry rtte; if (*fake) { - /* offset in 4TB guest memory block */ - ptr = ptr + ((unsigned long) vaddr.sx << 31UL); + ptr += (unsigned long) vaddr.rtx << 31; rtte.val = ptr; goto shadow_sgt; } @@ -1096,8 +1094,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr, union segment_table_entry ste; if (*fake) { - /* offset in 2G guest memory block */ - ptr = ptr + ((unsigned long) vaddr.sx << 20UL); + ptr += (unsigned long) vaddr.sx << 20; ste.val = ptr; goto shadow_pgt; } -- GitLab From 468aa930c0a299f07ee449fd9c204d48852af372 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 15 Jun 2017 16:10:27 +1000 Subject: [PATCH 412/786] KVM: PPC: Book3S HV: Preserve userspace HTM state properly commit 46a704f8409f79fd66567ad3f8a7304830a84293 upstream. If userspace attempts to call the KVM_RUN ioctl when it has hardware transactional memory (HTM) enabled, the values that it has put in the HTM-related SPRs TFHAR, TFIAR and TEXASR will get overwritten by guest values. To fix this, we detect this condition and save those SPR values in the thread struct, and disable HTM for the task. If userspace goes to access those SPRs or the HTM facility in future, a TM-unavailable interrupt will occur and the handler will reload those SPRs and re-enable HTM. If userspace has started a transaction and suspended it, we would currently lose the transactional state in the guest entry path and would almost certainly get a "TM Bad Thing" interrupt, which would cause the host to crash. To avoid this, we detect this case and return from the KVM_RUN ioctl with an EINVAL error, with the KVM exit reason set to KVM_EXIT_FAIL_ENTRY. 
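From user space the new check is visible as a failed KVM_RUN; a hedged sketch of how a VMM would observe it (setup of vcpu_fd and the mmap'ed kvm_run area is omitted, error handling trimmed):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	int ret = ioctl(vcpu_fd, KVM_RUN, 0);

	/*
	 * If the task entered KVM_RUN with a suspended transaction, the
	 * ioctl now fails with -EINVAL and the reason is reported through
	 * the run structure instead of crashing the host.
	 */
	if (ret < 0 && run->exit_reason == KVM_EXIT_FAIL_ENTRY) {
		fprintf(stderr, "vcpu entry refused, hw reason %llu\n",
			(unsigned long long)
			run->fail_entry.hardware_entry_failure_reason);
		return -1;
	}
	return ret;
}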
Fixes: b005255e12a3 ("KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs", 2014-01-08) Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_hv.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 094deb60c6fe..2f15bafa68b5 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2813,6 +2813,27 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) return -EINVAL; } + /* + * Don't allow entry with a suspended transaction, because + * the guest entry/exit code will lose it. + * If the guest has TM enabled, save away their TM-related SPRs + * (they will get restored by the TM unavailable interrupt). + */ +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && + (current->thread.regs->msr & MSR_TM)) { + if (MSR_TM_ACTIVE(current->thread.regs->msr)) { + run->exit_reason = KVM_EXIT_FAIL_ENTRY; + run->fail_entry.hardware_entry_failure_reason = 0; + return -EINVAL; + } + current->thread.tm_tfhar = mfspr(SPRN_TFHAR); + current->thread.tm_tfiar = mfspr(SPRN_TFIAR); + current->thread.tm_texasr = mfspr(SPRN_TEXASR); + current->thread.regs->msr &= ~MSR_TM; + } +#endif + kvmppc_core_prepare_to_enter(vcpu); /* No need to go into the guest when all we'll do is come back out */ -- GitLab From 2f1527e359f4245bfaf9c3047427f3a2abcdb508 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 6 Jun 2017 16:47:22 +1000 Subject: [PATCH 413/786] KVM: PPC: Book3S HV: Context-switch EBB registers properly commit ca8efa1df1d15a1795a2da57f9f6aada6ed6b946 upstream. This adds code to save the values of three SPRs (special-purpose registers) used by userspace to control event-based branches (EBBs), which are essentially interrupts that get delivered directly to userspace. These registers are loaded up with guest values when entering the guest, and their values are saved when exiting the guest, but we were not saving the host values and restoring them before going back to userspace. On POWER8 this would only affect userspace programs which explicitly request the use of EBBs and also use the KVM_RUN ioctl, since the only source of EBBs on POWER8 is the PMU, and there is an explicit enable bit in the PMU registers (and those PMU registers do get properly context-switched between host and guest). On POWER9 there is provision for externally-generated EBBs, and these are not subject to the control in the PMU registers. Since these registers only affect userspace, we can save them when we first come in from userspace and restore them before returning to userspace, rather than saving/restoring the host values on every guest entry/exit. Similarly, we don't need to worry about their values on offline secondary threads since they execute in the context of the idle task, which never executes in userspace. 
Fixes: b005255e12a3 ("KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs", 2014-01-08) Signed-off-by: Paul Mackerras Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kvm/book3s_hv.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 2f15bafa68b5..5c0298422300 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2807,6 +2807,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) { int r; int srcu_idx; + unsigned long ebb_regs[3] = {}; /* shut up GCC */ if (!vcpu->arch.sane) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -2855,6 +2856,13 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) flush_all_to_thread(current); + /* Save userspace EBB register values */ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + ebb_regs[0] = mfspr(SPRN_EBBHR); + ebb_regs[1] = mfspr(SPRN_EBBRR); + ebb_regs[2] = mfspr(SPRN_BESCR); + } + vcpu->arch.wqp = &vcpu->arch.vcore->wq; vcpu->arch.pgdir = current->mm->pgd; vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; @@ -2877,6 +2885,13 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) r = kvmppc_xics_rm_complete(vcpu, 0); } while (is_kvmppc_resume_guest(r)); + /* Restore userspace EBB register values */ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) { + mtspr(SPRN_EBBHR, ebb_regs[0]); + mtspr(SPRN_EBBRR, ebb_regs[1]); + mtspr(SPRN_BESCR, ebb_regs[2]); + } + out: vcpu->arch.state = KVMPPC_VCPU_NOTREADY; atomic_dec(&vcpu->kvm->arch.vcpus_running); -- GitLab From fb6dc831b5cfdf8faa9255d6dd7b9b1b1265921d Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Tue, 6 Jun 2017 16:58:58 -0700 Subject: [PATCH 414/786] CIFS: Improve readdir verbosity commit dcd87838c06f05ab7650b249ebf0d5b57ae63e1e upstream. Downgrade the loglevel for SMB2 to prevent filling the log with messages if e.g. readdir was interrupted. Also make SMB2 and SMB1 codepaths do the same logging during readdir. 
Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/smb1ops.c | 9 +++++++-- fs/cifs/smb2ops.c | 4 ++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 87b87e091e8e..efd72e1fae74 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { - return CIFSFindFirst(xid, tcon, path, cifs_sb, - &fid->netfid, search_flags, srch_inf, true); + int rc; + + rc = CIFSFindFirst(xid, tcon, path, cifs_sb, + &fid->netfid, search_flags, srch_inf, true); + if (rc) + cifs_dbg(FYI, "find first failed=%d\n", rc); + return rc; } static int diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 36334fe3266c..b6968241c26f 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -964,7 +964,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL); kfree(utf16_path); if (rc) { - cifs_dbg(VFS, "open dir failed\n"); + cifs_dbg(FYI, "open dir failed rc=%d\n", rc); return rc; } @@ -974,7 +974,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_query_directory(xid, tcon, fid->persistent_fid, fid->volatile_fid, 0, srch_inf); if (rc) { - cifs_dbg(VFS, "query directory failed\n"); + cifs_dbg(FYI, "query directory failed rc=%d\n", rc); SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid); } return rc; -- GitLab From cdf300d6105d8d95cb7821476cd84c47d0192623 Mon Sep 17 00:00:00 2001 From: Raju Rangoju Date: Mon, 19 Jun 2017 19:46:00 +0530 Subject: [PATCH 415/786] cxgb4: notify uP to route ctrlq compl to rdma rspq commit dec6b33163d24e2c19ba521c89fffbaab53ae986 upstream. During the module initialisation there is a possible race (basically race between uld and lld) where neither the uld nor lld notifies the uP about where to route the ctrl queue completions. LLD skips notifying uP as the rdma queues were not created by then (will leave it to ULD to notify the uP). As the ULD comes up, it also skips notifying the uP as the flag FULL_INIT_DONE is not set yet (ULD assumes that the interface is not up yet). Consequently, this race between uld and lld leaves uP unnotified about where to send the ctrl queue completions to, leading to iwarp RI_RES WR failure. Here is the race: CPU 0 CPU1 - allocates nic rx queus - t4_sge_alloc_ctrl_txq() (if rdma rsp queues exists, tell uP to route ctrl queue compl to rdma rspq) - acquires the mutex_lock - allocates rdma response queues - if FULL_INIT_DONE set, tell uP to route ctrl queue compl to rdma rspq - relinquishes mutex_lock - acquires the mutex_lock - enable_rx() - set FULL_INIT_DONE - relinquishes mutex_lock This patch fixes the above issue. Fixes: e7519f9926f1('cxgb4: avoid enabling napi twice to the same queue') Signed-off-by: Raju Rangoju Acked-by: Steve Wise Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index f9c2feb4a4e7..0c2a32a305bc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2201,9 +2201,10 @@ static int cxgb_up(struct adapter *adap) { int err; + mutex_lock(&uld_mutex); err = setup_sge_queues(adap); if (err) - goto out; + goto rel_lock; err = setup_rss(adap); if (err) goto freeq; @@ -2227,7 +2228,6 @@ static int cxgb_up(struct adapter *adap) goto irq_err; } - mutex_lock(&uld_mutex); enable_rx(adap); t4_sge_start(adap); t4_intr_enable(adap); @@ -2240,13 +2240,15 @@ static int cxgb_up(struct adapter *adap) #endif /* Initialize hash mac addr list*/ INIT_LIST_HEAD(&adap->mac_hlist); - out: return err; + irq_err: dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); freeq: t4_free_sge_resources(adap); - goto out; + rel_lock: + mutex_unlock(&uld_mutex); + return err; } static void cxgb_down(struct adapter *adapter) -- GitLab From 99afebe8fef928457b931f2d596168240ea5e328 Mon Sep 17 00:00:00 2001 From: Sebastian Parschauer Date: Tue, 6 Jun 2017 13:53:13 +0200 Subject: [PATCH 416/786] HID: Add quirk for Dell PIXART OEM mouse commit 3db28271f0feae129262d30e41384a7c4c767987 upstream. This mouse is also known under other IDs. It needs the quirk ALWAYS_POLL or will disconnect in runlevel 1 or 3. Signed-off-by: Sebastian Parschauer Signed-off-by: Jiri Kosina Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hid-ids.h | 3 +++ drivers/hid/usbhid/hid-quirks.c | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index da9307701abe..cfca43f635a6 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -311,6 +311,9 @@ #define USB_VENDOR_ID_DELCOM 0x0fc5 #define USB_DEVICE_ID_DELCOM_VISUAL_IND 0xb080 +#define USB_VENDOR_ID_DELL 0x413c +#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a + #define USB_VENDOR_ID_DELORME 0x1163 #define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100 #define USB_DEVICE_ID_DELORME_EM_LT20 0x0200 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 97dbb2562ace..2b1620797959 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -81,6 +81,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT }, -- GitLab From f719f20abe2a52fff61ffc3b230308279b841475 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 13 Jun 2017 04:31:16 -0500 Subject: [PATCH 417/786] signal: Only reschedule timers on signals timers have sent commit 57db7e4a2d92c2d3dfbca4ef8057849b2682436b upstream. Thomas Gleixner wrote: > The CRIU support added a 'feature' which allows a user space task to send > arbitrary (kernel) signals to itself. 
The changelog says: > > The kernel prevents sending of siginfo with positive si_code, because > these codes are reserved for kernel. I think we can allow a task to > send such a siginfo to itself. This operation should not be dangerous. > > Quite contrary to that claim, it turns out that it is outright dangerous > for signals with info->si_code == SI_TIMER. The following code sequence in > a user space task allows to crash the kernel: > > id = timer_create(CLOCK_XXX, ..... signo = SIGX); > timer_set(id, ....); > info->si_signo = SIGX; > info->si_code = SI_TIMER: > info->_sifields._timer._tid = id; > info->_sifields._timer._sys_private = 2; > rt_[tg]sigqueueinfo(..., SIGX, info); > sigemptyset(&sigset); > sigaddset(&sigset, SIGX); > rt_sigtimedwait(sigset, info); > > For timers based on CLOCK_PROCESS_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID this > results in a kernel crash because sigwait() dequeues the signal and the > dequeue code observes: > > info->si_code == SI_TIMER && info->_sifields._timer._sys_private != 0 > > which triggers the following callchain: > > do_schedule_next_timer() -> posix_cpu_timer_schedule() -> arm_timer() > > arm_timer() executes a list_add() on the timer, which is already armed via > the timer_set() syscall. That's a double list add which corrupts the posix > cpu timer list. As a consequence the kernel crashes on the next operation > touching the posix cpu timer list. > > Posix clocks which are internally implemented based on hrtimers are not > affected by this because hrtimer_start() can handle already armed timers > nicely, but it's a reliable way to trigger the WARN_ON() in > hrtimer_forward(), which complains about calling that function on an > already armed timer. This problem has existed since the posix timer code was merged into 2.5.63. A few releases earlier in 2.5.60 ptrace gained the ability to inject not just a signal (which linux has supported since 1.0) but the full siginfo of a signal. The core problem is that the code will reschedule in response to signals getting dequeued not just for signals the timers sent but for other signals that happen to a si_code of SI_TIMER. Avoid this confusion by testing to see if the queued signal was preallocated as all timer signals are preallocated, and so far only the timer code preallocates signals. Move the check for if a timer needs to be rescheduled up into collect_signal where the preallocation check must be performed, and pass the result back to dequeue_signal where the code reschedules timers. This makes it clear why the code cares about preallocated timers. Reported-by: Thomas Gleixner History Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git Reference: 66dd34ad31e5 ("signal: allow to send any siginfo to itself") Reference: 1669ce53e2ff ("Add PTRACE_GETSIGINFO and PTRACE_SETSIGINFO") Fixes: db8b50ba75f2 ("[PATCH] POSIX clocks & timers") Signed-off-by: "Eric W. 
Biederman" Signed-off-by: Greg Kroah-Hartman --- kernel/signal.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/kernel/signal.c b/kernel/signal.c index 0b1415720a15..deb04d5983ed 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -503,7 +503,8 @@ int unhandled_signal(struct task_struct *tsk, int sig) return !tsk->ptrace; } -static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) +static void collect_signal(int sig, struct sigpending *list, siginfo_t *info, + bool *resched_timer) { struct sigqueue *q, *first = NULL; @@ -525,6 +526,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) still_pending: list_del_init(&first->list); copy_siginfo(info, &first->info); + + *resched_timer = + (first->flags & SIGQUEUE_PREALLOC) && + (info->si_code == SI_TIMER) && + (info->si_sys_private); + __sigqueue_free(first); } else { /* @@ -541,12 +548,12 @@ static void collect_signal(int sig, struct sigpending *list, siginfo_t *info) } static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, - siginfo_t *info) + siginfo_t *info, bool *resched_timer) { int sig = next_signal(pending, mask); if (sig) - collect_signal(sig, pending, info); + collect_signal(sig, pending, info, resched_timer); return sig; } @@ -558,15 +565,16 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, */ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) { + bool resched_timer = false; int signr; /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ - signr = __dequeue_signal(&tsk->pending, mask, info); + signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); if (!signr) { signr = __dequeue_signal(&tsk->signal->shared_pending, - mask, info); + mask, info, &resched_timer); /* * itimer signal ? * @@ -611,7 +619,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) */ current->jobctl |= JOBCTL_STOP_DEQUEUED; } - if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) { + if (resched_timer) { /* * Release the siglock to ensure proper locking order * of timer locks outside of siglocks. Note, we leave -- GitLab From 414f51ceb6ffbdd66e185357e40d59832cc33433 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Thu, 1 Jun 2017 16:18:15 +0530 Subject: [PATCH 418/786] powerpc/kprobes: Pause function_graph tracing during jprobes handling commit a9f8553e935f26cb5447f67e280946b0923cd2dc upstream. This fixes a crash when function_graph and jprobes are used together. This is essentially commit 237d28db036e ("ftrace/jprobes/x86: Fix conflict between jprobes and function graph tracing"), but for powerpc. Jprobes breaks function_graph tracing since the jprobe hook needs to use jprobe_return(), which never returns back to the hook, but instead to the original jprobe'd function. The solution is to momentarily pause function_graph tracing before invoking the jprobe hook and re-enable it when returning back to the original jprobe'd function. Fixes: 6794c78243bf ("powerpc64: port of the function graph tracer") Signed-off-by: Naveen N. 
Rao Acked-by: Masami Hiramatsu Acked-by: Steven Rostedt (VMware) Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/kprobes.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index e785cc9e1ecd..fe97cbe04576 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c @@ -511,6 +511,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); #endif + /* + * jprobes use jprobe_return() which skips the normal return + * path of the function, and this messes up the accounting of the + * function graph tracer. + * + * Pause function graph tracing while performing the jprobe function. + */ + pause_graph_tracing(); + return 1; } @@ -533,6 +542,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) * saved regs... */ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); + /* It's OK to start function graph tracing again */ + unpause_graph_tracing(); preempt_enable_no_resched(); return 1; } -- GitLab From 8eaa481dfb4c405abe02000662ad4e1d7c48f742 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 14 Jun 2017 00:12:00 +0530 Subject: [PATCH 419/786] powerpc/64s: Handle data breakpoints in Radix mode commit d89ba5353f301971dd7d2f9fdf25c4432728f38e upstream. On Power9, trying to use data breakpoints throws the splat shown below. This is because the check for a data breakpoint in DSISR is in do_hash_page(), which is not called when in Radix mode. Unable to handle kernel paging request for data at address 0xc000000000e19218 Faulting instruction address: 0xc0000000001155e8 cpu 0x0: Vector: 300 (Data Access) at [c0000000ef1e7b20] pc: c0000000001155e8: find_pid_ns+0x48/0xe0 lr: c000000000116ac4: find_task_by_vpid+0x44/0x90 sp: c0000000ef1e7da0 msr: 9000000000009033 dar: c000000000e19218 dsisr: 400000 Move the check to handle_page_fault() so as to catch data breakpoints in both Hash and Radix MMU modes. We have to change the check in do_hash_page() against 0xa410 to use 0xa450, so as to include the value of (DSISR_DABRMATCH << 16). There are two sites that call handle_page_fault() when in Radix, both already pass DSISR in r4. Fixes: caca285e5ab4 ("powerpc/mm/radix: Use STD_MMU_64 to properly isolate hash related code") Reported-by: Shriya R. Kulkarni Signed-off-by: Naveen N. Rao [mpe: Fix the fall-through case on hash, we need to reload DSISR] Signed-off-by: Michael Ellerman Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/exceptions-64s.S | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 1ba82ea90230..2e2fc1e37715 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1411,10 +1411,8 @@ USE_TEXT_SECTION() .align 7 do_hash_page: #ifdef CONFIG_PPC_STD_MMU_64 - andis. r0,r4,0xa410 /* weird error? */ + andis. r0,r4,0xa450 /* weird error? */ bne- handle_page_fault /* if not, try to insert a HPTE */ - andis. r0,r4,DSISR_DABRMATCH@h - bne- handle_dabr_fault CURRENT_THREAD_INFO(r11, r1) lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ andis. r0,r0,NMI_MASK@h /* (i.e. 
an irq when soft-disabled) */ @@ -1438,11 +1436,16 @@ do_hash_page: /* Error */ blt- 13f + + /* Reload DSISR into r4 for the DABR check below */ + ld r4,_DSISR(r1) #endif /* CONFIG_PPC_STD_MMU_64 */ /* Here we have a page fault that hash_page can't handle. */ handle_page_fault: -11: ld r4,_DAR(r1) +11: andis. r0,r4,DSISR_DABRMATCH@h + bne- handle_dabr_fault + ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD bl do_page_fault -- GitLab From 20d8f785f9749a9fb4522ff255a7df60b2314cb2 Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Mon, 19 Jun 2017 19:48:52 -0700 Subject: [PATCH 420/786] Input: i8042 - add Fujitsu Lifebook AH544 to notimeout list commit 817ae460c784f32cd45e60b2b1b21378c3c6a847 upstream. Without this quirk, the touchpad is not responsive on this product, with the following message repeated in the logs: psmouse serio1: bad data from KBC - timeout Add it to the notimeout list alongside other similar Fujitsu laptops. Signed-off-by: Daniel Drake Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman --- drivers/input/serio/i8042-x86ia64io.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index e7b96f1ac2c5..5be14ad29d46 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -788,6 +788,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"), }, }, + { + /* Fujitsu UH554 laptop */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"), + }, + }, { } }; -- GitLab From 1dd15bd62221be131279340d2b91a3b993365a72 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Mon, 12 Jun 2017 12:47:32 +0100 Subject: [PATCH 421/786] brcmfmac: add parameter to pass error code in firmware callback commit 6d0507a777fbc533f7f1bf5664a81982dd50dece upstream. Extend the parameters in the firmware callback so it can be called upon success and failure. This allows the caller to properly clear all resources in the failure path. Right now the error code is always zero, ie. success. 
Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- .../broadcom/brcm80211/brcmfmac/firmware.c | 10 +++++----- .../broadcom/brcm80211/brcmfmac/firmware.h | 4 ++-- .../wireless/broadcom/brcm80211/brcmfmac/pcie.c | 17 ++++++++++++----- .../wireless/broadcom/brcm80211/brcmfmac/sdio.c | 17 +++++++++++------ .../wireless/broadcom/brcm80211/brcmfmac/usb.c | 6 ++++-- 5 files changed, 34 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index c7c1e9906500..ae61a24202ac 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -442,7 +442,7 @@ struct brcmf_fw { const char *nvram_name; u16 domain_nr; u16 bus_nr; - void (*done)(struct device *dev, const struct firmware *fw, + void (*done)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len); }; @@ -477,7 +477,7 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) if (!nvram && !(fwctx->flags & BRCMF_FW_REQ_NV_OPTIONAL)) goto fail; - fwctx->done(fwctx->dev, fwctx->code, nvram, nvram_length); + fwctx->done(fwctx->dev, 0, fwctx->code, nvram, nvram_length); kfree(fwctx); return; @@ -499,7 +499,7 @@ static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) /* only requested code so done here */ if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) { - fwctx->done(fwctx->dev, fw, NULL, 0); + fwctx->done(fwctx->dev, 0, fw, NULL, 0); kfree(fwctx); return; } @@ -522,7 +522,7 @@ static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, + void (*fw_cb)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len), u16 domain_nr, u16 bus_nr) @@ -555,7 +555,7 @@ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, int brcmf_fw_get_firmwares(struct device *dev, u16 flags, const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, + void (*fw_cb)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len)) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index d3c9f0d52ae3..8fa4b7e1ab3d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -73,13 +73,13 @@ void brcmf_fw_nvram_free(void *nvram); */ int brcmf_fw_get_firmwares_pcie(struct device *dev, u16 flags, const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, + void (*fw_cb)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len), u16 domain_nr, u16 bus_nr); int brcmf_fw_get_firmwares(struct device *dev, u16 flags, const char *code, const char *nvram, - void (*fw_cb)(struct device *dev, + void (*fw_cb)(struct device *dev, int err, const struct firmware *fw, void *nvram_image, u32 nvram_len)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 3deba90c7eb5..d3d79219fbb0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1618,16 +1618,23 @@ static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { .write32 = brcmf_pcie_buscore_write32, }; -static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, +static void brcmf_pcie_setup(struct device *dev, int ret, + const struct firmware *fw, void *nvram, u32 nvram_len) { - struct brcmf_bus *bus = dev_get_drvdata(dev); - struct brcmf_pciedev *pcie_bus_dev = bus->bus_priv.pcie; - struct brcmf_pciedev_info *devinfo = pcie_bus_dev->devinfo; + struct brcmf_bus *bus; + struct brcmf_pciedev *pcie_bus_dev; + struct brcmf_pciedev_info *devinfo; struct brcmf_commonring **flowrings; - int ret; u32 i; + /* check firmware loading result */ + if (ret) + goto fail; + + bus = dev_get_drvdata(dev); + pcie_bus_dev = bus->bus_priv.pcie; + devinfo = pcie_bus_dev->devinfo; brcmf_pcie_attach(devinfo); /* Some of the firmwares have the size of the memory of the device diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 2458e6e05276..57b04b82e263 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3975,21 +3975,26 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { .get_memdump = brcmf_sdio_bus_get_memdump, }; -static void brcmf_sdio_firmware_callback(struct device *dev, +static void brcmf_sdio_firmware_callback(struct device *dev, int err, const struct firmware *code, void *nvram, u32 nvram_len) { - struct brcmf_bus *bus_if = dev_get_drvdata(dev); - struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; - struct brcmf_sdio *bus = sdiodev->bus; - int err = 0; + struct brcmf_bus *bus_if; + struct brcmf_sdio_dev *sdiodev; + struct brcmf_sdio *bus; u8 saveclk; - brcmf_dbg(TRACE, "Enter: dev=%s\n", dev_name(dev)); + brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); + if (err) + goto fail; + bus_if = dev_get_drvdata(dev); if (!bus_if->drvr) return; + sdiodev = bus_if->bus_priv.sdio; + bus = sdiodev->bus; + /* try to download image and nvram to the dongle */ bus->alp_only = true; err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 2f978a39b58a..52b9fc11199b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1158,13 +1158,15 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo) return ret; } -static void brcmf_usb_probe_phase2(struct device *dev, +static void brcmf_usb_probe_phase2(struct device *dev, int ret, const struct firmware *fw, void *nvram, u32 nvlen) { struct brcmf_bus *bus = dev_get_drvdata(dev); struct brcmf_usbdev_info *devinfo; - int ret; + + if (ret) + goto error; brcmf_dbg(USB, "Start fw downloading\n"); -- GitLab From ba2d8d67875c67fdbd676a3990bbf99064826b03 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Mon, 12 Jun 2017 12:47:33 +0100 Subject: [PATCH 422/786] brcmfmac: use firmware callback upon failure to load commit 03fb0e8393fae8ebb6710a99387853ed0becbc8e upstream. When firmware loading failed the code used to unbind the device provided by the calling code. However, for the sdio driver two devices are bound and both need to be released upon failure. 
The callback has been extended with parameter to pass error code so add that in this commit upon firmware loading failure. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- .../broadcom/brcm80211/brcmfmac/firmware.c | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index ae61a24202ac..d231042f19d6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -484,39 +484,38 @@ static void brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) fail: brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); release_firmware(fwctx->code); - device_release_driver(fwctx->dev); + fwctx->done(fwctx->dev, -ENOENT, NULL, NULL, 0); kfree(fwctx); } static void brcmf_fw_request_code_done(const struct firmware *fw, void *ctx) { struct brcmf_fw *fwctx = ctx; - int ret; + int ret = 0; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev)); - if (!fw) + if (!fw) { + ret = -ENOENT; goto fail; - - /* only requested code so done here */ - if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) { - fwctx->done(fwctx->dev, 0, fw, NULL, 0); - kfree(fwctx); - return; } + /* only requested code so done here */ + if (!(fwctx->flags & BRCMF_FW_REQUEST_NVRAM)) + goto done; + fwctx->code = fw; ret = request_firmware_nowait(THIS_MODULE, true, fwctx->nvram_name, fwctx->dev, GFP_KERNEL, fwctx, brcmf_fw_request_nvram_done); - if (!ret) - return; - - brcmf_fw_request_nvram_done(NULL, fwctx); + /* pass NULL to nvram callback for bcm47xx fallback */ + if (ret) + brcmf_fw_request_nvram_done(NULL, fwctx); return; fail: brcmf_dbg(TRACE, "failed: dev=%s\n", dev_name(fwctx->dev)); - device_release_driver(fwctx->dev); +done: + fwctx->done(fwctx->dev, ret, fw, NULL, 0); kfree(fwctx); } -- GitLab From c81d034bd09aeac70ad0e0a51d281659fff304a8 Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Mon, 12 Jun 2017 12:47:34 +0100 Subject: [PATCH 423/786] brcmfmac: unbind all devices upon failure in firmware callback commit 7a51461fc2da82a6c565a3ee65c41c197f28225d upstream. When request firmware fails, brcmf_ops_sdio_remove is being called and brcmf_bus freed. In such circumstancies if you do a suspend/resume cycle the kernel hangs on resume due a NULL pointer dereference in resume function. So in brcmf_sdio_firmware_callback() we need to unbind the driver from both sdio_func devices when firmware load failure is indicated. 
Tested-by: Enric Balletbo i Serra Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 57b04b82e263..8744b9beda33 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3985,14 +3985,14 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, u8 saveclk; brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); + bus_if = dev_get_drvdata(dev); + sdiodev = bus_if->bus_priv.sdio; if (err) goto fail; - bus_if = dev_get_drvdata(dev); if (!bus_if->drvr) return; - sdiodev = bus_if->bus_priv.sdio; bus = sdiodev->bus; /* try to download image and nvram to the dongle */ @@ -4081,6 +4081,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, fail: brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err); device_release_driver(dev); + device_release_driver(&sdiodev->func[2]->dev); } struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) -- GitLab From 02a37ccd6347897b9227c8ff7f11526321ec2048 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Thu, 8 Jun 2017 16:44:20 -0700 Subject: [PATCH 424/786] time: Fix clock->read(clock) race around clocksource changes commit ceea5e3771ed2378668455fa21861bead7504df5 upstream. In tests, which excercise switching of clocksources, a NULL pointer dereference can be observed on AMR64 platforms in the clocksource read() function: u64 clocksource_mmio_readl_down(struct clocksource *c) { return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; } This is called from the core timekeeping code via: cycle_now = tkr->read(tkr->clock); tkr->read is the cached tkr->clock->read() function pointer. When the clocksource is changed then tkr->clock and tkr->read are updated sequentially. The code above results in a sequential load operation of tkr->read and tkr->clock as well. If the store to tkr->clock hits between the loads of tkr->read and tkr->clock, then the old read() function is called with the new clock pointer. As a consequence the read() function dereferences a different data structure and the resulting 'reg' pointer can point anywhere including NULL. This problem was introduced when the timekeeping code was switched over to use struct tk_read_base. Before that, it was theoretically possible as well when the compiler decided to reload clock in the code sequence: now = tk->clock->read(tk->clock); Add a helper function which avoids the issue by reading tk_read_base->clock once into a local variable clk and then issue the read function via clk->read(clk). This guarantees that the read() function always gets the proper clocksource pointer handed in. Since there is now no use for the tkr.read pointer, this patch also removes it, and to address stopping the fast timekeeper during suspend/resume, it introduces a dummy clocksource to use rather then just a dummy read function. 
Signed-off-by: John Stultz Acked-by: Ingo Molnar Cc: Prarit Bhargava Cc: Richard Cochran Cc: Stephen Boyd Cc: Miroslav Lichvar Cc: Daniel Mentz Link: http://lkml.kernel.org/r/1496965462-20003-2-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner Signed-off-by: Greg Kroah-Hartman --- include/linux/timekeeper_internal.h | 1 - kernel/time/timekeeping.c | 52 ++++++++++++++++++++--------- 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index e88005459035..100e47d40dcd 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -29,7 +29,6 @@ */ struct tk_read_base { struct clocksource *clock; - cycle_t (*read)(struct clocksource *cs); cycle_t mask; cycle_t cycle_last; u32 mult; diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 46e312e9be38..9d861849bb82 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) tk->offs_boot = ktime_add(tk->offs_boot, delta); } +/* + * tk_clock_read - atomic clocksource read() helper + * + * This helper is necessary to use in the read paths because, while the + * seqlock ensures we don't return a bad value while structures are updated, + * it doesn't protect from potential crashes. There is the possibility that + * the tkr's clocksource may change between the read reference, and the + * clock reference passed to the read function. This can cause crashes if + * the wrong clocksource is passed to the wrong read function. + * This isn't necessary to use when holding the timekeeper_lock or doing + * a read of the fast-timekeeper tkrs (which is protected by its own locking + * and update logic). 
+ */ +static inline u64 tk_clock_read(struct tk_read_base *tkr) +{ + struct clocksource *clock = READ_ONCE(tkr->clock); + + return clock->read(clock); +} + #ifdef CONFIG_DEBUG_TIMEKEEPING #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */ @@ -173,7 +193,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) */ do { seq = read_seqcount_begin(&tk_core.seq); - now = tkr->read(tkr->clock); + now = tk_clock_read(tkr); last = tkr->cycle_last; mask = tkr->mask; max = tkr->clock->max_cycles; @@ -207,7 +227,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr) cycle_t cycle_now, delta; /* read clocksource */ - cycle_now = tkr->read(tkr->clock); + cycle_now = tk_clock_read(tkr); /* calculate the delta since the last update_wall_time */ delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); @@ -236,12 +256,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) ++tk->cs_was_changed_seq; old_clock = tk->tkr_mono.clock; tk->tkr_mono.clock = clock; - tk->tkr_mono.read = clock->read; tk->tkr_mono.mask = clock->mask; - tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock); + tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono); tk->tkr_raw.clock = clock; - tk->tkr_raw.read = clock->read; tk->tkr_raw.mask = clock->mask; tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last; @@ -405,7 +423,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) now += timekeeping_delta_to_ns(tkr, clocksource_delta( - tkr->read(tkr->clock), + tk_clock_read(tkr), tkr->cycle_last, tkr->mask)); } while (read_seqcount_retry(&tkf->seq, seq)); @@ -433,6 +451,10 @@ static cycle_t dummy_clock_read(struct clocksource *cs) return cycles_at_suspend; } +static struct clocksource dummy_clock = { + .read = dummy_clock_read, +}; + /** * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. * @tk: Timekeeper to snapshot. @@ -449,13 +471,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk) struct tk_read_base *tkr = &tk->tkr_mono; memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); - cycles_at_suspend = tkr->read(tkr->clock); - tkr_dummy.read = dummy_clock_read; + cycles_at_suspend = tk_clock_read(tkr); + tkr_dummy.clock = &dummy_clock; update_fast_timekeeper(&tkr_dummy, &tk_fast_mono); tkr = &tk->tkr_raw; memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); - tkr_dummy.read = dummy_clock_read; + tkr_dummy.clock = &dummy_clock; update_fast_timekeeper(&tkr_dummy, &tk_fast_raw); } @@ -621,11 +643,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) */ static void timekeeping_forward_now(struct timekeeper *tk) { - struct clocksource *clock = tk->tkr_mono.clock; cycle_t cycle_now, delta; s64 nsec; - cycle_now = tk->tkr_mono.read(clock); + cycle_now = tk_clock_read(&tk->tkr_mono); delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask); tk->tkr_mono.cycle_last = cycle_now; tk->tkr_raw.cycle_last = cycle_now; @@ -901,8 +922,7 @@ void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot) do { seq = read_seqcount_begin(&tk_core.seq); - - now = tk->tkr_mono.read(tk->tkr_mono.clock); + now = tk_clock_read(&tk->tkr_mono); systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; base_real = ktime_add(tk->tkr_mono.base, @@ -1081,7 +1101,7 @@ int get_device_system_crosststamp(int (*get_time_fn) * Check whether the system counter value provided by the * device driver is on the current timekeeping interval. 
*/ - now = tk->tkr_mono.read(tk->tkr_mono.clock); + now = tk_clock_read(&tk->tkr_mono); interval_start = tk->tkr_mono.cycle_last; if (!cycle_between(interval_start, cycles, now)) { clock_was_set_seq = tk->clock_was_set_seq; @@ -1639,7 +1659,7 @@ void timekeeping_resume(void) * The less preferred source will only be tried if there is no better * usable source. The rtc part is handled separately in rtc core code. */ - cycle_now = tk->tkr_mono.read(clock); + cycle_now = tk_clock_read(&tk->tkr_mono); if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && cycle_now > tk->tkr_mono.cycle_last) { u64 num, max = ULLONG_MAX; @@ -2057,7 +2077,7 @@ void update_wall_time(void) #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET offset = real_tk->cycle_interval; #else - offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock), + offset = clocksource_delta(tk_clock_read(&tk->tkr_mono), tk->tkr_mono.cycle_last, tk->tkr_mono.mask); #endif -- GitLab From a53bfdda06ac114c42796b4193aee10a8108bca1 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Thu, 8 Jun 2017 16:44:21 -0700 Subject: [PATCH 425/786] time: Fix CLOCK_MONOTONIC_RAW sub-nanosecond accounting commit 3d88d56c5873f6eebe23e05c3da701960146b801 upstream. Due to how the MONOTONIC_RAW accumulation logic was handled, there is the potential for a 1ns discontinuity when we do accumulations. This small discontinuity has for the most part gone un-noticed, but since ARM64 enabled CLOCK_MONOTONIC_RAW in their vDSO clock_gettime implementation, we've seen failures with the inconsistency-check test in kselftest. This patch addresses the issue by using the same sub-ns accumulation handling that CLOCK_MONOTONIC uses, which avoids the issue for in-kernel users. Since the ARM64 vDSO implementation has its own clock_gettime calculation logic, this patch reduces the frequency of errors, but failures are still seen. The ARM64 vDSO will need to be updated to include the sub-nanosecond xtime_nsec values in its calculation for this issue to be completely fixed. Signed-off-by: John Stultz Tested-by: Daniel Mentz Cc: Prarit Bhargava Cc: Kevin Brodsky Cc: Richard Cochran Cc: Stephen Boyd Cc: Will Deacon Cc: Miroslav Lichvar Link: http://lkml.kernel.org/r/1496965462-20003-3-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner Signed-off-by: Greg Kroah-Hartman --- include/linux/timekeeper_internal.h | 4 ++-- kernel/time/timekeeping.c | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 100e47d40dcd..2c225d46a428 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -57,7 +57,7 @@ struct tk_read_base { * interval. * @xtime_remainder: Shifted nano seconds left over when rounding * @cycle_interval - * @raw_interval: Raw nano seconds accumulated per NTP interval. + * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. * @ntp_error: Difference between accumulated time and NTP time in ntp * shifted nano seconds. * @ntp_error_shift: Shift conversion between clock shifted nano seconds and @@ -99,7 +99,7 @@ struct timekeeper { cycle_t cycle_interval; u64 xtime_interval; s64 xtime_remainder; - u32 raw_interval; + u64 raw_interval; /* The ntp_tick_length() value currently being used. 
* This cached copy ensures we consistently apply the tick * length for an entire tick, as ntp_tick_length may change diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 9d861849bb82..d831827d7ab0 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -278,8 +278,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) /* Go back from cycles -> shifted ns */ tk->xtime_interval = (u64) interval * clock->mult; tk->xtime_remainder = ntpinterval - tk->xtime_interval; - tk->raw_interval = - ((u64) interval * clock->mult) >> clock->shift; + tk->raw_interval = interval * clock->mult; /* if changing clocks, convert xtime_nsec shift units */ if (old_clock) { @@ -2023,7 +2022,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, unsigned int *clock_set) { cycle_t interval = tk->cycle_interval << shift; - u64 raw_nsecs; + u64 snsec_per_sec; /* If the offset is smaller than a shifted interval, do nothing */ if (offset < interval) @@ -2038,14 +2037,15 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, *clock_set |= accumulate_nsecs_to_secs(tk); /* Accumulate raw time */ - raw_nsecs = (u64)tk->raw_interval << shift; - raw_nsecs += tk->raw_time.tv_nsec; - if (raw_nsecs >= NSEC_PER_SEC) { - u64 raw_secs = raw_nsecs; - raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); - tk->raw_time.tv_sec += raw_secs; + tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; + tk->tkr_raw.xtime_nsec += tk->raw_interval << shift; + snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift; + while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) { + tk->tkr_raw.xtime_nsec -= snsec_per_sec; + tk->raw_time.tv_sec++; } - tk->raw_time.tv_nsec = raw_nsecs; + tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift; + tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift; /* Accumulate error between NTP and clock interval */ tk->ntp_error += tk->ntp_tick << shift; -- GitLab From 99f66b5182a4f3d89563634f2fe156d4629b9c10 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 8 Jun 2017 16:44:22 -0700 Subject: [PATCH 426/786] arm64/vdso: Fix nsec handling for CLOCK_MONOTONIC_RAW commit dbb236c1ceb697a559e0694ac4c9e7b9131d0b16 upstream. Recently vDSO support for CLOCK_MONOTONIC_RAW was added in 49eea433b326 ("arm64: Add support for CLOCK_MONOTONIC_RAW in clock_gettime() vDSO"). Noticing that the core timekeeping code never set tkr_raw.xtime_nsec, the vDSO implementation didn't bother exposing it via the data page and instead took the unshifted tk->raw_time.tv_nsec value which was then immediately shifted left in the vDSO code. Unfortunately, by accellerating the MONOTONIC_RAW clockid, it uncovered potential 1ns time inconsistencies caused by the timekeeping core not handing sub-ns resolution. Now that the core code has been fixed and is actually setting tkr_raw.xtime_nsec, we need to take that into account in the vDSO by adding it to the shifted raw_time value, in order to fix the user-visible inconsistency. Rather than do that at each use (and expand the data page in the process), instead perform the shift/addition operation when populating the data page and remove the shift from the vDSO code entirely. 
[jstultz: minor whitespace tweak, tried to improve commit message to make it more clear this fixes a regression] Reported-by: John Stultz Signed-off-by: Will Deacon Signed-off-by: John Stultz Tested-by: Daniel Mentz Acked-by: Kevin Brodsky Cc: Prarit Bhargava Cc: Richard Cochran Cc: Stephen Boyd Cc: Miroslav Lichvar Link: http://lkml.kernel.org/r/1496965462-20003-4-git-send-email-john.stultz@linaro.org Signed-off-by: Thomas Gleixner Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/vdso.c | 5 +++-- arch/arm64/kernel/vdso/gettimeofday.S | 1 - 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index a2c2478e7d78..4bcfe01b5aad 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -217,10 +217,11 @@ void update_vsyscall(struct timekeeper *tk) /* tkr_mono.cycle_last == tkr_raw.cycle_last */ vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; vdso_data->raw_time_sec = tk->raw_time.tv_sec; - vdso_data->raw_time_nsec = tk->raw_time.tv_nsec; + vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec << + tk->tkr_raw.shift) + + tk->tkr_raw.xtime_nsec; vdso_data->xtime_clock_sec = tk->xtime_sec; vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; - /* tkr_raw.xtime_nsec == 0 */ vdso_data->cs_mono_mult = tk->tkr_mono.mult; vdso_data->cs_raw_mult = tk->tkr_raw.mult; /* tkr_mono.shift == tkr_raw.shift */ diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index e00b4671bd7c..76320e920965 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -256,7 +256,6 @@ monotonic_raw: seqcnt_check fail=monotonic_raw /* All computations are done with left-shifted nsecs. */ - lsl x14, x14, x12 get_nsec_per_sec res=x9 lsl x9, x9, x12 -- GitLab From 1f576d53d854b336affc844c616055b518e53bff Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 2 Jun 2017 20:00:17 -0700 Subject: [PATCH 427/786] target: Fix kref->refcount underflow in transport_cmd_finish_abort commit 73d4e580ccc5c3e05cea002f18111f66c9c07034 upstream. This patch fixes a se_cmd->cmd_kref underflow during CMD_T_ABORTED when a fabric driver drops it's second reference from below the target_core_tmr.c based callers of transport_cmd_finish_abort(). Recently with the conversion of kref to refcount_t, this bug was manifesting itself as: [705519.601034] refcount_t: underflow; use-after-free. [705519.604034] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 20116.512 msecs [705539.719111] ------------[ cut here ]------------ [705539.719117] WARNING: CPU: 3 PID: 26510 at lib/refcount.c:184 refcount_sub_and_test+0x33/0x51 Since the original kref atomic_t based kref_put() didn't check for underflow and only invoked the final callback when zero was reached, this bug did not manifest in practice since all se_cmd memory is using preallocated tags. To address this, go ahead and propigate the existing return from transport_put_cmd() up via transport_cmd_finish_abort(), and change transport_cmd_finish_abort() + core_tmr_handle_tas_abort() callers to only do their local target_put_sess_cmd() if necessary. 
Reported-by: Bart Van Assche Tested-by: Bart Van Assche Cc: Mike Christie Cc: Hannes Reinecke Cc: Christoph Hellwig Cc: Himanshu Madhani Cc: Sagi Grimberg Tested-by: Gary Guo Tested-by: Chu Yuan Lin Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/target_core_internal.h | 2 +- drivers/target/target_core_tmr.c | 16 ++++++++-------- drivers/target/target_core_transport.c | 9 ++++++--- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index e2c970a9d61c..be52838cc1a8 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -131,7 +131,7 @@ int init_se_kmem_caches(void); void release_se_kmem_caches(void); u32 scsi_get_new_index(scsi_index_t); void transport_subsystem_check_init(void); -void transport_cmd_finish_abort(struct se_cmd *, int); +int transport_cmd_finish_abort(struct se_cmd *, int); unsigned char *transport_dump_cmd_direction(struct se_cmd *); void transport_dump_dev_state(struct se_device *, char *, int *); void transport_dump_dev_info(struct se_device *, struct se_lun *, diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 4f229e711e1c..27dd1e12f246 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -75,7 +75,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr) kfree(tmr); } -static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) +static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) { unsigned long flags; bool remove = true, send_tas; @@ -91,7 +91,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) transport_send_task_abort(cmd); } - transport_cmd_finish_abort(cmd, remove); + return transport_cmd_finish_abort(cmd, remove); } static int target_check_cdb_and_preempt(struct list_head *list, @@ -185,8 +185,8 @@ void core_tmr_abort_task( cancel_work_sync(&se_cmd->work); transport_wait_for_tasks(se_cmd); - transport_cmd_finish_abort(se_cmd, true); - target_put_sess_cmd(se_cmd); + if (!transport_cmd_finish_abort(se_cmd, true)) + target_put_sess_cmd(se_cmd); printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %llu\n", ref_tag); @@ -286,8 +286,8 @@ static void core_tmr_drain_tmr_list( cancel_work_sync(&cmd->work); transport_wait_for_tasks(cmd); - transport_cmd_finish_abort(cmd, 1); - target_put_sess_cmd(cmd); + if (!transport_cmd_finish_abort(cmd, 1)) + target_put_sess_cmd(cmd); } } @@ -385,8 +385,8 @@ static void core_tmr_drain_state_list( cancel_work_sync(&cmd->work); transport_wait_for_tasks(cmd); - core_tmr_handle_tas_abort(cmd, tas); - target_put_sess_cmd(cmd); + if (!core_tmr_handle_tas_abort(cmd, tas)) + target_put_sess_cmd(cmd); } } diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 077344cc819f..1f9bfa4195ea 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -673,9 +673,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) percpu_ref_put(&lun->lun_ref); } -void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) +int transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); + int ret = 0; if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) transport_lun_remove_cmd(cmd); @@ -687,9 +688,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) cmd->se_tfo->aborted_task(cmd); if (transport_cmd_check_stop_to_fabric(cmd)) - 
return; + return 1; if (remove && ack_kref) - transport_put_cmd(cmd); + ret = transport_put_cmd(cmd); + + return ret; } static void target_complete_failure_work(struct work_struct *work) -- GitLab From 463440e6de4a53e9441c5aadb027d4ab5ea2a3c8 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 3 Jun 2017 05:35:47 -0700 Subject: [PATCH 428/786] iscsi-target: Fix delayed logout processing greater than SECONDS_FOR_LOGOUT_COMP commit 105fa2f44e504c830697b0c794822112d79808dc upstream. This patch fixes a BUG() in iscsit_close_session() that could be triggered when iscsit_logout_post_handler() execution from within tx thread context was not run for more than SECONDS_FOR_LOGOUT_COMP (15 seconds), and the TCP connection didn't already close before then forcing tx thread context to automatically exit. This would manifest itself during explicit logout as: [33206.974254] 1 connection(s) still exist for iSCSI session to iqn.1993-08.org.debian:01:3f5523242179 [33206.980184] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 2100.772 msecs [33209.078643] ------------[ cut here ]------------ [33209.078646] kernel BUG at drivers/target/iscsi/iscsi_target.c:4346! Normally when explicit logout attempt fails, the tx thread context exits and iscsit_close_connection() from rx thread context does the extra cleanup once it detects conn->conn_logout_remove has not been cleared by the logout type specific post handlers. To address this special case, if the logout post handler in tx thread context detects conn->tx_thread_active has already been cleared, simply return and exit in order for existing iscsit_close_connection() logic from rx thread context do failed logout cleanup. Reported-by: Bart Van Assche Tested-by: Bart Van Assche Cc: Mike Christie Cc: Hannes Reinecke Cc: Sagi Grimberg Tested-by: Gary Guo Tested-by: Chu Yuan Lin Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 01ea228358ea..df68a38390e7 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4431,8 +4431,11 @@ static void iscsit_logout_post_handler_closesession( * always sleep waiting for RX/TX thread shutdown to complete * within iscsit_close_connection(). */ - if (!conn->conn_transport->rdma_shutdown) + if (!conn->conn_transport->rdma_shutdown) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); @@ -4448,8 +4451,11 @@ static void iscsit_logout_post_handler_samecid( { int sleep = 1; - if (!conn->conn_transport->rdma_shutdown) + if (!conn->conn_transport->rdma_shutdown) { sleep = cmpxchg(&conn->tx_thread_active, true, false); + if (!sleep) + return; + } atomic_set(&conn->conn_logout_remove, 0); complete(&conn->conn_logout_comp); -- GitLab From 3900f24aa6fac681b2c141a6e359c0cd1fed9676 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 7 Jun 2017 20:29:50 -0700 Subject: [PATCH 429/786] iscsi-target: Reject immediate data underflow larger than SCSI transfer length commit abb85a9b512e8ca7ad04a5a8a6db9664fe644974 upstream. When iscsi WRITE underflow occurs there are two different scenarios that can happen. Normally in practice, when an EDTL vs. SCSI CDB TRANSFER LENGTH underflow is detected, the iscsi immediate data payload is the smaller SCSI CDB TRANSFER LENGTH. 
That is, when a host fabric LLD is using a fixed size EDTL for a specific control CDB, the SCSI CDB TRANSFER LENGTH and actual SCSI payload end up being smaller than EDTL. In iscsi, this means the received iscsi immediate data payload matches the smaller SCSI CDB TRANSFER LENGTH, because there is no more SCSI payload to accept beyond SCSI CDB TRANSFER LENGTH. However, it's possible for a malicious host to send a WRITE underflow where EDTL is larger than SCSI CDB TRANSFER LENGTH, but incoming iscsi immediate data actually matches EDTL. In the wild, we've never had an iscsi host environment actually try to do this. For this special case, it's wrong to truncate part of the control CDB payload and continue to process the command during underflow when immediate data payload received was larger than SCSI CDB TRANSFER LENGTH, so go ahead and reject and drop the bogus payload as a defensive action. Note this potential bug was originally relaxed by the following for allowing WRITE underflow in MSFT FCP host environments: commit c72c5250224d475614a00c1d7e54a67f77cd3410 Author: Roland Dreier Date: Wed Jul 22 15:08:18 2015 -0700 target: allow underflow/overflow for PR OUT etc. commands Cc: Roland Dreier Cc: Mike Christie Cc: Hannes Reinecke Cc: Martin K. Petersen Signed-off-by: Nicholas Bellinger Signed-off-by: Greg Kroah-Hartman --- drivers/target/iscsi/iscsi_target.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index df68a38390e7..155fe0e0623f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1287,6 +1287,18 @@ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr, */ if (dump_payload) goto after_immediate_data; + /* + * Check for underflow case where both EDTL and immediate data payload + * exceeds what is presented by CDB's TRANSFER LENGTH, and what has + * already been set in target_cmd_size_check() as se_cmd->data_length. + * + * For this special case, fail the command and dump the immediate data + * payload. + */ + if (cmd->first_burst_len > cmd->se_cmd.data_length) { + cmd->sense_reason = TCM_INVALID_CDB_FIELD; + goto after_immediate_data; + } immed_ret = iscsit_handle_immediate_data(cmd, hdr, cmd->first_burst_len); -- GitLab From 61ea7c2817bdcdef0f5240b180d1cc073a2c4388 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Jun 2017 12:52:47 -0400 Subject: [PATCH 430/786] drm/radeon: add a PX quirk for another K53TK variant commit 4eb59793cca00b0e629b6d55b5abb5acb82c5868 upstream. Disable PX on these systems.
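For readers who have not seen the radeon PX quirk mechanism before, the short stand-alone sketch below shows the usual pattern of matching a device against such a list by PCI vendor/device/subsystem IDs. The struct layout, helper name and flag value are invented for illustration; they are not the driver's actual radeon_px_quirk definitions, and only the entry values (1002:6741, subsystem 1043:2122) come from this patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the shape of a PCI quirk entry, not the real radeon struct. */
struct px_quirk {
        uint16_t vendor, device, subsys_vendor, subsys_device;
        uint32_t flags;
};

#define QUIRK_DISABLE_PX 0x1

static const struct px_quirk quirk_list[] = {
        /* Asus K53TK variant added by this patch */
        { 0x1002, 0x6741, 0x1043, 0x2122, QUIRK_DISABLE_PX },
        { 0, 0, 0, 0, 0 },      /* terminator */
};

static uint32_t lookup_quirks(uint16_t ven, uint16_t dev, uint16_t sven, uint16_t sdev)
{
        const struct px_quirk *q;

        for (q = quirk_list; q->vendor; q++)
                if (q->vendor == ven && q->device == dev &&
                    q->subsys_vendor == sven && q->subsys_device == sdev)
                        return q->flags;
        return 0;
}

int main(void)
{
        printf("PX disabled: %d\n",
               !!(lookup_quirks(0x1002, 0x6741, 0x1043, 0x2122) & QUIRK_DISABLE_PX));
        return 0;
}

At probe time the driver consults its table once and, when the disable flag matches, simply leaves PX runtime power management off for that board.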
bug: https://bugs.freedesktop.org/show_bug.cgi?id=101491 Signed-off-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/radeon_device.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 621af069a3d2..3b21ca5a6c81 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -136,6 +136,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = { * https://bugzilla.kernel.org/show_bug.cgi?id=51381 */ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, + /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU + * https://bugs.freedesktop.org/show_bug.cgi?id=101491 + */ + { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX }, /* macbook pro 8.2 */ { PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP }, { 0, 0, 0, 0, 0 }, -- GitLab From e4b8d1e8441003ee4e3e68f5cc0296e8e5d5e61f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 19 Jun 2017 15:59:58 -0400 Subject: [PATCH 431/786] drm/radeon: add a quirk for Toshiba Satellite L20-183 commit acfd6ee4fa7ebeee75511825fe02be3f7ac1d668 upstream. Fixes resume from suspend. bug: https://bugzilla.kernel.org/show_bug.cgi?id=196121 Reported-by: Przemek Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/radeon_combios.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 432480ff9d22..3178ba0c537c 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -3393,6 +3393,13 @@ void radeon_combios_asic_init(struct drm_device *dev) rdev->pdev->subsystem_vendor == 0x103c && rdev->pdev->subsystem_device == 0x280a) return; + /* quirk for rs4xx Toshiba Sattellite L20-183 latop to make it resume + * - it hangs on resume inside the dynclk 1 table. + */ + if (rdev->family == CHIP_RS400 && + rdev->pdev->subsystem_vendor == 0x1179 && + rdev->pdev->subsystem_device == 0xff31) + return; /* DYN CLK 1 */ table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); -- GitLab From 217e035d5120de80a6b2224878e157ebb4a390b8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 15 Jun 2017 10:55:11 -0400 Subject: [PATCH 432/786] drm/amdgpu/atom: fix ps allocation size for EnableDispPowerGating MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 05b4017b37f1fce4b7185f138126dd8decdb381f upstream. We were using the wrong structure which lead to an overflow on some boards. 
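The overflow class is the familiar too-small stack buffer: the ATOM table executor can use its argument as a full PS_ALLOCATION-sized workspace, so declaring only the smaller PARAMETERS struct on the stack lets those writes run past the end. A minimal stand-alone illustration, with invented struct contents (the real layouts live in the atombios headers):

#include <stdio.h>
#include <string.h>

/* Stand-ins with invented fields: only the size mismatch matters here. */
struct params_v2_1 {            /* what the old code allocated */
        unsigned char ucDispPipeId;
        unsigned char ucEnable;
        unsigned char ucPadding[2];
};

struct ps_allocation {          /* what the table executor actually scribbles over */
        struct params_v2_1 param;
        unsigned long reserved[4];      /* extra workspace the firmware tables may use */
};

static void execute_table(void *ps_input)
{
        /* The executor treats the argument as a full allocation-sized workspace. */
        memset(ps_input, 0xAA, sizeof(struct ps_allocation));
}

int main(void)
{
        struct params_v2_1 args;        /* too small: execute_table() would overrun it */
        struct ps_allocation safe;      /* the fix: allocate the full workspace */

        printf("params: %zu bytes, allocation: %zu bytes\n", sizeof(args), sizeof(safe));
        execute_table(&safe);           /* in bounds */
        /* execute_table(&args) would write past 'args', the bug this patch fixes */
        return 0;
}

Switching the local variable to the allocation-sized type, as the diff below does, keeps the executor's writes in bounds.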
bug: https://bugs.freedesktop.org/show_bug.cgi?id=101387 Acked-by: Chunming Zhou Acked-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/atombios_crtc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c index f7d236f95e74..57fbde14e978 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c @@ -164,7 +164,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); @@ -177,7 +177,7 @@ void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state) void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev) { int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating); - ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args; + ENABLE_DISP_POWER_GATING_PS_ALLOCATION args; memset(&args, 0, sizeof(args)); -- GitLab From 581659a878200a4e2c15548e3df6a7b82e170cb8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 15 Jun 2017 11:12:28 -0400 Subject: [PATCH 433/786] drm/amdgpu: adjust default display clock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 52b482b0f4fd6d5267faf29fe91398e203f3c230 upstream. Increase the default display clock on newer asics to accomodate some high res modes with really high refresh rates. bug: https://bugs.freedesktop.org/show_bug.cgi?id=93826 Acked-by: Chunming Zhou Acked-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 8e6bf548d689..f8fdbd1378a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -693,6 +693,10 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n", adev->clock.default_dispclk / 100); adev->clock.default_dispclk = 60000; + } else if (adev->clock.default_dispclk <= 60000) { + DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n", + adev->clock.default_dispclk / 100); + adev->clock.default_dispclk = 62500; } adev->clock.dp_extclk = le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); -- GitLab From f2060387421109ac389dd209355918b566fc6f84 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 15 Jun 2017 00:12:24 +0100 Subject: [PATCH 434/786] rxrpc: Fix several cases where a padded len isn't checked in ticket decode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 5f2f97656ada8d811d3c1bef503ced266fcd53a0 upstream. This fixes CVE-2017-7482. When a kerberos 5 ticket is being decoded so that it can be loaded into an rxrpc-type key, there are several places in which the length of a variable-length field is checked to make sure that it's not going to overrun the available data - but the data is padded to the nearest four-byte boundary and the code doesn't check for this extra. 
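A stand-alone illustration of that gap, with made-up lengths: a field whose unpadded length still fits the remaining buffer passes the old check, yet subtracting the length rounded up to the next 4-byte boundary wraps the unsigned remaining-length counter.

#include <stdio.h>

int main(void)
{
        unsigned int toklen = 6;                        /* bytes left in the buffer */
        unsigned int len = 5;                           /* advertised field length */
        unsigned int paddedlen = (len + 3) & ~3u;       /* 8: rounded to a 4-byte boundary */

        if (len > toklen)                               /* old check: passes, 5 <= 6 */
                return 1;
        toklen -= paddedlen;                            /* 6 - 8 wraps to 0xfffffffe */
        printf("remaining after unpadded check: %u\n", toklen);

        /* the fixed check rejects the same input */
        toklen = 6;
        if (paddedlen > toklen)
                printf("rejected: padded length %u exceeds %u remaining\n", paddedlen, toklen);
        return 0;
}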
This could lead to the size-remaining variable wrapping and the data pointer going over the end of the buffer. Fix this by making the various variable-length data checks use the padded length. Reported-by: 石磊 Signed-off-by: David Howells Reviewed-by: Marc Dionne Reviewed-by: Dan Carpenter Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/rxrpc/key.c | 64 ++++++++++++++++++++++++++----------------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 18c737a61d80..7fc340726d03 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -217,7 +217,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned int toklen = *_toklen, n_parts, loop, tmp; + unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen; /* there must be at least one name, and at least #names+1 length * words */ @@ -247,16 +247,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, toklen -= 4; if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX) return -EINVAL; - if (tmp > toklen) + paddedlen = (tmp + 3) & ~3; + if (paddedlen > toklen) return -EINVAL; princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL); if (!princ->name_parts[loop]) return -ENOMEM; memcpy(princ->name_parts[loop], xdr, tmp); princ->name_parts[loop][tmp] = 0; - tmp = (tmp + 3) & ~3; - toklen -= tmp; - xdr += tmp >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; } if (toklen < 4) @@ -265,16 +265,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, toklen -= 4; if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX) return -EINVAL; - if (tmp > toklen) + paddedlen = (tmp + 3) & ~3; + if (paddedlen > toklen) return -EINVAL; princ->realm = kmalloc(tmp + 1, GFP_KERNEL); if (!princ->realm) return -ENOMEM; memcpy(princ->realm, xdr, tmp); princ->realm[tmp] = 0; - tmp = (tmp + 3) & ~3; - toklen -= tmp; - xdr += tmp >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; _debug("%s/...@%s", princ->name_parts[0], princ->realm); @@ -293,7 +293,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned int toklen = *_toklen, len; + unsigned int toklen = *_toklen, len, paddedlen; /* there must be at least one tag and one length word */ if (toklen <= 8) @@ -307,15 +307,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, toklen -= 8; if (len > max_data_size) return -EINVAL; + paddedlen = (len + 3) & ~3; + if (paddedlen > toklen) + return -EINVAL; td->data_len = len; if (len > 0) { td->data = kmemdup(xdr, len, GFP_KERNEL); if (!td->data) return -ENOMEM; - len = (len + 3) & ~3; - toklen -= len; - xdr += len >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; } _debug("tag %x len %x", td->tag, td->data_len); @@ -387,7 +389,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, const __be32 **_xdr, unsigned int *_toklen) { const __be32 *xdr = *_xdr; - unsigned int toklen = *_toklen, len; + unsigned int toklen = *_toklen, len, paddedlen; /* there must be at least one length word */ if (toklen <= 4) @@ -399,6 +401,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, toklen -= 4; if (len > AFSTOKEN_K5_TIX_MAX) return -EINVAL; + paddedlen = (len + 3) & ~3; + if (paddedlen > toklen) + return -EINVAL; *_tktlen = len; _debug("ticket len %u", len); @@ -407,9 +412,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, *_ticket = kmemdup(xdr, len, GFP_KERNEL); 
if (!*_ticket) return -ENOMEM; - len = (len + 3) & ~3; - toklen -= len; - xdr += len >> 2; + toklen -= paddedlen; + xdr += paddedlen >> 2; } *_xdr = xdr; @@ -552,7 +556,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) { const __be32 *xdr = prep->data, *token; const char *cp; - unsigned int len, tmp, loop, ntoken, toklen, sec_ix; + unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix; size_t datalen = prep->datalen; int ret; @@ -578,22 +582,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) if (len < 1 || len > AFSTOKEN_CELL_MAX) goto not_xdr; datalen -= 4; - tmp = (len + 3) & ~3; - if (tmp > datalen) + paddedlen = (len + 3) & ~3; + if (paddedlen > datalen) goto not_xdr; cp = (const char *) xdr; for (loop = 0; loop < len; loop++) if (!isprint(cp[loop])) goto not_xdr; - if (len < tmp) - for (; loop < tmp; loop++) - if (cp[loop]) - goto not_xdr; + for (; loop < paddedlen; loop++) + if (cp[loop]) + goto not_xdr; _debug("cellname: [%u/%u] '%*.*s'", - len, tmp, len, len, (const char *) xdr); - datalen -= tmp; - xdr += tmp >> 2; + len, paddedlen, len, len, (const char *) xdr); + datalen -= paddedlen; + xdr += paddedlen >> 2; /* get the token count */ if (datalen < 12) @@ -614,10 +617,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep) sec_ix = ntohl(*xdr); datalen -= 4; _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix); - if (toklen < 20 || toklen > datalen) + paddedlen = (toklen + 3) & ~3; + if (toklen < 20 || toklen > datalen || paddedlen > datalen) goto not_xdr; - datalen -= (toklen + 3) & ~3; - xdr += (toklen + 3) >> 2; + datalen -= paddedlen; + xdr += paddedlen >> 2; } while (--loop > 0); -- GitLab From dcd015f733a74552eeefc5d70f55125c33eaa5d2 Mon Sep 17 00:00:00 2001 From: Tobias Wolf Date: Wed, 23 Nov 2016 10:40:07 +0100 Subject: [PATCH 435/786] of: Add check to of_scan_flat_dt() before accessing initial_boot_params commit 3ec754410cb3e931a6c4920b1a150f21a94a2bf4 upstream. An empty __dtb_start to __dtb_end section might result in initial_boot_params being null for arch/mips/ralink. This showed that the boot process hangs indefinitely in of_scan_flat_dt(). Signed-off-by: Tobias Wolf Cc: Sergei Shtylyov Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/14605/ Signed-off-by: Ralf Baechle Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/of/fdt.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 6a43fd3d0576..502f5547a1f2 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -741,9 +741,12 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node, const char *pathp; int offset, rc = 0, depth = -1; - for (offset = fdt_next_node(blob, -1, &depth); - offset >= 0 && depth >= 0 && !rc; - offset = fdt_next_node(blob, offset, &depth)) { + if (!blob) + return 0; + + for (offset = fdt_next_node(blob, -1, &depth); + offset >= 0 && depth >= 0 && !rc; + offset = fdt_next_node(blob, offset, &depth)) { pathp = fdt_get_name(blob, offset, NULL); if (*pathp == '/') -- GitLab From 5306119473b2dceb63de2e1aa0e6b81de432d039 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=ABl=20Esponde?= Date: Wed, 23 Nov 2016 12:47:40 +0100 Subject: [PATCH 436/786] mtd: spi-nor: fix spansion quad enable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 807c16253319ee6ccf8873ae64f070f7eb532cd5 upstream. 
With the S25FL127S nor flash part, each writing to the configuration register takes hundreds of ms. During that time, no more accesses to the flash should be done (even reads). This commit adds a wait loop after the register writing until the flash finishes its work. This issue could make rootfs mounting fail when the latter was done too much closely to this quad enable bit setting step. And in this case, a driver as UBIFS may try to recover the filesystem and may broke it completely. Signed-off-by: Joël Esponde Signed-off-by: Cyrille Pitchen Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/spi-nor/spi-nor.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index d0fc165d7d66..21dde5249085 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -1255,6 +1255,13 @@ static int spansion_quad_enable(struct spi_nor *nor) return -EINVAL; } + ret = spi_nor_wait_till_ready(nor); + if (ret) { + dev_err(nor->dev, + "timeout while writing configuration register\n"); + return ret; + } + /* read back and check it */ ret = read_cr(nor); if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { -- GitLab From 225969acc0f15aa2bc34602889ce040d4b51f2d4 Mon Sep 17 00:00:00 2001 From: William Wu Date: Tue, 25 Apr 2017 17:45:48 +0800 Subject: [PATCH 437/786] usb: gadget: f_fs: avoid out of bounds access on comp_desc commit b7f73850bb4fac1e2209a4dd5e636d39be92f42c upstream. Companion descriptor is only used for SuperSpeed endpoints, if the endpoints are HighSpeed or FullSpeed, the Companion descriptor will not allocated, so we can only access it if gadget is SuperSpeed. I can reproduce this issue on Rockchip platform rk3368 SoC which supports USB 2.0, and use functionfs for ADB. Kernel build with CONFIG_KASAN=y and CONFIG_SLUB_DEBUG=y report the following BUG: ================================================================== BUG: KASAN: slab-out-of-bounds in ffs_func_set_alt+0x224/0x3a0 at addr ffffffc0601f6509 Read of size 1 by task swapper/0/0 ============================================================================ BUG kmalloc-256 (Not tainted): kasan: bad access detected ---------------------------------------------------------------------------- Disabling lock debugging due to kernel taint INFO: Allocated in ffs_func_bind+0x52c/0x99c age=1275 cpu=0 pid=1 alloc_debug_processing+0x128/0x17c ___slab_alloc.constprop.58+0x50c/0x610 __slab_alloc.isra.55.constprop.57+0x24/0x34 __kmalloc+0xe0/0x250 ffs_func_bind+0x52c/0x99c usb_add_function+0xd8/0x1d4 configfs_composite_bind+0x48c/0x570 udc_bind_to_driver+0x6c/0x170 usb_udc_attach_driver+0xa4/0xd0 gadget_dev_desc_UDC_store+0xcc/0x118 configfs_write_file+0x1a0/0x1f8 __vfs_write+0x64/0x174 vfs_write+0xe4/0x200 SyS_write+0x68/0xc8 el0_svc_naked+0x24/0x28 INFO: Freed in inode_doinit_with_dentry+0x3f0/0x7c4 age=1275 cpu=7 pid=247 ... 
Call trace: [] dump_backtrace+0x0/0x230 [] show_stack+0x14/0x1c [] dump_stack+0xa0/0xc8 [] print_trailer+0x188/0x198 [] object_err+0x3c/0x4c [] kasan_report+0x324/0x4dc [] __asan_load1+0x24/0x50 [] ffs_func_set_alt+0x224/0x3a0 [] composite_setup+0xdcc/0x1ac8 [] android_setup+0x124/0x1a0 [] _setup+0x54/0x74 [] handle_ep0+0x3288/0x4390 [] dwc_otg_pcd_handle_out_ep_intr+0x14dc/0x2ae4 [] dwc_otg_pcd_handle_intr+0x1ec/0x298 [] dwc_otg_pcd_irq+0x10/0x20 [] handle_irq_event_percpu+0x124/0x3ac [] handle_irq_event+0x60/0xa0 [] handle_fasteoi_irq+0x10c/0x1d4 [] generic_handle_irq+0x30/0x40 [] __handle_domain_irq+0xac/0xdc [] gic_handle_irq+0x64/0xa4 ... Memory state around the buggy address: ffffffc0601f6400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ffffffc0601f6480: 00 00 00 00 00 00 00 00 00 00 06 fc fc fc fc fc >ffffffc0601f6500: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ^ ffffffc0601f6580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffffffc0601f6600: fc fc fc fc fc fc fc fc 00 00 00 00 00 00 00 00 ================================================================== Signed-off-by: William Wu Signed-off-by: Felipe Balbi Cc: Jerry Zhang Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_fs.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 89081b834615..04ffd7640c33 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1858,12 +1858,12 @@ static int ffs_func_eps_enable(struct ffs_function *func) ep->ep->driver_data = ep; ep->ep->desc = ds; - comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + - USB_DT_ENDPOINT_SIZE); - ep->ep->maxburst = comp_desc->bMaxBurst + 1; - - if (needs_comp_desc) + if (needs_comp_desc) { + comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds + + USB_DT_ENDPOINT_SIZE); + ep->ep->maxburst = comp_desc->bMaxBurst + 1; ep->ep->comp_desc = comp_desc; + } ret = usb_ep_enable(ep->ep); if (likely(!ret)) { -- GitLab From 948c4f17ab7ed9342e3dfdff88bb31266fe72790 Mon Sep 17 00:00:00 2001 From: Stanislaw Gruszka Date: Thu, 2 Feb 2017 10:57:40 +0100 Subject: [PATCH 438/786] rt2x00: avoid introducing a USB dependency in the rt2x00lib module commit 6232c17438ed01f43665197db5a98a4a4f77ef47 upstream. As reported by Felix: Though protected by an ifdef, introducing an usb symbol dependency in the rt2x00lib module is a major inconvenience for distributions that package kernel modules split into individual packages. Get rid of this unnecessary dependency by calling the usb related function from a more suitable place. 
Cc: Vishal Thanki Reported-by: Felix Fietkau Fixes: 8b4c0009313f ("rt2x00usb: Use usb anchor to manage URB") Signed-off-by: Stanislaw Gruszka Signed-off-by: Kalle Valo Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- .../net/wireless/ralink/rt2x00/rt2x00dev.c | 23 +++++++------------ .../net/wireless/ralink/rt2x00/rt2x00usb.c | 5 ++++ 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index b7273be9303d..c8d9075339cf 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -1422,21 +1422,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) cancel_work_sync(&rt2x00dev->intf_work); cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); cancel_work_sync(&rt2x00dev->sleep_work); -#if IS_ENABLED(CONFIG_RT2X00_LIB_USB) - if (rt2x00_is_usb(rt2x00dev)) { - usb_kill_anchored_urbs(rt2x00dev->anchor); - hrtimer_cancel(&rt2x00dev->txstatus_timer); - cancel_work_sync(&rt2x00dev->rxdone_work); - cancel_work_sync(&rt2x00dev->txdone_work); - } -#endif - if (rt2x00dev->workqueue) - destroy_workqueue(rt2x00dev->workqueue); - - /* - * Free the tx status fifo. - */ - kfifo_free(&rt2x00dev->txstatus_fifo); /* * Kill the tx status tasklet. @@ -1452,6 +1437,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) */ rt2x00lib_uninitialize(rt2x00dev); + if (rt2x00dev->workqueue) + destroy_workqueue(rt2x00dev->workqueue); + + /* + * Free the tx status fifo. + */ + kfifo_free(&rt2x00dev->txstatus_fifo); + /* * Free extra components */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 662705e31136..631df690adbe 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -740,6 +740,11 @@ void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; + usb_kill_anchored_urbs(rt2x00dev->anchor); + hrtimer_cancel(&rt2x00dev->txstatus_timer); + cancel_work_sync(&rt2x00dev->rxdone_work); + cancel_work_sync(&rt2x00dev->txdone_work); + queue_for_each(rt2x00dev, queue) rt2x00usb_free_entries(queue); } -- GitLab From 81135c71bde3346a39cec07b4a0b5de1f5fabdf1 Mon Sep 17 00:00:00 2001 From: Yendapally Reddy Dhananjaya Reddy Date: Wed, 8 Feb 2017 17:14:26 -0500 Subject: [PATCH 439/786] net: phy: Initialize mdio clock at probe function commit bb1a619735b4660f21bce3e728b937640024b4ad upstream. USB PHYs need the MDIO clock divisor enabled earlier to work. Initialize mdio clock divisor in probe function. The ext bus bit available in the same register will be used by mdio mux to enable external mdio. Signed-off-by: Yendapally Reddy Dhananjaya Reddy Fixes: ddc24ae1 ("net: phy: Broadcom iProc MDIO bus driver") Reviewed-by: Florian Fainelli Signed-off-by: Jon Mason Signed-off-by: David S. 
Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/mdio-bcm-iproc.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c index c0b4e65267af..46fe1ae919a3 100644 --- a/drivers/net/phy/mdio-bcm-iproc.c +++ b/drivers/net/phy/mdio-bcm-iproc.c @@ -81,8 +81,6 @@ static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg) if (rc) return rc; - iproc_mdio_config_clk(priv->base); - /* Prepare the read operation */ cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | (reg << MII_DATA_RA_SHIFT) | @@ -112,8 +110,6 @@ static int iproc_mdio_write(struct mii_bus *bus, int phy_id, if (rc) return rc; - iproc_mdio_config_clk(priv->base); - /* Prepare the write operation */ cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | (reg << MII_DATA_RA_SHIFT) | @@ -163,6 +159,8 @@ static int iproc_mdio_probe(struct platform_device *pdev) bus->read = iproc_mdio_read; bus->write = iproc_mdio_write; + iproc_mdio_config_clk(priv->base); + rc = of_mdiobus_register(bus, pdev->dev.of_node); if (rc) { dev_err(&pdev->dev, "MDIO bus registration failed\n"); -- GitLab From 25c7794ed046e9190200983e330f221170cb2b0a Mon Sep 17 00:00:00 2001 From: Matthias Reichl Date: Mon, 20 Feb 2017 20:01:16 +0100 Subject: [PATCH 440/786] dmaengine: bcm2835: Fix cyclic DMA period splitting commit 2201ac6129fa162ac24da089a034bb0971648ebb upstream. The code responsible for splitting periods into chunks that can be handled by the DMA controller missed to update total_len, the number of bytes processed in the current period, when there are more chunks to follow. Therefore total_len was stuck at 0 and the code didn't work at all. This resulted in a wrong control block layout and audio issues because the cyclic DMA callback wasn't executing on period boundaries. Fix this by adding the missing total_len update. Signed-off-by: Matthias Reichl Signed-off-by: Martin Sperl Tested-by: Clive Messer Reviewed-by: Eric Anholt Signed-off-by: Vinod Koul Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/dma/bcm2835-dma.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index e18dc596cf24..6204cc32d09c 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -251,8 +251,11 @@ static void bcm2835_dma_create_cb_set_length( */ /* have we filled in period_length yet? */ - if (*total_len + control_block->length < period_len) + if (*total_len + control_block->length < period_len) { + /* update number of bytes in this period so far */ + *total_len += control_block->length; return; + } /* calculate the length that remains to reach period_length */ control_block->length = period_len - *total_len; -- GitLab From 5da6415e427beaca48ae4f0d7385e2bcdba5b308 Mon Sep 17 00:00:00 2001 From: Hauke Mehrtens Date: Mon, 17 Apr 2017 01:38:05 +0200 Subject: [PATCH 441/786] spi: double time out tolerance commit 833bfade96561216aa2129516a5926a0326860a2 upstream. The generic SPI code calculates how long the issued transfer would take and adds 100ms in addition to the timeout as tolerance. On my 500 MHz Lantiq Mips SoC I am getting timeouts from the SPI like this when the system boots up: m25p80 spi32766.4: SPI transfer timed out blk_update_request: I/O error, dev mtdblock3, sector 2 SQUASHFS error: squashfs_read_data failed to read block 0x6e After increasing the tolerance for the timeout to 200ms I haven't seen these SPI transfer time outs any more. 
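For a sense of the numbers involved, the budget computed in spi_transfer_one_message() (quoted in the diff further below) is roughly twice the estimated on-the-wire time plus the fixed slack this patch doubles. Restated as ordinary user-space C with an arbitrarily chosen example transfer:

#include <stdio.h>

/* Mirrors the core's per-transfer budget; len in bytes, speed in Hz. */
static unsigned long long transfer_timeout_ms(unsigned long long len,
                                              unsigned long long speed_hz,
                                              unsigned long long tolerance_ms)
{
        unsigned long long ms = 8ULL * 1000ULL * len / speed_hz;        /* time on the wire */

        ms += ms + tolerance_ms;        /* double it and add the fixed slack */
        return ms;
}

int main(void)
{
        /* e.g. a 256-byte flash read at 10 MHz */
        printf("old budget: %llu ms\n", transfer_timeout_ms(256, 10000000, 100));
        printf("new budget: %llu ms\n", transfer_timeout_ms(256, 10000000, 200));
        return 0;
}

For short or fast transfers the integer on-the-wire estimate rounds down to zero, so the fixed tolerance is effectively the whole budget, which is why raising it from 100 ms to 200 ms is what absorbs the extra latency described next.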
The Lantiq SPI driver in use here has an extra work queue in between, which gets triggered when the controller send the last word and the hardware FIFOs used for reading and writing are only 8 words long. Signed-off-by: Hauke Mehrtens Signed-off-by: Mark Brown Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 838783c3fed0..24d4492d0168 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1004,7 +1004,7 @@ static int spi_transfer_one_message(struct spi_master *master, ret = 0; ms = 8LL * 1000LL * xfer->len; do_div(ms, xfer->speed_hz); - ms += ms + 100; /* some tolerance */ + ms += ms + 200; /* some tolerance */ if (ms > UINT_MAX) ms = UINT_MAX; -- GitLab From ffa96c1a6afe251d667ae9a9d83f1ef87c3b029e Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 30 May 2017 16:21:51 +0100 Subject: [PATCH 442/786] net: phy: fix marvell phy status reading commit 898805e0cdf7fd860ec21bf661d3a0285a3defbd upstream. The Marvell driver incorrectly provides phydev->lp_advertising as the logical and of the link partner's advert and our advert. This is incorrect - this field is supposed to store the link parter's unmodified advertisment. This allows ethtool to report the correct link partner auto-negotiation status. Fixes: be937f1f89ca ("Marvell PHY m88e1111 driver fix") Signed-off-by: Russell King Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/marvell.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index d6a541bde331..2f70f80de27f 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1114,8 +1114,6 @@ static int marvell_read_status_page(struct phy_device *phydev, int page) if (adv < 0) return adv; - lpa &= adv; - if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) phydev->duplex = DUPLEX_FULL; else -- GitLab From 21eaaa76b701df5becdc6163cb88eeb7915ba629 Mon Sep 17 00:00:00 2001 From: Gleb Fotengauer-Malinovskiy Date: Mon, 20 Mar 2017 20:15:53 +0300 Subject: [PATCH 443/786] jump label: fix passing kbuild_cflags when checking for asm goto support commit 7292ae3d5a18fb922be496e6bb687647193569b4 upstream. The latest change of asm goto support check added passing of KBUILD_CFLAGS to compiler. When these flags reference gcc plugins that are not built yet, the check fails. When one runs "make bzImage" followed by "make modules", the kernel is always built with HAVE_JUMP_LABEL disabled, while the modules are built depending on CONFIG_JUMP_LABEL. If HAVE_JUMP_LABEL macro happens to be different, modules are built with undefined references, e.g.: ERROR: "static_key_slow_inc" [net/netfilter/xt_TEE.ko] undefined! ERROR: "static_key_slow_dec" [net/netfilter/xt_TEE.ko] undefined! ERROR: "static_key_slow_dec" [net/netfilter/nft_meta.ko] undefined! ERROR: "static_key_slow_inc" [net/netfilter/nft_meta.ko] undefined! ERROR: "nf_hooks_needed" [net/netfilter/ipvs/ip_vs.ko] undefined! ERROR: "nf_hooks_needed" [net/ipv6/ipv6.ko] undefined! ERROR: "static_key_count" [net/ipv6/ipv6.ko] undefined! ERROR: "static_key_slow_inc" [net/ipv6/ipv6.ko] undefined! This change moves the check before all these references are added to KBUILD_CFLAGS. This is correct because subsequent KBUILD_CFLAGS modifications are not relevant to this check. Reported-by: Anton V. 
Boyarshinov Fixes: 35f860f9ba6a ("jump label: pass kbuild_cflags when checking for asm goto support") Signed-off-by: Gleb Fotengauer-Malinovskiy Signed-off-by: Dmitry V. Levin Acked-by: Steven Rostedt (VMware) Acked-by: David Lin Signed-off-by: Masahiro Yamada Signed-off-by: Greg Kroah-Hartman --- Makefile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index a40b373eba3a..ef02260cc2ba 100644 --- a/Makefile +++ b/Makefile @@ -651,6 +651,12 @@ KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0409, \ # Tell gcc to never replace conditional load with a non-conditional one KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0) +# check for 'asm goto' +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) + KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO + KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO +endif + include scripts/Makefile.gcc-plugins ifdef CONFIG_READABLE_ASM @@ -796,12 +802,6 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) # use the deterministic mode of AR if available KBUILD_ARFLAGS := $(call ar-option,D) -# check for 'asm goto' -ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y) - KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO - KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO -endif - include scripts/Makefile.kasan include scripts/Makefile.extrawarn include scripts/Makefile.ubsan -- GitLab From 855b08e57777f54a7f26563a3061a385f6d04ebc Mon Sep 17 00:00:00 2001 From: Arend Van Spriel Date: Fri, 16 Jun 2017 09:36:35 +0100 Subject: [PATCH 444/786] brcmfmac: fix uninitialized warning in brcmf_usb_probe_phase2() commit 35abcd4f9f303ac4f10f99b3f7e993e5f2e6fa37 upstream. This fixes the following warning: drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c: In function 'brcmf_usb_probe_phase2': drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c:1198:2: warning: 'devinfo' may be used uninitialized in this function [-Wmaybe-uninitialized] mutex_unlock(&devinfo->dev_init_lock); Fixes: 6d0507a777fb ("brcmfmac: add parameter to pass error code in firmware callback") Cc: Stephen Rothwell Reported-by: Kalle Valo Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 52b9fc11199b..053f3b59f21e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1163,14 +1163,13 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret, void *nvram, u32 nvlen) { struct brcmf_bus *bus = dev_get_drvdata(dev); - struct brcmf_usbdev_info *devinfo; + struct brcmf_usbdev_info *devinfo = bus->bus_priv.usb->devinfo; if (ret) goto error; brcmf_dbg(USB, "Start fw downloading\n"); - devinfo = bus->bus_priv.usb->devinfo; ret = check_file(fw->data); if (ret < 0) { brcmf_err("invalid firmware\n"); -- GitLab From 92905e331aea278665c4b27fbb13c4547a8bcbb0 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 29 Jun 2017 13:00:49 +0200 Subject: [PATCH 445/786] Linux 4.9.35 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index ef02260cc2ba..0a8d47465f97 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 9 -SUBLEVEL = 34 +SUBLEVEL = 35 EXTRAVERSION = NAME = Roaring 
Lionus -- GitLab From c652ccf16675a64da6f3f3179e15c8e8bca42b30 Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Wed, 28 Jun 2017 18:21:18 -0400 Subject: [PATCH 446/786] drm/msm/sde: disable sspp multirect during plane disable Update the sspp programming for multirect during disable updates for virtual planes. This prevents an issue with the master plane being left in multirect mode if the virtual plane is removed without an atomic update to the master plane. CRs-Fixed: 2067183 Change-Id: Iadde83cfdec19a7246e38a7eb30a6031e5ba5ddb Signed-off-by: Clarence Ip --- drivers/gpu/drm/msm/sde/sde_plane.c | 39 +++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 2a98af45d2f6..d00e720457d1 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -3453,13 +3453,12 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane, return 0; } -static void sde_plane_atomic_update(struct drm_plane *plane, +static void _sde_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *old_state) { struct sde_plane *psde; struct drm_plane_state *state; struct sde_plane_state *pstate; - struct sde_plane_state *old_pstate; if (!plane) { SDE_ERROR("invalid plane\n"); @@ -3467,20 +3466,50 @@ static void sde_plane_atomic_update(struct drm_plane *plane, } else if (!plane->state) { SDE_ERROR("invalid plane state\n"); return; + } else if (!old_state) { + SDE_ERROR("invalid old state\n"); + return; } psde = to_sde_plane(plane); - psde->is_error = false; state = plane->state; pstate = to_sde_plane_state(state); - old_pstate = to_sde_plane_state(old_state); + + SDE_EVT32(DRMID(plane), is_sde_plane_virtual(plane), + pstate->multirect_mode); + + pstate->pending = true; + + if (is_sde_plane_virtual(plane) && + psde->pipe_hw && psde->pipe_hw->ops.setup_multirect) + psde->pipe_hw->ops.setup_multirect(psde->pipe_hw, + SDE_SSPP_RECT_SOLO, SDE_SSPP_MULTIRECT_NONE); +} + +static void sde_plane_atomic_update(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct sde_plane *psde; + struct drm_plane_state *state; + + if (!plane) { + SDE_ERROR("invalid plane\n"); + return; + } else if (!plane->state) { + SDE_ERROR("invalid plane state\n"); + return; + } + + psde = to_sde_plane(plane); + psde->is_error = false; + state = plane->state; SDE_DEBUG_PLANE(psde, "\n"); sde_plane_rot_atomic_update(plane, old_state); if (!sde_plane_sspp_enabled(state)) { - pstate->pending = true; + _sde_plane_atomic_disable(plane, old_state); } else { int ret; -- GitLab From 74ff856e8d6e0d320bfd075491ea4453c6f4d7f3 Mon Sep 17 00:00:00 2001 From: Arun Kumar Neelakantam Date: Fri, 26 May 2017 17:57:52 +0530 Subject: [PATCH 447/786] net: ipc_router: Add dynamic enable/disable wakeup source feature By default IPC Router core uses edge and port specific wakeup sources which are blocking system suspend in special use cases like streaming non wakeup sensors data at high speed. Add dynamic wakeup source enable/disable functionality to acquire the wakeup source only during the APPS suspend stage. 
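The patch itself only adds the msm_ipc_router_set_ws_allowed() knob and the per-transport flag; no caller appears in this series. Purely as an assumed usage, not something this patch implements, a platform could drive the knob from a PM notifier so that wakeup sources are only permitted across the suspend window:

#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/ipc_router.h>

/* Hypothetical caller: allow IPC Router wakeup sources only around suspend entry/exit. */
static int ipc_ws_pm_notify(struct notifier_block *nb, unsigned long event, void *data)
{
        switch (event) {
        case PM_SUSPEND_PREPARE:
                msm_ipc_router_set_ws_allowed(true);
                break;
        case PM_POST_SUSPEND:
                msm_ipc_router_set_ws_allowed(false);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block ipc_ws_pm_nb = {
        .notifier_call = ipc_ws_pm_notify,
};

static int __init ipc_ws_policy_init(void)
{
        return register_pm_notifier(&ipc_ws_pm_nb);
}

static void __exit ipc_ws_policy_exit(void)
{
        unregister_pm_notifier(&ipc_ws_pm_nb);
}

module_init(ipc_ws_policy_init);
module_exit(ipc_ws_policy_exit);
MODULE_LICENSE("GPL v2");

Whatever the real policy ends up being, it would live wherever the platform decides it has entered the APPS suspend stage mentioned above.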
CRs-Fixed: 2057391 Change-Id: I2a5ea4e2c31432a9dd195c702ef7001b26eed33d Signed-off-by: Arun Kumar Neelakantam --- .../arm/msm/msm_ipc_router_glink_xprt.txt | 2 ++ drivers/soc/qcom/ipc_router_glink_xprt.c | 21 +++++++++++++- include/linux/ipc_router.h | 12 +++++++- include/linux/ipc_router_xprt.h | 6 +++- net/ipc_router/ipc_router_core.c | 28 +++++++++++++++++-- 5 files changed, 63 insertions(+), 6 deletions(-) diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt index 9e1d230432cf..c5d052cd6039 100644 --- a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt +++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt @@ -17,6 +17,8 @@ Optional properties: by pil. Absence of this property indicates that subsystem loading through pil voting is disabled for that subsystem. +-qcom,dynamic-wakeup-source: Boolean property to indicate that G-Link + transport supports dynamic wakeup source Example: qcom,ipc_router_modem_xprt { diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c index 458e39d53fa1..cef3c7716e8f 100644 --- a/drivers/soc/qcom/ipc_router_glink_xprt.c +++ b/drivers/soc/qcom/ipc_router_glink_xprt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -70,6 +70,7 @@ if (ipc_router_glink_xprt_debug_mask) \ * @xprt_version: IPC Router header version supported by this XPRT. * @xprt_option: XPRT specific options to be handled by IPC Router. * @disable_pil_loading: Disable PIL Loading of the subsystem. + * @dynamic_wakeup_source: Dynamic wakeup source for this subsystem. */ struct ipc_router_glink_xprt { struct list_head list; @@ -91,6 +92,7 @@ struct ipc_router_glink_xprt { uint32_t cur_lo_intents_cnt; uint32_t cur_md_intents_cnt; uint32_t cur_hi_intents_cnt; + bool dynamic_wakeup_source; }; struct ipc_router_glink_xprt_work { @@ -127,6 +129,7 @@ static void glink_xprt_close_event(struct work_struct *work); * @link_id: Network Cluster ID to which this XPRT belongs to. * @xprt_version: IPC Router header version supported by this XPRT. * @disable_pil_loading:Disable PIL Loading of the subsystem. + * @dynamic_wakeup_source: Dynamic wakeup source for this subsystem. 
*/ struct ipc_router_glink_xprt_config { char ch_name[GLINK_NAME_SIZE]; @@ -138,6 +141,7 @@ struct ipc_router_glink_xprt_config { unsigned int xprt_version; unsigned int xprt_option; bool disable_pil_loading; + bool dynamic_wakeup_source; }; #define MODULE_NAME "ipc_router_glink_xprt" @@ -294,6 +298,14 @@ static void glink_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt) complete_all(&glink_xprtp->sft_close_complete); } +static bool ipc_router_glink_xprt_get_ws_info(struct msm_ipc_router_xprt *xprt) +{ + struct ipc_router_glink_xprt *glink_xprtp = + container_of(xprt, struct ipc_router_glink_xprt, xprt); + + return glink_xprtp->dynamic_wakeup_source; +} + static struct rr_packet *glink_xprt_copy_data(struct read_work *rx_work) { void *buf, *pbuf, *dest_buf; @@ -706,6 +718,8 @@ static int ipc_router_glink_config_init( glink_xprtp->xprt_option = glink_xprt_config->xprt_option; glink_xprtp->disable_pil_loading = glink_xprt_config->disable_pil_loading; + glink_xprtp->dynamic_wakeup_source = + glink_xprt_config->dynamic_wakeup_source; if (!glink_xprtp->disable_pil_loading) strlcpy(glink_xprtp->pil_edge, glink_xprt_config->pil_edge, @@ -728,6 +742,7 @@ static int ipc_router_glink_config_init( glink_xprtp->xprt.write = ipc_router_glink_xprt_write; glink_xprtp->xprt.close = ipc_router_glink_xprt_close; glink_xprtp->xprt.sft_close_done = glink_xprt_sft_close_done; + glink_xprtp->xprt.get_ws_info = ipc_router_glink_xprt_get_ws_info; glink_xprtp->xprt.priv = NULL; init_rwsem(&glink_xprtp->ss_reset_rwlock); @@ -822,6 +837,10 @@ static int parse_devicetree(struct device_node *node, scnprintf(glink_xprt_config->ipc_rtr_xprt_name, IPC_RTR_XPRT_NAME_LEN, "%s_%s", edge, ch_name); + key = "qcom,dynamic-wakeup-source"; + glink_xprt_config->dynamic_wakeup_source = + of_property_read_bool(node, key); + return 0; error: diff --git a/include/linux/ipc_router.h b/include/linux/ipc_router.h index 8adf72388897..767551e71ad7 100644 --- a/include/linux/ipc_router.h +++ b/include/linux/ipc_router.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -269,6 +269,14 @@ int register_ipcrtr_af_init_notifier(struct notifier_block *nb); */ int unregister_ipcrtr_af_init_notifier(struct notifier_block *nb); +/** + * msm_ipc_router_set_ws_allowed() - To Enable/disable the wakeup source allowed + * flag + * @flag: Flag to set/clear the wakeup soruce allowed + * + */ +void msm_ipc_router_set_ws_allowed(bool flag); + #else struct msm_ipc_port *msm_ipc_router_create_port( @@ -341,6 +349,8 @@ int unregister_ipcrtr_af_init_notifier(struct notifier_block *nb) return -ENODEV; } +void msm_ipc_router_set_ws_allowed(bool flag) { } + #endif #endif diff --git a/include/linux/ipc_router_xprt.h b/include/linux/ipc_router_xprt.h index e33a10a6cbd4..c4b78540ff3d 100644 --- a/include/linux/ipc_router_xprt.h +++ b/include/linux/ipc_router_xprt.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -100,6 +100,7 @@ struct rr_opt_hdr { * @pkt_fragment_q: Queue of SKBs containing payload. 
* @length: Length of data in the chain of SKBs * @ref: Reference count for the packet. + * @ws_need: Flag to check wakeup soruce need */ struct rr_packet { struct list_head list; @@ -108,6 +109,7 @@ struct rr_packet { struct sk_buff_head *pkt_fragment_q; uint32_t length; struct kref ref; + bool ws_need; }; /** @@ -125,6 +127,7 @@ struct rr_packet { * @close: Method to close the XPRT. * @sft_close_done: Method to indicate to the XPRT that handling of reset * event is complete. + * @get_ws_info: Method to get the wakeup soruce inforamtion of the XPRT */ struct msm_ipc_router_xprt { char *name; @@ -143,6 +146,7 @@ struct msm_ipc_router_xprt { struct msm_ipc_router_xprt *xprt); int (*close)(struct msm_ipc_router_xprt *xprt); void (*sft_close_done)(struct msm_ipc_router_xprt *xprt); + bool (*get_ws_info)(struct msm_ipc_router_xprt *xprt); }; void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt, diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c index e0578870a1ad..7c8af29fac13 100644 --- a/net/ipc_router/ipc_router_core.c +++ b/net/ipc_router/ipc_router_core.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -148,6 +148,7 @@ struct msm_ipc_router_xprt_info { void *log_ctx; struct kref ref; struct completion ref_complete; + bool dynamic_ws; }; #define RT_HASH_SIZE 4 @@ -215,6 +216,13 @@ enum { UP, }; +static bool is_wakeup_source_allowed; + +void msm_ipc_router_set_ws_allowed(bool flag) +{ + is_wakeup_source_allowed = flag; +} + static void init_routing_table(void) { int i; @@ -580,6 +588,7 @@ struct rr_packet *clone_pkt(struct rr_packet *pkt) } cloned_pkt->pkt_fragment_q = pkt_fragment_q; cloned_pkt->length = pkt->length; + cloned_pkt->ws_need = pkt->ws_need; return cloned_pkt; fail_clone: @@ -1162,7 +1171,8 @@ static int post_pkt_to_port(struct msm_ipc_port *port_ptr, } mutex_lock(&port_ptr->port_rx_q_lock_lhc3); - __pm_stay_awake(port_ptr->port_rx_ws); + if (pkt->ws_need) + __pm_stay_awake(port_ptr->port_rx_ws); list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q); wake_up(&port_ptr->port_rx_wait_q); notify = port_ptr->notify; @@ -4043,6 +4053,9 @@ static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt) INIT_LIST_HEAD(&xprt_info->list); kref_init(&xprt_info->ref); init_completion(&xprt_info->ref_complete); + xprt_info->dynamic_ws = 0; + if (xprt->get_ws_info) + xprt_info->dynamic_ws = xprt->get_ws_info(xprt); xprt_info->workqueue = create_singlethread_workqueue(xprt->name); if (!xprt_info->workqueue) { @@ -4193,9 +4206,18 @@ void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt, if (!pkt) return; + pkt->ws_need = false; mutex_lock(&xprt_info->rx_lock_lhb2); list_add_tail(&pkt->list, &xprt_info->pkt_list); - __pm_stay_awake(&xprt_info->ws); + if (!xprt_info->dynamic_ws) { + __pm_stay_awake(&xprt_info->ws); + pkt->ws_need = true; + } else { + if (is_wakeup_source_allowed) { + __pm_stay_awake(&xprt_info->ws); + pkt->ws_need = true; + } + } mutex_unlock(&xprt_info->rx_lock_lhb2); queue_work(xprt_info->workqueue, &xprt_info->read_data); } -- GitLab From 6947b8b591ab7476e24942bf481a7708e83cacc4 Mon Sep 17 00:00:00 2001 From: Arun Kumar Neelakantam Date: Thu, 29 Jun 2017 21:39:22 +0530 Subject: [PATCH 448/786] ARM: dts: msm: Add new property to DSPS IPC router node for SDM845 Add the 
dynamic wakeup source property to IPC router dsps xprt. CRs-Fixed: 2057391 Change-Id: Ia4eac4490b0cb46251dc86af63db75bcc78f868d Signed-off-by: Arun Kumar Neelakantam --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 2f718bb83cf2..2180a26fdf8e 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -2471,6 +2471,7 @@ qcom,xprt-linkid = <1>; qcom,xprt-version = <1>; qcom,fragmented-data; + qcom,dynamic-wakeup-source; }; qcom,ipc_router_cdsp_xprt { -- GitLab From f5312af82cf8e16ca1cb45665db59de355d51ca3 Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Wed, 28 Jun 2017 20:32:44 -0700 Subject: [PATCH 449/786] defconfig: msm: Add loadable wcd9xxx core driver config Add config entry for the loadable module wcd9xxx core driver, codec core is selected from this loadable driver config. CRs-fixed: 2068879 Change-Id: I329efee478f083a08838d6871476da58eabb451c Signed-off-by: Karthikeyan Mani --- arch/arm64/configs/sdm845-perf_defconfig | 1 + arch/arm64/configs/sdm845_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index ec546ec6b4fc..e27d3d5a5b52 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -331,6 +331,7 @@ CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y +CONFIG_WCD9XXX_CODEC_CORE=y CONFIG_WCD934X_CODEC=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_CPRH_KBSS=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 5c97db25266b..31acac3e9699 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -338,6 +338,7 @@ CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y +CONFIG_WCD9XXX_CODEC_CORE=y CONFIG_WCD934X_CODEC=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_CPRH_KBSS=y -- GitLab From 38e9b250ad808645616de017d1914378d00398c3 Mon Sep 17 00:00:00 2001 From: Mayank Rana Date: Thu, 23 Mar 2017 12:35:57 -0700 Subject: [PATCH 450/786] usb: pd: Start USB host mode functionality with SRC_STARTUP USB super speed host mode functionality and DP can be supported concurrently. Hence start USB host functionality into super speed mode on entering to SRC_STARTUP state. This change removes delayed USB host mode functionality based on status of non-PD capable device OR PD non-DP capable device. 
Change-Id: Iffebe0077222872039d5547ea8cdbef057a75ca5 Signed-off-by: Mayank Rana --- drivers/usb/pd/policy_engine.c | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c index 5643613f5e24..2682d297dc78 100644 --- a/drivers/usb/pd/policy_engine.c +++ b/drivers/usb/pd/policy_engine.c @@ -763,10 +763,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state) case PE_SRC_STARTUP: if (pd->current_dr == DR_NONE) { pd->current_dr = DR_DFP; - /* - * Defer starting USB host mode until PE_SRC_READY or - * when PE_SRC_SEND_CAPABILITIES fails - */ + start_usb_host(pd, true); } dual_role_instance_changed(pd->dual_role); @@ -1302,14 +1299,6 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg) if (svid == 0xFF01) has_dp = true; } - - /* - * Finally start USB host now that we have determined - * if DisplayPort mode is present or not and limit USB - * to HS-only mode if so. - */ - start_usb_host(pd, !has_dp); - break; default: @@ -1326,7 +1315,6 @@ static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg) switch (cmd) { case USBPD_SVDM_DISCOVER_IDENTITY: case USBPD_SVDM_DISCOVER_SVIDS: - start_usb_host(pd, true); break; default: break; @@ -1723,11 +1711,7 @@ static void usbpd_sm(struct work_struct *w) ARRAY_SIZE(default_src_caps), SOP_MSG); if (ret) { pd->caps_count++; - - if (pd->caps_count == 10 && pd->current_dr == DR_DFP) { - /* Likely not PD-capable, start host now */ - start_usb_host(pd, true); - } else if (pd->caps_count >= PD_CAPS_COUNT) { + if (pd->caps_count >= PD_CAPS_COUNT) { usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n"); usbpd_set_state(pd, PE_SRC_DISABLED); -- GitLab From 858132b0c3f385b2579be1a1f6ff52cf5c95b3eb Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Tue, 27 Jun 2017 20:03:08 -0700 Subject: [PATCH 451/786] ASoC: wcd9xxx: Add support to build codecs as module Add support to build, load and unload wcd9335 and wcd934x codec modules separately from the kernel. CRs-fixed: 2068879 Change-Id: I15f0b495691b7708894b512efedf677bf0f7d871 Signed-off-by: Karthikeyan Mani --- include/sound/wcd-dsp-mgr.h | 5 +- sound/soc/codecs/Kconfig | 11 +- sound/soc/codecs/Makefile | 23 +-- sound/soc/codecs/audio-ext-clk-up.c | 7 +- sound/soc/codecs/audio-ext-clk-up.h | 20 +++ sound/soc/codecs/audio-ext-clk.c | 7 +- sound/soc/codecs/wcd-dsp-mgr.c | 11 +- sound/soc/codecs/wcd-mbhc-adc.h | 2 +- sound/soc/codecs/wcd-mbhc-v2-api.h | 2 +- sound/soc/codecs/wcd9335.c | 203 +----------------------- sound/soc/codecs/wcd9335.h | 38 ----- sound/soc/codecs/wcd934x/Makefile | 7 +- sound/soc/codecs/wcd934x/wcd934x-dsd.h | 4 +- sound/soc/codecs/wcd934x/wcd934x-mbhc.c | 1 + sound/soc/codecs/wcd934x/wcd934x-mbhc.h | 2 +- sound/soc/codecs/wcd934x/wcd934x.c | 166 +------------------ sound/soc/codecs/wcd934x/wcd934x.h | 39 ----- sound/soc/codecs/wcd9xxx-common-v2.c | 1 + sound/soc/codecs/wcd9xxx-resmgr-v2.c | 15 +- sound/soc/codecs/wcd9xxx-soc-init.c | 45 ++++++ sound/soc/codecs/wcd_cpe_core.c | 1 + sound/soc/msm/Kconfig | 8 +- 22 files changed, 129 insertions(+), 489 deletions(-) create mode 100644 sound/soc/codecs/audio-ext-clk-up.h create mode 100644 sound/soc/codecs/wcd9xxx-soc-init.c diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h index 2beb9b38a46a..8b6b446b6187 100644 --- a/include/sound/wcd-dsp-mgr.h +++ b/include/sound/wcd-dsp-mgr.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -15,6 +15,7 @@ #define __WCD_DSP_MGR_H__ #include +#include /* * These enums correspond to the component types @@ -133,4 +134,6 @@ struct wdsp_mgr_ops { int (*resume)(struct device *wdsp_dev); }; +int wcd_dsp_mgr_init(void); +void wcd_dsp_mgr_exit(void); #endif /* end of __WCD_DSP_MGR_H__ */ diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 08fb16887cb2..e7475fb1901e 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -880,19 +880,20 @@ config SND_SOC_WCD934X_DSD config SND_SOC_WCD9335 tristate - depends on WCD9335_CODEC + select SND_SOC_WCD9XXX select SND_SOC_WCD_MBHC select SND_SOC_WCD_MBHC_LEGACY + select SND_SOC_WCD_CPE config SND_SOC_WCD934X tristate - depends on WCD934X_CODEC - select SND_SOC_WCD9XXX_V2 + select SND_SOC_WCD9XXX select AUDIO_EXT_CLK select SND_SOC_WCD_DSP_MGR select SND_SOC_WCD_SPI select SND_SOC_WCD934X_MBHC select SND_SOC_WCD934X_DSD + select WCD_DSP_GLINK config SND_SOC_WCD934X_MBHC tristate @@ -913,9 +914,9 @@ config SND_SOC_WSA881X_ANALOG tristate select REGMAP_I2C -config SND_SOC_WCD9XXX_V2 +config SND_SOC_WCD9XXX tristate - default y if SND_SOC_WCD9335=y + default y if SND_SOC_WCD9335=y || SND_SOC_WCD934X=y config SND_SOC_WCD_CPE tristate diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile index 96ebd536fc35..d5e4ab25533d 100644 --- a/sound/soc/codecs/Makefile +++ b/sound/soc/codecs/Makefile @@ -160,25 +160,25 @@ snd-soc-uda134x-objs := uda134x.o snd-soc-uda1380-objs := uda1380.o snd-soc-wcd9335-objs := wcd9335.o snd-soc-wcd934x-objs := wcd934x.o -snd-soc-wcd9xxx-v2-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o wcdcal-hwdep.o +snd-soc-wcd9xxx-objs := wcd9xxx-common-v2.o wcd9xxx-resmgr-v2.o \ + wcdcal-hwdep.o wcd-dsp-mgr.o wcd-dsp-utils.o \ + wcd9xxx-soc-init.o ifeq ($(CONFIG_COMMON_CLK_MSM), y) - audio-ext-clock-objs := audio-ext-clk.o + snd-soc-wcd9xxx-objs += audio-ext-clk.o endif ifeq ($(CONFIG_COMMON_CLK_QCOM), y) - audio-ext-clock-up-objs := audio-ext-clk-up.o + snd-soc-wcd9xxx-objs += audio-ext-clk-up.o endif snd-soc-wcd-cpe-objs := wcd_cpe_services.o wcd_cpe_core.o snd-soc-wsa881x-objs := wsa881x.o wsa881x-tables.o wsa881x-regmap.o wsa881x-temp-sensor.o -ifeq ($(CONFIG_SND_SOC_WCD_MBHC_LEGACY), y) +ifneq (,$(filter $(CONFIG_SND_SOC_WCD_MBHC_LEGACY),y m)) snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-legacy.o -else ifeq ($(CONFIG_SND_SOC_WCD_MBHC_ADC), y) +else ifneq (,$(filter $(CONFIG_SND_SOC_WCD_MBHC_ADC),y m)) snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o wcd-mbhc-adc.o endif snd-soc-wsa881x-analog-objs := wsa881x-analog.o wsa881x-tables-analog.o snd-soc-wsa881x-analog-objs += wsa881x-regmap-analog.o wsa881x-irq.o -snd-soc-wcd-dsp-utils-objs := wcd-dsp-utils.o -snd-soc-wcd-dsp-mgr-objs := wcd-dsp-mgr.o snd-soc-wcd-spi-objs := wcd-spi.o snd-soc-wl1273-objs := wl1273.o snd-soc-wm-adsp-objs := wm_adsp.o @@ -406,19 +406,12 @@ obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o obj-$(CONFIG_SND_SOC_WCD9320) += snd-soc-wcd9320.o obj-$(CONFIG_SND_SOC_WCD9335) += snd-soc-wcd9335.o obj-$(CONFIG_SND_SOC_WCD934X) += wcd934x/ -ifeq ($(CONFIG_COMMON_CLK_MSM), y) - obj-$(CONFIG_AUDIO_EXT_CLK) += audio-ext-clock.o -endif -ifeq ($(CONFIG_COMMON_CLK_QCOM), y) - obj-$(CONFIG_AUDIO_EXT_CLK) += audio-ext-clock-up.o -endif -obj-$(CONFIG_SND_SOC_WCD9XXX_V2) += snd-soc-wcd9xxx-v2.o 
+obj-$(CONFIG_SND_SOC_WCD9XXX) += snd-soc-wcd9xxx.o obj-$(CONFIG_SND_SOC_WCD_CPE) += snd-soc-wcd-cpe.o obj-$(CONFIG_SND_SOC_WCD_MBHC) += snd-soc-wcd-mbhc.o obj-$(CONFIG_SND_SOC_WSA881X) += snd-soc-wsa881x.o obj-$(CONFIG_SND_SOC_WSA881X_ANALOG) += snd-soc-wsa881x-analog.o obj-$(CONFIG_SND_SOC_WL1273) += snd-soc-wl1273.o -obj-$(CONFIG_SND_SOC_WCD_DSP_MGR) += snd-soc-wcd-dsp-mgr.o snd-soc-wcd-dsp-utils.o obj-$(CONFIG_SND_SOC_WCD_SPI) += snd-soc-wcd-spi.o obj-$(CONFIG_SND_SOC_WM0010) += snd-soc-wm0010.o obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o diff --git a/sound/soc/codecs/audio-ext-clk-up.c b/sound/soc/codecs/audio-ext-clk-up.c index f12f4ca65a54..3b54096b8347 100644 --- a/sound/soc/codecs/audio-ext-clk-up.c +++ b/sound/soc/codecs/audio-ext-clk-up.c @@ -23,6 +23,7 @@ #include #include #include +#include "audio-ext-clk-up.h" enum audio_clk_mux { AP_CLK2, @@ -611,17 +612,15 @@ static struct platform_driver audio_ref_clk_driver = { .remove = audio_ref_clk_remove, }; -static int __init audio_ref_clk_platform_init(void) +int audio_ref_clk_platform_init(void) { return platform_driver_register(&audio_ref_clk_driver); } -module_init(audio_ref_clk_platform_init); -static void __exit audio_ref_clk_platform_exit(void) +void audio_ref_clk_platform_exit(void) { platform_driver_unregister(&audio_ref_clk_driver); } -module_exit(audio_ref_clk_platform_exit); MODULE_DESCRIPTION("Audio Ref Up Clock module platform driver"); MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/audio-ext-clk-up.h b/sound/soc/codecs/audio-ext-clk-up.h new file mode 100644 index 000000000000..8a0232e11d0f --- /dev/null +++ b/sound/soc/codecs/audio-ext-clk-up.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __AUDIO_EXT_CLK_UP_H_ +#define __AUDIO_EXT_CLK_UP_H_ + +int audio_ref_clk_platform_init(void); +void audio_ref_clk_platform_exit(void); + +#endif diff --git a/sound/soc/codecs/audio-ext-clk.c b/sound/soc/codecs/audio-ext-clk.c index ef795dfe9920..72f16f5848dd 100644 --- a/sound/soc/codecs/audio-ext-clk.c +++ b/sound/soc/codecs/audio-ext-clk.c @@ -23,6 +23,7 @@ #include #include #include +#include "audio-ext-clk-up.h" struct pinctrl_info { struct pinctrl *pinctrl; @@ -333,17 +334,15 @@ static struct platform_driver audio_ref_clk_driver = { .remove = audio_ref_clk_remove, }; -static int __init audio_ref_clk_platform_init(void) +int audio_ref_clk_platform_init(void) { return platform_driver_register(&audio_ref_clk_driver); } -module_init(audio_ref_clk_platform_init); -static void __exit audio_ref_clk_platform_exit(void) +void audio_ref_clk_platform_exit(void) { platform_driver_unregister(&audio_ref_clk_driver); } -module_exit(audio_ref_clk_platform_exit); MODULE_DESCRIPTION("Audio Ref Clock module platform driver"); MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c index 93c2fd192190..f8fac1eefae6 100644 --- a/sound/soc/codecs/wcd-dsp-mgr.c +++ b/sound/soc/codecs/wcd-dsp-mgr.c @@ -1217,7 +1217,16 @@ static struct platform_driver wdsp_mgr_driver = { .probe = wdsp_mgr_probe, .remove = wdsp_mgr_remove, }; -module_platform_driver(wdsp_mgr_driver); + +int wcd_dsp_mgr_init(void) +{ + return platform_driver_register(&wdsp_mgr_driver); +} + +void wcd_dsp_mgr_exit(void) +{ + platform_driver_unregister(&wdsp_mgr_driver); +} MODULE_DESCRIPTION("WCD DSP manager driver"); MODULE_DEVICE_TABLE(of, wdsp_mgr_dt_match); diff --git a/sound/soc/codecs/wcd-mbhc-adc.h b/sound/soc/codecs/wcd-mbhc-adc.h index 112d508ada78..31161089e793 100644 --- a/sound/soc/codecs/wcd-mbhc-adc.h +++ b/sound/soc/codecs/wcd-mbhc-adc.h @@ -24,7 +24,7 @@ enum wcd_mbhc_adc_mux_ctl { MUX_CTL_NONE, }; -#ifdef CONFIG_SND_SOC_WCD_MBHC_ADC +#if IS_ENABLED(CONFIG_SND_SOC_WCD_MBHC_ADC) void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc); #else static inline void wcd_mbhc_adc_init(struct wcd_mbhc *mbhc) diff --git a/sound/soc/codecs/wcd-mbhc-v2-api.h b/sound/soc/codecs/wcd-mbhc-v2-api.h index fab2b496621f..7b6e94507ad0 100644 --- a/sound/soc/codecs/wcd-mbhc-v2-api.h +++ b/sound/soc/codecs/wcd-mbhc-v2-api.h @@ -14,7 +14,7 @@ #include "wcd-mbhc-v2.h" -#ifdef CONFIG_SND_SOC_WCD_MBHC +#if IS_ENABLED(CONFIG_SND_SOC_WCD_MBHC) int wcd_mbhc_start(struct wcd_mbhc *mbhc, struct wcd_mbhc_config *mbhc_cfg); void wcd_mbhc_stop(struct wcd_mbhc *mbhc); diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index 90d16fbcf38a..329aa7a4c466 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -546,38 +547,6 @@ enum { SPLINE_SRC_MAX, }; -/* wcd9335 interrupt table */ -static const struct intr_data wcd9335_intr_table[] = { - {WCD9XXX_IRQ_SLIMBUS, false}, - {WCD9335_IRQ_MBHC_SW_DET, true}, - {WCD9335_IRQ_MBHC_BUTTON_PRESS_DET, true}, - {WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET, true}, - {WCD9335_IRQ_MBHC_ELECT_INS_REM_DET, true}, - {WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true}, - {WCD9335_IRQ_FLL_LOCK_LOSS, false}, - {WCD9335_IRQ_HPH_PA_CNPL_COMPLETE, false}, - {WCD9335_IRQ_HPH_PA_CNPR_COMPLETE, false}, - {WCD9335_IRQ_EAR_PA_CNP_COMPLETE, false}, - {WCD9335_IRQ_LINE_PA1_CNP_COMPLETE, false}, - {WCD9335_IRQ_LINE_PA2_CNP_COMPLETE, false}, - {WCD9335_IRQ_LINE_PA3_CNP_COMPLETE, false}, - 
{WCD9335_IRQ_LINE_PA4_CNP_COMPLETE, false}, - {WCD9335_IRQ_HPH_PA_OCPL_FAULT, false}, - {WCD9335_IRQ_HPH_PA_OCPR_FAULT, false}, - {WCD9335_IRQ_EAR_PA_OCP_FAULT, false}, - {WCD9335_IRQ_SOUNDWIRE, false}, - {WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE, false}, - {WCD9335_IRQ_RCO_ERROR, false}, - {WCD9335_IRQ_SVA_ERROR, false}, - {WCD9335_IRQ_MAD_AUDIO, false}, - {WCD9335_IRQ_MAD_BEACON, false}, - {WCD9335_IRQ_SVA_OUTBOX1, true}, - {WCD9335_IRQ_SVA_OUTBOX2, true}, - {WCD9335_IRQ_MAD_ULTRASOUND, false}, - {WCD9335_IRQ_VBAT_ATTACK, false}, - {WCD9335_IRQ_VBAT_RESTORE, false}, -}; - static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0); static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1); static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1); @@ -875,176 +844,6 @@ static const struct tasha_reg_mask_val tasha_spkr_mode1[] = { {WCD9335_CDC_BOOST1_BOOST_CTL, 0x7C, 0x44}, }; -/* - * wcd9335_get_codec_info: Get codec specific information - * - * @wcd9xxx: pointer to wcd9xxx structure - * @wcd_type: pointer to wcd9xxx_codec_type structure - * - * Returns 0 for success or negative error code for failure - */ -int wcd9335_get_codec_info(struct wcd9xxx *wcd9xxx, - struct wcd9xxx_codec_type *wcd_type) -{ - u16 id_minor, id_major; - struct regmap *wcd_regmap; - int rc, val, version = 0; - - if (!wcd9xxx || !wcd_type) - return -EINVAL; - - if (!wcd9xxx->regmap) { - dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n", - __func__); - return -EINVAL; - } - wcd_regmap = wcd9xxx->regmap; - - rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, - (u8 *)&id_minor, sizeof(u16)); - if (rc) - return -EINVAL; - - rc = regmap_bulk_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE2, - (u8 *)&id_major, sizeof(u16)); - if (rc) - return -EINVAL; - - dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n", - __func__, id_major, id_minor); - - /* Version detection */ - if (id_major == TASHA_MAJOR) { - regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, - &val); - version = ((u8)val & 0x80) >> 7; - } else if (id_major == TASHA2P0_MAJOR) - version = 2; - else - dev_err(wcd9xxx->dev, "%s: wcd9335 version unknown (major 0x%x, minor 0x%x)\n", - __func__, id_major, id_minor); - - /* Fill codec type info */ - wcd_type->id_major = id_major; - wcd_type->id_minor = id_minor; - wcd_type->num_irqs = WCD9335_NUM_IRQS; - wcd_type->version = version; - wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1; - wcd_type->i2c_chip_status = 0x01; - wcd_type->intr_tbl = wcd9335_intr_table; - wcd_type->intr_tbl_size = ARRAY_SIZE(wcd9335_intr_table); - - wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] = - WCD9335_INTR_PIN1_STATUS0; - wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] = - WCD9335_INTR_PIN1_CLEAR0; - wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] = - WCD9335_INTR_PIN1_MASK0; - wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] = - WCD9335_INTR_LEVEL0; - wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] = - WCD9335_INTR_CLR_COMMIT; - - return rc; -} -EXPORT_SYMBOL(wcd9335_get_codec_info); - -/* - * wcd9335_bringdown: Bringdown WCD Codec - * - * @wcd9xxx: Pointer to wcd9xxx structure - * - * Returns 0 for success or negative error code for failure - */ -int wcd9335_bringdown(struct wcd9xxx *wcd9xxx) -{ - if (!wcd9xxx || !wcd9xxx->regmap) - return -EINVAL; - - regmap_write(wcd9xxx->regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, - 0x04); - - return 0; -} -EXPORT_SYMBOL(wcd9335_bringdown); - -/* - * wcd9335_bringup: Bringup WCD Codec - * - * @wcd9xxx: Pointer to the wcd9xxx structure - * - * Returns 0 for success 
or negative error code for failure - */ -int wcd9335_bringup(struct wcd9xxx *wcd9xxx) -{ - int ret = 0; - int val, byte0; - struct regmap *wcd_regmap; - - if (!wcd9xxx) - return -EINVAL; - - if (!wcd9xxx->regmap) { - dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n", - __func__); - return -EINVAL; - } - wcd_regmap = wcd9xxx->regmap; - - regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_EFUSE_VAL_OUT0, &val); - regmap_read(wcd_regmap, WCD9335_CHIP_TIER_CTRL_CHIP_ID_BYTE0, &byte0); - - if ((val < 0) || (byte0 < 0)) { - dev_err(wcd9xxx->dev, "%s: tasha codec version detection fail!\n", - __func__); - return -EINVAL; - } - if ((val & 0x80) && (byte0 == 0x0)) { - dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.1\n", - __func__); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01); - regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC); - regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, - 0x5); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, - 0x7); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, - 0x3); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3); - } else if (byte0 == 0x1) { - dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v2.0\n", - __func__); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01); - regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_TEST_2, 0x00); - regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_8, 0x6F); - regmap_write(wcd_regmap, WCD9335_BIAS_VBG_FINE_ADJ, 0x65); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, - 0x5); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, - 0x7); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, - 0x3); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3); - } else if ((byte0 == 0) && (!(val & 0x80))) { - dev_info(wcd9xxx->dev, "%s: wcd9335 codec version is v1.0\n", - __func__); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x01); - regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_2, 0xFC); - regmap_write(wcd_regmap, WCD9335_SIDO_SIDO_CCL_4, 0x21); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_PWR_CDC_DIG_HM_CTL, - 0x3); - regmap_write(wcd_regmap, WCD9335_CODEC_RPM_RST_CTL, 0x3); - } else { - dev_err(wcd9xxx->dev, "%s: tasha codec version unknown\n", - __func__); - ret = -EINVAL; - } - - return ret; -} -EXPORT_SYMBOL(wcd9335_bringup); - /** * tasha_set_spkr_gain_offset - offset the speaker path * gain with the given offset value. 
diff --git a/sound/soc/codecs/wcd9335.h b/sound/soc/codecs/wcd9335.h index d27bb96abeaf..c76461edecf1 100644 --- a/sound/soc/codecs/wcd9335.h +++ b/sound/soc/codecs/wcd9335.h @@ -83,44 +83,6 @@ enum { TASHA_TX_MAX, }; -enum { - /* INTR_REG 0 */ - WCD9335_IRQ_FLL_LOCK_LOSS = 1, - WCD9335_IRQ_HPH_PA_OCPL_FAULT, - WCD9335_IRQ_HPH_PA_OCPR_FAULT, - WCD9335_IRQ_EAR_PA_OCP_FAULT, - WCD9335_IRQ_HPH_PA_CNPL_COMPLETE, - WCD9335_IRQ_HPH_PA_CNPR_COMPLETE, - WCD9335_IRQ_EAR_PA_CNP_COMPLETE, - /* INTR_REG 1 */ - WCD9335_IRQ_MBHC_SW_DET, - WCD9335_IRQ_MBHC_ELECT_INS_REM_DET, - WCD9335_IRQ_MBHC_BUTTON_PRESS_DET, - WCD9335_IRQ_MBHC_BUTTON_RELEASE_DET, - WCD9335_IRQ_MBHC_ELECT_INS_REM_LEG_DET, - WCD9335_IRQ_RESERVED_0, - WCD9335_IRQ_RESERVED_1, - WCD9335_IRQ_RESERVED_2, - /* INTR_REG 2 */ - WCD9335_IRQ_LINE_PA1_CNP_COMPLETE, - WCD9335_IRQ_LINE_PA2_CNP_COMPLETE, - WCD9335_IRQ_LINE_PA3_CNP_COMPLETE, - WCD9335_IRQ_LINE_PA4_CNP_COMPLETE, - WCD9335_IRQ_SOUNDWIRE, - WCD9335_IRQ_VDD_DIG_RAMP_COMPLETE, - WCD9335_IRQ_RCO_ERROR, - WCD9335_IRQ_SVA_ERROR, - /* INTR_REG 3 */ - WCD9335_IRQ_MAD_AUDIO, - WCD9335_IRQ_MAD_BEACON, - WCD9335_IRQ_MAD_ULTRASOUND, - WCD9335_IRQ_VBAT_ATTACK, - WCD9335_IRQ_VBAT_RESTORE, - WCD9335_IRQ_SVA_OUTBOX1, - WCD9335_IRQ_SVA_OUTBOX2, - WCD9335_NUM_IRQS, -}; - enum wcd9335_codec_event { WCD9335_CODEC_EVENT_CODEC_UP = 0, }; diff --git a/sound/soc/codecs/wcd934x/Makefile b/sound/soc/codecs/wcd934x/Makefile index 2843fa11d58e..12781f6d4556 100644 --- a/sound/soc/codecs/wcd934x/Makefile +++ b/sound/soc/codecs/wcd934x/Makefile @@ -1,9 +1,6 @@ # # Makefile for wcd934x codec driver. # -snd-soc-wcd934x-objs := wcd934x.o wcd934x-dsp-cntl.o +snd-soc-wcd934x-objs := wcd934x.o wcd934x-dsp-cntl.o \ + wcd934x-mbhc.o wcd934x-dsd.o obj-$(CONFIG_SND_SOC_WCD934X) += snd-soc-wcd934x.o -snd-soc-wcd934x-mbhc-objs := wcd934x-mbhc.o -obj-$(CONFIG_SND_SOC_WCD934X_MBHC) += snd-soc-wcd934x-mbhc.o -snd-soc-wcd934x-dsd-objs := wcd934x-dsd.o -obj-$(CONFIG_SND_SOC_WCD934X_DSD) += snd-soc-wcd934x-dsd.o diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.h b/sound/soc/codecs/wcd934x/wcd934x-dsd.h index 498288335b3b..834b96cd1805 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-dsd.h +++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -43,7 +43,7 @@ struct tavil_dsd_config { int version; }; -#ifdef CONFIG_SND_SOC_WCD934X_DSD +#if IS_ENABLED(CONFIG_SND_SOC_WCD934X_DSD) int tavil_dsd_set_mixer_value(struct tavil_dsd_config *dsd_conf, int interp_num, int sw_value); int tavil_dsd_get_current_mixer_value(struct tavil_dsd_config *dsd_conf, diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c index 578c347c5d14..a1a5e2d65062 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.c +++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h index d40546a7e143..53c886da0f6b 100644 --- a/sound/soc/codecs/wcd934x/wcd934x-mbhc.h +++ b/sound/soc/codecs/wcd934x/wcd934x-mbhc.h @@ -35,7 +35,7 @@ struct wcd934x_mbhc { bool is_hph_recover; }; -#ifdef CONFIG_SND_SOC_WCD934X_MBHC +#if IS_ENABLED(CONFIG_SND_SOC_WCD934X_MBHC) extern int tavil_mbhc_init(struct wcd934x_mbhc **mbhc, struct snd_soc_codec *codec, struct fw_info *fw_data); diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c index ff08ccb43dae..ca16ed8c8ae3 100644 --- a/sound/soc/codecs/wcd934x/wcd934x.c +++ b/sound/soc/codecs/wcd934x/wcd934x.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -233,37 +234,6 @@ struct tavil_idle_detect_config { u8 hph_idle_detect_en; }; -static const struct intr_data wcd934x_intr_table[] = { - {WCD9XXX_IRQ_SLIMBUS, false}, - {WCD934X_IRQ_MBHC_SW_DET, true}, - {WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, true}, - {WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, true}, - {WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, true}, - {WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, true}, - {WCD934X_IRQ_MISC, false}, - {WCD934X_IRQ_HPH_PA_CNPL_COMPLETE, false}, - {WCD934X_IRQ_HPH_PA_CNPR_COMPLETE, false}, - {WCD934X_IRQ_EAR_PA_CNP_COMPLETE, false}, - {WCD934X_IRQ_LINE_PA1_CNP_COMPLETE, false}, - {WCD934X_IRQ_LINE_PA2_CNP_COMPLETE, false}, - {WCD934X_IRQ_SLNQ_ANALOG_ERROR, false}, - {WCD934X_IRQ_RESERVED_3, false}, - {WCD934X_IRQ_HPH_PA_OCPL_FAULT, false}, - {WCD934X_IRQ_HPH_PA_OCPR_FAULT, false}, - {WCD934X_IRQ_EAR_PA_OCP_FAULT, false}, - {WCD934X_IRQ_SOUNDWIRE, false}, - {WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE, false}, - {WCD934X_IRQ_RCO_ERROR, false}, - {WCD934X_IRQ_CPE_ERROR, false}, - {WCD934X_IRQ_MAD_AUDIO, false}, - {WCD934X_IRQ_MAD_BEACON, false}, - {WCD934X_IRQ_CPE1_INTR, true}, - {WCD934X_IRQ_RESERVED_4, false}, - {WCD934X_IRQ_MAD_ULTRASOUND, false}, - {WCD934X_IRQ_VBAT_ATTACK, false}, - {WCD934X_IRQ_VBAT_RESTORE, false}, -}; - struct tavil_cpr_reg_defaults { int wr_data; int wr_addr; @@ -676,140 +646,6 @@ static const struct tavil_reg_mask_val tavil_spkr_mode1[] = { static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil); -/* - * wcd934x_get_codec_info: Get codec specific information - * - * @wcd9xxx: pointer to wcd9xxx structure - * @wcd_type: pointer to wcd9xxx_codec_type structure - * - * Returns 0 for success or negative error code for failure - */ -int wcd934x_get_codec_info(struct wcd9xxx *wcd9xxx, - struct wcd9xxx_codec_type *wcd_type) -{ - u16 id_minor, id_major; - struct regmap *wcd_regmap; - int rc, version = -1; - - if (!wcd9xxx || !wcd_type) - return -EINVAL; - - if (!wcd9xxx->regmap) { - dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null\n", __func__); - return 
-EINVAL; - } - wcd_regmap = wcd9xxx->regmap; - - rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0, - (u8 *)&id_minor, sizeof(u16)); - if (rc) - return -EINVAL; - - rc = regmap_bulk_read(wcd_regmap, WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2, - (u8 *)&id_major, sizeof(u16)); - if (rc) - return -EINVAL; - - dev_info(wcd9xxx->dev, "%s: wcd9xxx chip id major 0x%x, minor 0x%x\n", - __func__, id_major, id_minor); - - if (id_major != TAVIL_MAJOR) - goto version_unknown; - - /* - * As fine version info cannot be retrieved before tavil probe. - * Assign coarse versions for possible future use before tavil probe. - */ - if (id_minor == cpu_to_le16(0)) - version = TAVIL_VERSION_1_0; - else if (id_minor == cpu_to_le16(0x01)) - version = TAVIL_VERSION_1_1; - -version_unknown: - if (version < 0) - dev_err(wcd9xxx->dev, "%s: wcd934x version unknown\n", - __func__); - - /* Fill codec type info */ - wcd_type->id_major = id_major; - wcd_type->id_minor = id_minor; - wcd_type->num_irqs = WCD934X_NUM_IRQS; - wcd_type->version = version; - wcd_type->slim_slave_type = WCD9XXX_SLIM_SLAVE_ADDR_TYPE_1; - wcd_type->i2c_chip_status = 0x01; - wcd_type->intr_tbl = wcd934x_intr_table; - wcd_type->intr_tbl_size = ARRAY_SIZE(wcd934x_intr_table); - - wcd_type->intr_reg[WCD9XXX_INTR_STATUS_BASE] = - WCD934X_INTR_PIN1_STATUS0; - wcd_type->intr_reg[WCD9XXX_INTR_CLEAR_BASE] = - WCD934X_INTR_PIN1_CLEAR0; - wcd_type->intr_reg[WCD9XXX_INTR_MASK_BASE] = - WCD934X_INTR_PIN1_MASK0; - wcd_type->intr_reg[WCD9XXX_INTR_LEVEL_BASE] = - WCD934X_INTR_LEVEL0; - wcd_type->intr_reg[WCD9XXX_INTR_CLR_COMMIT] = - WCD934X_INTR_CLR_COMMIT; - - return rc; -} -EXPORT_SYMBOL(wcd934x_get_codec_info); - -/* - * wcd934x_bringdown: Bringdown WCD Codec - * - * @wcd9xxx: Pointer to wcd9xxx structure - * - * Returns 0 for success or negative error code for failure - */ -int wcd934x_bringdown(struct wcd9xxx *wcd9xxx) -{ - if (!wcd9xxx || !wcd9xxx->regmap) - return -EINVAL; - - regmap_write(wcd9xxx->regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, - 0x04); - - return 0; -} -EXPORT_SYMBOL(wcd934x_bringdown); - -/* - * wcd934x_bringup: Bringup WCD Codec - * - * @wcd9xxx: Pointer to the wcd9xxx structure - * - * Returns 0 for success or negative error code for failure - */ -int wcd934x_bringup(struct wcd9xxx *wcd9xxx) -{ - struct regmap *wcd_regmap; - - if (!wcd9xxx) - return -EINVAL; - - if (!wcd9xxx->regmap) { - dev_err(wcd9xxx->dev, "%s: wcd9xxx regmap is null!\n", - __func__); - return -EINVAL; - } - wcd_regmap = wcd9xxx->regmap; - - regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x01); - regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x19); - regmap_write(wcd_regmap, WCD934X_SIDO_NEW_VOUT_D_STARTUP, 0x15); - /* Add 1msec delay for VOUT to settle */ - usleep_range(1000, 1100); - regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x5); - regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x7); - regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x3); - regmap_write(wcd_regmap, WCD934X_CODEC_RPM_RST_CTL, 0x7); - regmap_write(wcd_regmap, WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL, 0x3); - - return 0; -} -EXPORT_SYMBOL(wcd934x_bringup); - /** * tavil_set_spkr_gain_offset - offset the speaker path * gain with the given offset value. 
diff --git a/sound/soc/codecs/wcd934x/wcd934x.h b/sound/soc/codecs/wcd934x/wcd934x.h index ae70175de239..c3bf50a4ffdb 100644 --- a/sound/soc/codecs/wcd934x/wcd934x.h +++ b/sound/soc/codecs/wcd934x/wcd934x.h @@ -95,45 +95,6 @@ enum { INTERP_MAX, }; -enum { - /* INTR_REG 0 */ - WCD934X_IRQ_MISC = 1, - WCD934X_IRQ_HPH_PA_OCPL_FAULT, - WCD934X_IRQ_HPH_PA_OCPR_FAULT, - WCD934X_IRQ_EAR_PA_OCP_FAULT, - WCD934X_IRQ_HPH_PA_CNPL_COMPLETE, - WCD934X_IRQ_HPH_PA_CNPR_COMPLETE, - WCD934X_IRQ_EAR_PA_CNP_COMPLETE, - /* INTR_REG 1 */ - WCD934X_IRQ_MBHC_SW_DET, - WCD934X_IRQ_MBHC_ELECT_INS_REM_DET, - WCD934X_IRQ_MBHC_BUTTON_PRESS_DET, - WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET, - WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET, - WCD934X_IRQ_RESERVED_0, - WCD934X_IRQ_RESERVED_1, - WCD934X_IRQ_RESERVED_2, - /* INTR_REG 2 */ - WCD934X_IRQ_LINE_PA1_CNP_COMPLETE, - WCD934X_IRQ_LINE_PA2_CNP_COMPLETE, - WCD934X_IRQ_SLNQ_ANALOG_ERROR, - WCD934X_IRQ_RESERVED_3, - WCD934X_IRQ_SOUNDWIRE, - WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE, - WCD934X_IRQ_RCO_ERROR, - WCD934X_IRQ_CPE_ERROR, - /* INTR_REG 3 */ - WCD934X_IRQ_MAD_AUDIO, - WCD934X_IRQ_MAD_BEACON, - WCD934X_IRQ_MAD_ULTRASOUND, - WCD934X_IRQ_VBAT_ATTACK, - WCD934X_IRQ_VBAT_RESTORE, - WCD934X_IRQ_CPE1_INTR, - WCD934X_IRQ_RESERVED_4, - WCD934X_IRQ_SLNQ_DIGITAL, - WCD934X_NUM_IRQS, -}; - /* * Selects compander and smart boost settings * for a given speaker mode diff --git a/sound/soc/codecs/wcd9xxx-common-v2.c b/sound/soc/codecs/wcd9xxx-common-v2.c index 9ac38c2c3e79..62166579342a 100644 --- a/sound/soc/codecs/wcd9xxx-common-v2.c +++ b/sound/soc/codecs/wcd9xxx-common-v2.c @@ -1316,6 +1316,7 @@ void wcd_clsh_fsm(struct snd_soc_codec *codec, break; }; } +EXPORT_SYMBOL(wcd_clsh_fsm); int wcd_clsh_get_clsh_state(struct wcd_clsh_cdc_data *clsh) { diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c index 825aaeecb665..feef0a48af79 100644 --- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c +++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c @@ -108,6 +108,7 @@ int wcd_resmgr_get_clk_type(struct wcd9xxx_resmgr_v2 *resmgr) } return resmgr->clk_type; } +EXPORT_SYMBOL(wcd_resmgr_get_clk_type); static void wcd_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr_v2 *resmgr, int clk_users) @@ -123,6 +124,10 @@ static void wcd_resmgr_cdc_specific_get_clk(struct wcd9xxx_resmgr_v2 *resmgr, } } +/* + * wcd_resmgr_post_ssr_v2 + * @resmgr: handle to struct wcd9xxx_resmgr_v2 + */ void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr) { int old_bg_audio_users; @@ -157,7 +162,7 @@ void wcd_resmgr_post_ssr_v2(struct wcd9xxx_resmgr_v2 *resmgr) WCD9XXX_V2_BG_CLK_UNLOCK(resmgr); } - +EXPORT_SYMBOL(wcd_resmgr_post_ssr_v2); /* * wcd_resmgr_enable_master_bias: enable codec master bias @@ -190,6 +195,7 @@ int wcd_resmgr_enable_master_bias(struct wcd9xxx_resmgr_v2 *resmgr) mutex_unlock(&resmgr->master_bias_lock); return 0; } +EXPORT_SYMBOL(wcd_resmgr_enable_master_bias); /* * wcd_resmgr_disable_master_bias: disable codec master bias @@ -213,6 +219,7 @@ int wcd_resmgr_disable_master_bias(struct wcd9xxx_resmgr_v2 *resmgr) mutex_unlock(&resmgr->master_bias_lock); return 0; } +EXPORT_SYMBOL(wcd_resmgr_disable_master_bias); static int wcd_resmgr_enable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr) { @@ -511,6 +518,7 @@ int wcd_resmgr_enable_clk_block(struct wcd9xxx_resmgr_v2 *resmgr, return ret; } +EXPORT_SYMBOL(wcd_resmgr_enable_clk_block); void wcd_resmgr_set_sido_input_src(struct wcd9xxx_resmgr_v2 *resmgr, int sido_src) @@ -601,6 +609,7 @@ int wcd_resmgr_disable_clk_block(struct wcd9xxx_resmgr_v2 
*resmgr, return ret; } +EXPORT_SYMBOL(wcd_resmgr_disable_clk_block); /* * wcd_resmgr_init: initialize wcd resource manager @@ -639,6 +648,7 @@ struct wcd9xxx_resmgr_v2 *wcd_resmgr_init( return resmgr; } +EXPORT_SYMBOL(wcd_resmgr_init); /* * wcd_resmgr_remove: Clean-up wcd resource manager @@ -649,6 +659,7 @@ void wcd_resmgr_remove(struct wcd9xxx_resmgr_v2 *resmgr) mutex_destroy(&resmgr->master_bias_lock); kfree(resmgr); } +EXPORT_SYMBOL(wcd_resmgr_remove); /* * wcd_resmgr_post_init: post init call to assign codec handle @@ -676,5 +687,7 @@ int wcd_resmgr_post_init(struct wcd9xxx_resmgr_v2 *resmgr, return 0; } +EXPORT_SYMBOL(wcd_resmgr_post_init); + MODULE_DESCRIPTION("wcd9xxx resmgr v2 module"); MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/wcd9xxx-soc-init.c b/sound/soc/codecs/wcd9xxx-soc-init.c new file mode 100644 index 000000000000..fa8abb7de5e4 --- /dev/null +++ b/sound/soc/codecs/wcd9xxx-soc-init.c @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include "audio-ext-clk-up.h" + +static int __init wcd9xxx_soc_init(void) +{ + int ret = 0; + + ret = wcd_dsp_mgr_init(); + if (!ret) { + ret = audio_ref_clk_platform_init(); + if (ret) { + pr_err("%s: init extclk fail: %d\n", __func__, ret); + wcd_dsp_mgr_exit(); + } + } else { + pr_err("%s: init dsp mgr fail: %d\n", __func__, ret); + } + + return ret; +} +module_init(wcd9xxx_soc_init); + +static void __exit wcd9xxx_soc_exit(void) +{ + audio_ref_clk_platform_exit(); + wcd_dsp_mgr_exit(); +} +module_exit(wcd9xxx_soc_exit); + +MODULE_DESCRIPTION("WCD9XXX CODEC soc init driver"); +MODULE_LICENSE("GPL v2"); diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c index 153cc2ee8769..f2a20d51d0e2 100644 --- a/sound/soc/codecs/wcd_cpe_core.c +++ b/sound/soc/codecs/wcd_cpe_core.c @@ -648,6 +648,7 @@ void *wcd_cpe_get_core_handle( done: return core; } +EXPORT_SYMBOL(wcd_cpe_get_core_handle); /* * svass_engine_irq: threaded interrupt handler for svass engine irq diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig index 4a0ce40f5241..c557ae06e95f 100644 --- a/sound/soc/msm/Kconfig +++ b/sound/soc/msm/Kconfig @@ -195,6 +195,9 @@ config SND_SOC_MSM8996 config SND_SOC_MACHINE_MSM8998 tristate "SoC Machine driver for MSM8998 boards" select SND_SOC_WSA881X + select SND_SOC_WCD9335 + select SND_SOC_WCD934X + select SND_SOC_CPE help To enable the machine driver and the @@ -215,12 +218,9 @@ config SND_SOC_MSM8998 select MSM_QDSP6_PDR select MSM_QDSP6_NOTIFIER select MSM_QDSP6V2_CODECS - select SND_SOC_WCD9335 - select SND_SOC_WCD934X select SND_SOC_MSM_HDMI_CODEC_RX select DTS_SRS_TM select QTI_PP - select SND_SOC_CPE select MSM_ULTRASOUND select DOLBY_LICENSE select SND_HWDEP @@ -247,6 +247,7 @@ config SND_SOC_660 config SND_SOC_MACHINE_SDM845 tristate "SoC Machine driver for SDM845 boards" select SND_SOC_WSA881X + select SND_SOC_WCD934X help To enable the machine driver and the @@ -267,7 +268,6 @@ config SND_SOC_SDM845 select MSM_QDSP6_PDR select MSM_QDSP6_NOTIFIER select MSM_QDSP6V2_CODECS - 
select SND_SOC_WCD934X select DTS_SRS_TM select QTI_PP select MSM_ULTRASOUND -- GitLab From 60d2ba56c280123432280b6d7a9e9a2076dded53 Mon Sep 17 00:00:00 2001 From: George Shen Date: Thu, 29 Jun 2017 10:45:07 -0700 Subject: [PATCH 452/786] msm: kgsl: update SP clock gating setting Fix SP clock incorrectly gated in shader bypass. CRs-Fixed: 2062271 Change-Id: Iad651e59c829970a9e90a18d715dea9863bf87a5 Signed-off-by: George Shen --- drivers/gpu/msm/adreno_a6xx.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index fb745ad7786c..341082c4e10b 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -64,10 +64,10 @@ static const struct kgsl_hwcg_reg a630_hwcg_regs[] = { {A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222}, {A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222}, {A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222}, - {A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220}, - {A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220}, - {A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220}, - {A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220}, + {A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220}, + {A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220}, + {A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220}, + {A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220}, {A6XX_RBBM_CLOCK_DELAY_SP0, 0x0000F3CF}, {A6XX_RBBM_CLOCK_DELAY_SP1, 0x0000F3CF}, {A6XX_RBBM_CLOCK_DELAY_SP2, 0x0000F3CF}, -- GitLab From ea65e0e64a0a82df43cc091bd5e2afc1748474d7 Mon Sep 17 00:00:00 2001 From: George Shen Date: Thu, 29 Jun 2017 11:16:35 -0700 Subject: [PATCH 453/786] msm: kgsl: Fix spinlock corruption in hfi receiver In case GMU failed to send back ACK to GMU driver on CPU in time, GMU driver hfi message receiver shall ignore the delayed ACK from GMU. The original request message had been deleted by GMU driver. Handling obsolete ACK could lead driver to use an invalid spinlock structure in original request message. 
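The pitfall described above is a property of list_for_each_entry()/list_for_each_entry_safe(): when the loop runs to completion without hitting a break, the cursor is not NULL, it points at the container of the list head, so a post-loop NULL check can never report "not found". A minimal standalone sketch of the safe pattern, assuming the usual <linux/list.h> semantics; the struct and helper names are hypothetical, not the kgsl HFI types:

#include <linux/list.h>
#include <linux/types.h>

struct pending {
	u32 id;
	u32 seqnum;
	struct list_head node;
};

/* Return the pending entry matching an ack, or NULL for an obsolete ack. */
static struct pending *find_pending(struct list_head *head, u32 id, u32 seqnum)
{
	struct pending *p;
	bool found = false;

	list_for_each_entry(p, head, node) {
		if (p->id == id && p->seqnum == seqnum) {
			found = true;
			break;
		}
	}

	/*
	 * After a full traversal 'p' is a bogus pointer derived from the
	 * list head, never NULL, so an explicit flag (or returning from
	 * inside the loop) is what distinguishes "found" from "not found".
	 */
	return found ? p : NULL;
}

Callers can then simply drop an ack for which find_pending() returns NULL instead of dereferencing a request that has already been freed.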
CRs-Fixed: 2061912 Change-Id: I27a333950cb21c3832af201719ea16f8c0ce281d Signed-off-by: George Shen --- drivers/gpu/msm/kgsl_hfi.c | 7 +++++-- drivers/gpu/msm/kgsl_hfi.h | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/msm/kgsl_hfi.c b/drivers/gpu/msm/kgsl_hfi.c index 067b276f92d1..cc878aaa6190 100644 --- a/drivers/gpu/msm/kgsl_hfi.c +++ b/drivers/gpu/msm/kgsl_hfi.c @@ -177,6 +177,7 @@ static void receive_ack_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp) { struct kgsl_hfi *hfi = &gmu->hfi; struct pending_msg *msg = NULL, *next; + bool in_queue = false; trace_kgsl_hfi_receive(rsp->ret_hdr.id, rsp->ret_hdr.size, @@ -185,11 +186,13 @@ static void receive_ack_msg(struct gmu_device *gmu, struct hfi_msg_rsp *rsp) spin_lock(&hfi->msglock); list_for_each_entry_safe(msg, next, &hfi->msglist, node) { if (msg->msg_id == rsp->ret_hdr.id && - msg->seqnum == rsp->ret_hdr.seqnum) + msg->seqnum == rsp->ret_hdr.seqnum) { + in_queue = true; break; + } } - if (msg == NULL) { + if (in_queue == false) { spin_unlock(&hfi->msglock); dev_err(&gmu->pdev->dev, "Cannot find receiver of ack msg with id=%d\n", diff --git a/drivers/gpu/msm/kgsl_hfi.h b/drivers/gpu/msm/kgsl_hfi.h index 8eedbfa217f8..47d07d90f338 100644 --- a/drivers/gpu/msm/kgsl_hfi.h +++ b/drivers/gpu/msm/kgsl_hfi.h @@ -115,7 +115,7 @@ enum hfi_f2h_qpri { HFI_F2H_QPRI_DEBUG = 40, }; -#define HFI_RSP_TIMEOUT 100 /* msec */ +#define HFI_RSP_TIMEOUT 500 /* msec */ #define HFI_H2F_CMD_IRQ_MASK BIT(0) enum hfi_msg_type { -- GitLab From 3d73a424db529fe42400eb5487aca5cd1ec7fa4f Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Wed, 28 Jun 2017 00:18:59 -0700 Subject: [PATCH 454/786] pinctrl: qcom: update wcd pinctrl config depends on relation Codec core configs are combined into one single core config. Update depends on for pinctrl with this new codec core config. CRs-fixed: 2068879 Change-Id: I41806e1ceab9f71ed0158fdb8bf3dc335103cedc Signed-off-by: Karthikeyan Mani --- drivers/pinctrl/qcom/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index 40ee6473a8c1..02b28bdc271a 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -150,7 +150,7 @@ config PINCTRL_QCOM_SSBI_PMIC config PINCTRL_WCD tristate "Qualcomm Technologies, Inc WCD pin controller driver" - depends on WCD934X_CODEC + depends on WCD9XXX_CODEC_CORE help This is the pinctrl, pinmux, pinconf and gpiolib driver for the WCD gpio controller block. -- GitLab From 0f97dba326662efe85d21189d6c4d95d7a0a8184 Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Wed, 28 Jun 2017 00:22:43 -0700 Subject: [PATCH 455/786] soundwire: update soundwire config depends on relation Codec core configs are combined into one single core config. Update depends on for soundwire with this new codec core config. 
CRs-fixed: 2068879 Change-Id: I26845d26961b17956095fea25cad2ead87d96a36 Signed-off-by: Karthikeyan Mani --- drivers/soundwire/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig index a4c2f0cdfb7a..e1ca532c22f5 100644 --- a/drivers/soundwire/Kconfig +++ b/drivers/soundwire/Kconfig @@ -9,7 +9,7 @@ menuconfig SOUNDWIRE if SOUNDWIRE config SOUNDWIRE_WCD_CTRL - depends on WCD9335_CODEC || WCD934X_CODEC + depends on WCD9XXX_CODEC_CORE tristate "QTI WCD CODEC Soundwire controller" default n help -- GitLab From 4b8306d898cda63838b98ed1e750f43f343b8849 Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Tue, 27 Jun 2017 20:28:07 -0700 Subject: [PATCH 456/786] mfd: wcd9xxx: Combine all codec configs into single config Add config entry for Codec core to combine all mfd codec drivers under single config select value. CRs-fixed: 2068879 Change-Id: I772d07dc64648cdeeec5134aabb04dbfe93d0742 Signed-off-by: Karthikeyan Mani --- drivers/mfd/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index d276fa9d6265..0ce70f371a16 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -211,7 +211,7 @@ wcd-core-objs := wcd9xxx-rst.o wcd9xxx-core-init.o \ wcd934x-regmap.o wcd934x-tables.o \ wcd9335-regmap.o wcd9335-tables.o \ msm-cdc-pinctrl.o msm-cdc-supply.o -obj-$(CONFIG_WCD934X_CODEC) += wcd-core.o +obj-$(CONFIG_WCD9XXX_CODEC_CORE) += wcd-core.o intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o intel-soc-pmic-$(CONFIG_INTEL_PMC_IPC) += intel_soc_pmic_bxtwc.o obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o -- GitLab From c337258b226b61622d2f369c08e840f2be2ccc4c Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Tue, 27 Jun 2017 20:37:49 -0700 Subject: [PATCH 457/786] ASoC: wcd9xxx: Add wcd9xxx_core depends on relation for codecs Codec modules should depend on the Core module config value. CRs-fixed: 2068879 Change-Id: Iabedfa6c25223f9a03a2475716a7fb7a01f86b23 Signed-off-by: Karthikeyan Mani --- sound/soc/codecs/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index e7475fb1901e..f8008585554a 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -880,6 +880,7 @@ config SND_SOC_WCD934X_DSD config SND_SOC_WCD9335 tristate + depends on WCD9XXX_CODEC_CORE select SND_SOC_WCD9XXX select SND_SOC_WCD_MBHC select SND_SOC_WCD_MBHC_LEGACY @@ -887,6 +888,7 @@ config SND_SOC_WCD9335 config SND_SOC_WCD934X tristate + depends on WCD9XXX_CODEC_CORE select SND_SOC_WCD9XXX select AUDIO_EXT_CLK select SND_SOC_WCD_DSP_MGR -- GitLab From 365a78928fb6597505f2acde69fa67f3f507883f Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Tue, 27 Jun 2017 23:53:42 -0700 Subject: [PATCH 458/786] defconfig: msm: Remove codec specific config for core Remove wcd934x codec config as it has been replaced by a single core config that combines all codecs. 
CRs-fixed: 2068879 Change-Id: Ib31d81810cdefb65732a76f1043251b2f75d4cb9 Signed-off-by: Karthikeyan Mani --- arch/arm64/configs/sdm845-perf_defconfig | 1 - arch/arm64/configs/sdm845_defconfig | 1 - 2 files changed, 2 deletions(-) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index e27d3d5a5b52..e88d21c87577 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -332,7 +332,6 @@ CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_WCD9XXX_CODEC_CORE=y -CONFIG_WCD934X_CODEC=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_CPRH_KBSS=y CONFIG_REGULATOR_QPNP_LABIBB=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 31acac3e9699..a51c3d9b3e4b 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -339,7 +339,6 @@ CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_WCD9XXX_CODEC_CORE=y -CONFIG_WCD934X_CODEC=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_CPRH_KBSS=y CONFIG_REGULATOR_QPNP_LABIBB=y -- GitLab From 7826bb50f9bddc78fb85b1b862c03cb28c0f9d3a Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Thu, 29 Jun 2017 00:09:58 -0700 Subject: [PATCH 459/786] mfd: wcd9xxx: Remove unused codec specific configs Removed codec specific core configs as codec core drivers are selected under single config select value. CRs-fixed: 2068879 Change-Id: I9641d74b9500af39f1f89c2d82f79a9f21273bd8 Signed-off-by: Karthikeyan Mani --- drivers/mfd/Kconfig | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index dd6dbdace556..71341a746752 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1650,35 +1650,6 @@ config WCD9XXX_CODEC_CORE functions. This driver also hides the underlying bus related functionalities. -config WCD9335_CODEC - tristate "WCD9335 Codec" - select SLIMBUS - select SOUNDWIRE_WCD_CTRL - select MSM_CDC_SUPPLY - select MSM_CDC_PINCTRL - select REGMAP_ALLOW_WRITE_DEBUGFS - help - Enables the WCD9xxx codec core driver. The core driver provides - read/write capability to registers which are part of the - WCD9335 core and gives the ability to use the WCD9335 codec. - The WCD9335 codec support either I2C/I2S or Slimbus for - control and data exchnage with master processor. - -config WCD934X_CODEC - tristate "WCD934X Codec" - depends on SLIMBUS - select SOUNDWIRE_WCD_CTRL - select MSM_CDC_SUPPLY - select MSM_CDC_PINCTRL - select REGMAP_ALLOW_WRITE_DEBUGFS - select PINCTRL_WCD - help - Enables the WCD9xxx codec core driver. The core driver provides - read/write capability to registers which are part of the - WCD934X core and gives the ability to use the WCD934X codec. - The WCD934X codec supports either I2C/I2S or Slimbus for - control and data exchange with master processor. - menu "Multimedia Capabilities Port drivers" depends on ARCH_SA1100 -- GitLab From 38f645bbce5a803a931edd9cb92b744424a827c7 Mon Sep 17 00:00:00 2001 From: Deepak Katragadda Date: Thu, 29 Jun 2017 12:16:06 -0700 Subject: [PATCH 460/786] ARM: dts: msm: Add the hw-trigger flag to the IPE and BPS GDSCs on SDM845 Add the qcom,support-hw-trigger flag to the ipe_0_gdsc, ipe_1_gdsc and bps_gdsc GDSC nodes in order to allow enabling/disabling HW control for them. 
Change-Id: I0cfe5f1e2d96edfcbb73ba11df29577b499c4413 Signed-off-by: Deepak Katragadda --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index e1a0d771e2a9..09c772eb5597 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -4165,6 +4165,7 @@ }; &bps_gdsc { + qcom,support-hw-trigger; status = "ok"; }; @@ -4177,10 +4178,12 @@ }; &ipe_0_gdsc { + qcom,support-hw-trigger; status = "ok"; }; &ipe_1_gdsc { + qcom,support-hw-trigger; status = "ok"; }; -- GitLab From 795cd39bd9f4c56831789167a4a8008ecbe0528a Mon Sep 17 00:00:00 2001 From: Karthikeyan Mani Date: Thu, 29 Jun 2017 12:17:13 -0700 Subject: [PATCH 461/786] ASoC: wcd-spi: cleanup to remove unused SPI APIs spi data read and write are handled by WDSP_EVENT_GETOPS and can be removed from being exported, rather make them static. CRs-Fixed: 2068106 Change-Id: I815f0e7c2ac02236e23c7d1b98929dad93e48d6d Signed-off-by: Karthikeyan Mani --- include/sound/wcd-spi.h | 19 ------------------- sound/soc/codecs/wcd-spi.c | 6 ++---- 2 files changed, 2 insertions(+), 23 deletions(-) diff --git a/include/sound/wcd-spi.h b/include/sound/wcd-spi.h index a9c336177cb5..b85c68eba429 100644 --- a/include/sound/wcd-spi.h +++ b/include/sound/wcd-spi.h @@ -35,25 +35,6 @@ struct wcd_spi_msg { u32 flags; }; -#ifdef CONFIG_SND_SOC_WCD_SPI - -int wcd_spi_data_write(struct spi_device *spi, struct wcd_spi_msg *msg); -int wcd_spi_data_read(struct spi_device *spi, struct wcd_spi_msg *msg); - -#else - -int wcd_spi_data_write(struct spi_device *spi, struct wcd_spi_msg *msg) -{ - return -ENODEV; -} - -int wcd_spi_data_read(struct spi_device *spi, struct wcd_spi_msg *msg) -{ - return -ENODEV; -} - -#endif /* End of CONFIG_SND_SOC_WCD_SPI */ - struct wcd_spi_ops { struct spi_device *spi_dev; int (*read_dev)(struct spi_device *spi, struct wcd_spi_msg *msg); diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c index 6c01dafb42bc..957d6428427c 100644 --- a/sound/soc/codecs/wcd-spi.c +++ b/sound/soc/codecs/wcd-spi.c @@ -837,7 +837,7 @@ static int wcd_spi_data_xfer(struct spi_device *spi, * about the write are encapsulated in @msg. Write size should be multiple * of 4 bytes and write address should be 4-byte aligned. */ -int wcd_spi_data_write(struct spi_device *spi, +static int wcd_spi_data_write(struct spi_device *spi, struct wcd_spi_msg *msg) { if (!spi || !msg) { @@ -850,7 +850,6 @@ int wcd_spi_data_write(struct spi_device *spi, __func__, msg->remote_addr, msg->len); return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_WRITE); } -EXPORT_SYMBOL(wcd_spi_data_write); /* * wcd_spi_data_read: Read data from WCD SPI @@ -861,7 +860,7 @@ EXPORT_SYMBOL(wcd_spi_data_write); * about the read are encapsulated in @msg. Read size should be multiple * of 4 bytes and read address should be 4-byte aligned. 
*/ -int wcd_spi_data_read(struct spi_device *spi, +static int wcd_spi_data_read(struct spi_device *spi, struct wcd_spi_msg *msg) { if (!spi || !msg) { @@ -874,7 +873,6 @@ int wcd_spi_data_read(struct spi_device *spi, __func__, msg->remote_addr, msg->len); return wcd_spi_data_xfer(spi, msg, WCD_SPI_XFER_READ); } -EXPORT_SYMBOL(wcd_spi_data_read); static int wdsp_spi_dload_section(struct spi_device *spi, void *data) -- GitLab From da6ef63735fb0c6b8b471dce23c9aa8a5a0f45ec Mon Sep 17 00:00:00 2001 From: Kyle Piefer Date: Thu, 29 Jun 2017 13:18:51 -0700 Subject: [PATCH 462/786] msm: kgsl: Trace GMU register writes before adjusting offset Move the tracing of GMU register writes to before we subtract the gmu2gpu offset. This will make offsets in the trace consistent with the offsets in the A6XX register file. CRs-Fixed: 2062271 Change-Id: Ic6a29c564ed0445060dff081da3d45aad10b4ebb Signed-off-by: Kyle Piefer --- drivers/gpu/msm/adreno.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 36722738919b..6f465aa9f137 100644 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -2645,10 +2645,9 @@ static void adreno_gmu_regwrite(struct kgsl_device *device, void __iomem *reg; struct gmu_device *gmu = &device->gmu; - offsetwords -= gmu->gmu2gpu_offset; - trace_kgsl_regwrite(device, offsetwords, value); + offsetwords -= gmu->gmu2gpu_offset; reg = gmu->reg_virt + (offsetwords << 2); /* -- GitLab From dc7916f262de5f3e77b5ed087657c9f836caef12 Mon Sep 17 00:00:00 2001 From: Archana Sathyakumar Date: Thu, 29 Jun 2017 14:10:41 -0600 Subject: [PATCH 463/786] kernel: power: qos: Set PM_QOS_SUM type for each cpu If a memory bandwidth constraint is added, pm_qos_set_value_for_cpu() throws an BUG as PM_QOS_SUM type is not handled. Add the new value to the target_per_cpu variable to provide an overall memory bandwidth requirement for each cpu. Change-Id: I44b2954bb799ea1f8e21de5c590ea6fa483359ac Signed-off-by: Archana Sathyakumar --- kernel/power/qos.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 0854263cbe33..12fe7827aff7 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c @@ -284,6 +284,9 @@ static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c) if (req->node.prio > qos_val[cpu]) qos_val[cpu] = req->node.prio; break; + case PM_QOS_SUM: + qos_val[cpu] += req->node.prio; + break; default: BUG(); break; -- GitLab From 032c2e18d3d5b1bfc12996b35cb783d55888fe28 Mon Sep 17 00:00:00 2001 From: Alan Kwong Date: Thu, 29 Jun 2017 16:31:38 -0400 Subject: [PATCH 464/786] drm/msm/sde: correct inline rotator qos remap loop Inline rotator qos remap loop is not checking against loop index, and will access invalid vbif configuration entry. Correct loop exit bound check to compare against the loop index. 
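The class of bug fixed below is easy to miss in review: a for-loop whose condition tests the bound itself, "for (i = 0; count; i++)", never terminates for a non-zero bound and indexes past the end of the array. A small standalone illustration in plain userspace C with made-up names, not the sde_plane structures:

#include <stdio.h>

struct cfg {
	int xin_id;
};

static void remap_all(const struct cfg *cfgs, int count)
{
	int i;

	/*
	 * Broken form: for (i = 0; count; i++) -- the condition never
	 * involves 'i', so it stays true and cfgs[i] walks off the array.
	 * Correct form compares the loop index against the bound:
	 */
	for (i = 0; i < count; i++)
		printf("remap xin %d\n", cfgs[i].xin_id);
}

int main(void)
{
	struct cfg cfgs[] = { { 0 }, { 7 }, { 13 } };

	remap_all(cfgs, 3);
	return 0;
}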
Change-Id: I94cea9ac6211aea49fbdea378fcb8806a4c44cd6 Signed-off-by: Alan Kwong --- drivers/gpu/drm/msm/sde/sde_plane.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 2a98af45d2f6..67bfcf5d2ca5 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -1819,7 +1819,7 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane, rot_cmd->dst_planes = layout.num_planes; /* VBIF remapper settings */ - for (i = 0; rstate->rot_hw->caps->xin_count; i++) { + for (i = 0; i < rstate->rot_hw->caps->xin_count; i++) { const struct sde_rot_vbif_cfg *cfg = &rstate->rot_hw->caps->vbif_cfg[i]; -- GitLab From efd87da62e94863d9e75cc12d8b00c2b5bef7f1f Mon Sep 17 00:00:00 2001 From: Vicky Wallace Date: Mon, 5 Jun 2017 19:03:25 -0700 Subject: [PATCH 465/786] clk: qcom: clk-alpha-pll: Update round rate to use kHz for the divider For the PLL that have 32 bits support for the division ratio. The DIVIDER_ROUND_CLOSEST flag is designed to round the frequency to the closest Hz for the requested rate. However the Fabia PLLs have only 16 bits support for the division ratio. Using the closest rounding flag results in a parent PLL being configured with the rate larger than Fmax. This change solves the issue by allowing the frequency to round to nearest kHz. CRs-Fixed: 2048646 Change-Id: I336945df289e383dea2b831ec8aa24da2aca54c1 Signed-off-by: Vicky Wallace --- drivers/clk/qcom/clk-alpha-pll.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 4d3b4278f957..e7d3ee4396b0 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -815,7 +815,7 @@ static long clk_generic_pll_postdiv_round_rate(struct clk_hw *hw, return -EINVAL; return divider_round_rate(hw, rate, prate, pll->post_div_table, - pll->width, CLK_DIVIDER_ROUND_CLOSEST); + pll->width, CLK_DIVIDER_ROUND_KHZ); } static int clk_generic_pll_postdiv_set_rate(struct clk_hw *hw, -- GitLab From 04e777243808f7f346ca335f72da405a1951d9b4 Mon Sep 17 00:00:00 2001 From: Praneeth Paladugu Date: Wed, 21 Jun 2017 11:38:31 -0700 Subject: [PATCH 466/786] msm: vidc: Introduce Performance Monitoring System Enhance current DCVS algorithm to take care of Venus performance fluctuations by adjusting clock, bus BW using pending buffer counts. 
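One way to read the buffer-count based scaling described above (a hedged sketch under assumed names, not the vidc implementation): count how many output buffers currently sit outside the firmware and map that against two watermarks. Many buffers queued downstream means the firmware is keeping up and can run slower; very few means it is falling behind and needs a higher clock; anything in between stays at the nominal level.

enum load_level { LOAD_LOW, LOAD_NORM, LOAD_HIGH };

/*
 * buffers_outside_fw: output buffers currently held outside the firmware.
 * low_wm / high_wm:   illustrative watermarks, assuming low_wm < high_wm.
 */
static enum load_level pick_level(int buffers_outside_fw, int low_wm, int high_wm)
{
	if (buffers_outside_fw >= high_wm)
		return LOAD_LOW;	/* firmware is ahead, relax the clock */

	if (buffers_outside_fw <= low_wm)
		return LOAD_HIGH;	/* firmware is starved, raise the clock */

	return LOAD_NORM;		/* steady state */
}

The chosen level would then feed the usual clock and bus voting path on the next scaling pass.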
CRs-Fixed: 2012520 Change-Id: I448201ec596b71b692ee5b993fc36716f420612d Signed-off-by: Praneeth Paladugu --- .../msm/vidc/governors/msm_vidc_dyn_gov.c | 14 +- .../media/platform/msm/vidc/msm_vidc_clocks.c | 143 ++++++++++++------ .../media/platform/msm/vidc/msm_vidc_clocks.h | 12 +- .../platform/msm/vidc/msm_vidc_internal.h | 1 + .../media/platform/msm/vidc/vidc_hfi_api.h | 1 + 5 files changed, 113 insertions(+), 58 deletions(-) diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c index 06187687beb0..9daf0535aaa8 100644 --- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c +++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c @@ -387,13 +387,23 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, integer_part = d->compression_ratio >> 16; frac_part = - ((d->compression_ratio - (integer_part * 65536)) * 100) >> 16; + ((d->compression_ratio - (integer_part << 16)) * 100) >> 16; dpb_read_compression_factor = FP(integer_part, frac_part, 100); + integer_part = d->complexity_factor >> 16; + frac_part = + ((d->complexity_factor - (integer_part << 16)) * 100) >> 16; + + motion_vector_complexity = FP(integer_part, frac_part, 100); + dpb_write_compression_factor = !dpb_compression_enabled ? FP_ONE : __compression_ratio(__lut(width, height, fps), opb_bpp); + dpb_write_compression_factor = d->use_dpb_read ? + dpb_read_compression_factor : + dpb_write_compression_factor; + opb_compression_factor = !opb_compression_enabled ? FP_ONE : __compression_ratio(__lut(width, height, fps), opb_bpp); @@ -437,8 +447,6 @@ static unsigned long __calculate_decoder(struct vidc_bus_vote_data *d, lcu_per_frame * fps / bps(1)); ddr.line_buffer_write = ddr.line_buffer_read; - motion_vector_complexity = FP_INT(4); - bw_for_1x_8bpc = fp_div(FP_INT(width * height), FP_INT(32 * 8)); bw_for_1x_8bpc = fp_mult(bw_for_1x_8bpc, diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c index 6867735aeca7..cb3c526ec172 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c @@ -16,6 +16,9 @@ #include "msm_vidc_debug.h" #include "msm_vidc_clocks.h" +#define MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR 1 +#define MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR 4 + static inline unsigned long int get_ubwc_compression_ratio( struct ubwc_cr_stats_info_type ubwc_stats_info) { @@ -91,16 +94,30 @@ static int fill_recon_stats(struct msm_vidc_inst *inst, struct vidc_bus_vote_data *vote_data) { struct recon_buf *binfo; - u32 CR = 0, CF = 0; + u32 CR = 0, min_cf = MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR, + max_cf = MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR; mutex_lock(&inst->reconbufs.lock); list_for_each_entry(binfo, &inst->reconbufs.list, list) { CR = max(CR, binfo->CR); - CF = max(CF, binfo->CF); + min_cf = min(min_cf, binfo->CF); + max_cf = max(max_cf, binfo->CF); } mutex_unlock(&inst->reconbufs.lock); - vote_data->complexity_factor = CF; vote_data->compression_ratio = CR; + + vote_data->complexity_factor = max_cf; + vote_data->use_dpb_read = false; + if (inst->clk_data.load <= inst->clk_data.load_norm) { + vote_data->complexity_factor = min_cf; + vote_data->use_dpb_read = true; + } + + dprintk(VIDC_DBG, + "Complression Ratio = %d Complexity Factor = %d\n", + vote_data->compression_ratio, + vote_data->complexity_factor); + return 0; } @@ -129,6 +146,32 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) 
mutex_lock(&core->lock); list_for_each_entry(inst, &core->instances, list) { int codec = 0; + struct msm_vidc_buffer *temp, *next; + u32 filled_len = 0; + u32 device_addr = 0; + + if (!inst) { + dprintk(VIDC_ERR, "%s Invalid args\n", + __func__); + return -EINVAL; + } + + mutex_lock(&inst->registeredbufs.lock); + list_for_each_entry_safe(temp, next, + &inst->registeredbufs.list, list) { + if (temp->vvb.vb2_buf.type == + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + filled_len = max(filled_len, + temp->vvb.vb2_buf.planes[0].bytesused); + device_addr = temp->smem[0].device_addr; + } + } + mutex_unlock(&inst->registeredbufs.lock); + + if (!filled_len || !device_addr) { + dprintk(VIDC_DBG, "%s No ETBs\n", __func__); + continue; + } ++vote_data_count; @@ -253,17 +296,17 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst) /* Buffers outside FW are with display */ buffers_outside_fw = total_output_buf - fw_pending_bufs; - dprintk(VIDC_DBG, + dprintk(VIDC_PROF, "Counts : total_output_buf = %d fw_pending_bufs = %d buffers_outside_fw = %d\n", total_output_buf, fw_pending_bufs, buffers_outside_fw); - if (buffers_outside_fw >= dcvs->min_threshold && - dcvs->load > dcvs->load_low) { + if (buffers_outside_fw >= dcvs->min_threshold) dcvs->load = dcvs->load_low; - } else if (buffers_outside_fw < dcvs->min_threshold && - dcvs->load == dcvs->load_low) { + else if (buffers_outside_fw <= dcvs->max_threshold) dcvs->load = dcvs->load_high; - } + else + dcvs->load = dcvs->load_norm; + return rc; } @@ -291,8 +334,6 @@ static void msm_vidc_update_freq_entry(struct msm_vidc_inst *inst, mutex_unlock(&inst->freqs.lock); } -// TODO this needs to be removed later and use queued_list - void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst, u32 device_addr) { @@ -322,9 +363,8 @@ static unsigned long msm_vidc_adjust_freq(struct msm_vidc_inst *inst) /* If current requirement is within DCVS limits, try DCVS. */ - if (freq < inst->clk_data.load_high) { + if (freq < inst->clk_data.load_norm) { dprintk(VIDC_DBG, "Calling DCVS now\n"); - // TODO calling DCVS here may reduce the residency. Re-visit. msm_dcvs_scale_clocks(inst); freq = inst->clk_data.load; } @@ -346,6 +386,18 @@ void msm_comm_free_freq_table(struct msm_vidc_inst *inst) mutex_unlock(&inst->freqs.lock); } +static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core) +{ + struct allowed_clock_rates_table *allowed_clks_tbl = NULL; + unsigned long freq = 0; + + allowed_clks_tbl = core->resources.allowed_clks_tbl; + freq = allowed_clks_tbl[0].clock_rate; + dprintk(VIDC_PROF, "Max rate = %lu", freq); + + return freq; +} + static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, u32 filled_len) { @@ -377,17 +429,17 @@ static unsigned long msm_vidc_calc_freq(struct msm_vidc_inst *inst, vsp_cycles = mbs_per_second * inst->clk_data.entry->vsp_cycles; /* 10 / 7 is overhead factor */ - vsp_cycles += (inst->prop.fps * filled_len * 8 * 10) / 7; + vsp_cycles += ((inst->prop.fps * filled_len * 8) / 7) * 10; } else { - // TODO return Min or Max ? 
dprintk(VIDC_ERR, "Unknown session type = %s\n", __func__); - return freq; + return msm_vidc_max_freq(inst->core); } freq = max(vpp_cycles, vsp_cycles); - dprintk(VIDC_PROF, "%s Inst %pK : Freq = %lu\n", __func__, inst, freq); + dprintk(VIDC_PROF, "%s Inst %pK : Filled Len = %d Freq = %lu\n", + __func__, inst, filled_len, freq); return freq; } @@ -429,18 +481,6 @@ static int msm_vidc_set_clocks(struct msm_vidc_core *core) return rc; } -static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core) -{ - struct allowed_clock_rates_table *allowed_clks_tbl = NULL; - unsigned long freq = 0; - - allowed_clks_tbl = core->resources.allowed_clks_tbl; - freq = allowed_clks_tbl[0].clock_rate; - dprintk(VIDC_PROF, "Max rate = %lu", freq); - - return freq; -} - int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst) { struct v4l2_ctrl *ctrl = NULL; @@ -531,8 +571,7 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst) mutex_lock(&inst->registeredbufs.lock); list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) { if (temp->vvb.vb2_buf.type == - V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && - temp->deferred) { + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { filled_len = max(filled_len, temp->vvb.vb2_buf.planes[0].bytesused); device_addr = temp->smem[0].device_addr; @@ -541,8 +580,8 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst) mutex_unlock(&inst->registeredbufs.lock); if (!filled_len || !device_addr) { - dprintk(VIDC_PROF, "No Change in frequency\n"); - goto decision_done; + dprintk(VIDC_DBG, "%s No ETBs\n", __func__); + goto no_clock_change; } freq = msm_vidc_calc_freq(inst, filled_len); @@ -559,8 +598,9 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst) else inst->clk_data.curr_freq = freq; -decision_done: msm_vidc_set_clocks(inst->core); + +no_clock_change: return 0; } @@ -607,7 +647,6 @@ int msm_dcvs_try_enable(struct msm_vidc_inst *inst) } inst->clk_data.dcvs_mode = true; - // TODO : Update with proper number based on on-target tuning. inst->clk_data.extra_capture_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS; inst->clk_data.extra_output_buffer_count = @@ -645,12 +684,13 @@ int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst) static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs) { - dprintk(VIDC_DBG, - "DCVS: Load_Low %d, Load High %d\n", + dprintk(VIDC_PROF, + "DCVS: Load_Low %d, Load Norm %d, Load High %d\n", dcvs->load_low, + dcvs->load_norm, dcvs->load_high); - dprintk(VIDC_DBG, + dprintk(VIDC_PROF, "DCVS: min_threshold %d, max_threshold %d\n", dcvs->min_threshold, dcvs->max_threshold); } @@ -663,6 +703,7 @@ void msm_clock_data_reset(struct msm_vidc_inst *inst) u64 total_freq = 0, rate = 0, load; int cycles; struct clock_data *dcvs; + struct hal_buffer_requirements *output_buf_req; dprintk(VIDC_DBG, "Init DCVS Load\n"); @@ -683,12 +724,22 @@ void msm_clock_data_reset(struct msm_vidc_inst *inst) cycles; dcvs->buffer_type = HAL_BUFFER_INPUT; - // TODO : Update with proper no based on Buffer counts change. - dcvs->min_threshold = 7; + dcvs->min_threshold = + msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT); } else if (inst->session_type == MSM_VIDC_DECODER) { dcvs->buffer_type = msm_comm_get_hal_output_buffer(inst); - // TODO : Update with proper no based on Buffer counts change. 
- dcvs->min_threshold = 4; + output_buf_req = get_buff_req_buffer(inst, + dcvs->buffer_type); + if (!output_buf_req) { + dprintk(VIDC_ERR, + "%s: No bufer req for buffer type %x\n", + __func__, dcvs->buffer_type); + return; + } + dcvs->max_threshold = output_buf_req->buffer_count_actual - + output_buf_req->buffer_count_min_host + 1; + dcvs->min_threshold = + msm_vidc_get_extra_buff_count(inst, dcvs->buffer_type); } else { return; } @@ -701,8 +752,12 @@ void msm_clock_data_reset(struct msm_vidc_inst *inst) break; } - dcvs->load = dcvs->load_high = rate; - dcvs->load_low = allowed_clks_tbl[i+1].clock_rate; + dcvs->load = dcvs->load_norm = rate; + + dcvs->load_low = i < (core->resources.allowed_clks_tbl_size - 1) ? + allowed_clks_tbl[i+1].clock_rate : dcvs->load_norm; + dcvs->load_high = i > 0 ? allowed_clks_tbl[i-1].clock_rate : + dcvs->load_norm; inst->clk_data.buffer_counter = 0; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h index e1226e4ceb6a..705cb7c3b262 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h @@ -15,21 +15,11 @@ #define _MSM_VIDC_CLOCKS_H_ #include "msm_vidc_internal.h" -/* Low threshold for encoder dcvs */ -#define DCVS_ENC_LOW_THR 4 -/* High threshold for encoder dcvs */ -#define DCVS_ENC_HIGH_THR 9 /* extra o/p buffers in case of encoder dcvs */ #define DCVS_ENC_EXTRA_OUTPUT_BUFFERS 2 + /* extra o/p buffers in case of decoder dcvs */ #define DCVS_DEC_EXTRA_OUTPUT_BUFFERS 4 -/* Default threshold to reduce the core frequency */ -#define DCVS_NOMINAL_THRESHOLD 8 -/* Default threshold to increase the core frequency */ -#define DCVS_TURBO_THRESHOLD 4 - -/* Considering one safeguard buffer */ -#define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1) void msm_clock_data_reset(struct msm_vidc_inst *inst); int msm_vidc_update_operating_rate(struct msm_vidc_inst *inst); diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h index 22772ef021bc..373dbba33ce8 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h @@ -243,6 +243,7 @@ struct clock_data { int buffer_counter; int load; int load_low; + int load_norm; int load_high; int min_threshold; int max_threshold; diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h index 4b3b2fd39e3b..79ce858eb1c0 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h @@ -1339,6 +1339,7 @@ struct vidc_bus_vote_data { int output_height, output_width; int compression_ratio; int complexity_factor; + bool use_dpb_read; unsigned int lcu_size; enum msm_vidc_power_mode power_mode; enum hal_work_mode work_mode; -- GitLab From 74906ec348eebfec4d7c333f21ec1bd8f3515e71 Mon Sep 17 00:00:00 2001 From: Praneeth Paladugu Date: Thu, 29 Jun 2017 14:59:27 -0700 Subject: [PATCH 467/786] ARM: dts: msm: Add new clock level for Venus for sdm845 Add SVS3 clock level for Venus so that video driver votes for correct Venus clock based on current load. 
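For illustration only: the new 100 MHz SVS3 entry is only useful if the
driver selects the lowest entry in qcom,allowed-clock-rates that still
covers the cycle load computed by msm_vidc_calc_freq(). A minimal sketch
of such a selection is shown below; it is not part of this patch, the
helper name is hypothetical, and it assumes the table is ordered from
highest to lowest rate, as implied by msm_vidc_max_freq() reading index 0
for the maximum rate.

	/*
	 * Hypothetical helper: return the lowest allowed rate that still
	 * covers the computed load "freq", falling back to the maximum.
	 */
	static unsigned long msm_vidc_pick_rate(struct msm_vidc_core *core,
			unsigned long freq)
	{
		struct allowed_clock_rates_table *tbl =
			core->resources.allowed_clks_tbl;
		unsigned long rate = tbl[0].clock_rate;	/* maximum rate */
		int i;

		/* walk from the lowest rate upwards; stop at the first
		 * entry that satisfies the requested frequency */
		for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
			if (tbl[i].clock_rate >= freq) {
				rate = tbl[i].clock_rate;
				break;
			}
		}

		return rate;
	}

With the additional SVS3 entry present, lightly loaded sessions can
settle at 100 MHz instead of being rounded up to 200 MHz.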
CRs-Fixed: 2069458 Change-Id: I358e39e11b114f53d348000afdf810d92b33dfd3 Signed-off-by: Praneeth Paladugu --- arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi index 71c521a86d25..1c07c5ef4e93 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-vidc.dtsi @@ -45,8 +45,8 @@ "bus_clk", "core0_clk", "core0_bus_clk", "core1_clk", "core1_bus_clk"; qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0 0x1 0x0>; - qcom,allowed-clock-rates = <200000000 320000000 380000000 - 444000000 533000000>; + qcom,allowed-clock-rates = <100000000 200000000 320000000 + 380000000 444000000 533000000>; /* Buses */ bus_cnoc { -- GitLab From 60d41685451ad9deac291d4e14f4f5dceed930c8 Mon Sep 17 00:00:00 2001 From: Vicky Wallace Date: Mon, 5 Jun 2017 19:34:56 -0700 Subject: [PATCH 468/786] clk: qcom: Fix the clock scaling issue for GFX3D clock For the setup GFX CRC, the CRC subdivider will add 1 to the GFX3D clock divider. It results in 1/2 of the requested clock rate. Add clock fixed factor to have a fixed divider 1 and multiplier by 2 for GFX3D clock source to address this issue. CRs-Fixed: 2048646 Change-Id: I8873c1ebb262088c4bc6f1ac5c313c058448ee9f Signed-off-by: Vicky Wallace --- drivers/clk/qcom/gpucc-sdm845.c | 68 ++++++++++++++++++++++++++------- 1 file changed, 55 insertions(+), 13 deletions(-) diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c index 8442890f52d1..a0aaafc0e33f 100644 --- a/drivers/clk/qcom/gpucc-sdm845.c +++ b/drivers/clk/qcom/gpucc-sdm845.c @@ -37,7 +37,6 @@ #include "vdd-level-sdm845.h" #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } -#define F_SLEW(f, s, h, m, n, sf) { (f), (s), (2 * (h) - 1), (m), (n), (sf) } static int vdd_gx_corner[] = { RPMH_REGULATOR_LEVEL_OFF, /* VDD_GX_NONE */ @@ -67,6 +66,7 @@ enum { P_GPU_CC_PLL1_OUT_EVEN, P_GPU_CC_PLL1_OUT_MAIN, P_GPU_CC_PLL1_OUT_ODD, + P_CRC_DIV, }; static const struct parent_map gpu_cc_parent_map_0[] = { @@ -107,8 +107,28 @@ static const char * const gpu_cc_parent_names_1[] = { "core_bi_pll_test_se", }; +static const struct parent_map gpu_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_CRC_DIV, 1 }, + { P_GPU_CC_PLL0_OUT_ODD, 2 }, + { P_GPU_CC_PLL1_OUT_EVEN, 3 }, + { P_GPU_CC_PLL1_OUT_ODD, 4 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_CORE_BI_PLL_TEST_SE, 7 }, +}; + +static const char * const gpu_cc_parent_names_2[] = { + "bi_tcxo", + "crc_div", + "gpu_cc_pll0_out_odd", + "gpu_cc_pll1_out_even", + "gpu_cc_pll1_out_odd", + "gcc_gpu_gpll0_clk_src", + "core_bi_pll_test_se", +}; + static struct pll_vco fabia_vco[] = { - { 250000000, 2000000000, 0 }, + { 249600000, 2000000000, 0 }, { 125000000, 1000000000, 1 }, }; @@ -186,12 +206,27 @@ static struct clk_rcg2 gpu_cc_gmu_clk_src = { }, }; +static struct clk_fixed_factor crc_div = { + .mult = 1, + .div = 1, + .hw.init = &(struct clk_init_data){ + .name = "crc_div", + .parent_names = (const char *[]){ "gpu_cc_pll0_out_even" }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_fixed_factor_ops, + }, +}; + static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = { - F_SLEW(147000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 294000000), - F_SLEW(210000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 420000000), - F_SLEW(338000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 676000000), - F_SLEW(425000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 850000000), - F_SLEW(600000000, P_GPU_CC_PLL0_OUT_EVEN, 1, 0, 0, 
1200000000), + F(147000000, P_CRC_DIV, 1, 0, 0), + F(210000000, P_CRC_DIV, 1, 0, 0), + F(280000000, P_CRC_DIV, 1, 0, 0), + F(338000000, P_CRC_DIV, 1, 0, 0), + F(425000000, P_CRC_DIV, 1, 0, 0), + F(487000000, P_CRC_DIV, 1, 0, 0), + F(548000000, P_CRC_DIV, 1, 0, 0), + F(600000000, P_CRC_DIV, 1, 0, 0), { } }; @@ -199,12 +234,12 @@ static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = { .cmd_rcgr = 0x101c, .mnd_width = 0, .hid_width = 5, - .parent_map = gpu_cc_parent_map_1, + .parent_map = gpu_cc_parent_map_2, .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src, .flags = FORCE_ENABLE_RCG, .clkr.hw.init = &(struct clk_init_data){ .name = "gpu_cc_gx_gfx3d_clk_src", - .parent_names = gpu_cc_parent_names_1, + .parent_names = gpu_cc_parent_names_2, .num_parents = 7, .flags = CLK_SET_RATE_PARENT, .ops = &clk_rcg2_ops, @@ -532,16 +567,23 @@ static int gpu_cc_gfx_sdm845_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { - dev_err(&pdev->dev, "Failed to get resources for clock_gfxcc.\n"); + dev_err(&pdev->dev, "Failed to get resources for clock_gfxcc\n"); return -EINVAL; } base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (IS_ERR(base)) { - dev_err(&pdev->dev, "Failed to ioremap the GFX CC base.\n"); + dev_err(&pdev->dev, "Failed to ioremap the GFX CC base\n"); return PTR_ERR(base); } + /* Register clock fixed factor for CRC divide. */ + ret = devm_clk_hw_register(&pdev->dev, &crc_div.hw); + if (ret) { + dev_err(&pdev->dev, "Failed to register hardware clock\n"); + return ret; + } + regmap = devm_regmap_init_mmio(&pdev->dev, base, gpu_cc_gfx_sdm845_desc.config); if (IS_ERR(regmap)) { @@ -577,7 +619,7 @@ static int gpu_cc_gfx_sdm845_probe(struct platform_device *pdev) return ret; } - dev_info(&pdev->dev, "Registered GFX CC clocks.\n"); + dev_info(&pdev->dev, "Registered GFX CC clocks\n"); return ret; } @@ -626,7 +668,7 @@ static int gpu_cc_sdm845_probe(struct platform_device *pdev) return ret; } - dev_info(&pdev->dev, "Registered GPU CC clocks.\n"); + dev_info(&pdev->dev, "Registered GPU CC clocks\n"); return ret; } -- GitLab From eed1deac09eae323eebc93750f5776c3b71e4859 Mon Sep 17 00:00:00 2001 From: Veerabhadrarao Badiganti Date: Wed, 21 Jun 2017 19:27:32 +0530 Subject: [PATCH 469/786] mmc: sdhci-msm: Don't turnoff Vcc during initialization As per emmc specification, device should be notified either with power-off or sleep notification before turning off the Vcc. Failing to do so might affect the device longevity. In mmc driver initialization phase even before device probing gets completed, Vcc is getting turned off without these notifications. Since it can't send commands at this stage, So just ensure that Vcc is not turned off till initialization gets completed. Change-Id: I0bbf0077357d66c888147be40a0c5d312b9ce063 Signed-off-by: Veerabhadrarao Badiganti --- drivers/mmc/host/sdhci-msm.c | 23 ++++++----------------- drivers/mmc/host/sdhci-msm.h | 1 + 2 files changed, 7 insertions(+), 17 deletions(-) diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index fea297143c11..f3f181d1f16d 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -2345,21 +2345,6 @@ static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata, return ret; } -/* - * Reset vreg by ensuring it is off during probe. 
A call - * to enable vreg is needed to balance disable vreg - */ -static int sdhci_msm_vreg_reset(struct sdhci_msm_pltfm_data *pdata) -{ - int ret; - - ret = sdhci_msm_setup_vreg(pdata, 1, true); - if (ret) - return ret; - ret = sdhci_msm_setup_vreg(pdata, 0, true); - return ret; -} - /* This init function should be called only once for each SDHC slot */ static int sdhci_msm_vreg_init(struct device *dev, struct sdhci_msm_pltfm_data *pdata, @@ -2394,7 +2379,7 @@ static int sdhci_msm_vreg_init(struct device *dev, if (ret) goto vdd_reg_deinit; } - ret = sdhci_msm_vreg_reset(pdata); + if (ret) dev_err(dev, "vreg reset failed (%d)\n", ret); goto out; @@ -2571,7 +2556,9 @@ static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) io_level = REQ_IO_HIGH; } if (irq_status & CORE_PWRCTL_BUS_OFF) { - ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false); + if (msm_host->pltfm_init_done) + ret = sdhci_msm_setup_vreg(msm_host->pdata, + false, false); if (!ret) { ret = sdhci_msm_setup_pins(msm_host->pdata, false); ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata, @@ -4512,6 +4499,8 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto vreg_deinit; } + msm_host->pltfm_init_done = true; + pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS); diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h index 2b907e94f7d1..c536a7db0fee 100644 --- a/drivers/mmc/host/sdhci-msm.h +++ b/drivers/mmc/host/sdhci-msm.h @@ -225,6 +225,7 @@ struct sdhci_msm_host { bool mci_removed; const struct sdhci_msm_offset *offset; bool core_3_0v_support; + bool pltfm_init_done; }; extern char *saved_command_line; -- GitLab From a24cb2cce11c76cbed3ef37608010eb2cbdd083b Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Tue, 6 Jun 2017 20:39:54 +0530 Subject: [PATCH 470/786] ARM: dts: msm: Add ipc-spinlock device node for sdm670 Add the ipc-spinlock device node to facilitate locking between apps and non-apps processors. CRs-Fixed: 2059063 Change-Id: I474dcf4e29f981f6eab69e09368d513dbb2c17ee Signed-off-by: Dhoat Harpal --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 90d454726fba..414ddc5121f1 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -681,6 +681,12 @@ "l3-scu-faultirq"; }; + qcom,ipc-spinlock@1f40000 { + compatible = "qcom,ipc-spinlock-sfpb"; + reg = <0x1f40000 0x8000>; + qcom,num-locks = <8>; + }; + qcom,chd_sliver { compatible = "qcom,core-hang-detect"; label = "silver"; -- GitLab From dd9bfaf1f1d345419a0dee049c7f1cd11986d1a3 Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Tue, 6 Jun 2017 20:43:16 +0530 Subject: [PATCH 471/786] ARM: dts: msm: Add SMEM device node for sdm670 Add the SMEM device node to enable the use of shared memory by different processors in the SoC. 
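The reg-names strings in this node are how a driver locates each region
by name rather than by index. A rough sketch of that lookup is shown
below; it is illustrative only (the real SMEM driver also parses the
shared memory TOC, takes the remote spinlock, and so on), and the
function name is hypothetical.

	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	/* Illustrative: map the main "smem" region named in the reg-names
	 * property of the node added by this patch. */
	static int smem_map_example(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *smem_base;

		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smem");
		if (!res)
			return -ENODEV;

		smem_base = devm_ioremap(&pdev->dev, res->start,
					 resource_size(res));
		if (!smem_base)
			return -ENOMEM;

		dev_dbg(&pdev->dev, "smem region mapped: %pR\n", res);
		return 0;
	}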
CRs-Fixed: 2059063 Change-Id: I87a11e07cdf5f718ac24040500b2bf0c23fdb948 Signed-off-by: Dhoat Harpal --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 414ddc5121f1..58d62ef0f9d1 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -687,6 +687,17 @@ qcom,num-locks = <8>; }; + qcom,smem@86000000 { + compatible = "qcom,smem"; + reg = <0x86000000 0x200000>, + <0x17911008 0x4>, + <0x778000 0x7000>, + <0x1fd4000 0x8>; + reg-names = "smem", "irq-reg-base", "aux-mem1", + "smem_targ_info_reg"; + qcom,mpu-enabled; + }; + qcom,chd_sliver { compatible = "qcom,core-hang-detect"; label = "silver"; -- GitLab From 466ffcc86cb376ed23a57511c176b426f91b9c0c Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Tue, 6 Jun 2017 20:54:51 +0530 Subject: [PATCH 472/786] ARM: dts: msm: Add G-Link SMEM Transport device nodes for sdm670 These device nodes allow the G-Link SMEM Transport to function, which allows G-Link to use shared memory as an underlying physical transport. CRs-Fixed: 2059063 Change-Id: I8ba268711d75d77348cf9564c3760a154c9742c4 Signed-off-by: Dhoat Harpal --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 71 ++++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 58d62ef0f9d1..a68292d58e36 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -698,6 +698,77 @@ qcom,mpu-enabled; }; + qcom,glink-smem-native-xprt-modem@86000000 { + compatible = "qcom,glink-smem-native-xprt"; + reg = <0x86000000 0x200000>, + <0x1799000c 0x4>; + reg-names = "smem", "irq-reg-base"; + qcom,irq-mask = <0x1000>; + interrupts = ; + label = "mpss"; + }; + + qcom,glink-smem-native-xprt-adsp@86000000 { + compatible = "qcom,glink-smem-native-xprt"; + reg = <0x86000000 0x200000>, + <0x1799000c 0x4>; + reg-names = "smem", "irq-reg-base"; + qcom,irq-mask = <0x100>; + interrupts = ; + label = "lpass"; + qcom,qos-config = <&glink_qos_adsp>; + qcom,ramp-time = <0xaf>; + }; + + glink_qos_adsp: qcom,glink-qos-config-adsp { + compatible = "qcom,glink-qos-config"; + qcom,flow-info = <0x3c 0x0>, + <0x3c 0x0>, + <0x3c 0x0>, + <0x3c 0x0>; + qcom,mtu-size = <0x800>; + qcom,tput-stats-cycle = <0xa>; + }; + + glink_spi_xprt_wdsp: qcom,glink-spi-xprt-wdsp { + compatible = "qcom,glink-spi-xprt"; + label = "wdsp"; + qcom,remote-fifo-config = <&glink_fifo_wdsp>; + qcom,qos-config = <&glink_qos_wdsp>; + qcom,ramp-time = <0x10>, + <0x20>, + <0x30>, + <0x40>; + }; + + glink_fifo_wdsp: qcom,glink-fifo-config-wdsp { + compatible = "qcom,glink-fifo-config"; + qcom,out-read-idx-reg = <0x12000>; + qcom,out-write-idx-reg = <0x12004>; + qcom,in-read-idx-reg = <0x1200C>; + qcom,in-write-idx-reg = <0x12010>; + }; + + glink_qos_wdsp: qcom,glink-qos-config-wdsp { + compatible = "qcom,glink-qos-config"; + qcom,flow-info = <0x80 0x0>, + <0x70 0x1>, + <0x60 0x2>, + <0x50 0x3>; + qcom,mtu-size = <0x800>; + qcom,tput-stats-cycle = <0xa>; + }; + + qcom,glink-smem-native-xprt-cdsp@86000000 { + compatible = "qcom,glink-smem-native-xprt"; + reg = <0x86000000 0x200000>, + <0x1799000c 0x4>; + reg-names = "smem", "irq-reg-base"; + qcom,irq-mask = <0x10>; + interrupts = ; + label = "cdsp"; + }; + qcom,chd_sliver { compatible = "qcom,core-hang-detect"; label = "silver"; -- GitLab From 9cb73cca14167b4031a0501589a85007326d6f49 Mon Sep 17 00:00:00 2001 From: Dhoat Harpal 
Date: Tue, 6 Jun 2017 20:58:14 +0530 Subject: [PATCH 473/786] ARM: dts: msm: Add G-Link SSR device nodes for sdm670 SSR device nodes contain the list of subsystems to notify when an SSR for a given subsystem occurs. This information enables G-Link to handle closing and re-opening channels in the case of SSR. CRs-Fixed: 2059063 Change-Id: I1205e590384c34caae735034be5c0a444be94279 Signed-off-by: Dhoat Harpal --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index a68292d58e36..5db3360b51ea 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -769,6 +769,30 @@ label = "cdsp"; }; + glink_mpss: qcom,glink-ssr-modem { + compatible = "qcom,glink_ssr"; + label = "modem"; + qcom,edge = "mpss"; + qcom,notify-edges = <&glink_lpass>, <&glink_cdsp>; + qcom,xprt = "smem"; + }; + + glink_lpass: qcom,glink-ssr-adsp { + compatible = "qcom,glink_ssr"; + label = "adsp"; + qcom,edge = "lpass"; + qcom,notify-edges = <&glink_mpss>, <&glink_cdsp>; + qcom,xprt = "smem"; + }; + + glink_cdsp: qcom,glink-ssr-cdsp { + compatible = "qcom,glink_ssr"; + label = "cdsp"; + qcom,edge = "cdsp"; + qcom,notify-edges = <&glink_mpss>, <&glink_lpass>; + qcom,xprt = "smem"; + }; + qcom,chd_sliver { compatible = "qcom,core-hang-detect"; label = "silver"; -- GitLab From 11d34486ff9d4d4fc44a2857030db8249c8ca239 Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Tue, 6 Jun 2017 21:00:14 +0530 Subject: [PATCH 474/786] ARM: dts: msm: Add glink_pkt devices for sdm670 Glink_pkt devices expose access to certain SMD resources to userspace. CRs-Fixed: 2059063 Change-Id: I6a545466c21f7cd673093c627b895590c550e356 Signed-off-by: Dhoat Harpal --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 60 ++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 5db3360b51ea..bf753eb1b497 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -793,6 +793,66 @@ qcom,xprt = "smem"; }; + qcom,glink_pkt { + compatible = "qcom,glinkpkt"; + + qcom,glinkpkt-at-mdm0 { + qcom,glinkpkt-transport = "smem"; + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DS"; + qcom,glinkpkt-dev-name = "at_mdm0"; + }; + + qcom,glinkpkt-loopback_cntl { + qcom,glinkpkt-transport = "lloop"; + qcom,glinkpkt-edge = "local"; + qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT"; + qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl"; + }; + + qcom,glinkpkt-loopback_data { + qcom,glinkpkt-transport = "lloop"; + qcom,glinkpkt-edge = "local"; + qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT"; + qcom,glinkpkt-dev-name = "glink_pkt_loopback"; + }; + + qcom,glinkpkt-apr-apps2 { + qcom,glinkpkt-transport = "smem"; + qcom,glinkpkt-edge = "adsp"; + qcom,glinkpkt-ch-name = "apr_apps2"; + qcom,glinkpkt-dev-name = "apr_apps2"; + }; + + qcom,glinkpkt-data40-cntl { + qcom,glinkpkt-transport = "smem"; + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DATA40_CNTL"; + qcom,glinkpkt-dev-name = "smdcntl8"; + }; + + qcom,glinkpkt-data1 { + qcom,glinkpkt-transport = "smem"; + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DATA1"; + qcom,glinkpkt-dev-name = "smd7"; + }; + + qcom,glinkpkt-data4 { + qcom,glinkpkt-transport = "smem"; + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DATA4"; + qcom,glinkpkt-dev-name = "smd8"; + }; + + qcom,glinkpkt-data11 { + 
qcom,glinkpkt-transport = "smem"; + qcom,glinkpkt-edge = "mpss"; + qcom,glinkpkt-ch-name = "DATA11"; + qcom,glinkpkt-dev-name = "smd11"; + }; + }; + qcom,chd_sliver { compatible = "qcom,core-hang-detect"; label = "silver"; -- GitLab From 22dafa995e4cd156ea339e285000e223ce51b4a7 Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Tue, 6 Jun 2017 21:03:34 +0530 Subject: [PATCH 475/786] ARM: dts: msm: Add IPC Router devices for sdm670 The IPC Router devices define the topology for high-level interprocessor communication within the SoC. CRs-Fixed: 2059063 Change-Id: I03fa919a67a2add2a8f87b9a1422690ce8aa9d69 Signed-off-by: Dhoat Harpal --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 35 ++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index bf753eb1b497..13bd0981fab2 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -793,6 +793,41 @@ qcom,xprt = "smem"; }; + qcom,ipc_router { + compatible = "qcom,ipc_router"; + qcom,node-id = <1>; + }; + + qcom,ipc_router_modem_xprt { + compatible = "qcom,ipc_router_glink_xprt"; + qcom,ch-name = "IPCRTR"; + qcom,xprt-remote = "mpss"; + qcom,glink-xprt = "smem"; + qcom,xprt-linkid = <1>; + qcom,xprt-version = <1>; + qcom,fragmented-data; + }; + + qcom,ipc_router_q6_xprt { + compatible = "qcom,ipc_router_glink_xprt"; + qcom,ch-name = "IPCRTR"; + qcom,xprt-remote = "lpass"; + qcom,glink-xprt = "smem"; + qcom,xprt-linkid = <1>; + qcom,xprt-version = <1>; + qcom,fragmented-data; + }; + + qcom,ipc_router_cdsp_xprt { + compatible = "qcom,ipc_router_glink_xprt"; + qcom,ch-name = "IPCRTR"; + qcom,xprt-remote = "cdsp"; + qcom,glink-xprt = "smem"; + qcom,xprt-linkid = <1>; + qcom,xprt-version = <1>; + qcom,fragmented-data; + }; + qcom,glink_pkt { compatible = "qcom,glinkpkt"; -- GitLab From 8bdee6783cc112733819d853db89a466893c999f Mon Sep 17 00:00:00 2001 From: Jeff Vander Stoep Date: Tue, 20 Jun 2017 09:35:33 -0700 Subject: [PATCH 476/786] UPSTREAM: selinux: enable genfscon labeling for tracefs In kernel version 4.1, tracefs was separated from debugfs into its own filesystem. Prior to this split, files in /sys/kernel/debug/tracing could be labeled during filesystem creation using genfscon or later from userspace using setxattr. This change re-enables support for genfscon labeling. Signed-off-by: Jeff Vander Stoep Acked-by: Stephen Smalley Signed-off-by: Paul Moore (cherry picked from commit 6a3911837da0a90ed599fd0a9836472f5e7ddf1b) Change-Id: I98ad8c829302346705c1abcdc8f019f479fdefb6 Bug: 62413700 --- security/selinux/hooks.c | 1 + 1 file changed, 1 insertion(+) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 8b918f8d099b..20b2e7d65389 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -814,6 +814,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, sbsec->flags |= SE_SBPROC | SE_SBGENFS; if (!strcmp(sb->s_type->name, "debugfs") || + !strcmp(sb->s_type->name, "tracefs") || !strcmp(sb->s_type->name, "sysfs") || !strcmp(sb->s_type->name, "pstore")) sbsec->flags |= SE_SBGENFS; -- GitLab From a1ecfd66c897183a5a07bf7b808575c27a668389 Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Fri, 30 Jun 2017 11:19:42 -0700 Subject: [PATCH 477/786] msm: ipa: move rndis_ipa and ecm_ipa to ipa folder rndis_ipa.c and ecm_ipa.c are IPA netdevs for USB tethering. Move those to ipa folder. 
Change-Id: Iad62b9bf21842e3220f5d71c6b8531334ef74d5b CRs-Fixed: 2069418 Acked-by: Ady Abraham Signed-off-by: Skylar Chang --- arch/arm/configs/sdxpoorwills-perf_defconfig | 4 ++-- arch/arm/configs/sdxpoorwills_defconfig | 4 ++-- arch/arm64/configs/sdm845-perf_defconfig | 2 +- arch/arm64/configs/sdm845_defconfig | 2 +- drivers/net/ethernet/Kconfig | 1 - drivers/net/ethernet/Makefile | 1 - drivers/net/ethernet/msm/Kconfig | 22 ------------------- drivers/net/ethernet/msm/Makefile | 6 ----- drivers/platform/msm/Kconfig | 18 +++++++++++++++ drivers/platform/msm/ipa/ipa_clients/Makefile | 2 ++ .../msm/ipa/ipa_clients}/ecm_ipa.c | 0 .../msm/ipa/ipa_clients}/rndis_ipa.c | 0 .../msm/ipa/ipa_clients}/rndis_ipa_trace.h | 0 13 files changed, 26 insertions(+), 36 deletions(-) delete mode 100644 drivers/net/ethernet/msm/Kconfig delete mode 100644 drivers/net/ethernet/msm/Makefile rename drivers/{net/ethernet/msm => platform/msm/ipa/ipa_clients}/ecm_ipa.c (100%) rename drivers/{net/ethernet/msm => platform/msm/ipa/ipa_clients}/rndis_ipa.c (100%) rename drivers/{net/ethernet/msm => platform/msm/ipa/ipa_clients}/rndis_ipa_trace.h (100%) diff --git a/arch/arm/configs/sdxpoorwills-perf_defconfig b/arch/arm/configs/sdxpoorwills-perf_defconfig index 1674c11332e3..c2252c01cc67 100644 --- a/arch/arm/configs/sdxpoorwills-perf_defconfig +++ b/arch/arm/configs/sdxpoorwills-perf_defconfig @@ -175,8 +175,6 @@ CONFIG_TUN=y # CONFIG_NET_VENDOR_INTEL is not set CONFIG_KS8851=y # CONFIG_NET_VENDOR_MICROCHIP is not set -CONFIG_ECM_IPA=y -CONFIG_RNDIS_IPA=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set @@ -270,6 +268,8 @@ CONFIG_UIO=y CONFIG_STAGING=y CONFIG_GSI=y CONFIG_IPA3=y +CONFIG_ECM_IPA=y +CONFIG_RNDIS_IPA=y CONFIG_IPA_UT=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y diff --git a/arch/arm/configs/sdxpoorwills_defconfig b/arch/arm/configs/sdxpoorwills_defconfig index 38a531f98223..e8fa052ab674 100644 --- a/arch/arm/configs/sdxpoorwills_defconfig +++ b/arch/arm/configs/sdxpoorwills_defconfig @@ -167,8 +167,6 @@ CONFIG_TUN=y # CONFIG_NET_VENDOR_INTEL is not set CONFIG_KS8851=y # CONFIG_NET_VENDOR_MICROCHIP is not set -CONFIG_ECM_IPA=y -CONFIG_RNDIS_IPA=y # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SMSC is not set @@ -266,6 +264,8 @@ CONFIG_UIO=y CONFIG_STAGING=y CONFIG_GSI=y CONFIG_IPA3=y +CONFIG_ECM_IPA=y +CONFIG_RNDIS_IPA=y CONFIG_IPA_UT=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index db30129f1234..2645d762a5bc 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -260,7 +260,6 @@ CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y CONFIG_SKY2=y -CONFIG_RNDIS_IPA=y CONFIG_SMSC911X=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y @@ -442,6 +441,7 @@ CONFIG_ION_MSM=y CONFIG_GSI=y CONFIG_IPA3=y CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y CONFIG_IPA_UT=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index db4c3af22ca2..ed8d387c72f8 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -269,7 +269,6 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y -CONFIG_RNDIS_IPA=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -458,6 +457,7 @@ CONFIG_ION_MSM=y CONFIG_GSI=y CONFIG_IPA3=y CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y CONFIG_IPA_UT=y 
CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 9d91f96ef001..8cc7467b6c1f 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -112,7 +112,6 @@ source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" source "drivers/net/ethernet/moxa/Kconfig" -source "drivers/net/ethernet/msm/Kconfig" source "drivers/net/ethernet/myricom/Kconfig" config FEALNX diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index b31cbc274474..a09423df83f2 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -52,7 +52,6 @@ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/ -obj-$(CONFIG_ARCH_QCOM) += msm/ obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig deleted file mode 100644 index 586e03e786ad..000000000000 --- a/drivers/net/ethernet/msm/Kconfig +++ /dev/null @@ -1,22 +0,0 @@ -# -# msm network device configuration -# - -config ECM_IPA - tristate "STD ECM LAN Driver support" - depends on IPA || IPA3 - help - Enables LAN between applications processor and a tethered - host using the STD ECM protocol. - This Network interface is aimed to allow data path go through - IPA core while using STD ECM protocol. - -config RNDIS_IPA - tristate "RNDIS_IPA Network Interface Driver support" - depends on IPA || IPA3 - help - Enables LAN between applications processor and a tethered - host using the RNDIS protocol. - This Network interface is aimed to allow data path go through - IPA core while using RNDIS protocol. - diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile deleted file mode 100644 index ec2699ae7f64..000000000000 --- a/drivers/net/ethernet/msm/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -# -# Makefile for the msm networking support. -# - -obj-$(CONFIG_ECM_IPA) += ecm_ipa.o -obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig index 194620485aa8..e5fe6bae4e04 100644 --- a/drivers/platform/msm/Kconfig +++ b/drivers/platform/msm/Kconfig @@ -56,6 +56,24 @@ config RMNET_IPA3 for RmNet Data Driver and also exchange of QMI messages between A7 and Q6 IPA-driver. +config ECM_IPA + tristate "STD ECM LAN Driver support" + depends on IPA || IPA3 + help + Enables LAN between applications processor and a tethered + host using the STD ECM protocol. + This Network interface is aimed to allow data path go through + IPA core while using STD ECM protocol. + +config RNDIS_IPA + tristate "RNDIS_IPA Network Interface Driver support" + depends on IPA || IPA3 + help + Enables LAN between applications processor and a tethered + host using the RNDIS protocol. + This Network interface is aimed to allow data path go through + IPA core while using RNDIS protocol. 
+ config IPA_UT tristate "IPA Unit-Test Framework and Test Suites" depends on IPA3 && DEBUG_FS diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile index 61cef2d71960..61625f562178 100644 --- a/drivers/platform/msm/ipa/ipa_clients/Makefile +++ b/drivers/platform/msm/ipa/ipa_clients/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o +obj-$(CONFIG_ECM_IPA) += ecm_ipa.o +obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c similarity index 100% rename from drivers/net/ethernet/msm/ecm_ipa.c rename to drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c diff --git a/drivers/net/ethernet/msm/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c similarity index 100% rename from drivers/net/ethernet/msm/rndis_ipa.c rename to drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c diff --git a/drivers/net/ethernet/msm/rndis_ipa_trace.h b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h similarity index 100% rename from drivers/net/ethernet/msm/rndis_ipa_trace.h rename to drivers/platform/msm/ipa/ipa_clients/rndis_ipa_trace.h -- GitLab From a0826c668bbff6d26358c5a8e1226c2eebf07091 Mon Sep 17 00:00:00 2001 From: Narendra Muppalla Date: Mon, 12 Jun 2017 11:55:33 -0700 Subject: [PATCH 478/786] ARM: dts: msm: align dither offset to pingpong offset for sdm845 Align dither offset with ping pong offset on sdm845. Change-Id: Iede3fed3b112d15d303f8b573db913121d431315 Signed-off-by: Narendra Muppalla --- arch/arm64/boot/dts/qcom/sdm845-sde.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi index e31f8fdd1cd5..c3462c05e81d 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi @@ -82,7 +82,7 @@ qcom,sde-dsc-off = <0x81000 0x81400 0x81800 0x81c00>; qcom,sde-dsc-size = <0x140>; - qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0>; + qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0 0x30e0 0x0>; qcom,sde-dither-version = <0x00010000>; qcom,sde-dither-size = <0x20>; -- GitLab From 8430ee19156ceb43e70f84b10fba95af39403831 Mon Sep 17 00:00:00 2001 From: Ping Li Date: Fri, 24 Feb 2017 14:14:44 -0800 Subject: [PATCH 479/786] drm/msm/sde: Add dither feature support Add dither feature interface for default panel dither config and user dither config. 
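Since the user-facing side of this feature is a connector blob property
(installed by this patch as "SDE_PP_DITHER_V1" for dither version 1), a
minimal userspace sketch of programming it is shown below. It is
illustrative only: it assumes the msm_drm_pp.h UAPI header from this
patch is available to the client, that prop_id has already been found by
walking drmModeObjectGetProperties() on the connector, and that the
panel is RGB888 (hence 8-bit depths). The matrix values are the same
defaults used by the driver.

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>
	#include <drm/msm_drm_pp.h>

	static int set_dither(int fd, uint32_t connector_id, uint32_t prop_id)
	{
		static const uint32_t matrix[DITHER_MATRIX_SZ] = {
			15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
		};
		struct drm_msm_dither cfg = {
			.temporal_en = 0,
			.c0_bitdepth = 8,
			.c1_bitdepth = 8,
			.c2_bitdepth = 8,
			.c3_bitdepth = 8,
		};
		uint32_t blob_id;
		int ret;

		memcpy(cfg.matrix, matrix, sizeof(matrix));

		/* wrap the config in a blob and point the property at it */
		ret = drmModeCreatePropertyBlob(fd, &cfg, sizeof(cfg), &blob_id);
		if (ret)
			return ret;

		return drmModeObjectSetProperty(fd, connector_id,
						DRM_MODE_OBJECT_CONNECTOR,
						prop_id, blob_id);
	}

If no user blob is set, the driver falls back to the default dither
config built from the panel dst_format, as implemented in
_sde_connector_get_default_dither_cfg_v1() in this patch.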
Change-Id: I2e67a9f1372355724c380e9643921025bf98fc97 Signed-off-by: Ping Li Signed-off-by: Narendra Muppalla --- .../devicetree/bindings/display/msm/sde.txt | 3 + drivers/gpu/drm/msm/dsi-staging/dsi_display.c | 17 ++ drivers/gpu/drm/msm/dsi-staging/dsi_display.h | 7 + drivers/gpu/drm/msm/msm_drv.h | 1 + drivers/gpu/drm/msm/sde/sde_connector.c | 147 +++++++++++++++++- drivers/gpu/drm/msm/sde/sde_connector.h | 24 ++- drivers/gpu/drm/msm/sde/sde_encoder.c | 22 +++ drivers/gpu/drm/msm/sde/sde_hw_catalog.c | 18 +++ drivers/gpu/drm/msm/sde/sde_hw_catalog.h | 3 + .../msm/sde/sde_hw_color_processing_v1_7.c | 2 - drivers/gpu/drm/msm/sde/sde_hw_pingpong.c | 72 ++++++++- drivers/gpu/drm/msm/sde/sde_hw_pingpong.h | 7 + drivers/gpu/drm/msm/sde/sde_hw_util.h | 1 + drivers/gpu/drm/msm/sde/sde_kms.c | 6 +- include/uapi/drm/msm_drm_pp.h | 22 +++ 15 files changed, 340 insertions(+), 12 deletions(-) diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt index 863a1696b8c2..ed718de56ead 100644 --- a/Documentation/devicetree/bindings/display/msm/sde.txt +++ b/Documentation/devicetree/bindings/display/msm/sde.txt @@ -178,6 +178,9 @@ Optional properties: - qcom,sde-te-size: A u32 value indicates the te block address range. - qcom,sde-te2-size: A u32 value indicates the te2 block address range. - qcom,sde-dsc-off: A u32 offset indicates the dsc block offset on pingpong. +- qcom,sde-dither-off: A u32 offset indicates the dither block offset on pingpong. +- qcom,sde-dither-version: A u32 value indicates the dither block version. +- qcom,sde-dither-size: A u32 value indicates the dither block address range. - qcom,sde-sspp-vig-blocks: A node that lists the blocks inside the VIG hardware. The block entries will contain the offset and version (if needed) of each feature block. The presence of a block entry diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index 52b1dcbec664..62a6b5623abe 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -110,6 +110,23 @@ int dsi_display_soft_reset(void *display) return rc; } + +enum dsi_pixel_format dsi_display_get_dst_format(void *display) +{ + enum dsi_pixel_format format = DSI_PIXEL_FORMAT_MAX; + struct dsi_display *dsi_display = (struct dsi_display *)display; + + if (!dsi_display || !dsi_display->panel) { + pr_err("Invalid params(s) dsi_display %pK, panel %pK\n", + dsi_display, + ((dsi_display) ? 
dsi_display->panel : NULL)); + return format; + } + + format = dsi_display->panel->host_config.dst_format; + return format; +} + static ssize_t debugfs_dump_info_read(struct file *file, char __user *buff, size_t count, diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h index 38af37b59b6a..b382e4a281cc 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h @@ -480,5 +480,12 @@ int dsi_display_soft_reset(void *display); */ int dsi_display_pre_kickoff(struct dsi_display *display, struct msm_display_kickoff_params *params); +/** + * dsi_display_get_dst_format() - get dst_format from DSI display + * @display: Handle to display + * + * Return: enum dsi_pixel_format type + */ +enum dsi_pixel_format dsi_display_get_dst_format(void *display); #endif /* _DSI_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 665ed36527a2..01fd97cdaad8 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -154,6 +154,7 @@ enum msm_mdp_conn_property { /* blob properties, always put these first */ CONNECTOR_PROP_SDE_INFO, CONNECTOR_PROP_HDR_INFO, + CONNECTOR_PROP_PP_DITHER, /* # of blob properties */ CONNECTOR_PROP_BLOBCOUNT, diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c index 2970b280814f..77a25eef7348 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -29,6 +29,9 @@ #define SDE_ERROR_CONN(c, fmt, ...) SDE_ERROR("conn%d " fmt,\ (c) ? (c)->base.base.id : -1, ##__VA_ARGS__) +static u32 dither_matrix[DITHER_MATRIX_SZ] = { + 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10 +}; static const struct drm_prop_enum_list e_topology_name[] = { {SDE_RM_TOPOLOGY_NONE, "sde_none"}, @@ -217,6 +220,129 @@ void sde_connector_unregister_event(struct drm_connector *connector, (void)sde_connector_register_event(connector, event_idx, 0, 0); } +static int _sde_connector_get_default_dither_cfg_v1( + struct sde_connector *c_conn, void *cfg) +{ + struct drm_msm_dither *dither_cfg = (struct drm_msm_dither *)cfg; + enum dsi_pixel_format dst_format = DSI_PIXEL_FORMAT_MAX; + + if (!c_conn || !cfg) { + SDE_ERROR("invalid argument(s), c_conn %pK, cfg %pK\n", + c_conn, cfg); + return -EINVAL; + } + + if (!c_conn->ops.get_dst_format) { + SDE_ERROR("get_dst_format is invalid\n"); + return -EINVAL; + } + + dst_format = c_conn->ops.get_dst_format(c_conn->display); + switch (dst_format) { + case DSI_PIXEL_FORMAT_RGB888: + dither_cfg->c0_bitdepth = 8; + dither_cfg->c1_bitdepth = 8; + dither_cfg->c2_bitdepth = 8; + dither_cfg->c3_bitdepth = 8; + break; + case DSI_PIXEL_FORMAT_RGB666: + case DSI_PIXEL_FORMAT_RGB666_LOOSE: + dither_cfg->c0_bitdepth = 6; + dither_cfg->c1_bitdepth = 6; + dither_cfg->c2_bitdepth = 6; + dither_cfg->c3_bitdepth = 6; + break; + default: + SDE_DEBUG("no default dither config for dst_format %d\n", + dst_format); + return -ENODATA; + } + + memcpy(&dither_cfg->matrix, dither_matrix, + sizeof(u32) * DITHER_MATRIX_SZ); + dither_cfg->temporal_en = 0; + return 0; +} + +static void _sde_connector_install_dither_property(struct drm_device *dev, + struct sde_kms *sde_kms, struct sde_connector *c_conn) +{ + char prop_name[DRM_PROP_NAME_LEN]; + struct sde_mdss_cfg *catalog = NULL; + struct drm_property_blob *blob_ptr; + void *cfg; + int ret = 0; + u32 version = 0, len = 0; + bool defalut_dither_needed = false; + + if (!dev || !sde_kms || !c_conn) { + 
SDE_ERROR("invld args (s), dev %pK, sde_kms %pK, c_conn %pK\n", + dev, sde_kms, c_conn); + return; + } + + catalog = sde_kms->catalog; + version = SDE_COLOR_PROCESS_MAJOR( + catalog->pingpong[0].sblk->dither.version); + snprintf(prop_name, ARRAY_SIZE(prop_name), "%s%d", + "SDE_PP_DITHER_V", version); + switch (version) { + case 1: + msm_property_install_blob(&c_conn->property_info, prop_name, + DRM_MODE_PROP_BLOB, + CONNECTOR_PROP_PP_DITHER); + len = sizeof(struct drm_msm_dither); + cfg = kzalloc(len, GFP_KERNEL); + if (!cfg) + return; + + ret = _sde_connector_get_default_dither_cfg_v1(c_conn, cfg); + if (!ret) + defalut_dither_needed = true; + break; + default: + SDE_ERROR("unsupported dither version %d\n", version); + return; + } + + if (defalut_dither_needed) { + blob_ptr = drm_property_create_blob(dev, len, cfg); + if (IS_ERR_OR_NULL(blob_ptr)) + goto exit; + c_conn->blob_dither = blob_ptr; + } +exit: + kfree(cfg); +} + +int sde_connector_get_dither_cfg(struct drm_connector *conn, + struct drm_connector_state *state, void **cfg, + size_t *len) +{ + struct sde_connector *c_conn = NULL; + struct sde_connector_state *c_state = NULL; + size_t dither_sz = 0; + + if (!conn || !state || !(*cfg)) + return -EINVAL; + + c_conn = to_sde_connector(conn); + c_state = to_sde_connector_state(state); + + /* try to get user config data first */ + *cfg = msm_property_get_blob(&c_conn->property_info, + c_state->property_blobs, + &dither_sz, + CONNECTOR_PROP_PP_DITHER); + /* if user config data doesn't exist, use default dither blob */ + if (*cfg == NULL && c_conn->blob_dither) { + *cfg = &c_conn->blob_dither->data; + dither_sz = c_conn->blob_dither->length; + } + *len = dither_sz; + return 0; +} + int sde_connector_get_info(struct drm_connector *connector, struct msm_display_info *info) { @@ -305,6 +431,8 @@ static void sde_connector_destroy(struct drm_connector *connector) drm_property_unreference_blob(c_conn->blob_caps); if (c_conn->blob_hdr) drm_property_unreference_blob(c_conn->blob_hdr); + if (c_conn->blob_dither) + drm_property_unreference_blob(c_conn->blob_dither); msm_property_destroy(&c_conn->property_info); drm_connector_unregister(connector); @@ -369,7 +497,8 @@ static void sde_connector_atomic_destroy_state(struct drm_connector *connector, } else { /* destroy value helper */ msm_property_destroy_state(&c_conn->property_info, c_state, - c_state->property_values, 0); + c_state->property_values, + c_state->property_blobs); } } @@ -398,7 +527,7 @@ static void sde_connector_atomic_reset(struct drm_connector *connector) /* reset value helper, zero out state structure and reset properties */ msm_property_reset_state(&c_conn->property_info, c_state, - c_state->property_values, 0); + c_state->property_values, c_state->property_blobs); c_state->base.connector = connector; connector->state = &c_state->base; @@ -426,7 +555,8 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector) /* duplicate value helper */ msm_property_duplicate_state(&c_conn->property_info, - c_oldstate, c_state, c_state->property_values, 0); + c_oldstate, c_state, c_state->property_values, + c_state->property_blobs); /* additional handling for drm framebuffer objects */ if (c_state->out_fb) { @@ -626,7 +756,8 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, /* generic property handling */ rc = msm_property_atomic_set(&c_conn->property_info, - c_state->property_values, 0, property, val); + c_state->property_values, c_state->property_blobs, + property, val); if (rc) goto end; @@ -733,7 
+864,8 @@ static int sde_connector_atomic_get_property(struct drm_connector *connector, else /* get cached property value */ rc = msm_property_atomic_get(&c_conn->property_info, - c_state->property_values, 0, property, val); + c_state->property_values, + c_state->property_blobs, property, val); /* allow for custom override */ if (c_conn->ops.get_property) @@ -1116,6 +1248,8 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, &c_conn->property_info, "sde_drm_roi_v1", 0x0, 0, ~0, 0, CONNECTOR_PROP_ROI_V1); } + /* install PP_DITHER properties */ + _sde_connector_install_dither_property(dev, sde_kms, c_conn); msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE", 0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE); @@ -1156,6 +1290,9 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, drm_property_unreference_blob(c_conn->blob_caps); if (c_conn->blob_hdr) drm_property_unreference_blob(c_conn->blob_hdr); + if (c_conn->blob_dither) + drm_property_unreference_blob(c_conn->blob_dither); + msm_property_destroy(&c_conn->property_info); error_cleanup_fence: mutex_destroy(&c_conn->lock); diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h index 497d0dba1879..3972d93b7060 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.h +++ b/drivers/gpu/drm/msm/sde/sde_connector.h @@ -13,6 +13,7 @@ #ifndef _SDE_CONNECTOR_H_ #define _SDE_CONNECTOR_H_ +#include #include #include #include @@ -183,6 +184,13 @@ struct sde_connector_ops { */ int (*set_power)(struct drm_connector *connector, int power_mode, void *display); + + /** + * get_dst_format - get dst_format from display + * @display: Pointer to private display handle + * Returns: dst_format of display + */ + enum dsi_pixel_format (*get_dst_format)(void *display); }; /** @@ -227,6 +235,7 @@ struct sde_connector_evt { * @property_data: Array of private data for generic property handling * @blob_caps: Pointer to blob structure for 'capabilities' property * @blob_hdr: Pointer to blob structure for 'hdr_properties' property + * @blob_dither: Pointer to blob structure for default dither config * @fb_kmap: true if kernel mapping of framebuffer is requested * @event_table: Array of registered events * @event_lock: Lock object for event_table @@ -255,6 +264,7 @@ struct sde_connector { struct msm_property_data property_data[CONNECTOR_PROP_COUNT]; struct drm_property_blob *blob_caps; struct drm_property_blob *blob_hdr; + struct drm_property_blob *blob_dither; bool fb_kmap; struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT]; @@ -307,6 +317,7 @@ struct sde_connector { * @mmu_id: MMU ID for accessing frame buffer objects, if applicable * @property_values: Local cache of current connector property values * @rois: Regions of interest structure for mapping CRTC to Connector output + * @property_blobs: blob properties */ struct sde_connector_state { struct drm_connector_state base; @@ -315,6 +326,7 @@ struct sde_connector_state { uint64_t property_values[CONNECTOR_PROP_COUNT]; struct msm_roi_list rois; + struct drm_property_blob *property_blobs[CONNECTOR_PROP_BLOBCOUNT]; }; /** @@ -497,5 +509,15 @@ static inline bool sde_connector_needs_offset(struct drm_connector *connector) return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL); } -#endif /* _SDE_CONNECTOR_H_ */ +/** + * sde_connector_get_dither_cfg - get dither property data + * @conn: Pointer to drm_connector struct + * @state: Pointer to drm_connector_state struct + * @cfg: Pointer to pointer to dither cfg + * @len: length 
of the dither data + * Returns: Zero on success + */ +int sde_connector_get_dither_cfg(struct drm_connector *conn, + struct drm_connector_state *state, void **cfg, size_t *len); +#endif /* _SDE_CONNECTOR_H_ */ diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c index 2ff8c38fc805..56e1151c61ea 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder.c @@ -2269,6 +2269,27 @@ void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc) } } +static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys) +{ + void *dither_cfg; + int ret = 0; + size_t len = 0; + enum sde_rm_topology_name topology; + + if (!phys || !phys->connector || !phys->hw_pp || + !phys->hw_pp->ops.setup_dither) + return; + topology = sde_connector_get_topology_name(phys->connector); + if ((topology == SDE_RM_TOPOLOGY_PPSPLIT) && + (phys->split_role == ENC_ROLE_SLAVE)) + return; + + ret = sde_connector_get_dither_cfg(phys->connector, + phys->connector->state, &dither_cfg, &len); + if (!ret) + phys->hw_pp->ops.setup_dither(phys->hw_pp, dither_cfg, len); +} + void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, struct sde_encoder_kickoff_params *params) { @@ -2295,6 +2316,7 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, phys->ops.prepare_for_kickoff(phys, params); if (phys->enable_state == SDE_ENC_ERR_NEEDS_HW_RESET) needs_hw_reset = true; + _sde_encoder_setup_dither(phys); } } diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c index 1cbbe1e5d7b4..1612640e0b10 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c @@ -217,6 +217,9 @@ enum { TE2_OFF, TE2_LEN, PP_SLAVE, + DITHER_OFF, + DITHER_LEN, + DITHER_VER, PP_PROP_MAX, }; @@ -494,6 +497,9 @@ static struct sde_prop_type pp_prop[] = { {TE2_OFF, "qcom,sde-te2-off", false, PROP_TYPE_U32_ARRAY}, {TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32}, {PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY}, + {DITHER_OFF, "qcom,sde-dither-off", false, PROP_TYPE_U32_ARRAY}, + {DITHER_LEN, "qcom,sde-dither-size", false, PROP_TYPE_U32}, + {DITHER_VER, "qcom,sde-dither-version", false, PROP_TYPE_U32}, }; static struct sde_prop_type dsc_prop[] = { @@ -2334,6 +2340,18 @@ static int sde_pp_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg) pp->id - PINGPONG_0); set_bit(SDE_PINGPONG_DSC, &pp->features); } + + sblk->dither.base = PROP_VALUE_ACCESS(prop_value, DITHER_OFF, + i); + if (sblk->dither.base) { + sblk->dither.id = SDE_PINGPONG_DITHER; + snprintf(sblk->dither.name, SDE_HW_BLK_NAME_LEN, + "dither_%u", pp->id); + set_bit(SDE_PINGPONG_DITHER, &pp->features); + } + sblk->dither.len = PROP_VALUE_ACCESS(prop_value, DITHER_LEN, 0); + sblk->dither.version = PROP_VALUE_ACCESS(prop_value, DITHER_VER, + 0); } end: diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h index 74fa8f99f1f5..29698bccdd90 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h @@ -194,6 +194,7 @@ enum { * @SDE_PINGPONG_SPLIT PP block supports split fifo * @SDE_PINGPONG_SLAVE PP block is a suitable slave for split fifo * @SDE_PINGPONG_DSC, Display stream compression blocks + * @SDE_PINGPONG_DITHER, Dither blocks * @SDE_PINGPONG_MAX */ enum { @@ -202,6 +203,7 @@ enum { SDE_PINGPONG_SPLIT, SDE_PINGPONG_SLAVE, SDE_PINGPONG_DSC, + SDE_PINGPONG_DITHER, SDE_PINGPONG_MAX }; @@ -457,6 +459,7 @@ struct 
sde_pingpong_sub_blks { struct sde_pp_blk te; struct sde_pp_blk te2; struct sde_pp_blk dsc; + struct sde_pp_blk dither; }; struct sde_wb_sub_blocks { diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c index ab2c47335988..4191367c992f 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c @@ -70,8 +70,6 @@ #define DSPP_OP_PA_FOL_EN BIT(6) #define DSPP_OP_PA_SKY_EN BIT(7) -#define REG_MASK(n) ((BIT(n)) - 1) - #define PA_VIG_DISABLE_REQUIRED(x) \ !((x) & (VIG_OP_PA_SKIN_EN | VIG_OP_PA_SKY_EN | \ VIG_OP_PA_FOL_EN | VIG_OP_PA_HUE_EN | \ diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c index 37b74df330be..e88f40fa27ea 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c @@ -40,6 +40,11 @@ #define PP_DCE_DATA_IN_SWAP 0x0ac #define PP_DCE_DATA_OUT_SWAP 0x0c8 +#define DITHER_DEPTH_MAP_INDEX 9 +static u32 dither_depth_map[DITHER_DEPTH_MAP_INDEX] = { + 0, 0, 0, 0, 0, 1, 2, 3, 3 +}; + static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp, struct sde_mdss_cfg *m, void __iomem *addr, @@ -167,6 +172,57 @@ static int sde_hw_pp_setup_dsc(struct sde_hw_pingpong *pp) return 0; } +static int sde_hw_pp_setup_dither_v1(struct sde_hw_pingpong *pp, + void *cfg, size_t len) +{ + struct sde_hw_blk_reg_map *c; + struct drm_msm_dither *dither = (struct drm_msm_dither *)cfg; + u32 base = 0, offset = 0, data = 0, i = 0; + + if (!pp) + return -EINVAL; + + c = &pp->hw; + base = pp->caps->sblk->dither.base; + if (!dither) { + /* dither property disable case */ + SDE_REG_WRITE(c, base, 0); + return 0; + } + + if (len != sizeof(struct drm_msm_dither)) { + DRM_ERROR("input len %zu, expected len %zu\n", len, + sizeof(struct drm_msm_dither)); + return -EINVAL; + } + + if (dither->c0_bitdepth >= DITHER_DEPTH_MAP_INDEX || + dither->c1_bitdepth >= DITHER_DEPTH_MAP_INDEX || + dither->c2_bitdepth >= DITHER_DEPTH_MAP_INDEX || + dither->c3_bitdepth >= DITHER_DEPTH_MAP_INDEX) + return -EINVAL; + + offset += 4; + data = dither_depth_map[dither->c0_bitdepth] & REG_MASK(2); + data |= (dither_depth_map[dither->c1_bitdepth] & REG_MASK(2)) << 2; + data |= (dither_depth_map[dither->c2_bitdepth] & REG_MASK(2)) << 4; + data |= (dither_depth_map[dither->c3_bitdepth] & REG_MASK(2)) << 6; + data |= (dither->temporal_en) ? 
(1 << 8) : 0; + SDE_REG_WRITE(c, base + offset, data); + + for (i = 0; i < DITHER_MATRIX_SZ - 3; i += 4) { + offset += 4; + data = (dither->matrix[i] & REG_MASK(4)) | + ((dither->matrix[i + 1] & REG_MASK(4)) << 4) | + ((dither->matrix[i + 2] & REG_MASK(4)) << 8) | + ((dither->matrix[i + 3] & REG_MASK(4)) << 12); + SDE_REG_WRITE(c, base + offset, data); + } + SDE_REG_WRITE(c, base, 1); + + return 0; +} + static int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable) { struct sde_hw_blk_reg_map *c = &pp->hw; @@ -218,8 +274,10 @@ static int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp, } static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops, - unsigned long cap) + const struct sde_pingpong_cfg *hw_cap) { + u32 version = 0; + ops->setup_tearcheck = sde_hw_pp_setup_te_config; ops->enable_tearcheck = sde_hw_pp_enable_te; ops->connect_external_te = sde_hw_pp_connect_external_te; @@ -230,6 +288,16 @@ static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops, ops->disable_dsc = sde_hw_pp_dsc_disable; ops->get_autorefresh = sde_hw_pp_get_autorefresh_config; ops->poll_timeout_wr_ptr = sde_hw_pp_poll_timeout_wr_ptr; + + version = SDE_COLOR_PROCESS_MAJOR(hw_cap->sblk->dither.version); + switch (version) { + case 1: + ops->setup_dither = sde_hw_pp_setup_dither_v1; + break; + default: + ops->setup_dither = NULL; + break; + } }; static struct sde_hw_blk_ops sde_hw_ops = { @@ -257,7 +325,7 @@ struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx, c->idx = idx; c->caps = cfg; - _setup_pingpong_ops(&c->ops, c->caps->features); + _setup_pingpong_ops(&c->ops, c->caps); rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_PINGPONG, idx, &sde_hw_ops); if (rc) { diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h index 6dbf4aac70d6..f0a20541d632 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h @@ -17,6 +17,7 @@ #include "sde_hw_mdss.h" #include "sde_hw_util.h" #include "sde_hw_blk.h" +#include struct sde_hw_pingpong; @@ -62,6 +63,7 @@ struct sde_hw_dsc_cfg { * @setup_dsc : program DSC block with encoding details * @enable_dsc : enables DSC encoder * @disable_dsc : disables DSC encoder + * @setup_dither : function to program the dither hw block */ struct sde_hw_pingpong_ops { /** @@ -123,6 +125,11 @@ struct sde_hw_pingpong_ops { * Disables DSC encoder */ void (*disable_dsc)(struct sde_hw_pingpong *pp); + + /** + * Program the dither hw block + */ + int (*setup_dither)(struct sde_hw_pingpong *pp, void *cfg, size_t len); }; struct sde_hw_pingpong { diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h index 8f469b298862..aa3d5b963a2b 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_util.h +++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h @@ -17,6 +17,7 @@ #include #include "sde_hw_mdss.h" +#define REG_MASK(n) ((BIT(n)) - 1) struct sde_format_extended; /* diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index a636816eb14d..7ef43d8f1280 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -642,7 +642,8 @@ static int _sde_kms_setup_displays(struct drm_device *dev, .soft_reset = dsi_display_soft_reset, .pre_kickoff = dsi_conn_pre_kickoff, .clk_ctrl = dsi_display_clk_ctrl, - .get_topology = dsi_conn_get_topology + .get_topology = dsi_conn_get_topology, + .get_dst_format = dsi_display_get_dst_format }; static const struct sde_connector_ops wb_ops = { .post_init = sde_wb_connector_post_init, @@ 
-651,7 +652,8 @@ static int _sde_kms_setup_displays(struct drm_device *dev, .set_property = sde_wb_connector_set_property, .get_info = sde_wb_get_info, .soft_reset = NULL, - .get_topology = sde_wb_get_topology + .get_topology = sde_wb_get_topology, + .get_dst_format = NULL }; static const struct sde_connector_ops dp_ops = { .post_init = dp_connector_post_init, diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h index d9155a957cbe..7945af0d1a01 100644 --- a/include/uapi/drm/msm_drm_pp.h +++ b/include/uapi/drm/msm_drm_pp.h @@ -281,4 +281,26 @@ struct drm_msm_ad4_cfg { __u32 cfg_param_053; }; +#define DITHER_MATRIX_SZ 16 + +/** + * struct drm_msm_dither - dither feature structure + * @flags: for customizing operations + * @temporal_en: temperal dither enable + * @c0_bitdepth: c0 component bit depth + * @c1_bitdepth: c1 component bit depth + * @c2_bitdepth: c2 component bit depth + * @c3_bitdepth: c2 component bit depth + * @matrix: dither strength matrix + */ +struct drm_msm_dither { + __u64 flags; + __u32 temporal_en; + __u32 c0_bitdepth; + __u32 c1_bitdepth; + __u32 c2_bitdepth; + __u32 c3_bitdepth; + __u32 matrix[DITHER_MATRIX_SZ]; +}; + #endif /* _MSM_DRM_PP_H_ */ -- GitLab From f2c1df91d68f13bdb04eb4a5b551010873898d6f Mon Sep 17 00:00:00 2001 From: Dhaval Patel Date: Wed, 28 Jun 2017 18:32:39 -0700 Subject: [PATCH 480/786] drm/msm: exit from mode-2 if mdss gdsc power collapse fail Exit from mode-2 and switch to back to current rsc state if mdss gdsc power collapse fail due to mode-2 entry. Change-Id: I290440c071cd7fc6924d87d590024f2570466e80 Signed-off-by: Dhaval Patel --- drivers/gpu/drm/msm/sde_rsc_hw.c | 152 +++++++++++++++---------------- 1 file changed, 71 insertions(+), 81 deletions(-) diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c index e5ae0ad4bcc4..87a350eac441 100644 --- a/drivers/gpu/drm/msm/sde_rsc_hw.c +++ b/drivers/gpu/drm/msm/sde_rsc_hw.c @@ -294,7 +294,70 @@ static int rsc_hw_solver_init(struct sde_rsc_priv *rsc) return 0; } -int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc) +static int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, + enum sde_rsc_state state) +{ + int rc = -EBUSY; + int count, reg; + + rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE); + + /** + * force busy and idle during clk & video mode state because it + * is trying to entry in mode-2 without turning on the vysnc. 
+ */ + if ((state == SDE_RSC_VID_STATE) || (state == SDE_RSC_CLK_STATE)) { + reg = dss_reg_r(&rsc->wrapper_io, + SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode); + reg &= ~(BIT(8) | BIT(0)); + dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL, + reg, rsc->debug_mode); + } + + // needs review with HPG sequence + dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO, + 0x0, rsc->debug_mode); + dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI, + 0x0, rsc->debug_mode); + + reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL, + rsc->debug_mode); + reg &= ~BIT(3); + dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL, + reg, rsc->debug_mode); + + reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT, + rsc->debug_mode); + reg |= BIT(13); + dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT, + reg, rsc->debug_mode); + + /* make sure that mode-2 exit before wait*/ + wmb(); + + /* check for sequence running status before exiting */ + for (count = MAX_CHECK_LOOPS; count > 0; count--) { + if (regulator_is_enabled(rsc->fs)) { + rc = 0; + break; + } + usleep_range(10, 100); + } + + reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT, + rsc->debug_mode); + reg &= ~BIT(13); + dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT, + reg, rsc->debug_mode); + if (rc) + pr_err("vdd reg is not enabled yet\n"); + + rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE); + + return rc; +} + +static int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc) { int rc; int count, wrapper_status; @@ -309,8 +372,6 @@ int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc) return rc; } - rc = -EBUSY; - rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_PC); /* update qtimers to high during clk & video mode state */ @@ -345,9 +406,12 @@ int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc) /* make sure that mode-2 is triggered before wait*/ wmb(); - /* check for sequence running status before exiting */ + rc = -EBUSY; + /* this wait is required to turn off the rscc clocks */ for (count = MAX_CHECK_LOOPS; count > 0; count--) { - if (!regulator_is_enabled(rsc->fs)) { + reg = dss_reg_r(&rsc->wrapper_io, + SDE_RSCC_PWR_CTRL, rsc->debug_mode); + if (test_bit(POWER_CTRL_BIT_12, ®)) { rc = 0; break; } @@ -355,20 +419,8 @@ int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc) } if (rc) { - pr_err("vdd fs is still enabled\n"); + pr_err("mdss gdsc power down failed rc:%d\n", rc); goto end; - } else { - rc = -EINVAL; - /* this wait is required to turn off the rscc clocks */ - for (count = MAX_CHECK_LOOPS; count > 0; count--) { - reg = dss_reg_r(&rsc->wrapper_io, - SDE_RSCC_PWR_CTRL, rsc->debug_mode); - if (test_bit(POWER_CTRL_BIT_12, ®)) { - rc = 0; - break; - } - usleep_range(1, 2); - } } if ((rsc->current_state == SDE_RSC_VID_STATE) || @@ -383,69 +435,7 @@ int sde_rsc_mode2_entry(struct sde_rsc_priv *rsc) return 0; end: - rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE); - - return rc; -} - -int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state) -{ - int rc = -EBUSY; - int count, reg; - - rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE); - - /** - * force busy and idle during clk & video mode state because it - * is trying to entry in mode-2 without turning on the vysnc. 
- */ - if ((state == SDE_RSC_VID_STATE) || (state == SDE_RSC_CLK_STATE)) { - reg = dss_reg_r(&rsc->wrapper_io, - SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode); - reg &= ~(BIT(8) | BIT(0)); - dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL, - reg, rsc->debug_mode); - } - - // needs review with HPG sequence - dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO, - 0x0, rsc->debug_mode); - dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_HI, - 0x0, rsc->debug_mode); - - reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL, - rsc->debug_mode); - reg &= ~BIT(3); - dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL, - reg, rsc->debug_mode); - - reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT, - rsc->debug_mode); - reg |= BIT(13); - dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT, - reg, rsc->debug_mode); - - /* make sure that mode-2 exit before wait*/ - wmb(); - - /* check for sequence running status before exiting */ - for (count = MAX_CHECK_LOOPS; count > 0; count--) { - if (regulator_is_enabled(rsc->fs)) { - rc = 0; - break; - } - usleep_range(10, 100); - } - - reg = dss_reg_r(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT, - rsc->debug_mode); - reg &= ~BIT(13); - dss_reg_w(&rsc->wrapper_io, SDE_RSCC_SPARE_PWR_EVENT, - reg, rsc->debug_mode); - if (rc) - pr_err("vdd reg is not enabled yet\n"); - - rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_RESTORE); + sde_rsc_mode2_exit(rsc, rsc->current_state); return rc; } -- GitLab From 172033f6a551457be29a79ebe4a873f969d05ae5 Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Tue, 13 Jun 2017 10:52:56 -0400 Subject: [PATCH 481/786] drm/msm/sde: move scaler config data into plane state Move the parsed user data for scaler configuration from the sde plane structure into the sde plane state structure. This is needed to properly support asynchronous commits; a current working copy of user properties is provided to the worker thread via a local plane state structure during asynchronous commits while incoming configuration for subsequent frame updates operate on a different instance of the plane state structures. 
CRs-Fixed: 2062084 Change-Id: I353c57bb7691c0d427b7426b52d70f8ef982a2fd Signed-off-by: Clarence Ip --- drivers/gpu/drm/msm/sde/sde_plane.c | 173 +++++++++++----------------- drivers/gpu/drm/msm/sde/sde_plane.h | 27 +++++ 2 files changed, 95 insertions(+), 105 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 2a98af45d2f6..9955a85c0725 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -94,25 +94,6 @@ enum sde_plane_qos { SDE_PLANE_QOS_PANIC_CTRL = BIT(2), }; -/** - * enum sde_plane_sclcheck_state - User scaler data status - * - * @SDE_PLANE_SCLCHECK_NONE: No user data provided - * @SDE_PLANE_SCLCHECK_INVALID: Invalid user data provided - * @SDE_PLANE_SCLCHECK_SCALER_V1: Valid scaler v1 data - * @SDE_PLANE_SCLCHECK_SCALER_V1_CHECK: Unchecked scaler v1 data - * @SDE_PLANE_SCLCHECK_SCALER_V2: Valid scaler v2 data - * @SDE_PLANE_SCLCHECK_SCALER_V2_CHECK: Unchecked scaler v2 data - */ -enum sde_plane_sclcheck_state { - SDE_PLANE_SCLCHECK_NONE, - SDE_PLANE_SCLCHECK_INVALID, - SDE_PLANE_SCLCHECK_SCALER_V1, - SDE_PLANE_SCLCHECK_SCALER_V1_CHECK, - SDE_PLANE_SCLCHECK_SCALER_V2, - SDE_PLANE_SCLCHECK_SCALER_V2_CHECK, -}; - /* * struct sde_plane - local sde plane structure * @csc_cfg: Decoded user configuration for csc @@ -123,7 +104,6 @@ enum sde_plane_sclcheck_state { * @sbuf_mode: force stream buffer mode if set * @sbuf_writeback: force stream buffer writeback if set * @revalidate: force revalidation of all the plane properties - * @scaler_check_state: Indicates status of user provided pixle extension data * @blob_rot_caps: Pointer to rotator capability blob */ struct sde_plane { @@ -141,7 +121,6 @@ struct sde_plane { struct sde_hw_pipe *pipe_hw; struct sde_hw_pipe_cfg pipe_cfg; struct sde_hw_sharp_cfg sharp_cfg; - struct sde_hw_scaler3_cfg *scaler3_cfg; struct sde_hw_pipe_qos_cfg pipe_qos_cfg; uint32_t color_fill; bool is_error; @@ -153,9 +132,6 @@ struct sde_plane { u32 sbuf_writeback; bool revalidate; - struct sde_hw_pixel_ext pixel_ext; - enum sde_plane_sclcheck_state scaler_check_state; - struct sde_csc_cfg csc_cfg; struct sde_csc_cfg *csc_usr_ptr; struct sde_csc_cfg *csc_ptr; @@ -917,16 +893,12 @@ static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde, struct sde_hw_scaler3_cfg *cfg; int ret = 0; - if (!psde || !psde->scaler3_cfg) { + if (!psde || !pstate) { SDE_ERROR("invalid args\n"); return -EINVAL; - } else if (!pstate) { - /* pstate is expected to be null on forced color fill */ - SDE_DEBUG("null pstate\n"); - return -EINVAL; } - cfg = psde->scaler3_cfg; + cfg = &pstate->scaler3_cfg; cfg->dir_lut = msm_property_get_blob( &psde->property_info, @@ -946,6 +918,7 @@ static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde, } static void _sde_plane_setup_scaler3(struct sde_plane *psde, + struct sde_plane_state *pstate, uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h, struct sde_hw_scaler3_cfg *scale_cfg, const struct sde_format *fmt, @@ -953,16 +926,17 @@ static void _sde_plane_setup_scaler3(struct sde_plane *psde, { uint32_t decimated, i; - if (!psde || !scale_cfg || !fmt || !chroma_subsmpl_h || + if (!psde || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h || !chroma_subsmpl_v) { - SDE_ERROR("psde %pK scale_cfg %pK fmt %pK smp_h %d smp_v %d\n" - , psde, scale_cfg, fmt, chroma_subsmpl_h, + SDE_ERROR( + "psde %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n", + !!psde, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h, chroma_subsmpl_v); return; } 
memset(scale_cfg, 0, sizeof(*scale_cfg)); - memset(&psde->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext)); + memset(&pstate->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext)); decimated = DECIMATED_DIMENSION(src_w, psde->pipe_cfg.horz_decimation); @@ -1000,9 +974,9 @@ static void _sde_plane_setup_scaler3(struct sde_plane *psde, } scale_cfg->preload_x[i] = SDE_QSEED3_DEFAULT_PRELOAD_H; scale_cfg->preload_y[i] = SDE_QSEED3_DEFAULT_PRELOAD_V; - psde->pixel_ext.num_ext_pxls_top[i] = + pstate->pixel_ext.num_ext_pxls_top[i] = scale_cfg->src_height[i]; - psde->pixel_ext.num_ext_pxls_left[i] = + pstate->pixel_ext.num_ext_pxls_left[i] = scale_cfg->src_width[i]; } if (!(SDE_FORMAT_IS_YUV(fmt)) && (src_h == dst_h) @@ -1272,19 +1246,19 @@ static void sde_color_process_plane_setup(struct drm_plane *plane) } static void _sde_plane_setup_scaler(struct sde_plane *psde, - const struct sde_format *fmt, - struct sde_plane_state *pstate) + struct sde_plane_state *pstate, + const struct sde_format *fmt, bool color_fill) { struct sde_hw_pixel_ext *pe; uint32_t chroma_subsmpl_h, chroma_subsmpl_v; - if (!psde || !fmt) { + if (!psde || !fmt || !pstate) { SDE_ERROR("invalid arg(s), plane %d fmt %d state %d\n", psde != 0, fmt != 0, pstate != 0); return; } - pe = &(psde->pixel_ext); + pe = &pstate->pixel_ext; psde->pipe_cfg.horz_decimation = sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE); @@ -1299,23 +1273,25 @@ static void _sde_plane_setup_scaler(struct sde_plane *psde, /* update scaler */ if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) { - int error; + int rc; - error = _sde_plane_setup_scaler3_lut(psde, pstate); - if (error || psde->debugfs_default_scale || - psde->scaler_check_state != - SDE_PLANE_SCLCHECK_SCALER_V2) { + if (!color_fill && !psde->debugfs_default_scale) + rc = _sde_plane_setup_scaler3_lut(psde, pstate); + else + rc = -EINVAL; + if (rc || pstate->scaler_check_state != + SDE_PLANE_SCLCHECK_SCALER_V2) { /* calculate default config for QSEED3 */ - _sde_plane_setup_scaler3(psde, + _sde_plane_setup_scaler3(psde, pstate, psde->pipe_cfg.src_rect.w, psde->pipe_cfg.src_rect.h, psde->pipe_cfg.dst_rect.w, psde->pipe_cfg.dst_rect.h, - psde->scaler3_cfg, fmt, + &pstate->scaler3_cfg, fmt, chroma_subsmpl_h, chroma_subsmpl_v); } - } else if (psde->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V1 || - !pstate || psde->debugfs_default_scale) { + } else if (pstate->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V1 || + color_fill || psde->debugfs_default_scale) { uint32_t deci_dim, i; /* calculate default configuration for QSEED2 */ @@ -1391,9 +1367,9 @@ static int _sde_plane_color_fill(struct sde_plane *psde, { const struct sde_format *fmt; const struct drm_plane *plane; - const struct sde_plane_state *pstate; + struct sde_plane_state *pstate; - if (!psde) { + if (!psde || !psde->base.state) { SDE_ERROR("invalid plane\n"); return -EINVAL; } @@ -1425,7 +1401,7 @@ static int _sde_plane_color_fill(struct sde_plane *psde, psde->pipe_cfg.src_rect.y = 0; psde->pipe_cfg.src_rect.w = psde->pipe_cfg.dst_rect.w; psde->pipe_cfg.src_rect.h = psde->pipe_cfg.dst_rect.h; - _sde_plane_setup_scaler(psde, fmt, 0); + _sde_plane_setup_scaler(psde, pstate, fmt, true); if (psde->pipe_hw->ops.setup_format) psde->pipe_hw->ops.setup_format(psde->pipe_hw, @@ -1439,7 +1415,7 @@ static int _sde_plane_color_fill(struct sde_plane *psde, if (psde->pipe_hw->ops.setup_pe) psde->pipe_hw->ops.setup_pe(psde->pipe_hw, - &psde->pixel_ext); + &pstate->pixel_ext); } return 0; @@ -2791,6 +2767,7 @@ static void 
_sde_plane_sspp_atomic_check_mode_changed(struct sde_plane *psde, } static int _sde_plane_validate_scaler_v2(struct sde_plane *psde, + struct sde_plane_state *pstate, const struct sde_format *fmt, uint32_t img_w, uint32_t img_h, uint32_t src_w, uint32_t src_h, @@ -2798,17 +2775,16 @@ static int _sde_plane_validate_scaler_v2(struct sde_plane *psde, { int i; - if (!psde || !fmt) { + if (!psde || !pstate || !fmt) { SDE_ERROR_PLANE(psde, "invalid arguments\n"); return -EINVAL; } /* don't run checks unless scaler data was changed */ - if (psde->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V2_CHECK || - !psde->scaler3_cfg) + if (pstate->scaler_check_state != SDE_PLANE_SCLCHECK_SCALER_V2_CHECK) return 0; - psde->scaler_check_state = SDE_PLANE_SCLCHECK_INVALID; + pstate->scaler_check_state = SDE_PLANE_SCLCHECK_INVALID; for (i = 0; i < SDE_MAX_PLANES; i++) { uint32_t hor_req_pixels, hor_fetch_pixels; @@ -2838,17 +2814,17 @@ static int _sde_plane_validate_scaler_v2(struct sde_plane *psde, src_h_tmp >>= 1; } - hor_req_pixels = psde->pixel_ext.roi_w[i]; - vert_req_pixels = psde->pixel_ext.roi_h[i]; + hor_req_pixels = pstate->pixel_ext.roi_w[i]; + vert_req_pixels = pstate->pixel_ext.roi_h[i]; hor_fetch_pixels = DECIMATED_DIMENSION(src_w_tmp + - (int8_t)(psde->pixel_ext.left_ftch[i] & 0xFF) + - (int8_t)(psde->pixel_ext.right_ftch[i] & 0xFF), - deci_w); + (int8_t)(pstate->pixel_ext.left_ftch[i] & 0xFF) + + (int8_t)(pstate->pixel_ext.right_ftch[i] & 0xFF), + deci_w); vert_fetch_pixels = DECIMATED_DIMENSION(src_h_tmp + - (int8_t)(psde->pixel_ext.top_ftch[i] & 0xFF) + - (int8_t)(psde->pixel_ext.btm_ftch[i] & 0xFF), - deci_h); + (int8_t)(pstate->pixel_ext.top_ftch[i] & 0xFF) + + (int8_t)(pstate->pixel_ext.btm_ftch[i] & 0xFF), + deci_h); if ((hor_req_pixels != hor_fetch_pixels) || (hor_fetch_pixels > img_w) || @@ -2868,20 +2844,20 @@ static int _sde_plane_validate_scaler_v2(struct sde_plane *psde, * for Y and UV plane */ if (i != 3 && - (hor_req_pixels != psde->scaler3_cfg->src_width[i] || - vert_req_pixels != psde->scaler3_cfg->src_height[i])) { + (hor_req_pixels != pstate->scaler3_cfg.src_width[i] || + vert_req_pixels != pstate->scaler3_cfg.src_height[i])) { SDE_ERROR_PLANE(psde, "roi[%d] %d/%d, scaler src %dx%d, src %dx%d\n", - i, psde->pixel_ext.roi_w[i], - psde->pixel_ext.roi_h[i], - psde->scaler3_cfg->src_width[i], - psde->scaler3_cfg->src_height[i], + i, pstate->pixel_ext.roi_w[i], + pstate->pixel_ext.roi_h[i], + pstate->scaler3_cfg.src_width[i], + pstate->scaler3_cfg.src_height[i], src_w, src_h); return -EINVAL; } } - psde->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2; + pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2; return 0; } @@ -3020,7 +2996,7 @@ static int sde_plane_sspp_atomic_check(struct drm_plane *plane, "too much scaling requested %ux%u->%ux%u\n", src_deci_w, src_deci_h, dst.w, dst.h); ret = -E2BIG; - } else if (_sde_plane_validate_scaler_v2(psde, fmt, + } else if (_sde_plane_validate_scaler_v2(psde, pstate, fmt, rstate->out_fb_width, rstate->out_fb_height, src.w, src.h, deci_w, deci_h)) { @@ -3309,7 +3285,7 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane, psde->pipe_cfg.src_rect = src; psde->pipe_cfg.dst_rect = dst; - _sde_plane_setup_scaler(psde, fmt, pstate); + _sde_plane_setup_scaler(psde, pstate, fmt, false); /* check for color fill */ psde->color_fill = (uint32_t)sde_plane_get_property(pstate, @@ -3326,7 +3302,7 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane, if (psde->pipe_hw->ops.setup_pe && (pstate->multirect_index != 
SDE_SSPP_RECT_1)) psde->pipe_hw->ops.setup_pe(psde->pipe_hw, - &psde->pixel_ext); + &pstate->pixel_ext); /** * when programmed in multirect mode, scalar block will be @@ -3336,8 +3312,8 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane, if (psde->pipe_hw->ops.setup_scaler && pstate->multirect_index != SDE_SSPP_RECT_1) psde->pipe_hw->ops.setup_scaler(psde->pipe_hw, - &psde->pipe_cfg, &psde->pixel_ext, - psde->scaler3_cfg); + &psde->pipe_cfg, &pstate->pixel_ext, + &pstate->scaler3_cfg); /* update excl rect */ if (psde->pipe_hw->ops.setup_excl_rect) @@ -3759,18 +3735,19 @@ static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr) psde->csc_usr_ptr = &psde->csc_cfg; } -static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr) +static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, + struct sde_plane_state *pstate, void *usr) { struct sde_drm_scaler_v1 scale_v1; struct sde_hw_pixel_ext *pe; int i; - if (!psde) { - SDE_ERROR("invalid plane\n"); + if (!psde || !pstate) { + SDE_ERROR("invalid argument(s)\n"); return; } - psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE; + pstate->scaler_check_state = SDE_PLANE_SCLCHECK_NONE; if (!usr) { SDE_DEBUG_PLANE(psde, "scale data removed\n"); return; @@ -3785,7 +3762,7 @@ static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr) msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V1); /* populate from user space */ - pe = &(psde->pixel_ext); + pe = &pstate->pixel_ext; memset(pe, 0, sizeof(struct sde_hw_pixel_ext)); for (i = 0; i < SDE_MAX_PLANES; i++) { pe->init_phase_x[i] = scale_v1.init_phase_x[i]; @@ -3810,7 +3787,7 @@ static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr) pe->roi_h[i] = scale_v1.pe.num_ext_pxls_tb[i]; } - psde->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V1; + pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V1; SDE_EVT32_VERBOSE(DRMID(&psde->base)); SDE_DEBUG_PLANE(psde, "user property data copied\n"); @@ -3824,13 +3801,13 @@ static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde, int i; struct sde_hw_scaler3_cfg *cfg; - if (!psde || !psde->scaler3_cfg) { - SDE_ERROR("invalid plane\n"); + if (!psde || !pstate) { + SDE_ERROR("invalid argument(s)\n"); return; } - cfg = psde->scaler3_cfg; - psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE; + cfg = &pstate->scaler3_cfg; + pstate->scaler_check_state = SDE_PLANE_SCLCHECK_NONE; if (!usr) { SDE_DEBUG_PLANE(psde, "scale data removed\n"); return; @@ -3851,7 +3828,7 @@ static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde, msm_property_set_dirty(&psde->property_info, PLANE_PROP_SCALER_V2); /* populate from user space */ - pe = &(psde->pixel_ext); + pe = &pstate->pixel_ext; memset(pe, 0, sizeof(struct sde_hw_pixel_ext)); cfg->enable = scale_v2.enable; cfg->dir_en = scale_v2.dir_en; @@ -3909,7 +3886,7 @@ static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde, pe->btm_rpt[i] = scale_v2.pe.btm_rpt[i]; pe->roi_h[i] = scale_v2.pe.num_ext_pxls_tb[i]; } - psde->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2_CHECK; + pstate->scaler_check_state = SDE_PLANE_SCLCHECK_SCALER_V2_CHECK; SDE_EVT32_VERBOSE(DRMID(&psde->base), cfg->enable, cfg->de.enable, cfg->src_width[0], cfg->src_height[0], @@ -3978,7 +3955,8 @@ static int sde_plane_atomic_set_property(struct drm_plane *plane, _sde_plane_set_csc_v1(psde, (void *)val); break; case PLANE_PROP_SCALER_V1: - _sde_plane_set_scaler_v1(psde, (void *)val); + _sde_plane_set_scaler_v1(psde, 
pstate, + (void *)val); break; case PLANE_PROP_SCALER_V2: _sde_plane_set_scaler_v2(psde, pstate, @@ -4483,7 +4461,6 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, psde->pipe = pipe; psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE]; psde->is_virtual = (master_plane_id != 0); - psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE; INIT_LIST_HEAD(&psde->mplane_list); master_plane = drm_plane_find(dev, master_plane_id); if (master_plane) { @@ -4512,17 +4489,6 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, goto clean_sspp; } - if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) { - psde->scaler3_cfg = kzalloc(sizeof(struct sde_hw_scaler3_cfg), - GFP_KERNEL); - if (!psde->scaler3_cfg) { - SDE_ERROR("[%u]failed to allocate scale struct\n", - pipe); - ret = -ENOMEM; - goto clean_sspp; - } - } - format_list = psde->pipe_sblk->format_list; if (master_plane_id) { @@ -4592,9 +4558,6 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, clean_sspp: if (psde && psde->pipe_hw) sde_hw_sspp_destroy(psde->pipe_hw); - - if (psde && psde->scaler3_cfg) - kfree(psde->scaler3_cfg); clean_plane: kfree(psde); exit: diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h index f83a891d1994..bdea7f6ed2da 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.h +++ b/drivers/gpu/drm/msm/sde/sde_plane.h @@ -98,6 +98,25 @@ struct sde_plane_rot_state { #define SDE_PLANE_DIRTY_PERF 0x8 #define SDE_PLANE_DIRTY_ALL 0xFFFFFFFF +/** + * enum sde_plane_sclcheck_state - User scaler data status + * + * @SDE_PLANE_SCLCHECK_NONE: No user data provided + * @SDE_PLANE_SCLCHECK_INVALID: Invalid user data provided + * @SDE_PLANE_SCLCHECK_SCALER_V1: Valid scaler v1 data + * @SDE_PLANE_SCLCHECK_SCALER_V1_CHECK: Unchecked scaler v1 data + * @SDE_PLANE_SCLCHECK_SCALER_V2: Valid scaler v2 data + * @SDE_PLANE_SCLCHECK_SCALER_V2_CHECK: Unchecked scaler v2 data + */ +enum sde_plane_sclcheck_state { + SDE_PLANE_SCLCHECK_NONE, + SDE_PLANE_SCLCHECK_INVALID, + SDE_PLANE_SCLCHECK_SCALER_V1, + SDE_PLANE_SCLCHECK_SCALER_V1_CHECK, + SDE_PLANE_SCLCHECK_SCALER_V2, + SDE_PLANE_SCLCHECK_SCALER_V2_CHECK, +}; + /** * struct sde_plane_state: Define sde extension of drm plane state object * @base: base drm plane state object @@ -110,6 +129,9 @@ struct sde_plane_rot_state { * @multirect_index: index of the rectangle of SSPP * @multirect_mode: parallel or time multiplex multirect mode * @pending: whether the current update is still pending + * @scaler3_cfg: configuration data for scaler3 + * @pixel_ext: configuration data for pixel extensions + * @scaler_check_state: indicates status of user provided pixel extension data * @cdp_cfg: CDP configuration */ struct sde_plane_state { @@ -124,6 +146,11 @@ struct sde_plane_state { uint32_t multirect_mode; bool pending; + /* scaler configuration */ + struct sde_hw_scaler3_cfg scaler3_cfg; + struct sde_hw_pixel_ext pixel_ext; + enum sde_plane_sclcheck_state scaler_check_state; + /* @sc_cfg: system_cache configuration */ struct sde_hw_pipe_sc_cfg sc_cfg; struct sde_plane_rot_state rot; -- GitLab From ffb8742900cde59f7570d9b4ae22bb320e053795 Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Fri, 30 Jun 2017 13:37:48 -0400 Subject: [PATCH 482/786] drm/msm/sde: avoid checking roi symmetry if zero mixers Avoid checking whether the mixer ROI's are the same if the number of mixers is zero. 
Change-Id: Iefb92b2fc788a73187f0cfe0023dd7c07e603357 Signed-off-by: Clarence Ip --- drivers/gpu/drm/msm/sde/sde_crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index f9bd2f3b20c9..f0ec6b9460fc 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -979,7 +979,7 @@ static int _sde_crtc_check_rois_centered_and_symmetric(struct drm_crtc *crtc, * On certain HW, if using 2 LM, ROIs must be split evenly between the * LMs and be of equal width. */ - if (sde_crtc->num_mixers == 1) + if (sde_crtc->num_mixers < 2) return 0; roi[0] = &crtc_state->lm_roi[0]; -- GitLab From ec38293d5fcbda67c194a082d02aa95f1dc87f90 Mon Sep 17 00:00:00 2001 From: Maheshwar Ajja Date: Fri, 30 Jun 2017 13:47:01 -0700 Subject: [PATCH 483/786] msm: vidc: Fix buffer count issue Set actual buffer count to video hardware instead of min host count to resolve video encoding failures. CRs-Fixed: 2066658 Change-Id: I13e1cb33f261181ef97b66ad2d6cb5e8894ba291 Signed-off-by: Maheshwar Ajja --- drivers/media/platform/msm/vidc/msm_vidc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c index 77a43d393385..21ad17a814a9 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc.c +++ b/drivers/media/platform/msm/vidc/msm_vidc.c @@ -710,7 +710,7 @@ static int msm_vidc_queue_setup(struct vb2_queue *q, bufreq->buffer_count_actual = *num_buffers; rc = set_buffer_count(inst, bufreq->buffer_count_min_host, - bufreq->buffer_count_min_host, HAL_BUFFER_INPUT); + bufreq->buffer_count_actual, HAL_BUFFER_INPUT); } break; @@ -744,7 +744,7 @@ static int msm_vidc_queue_setup(struct vb2_queue *q, bufreq->buffer_count_actual = *num_buffers; rc = set_buffer_count(inst, bufreq->buffer_count_min_host, - bufreq->buffer_count_min_host, buffer_type); + bufreq->buffer_count_actual, buffer_type); } break; default: -- GitLab From f76f3e10a9106ca133737f5e956eb19c94364b12 Mon Sep 17 00:00:00 2001 From: David Dai Date: Fri, 30 Jun 2017 11:58:09 -0700 Subject: [PATCH 484/786] msm: msm_bus: Do not set commit bit for RSCs in solver mode Use only Fire and Forget requests for the sleep set for the display RSC while it's in solver mode. Change-Id: I393786616ae817aa7e425b49acf3bf9652dec0f1 Signed-off-by: David Dai --- drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c index b331e74c2cb1..35369549acd2 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c @@ -386,6 +386,10 @@ static int tcs_cmd_list_gen(int *n_active, tcs_cmd_gen(cur_bcm, &cmdlist_wake[k], cur_bcm->node_vec[ACTIVE_CTX].vec_a, cur_bcm->node_vec[ACTIVE_CTX].vec_b, commit); + + if (cur_rsc->rscdev->req_state == RPMH_AWAKE_STATE) + commit = false; + tcs_cmd_gen(cur_bcm, &cmdlist_sleep[k], cur_bcm->node_vec[DUAL_CTX].vec_a, cur_bcm->node_vec[DUAL_CTX].vec_b, commit); -- GitLab From d67723c6f86f789afc74cf0211a769dafafa8d13 Mon Sep 17 00:00:00 2001 From: Olav Haugan Date: Fri, 30 Jun 2017 11:45:19 -0700 Subject: [PATCH 485/786] drivers: soc: qcom: Use number of bytes returned from PRNG for entropy Use the number of bytes returned from the PRNG to specify the amount of entropy that we are contributing to the random pool instead of assuming we receive the amount requested. 
Change-Id: I5d49206146fd5f18f69516e4ba31dd3a38a81d62 Signed-off-by: Olav Haugan --- drivers/soc/qcom/early_random.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/early_random.c b/drivers/soc/qcom/early_random.c index 5156bc17f969..06601dd0b1b4 100644 --- a/drivers/soc/qcom/early_random.c +++ b/drivers/soc/qcom/early_random.c @@ -56,9 +56,18 @@ void __init init_random_pool(void) &desc); if (!ret) { + u64 bytes_received = desc.ret[0]; + + if (bytes_received != SZ_512) + pr_warn("Did not receive the expected number of bytes from PRNG: %llu\n", + bytes_received); + dmac_inv_range(random_buffer, random_buffer + RANDOM_BUFFER_SIZE); - add_hwgenerator_randomness(random_buffer, SZ_512, SZ_512 << 3); + bytes_received = (bytes_received <= RANDOM_BUFFER_SIZE) ? + bytes_received : RANDOM_BUFFER_SIZE; + add_hwgenerator_randomness(random_buffer, bytes_received, + bytes_received << 3); } } -- GitLab From ae86053da9af50dee39b9ac66b779e189899c0f4 Mon Sep 17 00:00:00 2001 From: tharun kumar Date: Wed, 28 Jun 2017 16:56:10 +0530 Subject: [PATCH 486/786] msm: rdbg: Add snapshot of remote debugger driver The Remote Debugger driver allows a debugger running on a host PC to communicate with a remotestub running on peripheral subsystems. This snapshot was taken as of msm-3.10 commit ("Merge "msm: msm_bus: Provide valid input pointer argument") Change-Id: I944c298446d3786cb6fde20e4b2edaf02ce8e6f1 Acked-by: Chenna Kesava Raju Signed-off-by: Tharun Kumar Merugu --- Documentation/arm/msm/remote_debug_drv.txt | 468 +++++++ .../bindings/arm/msm/rdbg-smp2p.txt | 17 + drivers/char/Kconfig | 7 + drivers/char/Makefile | 1 + drivers/char/rdbg.c | 1167 +++++++++++++++++ 5 files changed, 1660 insertions(+) create mode 100644 Documentation/arm/msm/remote_debug_drv.txt create mode 100644 Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt create mode 100644 drivers/char/rdbg.c diff --git a/Documentation/arm/msm/remote_debug_drv.txt b/Documentation/arm/msm/remote_debug_drv.txt new file mode 100644 index 000000000000..13a35f43e86b --- /dev/null +++ b/Documentation/arm/msm/remote_debug_drv.txt @@ -0,0 +1,468 @@ +Introduction +============ + +The goal of this debug feature is to provide a reliable, responsive, +accurate and secure debug capability to developers interested in +debugging MSM subsystem processor images without the use of a hardware +debugger. + +The Debug Agent along with the Remote Debug Driver implements a shared +memory based transport mechanism that allows for a debugger (ex. GDB) +running on a host PC to communicate with a remote stub running on +peripheral subsystems such as the ADSP, MODEM etc. + +The diagram below depicts end to end the components involved to +support remote debugging: + + +: : +: HOST (PC) : MSM +: ,--------, : ,-------, +: | | : | Debug | ,--------, +: |Debugger|<--:-->| Agent | | Remote | +: | | : | App | +----->| Debug | +: `--------` : |-------| ,--------, | | Stub | +: : | Remote| | |<---+ `--------` +: : | Debug |<-->|--------| +: : | Driver| | |<---+ ,--------, +: : `-------` `--------` | | Remote | +: : LA Shared +----->| Debug | +: : Memory | Stub | +: : `--------` +: : Peripheral Subsystems +: : (ADSP, MODEM, ...) + + +Debugger: Debugger application running on the host PC that + communicates with the remote stub. + Examples: GDB, LLDB + +Debug Agent: Software that runs on the Linux Android platform + that provides connectivity from the MSM to the + host PC. 
This involves two portions: + 1) User mode Debug Agent application that discovers + processes running on the subsystems and creates + TCP/IP sockets for the host to connect to. In addition + to this, it creates an info (or meta) port that + users can connect to discover the various + processes and their corresponding debug ports. + +Remote Debug A character based driver that the Debug +Driver: Agent uses to transport the payload received from the + host to the debug stub running on the subsystem + processor over shared memory and vice versa. + +Shared Memory: Shared memory from the SMEM pool that is accessible + from the Applications Processor (AP) and the + subsystem processors. + +Remote Debug Privileged code that runs in the kernels of the +Stub: subsystem processors that receives debug commands + from the debugger running on the host and + acts on these commands. These commands include reading + and writing to registers and memory belonging to the + subsystem's address space, setting breakpoints, + single stepping etc. + +Hardware description +==================== + +The Remote Debug Driver interfaces with the Remote Debug stubs +running on the subsystem processors and does not drive or +manage any hardware resources. + +Software description +==================== + +The debugger and the remote stubs use Remote Serial Protocol (RSP) +to communicate with each other. This is widely used protocol by both +software and hardware debuggers. RSP is an ASCII based protocol +and used when it is not possible to run GDB server on the target under +debug. + +The Debug Agent application along with the Remote Debug Driver +is responsible for establishing a bi-directional connection from +the debugger application running on the host to the remote debug +stub running on a subsystem. The Debug Agent establishes connectivity +to the host PC via TCP/IP sockets. + +This feature uses ADB port forwarding to establish connectivity +between the debugger running on the host and the target under debug. + +Please note the Debug Agent does not expose HLOS memory to the +remote subsystem processors. + +Design +====== + +Here is the overall flow: + +1) When the Debug Agent application starts up, it opens up a shared memory +based transport channel to the various subsystem processor images. + +2) The Debug Agent application sends messages across to the remote stubs +to discover the various processes that are running on the subsystem and +creates debug sockets for each of them. + +3) Whenever a process running on a subsystem exits, the Debug Agent +is notified by the stub so that the debug port and other resources +can be reclaimed. + +4) The Debug Agent uses the services of the Remote Debug Driver to +transport payload from the host debugger to the remote stub and vice versa. + +5) Communication between the Remote Debug Driver and the Remote Debug stub +running on the subsystem processor is done over shared memory (see figure). +SMEM services are used to allocate the shared memory that will +be readable and writeable by the AP and the subsystem image under debug. + +A separate SMEM allocation takes place for each subsystem processor +involved in remote debugging. The remote stub running on each of the +subsystems allocates a SMEM buffer using a unique identifier so that both +the AP and subsystem get the same physical block of memory. It should be +noted that subsystem images can be restarted at any time. 
+However, when a subsystem comes back up, its stub uses the same unique +SMEM identifier to allocate the SMEM block. This would not result in a +new allocation rather the same block of memory in the first bootup instance +is provided back to the stub running on the subsystem. + +An 8KB chunk of shared memory is allocated and used for communication +per subsystem. For multi-process capable subsystems, 16KB chunk of shared +memory is allocated to allow for simultaneous debugging of more than one +process running on a single subsystem. + +The shared memory is used as a circular ring buffer in each direction. +Thus we have a bi-directional shared memory channel between the AP +and a subsystem. We call this SMQ. Each memory channel contains a header, +data and a control mechanism that is used to synchronize read and write +of data between the AP and the remote subsystem. + +Overall SMQ memory view: +: +: +------------------------------------------------+ +: | SMEM buffer | +: |-----------------------+------------------------| +: |Producer: LA | Producer: Remote | +: |Consumer: Remote | subsystem | +: | subsystem | Consumer: LA | +: | | | +: | Producer| Consumer| +: +-----------------------+------------------------+ +: | | +: | | +: | +--------------------------------------+ +: | | +: | | +: v v +: +--------------------------------------------------------------+ +: | Header | Data | Control | +: +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ +: | | b | b | b | | S |n |n | | S |n |n | | +: | Producer | l | l | l | | M |o |o | | M |o |o | | +: | Ver | o | o | o | | Q |d |d | | Q |d |d | | +: |-----------| c | c | c | ... | |e |e | ... | |e |e | ... | +: | | k | k | k | | O | | | | I | | | | +: | Consumer | | | | | u |0 |1 | | n |0 |1 | | +: | Ver | 0 | 1 | 2 | | t | | | | | | | | +: +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ +: | | +: + | +: | +: +------------------------+ +: | +: v +: +----+----+----+----+ +: | SMQ Nodes | +: |----|----|----|----| +: Node # | 0 | 1 | 2 | ...| +: |----|----|----|----| +: Starting Block Index # | 0 | 3 | 8 | ...| +: |----|----|----|----| +: # of blocks | 3 | 5 | 1 | ...| +: +----+----+----+----+ +: + +Header: Contains version numbers for software compatibility to ensure +that both producers and consumers on the AP and subsystems know how to +read from and write to the queue. +Both the producer and consumer versions are 1. +: +---------+-------------------+ +: | Size | Field | +: +---------+-------------------+ +: | 1 byte | Producer Version | +: +---------+-------------------+ +: | 1 byte | Consumer Version | +: +---------+-------------------+ + + +Data: The data portion contains multiple blocks [0..N] of a fixed size. +The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1. +Payload sent from the debug agent app is split (if necessary) and placed +in these blocks. The first data block is placed at the next 8 byte aligned +address after the header. + +The number of blocks for a given SMEM allocation is derived as follows: + Number of Blocks = ((Total Size - Alignment - Size of Header + - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE)) + +The producer maintains a private block map of each of these blocks to +determine which of these blocks in the queue is available and which are free. + +Control: +The control portion contains a list of nodes [0..N] where N is number +of available data blocks. 
Each node identifies the data +block indexes that contain a particular debug message to be transferred, +and the number of blocks it took to hold the contents of the message. + +Each node has the following structure: +: +---------+-------------------+ +: | Size | Field | +: +---------+-------------------+ +: | 2 bytes |Staring Block Index| +: +---------+-------------------+ +: | 2 bytes |Number of Blocks | +: +---------+-------------------+ + +The producer and the consumer update different parts of the control channel +(SMQOut / SMQIn) respectively. Each of these control data structures contains +information about the last node that was written / read, and the actual nodes +that were written/read. + +SMQOut Structure (R/W by producer, R by consumer): +: +---------+-------------------+ +: | Size | Field | +: +---------+-------------------+ +: | 4 bytes | Magic Init Number | +: +---------+-------------------+ +: | 4 bytes | Reset | +: +---------+-------------------+ +: | 4 bytes | Last Sent Index | +: +---------+-------------------+ +: | 4 bytes | Index Free Read | +: +---------+-------------------+ + +SMQIn Structure (R/W by consumer, R by producer): +: +---------+-------------------+ +: | Size | Field | +: +---------+-------------------+ +: | 4 bytes | Magic Init Number | +: +---------+-------------------+ +: | 4 bytes | Reset ACK | +: +---------+-------------------+ +: | 4 bytes | Last Read Index | +: +---------+-------------------+ +: | 4 bytes | Index Free Write | +: +---------+-------------------+ + +Magic Init Number: +Both SMQ Out and SMQ In initialize this field with a predefined magic +number so as to make sure that both the consumer and producer blocks +have fully initialized and have valid data in the shared memory control area. + Producer Magic #: 0xFF00FF01 + Consumer Magic #: 0xFF00FF02 + +SMQ Out's Last Sent Index and Index Free Read: + Only a producer can write to these indexes and they are updated whenever + there is new payload to be inserted into the SMQ in order to be sent to a + consumer. + + The number of blocks required for the SMQ allocation is determined as: + (payload size + SM_BLOCKSIZE - 1) / SM_BLOCKSIZE + + The private block map is searched for a large enough continuous set of blocks + and the user data is copied into the data blocks. + + The starting index of the free block(s) is updated in the SMQOut's Last Sent + Index. This update keeps track of which index was last written to and the + producer uses it to determine where the the next allocation could be done. + + Every allocation, a producer updates the Index Free Read from its + collaborating consumer's Index Free Write field (if they are unequal). + This index value indicates that the consumer has read all blocks associated + with allocation on the SMQ and that the producer can reuse these blocks for + subsquent allocations since this is a circular queue. + + At cold boot and restart, these indexes are initialized to zero and all + blocks are marked as available for allocation. + +SMQ In's Last Read Index and Index Free Write: + These indexes are written to only by a consumer and are updated whenever + there is new payload to be read from the SMQ. The Last Read Index keeps + track of which index was last read by the consumer and using this, it + determines where the next read should be done. + After completing a read, Last Read Index is incremented to the + next block index. A consumer updates Index Free Write to the starting + index of an allocation whenever it has completed processing the blocks. 
+ This is an optimization that can be used to prevent an additional copy + of data from the queue into a client's data buffer and the data in the queue + itself can be used. + Once Index Free Write is updated, the collaborating producer (on the next + data allocation) reads the updated Index Free Write value and it then + updates its corresponding SMQ Out's Index Free Read and marks the blocks + associated with that index as available for allocation. At cold boot and + restart, these indexes are initialized to zero. + +SMQ Out Reset# and SMQ In Reset ACK #: + Since subsystems can restart at anytime, the data blocks and control channel + can be in an inconsistent state when a producer or consumer comes up. + We use Reset and Reset ACK to manage this. At cold boot, the producer + initializes the Reset# to a known number ex. 1. Every other reset that the + producer undergoes, the Reset#1 is simply incremented by 1. All the producer + indexes are reset. + When the producer notifies the consumer of data availability, the consumer + reads the producers Reset # and copies that into its SMQ In Reset ACK# + field when they differ. When that occurs, the consumer resets its + indexes to 0. + +6) Asynchronous notifications between a producer and consumer are +done using the SMP2P service which is interrupt based. + +Power Management +================ + +None + +SMP/multi-core +============== + +The driver uses completion to wake up the Debug Agent client threads. + +Security +======== + +From the perspective of the subsystem, the AP is untrusted. The remote +stubs consult the secure debug fuses to determine whether or not the +remote debugging will be enabled at the subsystem. + +If the hardware debug fuses indicate that debugging is disabled, the +remote stubs will not be functional on the subsystem. Writes to the +queue will only be done if the driver sees that the remote stub has been +initialized on the subsystem. + +Therefore even if any untrusted software running on the AP requests +the services of the Remote Debug Driver and inject RSP messages +into the shared memory buffer, these RSP messages will be discarded and +an appropriate error code will be sent up to the invoking application. + +Performance +=========== + +During operation, the Remote Debug Driver copies RSP messages +asynchronously sent from the host debugger to the remote stub and vice +versa. The debug messages are ASCII based and relatively short +(<25 bytes) and may once in a while go up to a maximum 700 bytes +depending on the command the user requested. Thus we do not +anticipate any major performance impact. Moreover, in a typical +functional debug scenario performance should not be a concern. + +Interface +========= + +The Remote Debug Driver is a character based device that manages +a piece of shared memory that is used as a bi-directional +single producer/consumer circular queue using a next fit allocator. +Every subsystem, has its own shared memory buffer that is managed +like a separate device. + +The driver distinguishes each subsystem processor's buffer by +registering a node with a different minor number. + +For each subsystem that is supported, the driver exposes a user space +interface through the following node: + - /dev/rdbg- + Ex. /dev/rdbg-adsp (for the ADSP subsystem) + +The standard open(), close(), read() and write() API set is +implemented. 
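+
+As a rough illustration only (the real client of this interface is the
+user mode Debug Agent application), a user-space caller could exercise
+the node roughly as follows, assuming the ADSP device and a hand-built
+RSP packet:
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <string.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		/* "$?#3f" is the RSP "report halt reason" query; any payload
+		 * produced by the host debugger is forwarded the same way.
+		 */
+		const char request[] = "$?#3f";
+		char reply[1024];
+		ssize_t n;
+		int fd;
+
+		fd = open("/dev/rdbg-adsp", O_RDWR);
+		if (fd < 0)
+			return 1;	/* e.g. subsystem queue not yet set up */
+
+		if (write(fd, request, strlen(request)) < 0) {
+			close(fd);
+			return 1;	/* e.g. SMQ send failed */
+		}
+
+		/* read() blocks until the remote stub places a response in the SMQ */
+		n = read(fd, reply, sizeof(reply));
+		if (n > 0)
+			printf("received %zd bytes from the ADSP stub\n", n);
+
+		close(fd);
+		return 0;
+	}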
+ +The open() syscall will fail if a subsystem is not present or supported +by the driver or a shared memory buffer cannot be allocated for the +AP - subsystem communication. It will also fail if the subsytem has +not initialized the queue on its side. Here are the error codes returned +in case a call to open() fails: +ENODEV - memory was not yet allocated for the device +EEXIST - device is already opened +ENOMEM - SMEM allocation failed +ECOMM - Subsytem queue is not yet setup +ENOMEM - Failure to initialize SMQ + +read() is a blocking call that will return with the number of bytes written +by the subsystem whenever the subsystem sends it some payload. Here are the +error codes returned in case a call to read() fails: +EINVAL - Invalid input +ENODEV - Device has not been opened yet +ERESTARTSYS - call to wait_for_completion_interruptible is interrupted +ENODATA - call to smq_receive failed + +write() attempts to send user mode payload out to the subsystem. It can fail +if the SMQ is full. The number of bytes written is returned back to the user. +Here are the error codes returned in case a call to write() fails: +EINVAL - Invalid input +ECOMM - SMQ send failed + +In the close() syscall, the control information state of the SMQ is +initialized to zero thereby preventing any further communication between +the AP and the subsystem. Here is the error code returned in case +a call to close() fails: +ENODEV - device wasn't opened/initialized + +The Remote Debug driver uses SMP2P for bi-directional AP to subsystem +notification. Notifications are sent to indicate that there are new +debug messages available for processing. Each subsystem that is +supported will need to add a device tree entry per the usage +specification of SMP2P driver. + +In case the remote stub becomes non operational or the security configuration +on the subsystem does not permit debugging, any messages put in the SMQ will +not be responded to. It is the responsibility of the Debug Agent app and the +host debugger application such as GDB to timeout and notify the user of the +non availability of remote debugging. + +Driver parameters +================= + +None + +Config options +============== + +The driver is configured with a device tree entry to map an SMP2P entry +to the device. The SMP2P entry name used is "rdbg". Please see +kernel\Documentation\arm\msm\msm_smp2p.txt for information about the +device tree entry required to configure SMP2P. + +The driver uses the SMEM allocation type SMEM_LC_DEBUGGER to allocate memory +for the queue that is used to share data with the subsystems. + +Dependencies +============ + +The Debug Agent driver requires services of SMEM to +allocate shared memory buffers. + +SMP2P is used as a bi-directional notification +mechanism between the AP and a subsystem processor. + +User space utilities +==================== + +This driver is meant to be used in conjunction with the user mode +Remote Debug Agent application. + +Other +===== + +None + +Known issues +============ +For targets with an external subsystem, we cannot use +shared memory for communication and would have to use the prevailing +transport mechanisms that exists between the AP and the external subsystem. + +This driver cannot be leveraged for such targets. 
+ +To do +===== + +None diff --git a/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt new file mode 100644 index 000000000000..ce2d8bd54e43 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/rdbg-smp2p.txt @@ -0,0 +1,17 @@ +Qualcomm Technologies, Inc. Remote Debugger (RDBG) driver + +Required properties: +-compatible : Should be one of + To communicate with modem + qcom,smp2pgpio_client_rdbg_2_in (inbound) + qcom,smp2pgpio_client_rdbg_2_out (outbound) + To communicate with modem + qcom,smp2pgpio_client_rdbg_1_in (inbound) + qcom,smp2pgpio_client_rdbg_1_out (outbound) +-gpios : the relevant gpio pins of the entry. + +Example: + qcom,smp2pgpio_client_rdbg_2_in { + compatible = "qcom,smp2pgpio_client_rdbg_2_in"; + gpios = <&smp2pgpio_rdbg_2_in 0 0>; + }; diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 3e1367a7cb4c..49fb8e506a9e 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -604,5 +604,12 @@ config MSM_ADSPRPC applications DSP processor. Say M if you want to enable this module. +config MSM_RDBG + tristate "QTI Remote debug driver" + help + Implements a shared memory based transport mechanism that allows + for a debugger running on a host PC to communicate with a remote + stub running on peripheral subsystems such as the ADSP, MODEM etc. + endmenu diff --git a/drivers/char/Makefile b/drivers/char/Makefile index b73165a7d3c1..19c3c987518f 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -65,3 +65,4 @@ obj-$(CONFIG_MSM_ADSPRPC) += adsprpc.o ifdef CONFIG_COMPAT obj-$(CONFIG_MSM_ADSPRPC) += adsprpc_compat.o endif +obj-$(CONFIG_MSM_RDBG) += rdbg.o diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c new file mode 100644 index 000000000000..92d9399180ff --- /dev/null +++ b/drivers/char/rdbg.c @@ -0,0 +1,1167 @@ +/* + * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SMP2P_NUM_PROCS 8 +#define MAX_RETRIES 20 + +#define SM_VERSION 1 +#define SM_BLOCKSIZE 128 + +#define SMQ_MAGIC_INIT 0xFF00FF00 +#define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1) +#define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2) + +enum SMQ_STATUS { + SMQ_SUCCESS = 0, + SMQ_ENOMEMORY = -1, + SMQ_EBADPARM = -2, + SMQ_UNDERFLOW = -3, + SMQ_OVERFLOW = -4 +}; + +enum smq_type { + PRODUCER = 1, + CONSUMER = 2, + INVALID = 3 +}; + +struct smq_block_map { + uint32_t index_read; + uint32_t num_blocks; + uint8_t *map; +}; + +struct smq_node { + uint16_t index_block; + uint16_t num_blocks; +} __attribute__ ((__packed__)); + +struct smq_hdr { + uint8_t producer_version; + uint8_t consumer_version; +} __attribute__ ((__packed__)); + +struct smq_out_state { + uint32_t init; + uint32_t index_check_queue_for_reset; + uint32_t index_sent_write; + uint32_t index_free_read; +} __attribute__ ((__packed__)); + +struct smq_out { + struct smq_out_state s; + struct smq_node sent[1]; +}; + +struct smq_in_state { + uint32_t init; + uint32_t index_check_queue_for_reset_ack; + uint32_t index_sent_read; + uint32_t index_free_write; +} __attribute__ ((__packed__)); + +struct smq_in { + struct smq_in_state s; + struct smq_node free[1]; +}; + +struct smq { + struct smq_hdr *hdr; + struct smq_out *out; + struct smq_in *in; + uint8_t *blocks; + uint32_t num_blocks; + struct mutex *lock; + uint32_t initialized; + struct smq_block_map block_map; + enum smq_type type; +}; + +struct gpio_info { + int gpio_base_id; + int irq_base_id; +}; + +struct rdbg_data { + struct device *device; + struct completion work; + struct gpio_info in; + struct gpio_info out; + bool device_initialized; + int gpio_out_offset; + bool device_opened; + void *smem_addr; + size_t smem_size; + struct smq producer_smrb; + struct smq consumer_smrb; + struct mutex write_mutex; +}; + +struct rdbg_device { + struct cdev cdev; + struct class *class; + dev_t dev_no; + int num_devices; + struct rdbg_data *rdbg_data; +}; + +static struct rdbg_device g_rdbg_instance = { + { {0} }, + NULL, + 0, + SMP2P_NUM_PROCS, + NULL +}; + +struct processor_specific_info { + char *name; + unsigned int smem_buffer_addr; + size_t smem_buffer_size; +}; + +static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = { + {0}, /*APPS*/ + {"rdbg_modem", 0, 0}, /*MODEM*/ + {"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/ + {0}, /*SMP2P_RESERVED_PROC_1*/ + {"rdbg_wcnss", 0, 0}, /*WCNSS*/ + {0}, /*SMP2P_RESERVED_PROC_2*/ + {0}, /*SMP2P_POWER_PROC*/ + {0} /*SMP2P_REMOTE_MOCK_PROC*/ +}; + +static int smq_blockmap_get(struct smq_block_map *block_map, + uint32_t *block_index, uint32_t n) +{ + uint32_t start; + uint32_t mark = 0; + uint32_t found = 0; + uint32_t i = 0; + + start = block_map->index_read; + + if (n == 1) { + do { + if (!block_map->map[block_map->index_read]) { + *block_index = block_map->index_read; + block_map->map[block_map->index_read] = 1; + block_map->index_read++; + block_map->index_read %= block_map->num_blocks; + return SMQ_SUCCESS; + } + block_map->index_read++; + } while (start != (block_map->index_read %= + block_map->num_blocks)); + } else { + mark = block_map->num_blocks; + + do { + if (!block_map->map[block_map->index_read]) { + if (mark > block_map->index_read) { + mark = block_map->index_read; + start = block_map->index_read; + found = 0; + } + + found++; + if (found == n) { + *block_index = mark; + for (i = 0; i < n; i++) + 
block_map->map[mark + i] = + (uint8_t)(n - i); + block_map->index_read += block_map->map + [block_map->index_read] - 1; + return SMQ_SUCCESS; + } + } else { + found = 0; + block_map->index_read += block_map->map + [block_map->index_read] - 1; + mark = block_map->num_blocks; + } + block_map->index_read++; + } while (start != (block_map->index_read %= + block_map->num_blocks)); + } + + return SMQ_ENOMEMORY; +} + +static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i) +{ + uint32_t num_blocks = block_map->map[i]; + + while (num_blocks--) { + block_map->map[i] = 0; + i++; + } +} + +static int smq_blockmap_reset(struct smq_block_map *block_map) +{ + if (!block_map->map) + return SMQ_ENOMEMORY; + memset(block_map->map, 0, block_map->num_blocks + 1); + block_map->index_read = 0; + + return SMQ_SUCCESS; +} + +static int smq_blockmap_ctor(struct smq_block_map *block_map, + uint32_t num_blocks) +{ + if (num_blocks <= 1) + return SMQ_ENOMEMORY; + + block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL); + if (!block_map->map) + return SMQ_ENOMEMORY; + + block_map->num_blocks = num_blocks - 1; + smq_blockmap_reset(block_map); + + return SMQ_SUCCESS; +} + +static void smq_blockmap_dtor(struct smq_block_map *block_map) +{ + kfree(block_map->map); + block_map->map = NULL; +} + +static int smq_free(struct smq *smq, void *data) +{ + struct smq_node node; + uint32_t index_block; + int err = SMQ_SUCCESS; + + if (smq->lock) + mutex_lock(smq->lock); + + if ((smq->hdr->producer_version != SM_VERSION) && + (smq->out->s.init != SMQ_MAGIC_PRODUCER)) { + err = SMQ_UNDERFLOW; + goto bail; + } + + index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE; + if (index_block >= smq->num_blocks) { + err = SMQ_EBADPARM; + goto bail; + } + + node.index_block = (uint16_t)index_block; + node.num_blocks = 0; + *((struct smq_node *)(smq->in->free + smq->in-> + s.index_free_write)) = node; + + smq->in->s.index_free_write = (smq->in->s.index_free_write + 1) + % smq->num_blocks; + +bail: + if (smq->lock) + mutex_unlock(smq->lock); + return err; +} + +static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore) +{ + struct smq_node *node; + int err = SMQ_SUCCESS; + int more = 0; + + if ((smq->hdr->producer_version != SM_VERSION) && + (smq->out->s.init != SMQ_MAGIC_PRODUCER)) + return SMQ_UNDERFLOW; + + if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) { + err = SMQ_UNDERFLOW; + goto bail; + } + + node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read); + if (node->index_block >= smq->num_blocks) { + err = SMQ_EBADPARM; + goto bail; + } + + smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1) + % smq->num_blocks; + + *pp = smq->blocks + (node->index_block * SM_BLOCKSIZE); + *pnsize = SM_BLOCKSIZE * node->num_blocks; + + /* + * Ensure that the reads and writes are updated in the memory + * when they are done and not cached. Also, ensure that the reads + * and writes are not reordered as they are shared between two cores. 
+ */ + rmb(); + if (smq->in->s.index_sent_read != smq->out->s.index_sent_write) + more = 1; + +bail: + *pbmore = more; + return err; +} + +static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize) +{ + void *pv = 0; + int num_blocks; + uint32_t index_block = 0; + int err = SMQ_SUCCESS; + struct smq_node *node = NULL; + + mutex_lock(smq->lock); + + if ((smq->in->s.init == SMQ_MAGIC_CONSUMER) && + (smq->hdr->consumer_version == SM_VERSION)) { + if (smq->out->s.index_check_queue_for_reset == + smq->in->s.index_check_queue_for_reset_ack) { + while (smq->out->s.index_free_read != + smq->in->s.index_free_write) { + node = (struct smq_node *)( + smq->in->free + + smq->out->s.index_free_read); + if (node->index_block >= smq->num_blocks) { + err = SMQ_EBADPARM; + goto bail; + } + + smq->out->s.index_free_read = + (smq->out->s.index_free_read + 1) + % smq->num_blocks; + + smq_blockmap_put(&smq->block_map, + node->index_block); + /* + * Ensure that the reads and writes are + * updated in the memory when they are done + * and not cached. Also, ensure that the reads + * and writes are not reordered as they are + * shared between two cores. + */ + rmb(); + } + } + } + + num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE; + err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks); + if (err != SMQ_SUCCESS) + goto bail; + + pv = smq->blocks + (SM_BLOCKSIZE * index_block); + + err = copy_from_user((void *)pv, (void *)pcb, nsize); + if (err != 0) + goto bail; + + ((struct smq_node *)(smq->out->sent + + smq->out->s.index_sent_write))->index_block + = (uint16_t)index_block; + ((struct smq_node *)(smq->out->sent + + smq->out->s.index_sent_write))->num_blocks + = (uint16_t)num_blocks; + + smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1) + % smq->num_blocks; + +bail: + if (err != SMQ_SUCCESS) { + if (pv) + smq_blockmap_put(&smq->block_map, index_block); + } + mutex_unlock(smq->lock); + return err; +} + +static int smq_reset_producer_queue_internal(struct smq *smq, + uint32_t reset_num) +{ + int retval = 0; + uint32_t i; + + if (smq->type != PRODUCER) + goto bail; + + mutex_lock(smq->lock); + if (smq->out->s.index_check_queue_for_reset != reset_num) { + smq->out->s.index_check_queue_for_reset = reset_num; + for (i = 0; i < smq->num_blocks; i++) + (smq->out->sent + i)->index_block = 0xFFFF; + + smq_blockmap_reset(&smq->block_map); + smq->out->s.index_sent_write = 0; + smq->out->s.index_free_read = 0; + retval = 1; + } + mutex_unlock(smq->lock); + +bail: + return retval; +} + +static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod) +{ + int retval = 0; + uint32_t reset_num, i; + + if ((p_cons->type != CONSUMER) || + (p_cons->out->s.init != SMQ_MAGIC_PRODUCER) || + (p_cons->hdr->producer_version != SM_VERSION)) + goto bail; + + reset_num = p_cons->out->s.index_check_queue_for_reset; + if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) { + p_cons->in->s.index_check_queue_for_reset_ack = reset_num; + for (i = 0; i < p_cons->num_blocks; i++) + (p_cons->in->free + i)->index_block = 0xFFFF; + + p_cons->in->s.index_sent_read = 0; + p_cons->in->s.index_free_write = 0; + + retval = smq_reset_producer_queue_internal(p_prod, reset_num); + } + +bail: + return retval; +} + +static int check_subsystem_debug_enabled(void *base_addr, int size) +{ + int num_blocks; + uint8_t *pb_orig; + uint8_t *pb; + struct smq smq; + int err = 0; + + pb = pb_orig = (uint8_t *)base_addr; + pb += sizeof(struct smq_hdr); + pb = PTR_ALIGN(pb, 8); + size -= pb - (uint8_t 
*)pb_orig; + num_blocks = (int)((size - sizeof(struct smq_out_state) - + sizeof(struct smq_in_state))/(SM_BLOCKSIZE + + sizeof(struct smq_node) * 2)); + if (num_blocks <= 0) { + err = SMQ_EBADPARM; + goto bail; + } + + pb += num_blocks * SM_BLOCKSIZE; + smq.out = (struct smq_out *)pb; + pb += sizeof(struct smq_out_state) + (num_blocks * + sizeof(struct smq_node)); + smq.in = (struct smq_in *)pb; + + if (smq.in->s.init != SMQ_MAGIC_CONSUMER) { + pr_err("%s, smq in consumer not initialized", __func__); + err = -ECOMM; + } + +bail: + return err; +} + +static void smq_dtor(struct smq *smq) +{ + if (smq->initialized == SMQ_MAGIC_INIT) { + switch (smq->type) { + case PRODUCER: + smq->out->s.init = 0; + smq_blockmap_dtor(&smq->block_map); + break; + case CONSUMER: + smq->in->s.init = 0; + break; + default: + case INVALID: + break; + } + + smq->initialized = 0; + } +} + +/* + * The shared memory is used as a circular ring buffer in each direction. + * Thus we have a bi-directional shared memory channel between the AP + * and a subsystem. We call this SMQ. Each memory channel contains a header, + * data and a control mechanism that is used to synchronize read and write + * of data between the AP and the remote subsystem. + * + * Overall SMQ memory view: + * + * +------------------------------------------------+ + * | SMEM buffer | + * |-----------------------+------------------------| + * |Producer: LA | Producer: Remote | + * |Consumer: Remote | subsystem | + * | subsystem | Consumer: LA | + * | | | + * | Producer| Consumer| + * +-----------------------+------------------------+ + * | | + * | | + * | +--------------------------------------+ + * | | + * | | + * v v + * +--------------------------------------------------------------+ + * | Header | Data | Control | + * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ + * | | b | b | b | | S |n |n | | S |n |n | | + * | Producer | l | l | l | | M |o |o | | M |o |o | | + * | Ver | o | o | o | | Q |d |d | | Q |d |d | | + * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... | + * | | k | k | k | | O | | | | I | | | | + * | Consumer | | | | | u |0 |1 | | n |0 |1 | | + * | Ver | 0 | 1 | 2 | | t | | | | | | | | + * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+ + * | | + * + | + * | + * +------------------------+ + * | + * v + * +----+----+----+----+ + * | SMQ Nodes | + * |----|----|----|----| + * Node # | 0 | 1 | 2 | ...| + * |----|----|----|----| + * Starting Block Index # | 0 | 3 | 8 | ...| + * |----|----|----|----| + * # of blocks | 3 | 5 | 1 | ...| + * +----+----+----+----+ + * + * Header: Contains version numbers for software compatibility to ensure + * that both producers and consumers on the AP and subsystems know how to + * read from and write to the queue. + * Both the producer and consumer versions are 1. + * +---------+-------------------+ + * | Size | Field | + * +---------+-------------------+ + * | 1 byte | Producer Version | + * +---------+-------------------+ + * | 1 byte | Consumer Version | + * +---------+-------------------+ + * + * Data: The data portion contains multiple blocks [0..N] of a fixed size. + * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1. + * Payload sent from the debug agent app is split (if necessary) and placed + * in these blocks. The first data block is placed at the next 8 byte aligned + * address after the header. 
+ *
+ * The number of blocks for a given SMEM allocation is derived as follows:
+ * Number of Blocks = (Total Size - Alignment - Size of Header - Size of
+ *	SMQIn state - Size of SMQOut state) / (SM_BLOCKSIZE + 2 * Size of SMQ Node)
+ *
+ * The producer maintains a private block map of each of these blocks to
+ * determine which of the blocks in the queue are in use and which are free.
+ *
+ * Control:
+ * The control portion contains a list of nodes [0..N] where N is the number
+ * of available data blocks. Each node identifies the data
+ * block indexes that contain a particular debug message to be transferred,
+ * and the number of blocks it took to hold the contents of the message.
+ *
+ * Each node has the following structure:
+ * +---------+--------------------+
+ * | Size    | Field              |
+ * +---------+--------------------+
+ * | 2 bytes |Starting Block Index|
+ * +---------+--------------------+
+ * | 2 bytes |Number of Blocks    |
+ * +---------+--------------------+
+ *
+ * The producer and the consumer update different parts of the control channel
+ * (SMQOut / SMQIn) respectively. Each of these control data structures contains
+ * information about the last node that was written / read, and the actual nodes
+ * that were written/read.
+ *
+ * SMQOut Structure (R/W by producer, R by consumer):
+ * +---------+-------------------+
+ * | Size    | Field             |
+ * +---------+-------------------+
+ * | 4 bytes | Magic Init Number |
+ * +---------+-------------------+
+ * | 4 bytes | Reset             |
+ * +---------+-------------------+
+ * | 4 bytes | Last Sent Index   |
+ * +---------+-------------------+
+ * | 4 bytes | Index Free Read   |
+ * +---------+-------------------+
+ *
+ * SMQIn Structure (R/W by consumer, R by producer):
+ * +---------+-------------------+
+ * | Size    | Field             |
+ * +---------+-------------------+
+ * | 4 bytes | Magic Init Number |
+ * +---------+-------------------+
+ * | 4 bytes | Reset ACK         |
+ * +---------+-------------------+
+ * | 4 bytes | Last Read Index   |
+ * +---------+-------------------+
+ * | 4 bytes | Index Free Write  |
+ * +---------+-------------------+
+ *
+ * Magic Init Number:
+ * Both SMQ Out and SMQ In initialize this field with a predefined magic
+ * number so as to make sure that both the consumer and producer blocks
+ * have fully initialized and have valid data in the shared memory control area.
+ * Producer Magic #: 0xFF00FF01
+ * Consumer Magic #: 0xFF00FF02
+ */
+static int smq_ctor(struct smq *smq, void *base_addr, int size,
+	enum smq_type type, struct mutex *lock_ptr)
+{
+	int num_blocks;
+	uint8_t *pb_orig;
+	uint8_t *pb;
+	uint32_t i;
+	int err;
+
+	if (smq->initialized == SMQ_MAGIC_INIT) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (!base_addr || !size) {
+		err = SMQ_EBADPARM;
+		goto bail;
+	}
+
+	if (type == PRODUCER)
+		smq->lock = lock_ptr;
+
+	pb_orig = (uint8_t *)base_addr;
+	smq->hdr = (struct smq_hdr *)pb_orig;
+	pb = pb_orig;
+	pb += sizeof(struct smq_hdr);
+	pb = PTR_ALIGN(pb, 8);
+	size -= pb - (uint8_t *)pb_orig;
+	num_blocks = (int)((size - sizeof(struct smq_out_state) -
+		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
+		sizeof(struct smq_node) * 2));
+	if (num_blocks <= 0) {
+		err = SMQ_ENOMEMORY;
+		goto bail;
+	}
+
+	smq->blocks = pb;
+	smq->num_blocks = num_blocks;
+	pb += num_blocks * SM_BLOCKSIZE;
+	smq->out = (struct smq_out *)pb;
+	pb += sizeof(struct smq_out_state) + (num_blocks *
+		sizeof(struct smq_node));
+	smq->in = (struct smq_in *)pb;
+	smq->type = type;
+	if (type == PRODUCER) {
+		smq->hdr->producer_version = SM_VERSION;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->out->sent + i)->index_block = 0xFFFF;
+
+		err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
+		if (err != SMQ_SUCCESS)
+			goto bail;
+
+		smq->out->s.index_sent_write = 0;
+		smq->out->s.index_free_read = 0;
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->out->s.index_check_queue_for_reset += 1;
+		} else {
+			smq->out->s.index_check_queue_for_reset = 1;
+			smq->out->s.init = SMQ_MAGIC_PRODUCER;
+		}
+	} else {
+		smq->hdr->consumer_version = SM_VERSION;
+		for (i = 0; i < smq->num_blocks; i++)
+			(smq->in->free + i)->index_block = 0xFFFF;
+
+		smq->in->s.index_sent_read = 0;
+		smq->in->s.index_free_write = 0;
+		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
+			smq->in->s.index_check_queue_for_reset_ack =
+				smq->out->s.index_check_queue_for_reset;
+		} else {
+			smq->in->s.index_check_queue_for_reset_ack = 0;
+		}
+
+		smq->in->s.init = SMQ_MAGIC_CONSUMER;
+	}
+	smq->initialized = SMQ_MAGIC_INIT;
+	err = SMQ_SUCCESS;
+
+bail:
+	return err;
+}
+
+static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
+{
+	int offset = rdbgdata->gpio_out_offset;
+	int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset);
+
+	gpio_set_value(rdbgdata->out.gpio_base_id + offset, val);
+	rdbgdata->gpio_out_offset = (offset + 1) % 32;
+
+	dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem",
+		__func__, val);
+}
+
+static irqreturn_t on_interrupt_from(int irq, void *ptr)
+{
+	struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
+
+	dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem",
+		__func__, irq);
+
+	complete(&(rdbgdata->work));
+	return IRQ_HANDLED;
+}
+
+static int initialize_smq(struct rdbg_data *rdbgdata)
+{
+	int err = 0;
+	unsigned char *smem_consumer_buffer = rdbgdata->smem_addr;
+
+	smem_consumer_buffer += (rdbgdata->smem_size/2);
+
+	if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
+		((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
+		dev_err(rdbgdata->device, "%s: smq producer allocation failed",
+			__func__);
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)smem_consumer_buffer,
+		((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
+		dev_err(rdbgdata->device, "%s: smq consumer allocation failed",
+			__func__);
+		err = -ENOMEM;
+	}
+
+bail:
+	return err;
+
+}
+
+static int
rdbg_open(struct inode *inode, struct file *filp) +{ + int device_id = -1; + struct rdbg_device *device = &g_rdbg_instance; + struct rdbg_data *rdbgdata = NULL; + int err = 0; + + if (!inode || !device->rdbg_data) { + pr_err("Memory not allocated yet"); + err = -ENODEV; + goto bail; + } + + device_id = MINOR(inode->i_rdev); + rdbgdata = &device->rdbg_data[device_id]; + + if (rdbgdata->device_opened) { + dev_err(rdbgdata->device, "%s: Device already opened", + __func__); + err = -EEXIST; + goto bail; + } + + rdbgdata->smem_size = proc_info[device_id].smem_buffer_size; + if (!rdbgdata->smem_size) { + dev_err(rdbgdata->device, "%s: smem not initialized", __func__); + err = -ENOMEM; + goto bail; + } + + rdbgdata->smem_addr = smem_find(proc_info[device_id].smem_buffer_addr, + rdbgdata->smem_size, 0, SMEM_ANY_HOST_FLAG); + if (!rdbgdata->smem_addr) { + dev_err(rdbgdata->device, "%s: Could not allocate smem memory", + __func__); + err = -ENOMEM; + goto bail; + } + dev_dbg(rdbgdata->device, "%s: SMEM address=0x%lx smem_size=%d", + __func__, (unsigned long)rdbgdata->smem_addr, + (unsigned int)rdbgdata->smem_size); + + if (check_subsystem_debug_enabled(rdbgdata->smem_addr, + rdbgdata->smem_size/2)) { + dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled", + __func__, proc_info[device_id].name); + err = -ECOMM; + goto bail; + } + + init_completion(&rdbgdata->work); + + err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + proc_info[device_id].name, + (void *)&device->rdbg_data[device_id]); + if (err) { + dev_err(rdbgdata->device, + "%s: Failed to register interrupt.Err=%d,irqid=%d.", + __func__, err, rdbgdata->in.irq_base_id); + goto irq_bail; + } + + err = enable_irq_wake(rdbgdata->in.irq_base_id); + if (err < 0) { + dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d", + err); + err = 0; + } + + mutex_init(&rdbgdata->write_mutex); + + err = initialize_smq(rdbgdata); + if (err) { + dev_err(rdbgdata->device, "Error initializing smq. Err=%d", + err); + goto smq_bail; + } + + rdbgdata->device_opened = 1; + + filp->private_data = (void *)rdbgdata; + + return 0; + +smq_bail: + smq_dtor(&(rdbgdata->producer_smrb)); + smq_dtor(&(rdbgdata->consumer_smrb)); + mutex_destroy(&rdbgdata->write_mutex); +irq_bail: + free_irq(rdbgdata->in.irq_base_id, (void *) + &device->rdbg_data[device_id]); +bail: + return err; +} + +static int rdbg_release(struct inode *inode, struct file *filp) +{ + int device_id = -1; + struct rdbg_device *rdbgdevice = &g_rdbg_instance; + struct rdbg_data *rdbgdata = NULL; + int err = 0; + + if (!inode || !rdbgdevice->rdbg_data) { + pr_err("Memory not allocated yet"); + err = -ENODEV; + goto bail; + } + + device_id = MINOR(inode->i_rdev); + rdbgdata = &rdbgdevice->rdbg_data[device_id]; + + if (rdbgdata->device_opened == 1) { + dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__, + proc_info[device_id].name); + rdbgdata->device_opened = 0; + complete(&(rdbgdata->work)); + free_irq(rdbgdata->in.irq_base_id, (void *) + &rdbgdevice->rdbg_data[device_id]); + if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized) + smq_dtor(&(rdbgdevice->rdbg_data[device_id]. + producer_smrb)); + if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized) + smq_dtor(&(rdbgdevice->rdbg_data[device_id]. 
+ consumer_smrb)); + mutex_destroy(&rdbgdata->write_mutex); + } + + filp->private_data = NULL; + +bail: + return err; +} + +static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size, + loff_t *offset) +{ + int err = 0; + struct rdbg_data *rdbgdata = filp->private_data; + void *p_sent_buffer = NULL; + int nsize = 0; + int more = 0; + + if (!rdbgdata) { + pr_err("Invalid argument"); + err = -EINVAL; + goto bail; + } + + dev_dbg(rdbgdata->device, "%s: In receive", __func__); + err = wait_for_completion_interruptible(&(rdbgdata->work)); + if (err) { + dev_err(rdbgdata->device, "%s: Error in wait", __func__); + goto bail; + } + + smq_check_queue_reset(&(rdbgdata->consumer_smrb), + &(rdbgdata->producer_smrb)); + if (smq_receive(&(rdbgdata->consumer_smrb), &p_sent_buffer, + &nsize, &more) != SMQ_SUCCESS) { + dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d", + __func__, err); + err = -ENODATA; + goto bail; + } + + size = ((size < nsize) ? size : nsize); + err = copy_to_user(buf, p_sent_buffer, size); + if (err != 0) { + dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d", + __func__, err); + err = -ENODATA; + goto bail; + } + + smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer); + err = size; + dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%lx", + __func__, (unsigned long) buf); + +bail: + return err; +} + +static ssize_t rdbg_write(struct file *filp, const char __user *buf, + size_t size, loff_t *offset) +{ + int err = 0; + int num_retries = 0; + struct rdbg_data *rdbgdata = filp->private_data; + + if (!rdbgdata) { + pr_err("Invalid argument"); + err = -EINVAL; + goto bail; + } + + do { + err = smq_alloc_send(&(rdbgdata->producer_smrb), buf, size); + dev_dbg(rdbgdata->device, "%s, smq_alloc_send returned %d.", + __func__, err); + } while (err != 0 && num_retries++ < MAX_RETRIES); + + if (err != 0) { + err = -ECOMM; + goto bail; + } + + send_interrupt_to_subsystem(rdbgdata); + + err = size; + +bail: + return err; +} + + +static const struct file_operations rdbg_fops = { + .open = rdbg_open, + .read = rdbg_read, + .write = rdbg_write, + .release = rdbg_release, +}; + +static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr) +{ + struct device_node *node = NULL; + int cnt = 0; + int id = 0; + + node = of_find_compatible_node(NULL, NULL, node_name); + if (node) { + cnt = of_gpio_count(node); + if (cnt && gpio_info_ptr) { + id = of_get_gpio(node, 0); + gpio_info_ptr->gpio_base_id = id; + gpio_info_ptr->irq_base_id = gpio_to_irq(id); + return 0; + } + } + return -EINVAL; +} + +static int __init rdbg_init(void) +{ + int err = 0; + struct rdbg_device *rdbgdevice = &g_rdbg_instance; + int minor = 0; + int major = 0; + int minor_nodes_created = 0; + + char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_"; + int max_len = strlen(rdbg_compatible_string) + strlen("xx_out"); + + char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL); + + if (!node_name) { + err = -ENOMEM; + goto bail; + } + + if (rdbgdevice->num_devices < 1 || + rdbgdevice->num_devices > SMP2P_NUM_PROCS) { + pr_err("rgdb: invalid num_devices"); + err = -EDOM; + goto name_bail; + } + + rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices, + sizeof(struct rdbg_data), GFP_KERNEL); + if (!rdbgdevice->rdbg_data) { + err = -ENOMEM; + goto name_bail; + } + + err = alloc_chrdev_region(&rdbgdevice->dev_no, 0, + rdbgdevice->num_devices, "rdbgctl"); + if (err) { + pr_err("Error in alloc_chrdev_region."); + goto data_bail; + } + major = 
MAJOR(rdbgdevice->dev_no); + + cdev_init(&rdbgdevice->cdev, &rdbg_fops); + rdbgdevice->cdev.owner = THIS_MODULE; + err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0), + rdbgdevice->num_devices); + if (err) { + pr_err("Error in cdev_add"); + goto chrdev_bail; + } + + rdbgdevice->class = class_create(THIS_MODULE, "rdbg"); + if (IS_ERR(rdbgdevice->class)) { + err = PTR_ERR(rdbgdevice->class); + pr_err("Error in class_create"); + goto cdev_bail; + } + + for (minor = 0; minor < rdbgdevice->num_devices; minor++) { + if (!proc_info[minor].name) + continue; + + if (snprintf(node_name, max_len, "%s%d_in", + rdbg_compatible_string, minor) <= 0) { + pr_err("Error in snprintf"); + err = -ENOMEM; + goto device_bail; + } + + if (register_smp2p(node_name, + &rdbgdevice->rdbg_data[minor].in)) { + pr_debug("No incoming device tree entry found for %s", + proc_info[minor].name); + continue; + } + + if (snprintf(node_name, max_len, "%s%d_out", + rdbg_compatible_string, minor) <= 0) { + pr_err("Error in snprintf"); + err = -ENOMEM; + goto device_bail; + } + + if (register_smp2p(node_name, + &rdbgdevice->rdbg_data[minor].out)) { + pr_err("No outgoing device tree entry found for %s", + proc_info[minor].name); + err = -EINVAL; + goto device_bail; + } + + rdbgdevice->rdbg_data[minor].device = device_create( + rdbgdevice->class, NULL, MKDEV(major, minor), + NULL, "%s", proc_info[minor].name); + if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) { + err = PTR_ERR(rdbgdevice->rdbg_data[minor].device); + pr_err("Error in device_create"); + goto device_bail; + } + rdbgdevice->rdbg_data[minor].device_initialized = 1; + minor_nodes_created++; + dev_dbg(rdbgdevice->rdbg_data[minor].device, + "%s: created /dev/%s c %d %d'", __func__, + proc_info[minor].name, major, minor); + } + + if (!minor_nodes_created) { + pr_err("No device tree entries found"); + err = -EINVAL; + goto class_bail; + } + + goto name_bail; + +device_bail: + for (--minor; minor >= 0; minor--) { + if (rdbgdevice->rdbg_data[minor].device_initialized) + device_destroy(rdbgdevice->class, + MKDEV(MAJOR(rdbgdevice->dev_no), minor)); + } +class_bail: + class_destroy(rdbgdevice->class); +cdev_bail: + cdev_del(&rdbgdevice->cdev); +chrdev_bail: + unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices); +data_bail: + kfree(rdbgdevice->rdbg_data); +name_bail: + kfree(node_name); +bail: + return err; +} + +static void __exit rdbg_exit(void) +{ + struct rdbg_device *rdbgdevice = &g_rdbg_instance; + int minor; + + for (minor = 0; minor < rdbgdevice->num_devices; minor++) { + if (rdbgdevice->rdbg_data[minor].device_initialized) { + device_destroy(rdbgdevice->class, + MKDEV(MAJOR(rdbgdevice->dev_no), minor)); + } + } + class_destroy(rdbgdevice->class); + cdev_del(&rdbgdevice->cdev); + unregister_chrdev_region(rdbgdevice->dev_no, 1); + kfree(rdbgdevice->rdbg_data); +} + +module_init(rdbg_init); +module_exit(rdbg_exit); + +MODULE_DESCRIPTION("rdbg module"); +MODULE_LICENSE("GPL v2"); -- GitLab From 7eca0bbe8cd72e067375472cd68c40fb9b5b230b Mon Sep 17 00:00:00 2001 From: tharun kumar Date: Wed, 28 Jun 2017 16:49:18 +0530 Subject: [PATCH 487/786] ARM: dts: msm: Add remote debugger device configuration for sdm845 Adding remote debugger device configuration. The Remote Debugger driver allows a debugger running on a host PC to communicate with a remote stub running on peripheral subsystems. 
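
The nodes added here are looked up by the rdbg driver via register_smp2p() to resolve the smp2p GPIO and IRQ for each subsystem, and the debug agent application then exchanges messages with the remote stub through the character device that the driver creates. A rough user-space sketch of that flow, assuming a hypothetical device node name (the real name comes from proc_info[].name) and an illustrative payload:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char rsp[128];
		const char cmd[] = "attach";	/* illustrative payload only */
		ssize_t n;
		/* hypothetical node name; the driver names it after proc_info[].name */
		int fd = open("/dev/rdbg_example", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* write() lands in rdbg_write(): the payload is copied into SMQ
		 * blocks and an smp2p interrupt is raised towards the subsystem.
		 */
		if (write(fd, cmd, sizeof(cmd)) < 0)
			perror("write");
		/* read() blocks in rdbg_read() until the subsystem interrupts back,
		 * then returns the next message from the consumer queue.
		 */
		n = read(fd, rsp, sizeof(rsp));
		if (n > 0)
			printf("received %zd bytes from the remote stub\n", n);
		close(fd);
		return 0;
	}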
Change-Id: I6a6c251ac7642af85dbd7374374c4a1adfb75b48 Acked-by: Chenna Kesava Raju Signed-off-by: Tharun Kumar Merugu --- arch/arm64/boot/dts/qcom/msm-rdbg.dtsi | 75 ++++++++++++++++++++++++++ arch/arm64/boot/dts/qcom/sdm845.dtsi | 1 + 2 files changed, 76 insertions(+) create mode 100644 arch/arm64/boot/dts/qcom/msm-rdbg.dtsi diff --git a/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi new file mode 100644 index 000000000000..2b8f22e09edf --- /dev/null +++ b/arch/arm64/boot/dts/qcom/msm-rdbg.dtsi @@ -0,0 +1,75 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +&soc { + smp2pgpio_rdbg_2_in: qcom,smp2pgpio-rdbg-2-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "rdbg"; + qcom,remote-pid = <2>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_client_rdbg_2_in { + compatible = "qcom,smp2pgpio_client_rdbg_2_in"; + gpios = <&smp2pgpio_rdbg_2_in 0 0>; + }; + + smp2pgpio_rdbg_2_out: qcom,smp2pgpio-rdbg-2-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "rdbg"; + qcom,remote-pid = <2>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_client_rdbg_2_out { + compatible = "qcom,smp2pgpio_client_rdbg_2_out"; + gpios = <&smp2pgpio_rdbg_2_out 0 0>; + }; + + smp2pgpio_rdbg_1_in: qcom,smp2pgpio-rdbg-1-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "rdbg"; + qcom,remote-pid = <1>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_client_rdbg_1_in { + compatible = "qcom,smp2pgpio_client_rdbg_1_in"; + gpios = <&smp2pgpio_rdbg_1_in 0 0>; + }; + + smp2pgpio_rdbg_1_out: qcom,smp2pgpio-rdbg-1-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "rdbg"; + qcom,remote-pid = <1>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_client_rdbg_1_out { + compatible = "qcom,smp2pgpio_client_rdbg_1_out"; + gpios = <&smp2pgpio_rdbg_1_out 0 0>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index e1a0d771e2a9..c6e39dcb15b0 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -639,6 +639,7 @@ #include "msm-gdsc-sdm845.dtsi" #include "sdm845-sde-pll.dtsi" +#include "msm-rdbg.dtsi" #include "sdm845-sde.dtsi" #include "sdm845-sde-display.dtsi" #include "sdm845-qupv3.dtsi" -- GitLab From 853f23659c607fc43fcf82e59df4190f5be0c686 Mon Sep 17 00:00:00 2001 From: tharun kumar Date: Thu, 29 Jun 2017 19:28:39 +0530 Subject: [PATCH 488/786] defconfig: msm: enable remote debugger driver on sdm845 Adding remote debugger device configuration. The Remote Debugger driver allows a debugger running on a host PC to communicate with a remote stub running on peripheral subsystems. 
Change-Id: Id9859252f5e64b006b90c3a8e61ce1b1c59a15c0 Acked-by: Chenna Kesava Raju Signed-off-by: Tharun Kumar Merugu --- arch/arm64/configs/sdm845-perf_defconfig | 1 + arch/arm64/configs/sdm845_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index db30129f1234..aefa5c7e5e30 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -291,6 +291,7 @@ CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y CONFIG_I2C_QCOM_GENI=y CONFIG_SOUNDWIRE=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index db4c3af22ca2..fa8663df4f88 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -298,6 +298,7 @@ CONFIG_DIAG_CHAR=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_MSM_LEGACY=y CONFIG_MSM_ADSPRPC=y +CONFIG_MSM_RDBG=m CONFIG_I2C_CHARDEV=y CONFIG_I2C_QCOM_GENI=y CONFIG_SOUNDWIRE=y -- GitLab From 7d0f2b2c517e10e97201467acbdd1bde4ed52f5b Mon Sep 17 00:00:00 2001 From: Sagar Dharia Date: Sat, 1 Jul 2017 18:26:15 -0600 Subject: [PATCH 489/786] slim: msm: ngd: Fix incorrect casting of slimbus context-bank devices Slimbus controller's context-bank(CB) devices are created to facilitate IOMMU mapping for respective slimbus controllers. Calling the suspend/resume routines on CB-devices will result in incorrect casting of these devices to controller devices leading to a kernel-panic. CB-devices for slimbus don't need to do anything in suspend/resume since power management aspects are handled by controller devices. Change-Id: Ib83ef58c2a8e212a74bb2b215345805ef3c032bd Signed-off-by: Sagar Dharia --- drivers/slimbus/slim-msm-ngd.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c index 1b7b591175aa..ca56462dc971 100644 --- a/drivers/slimbus/slim-msm-ngd.c +++ b/drivers/slimbus/slim-msm-ngd.c @@ -2074,8 +2074,13 @@ static int ngd_slim_suspend(struct device *dev) { int ret = -EBUSY; struct platform_device *pdev = to_platform_device(dev); - struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev); + struct msm_slim_ctrl *cdev; + if (of_device_is_compatible(pdev->dev.of_node, + "qcom,iommu-slim-ctrl-cb")) + return 0; + + cdev = platform_get_drvdata(pdev); if (!pm_runtime_enabled(dev) || (!pm_runtime_suspended(dev) && cdev->state == MSM_CTRL_IDLE)) { @@ -2111,9 +2116,14 @@ static int ngd_slim_suspend(struct device *dev) static int ngd_slim_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); - struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev); + struct msm_slim_ctrl *cdev; int ret = 0; + if (of_device_is_compatible(pdev->dev.of_node, + "qcom,iommu-slim-ctrl-cb")) + return 0; + + cdev = platform_get_drvdata(pdev); /* * If deferred response was requested for power-off and it failed, * mark runtime-pm status as active to be consistent -- GitLab From c0c0357415545c9917de5c44d8ea91ef8ccf221c Mon Sep 17 00:00:00 2001 From: Manoj Prabhu B Date: Mon, 24 Apr 2017 18:16:50 +0530 Subject: [PATCH 490/786] memshare: Add support for memshare driver MPSS does the handshake operation with kernel QMI service which interacts with APSS. The memshare driver implements a kernel QMI service in APSS, which is responsible for providing contiguous physical memory to MPSS when modem requires additional memory. 
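
At the QMI message level, this handshake reduces to the modem filling a mem_alloc_generic_req_msg_v01 (declared in heap_mem_ext_v01.h in this patch) and the APSS service answering with a mem_alloc_generic_resp_msg_v01 that carries the physical address of the allocation. A minimal sketch of how such a request could be populated; the values are illustrative and the QMI encode/transport plumbing is omitted:

	#include <string.h>
	#include "heap_mem_ext_v01.h"

	/* Illustrative only: a request the GPS client on the modem could issue
	 * for 2 MB of physically contiguous, 4 KB aligned memory.
	 */
	static void fill_example_alloc_req(struct mem_alloc_generic_req_msg_v01 *req)
	{
		memset(req, 0, sizeof(*req));
		req->num_bytes = 0x200000;		/* 2 MB */
		req->client_id = DHMS_MEM_CLIENT_GPS_V01;
		req->proc_id = DHMS_MEM_PROC_MPSS_V01;	/* request comes from MPSS */
		req->sequence_id = 1;			/* echoed back in the response */
		req->alloc_contiguous_valid = 1;
		req->alloc_contiguous = 1;		/* ask for contiguous memory */
		req->block_alignment_valid = 1;
		req->block_alignment = DHMS_MEM_BLOCK_ALIGN_4K_V01;
	}

On the APSS side, handle_alloc_generic_req() in msm_memshare.c services requests of this form.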
CRs-Fixed: 2030912 Change-Id: I3fdc019248cfb0e8cd338596dcf2c212abd4f00a Signed-off-by: Manoj Prabhu B --- .../bindings/arm/msm/heap-sharing.txt | 44 + drivers/soc/qcom/Kconfig | 2 + drivers/soc/qcom/Makefile | 1 + drivers/soc/qcom/memshare/Kconfig | 9 + drivers/soc/qcom/memshare/Makefile | 1 + drivers/soc/qcom/memshare/heap_mem_ext_v01.c | 472 ++++++++ drivers/soc/qcom/memshare/heap_mem_ext_v01.h | 356 ++++++ drivers/soc/qcom/memshare/msm_memshare.c | 1074 +++++++++++++++++ drivers/soc/qcom/memshare/msm_memshare.h | 64 + 9 files changed, 2023 insertions(+) create mode 100644 Documentation/devicetree/bindings/arm/msm/heap-sharing.txt create mode 100644 drivers/soc/qcom/memshare/Kconfig create mode 100644 drivers/soc/qcom/memshare/Makefile create mode 100644 drivers/soc/qcom/memshare/heap_mem_ext_v01.c create mode 100644 drivers/soc/qcom/memshare/heap_mem_ext_v01.h create mode 100644 drivers/soc/qcom/memshare/msm_memshare.c create mode 100644 drivers/soc/qcom/memshare/msm_memshare.h diff --git a/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt new file mode 100644 index 000000000000..e63d09b4c6da --- /dev/null +++ b/Documentation/devicetree/bindings/arm/msm/heap-sharing.txt @@ -0,0 +1,44 @@ +* Memory Share Driver (MEMSHARE) + +The Memshare driver implements a Kernel QMI service on the +LA-APSS, which is responsible for providing contiguous physical +memory to MPSS for use cases when the modem requires additional +memory (e.g. GPS). + +Required properties for Memshare + +-Root Node- + +- compatible: Must be "qcom,memshare" + +Required properties for child nodes: + +- compatible: Must be "qcom,memshare-peripheral" + +- qcom,peripheral-size: Indicates the size (in bytes) required for that child. + +- qcom,client-id: Indicates the client id of the child node. + +- label: Indicates the peripheral information for the node. Should be one of + the following: + - modem /* Represent Modem Peripheral */ + - adsp /* Represent ADSP Peripheral */ + - wcnss /* Represent WCNSS Peripheral */ + +Optional properties for child nodes: + +- qcom,allocate-boot-time: Indicates whether clients needs boot time memory allocation. + +Example: + +qcom,memshare { + compatible = "qcom,memshare"; + + qcom,client_1 { + compatible = "qcom,memshare-peripheral"; + qcom,peripheral-size = <0x200000>; + qcom,client-id = <0>; + qcom,allocate-boot-time; + label = "modem"; + }; +}; diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 8317c0992ad1..121fa342406d 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -701,3 +701,5 @@ config QCOM_FORCE_WDOG_BITE_ON_PANIC This forces a watchdog bite when the device restarts due to a kernel panic. On certain MSM SoCs, this provides us additional debugging information. 
+
+source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR) += system_health_monitor.o
 obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
 obj-$(CONFIG_ICNSS) += icnss.o wlan_firmware_service_v01.o icnss_utils.o
+obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) += memshare/
 obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
 obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
 obj-$(CONFIG_MSM_PIL) += peripheral-loader.o
diff --git a/drivers/soc/qcom/memshare/Kconfig b/drivers/soc/qcom/memshare/Kconfig
new file mode 100644
index 000000000000..7eb1415b350b
--- /dev/null
+++ b/drivers/soc/qcom/memshare/Kconfig
@@ -0,0 +1,9 @@
+config MEM_SHARE_QMI_SERVICE
+	depends on MSM_QMI_INTERFACE
+	bool "Shared Heap for external processors"
+	help
+	  Memory Share Kernel Qualcomm Messaging Interface Service
+	  receives requests from the Modem Processor Sub System
+	  for heap alloc/free from the Application Processor
+	  Sub System and sends a response back to the client with
+	  the proper handle/address.
diff --git a/drivers/soc/qcom/memshare/Makefile b/drivers/soc/qcom/memshare/Makefile
new file mode 100644
index 000000000000..cf49fbcfdb21
--- /dev/null
+++ b/drivers/soc/qcom/memshare/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MEM_SHARE_QMI_SERVICE) := heap_mem_ext_v01.o msm_memshare.o
\ No newline at end of file
diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.c b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c
new file mode 100644
index 000000000000..afe9a873dd5f
--- /dev/null
+++ b/drivers/soc/qcom/memshare/heap_mem_ext_v01.c
@@ -0,0 +1,472 @@
+/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + */ + +#include +#include +#include "heap_mem_ext_v01.h" + +struct elem_info mem_alloc_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_alloc_req_msg_v01, + num_bytes), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_req_msg_v01, + block_alignment_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_req_msg_v01, + block_alignment), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_alloc_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_2_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + resp), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + handle_valid), + }, + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + handle), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + num_bytes_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_alloc_resp_msg_v01, + num_bytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_free_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_free_req_msg_v01, + handle), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_free_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_SIGNED_2_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(uint16_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_free_resp_msg_v01, + resp), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info dhms_mem_alloc_addr_info_type_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint64_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dhms_mem_alloc_addr_info_type_v01, + phy_addr), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + .offset = offsetof(struct + dhms_mem_alloc_addr_info_type_v01, + num_bytes), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_alloc_generic_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = 
offsetof(struct mem_alloc_generic_req_msg_v01, + num_bytes), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + client_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x03, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + proc_id), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x04, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + sequence_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + alloc_contiguous_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + alloc_contiguous), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + block_alignment_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_alloc_generic_req_msg_v01, + block_alignment), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + sequence_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + sequence_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + dhms_mem_alloc_addr_info_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + dhms_mem_alloc_addr_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = MAX_ARR_CNT_V01, + .elem_size = sizeof(struct + dhms_mem_alloc_addr_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct + mem_alloc_generic_resp_msg_v01, + dhms_mem_alloc_addr_info), + .ei_array = dhms_mem_alloc_addr_info_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_free_generic_req_msg_data_v01_ei[] = { + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct 
mem_free_generic_req_msg_v01, + dhms_mem_alloc_addr_info_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = MAX_ARR_CNT_V01, + .elem_size = sizeof(struct + dhms_mem_alloc_addr_info_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + dhms_mem_alloc_addr_info), + .ei_array = dhms_mem_alloc_addr_info_type_v01_ei, + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + client_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + client_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + proc_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof(struct mem_free_generic_req_msg_v01, + proc_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_free_generic_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + mem_free_generic_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_query_size_req_msg_data_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct mem_query_size_req_msg_v01, + client_id), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_query_size_req_msg_v01, + proc_id_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_query_size_req_msg_v01, + proc_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info mem_query_size_resp_msg_data_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + mem_query_size_rsp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_query_size_rsp_msg_v01, + size_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint32_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof(struct mem_query_size_rsp_msg_v01, + size), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; diff --git a/drivers/soc/qcom/memshare/heap_mem_ext_v01.h b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h new file mode 100644 index 000000000000..cfe3e49c38b3 --- /dev/null +++ 
b/drivers/soc/qcom/memshare/heap_mem_ext_v01.h @@ -0,0 +1,356 @@ +/* Copyright (c) 2013-2015, 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef HEAP_MEM_EXT_SERVICE_01_H +#define HEAP_MEM_EXT_SERVICE_01_H + +#include + +#define MEM_ALLOC_REQ_MAX_MSG_LEN_V01 255 +#define MEM_FREE_REQ_MAX_MSG_LEN_V01 255 +#define MAX_ARR_CNT_V01 64 + +struct dhms_mem_alloc_addr_info_type_v01 { + uint64_t phy_addr; + uint32_t num_bytes; +}; + +enum dhms_mem_proc_id_v01 { + /* To force a 32 bit signed enum. Do not change or use */ + DHMS_MEM_PROC_ID_MIN_ENUM_VAL_V01 = -2147483647, + /* Request from MPSS processor */ + DHMS_MEM_PROC_MPSS_V01 = 0, + /* Request from ADSP processor */ + DHMS_MEM_PROC_ADSP_V01 = 1, + /* Request from WCNSS processor */ + DHMS_MEM_PROC_WCNSS_V01 = 2, + /* To force a 32 bit signed enum. Do not change or use */ + DHMS_MEM_PROC_ID_MAX_ENUM_VAL_V01 = 2147483647 +}; + +enum dhms_mem_client_id_v01 { + /*To force a 32 bit signed enum. Do not change or use*/ + DHMS_MEM_CLIENT_ID_MIN_ENUM_VAL_V01 = -2147483647, + /* Request from GPS Client */ + DHMS_MEM_CLIENT_GPS_V01 = 0, + /* Invalid Client */ + DHMS_MEM_CLIENT_INVALID = 1000, + /* To force a 32 bit signed enum. Do not change or use */ + DHMS_MEM_CLIENT_ID_MAX_ENUM_VAL_V01 = 2147483647 +}; + +enum dhms_mem_block_align_enum_v01 { + /* To force a 32 bit signed enum. Do not change or use + */ + DHMS_MEM_BLOCK_ALIGN_ENUM_MIN_ENUM_VAL_V01 = -2147483647, + /* Align allocated memory by 2 bytes */ + DHMS_MEM_BLOCK_ALIGN_2_V01 = 0, + /* Align allocated memory by 4 bytes */ + DHMS_MEM_BLOCK_ALIGN_4_V01 = 1, + /**< Align allocated memory by 8 bytes */ + DHMS_MEM_BLOCK_ALIGN_8_V01 = 2, + /**< Align allocated memory by 16 bytes */ + DHMS_MEM_BLOCK_ALIGN_16_V01 = 3, + /**< Align allocated memory by 32 bytes */ + DHMS_MEM_BLOCK_ALIGN_32_V01 = 4, + /**< Align allocated memory by 64 bytes */ + DHMS_MEM_BLOCK_ALIGN_64_V01 = 5, + /**< Align allocated memory by 128 bytes */ + DHMS_MEM_BLOCK_ALIGN_128_V01 = 6, + /**< Align allocated memory by 256 bytes */ + DHMS_MEM_BLOCK_ALIGN_256_V01 = 7, + /**< Align allocated memory by 512 bytes */ + DHMS_MEM_BLOCK_ALIGN_512_V01 = 8, + /**< Align allocated memory by 1024 bytes */ + DHMS_MEM_BLOCK_ALIGN_1K_V01 = 9, + /**< Align allocated memory by 2048 bytes */ + DHMS_MEM_BLOCK_ALIGN_2K_V01 = 10, + /**< Align allocated memory by 4096 bytes */ + DHMS_MEM_BLOCK_ALIGN_4K_V01 = 11, + DHMS_MEM_BLOCK_ALIGN_ENUM_MAX_ENUM_VAL_V01 = 2147483647 + /* To force a 32 bit signed enum. 
Do not change or use + */ +}; + +/* Request Message; This command is used for getting + * the multiple physically contiguous + * memory blocks from the server memory subsystem + */ +struct mem_alloc_req_msg_v01 { + + /* Mandatory */ + /*requested size*/ + uint32_t num_bytes; + + /* Optional */ + /* Must be set to true if block_alignment + * is being passed + */ + uint8_t block_alignment_valid; + /* The block alignment for the memory block to be allocated + */ + enum dhms_mem_block_align_enum_v01 block_alignment; +}; /* Message */ + +/* Response Message; This command is used for getting + * the multiple physically contiguous memory blocks + * from the server memory subsystem + */ +struct mem_alloc_resp_msg_v01 { + + /* Mandatory */ + /* Result Code */ + /* The result of the requested memory operation + */ + enum qmi_result_type_v01 resp; + /* Optional */ + /* Memory Block Handle + */ + /* Must be set to true if handle is being passed + */ + uint8_t handle_valid; + /* The physical address of the memory allocated on the HLOS + */ + uint64_t handle; + /* Optional */ + /* Memory block size */ + /* Must be set to true if num_bytes is being passed + */ + uint8_t num_bytes_valid; + /* The number of bytes actually allocated for the request. + * This value can be smaller than the size requested in + * QMI_DHMS_MEM_ALLOC_REQ_MSG. + */ + uint32_t num_bytes; +}; /* Message */ + +/* Request Message; This command is used for releasing + * the multiple physically contiguous + * memory blocks to the server memory subsystem + */ +struct mem_free_req_msg_v01 { + + /* Mandatory */ + /* Physical address of memory to be freed + */ + uint32_t handle; +}; /* Message */ + +/* Response Message; This command is used for releasing + * the multiple physically contiguous + * memory blocks to the server memory subsystem + */ +struct mem_free_resp_msg_v01 { + + /* Mandatory */ + /* Result of the requested memory operation, todo, + * need to check the async operation for free + */ + enum qmi_result_type_v01 resp; +}; /* Message */ + +/* Request Message; This command is used for getting + * the multiple physically contiguous + * memory blocks from the server memory subsystem + */ +struct mem_alloc_generic_req_msg_v01 { + + /* Mandatory */ + /*requested size*/ + uint32_t num_bytes; + + /* Mandatory */ + /* client id */ + enum dhms_mem_client_id_v01 client_id; + + /* Mandatory */ + /* Peripheral Id*/ + enum dhms_mem_proc_id_v01 proc_id; + + /* Mandatory */ + /* Sequence id */ + uint32_t sequence_id; + + /* Optional */ + /* alloc_contiguous */ + /* Must be set to true if alloc_contiguous is being passed */ + uint8_t alloc_contiguous_valid; + + /* Alloc_contiguous is used to identify that clients are requesting + * for contiguous or non contiguous memory, default is contiguous + * 0 = non contiguous else contiguous + */ + uint8_t alloc_contiguous; + + /* Optional */ + /* Must be set to true if block_alignment + * is being passed + */ + uint8_t block_alignment_valid; + + /* The block alignment for the memory block to be allocated + */ + enum dhms_mem_block_align_enum_v01 block_alignment; + +}; /* Message */ + +/* Response Message; This command is used for getting + * the multiple physically contiguous memory blocks + * from the server memory subsystem + */ +struct mem_alloc_generic_resp_msg_v01 { + + /* Mandatory */ + /* Result Code */ + /* The result of the requested memory operation + */ + struct qmi_response_type_v01 resp; + + /* Optional */ + /* Sequence ID */ + /* Must be set to true if sequence_id is being passed */ + 
uint8_t sequence_id_valid; + + + /* Mandatory */ + /* Sequence id */ + uint32_t sequence_id; + + /* Optional */ + /* Memory Block Handle + */ + /* Must be set to true if handle is being passed + */ + uint8_t dhms_mem_alloc_addr_info_valid; + + /* Optional */ + /* Handle Size */ + uint32_t dhms_mem_alloc_addr_info_len; + + /* Optional */ + /* The physical address of the memory allocated on the HLOS + */ + struct dhms_mem_alloc_addr_info_type_v01 + dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01]; + +}; /* Message */ + +/* Request Message; This command is used for releasing + * the multiple physically contiguous + * memory blocks to the server memory subsystem + */ +struct mem_free_generic_req_msg_v01 { + + /* Mandatory */ + /* Must be set to # of elments in array*/ + uint32_t dhms_mem_alloc_addr_info_len; + + /* Mandatory */ + /* Physical address and size of the memory allocated + * on the HLOS to be freed. + */ + struct dhms_mem_alloc_addr_info_type_v01 + dhms_mem_alloc_addr_info[MAX_ARR_CNT_V01]; + + /* Optional */ + /* Client ID */ + /* Must be set to true if client_id is being passed */ + uint8_t client_id_valid; + + /* Optional */ + /* Client Id */ + enum dhms_mem_client_id_v01 client_id; + + /* Optional */ + /* Proc ID */ + /* Must be set to true if proc_id is being passed */ + uint8_t proc_id_valid; + + /* Optional */ + /* Peripheral */ + enum dhms_mem_proc_id_v01 proc_id; + +}; /* Message */ + +/* Response Message; This command is used for releasing + * the multiple physically contiguous + * memory blocks to the server memory subsystem + */ +struct mem_free_generic_resp_msg_v01 { + + /* + * Mandatory + * Result of the requested memory operation, todo, + * need to check the async operation for free + */ + struct qmi_response_type_v01 resp; + +}; /* Message */ + +struct mem_query_size_req_msg_v01 { + + /* Mandatory */ + enum dhms_mem_client_id_v01 client_id; + + /* + * Optional + * Proc ID + * proc_id_valid must be set to true if proc_id is being passed + */ + uint8_t proc_id_valid; + + enum dhms_mem_proc_id_v01 proc_id; +}; /* Message */ + +struct mem_query_size_rsp_msg_v01 { + + /* + * Mandatory + * Result Code + */ + struct qmi_response_type_v01 resp; + + /* + * Optional + * size_valid must be set to true if size is being passed + */ + uint8_t size_valid; + + uint32_t size; +}; /* Message */ + + +extern struct elem_info mem_alloc_req_msg_data_v01_ei[]; +extern struct elem_info mem_alloc_resp_msg_data_v01_ei[]; +extern struct elem_info mem_free_req_msg_data_v01_ei[]; +extern struct elem_info mem_free_resp_msg_data_v01_ei[]; +extern struct elem_info mem_alloc_generic_req_msg_data_v01_ei[]; +extern struct elem_info mem_alloc_generic_resp_msg_data_v01_ei[]; +extern struct elem_info mem_free_generic_req_msg_data_v01_ei[]; +extern struct elem_info mem_free_generic_resp_msg_data_v01_ei[]; +extern struct elem_info mem_query_size_req_msg_data_v01_ei[]; +extern struct elem_info mem_query_size_resp_msg_data_v01_ei[]; + +/*Service Message Definition*/ +#define MEM_ALLOC_REQ_MSG_V01 0x0020 +#define MEM_ALLOC_RESP_MSG_V01 0x0020 +#define MEM_FREE_REQ_MSG_V01 0x0021 +#define MEM_FREE_RESP_MSG_V01 0x0021 +#define MEM_ALLOC_GENERIC_REQ_MSG_V01 0x0022 +#define MEM_ALLOC_GENERIC_RESP_MSG_V01 0x0022 +#define MEM_FREE_GENERIC_REQ_MSG_V01 0x0023 +#define MEM_FREE_GENERIC_RESP_MSG_V01 0x0023 +#define MEM_QUERY_SIZE_REQ_MSG_V01 0x0024 +#define MEM_QUERY_SIZE_RESP_MSG_V01 0x0024 + +#endif diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c new file mode 100644 index 
000000000000..7298f30be0ce --- /dev/null +++ b/drivers/soc/qcom/memshare/msm_memshare.c @@ -0,0 +1,1074 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "msm_memshare.h" +#include "heap_mem_ext_v01.h" + +#include +#include + +/* Macros */ +#define MEMSHARE_DEV_NAME "memshare" +#define MEMSHARE_CHILD_DEV_NAME "memshare_child" +static unsigned long(attrs); + +static struct qmi_handle *mem_share_svc_handle; +static void mem_share_svc_recv_msg(struct work_struct *work); +static DECLARE_DELAYED_WORK(work_recv_msg, mem_share_svc_recv_msg); +static struct workqueue_struct *mem_share_svc_workqueue; +static uint64_t bootup_request; +static bool ramdump_event; +static void *memshare_ramdump_dev[MAX_CLIENTS]; +static struct device *memshare_dev[MAX_CLIENTS]; + +/* Memshare Driver Structure */ +struct memshare_driver { + struct device *dev; + struct mutex mem_share; + struct mutex mem_free; + struct work_struct memshare_init_work; +}; + +struct memshare_child { + struct device *dev; +}; + +static struct memshare_driver *memsh_drv; +static struct memshare_child *memsh_child; +static struct mem_blocks memblock[MAX_CLIENTS]; +static uint32_t num_clients; +static struct msg_desc mem_share_svc_alloc_req_desc = { + .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_ALLOC_REQ_MSG_V01, + .ei_array = mem_alloc_req_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_alloc_resp_desc = { + .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_ALLOC_RESP_MSG_V01, + .ei_array = mem_alloc_resp_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_free_req_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_FREE_REQ_MSG_V01, + .ei_array = mem_free_req_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_free_resp_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_FREE_RESP_MSG_V01, + .ei_array = mem_free_resp_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_alloc_generic_req_desc = { + .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_ALLOC_GENERIC_REQ_MSG_V01, + .ei_array = mem_alloc_generic_req_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_alloc_generic_resp_desc = { + .max_msg_len = MEM_ALLOC_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_ALLOC_GENERIC_RESP_MSG_V01, + .ei_array = mem_alloc_generic_resp_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_free_generic_req_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_FREE_GENERIC_REQ_MSG_V01, + .ei_array = mem_free_generic_req_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_free_generic_resp_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_FREE_GENERIC_RESP_MSG_V01, + .ei_array = mem_free_generic_resp_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_size_query_req_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = 
MEM_QUERY_SIZE_REQ_MSG_V01, + .ei_array = mem_query_size_req_msg_data_v01_ei, +}; + +static struct msg_desc mem_share_svc_size_query_resp_desc = { + .max_msg_len = MEM_FREE_REQ_MAX_MSG_LEN_V01, + .msg_id = MEM_QUERY_SIZE_RESP_MSG_V01, + .ei_array = mem_query_size_resp_msg_data_v01_ei, +}; + +/* + * This API creates ramdump dev handlers + * for each of the memshare clients. + * These dev handlers will be used for + * extracting the ramdump for loaned memory + * segments. + */ + +static int mem_share_configure_ramdump(int client) +{ + char client_name[18]; + const char *clnt = NULL; + + switch (client) { + case 0: + clnt = "GPS"; + break; + case 1: + clnt = "FTM"; + break; + case 2: + clnt = "DIAG"; + break; + default: + pr_err("memshare: no memshare clients registered\n"); + return -EINVAL; + } + + snprintf(client_name, sizeof(client_name), + "memshare_%s", clnt); + if (memshare_dev[client]) { + memshare_ramdump_dev[client] = + create_ramdump_device(client_name, + memshare_dev[client]); + } else { + pr_err("memshare:%s: invalid memshare device\n", __func__); + return -ENODEV; + } + if (IS_ERR_OR_NULL(memshare_ramdump_dev[client])) { + pr_err("memshare: %s: Unable to create memshare ramdump device\n", + __func__); + memshare_ramdump_dev[client] = NULL; + return -ENOMEM; + } + + return 0; +} + +static int check_client(int client_id, int proc, int request) +{ + int i = 0, rc; + int found = DHMS_MEM_CLIENT_INVALID; + + for (i = 0; i < MAX_CLIENTS; i++) { + if (memblock[i].client_id == client_id && + memblock[i].peripheral == proc) { + found = i; + break; + } + } + if ((found == DHMS_MEM_CLIENT_INVALID) && !request) { + pr_debug("memshare: No registered client, adding a new client\n"); + /* Add a new client */ + for (i = 0; i < MAX_CLIENTS; i++) { + if (memblock[i].client_id == DHMS_MEM_CLIENT_INVALID) { + memblock[i].client_id = client_id; + memblock[i].allotted = 0; + memblock[i].guarantee = 0; + memblock[i].peripheral = proc; + found = i; + + if (!memblock[i].file_created) { + rc = mem_share_configure_ramdump(i); + if (rc) + pr_err("memshare: %s, Cannot create ramdump for client: %d\n", + __func__, client_id); + else + memblock[i].file_created = 1; + } + + break; + } + } + } + + return found; +} + +static void free_client(int id) +{ + memblock[id].phy_addr = 0; + memblock[id].virtual_addr = 0; + memblock[id].allotted = 0; + memblock[id].guarantee = 0; + memblock[id].sequence_id = -1; + memblock[id].memory_type = MEMORY_CMA; + +} + +static void fill_alloc_response(struct mem_alloc_generic_resp_msg_v01 *resp, + int id, int *flag) +{ + resp->sequence_id_valid = 1; + resp->sequence_id = memblock[id].sequence_id; + resp->dhms_mem_alloc_addr_info_valid = 1; + resp->dhms_mem_alloc_addr_info_len = 1; + resp->dhms_mem_alloc_addr_info[0].phy_addr = memblock[id].phy_addr; + resp->dhms_mem_alloc_addr_info[0].num_bytes = memblock[id].size; + if (!*flag) { + resp->resp.result = QMI_RESULT_SUCCESS_V01; + resp->resp.error = QMI_ERR_NONE_V01; + } else { + resp->resp.result = QMI_RESULT_FAILURE_V01; + resp->resp.error = QMI_ERR_NO_MEMORY_V01; + } + +} + +static void initialize_client(void) +{ + int i; + + for (i = 0; i < MAX_CLIENTS; i++) { + memblock[i].allotted = 0; + memblock[i].size = 0; + memblock[i].guarantee = 0; + memblock[i].phy_addr = 0; + memblock[i].virtual_addr = 0; + memblock[i].client_id = DHMS_MEM_CLIENT_INVALID; + memblock[i].peripheral = -1; + memblock[i].sequence_id = -1; + memblock[i].memory_type = MEMORY_CMA; + memblock[i].free_memory = 0; + memblock[i].hyp_mapping = 0; + 
memblock[i].file_created = 0; + } + attrs |= DMA_ATTR_NO_KERNEL_MAPPING; +} + +/* + * mem_share_do_ramdump() function initializes the + * ramdump segments with the physical address and + * size of the memshared clients. Extraction of ramdump + * is skipped if memshare client is not allotted + * This calls the ramdump api in extracting the + * ramdump in elf format. + */ + +static int mem_share_do_ramdump(void) +{ + int i = 0, ret; + char *client_name = NULL; + + for (i = 0; i < num_clients; i++) { + + struct ramdump_segment *ramdump_segments_tmp = NULL; + + switch (i) { + case 0: + client_name = "GPS"; + break; + case 1: + client_name = "FTM"; + break; + case 2: + client_name = "DIAG"; + break; + default: + pr_err("memshare: no memshare clients registered\n"); + return -EINVAL; + } + + if (!memblock[i].allotted) { + pr_err("memshare:%s memblock is not allotted\n", + client_name); + continue; + } + + ramdump_segments_tmp = kcalloc(1, + sizeof(struct ramdump_segment), + GFP_KERNEL); + if (!ramdump_segments_tmp) + return -ENOMEM; + + ramdump_segments_tmp[0].size = memblock[i].size; + ramdump_segments_tmp[0].address = memblock[i].phy_addr; + + pr_debug("memshare: %s:%s client:id: %d:size = %d\n", + __func__, client_name, i, memblock[i].size); + + ret = do_elf_ramdump(memshare_ramdump_dev[i], + ramdump_segments_tmp, 1); + kfree(ramdump_segments_tmp); + if (ret < 0) { + pr_err("memshare: Unable to dump: %d\n", ret); + return ret; + } + } + return 0; +} + +static int modem_notifier_cb(struct notifier_block *this, unsigned long code, + void *_cmd) +{ + int i; + int ret; + u32 source_vmlist[2] = {VMID_HLOS, VMID_MSS_MSA}; + int dest_vmids[1] = {VMID_HLOS}; + int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC}; + struct notif_data *notifdata = NULL; + + mutex_lock(&memsh_drv->mem_share); + + switch (code) { + + case SUBSYS_BEFORE_SHUTDOWN: + bootup_request++; + break; + + case SUBSYS_RAMDUMP_NOTIFICATION: + ramdump_event = 1; + break; + + case SUBSYS_BEFORE_POWERUP: + if (_cmd) { + notifdata = (struct notif_data *) _cmd; + } else { + ramdump_event = 0; + break; + } + + if (notifdata->enable_ramdump && ramdump_event) { + pr_debug("memshare: %s, Ramdump collection is enabled\n", + __func__); + ret = mem_share_do_ramdump(); + if (ret) + pr_err("memshare: Ramdump collection failed\n"); + ramdump_event = 0; + } + break; + + case SUBSYS_AFTER_POWERUP: + pr_debug("memshare: Modem has booted up\n"); + for (i = 0; i < MAX_CLIENTS; i++) { + if (memblock[i].free_memory > 0 && + bootup_request >= 2) { + memblock[i].free_memory -= 1; + pr_debug("memshare: free_memory count: %d for client id: %d\n", + memblock[i].free_memory, + memblock[i].client_id); + } + + if (memblock[i].free_memory == 0) { + if (memblock[i].peripheral == + DHMS_MEM_PROC_MPSS_V01 && + !memblock[i].guarantee && + memblock[i].allotted) { + pr_debug("memshare: hypervisor unmapping for client id: %d\n", + memblock[i].client_id); + ret = hyp_assign_phys( + memblock[i].phy_addr, + memblock[i].size, + source_vmlist, + 2, dest_vmids, + dest_perms, 1); + if (ret && + memblock[i].hyp_mapping == 1) { + /* + * This is an error case as hyp + * mapping was successful + * earlier but during unmap + * it lead to failure. 
+ */ + pr_err("memshare: %s, failed to unmap the region\n", + __func__); + memblock[i].hyp_mapping = 1; + } else { + memblock[i].hyp_mapping = 0; + } + dma_free_attrs(memsh_drv->dev, + memblock[i].size, + memblock[i].virtual_addr, + memblock[i].phy_addr, + attrs); + free_client(i); + } + } + } + bootup_request++; + break; + + default: + break; + } + + mutex_unlock(&memsh_drv->mem_share); + return NOTIFY_DONE; +} + +static struct notifier_block nb = { + .notifier_call = modem_notifier_cb, +}; + +static void shared_hyp_mapping(int client_id) +{ + int ret; + u32 source_vmlist[1] = {VMID_HLOS}; + int dest_vmids[2] = {VMID_HLOS, VMID_MSS_MSA}; + int dest_perms[2] = {PERM_READ|PERM_WRITE, + PERM_READ|PERM_WRITE}; + + if (client_id == DHMS_MEM_CLIENT_INVALID) { + pr_err("memshare: %s, Invalid Client\n", __func__); + return; + } + + ret = hyp_assign_phys(memblock[client_id].phy_addr, + memblock[client_id].size, + source_vmlist, 1, dest_vmids, + dest_perms, 2); + + if (ret != 0) { + pr_err("memshare: hyp_assign_phys failed size=%u err=%d\n", + memblock[client_id].size, ret); + return; + } + memblock[client_id].hyp_mapping = 1; +} + +static int handle_alloc_req(void *req_h, void *req, void *conn_h) +{ + struct mem_alloc_req_msg_v01 *alloc_req; + struct mem_alloc_resp_msg_v01 alloc_resp; + int rc = 0; + + mutex_lock(&memsh_drv->mem_share); + alloc_req = (struct mem_alloc_req_msg_v01 *)req; + pr_debug("memshare: %s: Received Alloc Request: alloc_req->num_bytes = %d\n", + __func__, alloc_req->num_bytes); + if (!memblock[GPS].size) { + memset(&alloc_resp, 0, sizeof(alloc_resp)); + alloc_resp.resp = QMI_RESULT_FAILURE_V01; + rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes, + &memblock[GPS]); + } + alloc_resp.num_bytes_valid = 1; + alloc_resp.num_bytes = alloc_req->num_bytes; + alloc_resp.handle_valid = 1; + alloc_resp.handle = memblock[GPS].phy_addr; + if (rc) { + alloc_resp.resp = QMI_RESULT_FAILURE_V01; + memblock[GPS].size = 0; + } else { + alloc_resp.resp = QMI_RESULT_SUCCESS_V01; + } + + mutex_unlock(&memsh_drv->mem_share); + + pr_debug("memshare: %s, alloc_resp.num_bytes :%d, alloc_resp.resp :%lx\n", + __func__, alloc_resp.num_bytes, + (unsigned long int)alloc_resp.resp); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_alloc_resp_desc, &alloc_resp, + sizeof(alloc_resp)); + if (rc < 0) + pr_err("memshare: %s, Error sending the alloc request: %d\n", + __func__, rc); + + return rc; +} + +static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) +{ + struct mem_alloc_generic_req_msg_v01 *alloc_req; + struct mem_alloc_generic_resp_msg_v01 *alloc_resp; + int rc, resp = 0; + int client_id; + + mutex_lock(&memsh_drv->mem_share); + alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req; + pr_debug("memshare: alloc request client id: %d proc _id: %d\n", + alloc_req->client_id, alloc_req->proc_id); + alloc_resp = kzalloc(sizeof(*alloc_resp), + GFP_KERNEL); + if (!alloc_resp) { + mutex_unlock(&memsh_drv->mem_share); + return -ENOMEM; + } + alloc_resp->resp.result = QMI_RESULT_FAILURE_V01; + alloc_resp->resp.error = QMI_ERR_NO_MEMORY_V01; + client_id = check_client(alloc_req->client_id, alloc_req->proc_id, + CHECK); + + if (client_id >= MAX_CLIENTS) { + pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n", + __func__, alloc_req->client_id, + alloc_req->proc_id); + kfree(alloc_resp); + alloc_resp = NULL; + mutex_unlock(&memsh_drv->mem_share); + return -EINVAL; + } + + memblock[client_id].free_memory += 1; + pr_debug("memshare: 
%s, free memory count for client id: %d = %d", + __func__, memblock[client_id].client_id, + memblock[client_id].free_memory); + if (!memblock[client_id].allotted) { + rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes, + &memblock[client_id]); + if (rc) { + pr_err("memshare: %s,Unable to allocate memory for requested client\n", + __func__); + resp = 1; + } + if (!resp) { + memblock[client_id].allotted = 1; + memblock[client_id].size = alloc_req->num_bytes; + memblock[client_id].peripheral = alloc_req->proc_id; + } + } + memblock[client_id].sequence_id = alloc_req->sequence_id; + + fill_alloc_response(alloc_resp, client_id, &resp); + /* + * Perform the Hypervisor mapping in order to avoid XPU viloation + * to the allocated region for Modem Clients + */ + if (!memblock[client_id].hyp_mapping && + memblock[client_id].allotted) + shared_hyp_mapping(client_id); + mutex_unlock(&memsh_drv->mem_share); + pr_debug("memshare: alloc_resp.num_bytes :%d, alloc_resp.resp.result :%lx\n", + alloc_resp->dhms_mem_alloc_addr_info[0].num_bytes, + (unsigned long int)alloc_resp->resp.result); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_alloc_generic_resp_desc, alloc_resp, + sizeof(alloc_resp)); + + if (rc < 0) + pr_err("memshare: %s, Error sending the alloc request: %d\n", + __func__, rc); + + kfree(alloc_resp); + alloc_resp = NULL; + return rc; +} + +static int handle_free_req(void *req_h, void *req, void *conn_h) +{ + struct mem_free_req_msg_v01 *free_req; + struct mem_free_resp_msg_v01 free_resp; + int rc; + + mutex_lock(&memsh_drv->mem_free); + if (!memblock[GPS].guarantee) { + free_req = (struct mem_free_req_msg_v01 *)req; + pr_debug("memshare: %s: Received Free Request\n", __func__); + memset(&free_resp, 0, sizeof(free_resp)); + dma_free_coherent(memsh_drv->dev, memblock[GPS].size, + memblock[GPS].virtual_addr, + free_req->handle); + } + free_resp.resp = QMI_RESULT_SUCCESS_V01; + mutex_unlock(&memsh_drv->mem_free); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_free_resp_desc, &free_resp, + sizeof(free_resp)); + if (rc < 0) + pr_err("memshare: %s, Error sending the free request: %d\n", + __func__, rc); + + return rc; +} + +static int handle_free_generic_req(void *req_h, void *req, void *conn_h) +{ + struct mem_free_generic_req_msg_v01 *free_req; + struct mem_free_generic_resp_msg_v01 free_resp; + int rc; + int flag = 0; + uint32_t client_id; + + mutex_lock(&memsh_drv->mem_free); + free_req = (struct mem_free_generic_req_msg_v01 *)req; + pr_debug("memshare: %s: Received Free Request\n", __func__); + memset(&free_resp, 0, sizeof(free_resp)); + free_resp.resp.error = QMI_ERR_INTERNAL_V01; + free_resp.resp.result = QMI_RESULT_FAILURE_V01; + pr_debug("memshare: Client id: %d proc id: %d\n", free_req->client_id, + free_req->proc_id); + client_id = check_client(free_req->client_id, free_req->proc_id, FREE); + if (client_id == DHMS_MEM_CLIENT_INVALID) { + pr_err("memshare: %s, Invalid client request to free memory\n", + __func__); + flag = 1; + } else if (!memblock[client_id].guarantee && + memblock[client_id].allotted) { + pr_debug("memshare: %s: size: %d", + __func__, memblock[client_id].size); + dma_free_attrs(memsh_drv->dev, memblock[client_id].size, + memblock[client_id].virtual_addr, + memblock[client_id].phy_addr, + attrs); + free_client(client_id); + } else { + pr_err("memshare: %s, Request came for a guaranteed client cannot free up the memory\n", + __func__); + } + + if (flag) { + free_resp.resp.result = 
QMI_RESULT_FAILURE_V01; + free_resp.resp.error = QMI_ERR_INVALID_ID_V01; + } else { + free_resp.resp.result = QMI_RESULT_SUCCESS_V01; + free_resp.resp.error = QMI_ERR_NONE_V01; + } + + mutex_unlock(&memsh_drv->mem_free); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_free_generic_resp_desc, &free_resp, + sizeof(free_resp)); + + if (rc < 0) + pr_err("memshare: %s, Error sending the free request: %d\n", + __func__, rc); + + return rc; +} + +static int handle_query_size_req(void *req_h, void *req, void *conn_h) +{ + int rc, client_id; + struct mem_query_size_req_msg_v01 *query_req; + struct mem_query_size_rsp_msg_v01 *query_resp; + + mutex_lock(&memsh_drv->mem_share); + query_req = (struct mem_query_size_req_msg_v01 *)req; + query_resp = kzalloc(sizeof(*query_resp), + GFP_KERNEL); + if (!query_resp) { + mutex_unlock(&memsh_drv->mem_share); + return -ENOMEM; + } + pr_debug("memshare: query request client id: %d proc _id: %d\n", + query_req->client_id, query_req->proc_id); + client_id = check_client(query_req->client_id, query_req->proc_id, + CHECK); + + if (client_id >= MAX_CLIENTS) { + pr_err("memshare: %s client not found, requested client: %d, proc_id: %d\n", + __func__, query_req->client_id, + query_req->proc_id); + kfree(query_resp); + query_resp = NULL; + mutex_unlock(&memsh_drv->mem_share); + return -EINVAL; + } + + if (memblock[client_id].size) { + query_resp->size_valid = 1; + query_resp->size = memblock[client_id].size; + } else { + query_resp->size_valid = 1; + query_resp->size = 0; + } + query_resp->resp.result = QMI_RESULT_SUCCESS_V01; + query_resp->resp.error = QMI_ERR_NONE_V01; + mutex_unlock(&memsh_drv->mem_share); + + pr_debug("memshare: query_resp.size :%d, query_resp.resp.result :%lx\n", + query_resp->size, + (unsigned long int)query_resp->resp.result); + rc = qmi_send_resp_from_cb(mem_share_svc_handle, conn_h, req_h, + &mem_share_svc_size_query_resp_desc, query_resp, + sizeof(query_resp)); + + if (rc < 0) + pr_err("memshare: %s, Error sending the query request: %d\n", + __func__, rc); + + kfree(query_resp); + query_resp = NULL; + return rc; +} + +static int mem_share_svc_connect_cb(struct qmi_handle *handle, + void *conn_h) +{ + if (mem_share_svc_handle != handle || !conn_h) + return -EINVAL; + + return 0; +} + +static int mem_share_svc_disconnect_cb(struct qmi_handle *handle, + void *conn_h) +{ + if (mem_share_svc_handle != handle || !conn_h) + return -EINVAL; + + return 0; +} + +static int mem_share_svc_req_desc_cb(unsigned int msg_id, + struct msg_desc **req_desc) +{ + int rc; + + pr_debug("memshare: %s\n", __func__); + switch (msg_id) { + case MEM_ALLOC_REQ_MSG_V01: + *req_desc = &mem_share_svc_alloc_req_desc; + rc = sizeof(struct mem_alloc_req_msg_v01); + break; + + case MEM_FREE_REQ_MSG_V01: + *req_desc = &mem_share_svc_free_req_desc; + rc = sizeof(struct mem_free_req_msg_v01); + break; + + case MEM_ALLOC_GENERIC_REQ_MSG_V01: + *req_desc = &mem_share_svc_alloc_generic_req_desc; + rc = sizeof(struct mem_alloc_generic_req_msg_v01); + break; + + case MEM_FREE_GENERIC_REQ_MSG_V01: + *req_desc = &mem_share_svc_free_generic_req_desc; + rc = sizeof(struct mem_free_generic_req_msg_v01); + break; + + case MEM_QUERY_SIZE_REQ_MSG_V01: + *req_desc = &mem_share_svc_size_query_req_desc; + rc = sizeof(struct mem_query_size_req_msg_v01); + break; + + default: + rc = -ENOTSUPP; + break; + } + return rc; +} + +static int mem_share_svc_req_cb(struct qmi_handle *handle, void *conn_h, + void *req_h, unsigned int msg_id, void *req) +{ + int rc; + + 
pr_debug("memshare: %s\n", __func__); + if (mem_share_svc_handle != handle || !conn_h) + return -EINVAL; + + switch (msg_id) { + case MEM_ALLOC_REQ_MSG_V01: + rc = handle_alloc_req(req_h, req, conn_h); + break; + + case MEM_FREE_REQ_MSG_V01: + rc = handle_free_req(req_h, req, conn_h); + break; + + case MEM_ALLOC_GENERIC_REQ_MSG_V01: + rc = handle_alloc_generic_req(req_h, req, conn_h); + break; + + case MEM_FREE_GENERIC_REQ_MSG_V01: + rc = handle_free_generic_req(req_h, req, conn_h); + break; + + case MEM_QUERY_SIZE_REQ_MSG_V01: + rc = handle_query_size_req(req_h, req, conn_h); + break; + + default: + rc = -ENOTSUPP; + break; + } + return rc; +} + +static void mem_share_svc_recv_msg(struct work_struct *work) +{ + int rc; + + pr_debug("memshare: %s\n", __func__); + do { + rc = qmi_recv_msg(mem_share_svc_handle); + pr_debug("memshare: %s: Notified about a Receive Event", + __func__); + } while (!rc); + + if (rc != -ENOMSG) + pr_err("memshare: %s: Error = %d while receiving message\n", + __func__, rc); +} + +static void qmi_mem_share_svc_ntfy(struct qmi_handle *handle, + enum qmi_event_type event, void *priv) +{ + pr_debug("memshare: %s\n", __func__); + + if (event == QMI_RECV_MSG) + queue_delayed_work(mem_share_svc_workqueue, + &work_recv_msg, 0); +} + +static struct qmi_svc_ops_options mem_share_svc_ops_options = { + .version = 1, + .service_id = MEM_SHARE_SERVICE_SVC_ID, + .service_vers = MEM_SHARE_SERVICE_VERS, + .service_ins = MEM_SHARE_SERVICE_INS_ID, + .connect_cb = mem_share_svc_connect_cb, + .disconnect_cb = mem_share_svc_disconnect_cb, + .req_desc_cb = mem_share_svc_req_desc_cb, + .req_cb = mem_share_svc_req_cb, +}; + +int memshare_alloc(struct device *dev, + unsigned int block_size, + struct mem_blocks *pblk) +{ + pr_debug("memshare: %s", __func__); + + if (!pblk) { + pr_err("memshare: %s: Failed memory block allocation\n", + __func__); + return -ENOMEM; + } + + pblk->virtual_addr = dma_alloc_attrs(dev, block_size, + &pblk->phy_addr, GFP_KERNEL, + attrs); + if (pblk->virtual_addr == NULL) + return -ENOMEM; + + return 0; +} + +static void memshare_init_worker(struct work_struct *work) +{ + int rc; + + mem_share_svc_workqueue = + create_singlethread_workqueue("mem_share_svc"); + if (!mem_share_svc_workqueue) + return; + + mem_share_svc_handle = qmi_handle_create(qmi_mem_share_svc_ntfy, NULL); + if (!mem_share_svc_handle) { + pr_err("memshare: %s: Creating mem_share_svc qmi handle failed\n", + __func__); + destroy_workqueue(mem_share_svc_workqueue); + return; + } + rc = qmi_svc_register(mem_share_svc_handle, &mem_share_svc_ops_options); + if (rc < 0) { + pr_err("memshare: %s: Registering mem share svc failed %d\n", + __func__, rc); + qmi_handle_destroy(mem_share_svc_handle); + destroy_workqueue(mem_share_svc_workqueue); + return; + } + pr_debug("memshare: memshare_init successful\n"); +} + +static int memshare_child_probe(struct platform_device *pdev) +{ + int rc; + uint32_t size, client_id; + const char *name; + struct memshare_child *drv; + + drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_child), + GFP_KERNEL); + + if (!drv) + return -ENOMEM; + + drv->dev = &pdev->dev; + memsh_child = drv; + platform_set_drvdata(pdev, memsh_child); + + rc = of_property_read_u32(pdev->dev.of_node, "qcom,peripheral-size", + &size); + if (rc) { + pr_err("memshare: %s, Error reading size of clients, rc: %d\n", + __func__, rc); + return rc; + } + + rc = of_property_read_u32(pdev->dev.of_node, "qcom,client-id", + &client_id); + if (rc) { + pr_err("memshare: %s, Error reading client id, rc: %d\n", + 
__func__, rc); + return rc; + } + + memblock[num_clients].guarantee = of_property_read_bool( + pdev->dev.of_node, + "qcom,allocate-boot-time"); + + rc = of_property_read_string(pdev->dev.of_node, "label", + &name); + if (rc) { + pr_err("memshare: %s, Error reading peripheral info for client, rc: %d\n", + __func__, rc); + return rc; + } + + if (strcmp(name, "modem") == 0) + memblock[num_clients].peripheral = DHMS_MEM_PROC_MPSS_V01; + else if (strcmp(name, "adsp") == 0) + memblock[num_clients].peripheral = DHMS_MEM_PROC_ADSP_V01; + else if (strcmp(name, "wcnss") == 0) + memblock[num_clients].peripheral = DHMS_MEM_PROC_WCNSS_V01; + + memblock[num_clients].size = size; + memblock[num_clients].client_id = client_id; + + /* + * Memshare allocation for guaranteed clients + */ + if (memblock[num_clients].guarantee) { + rc = memshare_alloc(memsh_child->dev, + memblock[num_clients].size, + &memblock[num_clients]); + if (rc) { + pr_err("memshare: %s, Unable to allocate memory for guaranteed clients, rc: %d\n", + __func__, rc); + return rc; + } + memblock[num_clients].allotted = 1; + } + + /* + * call for creating ramdump dev handlers for + * memshare clients + */ + + memshare_dev[num_clients] = &pdev->dev; + + if (!memblock[num_clients].file_created) { + rc = mem_share_configure_ramdump(num_clients); + if (rc) + pr_err("memshare: %s, cannot collect dumps for client id: %d\n", + __func__, + memblock[num_clients].client_id); + else + memblock[num_clients].file_created = 1; + } + + num_clients++; + + return 0; +} + +static int memshare_probe(struct platform_device *pdev) +{ + int rc; + struct memshare_driver *drv; + + drv = devm_kzalloc(&pdev->dev, sizeof(struct memshare_driver), + GFP_KERNEL); + + if (!drv) + return -ENOMEM; + + /* Memory allocation has been done successfully */ + mutex_init(&drv->mem_free); + mutex_init(&drv->mem_share); + + INIT_WORK(&drv->memshare_init_work, memshare_init_worker); + schedule_work(&drv->memshare_init_work); + + drv->dev = &pdev->dev; + memsh_drv = drv; + platform_set_drvdata(pdev, memsh_drv); + initialize_client(); + num_clients = 0; + + rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, + &pdev->dev); + + if (rc) { + pr_err("memshare: %s, error populating the devices\n", + __func__); + return rc; + } + + subsys_notif_register_notifier("modem", &nb); + pr_debug("memshare: %s, Memshare inited\n", __func__); + + return 0; +} + +static int memshare_remove(struct platform_device *pdev) +{ + if (!memsh_drv) + return 0; + + qmi_svc_unregister(mem_share_svc_handle); + flush_workqueue(mem_share_svc_workqueue); + qmi_handle_destroy(mem_share_svc_handle); + destroy_workqueue(mem_share_svc_workqueue); + + return 0; +} + +static int memshare_child_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id memshare_match_table[] = { + { + .compatible = "qcom,memshare", + }, + {} +}; + +static const struct of_device_id memshare_match_table1[] = { + { + .compatible = "qcom,memshare-peripheral", + }, + {} +}; + + +static struct platform_driver memshare_pdriver = { + .probe = memshare_probe, + .remove = memshare_remove, + .driver = { + .name = MEMSHARE_DEV_NAME, + .owner = THIS_MODULE, + .of_match_table = memshare_match_table, + }, +}; + +static struct platform_driver memshare_pchild = { + .probe = memshare_child_probe, + .remove = memshare_child_remove, + .driver = { + .name = MEMSHARE_CHILD_DEV_NAME, + .owner = THIS_MODULE, + .of_match_table = memshare_match_table1, + }, +}; + +module_platform_driver(memshare_pdriver); 
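+/*
+ * The child driver registered below binds the "qcom,memshare-peripheral"
+ * nodes that memshare_probe() populates via of_platform_populate(); each
+ * child node describes one memshare client (client id, size, peripheral).
+ */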
+module_platform_driver(memshare_pchild); + +MODULE_DESCRIPTION("Mem Share QMI Service Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h new file mode 100644 index 000000000000..f3b594a5c3c3 --- /dev/null +++ b/drivers/soc/qcom/memshare/msm_memshare.h @@ -0,0 +1,64 @@ +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _LINUX_MEM_SHARE_H +#define _LINUX_MEM_SHARE_H + +#define MEM_SHARE_SERVICE_SVC_ID 0x00000034 +#define MEM_SHARE_SERVICE_INS_ID 1 +#define MEM_SHARE_SERVICE_VERS 1 + +#define MEMORY_CMA 1 +#define MEMORY_NON_CMA 0 +#define MAX_CLIENTS 10 +#define GPS 0 +#define CHECK 0 +#define FREE 1 + +struct mem_blocks { + /* Client Id information */ + uint32_t client_id; + /* Peripheral associated with client */ + uint32_t peripheral; + /* Sequence Id */ + uint32_t sequence_id; + /* CMA or Non-CMA region */ + uint32_t memory_type; + /* Guaranteed Memory */ + uint32_t guarantee; + /* Memory alloted or not */ + uint32_t allotted; + /* Size required for client */ + uint32_t size; + /* + * start address of the memory block reserved by server memory + * subsystem to client + */ + phys_addr_t phy_addr; + /* Virtual address for the physical address allocated */ + void *virtual_addr; + /* Release memory only when XPU is released*/ + uint8_t free_memory; + /* Need Hypervisor mapping*/ + uint8_t hyp_mapping; + /* Status flag which checks if ramdump file is created*/ + int file_created; + +}; + +int memshare_alloc(struct device *dev, + unsigned int block_size, + struct mem_blocks *pblk); +void memshare_free(unsigned int block_size, + struct mem_blocks *pblk); +#endif /* _LINUX_MEM_SHARE_H */ -- GitLab From 55ef0cbfb8a9e7f32686d7c7991fc46121e70e86 Mon Sep 17 00:00:00 2001 From: Udaya Bhaskara Reddy Mallavarapu Date: Mon, 12 Jun 2017 14:45:28 +0530 Subject: [PATCH 491/786] ARM: dts: msm: add TSPP node for sdm845 Add TSPP device tree node to support TSPP on sdm845, together with the required pinctrl definitions. 
Change-Id: I1551943d867d15b22303fe24cb70f31f077467b0 Signed-off-by: Udaya Bhaskara Reddy Mallavarapu --- arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi | 60 ++++++++++++++++++++ arch/arm64/boot/dts/qcom/sdm845.dtsi | 51 +++++++++++++++++ 2 files changed, 111 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi index dc58f9c36da3..2a7b6d13e81b 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi @@ -2796,6 +2796,66 @@ bias-disable; }; }; + + tsif0_signals_active: tsif0_signals_active { + tsif1_clk { + pins = "gpio89"; /* TSIF0 CLK */ + function = "tsif1_clk"; + }; + tsif1_en { + pins = "gpio90"; /* TSIF0 Enable */ + function = "tsif1_en"; + }; + tsif1_data { + pins = "gpio91"; /* TSIF0 DATA */ + function = "tsif1_data"; + }; + signals_cfg { + pins = "gpio89", "gpio90", "gpio91"; + drive_strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; + + /* sync signal is only used if configured to mode-2 */ + tsif0_sync_active: tsif0_sync_active { + tsif1_sync { + pins = "gpio12"; /* TSIF0 SYNC */ + function = "tsif1_sync"; + drive_strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; + + tsif1_signals_active: tsif1_signals_active { + tsif2_clk { + pins = "gpio93"; /* TSIF1 CLK */ + function = "tsif2_clk"; + }; + tsif2_en { + pins = "gpio94"; /* TSIF1 Enable */ + function = "tsif2_en"; + }; + tsif2_data { + pins = "gpio95"; /* TSIF1 DATA */ + function = "tsif2_data"; + }; + signals_cfg { + pins = "gpio93", "gpio94", "gpio95"; + drive_strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; + + /* sync signal is only used if configured to mode-2 */ + tsif1_sync_active: tsif1_sync_active { + tsif2_sync { + pins = "gpio96"; /* TSIF1 SYNC */ + function = "tsif2_sync"; + drive_strength = <2>; /* 2 mA */ + bias-pull-down; /* pull down */ + }; + }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index fc9a114de173..27361e55c029 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -4088,6 +4088,57 @@ iommus = <&apps_smmu 0x06d6 0x0>; status = "ok"; }; + + tspp: msm_tspp@0x8880000 { + compatible = "qcom,msm_tspp"; + reg = <0x088a7000 0x200>, /* MSM_TSIF0_PHYS */ + <0x088a8000 0x200>, /* MSM_TSIF1_PHYS */ + <0x088a9000 0x1000>, /* MSM_TSPP_PHYS */ + <0x08884000 0x23000>; /* MSM_TSPP_BAM_PHYS */ + reg-names = "MSM_TSIF0_PHYS", + "MSM_TSIF1_PHYS", + "MSM_TSPP_PHYS", + "MSM_TSPP_BAM_PHYS"; + interrupts = <0 121 0>, /* TSIF_TSPP_IRQ */ + <0 119 0>, /* TSIF0_IRQ */ + <0 120 0>, /* TSIF1_IRQ */ + <0 122 0>; /* TSIF_BAM_IRQ */ + interrupt-names = "TSIF_TSPP_IRQ", + "TSIF0_IRQ", + "TSIF1_IRQ", + "TSIF_BAM_IRQ"; + + clock-names = "iface_clk", "ref_clk"; + clocks = <&clock_gcc GCC_TSIF_AHB_CLK>, + <&clock_gcc GCC_TSIF_REF_CLK>; + + qcom,msm-bus,name = "tsif"; + qcom,msm-bus,num-cases = <2>; + qcom,msm-bus,num-paths = <1>; + qcom,msm-bus,vectors-KBps = + <82 512 0 0>, /* No vote */ + <82 512 12288 24576>; + /* Max. 
bandwidth, 2xTSIF, each max of 96Mbps */ + + pinctrl-names = "disabled", + "tsif0-mode1", "tsif0-mode2", + "tsif1-mode1", "tsif1-mode2", + "dual-tsif-mode1", "dual-tsif-mode2"; + + pinctrl-0 = <>; /* disabled */ + pinctrl-1 = <&tsif0_signals_active>; /* tsif0-mode1 */ + pinctrl-2 = <&tsif0_signals_active + &tsif0_sync_active>; /* tsif0-mode2 */ + pinctrl-3 = <&tsif1_signals_active>; /* tsif1-mode1 */ + pinctrl-4 = <&tsif1_signals_active + &tsif1_sync_active>; /* tsif1-mode2 */ + pinctrl-5 = <&tsif0_signals_active + &tsif1_signals_active>; /* dual-tsif-mode1 */ + pinctrl-6 = <&tsif0_signals_active + &tsif0_sync_active + &tsif1_signals_active + &tsif1_sync_active>; /* dual-tsif-mode2 */ + }; }; &clock_cpucc { -- GitLab From 67138a5e60c57ff56e131fa7bd02489c263a4fe8 Mon Sep 17 00:00:00 2001 From: Udaya Bhaskara Reddy Mallavarapu Date: Mon, 12 Jun 2017 15:18:00 +0530 Subject: [PATCH 492/786] defconfig: msm: enable dvb demux modules compilation Enable dvb_core, mpq demux plugin, TSPP modules compilation. Change-Id: I188ae4a3402f760a361b0c43fc75271d0f45e3a2 Signed-off-by: Udaya Bhaskara Reddy Mallavarapu --- arch/arm64/configs/sdm845-perf_defconfig | 5 +++++ arch/arm64/configs/sdm845_defconfig | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index 096fbab05e26..8024e3492bcf 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -340,6 +340,7 @@ CONFIG_REGULATOR_RPMH=y CONFIG_REGULATOR_STUB=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_ADV_DEBUG=y @@ -350,6 +351,10 @@ CONFIG_MSM_VIDC_V4L2=y CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_TSPP1=y +CONFIG_TSPP=m CONFIG_QCOM_KGSL=y CONFIG_DRM=y CONFIG_DRM_SDE_EVTLOG_DEBUG=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 5a608ea2db75..a2e049ca0bf3 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -347,6 +347,7 @@ CONFIG_REGULATOR_RPMH=y CONFIG_REGULATOR_STUB=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y CONFIG_MEDIA_CONTROLLER=y CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_VIDEO_ADV_DEBUG=y @@ -357,6 +358,10 @@ CONFIG_MSM_VIDC_V4L2=y CONFIG_MSM_VIDC_GOVERNORS=y CONFIG_MSM_SDE_ROTATOR=y CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_DVB_MPQ=m +CONFIG_DVB_MPQ_DEMUX=m +CONFIG_DVB_MPQ_TSPP1=y +CONFIG_TSPP=m CONFIG_QCOM_KGSL=y CONFIG_DRM=y CONFIG_DRM_SDE_EVTLOG_DEBUG=y -- GitLab From 099af9cb212f9ee8c8033b23a55821f4b7355517 Mon Sep 17 00:00:00 2001 From: Sayali Lokhande Date: Thu, 8 Jun 2017 10:18:29 +0530 Subject: [PATCH 493/786] ARM: dts: msm: Enable UFS support for sdm670 This change enables UFS support on sdm670 RUMI platform. 
Change-Id: I5cacfcbf3c6bc3294a90bd220707bd8a303c8812 Signed-off-by: Sayali Lokhande --- arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi | 32 +++++++++++ arch/arm64/boot/dts/qcom/sdm670.dtsi | 65 ++++++++++++++++++++++- 2 files changed, 96 insertions(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi index b8812526a242..17b90c7ecb21 100644 --- a/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670-rumi.dtsi @@ -45,3 +45,35 @@ &qupv3_se6_4uart { status = "disabled"; }; + +&ufsphy_mem { + compatible = "qcom,ufs-phy-qrbtc-sdm845"; + + vdda-phy-supply = <&pm660l_l1>; /* 0.88v */ + vdda-pll-supply = <&pm660_l1>; /* 1.2v */ + vdda-phy-max-microamp = <62900>; + vdda-pll-max-microamp = <18300>; + + status = "ok"; +}; + +&ufshc_mem { + limit-tx-hs-gear = <1>; + limit-rx-hs-gear = <1>; + scsi-cmd-timeout = <300000>; + + vdd-hba-supply = <&ufs_phy_gdsc>; + vdd-hba-fixed-regulator; + vcc-supply = <&pm660l_l4>; + vccq2-supply = <&pm660_l8>; + vcc-max-microamp = <600000>; + vccq2-max-microamp = <600000>; + + qcom,vddp-ref-clk-supply = <&pm660_l1>; + qcom,vddp-ref-clk-max-microamp = <100>; + + qcom,disable-lpm; + rpm-level = <0>; + spm-level = <0>; + status = "ok"; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 90d454726fba..1ae5674d0d55 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -27,7 +27,9 @@ qcom,msm-id = <336 0x0>; interrupt-parent = <&intc>; - aliases { }; + aliases { + ufshc1 = &ufshc_mem; /* Embedded UFS slot */ + }; cpus { #address-cells = <2>; @@ -768,6 +770,67 @@ #interrupt-cells = <4>; cell-index = <0>; }; + + ufsphy_mem: ufsphy_mem@1d87000 { + reg = <0x1d87000 0xe00>; /* PHY regs */ + reg-names = "phy_mem"; + #phy-cells = <0>; + + lanes-per-direction = <1>; + + clock-names = "ref_clk_src", + "ref_clk", + "ref_aux_clk"; + clocks = <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>, + <&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>; + + status = "disabled"; + }; + + ufshc_mem: ufshc@1d84000 { + compatible = "qcom,ufshc"; + reg = <0x1d84000 0x3000>; + interrupts = <0 265 0>; + phys = <&ufsphy_mem>; + phy-names = "ufsphy"; + + lanes-per-direction = <1>; + dev-ref-clk-freq = <0>; /* 19.2 MHz */ + + clock-names = + "core_clk", + "bus_aggr_clk", + "iface_clk", + "core_clk_unipro", + "core_clk_ice", + "ref_clk", + "tx_lane0_sync_clk", + "rx_lane0_sync_clk"; + clocks = + <&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>, + <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>, + <&clock_gcc GCC_UFS_PHY_AHB_CLK>, + <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>, + <&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>, + <&clock_rpmh RPMH_CXO_CLK>, + <&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>, + <&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>; + freq-table-hz = + <50000000 200000000>, + <0 0>, + <0 0>, + <37500000 150000000>, + <75000000 300000000>, + <0 0>, + <0 0>, + <0 0>; + + resets = <&clock_gcc GCC_UFS_PHY_BCR>; + reset-names = "core_reset"; + + status = "disabled"; + }; }; #include "sdm670-pinctrl.dtsi" -- GitLab From f1ffe203c7da91254ea94b3b824b37d06df81702 Mon Sep 17 00:00:00 2001 From: Deepak Kumar Date: Wed, 21 Jun 2017 13:12:33 +0530 Subject: [PATCH 494/786] msm: kgsl: Limit the frequency of logging on memory allocation failure Excessive logging due to several successive memory allocation failure may cause a watchdog bite. Hence, this change adds ratelimit to logging on memory allocation failure. 
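For reference, the change follows the standard <linux/ratelimit.h> pattern;
a minimal sketch of the same idea (identifier names here are illustrative,
the hunk below uses its own state variable):

	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&rs))
		pr_err("out of memory\n");

With the default interval and burst this caps the error print at roughly
ten messages every five seconds instead of logging on every failed
allocation.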
Change-Id: I8e5d78918a32c48ef7fa587f3dc63cbd1f065d5f Signed-off-by: Deepak Kumar --- drivers/gpu/msm/kgsl_sharedmem.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c index eb676575249a..f0f202b5adba 100644 --- a/drivers/gpu/msm/kgsl_sharedmem.c +++ b/drivers/gpu/msm/kgsl_sharedmem.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "kgsl.h" #include "kgsl_sharedmem.h" @@ -700,6 +701,10 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc, size_t len; unsigned int align; + static DEFINE_RATELIMIT_STATE(_rs, + DEFAULT_RATELIMIT_INTERVAL, + DEFAULT_RATELIMIT_BURST); + size = PAGE_ALIGN(size); if (size == 0 || size > UINT_MAX) return -EINVAL; @@ -762,7 +767,8 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc, */ memdesc->size = (size - len); - if (sharedmem_noretry_flag != true) + if (sharedmem_noretry_flag != true && + __ratelimit(&_rs)) KGSL_CORE_ERR( "Out of memory: only allocated %lldKB of %lldKB requested\n", (size - len) >> 10, size >> 10); -- GitLab From ce925b21358218348754cd408dcec294fc73d70b Mon Sep 17 00:00:00 2001 From: Deepak Kumar Date: Tue, 20 Jun 2017 16:06:52 +0530 Subject: [PATCH 495/786] msm: kgsl: Directly return page size of the supported pool In current code, if a request comes to allocate a page of a size for which pool is not supported EAGAIN is returned with a page size of PAGE_SIZE << --order. This is not efficient as it results in multiple retries in case pool of size = PAGE_SIZE << --order is also not supported. Instead of retrying with lower order page in a sequential manner this change directly returns the page size of the pool that is supported. Change-Id: Ib82ae5be7e4109fdc0a3d72bcbcd4b47cfb2e266 Signed-off-by: Deepak Kumar --- drivers/gpu/msm/kgsl_pool.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c index bb92b8b79d93..c31a85b07447 100644 --- a/drivers/gpu/msm/kgsl_pool.c +++ b/drivers/gpu/msm/kgsl_pool.c @@ -280,6 +280,17 @@ static int kgsl_pool_idx_lookup(unsigned int order) return -ENOMEM; } +static int kgsl_pool_get_retry_order(unsigned int order) +{ + int i; + + for (i = kgsl_num_pools-1; i > 0; i--) + if (order >= kgsl_pools[i].pool_order) + return kgsl_pools[i].pool_order; + + return 0; +} + /** * kgsl_pool_alloc_page() - Allocate a page of requested size * @page_size: Size of the page to be allocated @@ -326,7 +337,7 @@ int kgsl_pool_alloc_page(int *page_size, struct page **pages, if (pool == NULL) { /* Retry with lower order pages */ if (order > 0) { - size = PAGE_SIZE << --order; + size = PAGE_SIZE << kgsl_pool_get_retry_order(order); goto eagain; } else { /* -- GitLab From 3c61a7d08ac1fdb985a29ffeae69858a77544dd0 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Thu, 22 Jun 2017 13:33:08 +0530 Subject: [PATCH 496/786] genriq: pick only one CPU while overriding the affinity during migration With commit bfc60d474137 ("genirq: Use irq_set_affinity_locked to change irq affinity"), affinity listeners receive the notification when the irq affinity is changed during migration. If there is no online and un-isolated CPU available from the user specified affinity, the affinity is overridden with all online and un-isolated CPUs. The same cpumask is notified to PM QOS affinity listener which applies PM_QOS_CPU_DMA_LATENCY vote to all those CPUs. 
As the low level irqchip driver sets affinity to only one CPU, do the same while overriding the affinity during migration. Change-Id: I0bcb75dd356658da100fbeeefd33ef8b121f4d6d Signed-off-by: Pavankumar Kondeti --- kernel/irq/cpuhotplug.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index dac3724e4c1e..e072f71255c7 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c @@ -40,6 +40,17 @@ static bool migrate_one_irq(struct irq_desc *desc) cpu_isolated_mask); if (cpumask_empty(affinity)) affinity = cpu_online_mask; + /* + * We are overriding the affinity with all online and + * un-isolated cpus. irq_set_affinity_locked() call + * below notify this mask to PM QOS affinity listener. + * That results in applying the CPU_DMA_LATENCY QOS + * to all the CPUs specified in the mask. But the low + * level irqchip driver sets the affinity of an irq + * to only one CPU. So pick only one CPU from the + * prepared mask while overriding the user affinity. + */ + affinity = cpumask_of(cpumask_any(affinity)); ret = true; } -- GitLab From 68435b04f34204d89b9234becf7a01bab9a658a2 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Thu, 29 Jun 2017 16:17:55 +0530 Subject: [PATCH 497/786] cpu-hotplug: Keep atleast 1 online and un-isolated CPU The PM_QOS_CPU_DMA_LATENCY vote attached to an IRQ is discarded, if it is affined to an isolated CPU. So we need atleast 1 CPU in online and un-isolate state. The scheduler rejects isolating a CPU if it is the only online and un-isolated CPU in the system. Add the same check for CPU hotplug. Change-Id: I5bdfe6e3bb0352ed3ae5a2de90097b73d248f3fc Signed-off-by: Pavankumar Kondeti --- include/linux/cpumask.h | 8 ++++++++ kernel/cpu.c | 3 +++ 2 files changed, 11 insertions(+) diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index b8eb25b3b991..4fbc62e89101 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -103,6 +103,13 @@ extern struct cpumask __cpu_isolated_mask; #define num_present_cpus() cpumask_weight(cpu_present_mask) #define num_active_cpus() cpumask_weight(cpu_active_mask) #define num_isolated_cpus() cpumask_weight(cpu_isolated_mask) +#define num_online_uniso_cpus() \ +({ \ + cpumask_t mask; \ + \ + cpumask_andnot(&mask, cpu_online_mask, cpu_isolated_mask); \ + cpumask_weight(&mask); \ +}) #define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) #define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) #define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) @@ -114,6 +121,7 @@ extern struct cpumask __cpu_isolated_mask; #define num_present_cpus() 1U #define num_active_cpus() 1U #define num_isolated_cpus() 0U +#define num_online_uniso_cpus() 1U #define cpu_online(cpu) ((cpu) == 0) #define cpu_possible(cpu) ((cpu) == 0) #define cpu_present(cpu) ((cpu) == 0) diff --git a/kernel/cpu.c b/kernel/cpu.c index 78b72d5f7d42..3577ec6a36e5 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -886,6 +886,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, if (!cpu_present(cpu)) return -EINVAL; + if (!tasks_frozen && !cpu_isolated(cpu) && num_online_uniso_cpus() == 1) + return -EBUSY; + cpu_hotplug_begin(); cpuhp_tasks_frozen = tasks_frozen; -- GitLab From f817f4536c46c8484bca628bfaffb0787572680b Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Fri, 16 Jun 2017 09:36:25 +0530 Subject: [PATCH 498/786] genirq: honour default IRQ affinity setting during migration Userspace can set the default IRQ affinity setting by writing 
into /proc/irq/default_smp_affinity file. When an IRQ affinity is broken during isolation/hotplug,override the affinity to online and un-isolated CPUs from the default affinity CPUs. If no such CPU is available, then only override with cpu_online_mask. Change-Id: I7578728ed0d7c17c5890d9916cfd6451d1968568 Signed-off-by: Pavankumar Kondeti --- kernel/irq/cpuhotplug.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index e072f71255c7..007482b8ee2e 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c @@ -36,10 +36,21 @@ static bool migrate_one_irq(struct irq_desc *desc) affinity = &available_cpus; if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { + /* + * The order of preference for selecting a fallback CPU is + * + * (1) online and un-isolated CPU from default affinity + * (2) online and un-isolated CPU + * (3) online CPU + */ cpumask_andnot(&available_cpus, cpu_online_mask, cpu_isolated_mask); - if (cpumask_empty(affinity)) + if (cpumask_intersects(&available_cpus, irq_default_affinity)) + cpumask_and(&available_cpus, &available_cpus, + irq_default_affinity); + else if (cpumask_empty(&available_cpus)) affinity = cpu_online_mask; + /* * We are overriding the affinity with all online and * un-isolated cpus. irq_set_affinity_locked() call -- GitLab From f087456b5c7186be24f77bda21351a88e288dfc2 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Fri, 16 Jun 2017 09:36:34 +0530 Subject: [PATCH 499/786] genirq: Don't allow user space to set IRQ affinity to isolated CPUs The PM_QOS_CPU_DMA_LATENCY QOS request attached to an IRQ is ignored if the IRQ is affined to an isolated CPU. As isolated CPUs enter deep sleep state, it is better not to affine IRQs to those CPUs. Change-Id: Ieab4a04eca222b91159208b21bc9e14390ecd62e Signed-off-by: Pavankumar Kondeti --- kernel/irq/proc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index feaa813b84a9..88a02e3ff3b8 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -114,6 +114,11 @@ static ssize_t write_irq_affinity(int type, struct file *file, goto free_cpumask; } + if (cpumask_subset(new_value, cpu_isolated_mask)) { + err = -EINVAL; + goto free_cpumask; + } + /* * Do not allow disabling IRQs completely - it's a too easy * way to make the system unusable accidentally :-) At least -- GitLab From 29d1a782941a1fc0156c0d9001e5e9b1242ecde0 Mon Sep 17 00:00:00 2001 From: Vijayanand Jitta Date: Mon, 3 Jul 2017 15:17:09 +0530 Subject: [PATCH 500/786] ARM: dts: msm: Update cdsp memory map for sdm670 Update cdsp carveout memory region from 6MB to 8MB for sdm670. 
Change-Id: I7f1fb6f9d568b5deecd7cfe6a8dd45485af9a7e8 Signed-off-by: Vijayanand Jitta --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 90d454726fba..b6c9672adab3 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -315,37 +315,37 @@ pil_cdsp_mem: cdsp_regions@93300000 { compatible = "removed-dma-pool"; no-map; - reg = <0 0x93300000 0 0x600000>; + reg = <0 0x93300000 0 0x800000>; }; - pil_mba_mem: pil_mba_region@0x93900000 { + pil_mba_mem: pil_mba_region@0x93b00000 { compatible = "removed-dma-pool"; no-map; - reg = <0 0x93900000 0 0x200000>; + reg = <0 0x93b00000 0 0x200000>; }; - pil_adsp_mem: pil_adsp_region@93b00000 { + pil_adsp_mem: pil_adsp_region@93d00000 { compatible = "removed-dma-pool"; no-map; - reg = <0 0x93b00000 0 0x1e00000>; + reg = <0 0x93d00000 0 0x1e00000>; }; - pil_ipa_fw_mem: pil_ipa_fw_region@95900000 { + pil_ipa_fw_mem: pil_ipa_fw_region@95b00000 { compatible = "removed-dma-pool"; no-map; - reg = <0 0x95900000 0 0x10000>; + reg = <0 0x95b00000 0 0x10000>; }; - pil_ipa_gsi_mem: pil_ipa_gsi_region@95910000 { + pil_ipa_gsi_mem: pil_ipa_gsi_region@95b10000 { compatible = "removed-dma-pool"; no-map; - reg = <0 0x95910000 0 0x5000>; + reg = <0 0x95b10000 0 0x5000>; }; - pil_gpu_mem: pil_gpu_region@95915000 { + pil_gpu_mem: pil_gpu_region@95b15000 { compatible = "removed-dma-pool"; no-map; - reg = <0 0x95915000 0 0x1000>; + reg = <0 0x95b15000 0 0x1000>; }; adsp_mem: adsp_region { -- GitLab From 07eb703645ccc6ead859c67c357f368091b7de25 Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Wed, 19 Apr 2017 11:46:59 +0530 Subject: [PATCH 501/786] soc: qcom: glink_smem_native_xport: Allocate smem item in non cache region rx fifo is allocated in cached region of smem, this leads to inconsistency when reading at remote side. rx fifo is allocated in non cached region of smem. CRs-Fixed: 2056955 Change-Id: I0c7d5bf55222920cffdcd2c7f48968a4f49ee790 Signed-off-by: Dhoat Harpal --- drivers/soc/qcom/glink_smem_native_xprt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c index 0ce1bdaf69ad..94dffa539ad6 100644 --- a/drivers/soc/qcom/glink_smem_native_xprt.c +++ b/drivers/soc/qcom/glink_smem_native_xprt.c @@ -2388,7 +2388,7 @@ static int glink_smem_native_probe(struct platform_device *pdev) einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0, einfo->tx_fifo_size, einfo->remote_proc_id, - SMEM_ITEM_CACHED_FLAG); + 0); if (!einfo->tx_fifo) { pr_err("%s: smem alloc of tx fifo failed\n", __func__); rc = -ENOMEM; -- GitLab From f59ff97090a688dbba5bd1eff9dfbfa2f79b6c49 Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Fri, 12 May 2017 20:33:21 +0530 Subject: [PATCH 502/786] soc: qcom: glink: Move ctx initialization of xprt ptr In glink_open function, channel context initialization with transport pointer is done quite after after its creation. This create race condition, if parallel thread try to use transport pointer of ctx. Ctx is initialized with transport pointer right at the time of its creation. 
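Roughly, the window looked like this before the change (a simplified
sketch of glink_open()/ch_name_to_ch_ctx_create(), not the literal code):

	ctx = ch_name_to_ch_ctx_create(...);	/* ctx is already on the
						 * transport's channel list */
	...					/* another thread walking that
						 * list can dereference
						 * ctx->transport_ptr == NULL */
	ctx->transport_ptr = transport_ptr;	/* assigned too late */

Setting transport_ptr inside ch_name_to_ch_ctx_create(), before the
context is added to the channel list, closes this window.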
CRs-Fixed: 2061645 Change-Id: Idcddf1ab10b8673a20bc1f23d8702bf870f79dbd Signed-off-by: Dhoat Harpal --- drivers/soc/qcom/glink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c index dcf6654293d7..d03770687bd1 100644 --- a/drivers/soc/qcom/glink.c +++ b/drivers/soc/qcom/glink.c @@ -1930,6 +1930,7 @@ static struct channel_ctx *ch_name_to_ch_ctx_create( kfree(flcid); } + ctx->transport_ptr = xprt_ctx; list_add_tail(&ctx->port_list_node, &xprt_ctx->channels); GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx, @@ -2616,7 +2617,6 @@ void *glink_open(const struct glink_open_config *cfg) ctx->local_xprt_req = best_id; ctx->no_migrate = cfg->transport && !(cfg->options & GLINK_OPT_INITIAL_XPORT); - ctx->transport_ptr = transport_ptr; ctx->local_open_state = GLINK_CHANNEL_OPENING; GLINK_INFO_PERF_CH(ctx, "%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n", -- GitLab From 960b3b8e40a45ba8a6bd11982e4f8db196e74314 Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Fri, 12 May 2017 21:31:47 +0530 Subject: [PATCH 503/786] soc: qcom: glink: Move tx_info allocation in beginning TX_info is allocated after pop remote intent, this can cause problem when there is no memory for allocation then glink has to push back the intent, which again needs memory. Tx_info allocation is moved before op remote intent. CRs-Fixed: 2063427 Change-Id: I4f174c4b0143454596ac8f7a1c639c853b98a2ce Signed-off-by: Dhoat Harpal --- drivers/soc/qcom/glink.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c index d03770687bd1..d31bf8de948b 100644 --- a/drivers/soc/qcom/glink.c +++ b/drivers/soc/qcom/glink.c @@ -2862,7 +2862,7 @@ static int glink_tx_common(void *handle, void *pkt_priv, struct channel_ctx *ctx = (struct channel_ctx *)handle; uint32_t riid; int ret = 0; - struct glink_core_tx_pkt *tx_info; + struct glink_core_tx_pkt *tx_info = NULL; size_t intent_size; bool is_atomic = tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC); @@ -2877,6 +2877,13 @@ static int glink_tx_common(void *handle, void *pkt_priv, return ret; rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic); + tx_info = kzalloc(sizeof(struct glink_core_tx_pkt), + is_atomic ? GFP_ATOMIC : GFP_KERNEL); + if (!tx_info) { + GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__); + ret = -ENOMEM; + goto glink_tx_common_err; + } if (!(vbuf_provider || pbuf_provider)) { ret = -EINVAL; goto glink_tx_common_err; @@ -2996,14 +3003,7 @@ static int glink_tx_common(void *handle, void *pkt_priv, GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n", __func__, riid, intent_size, data ? data : iovec, size, current->pid); - tx_info = kzalloc(sizeof(struct glink_core_tx_pkt), - is_atomic ? 
GFP_ATOMIC : GFP_KERNEL); - if (!tx_info) { - GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__); - ch_push_remote_rx_intent(ctx, intent_size, riid, cookie); - ret = -ENOMEM; - goto glink_tx_common_err; - } + rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release); INIT_LIST_HEAD(&tx_info->list_done); INIT_LIST_HEAD(&tx_info->list_node); @@ -3028,10 +3028,15 @@ static int glink_tx_common(void *handle, void *pkt_priv, else xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info); + rwref_read_put(&ctx->ch_state_lhb2); + glink_put_ch_ctx(ctx, false); + return ret; + glink_tx_common_err: rwref_read_put(&ctx->ch_state_lhb2); glink_tx_common_err_2: glink_put_ch_ctx(ctx, false); + kfree(tx_info); return ret; } -- GitLab From a9de12ae60e53554708a86e665cd6aff2ad2343e Mon Sep 17 00:00:00 2001 From: AnilKumar Chimata Date: Mon, 3 Jul 2017 18:00:34 +0530 Subject: [PATCH 504/786] ARM: dts: msm: Set FDE key size to 256bit for sdm845 Set FDE encryption key size to 256bit for sdm845 target. Change-Id: Ida1eaad6d7cb91c595bf698f760d28786cc49c8a Signed-off-by: AnilKumar Chimata --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index fc9a114de173..40f46c4f7cc4 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -2605,6 +2605,7 @@ qcom,disk-encrypt-pipe-pair = <2>; qcom,support-fde; qcom,no-clock-support; + qcom,fde-key-size; qcom,msm-bus,name = "qseecom-noc"; qcom,msm-bus,num-cases = <4>; qcom,msm-bus,num-paths = <1>; -- GitLab From 92d63dea8c6acb2b510067f04452c1fd541e8ee0 Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Tue, 6 Jun 2017 21:20:26 +0530 Subject: [PATCH 505/786] ARM: dts: msm: Add SMP2P entries for sdm670 Add the SMP2P entries to facilitate SMP2P interprocessor communication. CRs-Fixed: 2059063 Change-Id: Idd4da462476edfe513fb38517166a0bb2915f7ea Signed-off-by: Dhoat Harpal --- arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi | 179 +++++++++++++++++++++ arch/arm64/boot/dts/qcom/sdm670.dtsi | 2 + 2 files changed, 181 insertions(+) create mode 100644 arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi diff --git a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi new file mode 100644 index 000000000000..4ab0839c5db8 --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi @@ -0,0 +1,179 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include + +&soc { + qcom,smp2p-modem@1799000c { + compatible = "qcom,smp2p"; + reg = <0x1799000c 0x4>; + qcom,remote-pid = <1>; + qcom,irq-bitmask = <0x4000>; + interrupts = ; + }; + + qcom,smp2p-adsp@1799000c { + compatible = "qcom,smp2p"; + reg = <0x1799000c 0x4>; + qcom,remote-pid = <2>; + qcom,irq-bitmask = <0x200>; + interrupts = ; + }; + + qcom,smp2p-cdsp@1799000c { + compatible = "qcom,smp2p"; + reg = <0x1799000c 0x4>; + qcom,remote-pid = <5>; + qcom,irq-bitmask = <0x40>; + interrupts = ; + }; + + + smp2pgpio_smp2p_15_in: qcom,smp2pgpio-smp2p-15-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "smp2p"; + qcom,remote-pid = <15>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_test_smp2p_15_in { + compatible = "qcom,smp2pgpio_test_smp2p_15_in"; + gpios = <&smp2pgpio_smp2p_15_in 0 0>; + }; + + smp2pgpio_smp2p_15_out: qcom,smp2pgpio-smp2p-15-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "smp2p"; + qcom,remote-pid = <15>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_test_smp2p_15_out { + compatible = "qcom,smp2pgpio_test_smp2p_15_out"; + gpios = <&smp2pgpio_smp2p_15_out 0 0>; + }; + + smp2pgpio_smp2p_1_in: qcom,smp2pgpio-smp2p-1-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "smp2p"; + qcom,remote-pid = <1>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_test_smp2p_1_in { + compatible = "qcom,smp2pgpio_test_smp2p_1_in"; + gpios = <&smp2pgpio_smp2p_1_in 0 0>; + }; + + smp2pgpio_smp2p_1_out: qcom,smp2pgpio-smp2p-1-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "smp2p"; + qcom,remote-pid = <1>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_test_smp2p_1_out { + compatible = "qcom,smp2pgpio_test_smp2p_1_out"; + gpios = <&smp2pgpio_smp2p_1_out 0 0>; + }; + + smp2pgpio_smp2p_2_in: qcom,smp2pgpio-smp2p-2-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "smp2p"; + qcom,remote-pid = <2>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_test_smp2p_2_in { + compatible = "qcom,smp2pgpio_test_smp2p_2_in"; + gpios = <&smp2pgpio_smp2p_2_in 0 0>; + }; + + smp2pgpio_smp2p_2_out: qcom,smp2pgpio-smp2p-2-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "smp2p"; + qcom,remote-pid = <2>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_test_smp2p_2_out { + compatible = "qcom,smp2pgpio_test_smp2p_2_out"; + gpios = <&smp2pgpio_smp2p_2_out 0 0>; + }; + + smp2pgpio_sleepstate_2_out: qcom,smp2pgpio-sleepstate-gpio-2-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "sleepstate"; + qcom,remote-pid = <2>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio-sleepstate-2-out { + compatible = "qcom,smp2pgpio-sleepstate-out"; + gpios = <&smp2pgpio_sleepstate_2_out 0 0>; + }; + + smp2pgpio_smp2p_5_in: qcom,smp2pgpio-smp2p-5-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "smp2p"; + qcom,remote-pid = <5>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_test_smp2p_5_in { + compatible = "qcom,smp2pgpio_test_smp2p_5_in"; + gpios = 
<&smp2pgpio_smp2p_5_in 0 0>; + }; + + smp2pgpio_smp2p_5_out: qcom,smp2pgpio-smp2p-5-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "smp2p"; + qcom,remote-pid = <5>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + qcom,smp2pgpio_test_smp2p_5_out { + compatible = "qcom,smp2pgpio_test_smp2p_5_out"; + gpios = <&smp2pgpio_smp2p_5_out 0 0>; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 90d454726fba..b1c70d973e6f 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -394,6 +394,8 @@ #include "sdm670-ion.dtsi" +#include "sdm670-smp2p.dtsi" + #include "sdm670-qupv3.dtsi" &soc { -- GitLab From 5f909efcee9acedd981e82cc74e4d684c969f266 Mon Sep 17 00:00:00 2001 From: Dhoat Harpal Date: Fri, 9 Jun 2017 21:18:00 +0530 Subject: [PATCH 506/786] ARM: dts: msm: Add QMP device for AOP on sdm670 Add new mailbox device to enable communication to AOP with the QMP transport. CRs-Fixed: 2059063 Change-Id: Ia08d56863b9edcd7ae656543fd401a20bf83b196 Signed-off-by: Dhoat Harpal --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 13bd0981fab2..bfd1a3796bad 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -698,6 +698,18 @@ qcom,mpu-enabled; }; + qmp_aop: mailbox@1799000c { + compatible = "qcom,qmp-mbox"; + label = "aop"; + reg = <0xc300000 0x100000>, + <0x1799000c 0x4>; + reg-names = "msgram", "irq-reg-base"; + qcom,irq-mask = <0x1>; + interrupts = <0 389 1>; + mbox-desc-offset = <0x0>; + #mbox-cells = <1>; + }; + qcom,glink-smem-native-xprt-modem@86000000 { compatible = "qcom,glink-smem-native-xprt"; reg = <0x86000000 0x200000>, -- GitLab From bae519015595c49a6b4c5d194e622fe77577a042 Mon Sep 17 00:00:00 2001 From: Sathish Ambley Date: Mon, 3 Jul 2017 15:00:49 -0700 Subject: [PATCH 507/786] msm: ADSPRPC: CRC request for remote invoke buffers Add support for CRC values of FastRPC invoke input and output buffers on remote processor. 
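A rough usage sketch only (the ioctl name and the exact user-space
structure layout are assumptions for illustration; the authoritative
definitions are in the updated adsprpc_shared.h): a caller that wants the
CRCs back passes a 64-entry buffer through the new crc field, and NULL to
keep the old behaviour:

	uint32_t crclist[64];			/* M_CRCLIST slots */
	struct fastrpc_ioctl_invoke_crc args;

	args.inv = inv;				/* handle/sc/pra as before */
	args.fds = fds;
	args.attrs = NULL;
	args.crc = crclist;
	ioctl(dev_fd, FASTRPC_IOCTL_INVOKE_CRC, &args);

put_args() copies M_CRCLIST 32-bit values back to this pointer only when
it is non-NULL, so existing callers that pass crc = NULL are unaffected.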
Change-Id: I8fcb4b96b1c1281fc5f42da562a0cd9d197e88e0 Acked-by: Viswanatham Paduchuri Signed-off-by: Sathish Ambley --- drivers/char/adsprpc.c | 48 +++++++++++++++++++++++++++-------- drivers/char/adsprpc_compat.c | 30 +++++++++++++++++----- drivers/char/adsprpc_shared.h | 8 ++++++ 3 files changed, 68 insertions(+), 18 deletions(-) diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c index 9102df702822..cd47c773a0e8 100644 --- a/drivers/char/adsprpc.c +++ b/drivers/char/adsprpc.c @@ -58,7 +58,8 @@ #define BALIGN 128 #define NUM_CHANNELS 4 /* adsp, mdsp, slpi, cdsp*/ #define NUM_SESSIONS 9 /*8 compute, 1 cpz*/ -#define M_FDLIST 16 +#define M_FDLIST (16) +#define M_CRCLIST (64) #define IS_CACHE_ALIGNED(x) (((x) & ((L1_CACHE_BYTES)-1)) == 0) @@ -172,6 +173,7 @@ struct smq_invoke_ctx { struct overlap *overs; struct overlap **overps; struct smq_msg msg; + uint32_t *crc; }; struct fastrpc_ctx_lst { @@ -681,7 +683,7 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size, static int context_restore_interrupted(struct fastrpc_file *fl, - struct fastrpc_ioctl_invoke_attrs *inv, + struct fastrpc_ioctl_invoke_crc *inv, struct smq_invoke_ctx **po) { int err = 0; @@ -788,7 +790,7 @@ static int context_build_overlap(struct smq_invoke_ctx *ctx) static void context_free(struct smq_invoke_ctx *ctx); static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, - struct fastrpc_ioctl_invoke_attrs *invokefd, + struct fastrpc_ioctl_invoke_crc *invokefd, struct smq_invoke_ctx **po) { int err = 0, bufs, size = 0; @@ -834,7 +836,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel, if (err) goto bail; } - + ctx->crc = (uint32_t *)invokefd->crc; ctx->sc = invoke->sc; if (bufs) { VERIFY(err, 0 == context_build_overlap(ctx)); @@ -993,6 +995,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) int err = 0; int mflags = 0; uint64_t *fdlist; + uint32_t *crclist; /* calculate size of the metadata */ rpra = 0; @@ -1018,7 +1021,9 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) goto bail; ipage += 1; } - metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST); + metalen = copylen = (ssize_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) + + (sizeof(uint32_t) * M_CRCLIST); + /* calculate len requreed for copying */ for (oix = 0; oix < inbufs + outbufs; ++oix) { int i = ctx->overps[oix]->raix; @@ -1112,6 +1117,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx) fdlist = (uint64_t *)&pages[bufs + handles]; for (i = 0; i < M_FDLIST; i++) fdlist[i] = 0; + crclist = (uint32_t *)&fdlist[M_FDLIST]; + memset(crclist, 0, sizeof(uint32_t)*M_FDLIST); /* copy non ion buffers */ PERF(ctx->fl->profile, ctx->fl->perf.copy, @@ -1191,6 +1198,8 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, struct smq_phy_page *pages; struct fastrpc_mmap *mmap; uint64_t *fdlist; + uint32_t *crclist = NULL; + remote_arg64_t *rpra = ctx->rpra; int i, inbufs, outbufs, handles; int err = 0; @@ -1201,6 +1210,8 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, list = smq_invoke_buf_start(ctx->rpra, sc); pages = smq_phy_page_start(sc, list); fdlist = (uint64_t *)(pages + inbufs + outbufs + handles); + crclist = (uint32_t *)(fdlist + M_FDLIST); + for (i = inbufs; i < inbufs + outbufs; ++i) { if (!ctx->maps[i]) { K_COPY_TO_USER(err, kernel, @@ -1223,6 +1234,10 @@ static int put_args(uint32_t kernel, struct smq_invoke_ctx *ctx, fastrpc_mmap_free(mmap); } } + if (ctx->crc && crclist && rpra) + K_COPY_TO_USER(err, kernel, 
(void __user *)ctx->crc, + crclist, M_CRCLIST*sizeof(uint32_t)); + bail: return err; } @@ -1345,7 +1360,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl); static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode, uint32_t kernel, - struct fastrpc_ioctl_invoke_attrs *inv) + struct fastrpc_ioctl_invoke_crc *inv) { struct smq_invoke_ctx *ctx = 0; struct fastrpc_ioctl_invoke *invoke = &inv->inv; @@ -1436,7 +1451,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl, struct fastrpc_ioctl_init_attrs *uproc) { int err = 0; - struct fastrpc_ioctl_invoke_attrs ioctl; + struct fastrpc_ioctl_invoke_crc ioctl; struct fastrpc_ioctl_init *init = &uproc->init; struct smq_phy_page pages[1]; struct fastrpc_mmap *file = 0, *mem = 0; @@ -1455,6 +1470,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl, ioctl.inv.pra = ra; ioctl.fds = 0; ioctl.attrs = 0; + ioctl.crc = NULL; fl->pd = 0; VERIFY(err, !(err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, 1, &ioctl))); @@ -1524,6 +1540,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl, ioctl.inv.pra = ra; ioctl.fds = fds; ioctl.attrs = 0; + ioctl.crc = NULL; VERIFY(err, !(err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, 1, &ioctl))); if (err) @@ -1542,7 +1559,7 @@ static int fastrpc_init_process(struct fastrpc_file *fl, static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl) { int err = 0; - struct fastrpc_ioctl_invoke_attrs ioctl; + struct fastrpc_ioctl_invoke_crc ioctl; remote_arg_t ra[1]; int tgid = 0; @@ -1560,6 +1577,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl) ioctl.inv.pra = ra; ioctl.fds = 0; ioctl.attrs = 0; + ioctl.crc = NULL; VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, 1, &ioctl))); bail: @@ -1569,7 +1587,7 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl) static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, struct fastrpc_mmap *map) { - struct fastrpc_ioctl_invoke_attrs ioctl; + struct fastrpc_ioctl_invoke_crc ioctl; struct smq_phy_page page; int num = 1; remote_arg_t ra[3]; @@ -1606,6 +1624,7 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, ioctl.inv.pra = ra; ioctl.fds = 0; ioctl.attrs = 0; + ioctl.crc = NULL; VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, 1, &ioctl))); map->raddr = (uintptr_t)routargs.vaddrout; @@ -1616,7 +1635,7 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags, static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, struct fastrpc_mmap *map) { - struct fastrpc_ioctl_invoke_attrs ioctl; + struct fastrpc_ioctl_invoke_crc ioctl; remote_arg_t ra[1]; int err = 0; struct { @@ -1639,6 +1658,7 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl, ioctl.inv.pra = ra; ioctl.fds = 0; ioctl.attrs = 0; + ioctl.crc = NULL; VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl, FASTRPC_MODE_PARALLEL, 1, &ioctl))); return err; @@ -2194,7 +2214,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, unsigned long ioctl_param) { union { - struct fastrpc_ioctl_invoke_attrs inv; + struct fastrpc_ioctl_invoke_crc inv; struct fastrpc_ioctl_mmap mmap; struct fastrpc_ioctl_munmap munmap; struct fastrpc_ioctl_init_attrs init; @@ -2207,10 +2227,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, p.inv.fds = 0; p.inv.attrs = 0; + p.inv.crc = NULL; switch (ioctl_num) { case FASTRPC_IOCTL_INVOKE: size = sizeof(struct 
fastrpc_ioctl_invoke); + /* fall through */ case FASTRPC_IOCTL_INVOKE_FD: if (!size) size = sizeof(struct fastrpc_ioctl_invoke_fd); @@ -2218,6 +2240,10 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num, case FASTRPC_IOCTL_INVOKE_ATTRS: if (!size) size = sizeof(struct fastrpc_ioctl_invoke_attrs); + /* fall through */ + case FASTRPC_IOCTL_INVOKE_CRC: + if (!size) + size = sizeof(struct fastrpc_ioctl_invoke_crc); VERIFY(err, 0 == copy_from_user(&p.inv, param, size)); if (err) goto bail; diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c index 8e72b4ddfd93..078b4d991925 100644 --- a/drivers/char/adsprpc_compat.c +++ b/drivers/char/adsprpc_compat.c @@ -36,6 +36,8 @@ _IOWR('R', 9, struct compat_fastrpc_ioctl_perf) #define COMPAT_FASTRPC_IOCTL_INIT_ATTRS \ _IOWR('R', 10, struct compat_fastrpc_ioctl_init_attrs) +#define COMPAT_FASTRPC_IOCTL_INVOKE_CRC \ + _IOWR('R', 11, struct compat_fastrpc_ioctl_invoke_crc) struct compat_remote_buf { compat_uptr_t pv; /* buffer pointer */ @@ -64,6 +66,13 @@ struct compat_fastrpc_ioctl_invoke_attrs { compat_uptr_t attrs; /* attribute list */ }; +struct compat_fastrpc_ioctl_invoke_crc { + struct compat_fastrpc_ioctl_invoke inv; + compat_uptr_t fds; /* fd list */ + compat_uptr_t attrs; /* attribute list */ + compat_uptr_t crc; /* crc list */ +}; + struct compat_fastrpc_ioctl_mmap { compat_int_t fd; /* ion fd */ compat_uint_t flags; /* flags for dsp to map with */ @@ -100,14 +109,14 @@ struct compat_fastrpc_ioctl_perf { /* kernel performance data */ }; static int compat_get_fastrpc_ioctl_invoke( - struct compat_fastrpc_ioctl_invoke_attrs __user *inv32, - struct fastrpc_ioctl_invoke_attrs __user **inva, + struct compat_fastrpc_ioctl_invoke_crc __user *inv32, + struct fastrpc_ioctl_invoke_crc __user **inva, unsigned int cmd) { compat_uint_t u, sc; compat_ssize_t s; compat_uptr_t p; - struct fastrpc_ioctl_invoke_attrs *inv; + struct fastrpc_ioctl_invoke_crc *inv; union compat_remote_arg *pra32; union remote_arg *pra; int err, len, j; @@ -146,10 +155,16 @@ static int compat_get_fastrpc_ioctl_invoke( err |= put_user(p, (compat_uptr_t *)&inv->fds); } err |= put_user(NULL, &inv->attrs); - if (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) { + if ((cmd == COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS) || + (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC)) { err |= get_user(p, &inv32->attrs); err |= put_user(p, (compat_uptr_t *)&inv->attrs); } + err |= put_user(NULL, (compat_uptr_t __user **)&inv->crc); + if (cmd == COMPAT_FASTRPC_IOCTL_INVOKE_CRC) { + err |= get_user(p, &inv32->crc); + err |= put_user(p, (compat_uptr_t __user *)&inv->crc); + } *inva = inv; return err; @@ -273,9 +288,10 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, case COMPAT_FASTRPC_IOCTL_INVOKE: case COMPAT_FASTRPC_IOCTL_INVOKE_FD: case COMPAT_FASTRPC_IOCTL_INVOKE_ATTRS: + case COMPAT_FASTRPC_IOCTL_INVOKE_CRC: { - struct compat_fastrpc_ioctl_invoke_attrs __user *inv32; - struct fastrpc_ioctl_invoke_attrs __user *inv; + struct compat_fastrpc_ioctl_invoke_crc __user *inv32; + struct fastrpc_ioctl_invoke_crc __user *inv; inv32 = compat_ptr(arg); VERIFY(err, 0 == compat_get_fastrpc_ioctl_invoke(inv32, @@ -283,7 +299,7 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd, if (err) return err; return filp->f_op->unlocked_ioctl(filp, - FASTRPC_IOCTL_INVOKE_ATTRS, (unsigned long)inv); + FASTRPC_IOCTL_INVOKE_CRC, (unsigned long)inv); } case COMPAT_FASTRPC_IOCTL_MMAP: { diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h 
index 7175b9eea254..fb7afa36541a 100644 --- a/drivers/char/adsprpc_shared.h +++ b/drivers/char/adsprpc_shared.h @@ -27,6 +27,7 @@ #define FASTRPC_IOCTL_GETINFO _IOWR('R', 8, uint32_t) #define FASTRPC_IOCTL_GETPERF _IOWR('R', 9, struct fastrpc_ioctl_perf) #define FASTRPC_IOCTL_INIT_ATTRS _IOWR('R', 10, struct fastrpc_ioctl_init_attrs) +#define FASTRPC_IOCTL_INVOKE_CRC _IOWR('R', 11, struct fastrpc_ioctl_invoke_crc) #define FASTRPC_GLINK_GUID "fastrpcglink-apps-dsp" #define FASTRPC_SMD_GUID "fastrpcsmd-apps-dsp" @@ -156,6 +157,13 @@ struct fastrpc_ioctl_invoke_attrs { unsigned int *attrs; /* attribute list */ }; +struct fastrpc_ioctl_invoke_crc { + struct fastrpc_ioctl_invoke inv; + int *fds; /* fd list */ + unsigned int *attrs; /* attribute list */ + unsigned int *crc; +}; + struct fastrpc_ioctl_init { uint32_t flags; /* one of FASTRPC_INIT_* macros */ uintptr_t __user file; /* pointer to elf file */ -- GitLab From d94ffceb040d0db0cc0112660bb2bcf49d913a8f Mon Sep 17 00:00:00 2001 From: Can Guo Date: Mon, 3 Jul 2017 15:06:55 +0800 Subject: [PATCH 508/786] mmc: core: Fix card remove detect when use extcon Since SDM845 uses extcon for card insertion and remove detection and cd_gpio in device tree is not given, so the mmc_gpio_get_cd function cannot detect card remove by reading gpio value. Fix it by checking extcon state if extcon is initialized during sdhci msm probe. Change-Id: Ib44d211f707393cb440c387a06fa70d3adc99736 Signed-off-by: Can Guo --- drivers/mmc/core/slot-gpio.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index b5c81e444c80..91ad9465476a 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -77,6 +77,15 @@ EXPORT_SYMBOL(mmc_gpio_get_ro); int mmc_gpio_get_cd(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; + int ret; + + if (host->extcon) { + ret = extcon_get_state(host->extcon, EXTCON_MECHANICAL); + if (ret < 0) + dev_err(mmc_dev(host), "%s: Extcon failed to check card state, ret=%d\n", + __func__, ret); + return ret; + } if (!ctx || !ctx->cd_gpio) return -ENOSYS; -- GitLab From 6514fbefbcb604786ee560e1593a361bc7d05719 Mon Sep 17 00:00:00 2001 From: Can Guo Date: Tue, 4 Jul 2017 12:36:09 +0800 Subject: [PATCH 509/786] mmc: core: extend SDR104 workaround for other paths UHS-I SD cards support SDR104 mode which runs the SD card interface clock upto 208 MHz. But we may see repeated CRC errors in SDR104 with some SDCC controllers. If this happens, commit ("mmc: sd: reduce the bus speed in case of multiple CRC errors") would reinit the card to lower speed (SDR50) hoping that CRC error rate would reduce at lower clock speed (100MHz for SDR50). As the error may happen for any cmd, this change tries to fix several other paths - clock scaling, mmc_rescan, non-data commands error path. This change was backed out from msm-4.9 because SDM845 is unable to detect mmc card remove by reading cd_gpio. Now it is safe to be merged in once again because mmc card detect is fixed. 
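
The recovery policy applied in each of these paths can be summarized compactly. The sketch below is a condensed restatement of the mmc_recovery_fallback_lower_speed() helper added to core.c further down, not a separate mechanism: only SD cards currently running at SDR104, on hosts that opt in via sdr104_wa, are dropped to a lower bus speed, and only once per card.

    /* Condensed restatement of the fallback added by this patch. */
    static void sdr104_fallback(struct mmc_host *host)
    {
        if (!host->card)
            return;

        if (host->sdr104_wa && mmc_card_sd(host->card) &&
            host->ios.timing == MMC_TIMING_UHS_SDR104 &&
            !host->card->sdr104_blocked) {
            mmc_host_clear_sdr104(host);   /* drop SDR104 from host caps */
            mmc_hw_reset(host);            /* reinit card at SDR50/DDR50 */
            host->card->sdr104_blocked = true;
        }
    }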
Change-Id: I87505cdc8614ff0c41ad345b2c181603d0235983 Signed-off-by: Sahitya Tummala Signed-off-by: Subhash Jadavani Signed-off-by: Veerabhadrarao Badiganti Signed-off-by: Can Guo --- drivers/mmc/card/block.c | 5 +++- drivers/mmc/core/core.c | 58 +++++++++++++++++++++++++++++++++------- include/linux/mmc/core.h | 1 + 3 files changed, 53 insertions(+), 11 deletions(-) diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index f3004355053f..c6f34964d839 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1718,6 +1718,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, /* We couldn't get a response from the card. Give up. */ if (err) { + if (card->err_in_sdr104) + return ERR_RETRY; /* Check if the card is removed */ if (mmc_detect_card_removed(card->host)) return ERR_NOMEDIUM; @@ -2208,7 +2210,8 @@ static int mmc_blk_err_check(struct mmc_card *card, brq->data.error == -ETIMEDOUT || brq->cmd.error == -EILSEQ || brq->cmd.error == -EIO || - brq->cmd.error == -ETIMEDOUT)) + brq->cmd.error == -ETIMEDOUT || + brq->sbc.error)) card->err_in_sdr104 = true; /* diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 787779c6699b..a2219daf5296 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -456,6 +456,22 @@ int mmc_clk_update_freq(struct mmc_host *host, } EXPORT_SYMBOL(mmc_clk_update_freq); +void mmc_recovery_fallback_lower_speed(struct mmc_host *host) +{ + if (!host->card) + return; + + if (host->sdr104_wa && mmc_card_sd(host->card) && + (host->ios.timing == MMC_TIMING_UHS_SDR104) && + !host->card->sdr104_blocked) { + pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n", + mmc_hostname(host), __func__); + mmc_host_clear_sdr104(host); + mmc_hw_reset(host); + host->card->sdr104_blocked = true; + } +} + static int mmc_devfreq_set_target(struct device *dev, unsigned long *freq, u32 devfreq_flags) { @@ -507,6 +523,9 @@ static int mmc_devfreq_set_target(struct device *dev, if (abort) goto out; + if (mmc_card_sd(host->card) && host->card->sdr104_blocked) + goto rel_host; + /* * In case we were able to claim host there is no need to * defer the frequency change. 
It will be done now @@ -515,15 +534,18 @@ static int mmc_devfreq_set_target(struct device *dev, mmc_host_clk_hold(host); err = mmc_clk_update_freq(host, *freq, clk_scaling->state); - if (err && err != -EAGAIN) + if (err && err != -EAGAIN) { pr_err("%s: clock scale to %lu failed with error %d\n", mmc_hostname(host), *freq, err); - else + mmc_recovery_fallback_lower_speed(host); + } else { pr_debug("%s: clock change to %lu finished successfully (%s)\n", mmc_hostname(host), *freq, current->comm); + } mmc_host_clk_release(host); +rel_host: mmc_release_host(host); out: return err; @@ -544,6 +566,9 @@ void mmc_deferred_scaling(struct mmc_host *host) if (!host->clk_scaling.enable) return; + if (mmc_card_sd(host->card) && host->card->sdr104_blocked) + return; + spin_lock_bh(&host->clk_scaling.lock); if (host->clk_scaling.clk_scaling_in_progress || @@ -564,13 +589,15 @@ void mmc_deferred_scaling(struct mmc_host *host) err = mmc_clk_update_freq(host, target_freq, host->clk_scaling.state); - if (err && err != -EAGAIN) + if (err && err != -EAGAIN) { pr_err("%s: failed on deferred scale clocks (%d)\n", mmc_hostname(host), err); - else + mmc_recovery_fallback_lower_speed(host); + } else { pr_debug("%s: clocks were successfully scaled to %lu (%s)\n", mmc_hostname(host), target_freq, current->comm); + } host->clk_scaling.clk_scaling_in_progress = false; atomic_dec(&host->clk_scaling.devfreq_abort); } @@ -1571,8 +1598,13 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq) } } if (!cmd->error || !cmd->retries || - mmc_card_removed(host->card)) + mmc_card_removed(host->card)) { + if (cmd->error && !cmd->retries && + cmd->opcode != MMC_SEND_STATUS && + cmd->opcode != MMC_SEND_TUNING_BLOCK) + mmc_recovery_fallback_lower_speed(host); break; + } mmc_retune_recheck(host); @@ -4257,12 +4289,18 @@ int _mmc_detect_card_removed(struct mmc_host *host) } if (ret) { - mmc_card_set_removed(host->card); - if (host->card->sdr104_blocked) { - mmc_host_set_sdr104(host); - host->card->sdr104_blocked = false; + if (host->ops->get_cd && host->ops->get_cd(host)) { + mmc_recovery_fallback_lower_speed(host); + ret = 0; + } else { + mmc_card_set_removed(host->card); + if (host->card->sdr104_blocked) { + mmc_host_set_sdr104(host); + host->card->sdr104_blocked = false; + } + pr_debug("%s: card remove detected\n", + mmc_hostname(host)); } - pr_debug("%s: card remove detected\n", mmc_hostname(host)); } return ret; diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 0f2e65101b7c..b71810518922 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -233,6 +233,7 @@ extern void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed); extern void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed, bool is_cmdq_dcmd); +extern void mmc_recovery_fallback_lower_speed(struct mmc_host *host); /** * mmc_claim_host - exclusively claim a host -- GitLab From 833ea2a4a5f2f5eb32f4b94cf833ecc3f13b2aea Mon Sep 17 00:00:00 2001 From: Michal' Potomski Date: Wed, 31 May 2017 15:25:11 +0530 Subject: [PATCH 510/786] scsi: ufs: Factor out ufshcd_read_desc_param Since in UFS 2.1 specification some of the descriptor lengths differs from 2.0 specification and some devices, which are reporting spec version 2.0 have different descriptor lengths we can not rely on hardcoded values taken from 2.0 specification. This patch introduces reading these lengths per each device from descriptor headers at probe time to ensure their correctness. 
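
The probe-time discovery boils down to reading only the two-byte descriptor header and trusting its length byte, falling back to the 2.0 specification default when the device does not answer. The sketch below is a condensed restatement of the ufshcd_read_desc_length()/ufshcd_init_desc_sizes() pair added later in this patch.

    /* Ask the device for just the header of one descriptor and use the
     * length it reports; fall back to the spec default on any failure.
     */
    static int probe_desc_len(struct ufs_hba *hba, enum desc_idn id, int def_len)
    {
        u8 header[QUERY_DESC_HDR_SIZE];
        int len = QUERY_DESC_HDR_SIZE;
        int err;

        err = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
                                      id, 0, 0, header, &len);
        if (err)
            return def_len;   /* device did not report, keep 2.0 default */

        return header[QUERY_DESC_LENGTH_OFFSET];
    }

Every later descriptor read then sizes its buffer from these cached per-device lengths instead of the removed ufs_query_desc_max_size[] table.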
Change-Id: I0e1be5b5914e008b0fb09e547b1f8c4ee966efc4 Signed-off-by: Michal' Potomski Reviewed-by: Subhash Jadavani Signed-off-by: Martin K. Petersen Git-commit: a4b0e8a4e92b1baa860e744847fbdb84a50a5071 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git [sayalil@codeaurora.org: resolve trivial merge conflicts] Signed-off-by: Sayali Lokhande --- drivers/scsi/ufs/ufs-debugfs.c | 4 +- drivers/scsi/ufs/ufs.h | 21 ++- drivers/scsi/ufs/ufs_quirks.c | 8 +- drivers/scsi/ufs/ufs_test.c | 4 +- drivers/scsi/ufs/ufshcd.c | 239 +++++++++++++++++++++++++-------- drivers/scsi/ufs/ufshcd.h | 15 +++ 6 files changed, 217 insertions(+), 74 deletions(-) diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c index bc2d2d48164d..5a578f1bdd20 100644 --- a/drivers/scsi/ufs/ufs-debugfs.c +++ b/drivers/scsi/ufs/ufs-debugfs.c @@ -850,8 +850,8 @@ static const struct file_operations ufsdbg_host_regs_fops = { static int ufsdbg_dump_device_desc_show(struct seq_file *file, void *data) { int err = 0; - int buff_len = QUERY_DESC_DEVICE_MAX_SIZE; - u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE]; + int buff_len = QUERY_DESC_DEVICE_DEF_SIZE; + u8 desc_buf[QUERY_DESC_DEVICE_DEF_SIZE]; struct ufs_hba *hba = (struct ufs_hba *)file->private; struct desc_field_offset device_desc_field_name[] = { diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 3245fe1e6163..f85a67d41229 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -65,6 +65,7 @@ #define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID) #define UFS_UPIU_WLUN_ID (1 << 7) #define UFS_UPIU_MAX_GENERAL_LUN 8 +#define QUERY_DESC_IDN_CONFIGURATION QUERY_DESC_IDN_CONFIGURAION /* Well known logical unit id in LUN field of UPIU */ enum { @@ -144,19 +145,13 @@ enum desc_header_offset { QUERY_DESC_DESC_TYPE_OFFSET = 0x01, }; -enum ufs_desc_max_size { - QUERY_DESC_DEVICE_MAX_SIZE = 0x40, - QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90, - QUERY_DESC_UNIT_MAX_SIZE = 0x23, - QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06, - /* - * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes - * of descriptor header. 
- */ - QUERY_DESC_STRING_MAX_SIZE = 0xFE, - QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44, - QUERY_DESC_POWER_MAX_SIZE = 0x62, - QUERY_DESC_RFU_MAX_SIZE = 0x00, +enum ufs_desc_def_size { + QUERY_DESC_DEVICE_DEF_SIZE = 0x40, + QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90, + QUERY_DESC_UNIT_DEF_SIZE = 0x23, + QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06, + QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44, + QUERY_DESC_POWER_DEF_SIZE = 0x62, }; /* Unit descriptor parameters offsets in bytes*/ diff --git a/drivers/scsi/ufs/ufs_quirks.c b/drivers/scsi/ufs/ufs_quirks.c index 3210d60b879c..da2bfd5f596d 100644 --- a/drivers/scsi/ufs/ufs_quirks.c +++ b/drivers/scsi/ufs/ufs_quirks.c @@ -51,7 +51,7 @@ static struct ufs_card_fix ufs_fixups[] = { void ufs_advertise_fixup_device(struct ufs_hba *hba) { int err; - u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1]; + u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1]; char *model; struct ufs_card_fix *f; @@ -59,13 +59,13 @@ void ufs_advertise_fixup_device(struct ufs_hba *hba) if (!model) goto out; - memset(str_desc_buf, 0, QUERY_DESC_STRING_MAX_SIZE); + memset(str_desc_buf, 0, QUERY_DESC_MAX_SIZE); err = ufshcd_read_string_desc(hba, hba->dev_info.i_product_name, - str_desc_buf, QUERY_DESC_STRING_MAX_SIZE, ASCII_STD); + str_desc_buf, QUERY_DESC_MAX_SIZE, ASCII_STD); if (err) goto out; - str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0'; + str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; strlcpy(model, (str_desc_buf + QUERY_DESC_HDR_SIZE), min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET], MAX_MODEL_LEN)); diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c index d41871abcf64..2e3997ddcc88 100644 --- a/drivers/scsi/ufs/ufs_test.c +++ b/drivers/scsi/ufs/ufs_test.c @@ -603,8 +603,8 @@ static void ufs_test_random_async_query(void *data, async_cookie_t cookie) struct ufs_test_data *utd = test_iosched->blk_dev_test_data; struct scsi_device *sdev; struct ufs_hba *hba; - int buff_len = QUERY_DESC_UNIT_MAX_SIZE; - u8 desc_buf[QUERY_DESC_UNIT_MAX_SIZE]; + int buff_len = QUERY_DESC_UNIT_DEF_SIZE; + u8 desc_buf[QUERY_DESC_UNIT_DEF_SIZE]; bool flag; u32 att; int ret = 0; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 59222eab180a..a2b5ea07ebdf 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -251,19 +251,6 @@ static void ufshcd_hex_dump(struct ufs_hba *hba, const char * const str, 16, 4, buf, len, false); } -static u32 ufs_query_desc_max_size[] = { - QUERY_DESC_DEVICE_MAX_SIZE, - QUERY_DESC_CONFIGURAION_MAX_SIZE, - QUERY_DESC_UNIT_MAX_SIZE, - QUERY_DESC_RFU_MAX_SIZE, - QUERY_DESC_INTERCONNECT_MAX_SIZE, - QUERY_DESC_STRING_MAX_SIZE, - QUERY_DESC_RFU_MAX_SIZE, - QUERY_DESC_GEOMETRY_MAZ_SIZE, - QUERY_DESC_POWER_MAX_SIZE, - QUERY_DESC_RFU_MAX_SIZE, -}; - enum { UFSHCD_MAX_CHANNEL = 0, UFSHCD_MAX_ID = 1, @@ -3628,7 +3615,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, goto out; } - if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { + if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) { dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n", __func__, *buf_len); err = -EINVAL; @@ -3707,6 +3694,92 @@ int ufshcd_query_descriptor(struct ufs_hba *hba, } EXPORT_SYMBOL(ufshcd_query_descriptor); +/** + * ufshcd_read_desc_length - read the specified descriptor length from header + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_index: descriptor index + * @desc_length: pointer to variable to read the length of descriptor + * + * Return 0 in case of success, non-zero 
otherwise + */ +static int ufshcd_read_desc_length(struct ufs_hba *hba, + enum desc_idn desc_id, + int desc_index, + int *desc_length) +{ + int ret; + u8 header[QUERY_DESC_HDR_SIZE]; + int header_len = QUERY_DESC_HDR_SIZE; + + if (desc_id >= QUERY_DESC_IDN_MAX) + return -EINVAL; + + ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, + desc_id, desc_index, 0, header, + &header_len); + + if (ret) { + dev_err(hba->dev, "%s: Failed to get descriptor header id %d", + __func__, desc_id); + return ret; + } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) { + dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch", + __func__, header[QUERY_DESC_DESC_TYPE_OFFSET], + desc_id); + ret = -EINVAL; + } + + *desc_length = header[QUERY_DESC_LENGTH_OFFSET]; + return ret; + +} + +/** + * ufshcd_map_desc_id_to_length - map descriptor IDN to its length + * @hba: Pointer to adapter instance + * @desc_id: descriptor idn value + * @desc_len: mapped desc length (out) + * + * Return 0 in case of success, non-zero otherwise + */ +int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, + enum desc_idn desc_id, int *desc_len) +{ + switch (desc_id) { + case QUERY_DESC_IDN_DEVICE: + *desc_len = hba->desc_size.dev_desc; + break; + case QUERY_DESC_IDN_POWER: + *desc_len = hba->desc_size.pwr_desc; + break; + case QUERY_DESC_IDN_GEOMETRY: + *desc_len = hba->desc_size.geom_desc; + break; + case QUERY_DESC_IDN_CONFIGURATION: + *desc_len = hba->desc_size.conf_desc; + break; + case QUERY_DESC_IDN_UNIT: + *desc_len = hba->desc_size.unit_desc; + break; + case QUERY_DESC_IDN_INTERCONNECT: + *desc_len = hba->desc_size.interc_desc; + break; + case QUERY_DESC_IDN_STRING: + *desc_len = QUERY_DESC_MAX_SIZE; + break; + case QUERY_DESC_IDN_RFU_0: + case QUERY_DESC_IDN_RFU_1: + *desc_len = 0; + break; + default: + *desc_len = 0; + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(ufshcd_map_desc_id_to_length); + /** * ufshcd_read_desc_param - read the specified descriptor parameter * @hba: Pointer to adapter instance @@ -3721,37 +3794,45 @@ EXPORT_SYMBOL(ufshcd_query_descriptor); static int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, - u32 param_offset, + u8 param_offset, u8 *param_read_buf, - u32 param_size) + u8 param_size) { int ret; u8 *desc_buf; - u32 buff_len; + int buff_len; bool is_kmalloc = true; - /* safety checks */ - if (desc_id >= QUERY_DESC_IDN_MAX) + /* Safety check */ + if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) return -EINVAL; - buff_len = ufs_query_desc_max_size[desc_id]; - if ((param_offset + param_size) > buff_len) - return -EINVAL; + /* Get the max length of descriptor from structure filled up at probe + * time. 
+ */ + ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); - if (!param_offset && (param_size == buff_len)) { - /* memory space already available to hold full descriptor */ - desc_buf = param_read_buf; - is_kmalloc = false; - } else { - /* allocate memory to hold full descriptor */ + /* Sanity checks */ + if (ret || !buff_len) { + dev_err(hba->dev, "%s: Failed to get full descriptor length", + __func__); + return ret; + } + + /* Check whether we need temp memory */ + if (param_offset != 0 || param_size < buff_len) { desc_buf = kmalloc(buff_len, GFP_KERNEL); if (!desc_buf) return -ENOMEM; + } else { + desc_buf = param_read_buf; + is_kmalloc = false; } + /* Request for full descriptor */ ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, - desc_id, desc_index, 0, desc_buf, - &buff_len); + desc_id, desc_index, 0, + desc_buf, &buff_len); if (ret) { dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d", @@ -3768,25 +3849,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba, goto out; } - /* - * While reading variable size descriptors (like string descriptor), - * some UFS devices may report the "LENGTH" (field in "Transaction - * Specific fields" of Query Response UPIU) same as what was requested - * in Query Request UPIU instead of reporting the actual size of the - * variable size descriptor. - * Although it's safe to ignore the "LENGTH" field for variable size - * descriptors as we can always derive the length of the descriptor from - * the descriptor header fields. Hence this change impose the length - * match check only for fixed size descriptors (for which we always - * request the correct size as part of Query Request UPIU). - */ - if ((desc_id != QUERY_DESC_IDN_STRING) && - (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) { - dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d", - __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]); - ret = -EINVAL; - goto out; - } + /* Check wherher we will not copy more data, than available */ + if (is_kmalloc && param_size > buff_len) + param_size = buff_len; if (is_kmalloc) memcpy(param_read_buf, &desc_buf[param_offset], param_size); @@ -7170,10 +7235,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba, static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) { int ret; - int buff_len = QUERY_DESC_POWER_MAX_SIZE; - u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE]; + int buff_len = hba->desc_size.pwr_desc; + u8 *desc_buf = NULL; u32 icc_level; + if (buff_len) { + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) { + dev_err(hba->dev, + "%s: Failed to allocate desc_buf\n", __func__); + return; + } + } + ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); if (ret) { dev_err(hba->dev, @@ -7554,9 +7628,18 @@ static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba) static int ufs_read_device_desc_data(struct ufs_hba *hba) { int err; - u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE]; + u8 *desc_buf = NULL; - err = ufshcd_read_device_desc(hba, desc_buf, sizeof(desc_buf)); + if (hba->desc_size.dev_desc) { + desc_buf = kmalloc(hba->desc_size.dev_desc, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + dev_err(hba->dev, + "%s: Failed to allocate desc_buf\n", __func__); + return err; + } + } + err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); if (err) return err; @@ -7574,6 +7657,51 @@ static int ufs_read_device_desc_data(struct ufs_hba *hba) return 0; } +static void ufshcd_init_desc_sizes(struct ufs_hba *hba) 
+{ + int err; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0, + &hba->desc_size.dev_desc); + if (err) + hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0, + &hba->desc_size.pwr_desc); + if (err) + hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0, + &hba->desc_size.interc_desc); + if (err) + hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0, + &hba->desc_size.conf_desc); + if (err) + hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0, + &hba->desc_size.unit_desc); + if (err) + hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; + + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0, + &hba->desc_size.geom_desc); + if (err) + hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; +} + +static void ufshcd_def_desc_sizes(struct ufs_hba *hba) +{ + hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; + hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; + hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; + hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; + hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; + hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance @@ -7614,6 +7742,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ret) goto out; + /* Init check for device descriptor sizes */ + ufshcd_init_desc_sizes(hba); ufs_advertise_fixup_device(hba); ufshcd_tune_unipro_params(hba); @@ -10075,6 +10205,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) ufshcd_init_lanes_per_dir(hba); + /* Set descriptor lengths to specification defaults */ + ufshcd_def_desc_sizes(hba); + err = ufshcd_hba_init(hba); if (err) goto out_error; diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index a485885d308e..343f32718af2 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -249,6 +249,15 @@ struct ufs_dev_cmd { struct ufs_query query; }; +struct ufs_desc_size { + int dev_desc; + int pwr_desc; + int geom_desc; + int interc_desc; + int unit_desc; + int conf_desc; +}; + /** * struct ufs_clk_info - UFS clock related info * @list: list headed by hba->clk_list_head @@ -738,6 +747,7 @@ struct ufshcd_cmd_log { * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for * device is known or not. 
* @scsi_block_reqs_cnt: reference counting for scsi block requests + * @desc_size: descriptor sizes reported by device */ struct ufs_hba { void __iomem *mmio_base; @@ -967,6 +977,7 @@ struct ufs_hba { int latency_hist_enabled; struct io_latency_state io_lat_s; + struct ufs_desc_size desc_size; }; static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba) @@ -1208,6 +1219,10 @@ int ufshcd_change_power_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *pwr_mode); void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result); + +int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, + int *desc_length); + u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba); void ufshcd_scsi_block_requests(struct ufs_hba *hba); -- GitLab From f863f78c8decb1081ea6122b035b7e0f72b6688d Mon Sep 17 00:00:00 2001 From: Lingutla Chandrasekhar Date: Thu, 29 Jun 2017 15:01:40 +0530 Subject: [PATCH 511/786] defconfig: sdm670: move to sdm670 specific defconfig Create new defconfig for sdm670 as target dependency needs to be differentiated. And also remove sdm670 configurations in sdm845 config. Change-Id: Ieff320ce7b0285fc6555939f8dbb78754f241412 Signed-off-by: Lingutla Chandrasekhar --- arch/arm64/configs/sdm670_defconfig | 639 ++++++++++++++++++++++++++++ arch/arm64/configs/sdm845_defconfig | 3 - 2 files changed, 639 insertions(+), 3 deletions(-) create mode 100644 arch/arm64/configs/sdm670_defconfig diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig new file mode 100644 index 000000000000..f28a9a62335b --- /dev/null +++ b/arch/arm64/configs/sdm670_defconfig @@ -0,0 +1,639 @@ +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_FHANDLE is not set +CONFIG_AUDIT=y +# CONFIG_AUDITSYSCALL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_SCHED_WALT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_RCU_EXPERT=y +CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_NOCB_CPU=y +CONFIG_RCU_NOCB_CPU_ALL=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_CPU_MAX_BUF_SHIFT=17 +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHEDTUNE=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_CORE_CTL=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_SCHED_AUTOGROUP=y +CONFIG_SCHED_TUNE=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_XZ is not set +# CONFIG_RD_LZO is not set +# CONFIG_RD_LZ4 is not set +CONFIG_KALLSYMS_ALL=y +# CONFIG_AIO is not set +# CONFIG_MEMBARRIER is not set +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_CC_STACKPROTECTOR_REGULAR=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SIG=y +CONFIG_MODULE_SIG_FORCE=y +CONFIG_MODULE_SIG_SHA512=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_ARCH_QCOM=y +CONFIG_ARCH_SDM670=y +CONFIG_PCI=y +CONFIG_PCI_MSM=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +CONFIG_HZ_100=y +CONFIG_CLEANCACHE=y +CONFIG_CMA=y +CONFIG_CMA_DEBUGFS=y +CONFIG_ZSMALLOC=y +CONFIG_BALANCE_ANON_FILE_RECLAIM=y +CONFIG_SECCOMP=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +# CONFIG_ARM64_VHE is not set +CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 
+CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_IDLE=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_BOOST=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_MSM=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_INET_AH=y +CONFIG_INET_ESP=y +CONFIG_INET_IPCOMP=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LOG=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y 
+CONFIG_BRIDGE_NF_EBTABLES=y +CONFIG_BRIDGE_EBT_BROUTE=y +CONFIG_L2TP=y +CONFIG_L2TP_DEBUGFS=y +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=y +CONFIG_L2TP_ETH=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_SCH_PRIO=y +CONFIG_NET_SCH_MULTIQ=y +CONFIG_NET_SCH_INGRESS=y +CONFIG_NET_CLS_FW=y +CONFIG_NET_CLS_U32=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=y +CONFIG_NET_EMATCH_NBYTE=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_EMATCH_META=y +CONFIG_NET_EMATCH_TEXT=y +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_GACT=y +CONFIG_NET_ACT_MIRRED=y +CONFIG_NET_ACT_SKBEDIT=y +CONFIG_DNS_RESOLVER=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_FC=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_BT=y +CONFIG_MSM_BT_POWER=y +CONFIG_CFG80211=y +CONFIG_CFG80211_INTERNAL_REGDB=y +# CONFIG_CFG80211_CRDA_SUPPORT is not set +CONFIG_RFKILL=y +CONFIG_NFC_NQ=y +CONFIG_IPC_ROUTER=y +CONFIG_IPC_ROUTER_SECURITY=y +CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y +CONFIG_DMA_CMA=y +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_QSEECOM=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_UFSHCD=y +CONFIG_SCSI_UFSHCD_PLATFORM=y +CONFIG_SCSI_UFS_QCOM=y +CONFIG_SCSI_UFS_QCOM_ICE=y +CONFIG_SCSI_UFSHCD_CMD_LOGGING=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_BONDING=y +CONFIG_DUMMY=y +CONFIG_TUN=y +CONFIG_RNDIS_IPA=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_PPPOLAC=y +CONFIG_PPPOPNS=y +CONFIG_USB_USBNET=y +CONFIG_WIL6210=m +CONFIG_WCNSS_MEM_PRE_ALLOC=y +CONFIG_CLD_LL_CORE=y +CONFIG_INPUT_EVDEV=y +CONFIG_KEYBOARD_GPIO=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_HBTP_INPUT=y +CONFIG_INPUT_QPNP_POWER_ON=y +CONFIG_INPUT_UINPUT=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_MSM_GENI=y +CONFIG_SERIAL_MSM_GENI_CONSOLE=y +CONFIG_DIAG_CHAR=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM_LEGACY=y +CONFIG_MSM_ADSPRPC=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_QCOM_GENI=y +CONFIG_SOUNDWIRE=y +CONFIG_SPI=y +CONFIG_SPI_QUP=y +CONFIG_SPI_QCOM_GENI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SLIMBUS_MSM_NGD=y +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y +CONFIG_PINCTRL_SDM670=y +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +CONFIG_GPIOLIB=y +CONFIG_GPIO_SYSFS=y +CONFIG_POWER_RESET_QCOM=y +CONFIG_QCOM_DLOAD_MODE=y +CONFIG_POWER_RESET_XGENE=y +CONFIG_POWER_RESET_SYSCON=y +CONFIG_QPNP_FG_GEN3=y +CONFIG_SMB1355_SLAVE_CHARGER=y +CONFIG_QPNP_SMB2=y +CONFIG_QPNP_QNOVO=y +CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y +CONFIG_THERMAL=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_LOW_LIMITS=y +CONFIG_CPU_THERMAL=y +CONFIG_DEVFREQ_THERMAL=y +CONFIG_QCOM_SPMI_TEMP_ALARM=y +CONFIG_THERMAL_QPNP=y +CONFIG_THERMAL_QPNP_ADC_TM=y +CONFIG_THERMAL_TSENS=y +CONFIG_MSM_BCL_PERIPHERAL_CTL=y +CONFIG_QTI_THERMAL_LIMITS_DCVS=y +CONFIG_QTI_VIRTUAL_SENSOR=y +CONFIG_MFD_I2C_PMIC=y +CONFIG_MFD_SPMI_PMIC=y +CONFIG_WCD934X_CODEC=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_CPRH_KBSS=y +CONFIG_REGULATOR_QPNP_LABIBB=y +CONFIG_REGULATOR_QPNP=y +CONFIG_REGULATOR_RPMH=y +CONFIG_REGULATOR_STUB=y 
+CONFIG_MEDIA_SUPPORT=y +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +CONFIG_VIDEO_ADV_DEBUG=y +CONFIG_VIDEO_FIXED_MINOR_RANGES=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SPECTRA_CAMERA=y +CONFIG_MSM_VIDC_V4L2=y +CONFIG_MSM_VIDC_GOVERNORS=y +CONFIG_MSM_SDE_ROTATOR=y +CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y +CONFIG_QCOM_KGSL=y +CONFIG_DRM=y +CONFIG_DRM_SDE_EVTLOG_DEBUG=y +CONFIG_DRM_SDE_RSC=y +CONFIG_FB_VIRTUAL=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_USB_AUDIO=y +CONFIG_SND_USB_AUDIO_QMI=y +CONFIG_SND_SOC=y +CONFIG_SND_SOC_SDM845=y +CONFIG_UHID=y +CONFIG_HID_APPLE=y +CONFIG_HID_MICROSOFT=y +CONFIG_USB=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_STORAGE=y +CONFIG_USB_DWC3=y +CONFIG_USB_DWC3_MSM=y +CONFIG_USB_ISP1760=y +CONFIG_USB_ISP1760_HOST_ROLE=y +CONFIG_USB_PD_POLICY=y +CONFIG_QPNP_USB_PDPHY=y +CONFIG_USB_EHSET_TEST_FIXTURE=y +CONFIG_NOP_USB_XCEIV=y +CONFIG_DUAL_ROLE_USB_INTF=y +CONFIG_USB_MSM_SSPHY_QMP=y +CONFIG_MSM_QUSB_PHY=y +CONFIG_USB_GADGET=y +CONFIG_USB_GADGET_VBUS_DRAW=500 +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_NCM=y +CONFIG_USB_CONFIGFS_MASS_STORAGE=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_MTP=y +CONFIG_USB_CONFIGFS_F_PTP=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_USB_CONFIGFS_F_HID=y +CONFIG_USB_CONFIGFS_F_DIAG=y +CONFIG_USB_CONFIGFS_F_CDEV=y +CONFIG_USB_CONFIGFS_F_CCID=y +CONFIG_USB_CONFIGFS_F_GSI=y +CONFIG_USB_CONFIGFS_F_QDSS=y +CONFIG_MMC=y +CONFIG_MMC_PERF_PROFILING=y +CONFIG_MMC_RING_BUFFER=y +CONFIG_MMC_CLKGATE=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_BLOCK_DEFERRED_RESUME=y +CONFIG_MMC_TEST=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_PLTFM=y +CONFIG_MMC_SDHCI_MSM=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_QPNP=y +CONFIG_LEDS_QPNP_FLASH_V2=y +CONFIG_LEDS_QPNP_WLED=y +CONFIG_LEDS_QPNP_HAPTICS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_EDAC=y +CONFIG_EDAC_MM_EDAC=y +CONFIG_EDAC_KRYO3XX_ARM64=y +CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE=y +CONFIG_EDAC_QCOM_LLCC=y +CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_QPNP=y +CONFIG_DMADEVICES=y +CONFIG_QCOM_GPI_DMA=y +CONFIG_QCOM_GPI_DMA_DEBUG=y +CONFIG_UIO=y +CONFIG_UIO_MSM_SHAREDMEM=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_ION=y +CONFIG_ION_MSM=y +CONFIG_GSI=y +CONFIG_IPA3=y +CONFIG_RMNET_IPA3=y +CONFIG_IPA_UT=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_QPNP_COINCELL=y +CONFIG_QPNP_REVID=y +CONFIG_USB_BAM=y +CONFIG_MSM_11AD=m +CONFIG_SEEMP_CORE=y +CONFIG_QCOM_GENI_SE=y +CONFIG_CLOCK_QPNP_DIV=y +CONFIG_MSM_CLK_RPMH=y +CONFIG_CLOCK_CPU_OSM=y +CONFIG_MSM_CLK_AOP_QMP=y +CONFIG_QCOM_MDSS_PLL=y +CONFIG_REMOTE_SPINLOCK_MSM=y +CONFIG_MSM_QMP=y +CONFIG_IOMMU_IO_PGTABLE_FAST=y +CONFIG_ARM_SMMU=y +CONFIG_QCOM_LAZY_MAPPING=y +CONFIG_IOMMU_DEBUG=y +CONFIG_IOMMU_DEBUG_TRACKING=y +CONFIG_IOMMU_TESTS=y +CONFIG_QCOM_CPUSS_DUMP=y +CONFIG_QCOM_RUN_QUEUE_STATS=y +CONFIG_QCOM_LLCC=y +CONFIG_QCOM_SDM670_LLCC=y +CONFIG_MSM_SERVICE_LOCATOR=y +CONFIG_MSM_SERVICE_NOTIFIER=y +CONFIG_MSM_BOOT_STATS=y +CONFIG_MSM_CORE_HANG_DETECT=y +CONFIG_MSM_GLADIATOR_HANG_DETECT=y +CONFIG_MSM_GLADIATOR_ERP=y +CONFIG_QCOM_EUD=y +CONFIG_QCOM_WATCHDOG_V2=y +CONFIG_QCOM_MEMORY_DUMP_V2=y 
+CONFIG_QCOM_SECURE_BUFFER=y +CONFIG_QCOM_EARLY_RANDOM=y +CONFIG_MSM_SMEM=y +CONFIG_MSM_GLINK=y +CONFIG_MSM_GLINK_LOOPBACK_SERVER=y +CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y +CONFIG_MSM_GLINK_SPI_XPRT=y +CONFIG_MSM_SPCOM=y +CONFIG_MSM_SPSS_UTILS=y +CONFIG_TRACER_PKT=y +CONFIG_QTI_RPMH_API=y +CONFIG_MSM_SMP2P=y +CONFIG_MSM_SMP2P_TEST=y +CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y +CONFIG_MSM_QMI_INTERFACE=y +CONFIG_MSM_GLINK_PKT=y +CONFIG_MSM_SUBSYSTEM_RESTART=y +CONFIG_MSM_PIL=y +CONFIG_MSM_PIL_SSR_GENERIC=y +CONFIG_MSM_PIL_MSS_QDSP6V5=y +CONFIG_ICNSS=y +CONFIG_ICNSS_DEBUG=y +CONFIG_QCOM_COMMAND_DB=y +CONFIG_MSM_ADSP_LOADER=y +CONFIG_MSM_PERFORMANCE=y +CONFIG_MSM_CDSP_LOADER=y +CONFIG_MSM_AVTIMER=y +CONFIG_MSM_EVENT_TIMER=y +CONFIG_MSM_PM=y +CONFIG_MSM_QBT1000=y +CONFIG_APSS_CORE_EA=y +CONFIG_QCOM_DCC_V2=y +CONFIG_QTI_RPM_STATS_LOG=y +CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y +CONFIG_QCOM_BIMC_BWMON=y +CONFIG_ARM_MEMLAT_MON=y +CONFIG_QCOMCCI_HWMON=y +CONFIG_QCOM_M4M_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y +CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y +CONFIG_DEVFREQ_GOV_MEMLAT=y +CONFIG_DEVFREQ_SIMPLE_DEV=y +CONFIG_QCOM_DEVFREQ_DEVBW=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_IIO=y +CONFIG_QCOM_RRADC=y +CONFIG_PWM=y +CONFIG_PWM_QPNP=y +CONFIG_ARM_GIC_V3_ACL=y +CONFIG_PHY_XGENE=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_SENSORS_SSC=y +CONFIG_MSM_TZ_LOG=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_FUSE_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_EFIVAR_FS=y +CONFIG_ECRYPT_FS=y +CONFIG_ECRYPT_FS_MESSAGING=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_PRINTK_TIME=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_INFO=y +CONFIG_PAGE_OWNER=y +CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_PAGEALLOC=y +CONFIG_SLUB_DEBUG_PANIC_ON=y +CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y +CONFIG_PAGE_POISONING=y +CONFIG_DEBUG_OBJECTS=y +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_SLUB_DEBUG_ON=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_WQ_WATCHDOG=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_PANIC_ON_SCHED_BUG=y +CONFIG_PANIC_ON_RT_THROTTLING=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_STACK_END_CHECK=y +# CONFIG_DEBUG_PREEMPT is not set +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_DEBUG_LIST=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAULT_INJECTION_DEBUG_FS=y +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_IPC_LOGGING=y +CONFIG_QCOM_RTB=y +CONFIG_QCOM_RTB_SEPARATE_CPUS=y +CONFIG_FUNCTION_TRACER=y +CONFIG_IRQSOFF_TRACER=y +CONFIG_PREEMPT_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_CPU_FREQ_SWITCH_PROFILER=y +CONFIG_LKDTM=y +CONFIG_MEMTEST=y +CONFIG_PANIC_ON_DATA_CORRUPTION=y +CONFIG_ARM64_PTDUMP=y +CONFIG_PID_IN_CONTEXTIDR=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +CONFIG_CORESIGHT_REMOTE_ETM=y +CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0 +CONFIG_CORESIGHT_QCOM_REPLICATOR=y +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_HWEVENT=y 
+CONFIG_CORESIGHT_DUMMY=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SMACK=y +CONFIG_CRYPTO_XCBC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_CRYPTO_ANSI_CPRNG=y +CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y +CONFIG_CRYPTO_DEV_QCRYPTO=y +CONFIG_CRYPTO_DEV_QCEDEV=y +CONFIG_CRYPTO_DEV_QCOM_ICE=y +CONFIG_ARM64_CRYPTO=y +CONFIG_CRYPTO_SHA1_ARM64_CE=y +CONFIG_CRYPTO_SHA2_ARM64_CE=y +CONFIG_CRYPTO_GHASH_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_CRC32_ARM64=y +CONFIG_XZ_DEC=y +CONFIG_QMI_ENCDEC=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 5a608ea2db75..94867dc0ef1e 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -56,7 +56,6 @@ CONFIG_PARTITION_ADVANCED=y # CONFIG_IOSCHED_DEADLINE is not set CONFIG_ARCH_QCOM=y CONFIG_ARCH_SDM845=y -CONFIG_ARCH_SDM670=y CONFIG_PCI=y CONFIG_PCI_MSM=y CONFIG_SCHED_MC=y @@ -308,7 +307,6 @@ CONFIG_SLIMBUS_MSM_NGD=y CONFIG_SPMI=y CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y CONFIG_PINCTRL_SDM845=y -CONFIG_PINCTRL_SDM670=y CONFIG_PINCTRL_QCOM_SPMI_PMIC=y CONFIG_GPIOLIB=y CONFIG_GPIO_SYSFS=y @@ -490,7 +488,6 @@ CONFIG_QCOM_CPUSS_DUMP=y CONFIG_QCOM_RUN_QUEUE_STATS=y CONFIG_QCOM_LLCC=y CONFIG_QCOM_SDM845_LLCC=y -CONFIG_QCOM_SDM670_LLCC=y CONFIG_MSM_SERVICE_LOCATOR=y CONFIG_MSM_SERVICE_NOTIFIER=y CONFIG_MSM_BOOT_STATS=y -- GitLab From 686f86b742731c1c390610090d68f3cc5eb5f199 Mon Sep 17 00:00:00 2001 From: Ananda Kishore Date: Wed, 28 Jun 2017 23:28:50 +0530 Subject: [PATCH 512/786] sensors: ssc: Add restart interface for SLPI Sensor Low Power Island (SLPI) is a DSP core integrated into SDM/MSM to run sensor drivers and algorithms. User space sensor daemon requires to restart the SLPI during some of the use cases. Add sysfs interface to do SLPI sub system restart from user space sensor daemon. 
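
From user space the restart is a single write of the reset command (1, matching SSR_RESET_CMD) to the new write-only ssr attribute. A minimal sketch follows; the exact sysfs path is an assumption, since the kobject that hosts the existing boot attribute is created outside this hunk.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Path assumed: the "ssr" attribute sits next to the existing "boot"
     * attribute under the SLPI loader's kobject.
     */
    #define SLPI_SSR_NODE "/sys/kernel/boot_slpi/ssr"

    int request_slpi_restart(void)
    {
        int fd = open(SLPI_SSR_NODE, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return -1;
        }
        /* "1" matches SSR_RESET_CMD; any other value is rejected with -EINVAL */
        if (write(fd, "1", 1) != 1) {
            perror("write");
            close(fd);
            return -1;
        }
        close(fd);
        return 0;
    }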
Change-Id: Id5c16986aae4ac8a1fc1474fb542d36c70173a95 Signed-off-by: Ananda Kishore --- drivers/sensors/sensors_ssc.c | 47 +++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/drivers/sensors/sensors_ssc.c b/drivers/sensors/sensors_ssc.c index d7387670e4ef..dfdbd8e8df69 100644 --- a/drivers/sensors/sensors_ssc.c +++ b/drivers/sensors/sensors_ssc.c @@ -32,6 +32,7 @@ #define IMAGE_LOAD_CMD 1 #define IMAGE_UNLOAD_CMD 0 +#define SSR_RESET_CMD 1 #define CLASS_NAME "ssc" #define DRV_NAME "sensors" #define DRV_VERSION "2.00" @@ -53,6 +54,10 @@ static ssize_t slpi_boot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count); +static ssize_t slpi_ssr_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count); + struct slpi_loader_private { void *pil_h; struct kobject *boot_slpi_obj; @@ -62,8 +67,12 @@ struct slpi_loader_private { static struct kobj_attribute slpi_boot_attribute = __ATTR(boot, 0220, NULL, slpi_boot_store); +static struct kobj_attribute slpi_ssr_attribute = + __ATTR(ssr, 0220, NULL, slpi_ssr_store); + static struct attribute *attrs[] = { &slpi_boot_attribute.attr, + &slpi_ssr_attribute.attr, NULL, }; @@ -138,6 +147,44 @@ static void slpi_loader_unload(struct platform_device *pdev) } } +static ssize_t slpi_ssr_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, + size_t count) +{ + int ssr_cmd = 0; + struct subsys_device *sns_dev = NULL; + struct platform_device *pdev = slpi_private; + struct slpi_loader_private *priv = NULL; + + pr_debug("%s: going to call slpi_ssr\n", __func__); + + if (kstrtoint(buf, 10, &ssr_cmd) < 0) + return -EINVAL; + + if (ssr_cmd != SSR_RESET_CMD) + return -EINVAL; + + priv = platform_get_drvdata(pdev); + if (!priv) + return -EINVAL; + + sns_dev = (struct subsys_device *)priv->pil_h; + if (!sns_dev) + return -EINVAL; + + dev_err(&pdev->dev, "Something went wrong with SLPI, restarting\n"); + + /* subsystem_restart_dev has worker queue to handle */ + if (subsystem_restart_dev(sns_dev) != 0) { + dev_err(&pdev->dev, "subsystem_restart_dev failed\n"); + return -EINVAL; + } + + dev_dbg(&pdev->dev, "SLPI restarted\n"); + return count; +} + static ssize_t slpi_boot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, -- GitLab From 5de9d8e1f21d4dd7fbe8882472060fbe3acf866f Mon Sep 17 00:00:00 2001 From: Manoj Prabhu B Date: Tue, 4 Jul 2017 15:09:54 +0530 Subject: [PATCH 513/786] memshare: Perform Hypervisor mapping for boot-time allotted memory The patch performs Hypervisor mapping for guaranteed memshare client's allotted memory at boot time and prevent allocation for clients with size zero and updates client's free memory count variable after the successful allocation. The patch also adds guard bytes to avoid NOC error while access to memory allotted to diag because of resource group merging by TZ. 
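
The sizing rule the patch applies in both allocation paths (runtime QMI requests and boot-time guaranteed clients) can be stated compactly: only the diag client (client id 1 in this driver) and only non-zero requests get the 4 KB guard appended. The sketch below restates the logic in handle_alloc_generic_req() and memshare_child_probe(); it adds no behaviour of its own.

    /* Restatement of the sizing rule used in both allocation paths. */
    static uint32_t memshare_request_size(int client_id, uint32_t num_bytes)
    {
        if (client_id == 1 && num_bytes > 0)
            return num_bytes + MEMSHARE_GUARD_BYTES;   /* 4 KB guard for diag */
        return num_bytes;
    }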
CRs-Fixed: 2026525, 2046258, 2062478 Change-Id: If9add2c593cd7bc88d68a8f2f2086d9c2aecf257 Signed-off-by: Manoj Prabhu B --- drivers/soc/qcom/memshare/msm_memshare.c | 23 ++++++++++++++++------- drivers/soc/qcom/memshare/msm_memshare.h | 1 + 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/drivers/soc/qcom/memshare/msm_memshare.c b/drivers/soc/qcom/memshare/msm_memshare.c index 7298f30be0ce..e58fa2e466c9 100644 --- a/drivers/soc/qcom/memshare/msm_memshare.c +++ b/drivers/soc/qcom/memshare/msm_memshare.c @@ -495,6 +495,7 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) struct mem_alloc_generic_resp_msg_v01 *alloc_resp; int rc, resp = 0; int client_id; + uint32_t size = 0; mutex_lock(&memsh_drv->mem_share); alloc_req = (struct mem_alloc_generic_req_msg_v01 *)req; @@ -521,12 +522,12 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) return -EINVAL; } - memblock[client_id].free_memory += 1; - pr_debug("memshare: %s, free memory count for client id: %d = %d", - __func__, memblock[client_id].client_id, - memblock[client_id].free_memory); if (!memblock[client_id].allotted) { - rc = memshare_alloc(memsh_drv->dev, alloc_req->num_bytes, + if (alloc_req->client_id == 1 && alloc_req->num_bytes > 0) + size = alloc_req->num_bytes + MEMSHARE_GUARD_BYTES; + else + size = alloc_req->num_bytes; + rc = memshare_alloc(memsh_drv->dev, size, &memblock[client_id]); if (rc) { pr_err("memshare: %s,Unable to allocate memory for requested client\n", @@ -534,11 +535,16 @@ static int handle_alloc_generic_req(void *req_h, void *req, void *conn_h) resp = 1; } if (!resp) { + memblock[client_id].free_memory += 1; memblock[client_id].allotted = 1; memblock[client_id].size = alloc_req->num_bytes; memblock[client_id].peripheral = alloc_req->proc_id; } } + pr_debug("memshare: In %s, free memory count for client id: %d = %d", + __func__, memblock[client_id].client_id, + memblock[client_id].free_memory); + memblock[client_id].sequence_id = alloc_req->sequence_id; fill_alloc_response(alloc_resp, client_id, &resp); @@ -941,9 +947,11 @@ static int memshare_child_probe(struct platform_device *pdev) /* * Memshare allocation for guaranteed clients */ - if (memblock[num_clients].guarantee) { + if (memblock[num_clients].guarantee && size > 0) { + if (client_id == 1) + size += MEMSHARE_GUARD_BYTES; rc = memshare_alloc(memsh_child->dev, - memblock[num_clients].size, + size, &memblock[num_clients]); if (rc) { pr_err("memshare: %s, Unable to allocate memory for guaranteed clients, rc: %d\n", @@ -951,6 +959,7 @@ static int memshare_child_probe(struct platform_device *pdev) return rc; } memblock[num_clients].allotted = 1; + shared_hyp_mapping(num_clients); } /* diff --git a/drivers/soc/qcom/memshare/msm_memshare.h b/drivers/soc/qcom/memshare/msm_memshare.h index f3b594a5c3c3..ca11137aeff1 100644 --- a/drivers/soc/qcom/memshare/msm_memshare.h +++ b/drivers/soc/qcom/memshare/msm_memshare.h @@ -24,6 +24,7 @@ #define GPS 0 #define CHECK 0 #define FREE 1 +#define MEMSHARE_GUARD_BYTES (4*1024) struct mem_blocks { /* Client Id information */ -- GitLab From 4126af6cf8e77906b53101f792ce9bb71626d60a Mon Sep 17 00:00:00 2001 From: Prakash Gupta Date: Wed, 22 Mar 2017 18:52:45 +0530 Subject: [PATCH 514/786] ion: ion_system_heap: Fix null pointer dereference The pointer returned from alloc_pages passed unchecked to ion_pages_sync_for_device. This patch checks the return for NULL. 
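
The defensive pattern is the usual allocate-then-check one: the cache-maintenance helper must never see a NULL page. A minimal sketch, assuming the same helper signature used in this file:

    static struct page *alloc_and_sync(struct device *dev, gfp_t gfp,
                                       unsigned int order)
    {
        struct page *page = alloc_pages(gfp, order);

        /* alloc_pages() returns NULL on failure; only sync a valid page. */
        if (page)
            ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
                                      DMA_BIDIRECTIONAL);
        return page;   /* caller still handles the NULL case */
    }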
Change-Id: I407efc73a1e1297f382a7926cc79542bde88d06d Fixes: 2d4fdc7bde46 ("ion: Fix DMA operations for ARM64") Signed-off-by: Prakash Gupta --- drivers/staging/android/ion/ion_system_heap.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index ad6028f0fe82..c9028bbdee4d 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -2,7 +2,7 @@ * drivers/staging/android/ion/ion_system_heap.c * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -123,9 +123,11 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap, if (order) gfp_mask = high_order_gfp_flags; + page = alloc_pages(gfp_mask, order); - ion_pages_sync_for_device(dev, page, PAGE_SIZE << order, - DMA_BIDIRECTIONAL); + if (page) + ion_pages_sync_for_device(dev, page, PAGE_SIZE << order, + DMA_BIDIRECTIONAL); } if (!page) return 0; -- GitLab From b28d556a0c028310f08055b758eba99e3c2c410d Mon Sep 17 00:00:00 2001 From: Gaurav Kohli Date: Mon, 12 Jun 2017 11:26:27 +0530 Subject: [PATCH 515/786] ARM: dts: msm: Add support for Lpass PIL on sdm670 Add support for Lpass PIL which facilitates the loading of Lpass firmware, authentication and bringing it out of reset. Change-Id: Iafaec87cd5a727762652b0d09b5a36a27a190074 Signed-off-by: Gaurav Kohli --- arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi | 23 ++++++++++++++++ arch/arm64/boot/dts/qcom/sdm670.dtsi | 32 ++++++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi index 4ab0839c5db8..022f705533f1 100644 --- a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi @@ -176,4 +176,27 @@ compatible = "qcom,smp2pgpio_test_smp2p_5_out"; gpios = <&smp2pgpio_smp2p_5_out 0 0>; }; + + /* ssr - inbound entry from lpass */ + smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "slave-kernel"; + qcom,remote-pid = <2>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + /* ssr - outbound entry to lpass */ + smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "master-kernel"; + qcom,remote-pid = <2>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 5392df0ded81..dd6199ff69f0 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -1052,6 +1052,38 @@ status = "disabled"; }; + + qcom,lpass@62400000 { + compatible = "qcom,pil-tz-generic"; + reg = <0x62400000 0x00100>; + interrupts = <0 162 1>; + + vdd_cx-supply = <&pm660l_l9_level>; + qcom,proxy-reg-names = "vdd_cx"; + qcom,vdd_cx-uV-uA = ; + + clocks = <&clock_rpmh RPMH_CXO_CLK>; + clock-names = "xo"; + qcom,proxy-clock-names = "xo"; + + qcom,pas-id = <1>; + qcom,proxy-timeout-ms = <10000>; + qcom,smem-id = <423>; + qcom,sysmon-id = <1>; + qcom,ssctl-instance-id = <0x14>; + qcom,firmware-name = "adsp"; + 
memory-region = <&pil_adsp_mem>; + + /* GPIO inputs from lpass */ + qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>; + qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>; + qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>; + qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>; + + /* GPIO output to lpass */ + qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>; + status = "ok"; + }; }; #include "sdm670-pinctrl.dtsi" -- GitLab From b5b7adfedfc0679be06b65be200c5073f1157bb1 Mon Sep 17 00:00:00 2001 From: Laxminath Kasam Date: Fri, 26 May 2017 20:09:52 +0530 Subject: [PATCH 516/786] ASoC: msm_sdw: Move the delay logic inside bulk write loop On consecutive writes in bulk write API, ensure delay is provided for atleast 100us between each soundwire master write for WR_DONE status update and reflect current register value. Also ensure delay in soundwire master read is present after register address update and before register value read. CRs-Fixed: 2035787 Change-Id: I8399c5ca32328abdd4e90b46d6f8d6a6c0225905 Signed-off-by: Laxminath Kasam --- sound/soc/codecs/msm_sdw/msm_sdw_cdc.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c index 62fdb9485ade..15d62c38aa56 100644 --- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c +++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c @@ -1039,7 +1039,6 @@ static int msm_sdw_swrm_read(void *handle, int reg) __func__, reg); sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0; sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0; - /* * Add sleep as SWR slave access read takes time. * Allow for RD_DONE to complete for previous register if any. @@ -1054,6 +1053,8 @@ static int msm_sdw_swrm_read(void *handle, int reg) dev_err(msm_sdw->dev, "%s: RD Addr Failure\n", __func__); goto err; } + /* Add sleep for SWR register read value to get updated. */ + usleep_range(100, 105); /* Check for RD value */ ret = regmap_bulk_read(msm_sdw->regmap, sdw_rd_data_base, (u8 *)&val, 4); @@ -1079,12 +1080,12 @@ static int msm_sdw_bulk_write(struct msm_sdw_priv *msm_sdw, sdw_wr_addr_base = MSM_SDW_AHB_BRIDGE_WR_ADDR_0; sdw_wr_data_base = MSM_SDW_AHB_BRIDGE_WR_DATA_0; - /* - * Add sleep as SWR slave write takes time. - * Allow for any previous pending write to complete. - */ - usleep_range(50, 55); for (i = 0; i < len; i += 2) { + /* + * Add sleep as SWR slave write takes time. + * Allow for any previous pending write to complete. + */ + usleep_range(100, 105); /* First Write the Data to register */ ret = regmap_bulk_write(msm_sdw->regmap, sdw_wr_data_base, bulk_reg[i].buf, 4); -- GitLab From 1f233cd141d259cf1b51a15e6da4b8142ac96217 Mon Sep 17 00:00:00 2001 From: Laxminath Kasam Date: Mon, 29 May 2017 12:21:11 +0530 Subject: [PATCH 517/786] ASoC: audio-ext-clk: enable pinctrl after clock enable Change audio ext clock driver to access LPI GPIO pinctrl after clock enable. 
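[Annotation, not part of the commit: the reordering below reduces to the sequence sketched here with generic names; the example_* symbols are placeholders, and the assumption that the LPI pin controller is only safely accessible once the LPASS clock is enabled is inferred from the change, not stated in it.]

        /* Ordering sketch only; example_* helpers are hypothetical. */
        static int example_mclk_prepare(struct example_mclk *mclk)
        {
                int ret;

                ret = example_enable_lpass_clock(mclk);         /* 1. vote the clock on first */
                if (ret < 0)
                        return ret;

                ret = example_select_active_pinctrl(mclk);      /* 2. only then touch LPI GPIO pinctrl */
                if (ret < 0)
                        pr_err("%s: pinctrl state select failed %d\n", __func__, ret);

                return ret;
        }
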
CRs-Fixed: 2035491 Change-Id: Ife49e63ebd2a96a35ef78db2dca4298c52c29063 Signed-off-by: Laxminath Kasam --- sound/soc/codecs/audio-ext-clk-up.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/sound/soc/codecs/audio-ext-clk-up.c b/sound/soc/codecs/audio-ext-clk-up.c index 3b54096b8347..31c063d4b93e 100644 --- a/sound/soc/codecs/audio-ext-clk-up.c +++ b/sound/soc/codecs/audio-ext-clk-up.c @@ -177,6 +177,15 @@ static int audio_ext_lpass_mclk_prepare(struct clk_hw *hw) struct pinctrl_info *pnctrl_info = &audio_lpass_mclk->pnctrl_info; int ret; + lpass_mclk.enable = 1; + ret = afe_set_lpass_clock_v2(AFE_PORT_ID_PRIMARY_MI2S_RX, + &lpass_mclk); + if (ret < 0) { + pr_err("%s afe_set_digital_codec_core_clock failed\n", + __func__); + return ret; + } + if (pnctrl_info->pinctrl) { ret = pinctrl_select_state(pnctrl_info->pinctrl, pnctrl_info->active); @@ -187,15 +196,6 @@ static int audio_ext_lpass_mclk_prepare(struct clk_hw *hw) } } - lpass_mclk.enable = 1; - ret = afe_set_lpass_clock_v2(AFE_PORT_ID_PRIMARY_MI2S_RX, - &lpass_mclk); - if (ret < 0) { - pr_err("%s afe_set_digital_codec_core_clock failed\n", - __func__); - return ret; - } - if (pnctrl_info->base) iowrite32(1, pnctrl_info->base); return 0; -- GitLab From fcf9a16e46e83e8df9abe6c5c86f1366f8b0fc68 Mon Sep 17 00:00:00 2001 From: Clarence Ip Date: Tue, 4 Jul 2017 13:54:18 -0400 Subject: [PATCH 518/786] drm/msm/sde: select correct multirect op mode This patch corrects a bug in the multirect op mode programming to allow the correct parallel/time mx mode to be selected. Change-Id: I1cc98a92ad9ffc7613b4cf7414c10c1251a9c7ce Signed-off-by: Clarence Ip --- drivers/gpu/drm/msm/sde/sde_hw_sspp.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c index 9fd59926b146..85af820ae3ff 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c @@ -257,7 +257,10 @@ static void sde_hw_sspp_setup_multirect(struct sde_hw_pipe *ctx, } else { mode_mask = SDE_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx); mode_mask |= index; - mode_mask |= (mode == SDE_SSPP_MULTIRECT_TIME_MX) ? 0x4 : 0x0; + if (mode == SDE_SSPP_MULTIRECT_TIME_MX) + mode_mask |= BIT(2); + else + mode_mask &= ~BIT(2); } SDE_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask); -- GitLab From 605e32e52e00395a1a00e17943ec7617b16299aa Mon Sep 17 00:00:00 2001 From: Zhen Kong Date: Mon, 26 Jun 2017 11:40:58 -0700 Subject: [PATCH 519/786] firmware: qcom: tz_log: update interrupt info offset As interrupt info table is changed after TZ 4.0, update its offset according to TZ version when tz_log driver reads it. 
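[Annotation, not part of the commit: the change below essentially swaps stride arithmetic on a byte pointer for walking the table with a pointer of the record type that matches the detected TZ version. A simplified sketch follows; the struct layouts are illustrative, not the real tzdbg_int_t definitions.]

        /* Illustrative record layouts; the real structs live in tz_log.c. */
        struct int_rec_legacy { uint32_t num; uint32_t type; uint32_t count; };
        struct int_rec_tz40   { uint32_t num; uint32_t type; uint64_t count; };

        static void walk_int_table(void *tbl, unsigned int n, bool is_tz40)
        {
                unsigned int i;

                if (!is_tz40) {
                        struct int_rec_legacy *rec = tbl;

                        for (i = 0; i < n; i++, rec++)  /* stride = sizeof(*rec) */
                                pr_info("irq 0x%x hits %u\n", rec->num, rec->count);
                } else {
                        struct int_rec_tz40 *rec = tbl;

                        for (i = 0; i < n; i++, rec++)  /* stride = sizeof(*rec) */
                                pr_info("irq 0x%x hits %llu\n", rec->num,
                                        (unsigned long long)rec->count);
                }
        }

Walking by the typed pointer keeps the stride in sync with whichever record layout the running TZ version exposes.
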
Change-Id: I28750a12b79a3920904c00fd52a2f4c7ffef7000 Signed-off-by: Zhen Kong --- drivers/firmware/qcom/tz_log.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c index 1b51d088ebb8..471476c1bdee 100644 --- a/drivers/firmware/qcom/tz_log.c +++ b/drivers/firmware/qcom/tz_log.c @@ -477,10 +477,10 @@ static int _disp_tz_reset_stats(void) static int _disp_tz_interrupt_stats(void) { - int i, j, int_info_size; + int i, j; int len = 0; int *num_int; - unsigned char *ptr; + void *ptr; struct tzdbg_int_t *tzdbg_ptr; struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40; @@ -488,14 +488,12 @@ static int _disp_tz_interrupt_stats(void) (tzdbg.diag_buf->int_info_off - sizeof(uint32_t))); ptr = ((unsigned char *)tzdbg.diag_buf + tzdbg.diag_buf->int_info_off); - int_info_size = ((tzdbg.diag_buf->ring_off - - tzdbg.diag_buf->int_info_off)/(*num_int)); pr_info("qsee_version = 0x%x\n", tzdbg.tz_version); if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) { + tzdbg_ptr = ptr; for (i = 0; i < (*num_int); i++) { - tzdbg_ptr = (struct tzdbg_int_t *)ptr; len += snprintf(tzdbg.disp_buf + len, (debug_rw_buf_size - 1) - len, " Interrupt Number : 0x%x\n" @@ -519,11 +517,11 @@ static int _disp_tz_interrupt_stats(void) __func__); break; } - ptr += int_info_size; + tzdbg_ptr++; } } else { + tzdbg_ptr_tz40 = ptr; for (i = 0; i < (*num_int); i++) { - tzdbg_ptr_tz40 = (struct tzdbg_int_t_tz40 *)ptr; len += snprintf(tzdbg.disp_buf + len, (debug_rw_buf_size - 1) - len, " Interrupt Number : 0x%x\n" @@ -547,7 +545,7 @@ static int _disp_tz_interrupt_stats(void) __func__); break; } - ptr += int_info_size; + tzdbg_ptr_tz40++; } } -- GitLab From c5a1d2a4b014e71f391c381c5f5865a50c129495 Mon Sep 17 00:00:00 2001 From: Lloyd Atkinson Date: Fri, 30 Jun 2017 13:55:55 -0400 Subject: [PATCH 520/786] drm/msm/sde: take spin lock around encoder vblank registration Take sde phyiscal encoder spin lock around the vblank irq control logic to protect against race conditions between idle power collapse and the crtc both trying to control vblank irq registrations. 
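[Annotation, not part of the commit: the protected pattern is sketched below with generic names; struct enc and the register/unregister helpers are placeholders. The idea is that the refcount transition and the irq (un)registration must sit in one critical section so the idle-power-collapse path and the crtc cannot interleave.]

        /* Generic sketch; not the sde encoder code itself. */
        static int control_vblank_irq(struct enc *e, bool enable)
        {
                unsigned long flags;
                int ret = 0;

                spin_lock_irqsave(&e->lock, flags);

                if (enable && atomic_inc_return(&e->refcount) == 1)
                        ret = register_vblank_irq(e);   /* first enabler registers */
                else if (!enable && atomic_dec_return(&e->refcount) == 0)
                        ret = unregister_vblank_irq(e); /* last disabler unregisters */

                spin_unlock_irqrestore(&e->lock, flags);
                return ret;
        }
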
Change-Id: Ie67e0ae921ad1123585ef9b5e31079a320648273 Signed-off-by: Lloyd Atkinson --- drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c | 5 +++++ drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c index 9880ab1ab74b..5400fa71b380 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c @@ -526,6 +526,7 @@ static int sde_encoder_phys_cmd_control_vblank_irq( { struct sde_encoder_phys_cmd *cmd_enc = to_sde_encoder_phys_cmd(phys_enc); + unsigned long lock_flags; int ret = 0; if (!phys_enc) { @@ -541,6 +542,8 @@ static int sde_encoder_phys_cmd_control_vblank_irq( __builtin_return_address(0), enable, atomic_read(&phys_enc->vblank_refcount)); + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0, enable, atomic_read(&phys_enc->vblank_refcount)); @@ -550,6 +553,8 @@ static int sde_encoder_phys_cmd_control_vblank_irq( ret = sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + end: if (ret) SDE_ERROR_CMDENC(cmd_enc, diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c index 007738a6541d..5b3ac1fa74e6 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c @@ -476,6 +476,7 @@ static int sde_encoder_phys_vid_control_vblank_irq( { int ret = 0; struct sde_encoder_phys_vid *vid_enc; + unsigned long lock_flags; if (!phys_enc) { SDE_ERROR("invalid encoder\n"); @@ -492,6 +493,8 @@ static int sde_encoder_phys_vid_control_vblank_irq( __builtin_return_address(0), enable, atomic_read(&phys_enc->vblank_refcount)); + spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags); + SDE_EVT32(DRMID(phys_enc->parent), enable, atomic_read(&phys_enc->vblank_refcount)); @@ -501,6 +504,8 @@ static int sde_encoder_phys_vid_control_vblank_irq( ret = sde_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC); + spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags); + if (ret) SDE_ERROR_VIDENC(vid_enc, "control vblank irq error %d, enable %d\n", -- GitLab From b3ce449ff77450997984e02a039aad8a6adb2ca6 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Mon, 26 Jun 2017 18:42:11 -0600 Subject: [PATCH 521/786] skb: Adding trace event for gso. This patch adds trace events to help with debug for gso feature by identifying the packets(and their lenghts) that are using the segmentation offload feature. 
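[Annotation, not part of the commit: the minimal shape of such a trace event is sketched below. The event name my_gso is illustrative, not the print_skb_gso event added here; the generated trace_my_gso() call compiles to a static-key no-op while the event is disabled, so the cost on the transmit path is negligible.]

        /* Illustrative trace event definition (would live in a trace header). */
        TRACE_EVENT(my_gso,
                TP_PROTO(const struct sk_buff *skb),
                TP_ARGS(skb),

                TP_STRUCT__entry(
                        __field(unsigned int, len)
                        __field(unsigned int, data_len)
                ),

                TP_fast_assign(
                        __entry->len = skb->len;
                        __entry->data_len = skb->data_len;
                ),

                TP_printk("len=%u data_len=%u", __entry->len, __entry->data_len)
        );

        /* Call site, e.g. right before skb_gso_segment():
         *      trace_my_gso(skb);
         */

Once built in, the event added by this patch can be toggled at runtime from the tracing events directory (events/skb/print_skb_gso/enable under tracefs).
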
CRs-Fixed: 2062245 Change-Id: Ibfe1194cc63e74c75047040b0c540713d539992e Signed-off-by: Subash Abhinov Kasiviswanathan --- include/trace/events/skb.h | 22 ++++++++++++++++++++++ net/core/dev.c | 1 + 2 files changed, 23 insertions(+) diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 0c68ae22da22..ab82d8cebe0a 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -50,6 +50,28 @@ TRACE_EVENT(consume_skb, TP_printk("skbaddr=%p", __entry->skbaddr) ); +TRACE_EVENT(print_skb_gso, + + TP_PROTO(struct sk_buff *skb), + + TP_ARGS(skb), + + TP_STRUCT__entry( + __field(void *, skbaddr) + __field(int, len) + __field(int, data_len) + ), + + TP_fast_assign( + __entry->skbaddr = skb; + __entry->len = skb->len; + __entry->data_len = skb->data_len; + ), + + TP_printk("GSO: skbaddr=%p, len=%d, data_len=%d", + __entry->skbaddr, __entry->len, __entry->data_len) +); + TRACE_EVENT(skb_copy_datagram_iovec, TP_PROTO(const struct sk_buff *skb, int len), diff --git a/net/core/dev.c b/net/core/dev.c index c0d0b492c55d..06386aa78908 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2988,6 +2988,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device if (netif_needs_gso(skb, features)) { struct sk_buff *segs; + trace_print_skb_gso(skb); segs = skb_gso_segment(skb, features); if (IS_ERR(segs)) { goto out_kfree_skb; -- GitLab From 717d3398537152392762b223d978ccd265656f77 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Mon, 26 Jun 2017 18:46:50 -0600 Subject: [PATCH 522/786] skb: printing port numbers with gso trace events Adding source and destination port number info in the gso trace events to differentiate between the flows. CRs-Fixed: 2062245 Change-Id: Idbae7f95dfd56293805b58e3c6626f5f6e07d08a Signed-off-by: Subash Abhinov Kasiviswanathan --- include/trace/events/skb.h | 13 +++++++++---- net/core/dev.c | 7 ++++++- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index ab82d8cebe0a..33dfa76da59e 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -52,24 +52,29 @@ TRACE_EVENT(consume_skb, TRACE_EVENT(print_skb_gso, - TP_PROTO(struct sk_buff *skb), + TP_PROTO(struct sk_buff *skb, __be16 src, __be16 dest), - TP_ARGS(skb), + TP_ARGS(skb, src, dest), TP_STRUCT__entry( __field(void *, skbaddr) __field(int, len) __field(int, data_len) + __field(__be16, src) + __field(__be16, dest) ), TP_fast_assign( __entry->skbaddr = skb; __entry->len = skb->len; __entry->data_len = skb->data_len; + __entry->src = src; + __entry->dest = dest; ), - TP_printk("GSO: skbaddr=%p, len=%d, data_len=%d", - __entry->skbaddr, __entry->len, __entry->data_len) + TP_printk("GSO: skbaddr=%pK, len=%d, data_len=%d, src=%u, dest=%u", + __entry->skbaddr, __entry->len, __entry->data_len, + be16_to_cpu(__entry->src), be16_to_cpu(__entry->dest)) ); TRACE_EVENT(skb_copy_datagram_iovec, diff --git a/net/core/dev.c b/net/core/dev.c index 06386aa78908..a143dbd1a294 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -141,6 +141,8 @@ #include #include #include +#include +#include #include "net-sysfs.h" @@ -2988,7 +2990,10 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device if (netif_needs_gso(skb, features)) { struct sk_buff *segs; - trace_print_skb_gso(skb); + __be16 src_port = tcp_hdr(skb)->source; + __be16 dest_port = tcp_hdr(skb)->dest; + + trace_print_skb_gso(skb, src_port, dest_port); segs = skb_gso_segment(skb, features); if (IS_ERR(segs)) { 
goto out_kfree_skb; -- GitLab From ee8d341be1afb320f02656885b2545283fb33cee Mon Sep 17 00:00:00 2001 From: Rohit Kumar Date: Tue, 27 Jun 2017 12:15:39 +0530 Subject: [PATCH 523/786] ASoC: msm: update DAI link's be_id to id Update sdm660 machine drivers with Backend Dai-link's "be_id" field with "id". These changes are in accordance with the upstream changes in ASoC between Linux-4.4 and Linux-4.9 versions. Change-Id: I1644264dbd714144696a0b2b3c0a828371915072 Signed-off-by: Rohit Kumar --- sound/soc/msm/sdm660-common.c | 32 ++--- sound/soc/msm/sdm660-ext-dai-links.c | 176 +++++++++++++-------------- sound/soc/msm/sdm660-external.c | 30 ++--- sound/soc/msm/sdm660-internal.c | 158 ++++++++++++------------ 4 files changed, 198 insertions(+), 198 deletions(-) diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c index eddcb45e9150..c93d29edbae8 100644 --- a/sound/soc/msm/sdm660-common.c +++ b/sound/soc/msm/sdm660-common.c @@ -2038,16 +2038,16 @@ static void param_set_mask(struct snd_pcm_hw_params *p, int n, unsigned int bit) } } -static int msm_ext_disp_get_idx_from_beid(int32_t be_id) +static int msm_ext_disp_get_idx_from_beid(int32_t id) { int idx; - switch (be_id) { + switch (id) { case MSM_BACKEND_DAI_DISPLAY_PORT_RX: idx = DP_RX_IDX; break; default: - pr_err("%s: Incorrect ext_disp be_id %d\n", __func__, be_id); + pr_err("%s: Incorrect ext_disp id %d\n", __func__, id); idx = -EINVAL; break; } @@ -2077,7 +2077,7 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, pr_debug("%s: format = %d, rate = %d\n", __func__, params_format(params), params_rate(params)); - switch (dai_link->be_id) { + switch (dai_link->id) { case MSM_BACKEND_DAI_USB_RX: param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, usb_rx_cfg.bit_format); @@ -2093,8 +2093,8 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, break; case MSM_BACKEND_DAI_DISPLAY_PORT_RX: - idx = msm_ext_disp_get_idx_from_beid(dai_link->be_id); - if (IS_ERR_VALUE(idx)) { + idx = msm_ext_disp_get_idx_from_beid(dai_link->id); + if (idx < 0) { pr_err("%s: Incorrect ext disp idx %d\n", __func__, idx); rc = idx; @@ -2341,11 +2341,11 @@ void msm_aux_pcm_snd_shutdown(struct snd_pcm_substream *substream) } EXPORT_SYMBOL(msm_aux_pcm_snd_shutdown); -static int msm_get_port_id(int be_id) +static int msm_get_port_id(int id) { int afe_port_id; - switch (be_id) { + switch (id) { case MSM_BACKEND_DAI_PRI_MI2S_RX: afe_port_id = AFE_PORT_ID_PRIMARY_MI2S_RX; break; @@ -2371,7 +2371,7 @@ static int msm_get_port_id(int be_id) afe_port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX; break; default: - pr_err("%s: Invalid be_id: %d\n", __func__, be_id); + pr_err("%s: Invalid id: %d\n", __func__, id); afe_port_id = -EINVAL; } @@ -2422,7 +2422,7 @@ static int msm_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable) int port_id = 0; int index = cpu_dai->id; - port_id = msm_get_port_id(rtd->dai_link->be_id); + port_id = msm_get_port_id(rtd->dai_link->id); if (port_id < 0) { dev_err(rtd->card->dev, "%s: Invalid port_id\n", __func__); ret = port_id; @@ -2461,7 +2461,7 @@ int msm_mi2s_snd_startup(struct snd_pcm_substream *substream) int ret = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; - int port_id = msm_get_port_id(rtd->dai_link->be_id); + int port_id = msm_get_port_id(rtd->dai_link->id); int index = cpu_dai->id; unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS; @@ -2539,7 +2539,7 @@ void msm_mi2s_snd_shutdown(struct snd_pcm_substream *substream) { int ret; struct 
snd_soc_pcm_runtime *rtd = substream->private_data; - int port_id = msm_get_port_id(rtd->dai_link->be_id); + int port_id = msm_get_port_id(rtd->dai_link->id); int index = rtd->cpu_dai->id; pr_debug("%s(): substream = %s stream = %d\n", __func__, @@ -2699,13 +2699,13 @@ static int msm_populate_dai_link_component_of_node( dai_link[i].codec_name = NULL; } if (pdata->snd_card_val == INT_SND_CARD) { - if ((dai_link[i].be_id == + if ((dai_link[i].id == MSM_BACKEND_DAI_INT0_MI2S_RX) || - (dai_link[i].be_id == + (dai_link[i].id == MSM_BACKEND_DAI_INT1_MI2S_RX) || - (dai_link[i].be_id == + (dai_link[i].id == MSM_BACKEND_DAI_INT2_MI2S_TX) || - (dai_link[i].be_id == + (dai_link[i].id == MSM_BACKEND_DAI_INT3_MI2S_TX)) { index = of_property_match_string(cdev->of_node, "asoc-codec-names", diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c index 1c03d8c9e797..77d3875d0a06 100644 --- a/sound/soc/msm/sdm660-ext-dai-links.c +++ b/sound/soc/msm/sdm660-ext-dai-links.c @@ -86,8 +86,8 @@ static int msm_wcn_hw_params(struct snd_pcm_substream *substream, goto exit; } - dev_dbg(rtd->dev, "%s: tx_ch_cnt(%d) be_id %d\n", - __func__, tx_ch_cnt, dai_link->be_id); + dev_dbg(rtd->dev, "%s: tx_ch_cnt(%d) id %d\n", + __func__, tx_ch_cnt, dai_link->id); ret = snd_soc_dai_set_channel_map(cpu_dai, tx_ch_cnt, tx_ch, rx_ch_cnt, rx_ch); @@ -279,7 +279,7 @@ static struct snd_soc_dai_link msm_ext_tasha_fe_dai[] = { .platform_name = "msm-pcm-hostless", .codec_name = "tasha_codec", .codec_dai_name = "tasha_vifeedback", - .be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_4_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, @@ -368,7 +368,7 @@ static struct snd_soc_dai_link msm_ext_tavil_fe_dai[] = { .platform_name = "msm-pcm-hostless", .codec_name = "tavil_codec", .codec_dai_name = "tavil_vifeedback", - .be_id = MSM_BACKEND_DAI_SLIMBUS_4_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_4_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, @@ -411,7 +411,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .codec_dai_name = "tasha_mix_rx1", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_0_RX, .init = &msm_audrx_init, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, /* this dainlink has playback support */ @@ -428,7 +428,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .codec_dai_name = "tasha_tx1", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_0_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ignore_suspend = 1, .ops = &msm_ext_slimbus_be_ops, @@ -442,7 +442,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .codec_dai_name = "tasha_mix_rx1", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_1_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_1_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -458,7 +458,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .codec_dai_name = "tasha_tx3", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_1_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_1_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, .ignore_suspend = 1, @@ -472,7 +472,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .codec_dai_name = 
"tasha_mix_rx1", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_3_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_3_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -489,7 +489,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .no_pcm = 1, .dpcm_capture = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_3_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_3_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, .ignore_suspend = 1, @@ -503,7 +503,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .codec_dai_name = "tasha_mix_rx1", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_4_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_4_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -519,7 +519,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .codec_dai_name = "tasha_rx3", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_5_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_5_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -536,7 +536,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .codec_dai_name = "tasha_mad1", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_5_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, .ignore_suspend = 1, @@ -550,7 +550,7 @@ static struct snd_soc_dai_link msm_ext_tasha_be_dai[] = { .codec_dai_name = "tasha_rx4", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_6_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_6_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -569,7 +569,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_rx1", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_0_RX, .init = &msm_audrx_init, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, /* this dainlink has playback support */ @@ -586,7 +586,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_tx1", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_0_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ignore_suspend = 1, .ops = &msm_ext_slimbus_be_ops, @@ -600,7 +600,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_rx1", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_1_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_1_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -616,7 +616,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_tx3", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_1_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_1_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, .ignore_suspend = 1, @@ -630,7 +630,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_rx2", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_2_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_2_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, .ignore_pmdown_time = 1, 
@@ -645,7 +645,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_rx1", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_3_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_3_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -661,7 +661,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_tx1", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_3_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_3_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, .ignore_suspend = 1, @@ -675,7 +675,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_rx1", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_4_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_4_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -691,7 +691,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_rx3", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_5_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_5_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -708,7 +708,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_mad1", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_5_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_5_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, .ignore_suspend = 1, @@ -722,7 +722,7 @@ static struct snd_soc_dai_link msm_ext_tavil_be_dai[] = { .codec_dai_name = "tavil_rx4", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_6_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_6_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_ext_slimbus_be_ops, /* dai link has playback support */ @@ -748,7 +748,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .dpcm_capture = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA1 + .id = MSM_FRONTEND_DAI_MULTIMEDIA1 }, {/* hw:x,1 */ .name = MSM_DAILINK_NAME(Media2), @@ -765,7 +765,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA2, + .id = MSM_FRONTEND_DAI_MULTIMEDIA2, }, {/* hw:x,2 */ .name = "VoiceMMode1", @@ -782,7 +782,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_VOICEMMODE1, + .id = MSM_FRONTEND_DAI_VOICEMMODE1, }, {/* hw:x,3 */ .name = "MSM VoIP", @@ -799,7 +799,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_VOIP, + .id = MSM_FRONTEND_DAI_VOIP, }, {/* hw:x,4 */ .name = MSM_DAILINK_NAME(ULL), @@ -815,7 +815,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA3, + .id = MSM_FRONTEND_DAI_MULTIMEDIA3, }, /* Hostless PCM purpose */ {/* hw:x,5 */ @@ -871,7 +871,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, 
.ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA4, + .id = MSM_FRONTEND_DAI_MULTIMEDIA4, }, {/* hw:x,9*/ .name = "AUXPCM Hostless", @@ -953,7 +953,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA5, + .id = MSM_FRONTEND_DAI_MULTIMEDIA5, }, /* LSM FE */ {/* hw:x,14 */ @@ -970,7 +970,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM1, + .id = MSM_FRONTEND_DAI_LSM1, }, {/* hw:x,15 */ .name = MSM_DAILINK_NAME(Compress2), @@ -987,7 +987,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA7, + .id = MSM_FRONTEND_DAI_MULTIMEDIA7, }, {/* hw:x,16 */ .name = MSM_DAILINK_NAME(Compress3), @@ -1004,7 +1004,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA10, + .id = MSM_FRONTEND_DAI_MULTIMEDIA10, }, {/* hw:x,17 */ .name = MSM_DAILINK_NAME(ULL_NOIRQ), @@ -1021,7 +1021,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA8, + .id = MSM_FRONTEND_DAI_MULTIMEDIA8, }, {/* hw:x,18 */ .name = "HDMI_RX_HOSTLESS", @@ -1053,7 +1053,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_VOICEMMODE2, + .id = MSM_FRONTEND_DAI_VOICEMMODE2, }, {/* hw:x,20 */ .name = "Listen 2 Audio Service", @@ -1069,7 +1069,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM2, + .id = MSM_FRONTEND_DAI_LSM2, }, {/* hw:x,21 */ .name = "Listen 3 Audio Service", @@ -1085,7 +1085,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM3, + .id = MSM_FRONTEND_DAI_LSM3, }, {/* hw:x,22 */ .name = "Listen 4 Audio Service", @@ -1101,7 +1101,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM4, + .id = MSM_FRONTEND_DAI_LSM4, }, {/* hw:x,23 */ .name = "Listen 5 Audio Service", @@ -1117,7 +1117,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM5, + .id = MSM_FRONTEND_DAI_LSM5, }, {/* hw:x,24 */ .name = "Listen 6 Audio Service", @@ -1133,7 +1133,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM6 + .id = MSM_FRONTEND_DAI_LSM6 }, {/* hw:x,25 */ .name = "Listen 7 Audio Service", @@ -1149,7 +1149,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = 
"snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM7, + .id = MSM_FRONTEND_DAI_LSM7, }, {/* hw:x,26 */ .name = "Listen 8 Audio Service", @@ -1165,7 +1165,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM8, + .id = MSM_FRONTEND_DAI_LSM8, }, {/* hw:x,27 */ .name = MSM_DAILINK_NAME(Media9), @@ -1182,7 +1182,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA9, + .id = MSM_FRONTEND_DAI_MULTIMEDIA9, }, {/* hw:x,28 */ .name = MSM_DAILINK_NAME(Compress4), @@ -1199,7 +1199,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA11, + .id = MSM_FRONTEND_DAI_MULTIMEDIA11, }, {/* hw:x,29 */ .name = MSM_DAILINK_NAME(Compress5), @@ -1216,7 +1216,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA12, + .id = MSM_FRONTEND_DAI_MULTIMEDIA12, }, {/* hw:x,30 */ .name = MSM_DAILINK_NAME(Compress6), @@ -1233,7 +1233,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA13, + .id = MSM_FRONTEND_DAI_MULTIMEDIA13, }, {/* hw:x,31 */ .name = MSM_DAILINK_NAME(Compress7), @@ -1250,7 +1250,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA14, + .id = MSM_FRONTEND_DAI_MULTIMEDIA14, }, {/* hw:x,32 */ .name = MSM_DAILINK_NAME(Compress8), @@ -1267,7 +1267,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA15, + .id = MSM_FRONTEND_DAI_MULTIMEDIA15, }, {/* hw:x,33 */ .name = MSM_DAILINK_NAME(Compress9), @@ -1284,7 +1284,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA16, + .id = MSM_FRONTEND_DAI_MULTIMEDIA16, }, {/* hw:x,34 */ .name = "SLIMBUS_8 Hostless", @@ -1332,7 +1332,7 @@ static struct snd_soc_dai_link msm_ext_common_fe_dai[] = { .ignore_suspend = 1, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6, + .id = MSM_FRONTEND_DAI_MULTIMEDIA6, }, }; @@ -1346,7 +1346,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_AFE_PCM_RX, + .id = MSM_BACKEND_DAI_AFE_PCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, /* this dai link has playback support */ .ignore_pmdown_time = 1, @@ -1361,7 +1361,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_AFE_PCM_TX, + .id = MSM_BACKEND_DAI_AFE_PCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -1375,7 +1375,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { 
.codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX, + .id = MSM_BACKEND_DAI_INCALL_RECORD_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -1389,7 +1389,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX, + .id = MSM_BACKEND_DAI_INCALL_RECORD_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -1403,7 +1403,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX, + .id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -1417,7 +1417,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX, + .id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -1430,7 +1430,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_USB_RX, + .id = MSM_BACKEND_DAI_USB_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -1444,7 +1444,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_USB_TX, + .id = MSM_BACKEND_DAI_USB_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -1457,7 +1457,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_0, + .id = MSM_BACKEND_DAI_PRI_TDM_RX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -1471,7 +1471,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_0, + .id = MSM_BACKEND_DAI_PRI_TDM_TX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -1485,7 +1485,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_0, + .id = MSM_BACKEND_DAI_SEC_TDM_RX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -1499,7 +1499,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SEC_TDM_TX_0, + .id = MSM_BACKEND_DAI_SEC_TDM_TX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -1513,7 +1513,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_0, + .id = MSM_BACKEND_DAI_TERT_TDM_RX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -1527,7 +1527,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = 
MSM_BACKEND_DAI_TERT_TDM_TX_0, + .id = MSM_BACKEND_DAI_TERT_TDM_TX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -1541,7 +1541,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_0, + .id = MSM_BACKEND_DAI_QUAT_TDM_RX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -1555,7 +1555,7 @@ static struct snd_soc_dai_link msm_ext_common_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_0, + .id = MSM_BACKEND_DAI_QUAT_TDM_TX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -1572,7 +1572,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_PRI_MI2S_RX, + .id = MSM_BACKEND_DAI_PRI_MI2S_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -1587,7 +1587,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_PRI_MI2S_TX, + .id = MSM_BACKEND_DAI_PRI_MI2S_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -1601,7 +1601,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX, + .id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -1616,7 +1616,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX, + .id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -1630,7 +1630,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_RX, + .id = MSM_BACKEND_DAI_TERTIARY_MI2S_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -1645,7 +1645,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_TX, + .id = MSM_BACKEND_DAI_TERTIARY_MI2S_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -1659,7 +1659,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, + .id = MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -1674,7 +1674,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, + .id = MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -1692,7 +1692,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { 
.codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_AUXPCM_RX, + .id = MSM_BACKEND_DAI_AUXPCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -1707,7 +1707,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_AUXPCM_TX, + .id = MSM_BACKEND_DAI_AUXPCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -1723,7 +1723,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_RX, + .id = MSM_BACKEND_DAI_SEC_AUXPCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -1738,7 +1738,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_TX, + .id = MSM_BACKEND_DAI_SEC_AUXPCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, .ignore_pmdown_time = 1, @@ -1754,7 +1754,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_TERT_AUXPCM_RX, + .id = MSM_BACKEND_DAI_TERT_AUXPCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -1769,7 +1769,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_TERT_AUXPCM_TX, + .id = MSM_BACKEND_DAI_TERT_AUXPCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, .ignore_pmdown_time = 1, @@ -1785,7 +1785,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_RX, + .id = MSM_BACKEND_DAI_QUAT_AUXPCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -1800,7 +1800,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_TX, + .id = MSM_BACKEND_DAI_QUAT_AUXPCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, .ignore_pmdown_time = 1, @@ -1822,7 +1822,7 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = { .codec_dai_name = "btfm_bt_sco_a2dp_slim_rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_7_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_7_RX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_wcn_ops, /* dai link has playback support */ @@ -1838,7 +1838,7 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = { .codec_dai_name = "btfm_bt_sco_slim_tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_7_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_7_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .ops = &msm_wcn_ops, .ignore_suspend = 1, @@ -1852,7 +1852,7 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = { .codec_dai_name = "btfm_fm_slim_tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_8_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_8_TX, .be_hw_params_fixup = msm_ext_be_hw_params_fixup, .init = &msm_wcn_init, .ops = &msm_wcn_ops, @@ -1871,7 +1871,7 @@ 
static struct snd_soc_dai_link ext_disp_be_dai_link[] = { .codec_dai_name = "msm_dp_audio_codec_rx_dai", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX, + .id = MSM_BACKEND_DAI_DISPLAY_PORT_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c index 2c3d7fc3e63f..8c2c0fa81044 100644 --- a/sound/soc/msm/sdm660-external.c +++ b/sound/soc/msm/sdm660-external.c @@ -747,11 +747,11 @@ static const struct snd_kcontrol_new msm_snd_controls[] = { msm_bt_sample_rate_put), }; -static int msm_slim_get_ch_from_beid(int32_t be_id) +static int msm_slim_get_ch_from_beid(int32_t id) { int ch_id = 0; - switch (be_id) { + switch (id) { case MSM_BACKEND_DAI_SLIMBUS_0_RX: ch_id = SLIM_RX_0; break; @@ -821,14 +821,14 @@ int msm_ext_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, pr_debug("%s: format = %d, rate = %d\n", __func__, params_format(params), params_rate(params)); - switch (dai_link->be_id) { + switch (dai_link->id) { case MSM_BACKEND_DAI_SLIMBUS_0_RX: case MSM_BACKEND_DAI_SLIMBUS_1_RX: case MSM_BACKEND_DAI_SLIMBUS_2_RX: case MSM_BACKEND_DAI_SLIMBUS_3_RX: case MSM_BACKEND_DAI_SLIMBUS_4_RX: case MSM_BACKEND_DAI_SLIMBUS_6_RX: - idx = msm_slim_get_ch_from_beid(dai_link->be_id); + idx = msm_slim_get_ch_from_beid(dai_link->id); param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, slim_rx_cfg[idx].bit_format); rate->min = rate->max = slim_rx_cfg[idx].sample_rate; @@ -837,7 +837,7 @@ int msm_ext_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, case MSM_BACKEND_DAI_SLIMBUS_0_TX: case MSM_BACKEND_DAI_SLIMBUS_3_TX: - idx = msm_slim_get_ch_from_beid(dai_link->be_id); + idx = msm_slim_get_ch_from_beid(dai_link->id); param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, slim_tx_cfg[idx].bit_format); rate->min = rate->max = slim_tx_cfg[idx].sample_rate; @@ -938,15 +938,15 @@ int msm_snd_hw_params(struct snd_pcm_substream *substream, __func__, ret); goto err_ch_map; } - if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_5_RX) { + if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_5_RX) { pr_debug("%s: rx_5_ch=%d\n", __func__, slim_rx_cfg[5].channels); rx_ch_count = slim_rx_cfg[5].channels; - } else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_2_RX) { + } else if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_2_RX) { pr_debug("%s: rx_2_ch=%d\n", __func__, slim_rx_cfg[2].channels); rx_ch_count = slim_rx_cfg[2].channels; - } else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_6_RX) { + } else if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_6_RX) { pr_debug("%s: rx_6_ch=%d\n", __func__, slim_rx_cfg[6].channels); rx_ch_count = slim_rx_cfg[6].channels; @@ -973,19 +973,19 @@ int msm_snd_hw_params(struct snd_pcm_substream *substream, goto err_ch_map; } /* For _tx1 case */ - if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_0_TX) + if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_0_TX) user_set_tx_ch = slim_tx_cfg[0].channels; /* For _tx3 case */ - else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_1_TX) + else if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_1_TX) user_set_tx_ch = slim_tx_cfg[1].channels; - else if (dai_link->be_id == MSM_BACKEND_DAI_SLIMBUS_4_TX) + else if (dai_link->id == MSM_BACKEND_DAI_SLIMBUS_4_TX) user_set_tx_ch = msm_vi_feed_tx_ch; else user_set_tx_ch = tx_ch_cnt; - pr_debug("%s: msm_slim_0_tx_ch(%d) user_set_tx_ch(%d) tx_ch_cnt(%d), be_id (%d)\n", + pr_debug("%s: msm_slim_0_tx_ch(%d) user_set_tx_ch(%d) tx_ch_cnt(%d), id (%d)\n", __func__, slim_tx_cfg[0].channels, 
user_set_tx_ch, - tx_ch_cnt, dai_link->be_id); + tx_ch_cnt, dai_link->id); ret = snd_soc_dai_set_channel_map(cpu_dai, user_set_tx_ch, tx_ch, 0, 0); @@ -1097,8 +1097,8 @@ int msm_snd_cpe_hw_params(struct snd_pcm_substream *substream, goto end; } - pr_debug("%s: tx_ch_cnt(%d) be_id %d\n", - __func__, tx_ch_cnt, dai_link->be_id); + pr_debug("%s: tx_ch_cnt(%d) id %d\n", + __func__, tx_ch_cnt, dai_link->id); ret = snd_soc_dai_set_channel_map(cpu_dai, tx_ch_cnt, tx_ch, 0, 0); diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c index 802137ba4b50..ffd545abf4c4 100644 --- a/sound/soc/msm/sdm660-internal.c +++ b/sound/soc/msm/sdm660-internal.c @@ -539,11 +539,11 @@ static int enable_spk_ext_pa(struct snd_soc_codec *codec, int enable) return 0; } -static int int_mi2s_get_idx_from_beid(int32_t be_id) +static int int_mi2s_get_idx_from_beid(int32_t id) { int idx = 0; - switch (be_id) { + switch (id) { case MSM_BACKEND_DAI_INT0_MI2S_RX: idx = INT0_MI2S; break; @@ -596,13 +596,13 @@ static int int_mi2s_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, pr_debug("%s: format = %d, rate = %d\n", __func__, params_format(params), params_rate(params)); - switch (dai_link->be_id) { + switch (dai_link->id) { case MSM_BACKEND_DAI_INT0_MI2S_RX: case MSM_BACKEND_DAI_INT2_MI2S_TX: case MSM_BACKEND_DAI_INT3_MI2S_TX: case MSM_BACKEND_DAI_INT4_MI2S_RX: case MSM_BACKEND_DAI_INT5_MI2S_TX: - idx = int_mi2s_get_idx_from_beid(dai_link->be_id); + idx = int_mi2s_get_idx_from_beid(dai_link->id); rate->min = rate->max = int_mi2s_cfg[idx].sample_rate; channels->min = channels->max = int_mi2s_cfg[idx].channels; @@ -625,7 +625,7 @@ static int msm_btfm_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); - switch (dai_link->be_id) { + switch (dai_link->id) { case MSM_BACKEND_DAI_SLIMBUS_7_RX: case MSM_BACKEND_DAI_SLIMBUS_7_TX: param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT, @@ -982,11 +982,11 @@ static int msm_int_mclk0_event(struct snd_soc_dapm_widget *w, return 0; } -static int int_mi2s_get_port_id(int be_id) +static int int_mi2s_get_port_id(int id) { int afe_port_id; - switch (be_id) { + switch (id) { case MSM_BACKEND_DAI_INT0_MI2S_RX: afe_port_id = AFE_PORT_ID_INT0_MI2S_RX; break; @@ -1003,7 +1003,7 @@ static int int_mi2s_get_port_id(int be_id) afe_port_id = AFE_PORT_ID_INT5_MI2S_TX; break; default: - pr_err("%s: Invalid be_id: %d\n", __func__, be_id); + pr_err("%s: Invalid id: %d\n", __func__, id); afe_port_id = -EINVAL; } @@ -1073,7 +1073,7 @@ static int int_mi2s_set_sclk(struct snd_pcm_substream *substream, bool enable) int port_id = 0; int index; - port_id = int_mi2s_get_port_id(rtd->dai_link->be_id); + port_id = int_mi2s_get_port_id(rtd->dai_link->id); if (port_id < 0) { dev_err(rtd->card->dev, "%s: Invalid port_id\n", __func__); ret = port_id; @@ -1396,8 +1396,8 @@ static int msm_wcn_hw_params(struct snd_pcm_substream *substream, goto exit; } - dev_dbg(rtd->dev, "%s: tx_ch_cnt(%d) be_id %d\n", - __func__, tx_ch_cnt, dai_link->be_id); + dev_dbg(rtd->dev, "%s: tx_ch_cnt(%d) id %d\n", + __func__, tx_ch_cnt, dai_link->id); ret = snd_soc_dai_set_channel_map(cpu_dai, tx_ch_cnt, tx_ch, rx_ch_cnt, rx_ch); @@ -1669,7 +1669,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .dpcm_capture = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA1 + .id = MSM_FRONTEND_DAI_MULTIMEDIA1 }, {/* hw:x,1 */ .name = MSM_DAILINK_NAME(Media2), @@ -1686,7 +1686,7 @@ static 
struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA2, + .id = MSM_FRONTEND_DAI_MULTIMEDIA2, }, {/* hw:x,2 */ .name = "VoiceMMode1", @@ -1703,7 +1703,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_VOICEMMODE1, + .id = MSM_FRONTEND_DAI_VOICEMMODE1, }, {/* hw:x,3 */ .name = "MSM VoIP", @@ -1720,7 +1720,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_VOIP, + .id = MSM_FRONTEND_DAI_VOIP, }, {/* hw:x,4 */ .name = MSM_DAILINK_NAME(ULL), @@ -1736,7 +1736,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA3, + .id = MSM_FRONTEND_DAI_MULTIMEDIA3, }, /* Hostless PCM purpose */ {/* hw:x,5 */ @@ -1792,7 +1792,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA4, + .id = MSM_FRONTEND_DAI_MULTIMEDIA4, }, {/* hw:x,9*/ .name = "AUXPCM Hostless", @@ -1873,7 +1873,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, /* this dai link has playback support */ .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA5, + .id = MSM_FRONTEND_DAI_MULTIMEDIA5, }, /* LSM FE */ {/* hw:x,14 */ @@ -1890,7 +1890,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM1, + .id = MSM_FRONTEND_DAI_LSM1, }, {/* hw:x,15 */ .name = MSM_DAILINK_NAME(Compress2), @@ -1905,7 +1905,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, .ignore_suspend = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA7, + .id = MSM_FRONTEND_DAI_MULTIMEDIA7, }, {/* hw:x,16 */ .name = MSM_DAILINK_NAME(Compress3), @@ -1922,7 +1922,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA10, + .id = MSM_FRONTEND_DAI_MULTIMEDIA10, }, {/* hw:x,17 */ .name = MSM_DAILINK_NAME(ULL_NOIRQ), @@ -1939,7 +1939,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA8, + .id = MSM_FRONTEND_DAI_MULTIMEDIA8, }, {/* hw:x,18 */ .name = "HDMI_RX_HOSTLESS", @@ -1971,7 +1971,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_VOICEMMODE2, + .id = MSM_FRONTEND_DAI_VOICEMMODE2, }, {/* hw:x,20 */ .name = "Listen 2 Audio Service", @@ -1987,7 +1987,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM2, + .id = MSM_FRONTEND_DAI_LSM2, }, {/* hw:x,21 */ .name = "Listen 3 Audio Service", @@ -2003,7 +2003,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = 
MSM_FRONTEND_DAI_LSM3, + .id = MSM_FRONTEND_DAI_LSM3, }, {/* hw:x,22 */ .name = "Listen 4 Audio Service", @@ -2019,7 +2019,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM4, + .id = MSM_FRONTEND_DAI_LSM4, }, {/* hw:x,23 */ .name = "Listen 5 Audio Service", @@ -2035,7 +2035,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM5, + .id = MSM_FRONTEND_DAI_LSM5, }, {/* hw:x,24 */ .name = "Listen 6 Audio Service", @@ -2051,7 +2051,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM6 + .id = MSM_FRONTEND_DAI_LSM6 }, {/* hw:x,25 */ .name = "Listen 7 Audio Service", @@ -2067,7 +2067,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM7, + .id = MSM_FRONTEND_DAI_LSM7, }, {/* hw:x,26 */ .name = "Listen 8 Audio Service", @@ -2083,7 +2083,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_pmdown_time = 1, .codec_dai_name = "snd-soc-dummy-dai", .codec_name = "snd-soc-dummy", - .be_id = MSM_FRONTEND_DAI_LSM8, + .id = MSM_FRONTEND_DAI_LSM8, }, {/* hw:x,27 */ .name = MSM_DAILINK_NAME(Media9), @@ -2100,7 +2100,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA9, + .id = MSM_FRONTEND_DAI_MULTIMEDIA9, }, {/* hw:x,28 */ .name = MSM_DAILINK_NAME(Compress4), @@ -2117,7 +2117,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA11, + .id = MSM_FRONTEND_DAI_MULTIMEDIA11, }, {/* hw:x,29 */ .name = MSM_DAILINK_NAME(Compress5), @@ -2134,7 +2134,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA12, + .id = MSM_FRONTEND_DAI_MULTIMEDIA12, }, {/* hw:x,30 */ .name = MSM_DAILINK_NAME(Compress6), @@ -2151,7 +2151,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA13, + .id = MSM_FRONTEND_DAI_MULTIMEDIA13, }, {/* hw:x,31 */ .name = MSM_DAILINK_NAME(Compress7), @@ -2168,7 +2168,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA14, + .id = MSM_FRONTEND_DAI_MULTIMEDIA14, }, {/* hw:x,32 */ .name = MSM_DAILINK_NAME(Compress8), @@ -2185,7 +2185,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA15, + .id = MSM_FRONTEND_DAI_MULTIMEDIA15, }, {/* hw:x,33 */ .name = MSM_DAILINK_NAME(Compress9), @@ -2202,7 +2202,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .ignore_pmdown_time = 1, /* this dai link has playback support */ - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA16, + .id = MSM_FRONTEND_DAI_MULTIMEDIA16, }, {/* hw:x,34 */ .name = 
"SLIMBUS_8 Hostless", @@ -2302,7 +2302,7 @@ static struct snd_soc_dai_link msm_int_dai[] = { .ignore_suspend = 1, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, .ignore_pmdown_time = 1, - .be_id = MSM_FRONTEND_DAI_MULTIMEDIA6, + .id = MSM_FRONTEND_DAI_MULTIMEDIA6, }, }; @@ -2315,7 +2315,7 @@ static struct snd_soc_dai_link msm_int_wsa_dai[] = { .platform_name = "msm-pcm-hostless", .codec_name = "msm_sdw_codec", .codec_dai_name = "msm_sdw_vifeedback", - .be_id = MSM_BACKEND_DAI_INT5_MI2S_TX, + .id = MSM_BACKEND_DAI_INT5_MI2S_TX, .be_hw_params_fixup = int_mi2s_be_hw_params_fixup, .ops = &msm_sdw_mi2s_be_ops, .no_host_mode = SND_SOC_DAI_LINK_NO_HOST, @@ -2338,7 +2338,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .dpcm_playback = 1, .async_ops = ASYNC_DPCM_SND_SOC_PREPARE | ASYNC_DPCM_SND_SOC_HW_PARAMS, - .be_id = MSM_BACKEND_DAI_INT0_MI2S_RX, + .id = MSM_BACKEND_DAI_INT0_MI2S_RX, .init = &msm_audrx_init, .be_hw_params_fixup = int_mi2s_be_hw_params_fixup, .ops = &msm_int_mi2s_be_ops, @@ -2355,7 +2355,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .dpcm_capture = 1, .async_ops = ASYNC_DPCM_SND_SOC_PREPARE | ASYNC_DPCM_SND_SOC_HW_PARAMS, - .be_id = MSM_BACKEND_DAI_INT3_MI2S_TX, + .id = MSM_BACKEND_DAI_INT3_MI2S_TX, .be_hw_params_fixup = int_mi2s_be_hw_params_fixup, .ops = &msm_int_mi2s_be_ops, .ignore_suspend = 1, @@ -2371,7 +2371,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .dpcm_capture = 1, .async_ops = ASYNC_DPCM_SND_SOC_PREPARE | ASYNC_DPCM_SND_SOC_HW_PARAMS, - .be_id = MSM_BACKEND_DAI_INT2_MI2S_TX, + .id = MSM_BACKEND_DAI_INT2_MI2S_TX, .be_hw_params_fixup = int_mi2s_be_hw_params_fixup, .ops = &msm_int_mi2s_be_ops, .ignore_suspend = 1, @@ -2385,7 +2385,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_AFE_PCM_RX, + .id = MSM_BACKEND_DAI_AFE_PCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, /* this dainlink has playback support */ .ignore_pmdown_time = 1, @@ -2400,7 +2400,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_AFE_PCM_TX, + .id = MSM_BACKEND_DAI_AFE_PCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -2414,7 +2414,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_INCALL_RECORD_TX, + .id = MSM_BACKEND_DAI_INCALL_RECORD_TX, .be_hw_params_fixup = msm_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -2428,7 +2428,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_INCALL_RECORD_RX, + .id = MSM_BACKEND_DAI_INCALL_RECORD_RX, .be_hw_params_fixup = msm_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -2442,7 +2442,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX, + .id = MSM_BACKEND_DAI_VOICE_PLAYBACK_TX, .be_hw_params_fixup = msm_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -2456,7 +2456,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX, + .id = MSM_BACKEND_DAI_VOICE2_PLAYBACK_TX, .be_hw_params_fixup = msm_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -2469,7 +2469,7 @@ static struct 
snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_USB_RX, + .id = MSM_BACKEND_DAI_USB_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -2483,7 +2483,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_USB_TX, + .id = MSM_BACKEND_DAI_USB_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, }, @@ -2496,7 +2496,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_PRI_TDM_RX_0, + .id = MSM_BACKEND_DAI_PRI_TDM_RX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -2510,7 +2510,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_PRI_TDM_TX_0, + .id = MSM_BACKEND_DAI_PRI_TDM_TX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -2524,7 +2524,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SEC_TDM_RX_0, + .id = MSM_BACKEND_DAI_SEC_TDM_RX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -2538,7 +2538,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SEC_TDM_TX_0, + .id = MSM_BACKEND_DAI_SEC_TDM_TX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -2552,7 +2552,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_TERT_TDM_RX_0, + .id = MSM_BACKEND_DAI_TERT_TDM_RX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -2566,7 +2566,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_TERT_TDM_TX_0, + .id = MSM_BACKEND_DAI_TERT_TDM_TX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -2580,7 +2580,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_QUAT_TDM_RX_0, + .id = MSM_BACKEND_DAI_QUAT_TDM_RX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -2594,7 +2594,7 @@ static struct snd_soc_dai_link msm_int_be_dai[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_QUAT_TDM_TX_0, + .id = MSM_BACKEND_DAI_QUAT_TDM_TX_0, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_tdm_be_ops, .ignore_suspend = 1, @@ -2611,7 +2611,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_PRI_MI2S_RX, + .id = MSM_BACKEND_DAI_PRI_MI2S_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -2626,7 +2626,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, 
.dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_PRI_MI2S_TX, + .id = MSM_BACKEND_DAI_PRI_MI2S_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -2640,7 +2640,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX, + .id = MSM_BACKEND_DAI_SECONDARY_MI2S_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -2655,7 +2655,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX, + .id = MSM_BACKEND_DAI_SECONDARY_MI2S_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -2669,7 +2669,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_RX, + .id = MSM_BACKEND_DAI_TERTIARY_MI2S_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -2684,7 +2684,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_TERTIARY_MI2S_TX, + .id = MSM_BACKEND_DAI_TERTIARY_MI2S_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -2698,7 +2698,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, + .id = MSM_BACKEND_DAI_QUATERNARY_MI2S_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -2713,7 +2713,7 @@ static struct snd_soc_dai_link msm_mi2s_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, + .id = MSM_BACKEND_DAI_QUATERNARY_MI2S_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ops = &msm_mi2s_be_ops, .ignore_suspend = 1, @@ -2731,7 +2731,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_AUXPCM_RX, + .id = MSM_BACKEND_DAI_AUXPCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -2746,7 +2746,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_AUXPCM_TX, + .id = MSM_BACKEND_DAI_AUXPCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -2762,7 +2762,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_RX, + .id = MSM_BACKEND_DAI_SEC_AUXPCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -2777,7 +2777,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SEC_AUXPCM_TX, + .id = MSM_BACKEND_DAI_SEC_AUXPCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, .ignore_pmdown_time = 1, @@ -2793,7 +2793,7 @@ static struct snd_soc_dai_link 
msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_TERT_AUXPCM_RX, + .id = MSM_BACKEND_DAI_TERT_AUXPCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -2808,7 +2808,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_TERT_AUXPCM_TX, + .id = MSM_BACKEND_DAI_TERT_AUXPCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, .ignore_pmdown_time = 1, @@ -2824,7 +2824,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_RX, + .id = MSM_BACKEND_DAI_QUAT_AUXPCM_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, @@ -2839,7 +2839,7 @@ static struct snd_soc_dai_link msm_auxpcm_be_dai_links[] = { .codec_dai_name = "msm-stub-tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_QUAT_AUXPCM_TX, + .id = MSM_BACKEND_DAI_QUAT_AUXPCM_TX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_suspend = 1, .ignore_pmdown_time = 1, @@ -2862,7 +2862,7 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = { .codec_dai_name = "btfm_bt_sco_a2dp_slim_rx", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_7_RX, + .id = MSM_BACKEND_DAI_SLIMBUS_7_RX, .be_hw_params_fixup = msm_btfm_be_hw_params_fixup, .ops = &msm_wcn_ops, /* dai link has playback support */ @@ -2878,7 +2878,7 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = { .codec_dai_name = "btfm_bt_sco_slim_tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_7_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_7_TX, .be_hw_params_fixup = msm_btfm_be_hw_params_fixup, .ops = &msm_wcn_ops, .ignore_suspend = 1, @@ -2892,7 +2892,7 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = { .codec_dai_name = "btfm_fm_slim_tx", .no_pcm = 1, .dpcm_capture = 1, - .be_id = MSM_BACKEND_DAI_SLIMBUS_8_TX, + .id = MSM_BACKEND_DAI_SLIMBUS_8_TX, .be_hw_params_fixup = msm_btfm_be_hw_params_fixup, .init = &msm_wcn_init, .ops = &msm_wcn_ops, @@ -2910,7 +2910,7 @@ static struct snd_soc_dai_link msm_wsa_be_dai_links[] = { .codec_dai_name = "msm_sdw_i2s_rx1", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_INT4_MI2S_RX, + .id = MSM_BACKEND_DAI_INT4_MI2S_RX, .init = &msm_sdw_audrx_init, .be_hw_params_fixup = int_mi2s_be_hw_params_fixup, .ops = &msm_sdw_mi2s_be_ops, @@ -2929,7 +2929,7 @@ static struct snd_soc_dai_link ext_disp_be_dai_link[] = { .codec_dai_name = "msm_dp_audio_codec_rx_dai", .no_pcm = 1, .dpcm_playback = 1, - .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX, + .id = MSM_BACKEND_DAI_DISPLAY_PORT_RX, .be_hw_params_fixup = msm_common_be_hw_params_fixup, .ignore_pmdown_time = 1, .ignore_suspend = 1, -- GitLab From 376e9cd71f91b791a899aa2a4a00e8624d4db381 Mon Sep 17 00:00:00 2001 From: Rohit Kumar Date: Wed, 28 Jun 2017 10:50:52 +0530 Subject: [PATCH 524/786] ASoC: msm: access aux_devs with component list Update sdm660 machine driver to access aux_devs with component linked list. Also update the api to create subdirectory to snd_info_create_subdir().These changes are in accordance with the upstream changes in ASoC between Linux-4.4 and Linux-4.9 versions. 
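For reference, the aux-device lookup that this patch switches to can be condensed as the sketch below. The struct fields and the WSA8810_NAME_1/2 identifiers are the ones used in the hunks that follow; the helper name itself is illustrative only and is not part of the driver.

    static bool first_aux_is_wsa8810(struct snd_soc_card *card)
    {
            struct snd_soc_component *aux_comp;

            /* aux components are now reached through the card's
             * aux_comp_list instead of rtd->card->rtd_aux
             */
            if (!card->num_aux_devs || list_empty(&card->aux_comp_list))
                    return false;

            aux_comp = list_first_entry(&card->aux_comp_list,
                                        struct snd_soc_component, list_aux);
            return !strcmp(aux_comp->name, WSA8810_NAME_1) ||
                   !strcmp(aux_comp->name, WSA8810_NAME_2);
    }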
Change-Id: Ibf1e7dba4c0c6268ef1b1e552e0382bbda8a9f89 Signed-off-by: Rohit Kumar --- sound/soc/msm/sdm660-external.c | 25 ++++++++++++++++++------- sound/soc/msm/sdm660-internal.c | 18 ++++++++++++------ 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c index 8c2c0fa81044..84d1c2ea9b18 100644 --- a/sound/soc/msm/sdm660-external.c +++ b/sound/soc/msm/sdm660-external.c @@ -1495,7 +1495,7 @@ int msm_audrx_init(struct snd_soc_pcm_runtime *rtd) snd_soc_codec_get_dapm(codec); struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct snd_soc_dai *codec_dai = rtd->codec_dai; - struct snd_soc_pcm_runtime *rtd_aux = rtd->card->rtd_aux; + struct snd_soc_component *aux_comp; struct snd_card *card; struct snd_info_entry *entry; struct msm_asoc_mach_data *pdata = @@ -1678,13 +1678,20 @@ int msm_audrx_init(struct snd_soc_pcm_runtime *rtd) * Send speaker configuration only for WSA8810. * Defalut configuration is for WSA8815. */ + pr_debug("%s: Number of aux devices: %d\n", + __func__, rtd->card->num_aux_devs); + if (!strcmp(dev_name(codec_dai->dev), "tavil_codec")) { - if (rtd_aux && rtd_aux->component) - if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) || - !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) { + if (rtd->card->num_aux_devs && + !list_empty(&rtd->card->aux_comp_list)) { + aux_comp = list_first_entry(&rtd->card->aux_comp_list, + struct snd_soc_component, list_aux); + if (!strcmp(aux_comp->name, WSA8810_NAME_1) || + !strcmp(aux_comp->name, WSA8810_NAME_2)) { tavil_set_spkr_mode(rtd->codec, SPKR_MODE_1); tavil_set_spkr_gain_offset(rtd->codec, RX_GAIN_OFFSET_M1P5_DB); + } } card = rtd->card->snd_card; entry = snd_info_create_subdir(card->module, "codecs", @@ -1698,12 +1705,16 @@ int msm_audrx_init(struct snd_soc_pcm_runtime *rtd) pdata->codec_root = entry; tavil_codec_info_create_codec_entry(pdata->codec_root, codec); } else { - if (rtd_aux && rtd_aux->component) - if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) || - !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) { + if (rtd->card->num_aux_devs && + !list_empty(&rtd->card->aux_comp_list)) { + aux_comp = list_first_entry(&rtd->card->aux_comp_list, + struct snd_soc_component, list_aux); + if (!strcmp(aux_comp->name, WSA8810_NAME_1) || + !strcmp(aux_comp->name, WSA8810_NAME_2)) { tasha_set_spkr_mode(rtd->codec, SPKR_MODE_1); tasha_set_spkr_gain_offset(rtd->codec, RX_GAIN_OFFSET_M1P5_DB); + } } card = rtd->card->snd_card; entry = snd_info_create_subdir(card->module, "codecs", diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c index ffd545abf4c4..a57d6f611942 100644 --- a/sound/soc/msm/sdm660-internal.c +++ b/sound/soc/msm/sdm660-internal.c @@ -1303,7 +1303,7 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd) card = rtd->card->snd_card; if (!codec_root) - codec_root = snd_register_module_info(card->module, "codecs", + codec_root = snd_info_create_subdir(card->module, "codecs", card->proc_root); if (!codec_root) { pr_debug("%s: Cannot create codecs module entry\n", @@ -1323,7 +1323,7 @@ static int msm_sdw_audrx_init(struct snd_soc_pcm_runtime *rtd) struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec); struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(rtd->card); - struct snd_soc_pcm_runtime *rtd_aux = rtd->card->rtd_aux; + struct snd_soc_component *aux_comp; struct snd_card *card; snd_soc_add_codec_controls(codec, msm_sdw_controls, @@ -1342,16 +1342,22 @@ static int msm_sdw_audrx_init(struct 
snd_soc_pcm_runtime *rtd) * Send speaker configuration only for WSA8810. * Default configuration is for WSA8815. */ - if (rtd_aux && rtd_aux->component) - if (!strcmp(rtd_aux->component->name, WSA8810_NAME_1) || - !strcmp(rtd_aux->component->name, WSA8810_NAME_2)) { + pr_debug("%s: Number of aux devices: %d\n", + __func__, rtd->card->num_aux_devs); + if (rtd->card->num_aux_devs && + !list_empty(&rtd->card->aux_comp_list)) { + aux_comp = list_first_entry(&rtd->card->aux_comp_list, + struct snd_soc_component, list_aux); + if (!strcmp(aux_comp->name, WSA8810_NAME_1) || + !strcmp(aux_comp->name, WSA8810_NAME_2)) { msm_sdw_set_spkr_mode(rtd->codec, SPKR_MODE_1); msm_sdw_set_spkr_gain_offset(rtd->codec, RX_GAIN_OFFSET_M1P5_DB); + } } card = rtd->card->snd_card; if (!codec_root) - codec_root = snd_register_module_info(card->module, "codecs", + codec_root = snd_info_create_subdir(card->module, "codecs", card->proc_root); if (!codec_root) { pr_debug("%s: Cannot create codecs module entry\n", -- GitLab From 546351307792363b8a0fdabdacab51cdca2377db Mon Sep 17 00:00:00 2001 From: Rohit Kumar Date: Wed, 28 Jun 2017 12:56:29 +0530 Subject: [PATCH 525/786] ASoC: msm: Update arguments for msm_swap_gnd_mic swap_gnd_mic function arguments is updated to support analog USBC. Make corresponding change in msm_swap_gnd_mic in sdm660 machine driver. Change-Id: Ieb4b74c254e4e0d15f5d9685703711ce3934260b Signed-off-by: Rohit Kumar --- sound/soc/msm/sdm660-common.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c index c93d29edbae8..b34b04b83044 100644 --- a/sound/soc/msm/sdm660-common.c +++ b/sound/soc/msm/sdm660-common.c @@ -12,6 +12,9 @@ #include #include +#include +#include +#include #include #include #include @@ -190,7 +193,7 @@ struct msm_wsa881x_dev_info { static struct snd_soc_aux_dev *msm_aux_dev; static struct snd_soc_codec_conf *msm_codec_conf; -static bool msm_swap_gnd_mic(struct snd_soc_codec *codec); +static bool msm_swap_gnd_mic(struct snd_soc_codec *codec, bool active); static struct wcd_mbhc_config mbhc_cfg = { .read_fw_bin = false, @@ -2594,7 +2597,7 @@ static int msm_prepare_us_euro(struct snd_soc_card *card) return ret; } -static bool msm_swap_gnd_mic(struct snd_soc_codec *codec) +static bool msm_swap_gnd_mic(struct snd_soc_codec *codec, bool active) { struct snd_soc_card *card = codec->component.card; struct msm_asoc_mach_data *pdata = -- GitLab From 8a51ffab5ee982a511fa4b7920a1355119bab2d7 Mon Sep 17 00:00:00 2001 From: Rohit Kumar Date: Mon, 19 Jun 2017 11:01:43 +0530 Subject: [PATCH 526/786] ASoC: msm: Disable HDMI audio codec for SDM670 Display driver for HDMI is not yet ready for 4.9 kernel. Disable HDMI_AUDIO codec driver until display driver change are available. 
Change-Id: I10ddda4568933cb69fb805ac9346a8220e23ab53 Signed-off-by: Rohit Kumar --- sound/soc/msm/Kconfig | 2 -- 1 file changed, 2 deletions(-) diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig index c557ae06e95f..33caa9e0da5e 100644 --- a/sound/soc/msm/Kconfig +++ b/sound/soc/msm/Kconfig @@ -112,7 +112,6 @@ config SND_SOC_INT_CODEC select MSM_CDC_PINCTRL select SND_SOC_MSM_SDW select SND_SOC_SDM660_CDC - select SND_SOC_MSM_HDMI_CODEC_RX select QTI_PP select DTS_SRS_TM select DOLBY_LICENSE @@ -144,7 +143,6 @@ config SND_SOC_EXT_CODEC select SND_SOC_WCD9335 select SND_SOC_WCD934X select SND_SOC_WSA881X - select SND_SOC_MSM_HDMI_CODEC_RX select MFD_CORE select QTI_PP select DTS_SRS_TM -- GitLab From 8ce8e9c068c994225df24ea728fc3fda1bb05b36 Mon Sep 17 00:00:00 2001 From: Rohit Kumar Date: Thu, 15 Jun 2017 15:57:51 +0530 Subject: [PATCH 527/786] pinctrl: lpi: Use gpiochip_get_data to get gpio state Use gpiochip_get_data to get gpio state instead of to_gpio_state to fix compilation error. Change-Id: I130fda1f6377eca0ec68d2941f90c1426729f14e Signed-off-by: Rohit Kumar --- drivers/pinctrl/qcom/pinctrl-lpi.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/pinctrl/qcom/pinctrl-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpi.c index fedd5f0aee22..11f954e1eb6c 100644 --- a/drivers/pinctrl/qcom/pinctrl-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-lpi.c @@ -448,6 +448,7 @@ static void lpi_gpio_dbg_show_one(struct seq_file *s, unsigned int offset, unsigned int gpio) { + struct lpi_gpio_state *state = gpiochip_get_data(chip); struct pinctrl_pin_desc pindesc; struct lpi_gpio_pad *pad; unsigned int func; @@ -463,7 +464,7 @@ static void lpi_gpio_dbg_show_one(struct seq_file *s, "pull up" }; - pctldev = pctldev ? : to_gpio_state(chip)->ctrl; + pctldev = pctldev ? : state->ctrl; pindesc = pctldev->desc->pins[offset]; pad = pctldev->desc->pins[offset].drv_data; ctl_reg = lpi_gpio_read(pad, LPI_GPIO_REG_DIR_CTL); -- GitLab From 9ba3a2b24bf4e373a17e8e72b1dbf9753d3555cc Mon Sep 17 00:00:00 2001 From: Rohit Kumar Date: Wed, 28 Jun 2017 14:07:19 +0530 Subject: [PATCH 528/786] ASoC: codecs: move DAPM widgets and routes out of codec driver structure Move DAPM widgets and routes from codec driver structure to component driver structure in soundwire and internal codec driver. These changes are in accordance with the upstream changes in ASoC between Linux-4.4 and Linux-4.9 versions. 
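The shape of the change is the same in all three drivers and can be summarized by the stripped-down codec driver below; the table names are placeholders for illustration, and the real tables are the ones touched in the hunks that follow.

    static struct snd_soc_codec_driver example_codec_drv = {
            .probe  = example_codec_probe,
            .remove = example_codec_remove,
            /* controls, widgets and routes now hang off the embedded
             * component driver rather than the codec driver itself
             */
            .component_driver = {
                    .controls         = example_snd_controls,
                    .num_controls     = ARRAY_SIZE(example_snd_controls),
                    .dapm_widgets     = example_dapm_widgets,
                    .num_dapm_widgets = ARRAY_SIZE(example_dapm_widgets),
                    .dapm_routes      = example_audio_map,
                    .num_dapm_routes  = ARRAY_SIZE(example_audio_map),
            },
    };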
Change-Id: I78ee4fccabad98fa9740db91711bb17f50b2e346 Signed-off-by: Rohit Kumar --- sound/soc/codecs/msm_sdw/msm_sdw_cdc.c | 14 ++++++++------ sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c | 14 ++++++++------ sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c | 14 ++++++++------ 3 files changed, 24 insertions(+), 18 deletions(-) diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c index 62fdb9485ade..af3aef3f4894 100644 --- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c +++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c @@ -1761,13 +1761,15 @@ static struct regmap *msm_sdw_get_regmap(struct device *dev) static struct snd_soc_codec_driver soc_codec_dev_msm_sdw = { .probe = msm_sdw_codec_probe, .remove = msm_sdw_codec_remove, - .controls = msm_sdw_snd_controls, - .num_controls = ARRAY_SIZE(msm_sdw_snd_controls), - .dapm_widgets = msm_sdw_dapm_widgets, - .num_dapm_widgets = ARRAY_SIZE(msm_sdw_dapm_widgets), - .dapm_routes = audio_map, - .num_dapm_routes = ARRAY_SIZE(audio_map), .get_regmap = msm_sdw_get_regmap, + .component_driver = { + .controls = msm_sdw_snd_controls, + .num_controls = ARRAY_SIZE(msm_sdw_snd_controls), + .dapm_widgets = msm_sdw_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(msm_sdw_dapm_widgets), + .dapm_routes = audio_map, + .num_dapm_routes = ARRAY_SIZE(audio_map), + }, }; static void msm_sdw_add_child_devices(struct work_struct *work) diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c index a8fcd347b38b..e8008e25c23b 100644 --- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c +++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c @@ -4320,13 +4320,15 @@ static struct snd_soc_codec_driver soc_codec_dev_sdm660_cdc = { .suspend = msm_anlg_cdc_suspend, .resume = msm_anlg_cdc_resume, .reg_word_size = 1, - .controls = msm_anlg_cdc_snd_controls, - .num_controls = ARRAY_SIZE(msm_anlg_cdc_snd_controls), - .dapm_widgets = msm_anlg_cdc_dapm_widgets, - .num_dapm_widgets = ARRAY_SIZE(msm_anlg_cdc_dapm_widgets), - .dapm_routes = audio_map, - .num_dapm_routes = ARRAY_SIZE(audio_map), .get_regmap = msm_anlg_get_regmap, + .component_driver = { + .controls = msm_anlg_cdc_snd_controls, + .num_controls = ARRAY_SIZE(msm_anlg_cdc_snd_controls), + .dapm_widgets = msm_anlg_cdc_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(msm_anlg_cdc_dapm_widgets), + .dapm_routes = audio_map, + .num_dapm_routes = ARRAY_SIZE(audio_map), + }, }; static int msm_anlg_cdc_init_supplies(struct sdm660_cdc_priv *sdm660_cdc, diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c index 3f9c0b4a5b83..1c9956d73038 100644 --- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c +++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c @@ -2037,13 +2037,15 @@ static struct snd_soc_codec_driver soc_msm_dig_codec = { .remove = msm_dig_cdc_soc_remove, .suspend = msm_dig_cdc_suspend, .resume = msm_dig_cdc_resume, - .controls = msm_dig_snd_controls, - .num_controls = ARRAY_SIZE(msm_dig_snd_controls), - .dapm_widgets = msm_dig_dapm_widgets, - .num_dapm_widgets = ARRAY_SIZE(msm_dig_dapm_widgets), - .dapm_routes = audio_dig_map, - .num_dapm_routes = ARRAY_SIZE(audio_dig_map), .get_regmap = msm_digital_get_regmap, + .component_driver = { + .controls = msm_dig_snd_controls, + .num_controls = ARRAY_SIZE(msm_dig_snd_controls), + .dapm_widgets = msm_dig_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(msm_dig_dapm_widgets), + .dapm_routes = audio_dig_map, + .num_dapm_routes = ARRAY_SIZE(audio_dig_map), + }, }; const struct 
regmap_config msm_digital_regmap_config = { -- GitLab From d5eb3e11295a37a9423ffd2bdf41f9f03f32013d Mon Sep 17 00:00:00 2001 From: Rohit Kumar Date: Wed, 28 Jun 2017 15:18:46 +0530 Subject: [PATCH 529/786] ASoC: codecs: use correct function to create subdir entry For soundwire and internal codecs, use the correct function to create a subdirectory under a given parent. Change-Id: I5ca8a179c5338f851caa15b8293eef8221a79a7b Signed-off-by: Rohit Kumar --- sound/soc/codecs/msm_sdw/msm_sdw_cdc.c | 2 +- sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c | 2 +- sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c index af3aef3f4894..1ce25ae28e83 100644 --- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c +++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c @@ -1383,7 +1383,7 @@ int msm_sdw_codec_info_create_codec_entry(struct snd_info_entry *codec_root, msm_sdw = snd_soc_codec_get_drvdata(codec); card = codec->component.card; - msm_sdw->entry = snd_register_module_info(codec_root->module, + msm_sdw->entry = snd_info_create_subdir(codec_root->module, "152c1000.msm-sdw-codec", codec_root); if (!msm_sdw->entry) { diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c index e8008e25c23b..7892f61e626d 100644 --- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c +++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c @@ -4052,7 +4052,7 @@ int msm_anlg_codec_info_create_codec_entry(struct snd_info_entry *codec_root, sdm660_cdc_priv = snd_soc_codec_get_drvdata(codec); card = codec->component.card; - sdm660_cdc_priv->entry = snd_register_module_info(codec_root->module, + sdm660_cdc_priv->entry = snd_info_create_subdir(codec_root->module, "spmi0-03", codec_root); if (!sdm660_cdc_priv->entry) { diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c index 1c9956d73038..68a1d8d47b39 100644 --- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c +++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c @@ -1157,7 +1157,7 @@ int msm_dig_codec_info_create_codec_entry(struct snd_info_entry *codec_root, msm_dig = snd_soc_codec_get_drvdata(codec); card = codec->component.card; - msm_dig->entry = snd_register_module_info(codec_root->module, + msm_dig->entry = snd_info_create_subdir(codec_root->module, "msm_digital_codec", codec_root); if (!msm_dig->entry) { -- GitLab From a477facf528628de1cfb61402d2a57df70828e29 Mon Sep 17 00:00:00 2001 From: Rohit Kumar Date: Mon, 19 Jun 2017 10:57:37 +0530 Subject: [PATCH 530/786] ASoC: msm: Add support to compile machine driver for SDM670 Add support in Kconfig to compile machine driver for sdm670. Also remove SDM660_COMMON Kconfig macro as it is invalid. 
Change-Id: Iddcadae94ea4e32f31a67a269fa5c5edc6fa7ab4 Signed-off-by: Rohit Kumar --- sound/soc/msm/Kconfig | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig index 33caa9e0da5e..abadd081bb4c 100644 --- a/sound/soc/msm/Kconfig +++ b/sound/soc/msm/Kconfig @@ -118,7 +118,6 @@ config SND_SOC_INT_CODEC select SND_HWDEP select MSM_ULTRASOUND select DTS_EAGLE - select SND_SOC_SDM660_COMMON select SND_SOC_COMPRESS select PINCTRL_LPI help @@ -152,7 +151,6 @@ config SND_SOC_EXT_CODEC select SND_HWDEP select MSM_ULTRASOUND select DTS_EAGLE - select SND_SOC_SDM660_COMMON select SND_SOC_COMPRESS select PINCTRL_LPI help @@ -231,12 +229,12 @@ config SND_SOC_MSM8998 DAI-links config SND_SOC_660 - tristate "SoC Machine driver for SDM660 boards" - depends on ARCH_SDM660 + tristate "SoC Machine driver for SDM660/670 boards" + depends on ARCH_SDM660 || ARCH_SDM670 select SND_SOC_INT_CODEC select SND_SOC_EXT_CODEC help - To add support for SoC audio on SDM660. + To add support for SoC audio on SDM660/670. This will enable sound soc drivers which interfaces with DSP, also it will enable the machine driver and the corresponding -- GitLab From 98184bbb8daea6af32208d63831e66023db4bb58 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Sat, 18 Feb 2017 19:00:45 -0500 Subject: [PATCH 531/786] ipv6: release dst on error in ip6_dst_lookup_tail commit 00ea1ceebe0d9f2dc1cc2b7bd575a00100c27869 upstream. If ip6_dst_lookup_tail has acquired a dst and fails the IPv4-mapped check, release the dst before returning an error. Fixes: ec5e3b0a1d41 ("ipv6: Inhibit IPv4-mapped src address on the wire.") Signed-off-by: Willem de Bruijn Acked-by: Eric Dumazet Signed-off-by: David S. Miller Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_output.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 3ab32ac57ccd..fd649599620e 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1020,8 +1020,10 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, } #endif if (ipv6_addr_v4mapped(&fl6->saddr) && - !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) - return -EAFNOSUPPORT; + !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) { + err = -EAFNOSUPPORT; + goto out_err_release; + } return 0; -- GitLab From d2f459e3feb0f73d2e95ab7892adcf22f21fe9ef Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Tue, 6 Jun 2017 15:56:54 +0200 Subject: [PATCH 532/786] net: don't call strlen on non-terminated string in dev_set_alias() [ Upstream commit c28294b941232931fbd714099798eb7aa7e865d7 ] KMSAN reported a use of uninitialized memory in dev_set_alias(), which was caused by calling strlcpy() (which in turn called strlen()) on the user-supplied non-terminated string. Signed-off-by: Alexander Potapenko Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/core/dev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index 2e04fd188081..97f806116ae9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1250,8 +1250,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) if (!new_ifalias) return -ENOMEM; dev->ifalias = new_ifalias; + memcpy(dev->ifalias, alias, len); + dev->ifalias[len] = 0; - strlcpy(dev->ifalias, alias, len+1); return len; } -- GitLab From fd9b13e6c175b01d61f0f234502919c6c40e4dd2 Mon Sep 17 00:00:00 2001 From: Mateusz Jurczyk Date: Wed, 7 Jun 2017 16:14:29 +0200 Subject: [PATCH 533/786] decnet: dn_rtmsg: Improve input length sanitization in dnrmg_receive_user_skb [ Upstream commit dd0da17b209ed91f39872766634ca967c170ada1 ] Verify that the length of the socket buffer is sufficient to cover the nlmsghdr structure before accessing the nlh->nlmsg_len field for further input sanitization. If the client only supplies 1-3 bytes of data in sk_buff, then nlh->nlmsg_len remains partially uninitialized and contains leftover memory from the corresponding kernel allocation. Operating on such data may result in indeterminate evaluation of the nlmsg_len < sizeof(*nlh) expression. The bug was discovered by a runtime instrumentation designed to detect use of uninitialized memory in the kernel. The patch prevents this and other similar tools (e.g. KMSAN) from flagging this behavior in the future. Signed-off-by: Mateusz Jurczyk Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/decnet/netfilter/dn_rtmsg.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index 85f2fdc360c2..29246bc9a7b4 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) { struct nlmsghdr *nlh = nlmsg_hdr(skb); - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) + if (skb->len < sizeof(*nlh) || + nlh->nlmsg_len < sizeof(*nlh) || + skb->len < nlh->nlmsg_len) return; if (!netlink_capable(skb, CAP_NET_ADMIN)) -- GitLab From b5cc68e0c1905a3cb94677a4d1b3e03f65881231 Mon Sep 17 00:00:00 2001 From: "Mintz, Yuval" Date: Wed, 7 Jun 2017 21:00:33 +0300 Subject: [PATCH 534/786] net: Zero ifla_vf_info in rtnl_fill_vfinfo() [ Upstream commit 0eed9cf58446b28b233388b7f224cbca268b6986 ] Some of the structure's fields are not initialized by the rtnetlink. If driver doesn't set those in ndo_get_vf_config(), they'd leak memory to user. Signed-off-by: Yuval Mintz CC: Michal Schmidt Reviewed-by: Greg Rose Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/rtnetlink.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1d9160794e55..d574409d4986 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1130,6 +1130,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, struct ifla_vf_mac vf_mac; struct ifla_vf_info ivi; + memset(&ivi, 0, sizeof(ivi)); + /* Not all SR-IOV capable drivers support the * spoofcheck and "RSS query enable" query. 
Preset to * -1 so the user space tool can detect that the driver @@ -1138,7 +1140,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb, ivi.spoofchk = -1; ivi.rss_query_en = -1; ivi.trusted = -1; - memset(ivi.mac, 0, sizeof(ivi.mac)); /* The default value for VF link state is "auto" * IFLA_VF_LINK_STATE_AUTO which equals zero */ -- GitLab From 386ed38f0f28b5dffe11c5665997882115fb788e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 8 Jun 2017 11:31:11 -0600 Subject: [PATCH 535/786] net: vrf: Make add_fib_rules per network namespace flag [ Upstream commit 097d3c9508dc58286344e4a22b300098cf0c1566 ] Commit 1aa6c4f6b8cd8 ("net: vrf: Add l3mdev rules on first device create") adds the l3mdev FIB rule the first time a VRF device is created. However, it only creates the rule once and only in the namespace the first device is created - which may not be init_net. Fix by using the net_generic capability to make the add_fib_rules flag per network namespace. Fixes: 1aa6c4f6b8cd8 ("net: vrf: Add l3mdev rules on first device create") Reported-by: Petr Machata Signed-off-by: David Ahern Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/vrf.c | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ee02605a0f89..642df9391193 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -36,12 +36,14 @@ #include #include #include +#include #define DRV_NAME "vrf" #define DRV_VERSION "1.0" #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ -static bool add_fib_rules = true; + +static unsigned int vrf_net_id; struct net_vrf { struct rtable __rcu *rth; @@ -1237,6 +1239,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_vrf *vrf = netdev_priv(dev); + bool *add_fib_rules; + struct net *net; int err; if (!data || !data[IFLA_VRF_TABLE]) @@ -1252,13 +1256,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, if (err) goto out; - if (add_fib_rules) { + net = dev_net(dev); + add_fib_rules = net_generic(net, vrf_net_id); + if (*add_fib_rules) { err = vrf_add_fib_rules(dev); if (err) { unregister_netdevice(dev); goto out; } - add_fib_rules = false; + *add_fib_rules = false; } out: @@ -1341,16 +1347,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = { .notifier_call = vrf_device_event, }; +/* Initialize per network namespace state */ +static int __net_init vrf_netns_init(struct net *net) +{ + bool *add_fib_rules = net_generic(net, vrf_net_id); + + *add_fib_rules = true; + + return 0; +} + +static struct pernet_operations vrf_net_ops __net_initdata = { + .init = vrf_netns_init, + .id = &vrf_net_id, + .size = sizeof(bool), +}; + static int __init vrf_init_module(void) { int rc; register_netdevice_notifier(&vrf_notifier_block); - rc = rtnl_link_register(&vrf_link_ops); + rc = register_pernet_subsys(&vrf_net_ops); if (rc < 0) goto error; + rc = rtnl_link_register(&vrf_link_ops); + if (rc < 0) { + unregister_pernet_subsys(&vrf_net_ops); + goto error; + } + return 0; error: -- GitLab From bb84290cd2967a5774a97fa44381713e20a7924c Mon Sep 17 00:00:00 2001 From: Mateusz Jurczyk Date: Thu, 8 Jun 2017 11:13:36 +0200 Subject: [PATCH 536/786] af_unix: Add sockaddr length checks before accessing sa_family in bind and connect handlers [ Upstream commit defbcf2decc903a28d8398aa477b6881e711e3ea ] Verify that the caller-provided sockaddr structure is large enough to 
contain the sa_family field, before accessing it in bind() and connect() handlers of the AF_UNIX socket. Since neither syscall enforces a minimum size of the corresponding memory region, very short sockaddrs (zero or one byte long) result in operating on uninitialized memory while referencing .sa_family. Signed-off-by: Mateusz Jurczyk Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/unix/af_unix.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 2d03d5bcb5b9..915abe98174e 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -998,7 +998,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct path path = { NULL, NULL }; err = -EINVAL; - if (sunaddr->sun_family != AF_UNIX) + if (addr_len < offsetofend(struct sockaddr_un, sun_family) || + sunaddr->sun_family != AF_UNIX) goto out; if (addr_len == sizeof(short)) { @@ -1109,6 +1110,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, unsigned int hash; int err; + err = -EINVAL; + if (alen < offsetofend(struct sockaddr, sa_family)) + goto out; + if (addr->sa_family != AF_UNSPEC) { err = unix_mkname(sunaddr, alen, &hash); if (err < 0) -- GitLab From c6d4ff85722b25877af48b311eda944dcc8c6feb Mon Sep 17 00:00:00 2001 From: Krister Johansen Date: Thu, 8 Jun 2017 13:12:38 -0700 Subject: [PATCH 537/786] Fix an intermittent pr_emerg warning about lo becoming free. [ Upstream commit f186ce61bb8235d80068c390dc2aad7ca427a4c2 ] It looks like this: Message from syslogd@flamingo at Apr 26 00:45:00 ... kernel:unregister_netdevice: waiting for lo to become free. Usage count = 4 They seem to coincide with net namespace teardown. The message is emitted by netdev_wait_allrefs(). Forced a kdump in netdev_run_todo, but found that the refcount on the lo device was already 0 at the time we got to the panic. Used bcc to check the blocking in netdev_run_todo. The only places where we're off cpu there are in the rcu_barrier() and msleep() calls. That behavior is expected. The msleep time coincides with the amount of time we spend waiting for the refcount to reach zero; the rcu_barrier() wait times are not excessive. After looking through the list of callbacks that the netdevice notifiers invoke in this path, it appears that the dst_dev_event is the most interesting. The dst_ifdown path places a hold on the loopback_dev as part of releasing the dev associated with the original dst cache entry. Most of our notifier callbacks are straight-forward, but this one a) looks complex, and b) places a hold on the network interface in question. I constructed a new bcc script that watches various events in the liftime of a dst cache entry. Note that dst_ifdown will take a hold on the loopback device until the invalidated dst entry gets freed. [ __dst_free] on DST: ffff883ccabb7900 IF tap1008300eth0 invoked at 1282115677036183 __dst_free rcu_nocb_kthread kthread ret_from_fork Acked-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/dst.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/net/core/dst.c b/net/core/dst.c index 656b70d39690..39cc11968cf9 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -470,6 +470,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, spin_lock_bh(&dst_garbage.lock); dst = dst_garbage.list; dst_garbage.list = NULL; + /* The code in dst_ifdown places a hold on the loopback device. 
+ * If the gc entry processing is set to expire after a lengthy + * interval, this hold can cause netdev_wait_allrefs() to hang + * out and wait for a long time -- until the the loopback + * interface is released. If we're really unlucky, it'll emit + * pr_emerg messages to console too. Reset the interval here, + * so dst cleanups occur in a more timely fashion. + */ + if (dst_garbage.timer_inc > DST_GC_INC) { + dst_garbage.timer_inc = DST_GC_INC; + dst_garbage.timer_expires = DST_GC_MIN; + mod_delayed_work(system_wq, &dst_gc_work, + dst_garbage.timer_expires); + } spin_unlock_bh(&dst_garbage.lock); if (last) -- GitLab From 8cda426a7cfa61b902c4335d1d1ab945bbcb41b6 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 10 Jun 2017 14:48:14 +0800 Subject: [PATCH 538/786] sctp: disable BH in sctp_for_each_endpoint [ Upstream commit 581409dacc9176b0de1f6c4ca8d66e13aa8e1b29 ] Now sctp holds read_lock when foreach sctp_ep_hashtable without disabling BH. If CPU schedules to another thread A at this moment, the thread A may be trying to hold the write_lock with disabling BH. As BH is disabled and CPU cannot schedule back to the thread holding the read_lock, while the thread A keeps waiting for the read_lock. A dead lock would be triggered by this. This patch is to fix this dead lock by calling read_lock_bh instead to disable BH when holding the read_lock in sctp_for_each_endpoint. Fixes: 626d16f50f39 ("sctp: export some apis or variables for sctp_diag and reuse some for proc") Reported-by: Xiumei Mu Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e1719c695174..0c5257efc6e4 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4460,13 +4460,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *), for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize; hash++, head++) { - read_lock(&head->lock); + read_lock_bh(&head->lock); sctp_for_each_hentry(epb, &head->chain) { err = cb(sctp_ep(epb), p); if (err) break; } - read_unlock(&head->lock); + read_unlock_bh(&head->lock); } return err; -- GitLab From bb566ce3a60eded40ae4a3421a59c0f5f1c7ef20 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 10 Jun 2017 16:49:39 +0800 Subject: [PATCH 539/786] net: caif: Fix a sleep-in-atomic bug in cfpkt_create_pfx [ Upstream commit f146e872eb12ebbe92d8e583b2637e0741440db3 ] The kernel may sleep under a rcu read lock in cfpkt_create_pfx, and the function call path is: cfcnfg_linkup_rsp (acquire the lock by rcu_read_lock) cfctrl_linkdown_req cfpkt_create cfpkt_create_pfx alloc_skb(GFP_KERNEL) --> may sleep cfserl_receive (acquire the lock by rcu_read_lock) cfpkt_split cfpkt_create_pfx alloc_skb(GFP_KERNEL) --> may sleep There is "in_interrupt" in cfpkt_create_pfx to decide use "GFP_KERNEL" or "GFP_ATOMIC". In this situation, "GFP_KERNEL" is used because the function is called under a rcu read lock, instead in interrupt. To fix it, only "GFP_ATOMIC" is used in cfpkt_create_pfx. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/caif/cfpkt_skbuff.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 59ce1fcc220c..71b6ab240dea 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx) { struct sk_buff *skb; - if (likely(in_interrupt())) - skb = alloc_skb(len + pfx, GFP_ATOMIC); - else - skb = alloc_skb(len + pfx, GFP_KERNEL); - + skb = alloc_skb(len + pfx, GFP_ATOMIC); if (unlikely(skb == NULL)) return NULL; -- GitLab From 57360bc3c7a6fc9c7422e422508bf77166a05028 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sat, 10 Jun 2017 17:03:35 +0800 Subject: [PATCH 540/786] net: tipc: Fix a sleep-in-atomic bug in tipc_msg_reverse [ Upstream commit 343eba69c6968190d8654b857aea952fed9a6749 ] The kernel may sleep under a rcu read lock in tipc_msg_reverse, and the function call path is: tipc_l2_rcv_msg (acquire the lock by rcu_read_lock) tipc_rcv tipc_sk_rcv tipc_msg_reverse pskb_expand_head(GFP_KERNEL) --> may sleep tipc_node_broadcast tipc_node_xmit_skb tipc_node_xmit tipc_sk_rcv tipc_msg_reverse pskb_expand_head(GFP_KERNEL) --> may sleep To fix it, "GFP_KERNEL" is replaced with "GFP_ATOMIC". Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/tipc/msg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 17201aa8423d..1bd9817be13a 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) } if (skb_cloned(_skb) && - pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL)) + pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) goto exit; /* Now reverse the concerned fields */ -- GitLab From 9854e58659908b4923d95b0fe3cd1db7ea62fe39 Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Mon, 15 May 2017 14:13:16 +0300 Subject: [PATCH 541/786] net/mlx5e: Added BW check for DIM decision mechanism [ Upstream commit c3164d2fc48fd4fa0477ab658b644559c3fe9073 ] DIM (Dynamically-tuned Interrupt Moderation) is a mechanism designed for changing the channel interrupt moderation values in order to reduce CPU overhead for all traffic types. Until now only interrupt and packet rate were sampled. We found a scenario on which we get a false indication since a change in DIM caused more aggregation and reduced packet rate while increasing BW. 
We now regard a change as succesfull iff: current_BW > (prev_BW + threshold) or current_BW ~= prev_BW and current_PR > (prev_PR + threshold) or current_BW ~= prev_BW and current_PR ~= prev_PR and current_IR < (prev_IR - threshold) Where BW = Bandwidth, PR = Packet rate and IR = Interrupt rate Improvements (ConnectX-4Lx 25GbE, single RX queue, LRO off) -------------------------------------------------- packet size | before[Mb/s] | after[Mb/s] | gain | 2B | 343.4 | 359.4 | 4.5% | 16B | 2739.7 | 2814.8 | 2.7% | 64B | 9739 | 10185.3 | 4.5% | Fixes: cb3c7fd4f839 ("net/mlx5e: Support adaptive RX coalescing") Signed-off-by: Tal Gilboa Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 + .../ethernet/mellanox/mlx5/core/en_rx_am.c | 37 ++++++++++--------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 21ce0b701143..6edc8b2db0cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -283,12 +283,14 @@ struct mlx5e_dma_info { struct mlx5e_rx_am_stats { int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ int epms; /* events per msec */ }; struct mlx5e_rx_am_sample { ktime_t time; unsigned int pkt_ctr; + unsigned int byte_ctr; u16 event_ctr; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index cbfac06b7ffd..17503883552b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) mlx5e_am_step(am); } +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, struct mlx5e_rx_am_stats *prev) { - int diff; - - if (!prev->ppms) - return curr->ppms ? MLX5E_AM_STATS_BETTER : + if (!prev->bpms) + return curr->bpms ? MLX5E_AM_STATS_BETTER : MLX5E_AM_STATS_SAME; - diff = curr->ppms - prev->ppms; - if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ - return (diff > 0) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; - if (!prev->epms) - return curr->epms ? MLX5E_AM_STATS_WORSE : - MLX5E_AM_STATS_SAME; + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; - diff = curr->epms - prev->epms; - if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ - return (diff < 0) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? 
MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; return MLX5E_AM_STATS_SAME; } @@ -266,6 +265,7 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq, { s->time = ktime_get(); s->pkt_ctr = rq->stats.packets; + s->byte_ctr = rq->stats.bytes; s->event_ctr = rq->cq.event_ctr; } @@ -278,12 +278,15 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, /* u32 holds up to 71 minutes, should be enough */ u32 delta_us = ktime_us_delta(end->time, start->time); unsigned int npkts = end->pkt_ctr - start->pkt_ctr; + unsigned int nbytes = end->byte_ctr - start->byte_ctr; if (!delta_us) return; - curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; - curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC, + delta_us); } void mlx5e_rx_am_work(struct work_struct *work) -- GitLab From 78b24ab695abafe4c5754a661a591b841661df8b Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Mon, 29 May 2017 17:02:55 +0300 Subject: [PATCH 542/786] net/mlx5e: Fix wrong indications in DIM due to counter wraparound [ Upstream commit 53acd76ce571e3b71f9205f2d49ab285a9f1aad8 ] DIM (Dynamically-tuned Interrupt Moderation) is a mechanism designed for changing the channel interrupt moderation values in order to reduce CPU overhead for all traffic types. Each iteration of the algorithm, DIM calculates the difference in throughput, packet rate and interrupt rate from last iteration in order to make a decision. DIM relies on counters for each metric. When these counters get to their type's max value they wraparound. In this case the delta between 'end' and 'start' samples is negative and when translated to unsigned integers - very high. This results in a false indication to the algorithm and might result in a wrong decision. The fix calculates the 'distance' between 'end' and 'start' samples in a cyclic way around the relevant type's max value. It can also be viewed as an absolute value around the type's max value instead of around 0. Testing show higher stability in DIM profile selection and no wraparound issues. 
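As a worked example of the cyclic distance the fix introduces, consider a u16 event counter that wrapped from 65530 to 5: the real advance is 11 events, while a plain unsigned subtraction would report an enormous jump. The standalone plain-C sketch below (illustrative only, outside the kernel) reproduces the BIT_GAP() arithmetic and prints 11.

    #include <stdint.h>
    #include <stdio.h>

    /* distance from 'start' to 'end' on a counter that wraps at 2^bits */
    static uint64_t bit_gap(unsigned int bits, uint64_t end, uint64_t start)
    {
            return ((end - start) + (1ULL << bits)) & ((1ULL << bits) - 1);
    }

    int main(void)
    {
            /* u16 counter wrapped from 65530 to 5: 11 real events */
            printf("%llu\n", (unsigned long long)bit_gap(16, 5, 65530));
            return 0;
    }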
Fixes: cb3c7fd4f839 ("net/mlx5e: Support adaptive RX coalescing") Signed-off-by: Tal Gilboa Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 8 ++++---- drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 10 +++++++--- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 6edc8b2db0cd..6180ad45be18 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -288,10 +288,10 @@ struct mlx5e_rx_am_stats { }; struct mlx5e_rx_am_sample { - ktime_t time; - unsigned int pkt_ctr; - unsigned int byte_ctr; - u16 event_ctr; + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; }; struct mlx5e_rx_am { /* Adaptive Moderation */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index 17503883552b..23ccec4cb7f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -270,6 +270,8 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq, } #define MLX5E_AM_NEVENTS 64 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, struct mlx5e_rx_am_sample *end, @@ -277,8 +279,9 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, { /* u32 holds up to 71 minutes, should be enough */ u32 delta_us = ktime_us_delta(end->time, start->time); - unsigned int npkts = end->pkt_ctr - start->pkt_ctr; - unsigned int nbytes = end->byte_ctr - start->byte_ctr; + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); if (!delta_us) return; @@ -311,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq) switch (am->state) { case MLX5E_AM_MEASURE_IN_PROGRESS: - nevents = rq->cq.event_ctr - am->start_sample.event_ctr; + nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr, + am->start_sample.event_ctr); if (nevents < MLX5E_AM_NEVENTS) break; mlx5e_am_sample(rq, &end_sample); -- GitLab From 059686754c1870f182ce55495b81728763732d48 Mon Sep 17 00:00:00 2001 From: Christian Perle Date: Mon, 12 Jun 2017 10:06:57 +0200 Subject: [PATCH 543/786] proc: snmp6: Use correct type in memset [ Upstream commit 3500cd73dff48f28f4ba80c171c4c80034d40f76 ] Reading /proc/net/snmp6 yields bogus values on 32 bit kernels. Use "u64" instead of "unsigned long" in sizeof(). Fixes: 4a4857b1c81e ("proc: Reduce cache miss in snmp6_seq_show") Signed-off-by: Christian Perle Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index cc8e3ae9ca73..e88bcb8ff0fd 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib, u64 buff64[SNMP_MIB_MAX]; int i; - memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX); + memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX); snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff); for (i = 0; itemlist[i].name; i++) -- GitLab From ecd6627f48bd2d8e0f85eee703b5b4609ed6f744 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Mon, 12 Jun 2017 09:52:26 -0700 Subject: [PATCH 544/786] igmp: acquire pmc lock for ip_mc_clear_src() [ Upstream commit c38b7d327aafd1e3ad7ff53eefac990673b65667 ] Andrey reported a use-after-free in add_grec(): for (psf = *psf_list; psf; psf = psf_next) { ... psf_next = psf->sf_next; where the struct ip_sf_list's were already freed by: kfree+0xe8/0x2b0 mm/slub.c:3882 ip_mc_clear_src+0x69/0x1c0 net/ipv4/igmp.c:2078 ip_mc_dec_group+0x19a/0x470 net/ipv4/igmp.c:1618 ip_mc_drop_socket+0x145/0x230 net/ipv4/igmp.c:2609 inet_release+0x4e/0x1c0 net/ipv4/af_inet.c:411 sock_release+0x8d/0x1e0 net/socket.c:597 sock_close+0x16/0x20 net/socket.c:1072 This happens because we don't hold pmc->lock in ip_mc_clear_src() and a parallel mr_ifc_timer timer could jump in and access them. The RCU lock is there but it is merely for pmc itself, this spinlock could actually ensure we don't access them in parallel. Thanks to Eric and Long for discussion on this bug. Reported-by: Andrey Konovalov Cc: Eric Dumazet Cc: Xin Long Signed-off-by: Cong Wang Reviewed-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/igmp.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 1bc623d7f754..8520eff4a5cc 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2071,21 +2071,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, static void ip_mc_clear_src(struct ip_mc_list *pmc) { - struct ip_sf_list *psf, *nextpsf; + struct ip_sf_list *psf, *nextpsf, *tomb, *sources; - for (psf = pmc->tomb; psf; psf = nextpsf) { + spin_lock_bh(&pmc->lock); + tomb = pmc->tomb; + pmc->tomb = NULL; + sources = pmc->sources; + pmc->sources = NULL; + pmc->sfmode = MCAST_EXCLUDE; + pmc->sfcount[MCAST_INCLUDE] = 0; + pmc->sfcount[MCAST_EXCLUDE] = 1; + spin_unlock_bh(&pmc->lock); + + for (psf = tomb; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } - pmc->tomb = NULL; - for (psf = pmc->sources; psf; psf = nextpsf) { + for (psf = sources; psf; psf = nextpsf) { nextpsf = psf->sf_next; kfree(psf); } - pmc->sources = NULL; - pmc->sfmode = MCAST_EXCLUDE; - pmc->sfcount[MCAST_INCLUDE] = 0; - pmc->sfcount[MCAST_EXCLUDE] = 1; } /* Join a multicast group -- GitLab From cac2a9bb4034f2395bdbe1ad2bd3f29a470e14f0 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 20 Jun 2017 10:46:27 -0700 Subject: [PATCH 545/786] igmp: add a missing spin_lock_init() [ Upstream commit b4846fc3c8559649277e3e4e6b5cec5348a8d208 ] Andrey reported a lockdep warning on non-initialized spinlock: INFO: trying to register non-static key. the code is fine but needs lockdep annotation. turning off the locking correctness validator. 
CPU: 1 PID: 4099 Comm: a.out Not tainted 4.12.0-rc6+ #9 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:16 dump_stack+0x292/0x395 lib/dump_stack.c:52 register_lock_class+0x717/0x1aa0 kernel/locking/lockdep.c:755 ? 0xffffffffa0000000 __lock_acquire+0x269/0x3690 kernel/locking/lockdep.c:3255 lock_acquire+0x22d/0x560 kernel/locking/lockdep.c:3855 __raw_spin_lock_bh ./include/linux/spinlock_api_smp.h:135 _raw_spin_lock_bh+0x36/0x50 kernel/locking/spinlock.c:175 spin_lock_bh ./include/linux/spinlock.h:304 ip_mc_clear_src+0x27/0x1e0 net/ipv4/igmp.c:2076 igmpv3_clear_delrec+0xee/0x4f0 net/ipv4/igmp.c:1194 ip_mc_destroy_dev+0x4e/0x190 net/ipv4/igmp.c:1736 We miss a spin_lock_init() in igmpv3_add_delrec(), probably because previously we never use it on this code path. Since we already unlink it from the global mc_tomb list, it is probably safe not to acquire this spinlock here. It does not harm to have it although, to avoid conditional locking. Fixes: c38b7d327aaf ("igmp: acquire pmc lock for ip_mc_clear_src()") Reported-by: Andrey Konovalov Signed-off-by: Cong Wang Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/igmp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 8520eff4a5cc..19930da56b0a 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); if (!pmc) return; + spin_lock_init(&pmc->lock); spin_lock_bh(&im->lock); pmc->interface = im->interface; in_dev_hold(in_dev); -- GitLab From fded2d74a3505f7daad70db4e8ffd87ceb366ecb Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 15 Jun 2017 16:33:58 +0800 Subject: [PATCH 546/786] ipv6: fix calling in6_ifa_hold incorrectly for dad work [ Upstream commit f8a894b218138888542a5058d0e902378fd0d4ec ] Now when starting the dad work in addrconf_mod_dad_work, if the dad work is idle and queued, it needs to hold ifa. The problem is there's one gap in [1], during which if the pending dad work is removed elsewhere. It will miss to hold ifa, but the dad word is still idea and queue. if (!delayed_work_pending(&ifp->dad_work)) in6_ifa_hold(ifp); <--------------[1] mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); An use-after-free issue can be caused by this. Chen Wei found this issue when WARN_ON(!hlist_unhashed(&ifp->addr_lst)) in net6_ifa_finish_destroy was hit because of it. As Hannes' suggestion, this patch is to fix it by holding ifa first in addrconf_mod_dad_work, then calling mod_delayed_work and putting ifa if the dad_work is already in queue. Note that this patch did not choose to fix it with: if (!mod_delayed_work(delay)) in6_ifa_hold(ifp); As with it, when delay == 0, dad_work would be scheduled immediately, all addrconf_mod_dad_work(0) callings had to be moved under ifp->lock. Reported-by: Wei Chen Suggested-by: Hannes Frederic Sowa Acked-by: Hannes Frederic Sowa Signed-off-by: Xin Long Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/addrconf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 045738319e8b..b6f4c42cc8ce 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -315,9 +315,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev, static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, unsigned long delay) { - if (!delayed_work_pending(&ifp->dad_work)) - in6_ifa_hold(ifp); - mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); + in6_ifa_hold(ifp); + if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay)) + in6_ifa_put(ifp); } static int snmp6_alloc_dev(struct inet6_dev *idev) -- GitLab From 4c246863e7b42eaecbaf90c319720bbf426b5958 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 15 Jun 2017 17:49:08 +0800 Subject: [PATCH 547/786] sctp: return next obj by passing pos + 1 into sctp_transport_get_idx [ Upstream commit 988c7322116970696211e902b468aefec95b6ec4 ] In sctp_for_each_transport, pos is used to save how many objs it has dumped. Now it gets the last obj by sctp_transport_get_idx, then gets the next obj by sctp_transport_get_next. The issue is that in the meanwhile if some objs in transport hashtable are removed and the objs nums are less than pos, sctp_transport_get_idx would return NULL and hti.walker.tbl is NULL as well. At this moment it should stop hti, instead of continue getting the next obj. Or it would cause a NULL pointer dereference in sctp_transport_get_next. This patch is to pass pos + 1 into sctp_transport_get_idx to get the next obj directly, even if pos > objs nums, it would return NULL and stop hti. Fixes: 626d16f50f39 ("sctp: export some apis or variables for sctp_diag and reuse some for proc") Signed-off-by: Xin Long Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0c5257efc6e4..487c127f4928 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4506,9 +4506,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *), if (err) return err; - sctp_transport_get_idx(net, &hti, pos); - obj = sctp_transport_get_next(net, &hti); - for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) { + obj = sctp_transport_get_idx(net, &hti, pos + 1); + for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) { struct sctp_transport *transport = obj; if (!sctp_transport_hold(transport)) -- GitLab From 176b9874a203ae170912b063999e2c00d56b9ee6 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 15 Jun 2017 20:08:32 +0300 Subject: [PATCH 548/786] net/mlx5e: Avoid doing a cleanup call if the profile doesn't have it [ Upstream commit 31ac93386d135a6c96de9c8bab406f5ccabf5a4d ] The error flow of mlx5e_create_netdev calls the cleanup call of the given profile without checking if it exists, fix that. Currently the VF reps don't register that callback and we crash if getting into error -- can be reproduced by the user doing ctrl^C while attempting to change the sriov mode from legacy to switchdev. 
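The shape of the fix is the common "optional hook" pattern: never call a per-profile callback without checking that the profile actually registered one. A small self-contained sketch, with struct and function names that are illustrative rather than the real mlx5e types:

#include <stdio.h>
#include <stddef.h>

/* A profile may leave optional hooks NULL (as the VF representors do for
 * cleanup), so every error path has to test the pointer before calling it. */
struct profile {
	int  (*init)(void);
	void (*cleanup)(void);      /* optional, may be NULL */
};

static int failing_init(void)
{
	return -1;                  /* simulate a failure midway through setup */
}

static int create_dev(const struct profile *p)
{
	if (p->init() == 0)
		return 0;
	/* error flow: only invoke the hook if the profile provides one */
	if (p->cleanup)
		p->cleanup();
	return -1;
}

int main(void)
{
	const struct profile rep_like = { .init = failing_init, .cleanup = NULL };

	if (create_dev(&rep_like))
		printf("init failed; cleanup skipped because none was registered\n");
	return 0;
}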
Fixes: 26e59d8077a3 '(net/mlx5e: Implement mlx5e interface attach/detach callbacks') Signed-off-by: Or Gerlitz Reported-by: Sabrina Dubroca Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d4fa851ced2a..ea582342dd8f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3846,7 +3846,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, return netdev; err_cleanup_nic: - profile->cleanup(priv); + if (profile->cleanup) + profile->cleanup(priv); free_netdev(netdev); return NULL; -- GitLab From 25ff35074e276b457f16c00f97afea41b6d5051d Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 8 Jun 2017 11:33:16 -0500 Subject: [PATCH 549/786] net/mlx5: Wait for FW readiness before initializing command interface [ Upstream commit 6c780a0267b8a1075f40b39851132eeaefefcff5 ] Before attempting to initialize the command interface we must wait till the fw_initializing bit is clear. If we fail to meet this condition the hardware will drop our configuration, specifically the descriptors page address. This scenario can happen when the firmware is still executing an FLR flow and did not finish yet so the driver needs to wait for that to finish. Fixes: e3297246c2c8 ('net/mlx5_core: Wait for FW readiness on startup') Signed-off-by: Eli Cohen Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d776db79e325..5bea0bf2b484 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -155,8 +155,9 @@ static struct mlx5_profile profile[] = { }, }; -#define FW_INIT_TIMEOUT_MILI 2000 -#define FW_INIT_WAIT_MS 2 +#define FW_INIT_TIMEOUT_MILI 2000 +#define FW_INIT_WAIT_MS 2 +#define FW_PRE_INIT_TIMEOUT_MILI 10000 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) { @@ -956,6 +957,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, */ dev->state = MLX5_DEVICE_STATE_UP; + /* wait for firmware to accept initialization segments configurations + */ + err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); + if (err) { + dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", + FW_PRE_INIT_TIMEOUT_MILI); + goto out; + } + err = mlx5_cmd_init(dev); if (err) { dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); -- GitLab From c7d422d68fe98627ea9f60d06e38dc7f1af302b9 Mon Sep 17 00:00:00 2001 From: Maor Dickman Date: Thu, 18 May 2017 15:15:08 +0300 Subject: [PATCH 550/786] net/mlx5e: Fix timestamping capabilities reporting [ Upstream commit f0b381178b01b831f9907d72f467d6443afdea67 ] Misuse of (BIT) macro caused to report wrong flags for "Hardware Transmit Timestamp Modes" and "Hardware Receive Filter Modes" Fixes: ef9814deafd0 ('net/mlx5e: Add HW timestamping (TS) support') Signed-off-by: Maor Dickman Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3744e2f79ecf..da1d73fe1a81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1183,11 +1183,11 @@ static int mlx5e_get_ts_info(struct net_device *dev, SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | - (BIT(1) << HWTSTAMP_TX_ON); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); - info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | - (BIT(1) << HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); return 0; } -- GitLab From f1a0e7d172b01e258a8c0ca6c67d003fbac54f64 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Fri, 16 Jun 2017 10:46:37 -0700 Subject: [PATCH 551/786] decnet: always not take dst->__refcnt when inserting dst into hash table [ Upstream commit 76371d2e3ad1f84426a30ebcd8c3b9b98f4c724f ] In the existing dn_route.c code, dn_route_output_slow() takes dst->__refcnt before calling dn_insert_route() while dn_route_input_slow() does not take dst->__refcnt before calling dn_insert_route(). This makes the whole routing code very buggy. In dn_dst_check_expire(), dnrt_free() is called when rt expires. This makes the routes inserted by dn_route_output_slow() not able to be freed as the refcnt is not released. In dn_dst_gc(), dnrt_drop() is called to release rt which could potentially cause the dst->__refcnt to be dropped to -1. In dn_run_flush(), dst_free() is called to release all the dst. Again, it makes the dst inserted by dn_route_output_slow() not able to be released and also, it does not wait on the rcu and could potentially cause crash in the path where other users still refer to this dst. This patch makes sure both input and output path do not take dst->__refcnt before calling dn_insert_route() and also makes sure dnrt_free()/dst_free() is called when removing dst from the hash table. The only difference between those 2 calls is that dnrt_free() waits on the rcu while dst_free() does not. Signed-off-by: Wei Wang Acked-by: Martin KaFai Lau Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/decnet/dn_route.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index b1dc096d22f8..403593bd2b83 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt) call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); } -static inline void dnrt_drop(struct dn_route *rt) -{ - dst_release(&rt->dst); - call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free); -} - static void dn_dst_check_expire(unsigned long dummy) { int i; @@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops) } *rtp = rt->dst.dn_next; rt->dst.dn_next = NULL; - dnrt_drop(rt); + dnrt_free(rt); break; } spin_unlock_bh(&dn_rt_hash_table[i].lock); @@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou dst_use(&rth->dst, now); spin_unlock_bh(&dn_rt_hash_table[hash].lock); - dnrt_drop(rt); + dst_free(&rt->dst); *rp = rth; return 0; } @@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy) for(; rt; rt = next) { next = rcu_dereference_raw(rt->dst.dn_next); RCU_INIT_POINTER(rt->dst.dn_next, NULL); - dst_free((struct dst_entry *)rt); + dnrt_free(rt); } nothing_to_declare: @@ -1187,7 +1181,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o if (dev_out->flags & IFF_LOOPBACK) flags |= RTCF_LOCAL; - rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST); + rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST); if (rt == NULL) goto e_nobufs; -- GitLab From 08058c258afba77abf1fe6f4d327d3154a2bc336 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Fri, 16 Jun 2017 15:00:02 +0800 Subject: [PATCH 552/786] net: 8021q: Fix one possible panic caused by BUG_ON in free_netdev [ Upstream commit 9745e362add89432d2c951272a99b0a5fe4348a9 ] The register_vlan_device would invoke free_netdev directly, when register_vlan_dev failed. It would trigger the BUG_ON in free_netdev if the dev was already registered. In this case, the netdev would be freed in netdev_run_todo later. So add one condition check now. Only when dev is not registered, then free it directly. The following is the part coredump when netdev_upper_dev_link failed in register_vlan_dev. I removed the lines which are too long. [ 411.237457] ------------[ cut here ]------------ [ 411.237458] kernel BUG at net/core/dev.c:7998! [ 411.237484] invalid opcode: 0000 [#1] SMP [ 411.237705] [last unloaded: 8021q] [ 411.237718] CPU: 1 PID: 12845 Comm: vconfig Tainted: G E 4.12.0-rc5+ #6 [ 411.237737] Hardware name: VMware, Inc. 
VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/02/2015 [ 411.237764] task: ffff9cbeb6685580 task.stack: ffffa7d2807d8000 [ 411.237782] RIP: 0010:free_netdev+0x116/0x120 [ 411.237794] RSP: 0018:ffffa7d2807dbdb0 EFLAGS: 00010297 [ 411.237808] RAX: 0000000000000002 RBX: ffff9cbeb6ba8fd8 RCX: 0000000000001878 [ 411.237826] RDX: 0000000000000001 RSI: 0000000000000282 RDI: 0000000000000000 [ 411.237844] RBP: ffffa7d2807dbdc8 R08: 0002986100029841 R09: 0002982100029801 [ 411.237861] R10: 0004000100029980 R11: 0004000100029980 R12: ffff9cbeb6ba9000 [ 411.238761] R13: ffff9cbeb6ba9060 R14: ffff9cbe60f1a000 R15: ffff9cbeb6ba9000 [ 411.239518] FS: 00007fb690d81700(0000) GS:ffff9cbebb640000(0000) knlGS:0000000000000000 [ 411.239949] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 411.240454] CR2: 00007f7115624000 CR3: 0000000077cdf000 CR4: 00000000003406e0 [ 411.240936] Call Trace: [ 411.241462] vlan_ioctl_handler+0x3f1/0x400 [8021q] [ 411.241910] sock_ioctl+0x18b/0x2c0 [ 411.242394] do_vfs_ioctl+0xa1/0x5d0 [ 411.242853] ? sock_alloc_file+0xa6/0x130 [ 411.243465] SyS_ioctl+0x79/0x90 [ 411.243900] entry_SYSCALL_64_fastpath+0x1e/0xa9 [ 411.244425] RIP: 0033:0x7fb69089a357 [ 411.244863] RSP: 002b:00007ffcd04e0fc8 EFLAGS: 00000202 ORIG_RAX: 0000000000000010 [ 411.245445] RAX: ffffffffffffffda RBX: 00007ffcd04e2884 RCX: 00007fb69089a357 [ 411.245903] RDX: 00007ffcd04e0fd0 RSI: 0000000000008983 RDI: 0000000000000003 [ 411.246527] RBP: 00007ffcd04e0fd0 R08: 0000000000000000 R09: 1999999999999999 [ 411.246976] R10: 000000000000053f R11: 0000000000000202 R12: 0000000000000004 [ 411.247414] R13: 00007ffcd04e1128 R14: 00007ffcd04e2888 R15: 0000000000000001 [ 411.249129] RIP: free_netdev+0x116/0x120 RSP: ffffa7d2807dbdb0 Signed-off-by: Gao Feng Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/8021q/vlan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index f2531ad66b68..8d213f974448 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) return 0; out_free_newdev: - free_netdev(new_dev); + if (new_dev->reg_state == NETREG_UNINITIALIZED) + free_netdev(new_dev); return err; } -- GitLab From e4089baa08c4a1fba87c19f8d018ecf032cab0b5 Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Fri, 16 Jun 2017 09:45:08 +0100 Subject: [PATCH 553/786] sfc: provide dummy definitions of vswitch functions efx_probe_all() calls efx->type->vswitching_probe during probe. For SFC4000 (Falcon) NICs this function is not defined, leading to a BUG with the top of the call stack similar to: ? efx_pci_probe_main+0x29a/0x830 efx_pci_probe+0x7d3/0xe70 vswitching_restore and vswitching_remove also need to be defined. 
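The fix shown in the diff below takes the complementary approach to NULL-checking each call site: install harmless dummy implementations so existing callers can keep invoking the hooks unconditionally. A standalone sketch of that pattern, with names that only loosely echo the driver:

#include <stdio.h>

struct nic_type {
	int  (*vswitching_probe)(void);
	void (*vswitching_remove)(void);
};

/* Dummy hooks: report success / do nothing, for hardware that has no
 * vswitching support at all. */
static int  dummy_op_int(void)  { return 0; }
static void dummy_op_void(void) { }

static const struct nic_type falcon_like = {
	.vswitching_probe  = dummy_op_int,
	.vswitching_remove = dummy_op_void,
};

int main(void)
{
	/* Callers need no conditionals and can never hit a NULL pointer. */
	if (falcon_like.vswitching_probe() == 0)
		printf("vswitching probe: no-op success\n");
	falcon_like.vswitching_remove();
	return 0;
}

Whether to NULL-check at the call site (as in the mlx5e fix earlier) or to fill in dummies is largely a question of how many call sites would otherwise need the check.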
Fixed in mainline by: commit 5a6681e22c14 ("sfc: separate out SFC4000 ("Falcon") support into new sfc-falcon driver") Fixes: 6d8aaaf6f798 ("sfc: create VEB vswitch and vport above default firmware setup") Signed-off-by: Bert Kenward Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/sfc/falcon.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c index 1a7092602aec..1bfb21465ace 100644 --- a/drivers/net/ethernet/sfc/falcon.c +++ b/drivers/net/ethernet/sfc/falcon.c @@ -2801,6 +2801,11 @@ const struct efx_nic_type falcon_a1_nic_type = { .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM, .mcdi_max_ver = -1, +#ifdef CONFIG_SFC_SRIOV + .vswitching_probe = efx_port_dummy_op_int, + .vswitching_restore = efx_port_dummy_op_int, + .vswitching_remove = efx_port_dummy_op_void, +#endif }; const struct efx_nic_type falcon_b0_nic_type = { @@ -2902,4 +2907,9 @@ const struct efx_nic_type falcon_b0_nic_type = { .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, .mcdi_max_ver = -1, .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, +#ifdef CONFIG_SFC_SRIOV + .vswitching_probe = efx_port_dummy_op_int, + .vswitching_restore = efx_port_dummy_op_int, + .vswitching_remove = efx_port_dummy_op_void, +#endif }; -- GitLab From b9ca9b0f551080aeb5adf7ab1b5f0c47c3e83f57 Mon Sep 17 00:00:00 2001 From: Serhey Popovych Date: Tue, 20 Jun 2017 13:29:25 +0300 Subject: [PATCH 554/786] ipv6: Do not leak throw route references [ Upstream commit 07f615574f8ac499875b21c1142f26308234a92c ] While commit 73ba57bfae4a ("ipv6: fix backtracking for throw routes") does good job on error propagation to the fib_rules_lookup() in fib rules core framework that also corrects throw routes handling, it does not solve route reference leakage problem happened when we return -EAGAIN to the fib_rules_lookup() and leave routing table entry referenced in arg->result. If rule with matched throw route isn't last matched in the list we overwrite arg->result losing reference on throw route stored previously forever. We also partially revert commit ab997ad40839 ("ipv6: fix the incorrect return value of throw route") since we never return routing table entry with dst.error == -EAGAIN when CONFIG_IPV6_MULTIPLE_TABLES is on. Also there is no point to check for RTF_REJECT flag since it is always set throw route. Fixes: 73ba57bfae4a ("ipv6: fix backtracking for throw routes") Signed-off-by: Serhey Popovych Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv6/fib6_rules.c | 22 ++++++---------------- net/ipv6/ip6_fib.c | 3 +-- 2 files changed, 7 insertions(+), 18 deletions(-) diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index eea23b57c6a5..ec849d88a662 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -32,7 +32,6 @@ struct fib6_rule { struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, int flags, pol_lookup_t lookup) { - struct rt6_info *rt; struct fib_lookup_arg arg = { .lookup_ptr = lookup, .flags = FIB_LOOKUP_NOREF, @@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, fib_rules_lookup(net->ipv6.fib6_rules_ops, flowi6_to_flowi(fl6), flags, &arg); - rt = arg.result; + if (arg.result) + return arg.result; - if (!rt) { - dst_hold(&net->ipv6.ip6_null_entry->dst); - return &net->ipv6.ip6_null_entry->dst; - } - - if (rt->rt6i_flags & RTF_REJECT && - rt->dst.error == -EAGAIN) { - ip6_rt_put(rt); - rt = net->ipv6.ip6_null_entry; - dst_hold(&rt->dst); - } - - return &rt->dst; + dst_hold(&net->ipv6.ip6_null_entry->dst); + return &net->ipv6.ip6_null_entry->dst; } static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, @@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, flp6->saddr = saddr; } err = rt->dst.error; - goto out; + if (err != -EAGAIN) + goto out; } again: ip6_rt_put(rt); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 8c88a37392d0..636d4d893085 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, struct rt6_info *rt; rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); - if (rt->rt6i_flags & RTF_REJECT && - rt->dst.error == -EAGAIN) { + if (rt->dst.error == -EAGAIN) { ip6_rt_put(rt); rt = net->ipv6.ip6_null_entry; dst_hold(&rt->dst); -- GitLab From 8e2316399b8faa87496886506f145ed988cf5c68 Mon Sep 17 00:00:00 2001 From: Serhey Popovych Date: Tue, 20 Jun 2017 14:35:23 +0300 Subject: [PATCH 555/786] rtnetlink: add IFLA_GROUP to ifla_policy [ Upstream commit db833d40ad3263b2ee3b59a1ba168bb3cfed8137 ] Network interface groups support added while ago, however there is no IFLA_GROUP attribute description in policy and netlink message size calculations until now. Add IFLA_GROUP attribute to the policy. Fixes: cbda10fa97d7 ("net_device: add support for network device groups") Signed-off-by: Serhey Popovych Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/core/rtnetlink.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d574409d4986..9c6fd7f83a4a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -937,6 +937,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ + nla_total_size(4) /* IFLA_LINK_NETNSID */ + + nla_total_size(4) /* IFLA_GROUP */ + nla_total_size(ext_filter_mask & RTEXT_FILTER_VF ? 
4 : 0) /* IFLA_NUM_VF */ + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */ @@ -1465,6 +1466,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_LINK_NETNSID] = { .type = NLA_S32 }, [IFLA_PROTO_DOWN] = { .type = NLA_U8 }, [IFLA_XDP] = { .type = NLA_NESTED }, + [IFLA_GROUP] = { .type = NLA_U32 }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { -- GitLab From ced7689be60ddcac4b1746212c547e8817c5ae5e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 3 Apr 2017 10:55:11 -0700 Subject: [PATCH 556/786] netfilter: xt_TCPMSS: add more sanity tests on tcph->doff commit 2638fd0f92d4397884fd991d8f4925cb3f081901 upstream. Denys provided an awesome KASAN report pointing to an use after free in xt_TCPMSS I have provided three patches to fix this issue, either in xt_TCPMSS or in xt_tcpudp.c. It seems xt_TCPMSS patch has the smallest possible impact. Signed-off-by: Eric Dumazet Reported-by: Denys Fedoryshchenko Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/netfilter/xt_TCPMSS.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 872db2d0e2a9..119e51fdcebc 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb, tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); tcp_hdrlen = tcph->doff * 4; - if (len < tcp_hdrlen) + if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr)) return -1; if (info->mss == XT_TCPMSS_CLAMP_PMTU) { @@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, if (len > tcp_hdrlen) return 0; + /* tcph->doff has 4 bits, do not wrap it to 0 */ + if (tcp_hdrlen >= 15 * 4) + return 0; + /* * MSS Option not found ?! add it.. */ -- GitLab From b89bd0c715c148ea3cfef6b250482a77225573b5 Mon Sep 17 00:00:00 2001 From: Eric Leblond Date: Thu, 11 May 2017 18:56:38 +0200 Subject: [PATCH 557/786] netfilter: synproxy: fix conntrackd interaction commit 87e94dbc210a720a34be5c1174faee5c84be963e upstream. This patch fixes the creation of connection tracking entry from netlink when synproxy is used. It was missing the addition of the synproxy extension. This was causing kernel crashes when a conntrack entry created by conntrackd was used after the switch of traffic from active node to the passive node. Signed-off-by: Eric Leblond Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nf_conntrack_netlink.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 27540455dc62..04111c1c3988 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -45,6 +45,8 @@ #include #include #include +#include +#include #ifdef CONFIG_NF_NAT_NEEDED #include #include @@ -1800,6 +1802,8 @@ ctnetlink_create_conntrack(struct net *net, nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); nf_ct_labels_ext_add(ct); + nfct_seqadj_ext_add(ct); + nfct_synproxy_ext_add(ct); /* we must add conntrack extensions before confirmation. */ ct->status |= IPS_CONFIRMED; -- GitLab From 955f270b6f5d7d830188de1f05f055180a8712dc Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Mon, 6 Mar 2017 22:29:14 +0800 Subject: [PATCH 558/786] NFSv4: fix a reference leak caused WARNING messages commit 366a1569bff3fe14abfdf9285e31e05e091745f5 upstream. 
Because nfs4_opendata_access() has close the state when access is denied, so the state isn't leak. Rather than revert the commit a974deee47, I'd like clean the strange state close. [ 1615.094218] ------------[ cut here ]------------ [ 1615.094607] WARNING: CPU: 0 PID: 23702 at lib/list_debug.c:31 __list_add_valid+0x8e/0xa0 [ 1615.094913] list_add double add: new=ffff9d7901d9f608, prev=ffff9d7901d9f608, next=ffff9d7901ee8dd0. [ 1615.095458] Modules linked in: nfsv4(E) nfs(E) nfsd(E) tun bridge stp llc fuse ip_set nfnetlink vmw_vsock_vmci_transport vsock f2fs snd_seq_midi snd_seq_midi_event fscrypto coretemp ppdev crct10dif_pclmul crc32_pclmul ghash_clmulni_intel intel_rapl_perf vmw_balloon snd_ens1371 joydev gameport snd_ac97_codec ac97_bus snd_seq snd_pcm snd_rawmidi snd_timer snd_seq_device snd soundcore nfit parport_pc parport acpi_cpufreq tpm_tis tpm_tis_core tpm i2c_piix4 vmw_vmci shpchp auth_rpcgss nfs_acl lockd(E) grace sunrpc(E) xfs libcrc32c vmwgfx drm_kms_helper ttm drm crc32c_intel mptspi e1000 serio_raw scsi_transport_spi mptscsih mptbase ata_generic pata_acpi fjes [last unloaded: nfs] [ 1615.097663] CPU: 0 PID: 23702 Comm: fstest Tainted: G W E 4.11.0-rc1+ #517 [ 1615.098015] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/02/2015 [ 1615.098807] Call Trace: [ 1615.099183] dump_stack+0x63/0x86 [ 1615.099578] __warn+0xcb/0xf0 [ 1615.099967] warn_slowpath_fmt+0x5f/0x80 [ 1615.100370] __list_add_valid+0x8e/0xa0 [ 1615.100760] nfs4_put_state_owner+0x75/0xc0 [nfsv4] [ 1615.101136] __nfs4_close+0x109/0x140 [nfsv4] [ 1615.101524] nfs4_close_state+0x15/0x20 [nfsv4] [ 1615.101949] nfs4_close_context+0x21/0x30 [nfsv4] [ 1615.102691] __put_nfs_open_context+0xb8/0x110 [nfs] [ 1615.103155] put_nfs_open_context+0x10/0x20 [nfs] [ 1615.103586] nfs4_file_open+0x13b/0x260 [nfsv4] [ 1615.103978] do_dentry_open+0x20a/0x2f0 [ 1615.104369] ? nfs4_copy_file_range+0x30/0x30 [nfsv4] [ 1615.104739] vfs_open+0x4c/0x70 [ 1615.105106] ? may_open+0x5a/0x100 [ 1615.105469] path_openat+0x623/0x1420 [ 1615.105823] do_filp_open+0x91/0x100 [ 1615.106174] ? __alloc_fd+0x3f/0x170 [ 1615.106568] do_sys_open+0x130/0x220 [ 1615.106920] ? __put_cred+0x3d/0x50 [ 1615.107256] SyS_open+0x1e/0x20 [ 1615.107588] entry_SYSCALL_64_fastpath+0x1a/0xa9 [ 1615.107922] RIP: 0033:0x7fab599069b0 [ 1615.108247] RSP: 002b:00007ffcf0600d78 EFLAGS: 00000246 ORIG_RAX: 0000000000000002 [ 1615.108575] RAX: ffffffffffffffda RBX: 00007fab59bcfae0 RCX: 00007fab599069b0 [ 1615.108896] RDX: 0000000000000200 RSI: 0000000000000200 RDI: 00007ffcf060255e [ 1615.109211] RBP: 0000000000040010 R08: 0000000000000000 R09: 0000000000000016 [ 1615.109515] R10: 00000000000006a1 R11: 0000000000000246 R12: 0000000000041000 [ 1615.109806] R13: 0000000000040010 R14: 0000000000001000 R15: 0000000000002710 [ 1615.110152] ---[ end trace 96ed63b1306bf2f3 ]--- Fixes: a974deee47 ("NFSv4: Fix memory and state leak in...") Signed-off-by: Kinglong Mee Signed-off-by: Anna Schumaker Cc: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/nfs4proc.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index fc9b04941739..4a64fa04f80a 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2343,8 +2343,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred, if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) return 0; - /* even though OPEN succeeded, access is denied. 
Close the file */ - nfs4_close_state(state, fmode); return -EACCES; } -- GitLab From 4ebe28d23d35df2e69542c0146a74d21834ef235 Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Thu, 27 Apr 2017 11:13:38 +0800 Subject: [PATCH 559/786] NFSv4.x/callback: Create the callback service through svc_create_pooled commit df807fffaabde625fa9adb82e3e5b88cdaa5709a upstream. As the comments for svc_set_num_threads() said, " Destroying threads relies on the service threads filling in rqstp->rq_task, which only the nfs ones do. Assumes the serv has been created using svc_create_pooled()." If creating service through svc_create(), the svc_pool_map_put() will be called in svc_destroy(), but the pool map isn't used. So that, the reference of pool map will be drop, the next using of pool map will get a zero npools. [ 137.992130] divide error: 0000 [#1] SMP [ 137.992148] Modules linked in: nfsd(E) nfsv4 nfs fscache fuse tun bridge stp llc ip_set nfnetlink vmw_vsock_vmci_transport vsock snd_seq_midi snd_seq_midi_event vmw_balloon coretemp crct10dif_pclmul crc32_pclmul ppdev ghash_clmulni_intel intel_rapl_perf joydev snd_ens1371 gameport snd_ac97_codec ac97_bus snd_seq snd_pcm snd_rawmidi snd_timer snd_seq_device snd soundcore parport_pc parport nfit acpi_cpufreq tpm_tis tpm_tis_core tpm vmw_vmci i2c_piix4 shpchp auth_rpcgss nfs_acl lockd(E) grace sunrpc(E) xfs libcrc32c vmwgfx drm_kms_helper ttm crc32c_intel drm e1000 mptspi scsi_transport_spi serio_raw mptscsih mptbase ata_generic pata_acpi [last unloaded: nfsd] [ 137.992336] CPU: 0 PID: 4514 Comm: rpc.nfsd Tainted: G E 4.11.0-rc8+ #536 [ 137.992777] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/02/2015 [ 137.993757] task: ffff955984101d00 task.stack: ffff9873c2604000 [ 137.994231] RIP: 0010:svc_pool_for_cpu+0x2b/0x80 [sunrpc] [ 137.994768] RSP: 0018:ffff9873c2607c18 EFLAGS: 00010246 [ 137.995227] RAX: 0000000000000000 RBX: ffff95598376f000 RCX: 0000000000000002 [ 137.995673] RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff9559944aec00 [ 137.996156] RBP: ffff9873c2607c18 R08: ffff9559944aec28 R09: 0000000000000000 [ 137.996609] R10: 0000000001080002 R11: 0000000000000000 R12: ffff95598376f010 [ 137.997063] R13: ffff95598376f018 R14: ffff9559944aec28 R15: ffff9559944aec00 [ 137.997584] FS: 00007f755529eb40(0000) GS:ffff9559bb600000(0000) knlGS:0000000000000000 [ 137.998048] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 137.998548] CR2: 000055f3aecd9660 CR3: 0000000084290000 CR4: 00000000001406f0 [ 137.999052] Call Trace: [ 137.999517] svc_xprt_do_enqueue+0xef/0x260 [sunrpc] [ 138.000028] svc_xprt_received+0x47/0x90 [sunrpc] [ 138.000487] svc_add_new_perm_xprt+0x76/0x90 [sunrpc] [ 138.000981] svc_addsock+0x14b/0x200 [sunrpc] [ 138.001424] ? recalc_sigpending+0x1b/0x50 [ 138.001860] ? __getnstimeofday64+0x41/0xd0 [ 138.002346] ? do_gettimeofday+0x29/0x90 [ 138.002779] write_ports+0x255/0x2c0 [nfsd] [ 138.003202] ? _copy_from_user+0x4e/0x80 [ 138.003676] ? write_recoverydir+0x100/0x100 [nfsd] [ 138.004098] nfsctl_transaction_write+0x48/0x80 [nfsd] [ 138.004544] __vfs_write+0x37/0x160 [ 138.004982] ? selinux_file_permission+0xd7/0x110 [ 138.005401] ? 
security_file_permission+0x3b/0xc0 [ 138.005865] vfs_write+0xb5/0x1a0 [ 138.006267] SyS_write+0x55/0xc0 [ 138.006654] entry_SYSCALL_64_fastpath+0x1a/0xa9 [ 138.007071] RIP: 0033:0x7f7554b9dc30 [ 138.007437] RSP: 002b:00007ffc9f92c788 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 [ 138.007807] RAX: ffffffffffffffda RBX: 0000000000000002 RCX: 00007f7554b9dc30 [ 138.008168] RDX: 0000000000000002 RSI: 00005640cd536640 RDI: 0000000000000003 [ 138.008573] RBP: 00007ffc9f92c780 R08: 0000000000000001 R09: 0000000000000002 [ 138.008918] R10: 0000000000000064 R11: 0000000000000246 R12: 0000000000000004 [ 138.009254] R13: 00005640cdbf77a0 R14: 00005640cdbf7720 R15: 00007ffc9f92c238 [ 138.009610] Code: 0f 1f 44 00 00 48 8b 87 98 00 00 00 55 48 89 e5 48 83 78 08 00 74 10 8b 05 07 42 02 00 83 f8 01 74 40 83 f8 02 74 19 31 c0 31 d2 b7 88 00 00 00 5d 89 d0 48 c1 e0 07 48 03 87 90 00 00 00 c3 [ 138.010664] RIP: svc_pool_for_cpu+0x2b/0x80 [sunrpc] RSP: ffff9873c2607c18 [ 138.011061] ---[ end trace b3468224cafa7d11 ]--- Signed-off-by: Kinglong Mee Signed-off-by: J. Bruce Fields Signed-off-by: Greg Kroah-Hartman --- fs/nfs/callback.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 484bebc20bca..0a2115084c3f 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -279,7 +279,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion) printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n", cb_info->users); - serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops); + serv = svc_create_pooled(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops); if (!serv) { printk(KERN_ERR "nfs_callback_create_svc: create service failed\n"); return ERR_PTR(-ENOMEM); -- GitLab From 8dc9f9dede5b92658a1bb32866e11905933d2b48 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 18 May 2017 17:28:49 +0200 Subject: [PATCH 560/786] xen/blkback: don't use xen_blkif_get() in xen-blkback kthread MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a24fa22ce22ae302b3bf8f7008896d52d5d57b8d upstream. There is no need to use xen_blkif_get()/xen_blkif_put() in the kthread of xen-blkback. Thread stopping is synchronous and using the blkif reference counting in the kthread will avoid to ever let the reference count drop to zero at the end of an I/O running concurrent to disconnecting and multiple rings. Setting ring->xenblkd to NULL after stopping the kthread isn't needed as the kthread does this already. 
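The underlying rule can be shown with a plain userspace analogy in which pthread_join() plays the role of the synchronous kthread_stop(): because the creator waits for the worker to finish, the worker never needs to pin the shared state with its own reference. All names below are invented for the sketch (compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool should_stop;

static void *worker(void *arg)
{
	int *shared = arg;          /* owned by the creator; no extra "get" needed */

	while (!atomic_load(&should_stop))
		(*shared)++;
	return NULL;
}

int main(void)
{
	int counter = 0;
	pthread_t t;

	if (pthread_create(&t, NULL, worker, &counter))
		return 1;
	atomic_store(&should_stop, true);
	pthread_join(t, NULL);                      /* synchronous: the worker is gone now */
	printf("counter ended at %d\n", counter);   /* safe to tear down the shared state */
	return 0;
}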
Signed-off-by: Juergen Gross Tested-by: Steven Haigh Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Greg Kroah-Hartman --- drivers/block/xen-blkback/blkback.c | 3 --- drivers/block/xen-blkback/xenbus.c | 1 - 2 files changed, 4 deletions(-) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index c42202d63567..d6eaaa25d1cc 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg) unsigned long timeout; int ret; - xen_blkif_get(blkif); - set_freezable(); while (!kthread_should_stop()) { if (try_to_freeze()) @@ -665,7 +663,6 @@ int xen_blkif_schedule(void *arg) print_stats(ring); ring->xenblkd = NULL; - xen_blkif_put(blkif); return 0; } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 9b69fe410c08..d8fc9c58e2a3 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -255,7 +255,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) if (ring->xenblkd) { kthread_stop(ring->xenblkd); wake_up(&ring->shutdown_wq); - ring->xenblkd = NULL; } /* The above kthread_stop() guarantees that at this point we -- GitLab From dbc808362b6cb2124f36b14ce354abcc64b6f1bb Mon Sep 17 00:00:00 2001 From: Russell Currey Date: Fri, 17 Feb 2017 14:33:01 +1100 Subject: [PATCH 561/786] drm/ast: Handle configuration without P2A bridge commit 71f677a91046599ece96ebab21df956ce909c456 upstream. The ast driver configures a window to enable access into BMC memory space in order to read some configuration registers. If this window is disabled, which it can be from the BMC side, the ast driver can't function. Closing this window is a necessity for security if a machine's host side and BMC side are controlled by different parties; i.e. a cloud provider offering machines "bare metal". A recent patch went in to try to check if that window is open but it does so by trying to access the registers in question and testing if the result is 0xffffffff. This method will trigger a PCIe error when the window is closed which on some systems will be fatal (it will trigger an EEH for example on POWER which will take out the device). This patch improves this in two ways: - First, if the firmware has put properties in the device-tree containing the relevant configuration information, we use these. - Otherwise, a bit in one of the SCU scratch registers (which are readable via the VGA register space and writeable by the BMC) will indicate if the BMC has closed the window. This bit has been defined by Y.C Chen from Aspeed. If the window is closed and the configuration isn't available from the device-tree, some sane defaults are used. Those defaults are hopefully sufficient for standard video modes used on a server. 
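The decision logic reduces to a three-way fallback, sketched standalone below; dt_read_u32() and p2a_window_open() are stand-ins for the real device-tree and register probes, not actual driver helpers:

#include <stdio.h>
#include <stdbool.h>
#include <inttypes.h>

enum config_mode { USE_DEFAULTS, USE_DT, USE_P2A };

/* Hypothetical probes: "is the property present in the device-tree?" and
 * "does the P2A window actually respond?". */
static bool dt_read_u32(const char *prop, uint32_t *val)
{
	(void)prop; (void)val;
	return false;               /* pretend no firmware-supplied property */
}

static bool p2a_window_open(void)
{
	return false;               /* pretend the BMC closed the window */
}

static enum config_mode detect_config_mode(uint32_t *scu_rev)
{
	*scu_rev = 0xffffffff;                       /* sane default */
	if (dt_read_u32("aspeed,scu-revision-id", scu_rev))
		return USE_DT;                       /* firmware told us; skip P2A */
	if (p2a_window_open())
		return USE_P2A;                      /* bridge open; read the SCU directly */
	return USE_DEFAULTS;                         /* window closed; fall back to defaults */
}

int main(void)
{
	uint32_t scu_rev;
	enum config_mode mode = detect_config_mode(&scu_rev);

	printf("config mode %d, scu_rev 0x%08" PRIx32 "\n", (int)mode, scu_rev);
	return 0;
}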
Signed-off-by: Russell Currey Acked-by: Joel Stanley Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Dave Airlie Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/ast/ast_drv.h | 6 +- drivers/gpu/drm/ast/ast_main.c | 264 ++++++++++++++++++++------------- drivers/gpu/drm/ast/ast_post.c | 7 +- 3 files changed, 168 insertions(+), 109 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 7abda94fc2cf..3bedcf7ddd2a 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -113,7 +113,11 @@ struct ast_private { struct ttm_bo_kmap_obj cache_kmap; int next_cursor; bool support_wide_screen; - bool DisableP2A; + enum { + ast_use_p2a, + ast_use_dt, + ast_use_defaults + } config_mode; enum ast_tx_chip tx_chip_type; u8 dp501_maxclk; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 533e762d036d..fb9976254224 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, return ret; } +static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) +{ + struct device_node *np = dev->pdev->dev.of_node; + struct ast_private *ast = dev->dev_private; + uint32_t data, jregd0, jregd1; + + /* Defaults */ + ast->config_mode = ast_use_defaults; + *scu_rev = 0xffffffff; + + /* Check if we have device-tree properties */ + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id", + scu_rev)) { + /* We do, disable P2A access */ + ast->config_mode = ast_use_dt; + DRM_INFO("Using device-tree for configuration\n"); + return; + } + + /* Not all families have a P2A bridge */ + if (dev->pdev->device != PCI_CHIP_AST2000) + return; + + /* + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge + * is disabled. We force using P2A if VGA only mode bit + * is set D[7] + */ + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff); + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) { + /* Double check it's actually working */ + data = ast_read32(ast, 0xf004); + if (data != 0xFFFFFFFF) { + /* P2A works, grab silicon revision */ + ast->config_mode = ast_use_p2a; + + DRM_INFO("Using P2A bridge for configuration\n"); + + /* Read SCU7c (silicon revision register) */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + *scu_rev = ast_read32(ast, 0x1207c); + return; + } + } + + /* We have a P2A bridge but it's disabled */ + DRM_INFO("P2A bridge disabled, using default configuration\n"); +} static int ast_detect_chip(struct drm_device *dev, bool *need_post) { struct ast_private *ast = dev->dev_private; - uint32_t data, jreg; + uint32_t jreg, scu_rev; + + /* + * If VGA isn't enabled, we need to enable now or subsequent + * access to the scratch registers will fail. 
We also inform + * our caller that it needs to POST the chip + * (Assumption: VGA not enabled -> need to POST) + */ + if (!ast_is_vga_enabled(dev)) { + ast_enable_vga(dev); + DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); + *need_post = true; + } else + *need_post = false; + + + /* Enable extended register access */ + ast_enable_mmio(dev); ast_open_key(ast); + /* Find out whether P2A works or whether to use device-tree */ + ast_detect_config_mode(dev, &scu_rev); + + /* Identify chipset */ if (dev->pdev->device == PCI_CHIP_AST1180) { ast->chip = AST1100; DRM_INFO("AST 1180 detected\n"); @@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->chip = AST2300; DRM_INFO("AST 2300 detected\n"); } else if (dev->pdev->revision >= 0x10) { - uint32_t data; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - - data = ast_read32(ast, 0x1207c); - switch (data & 0x0300) { + switch (scu_rev & 0x0300) { case 0x0200: ast->chip = AST1100; DRM_INFO("AST 1100 detected\n"); @@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } } - /* - * If VGA isn't enabled, we need to enable now or subsequent - * access to the scratch registers will fail. We also inform - * our caller that it needs to POST the chip - * (Assumption: VGA not enabled -> need to POST) - */ - if (!ast_is_vga_enabled(dev)) { - ast_enable_vga(dev); - ast_enable_mmio(dev); - DRM_INFO("VGA not enabled on entry, requesting chip POST\n"); - *need_post = true; - } else - *need_post = false; - - /* Check P2A Access */ - ast->DisableP2A = true; - data = ast_read32(ast, 0xf004); - if (data != 0xFFFFFFFF) - ast->DisableP2A = false; - /* Check if we support wide screen */ switch (ast->chip) { case AST1180: @@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast->support_wide_screen = true; else { ast->support_wide_screen = false; - if (ast->DisableP2A == false) { - /* Read SCU7c (silicon revision register) */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x1207c); - data &= 0x300; - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */ - ast->support_wide_screen = true; - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */ - ast->support_wide_screen = true; - } + if (ast->chip == AST2300 && + (scu_rev & 0x300) == 0x0) /* ast1300 */ + ast->support_wide_screen = true; + if (ast->chip == AST2400 && + (scu_rev & 0x300) == 0x100) /* ast1400 */ + ast->support_wide_screen = true; } break; } @@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) static int ast_get_dram_info(struct drm_device *dev) { + struct device_node *np = dev->pdev->dev.of_node; struct ast_private *ast = dev->dev_private; - uint32_t data, data2; - uint32_t denum, num, div, ref_pll; + uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; + uint32_t denum, num, div, ref_pll, dsel; - if (ast->DisableP2A) - { + switch (ast->config_mode) { + case ast_use_dt: + /* + * If some properties are missing, use reasonable + * defaults for AST2400 + */ + if (of_property_read_u32(np, "aspeed,mcr-configuration", + &mcr_cfg)) + mcr_cfg = 0x00000577; + if (of_property_read_u32(np, "aspeed,mcr-scu-mpll", + &mcr_scu_mpll)) + mcr_scu_mpll = 0x000050C0; + if (of_property_read_u32(np, "aspeed,mcr-scu-strap", + &mcr_scu_strap)) + mcr_scu_strap = 0; + break; + case ast_use_p2a: + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + mcr_cfg = ast_read32(ast, 0x10004); + 
mcr_scu_mpll = ast_read32(ast, 0x10120); + mcr_scu_strap = ast_read32(ast, 0x10170); + break; + case ast_use_defaults: + default: ast->dram_bus_width = 16; ast->dram_type = AST_DRAM_1Gx16; ast->mclk = 396; + return 0; } - else - { - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - data = ast_read32(ast, 0x10004); - - if (data & 0x40) - ast->dram_bus_width = 16; - else - ast->dram_bus_width = 32; - if (ast->chip == AST2300 || ast->chip == AST2400) { - switch (data & 0x03) { - case 0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; - case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (data & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; - break; - case 8: - if (data & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; - break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; - break; - } - } + if (mcr_cfg & 0x40) + ast->dram_bus_width = 16; + else + ast->dram_bus_width = 32; - data = ast_read32(ast, 0x10120); - data2 = ast_read32(ast, 0x10170); - if (data2 & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; - - denum = data & 0x1f; - num = (data & 0x3fe0) >> 5; - data = (data & 0xc000) >> 14; - switch (data) { - case 3: - div = 0x4; + if (ast->chip == AST2300 || ast->chip == AST2400) { + switch (mcr_cfg & 0x03) { + case 0: + ast->dram_type = AST_DRAM_512Mx16; break; - case 2: + default: case 1: - div = 0x2; + ast->dram_type = AST_DRAM_1Gx16; break; - default: - div = 0x1; + case 2: + ast->dram_type = AST_DRAM_2Gx16; + break; + case 3: + ast->dram_type = AST_DRAM_4Gx16; + break; + } + } else { + switch (mcr_cfg & 0x0c) { + case 0: + case 4: + ast->dram_type = AST_DRAM_512Mx16; + break; + case 8: + if (mcr_cfg & 0x40) + ast->dram_type = AST_DRAM_1Gx16; + else + ast->dram_type = AST_DRAM_512Mx32; + break; + case 0xc: + ast->dram_type = AST_DRAM_1Gx32; break; } - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); } + + if (mcr_scu_strap & 0x2000) + ref_pll = 14318; + else + ref_pll = 12000; + + denum = mcr_scu_mpll & 0x1f; + num = (mcr_scu_mpll & 0x3fe0) >> 5; + dsel = (mcr_scu_mpll & 0xc000) >> 14; + switch (dsel) { + case 3: + div = 0x4; + break; + case 2: + case 1: + div = 0x2; + break; + default: + div = 0x1; + break; + } + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000); return 0; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 270e8fb2803f..c7c58becb25d 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev) ast_enable_mmio(dev); ast_set_def_ext_reg(dev); - if (ast->DisableP2A == false) - { + if (ast->config_mode == ast_use_p2a) { if (ast->chip == AST2300 || ast->chip == AST2400) ast_init_dram_2300(dev); else ast_init_dram_reg(dev); ast_init_3rdtx(dev); - } - else - { + } else { if (ast->tx_chip_type != AST_TX_NONE) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ } -- GitLab From b1355226a64e6301ca63aee1e78728887e3527f1 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Fri, 7 Apr 2017 16:05:00 -0700 Subject: [PATCH 562/786] mm, swap_cgroup: reschedule when neeed in swap_cgroup_swapoff() commit 460bcec84e11c75122ace5976214abbc596eb91b upstream. We got need_resched() warnings in swap_cgroup_swapoff() because swap_cgroup_ctrl[type].length is particularly large. Reschedule when needed. 
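As a userspace analogy of the pattern (sched_yield() standing in for cond_resched(), and the batch size chosen arbitrarily), a long teardown loop simply yields every N iterations:

#include <sched.h>
#include <stdio.h>

#define YIELD_EVERY 256        /* arbitrary batch size, akin to SWAP_CLUSTER_MAX */

int main(void)
{
	unsigned long freed = 0;

	for (unsigned long i = 0; i < 1000000; i++) {
		freed++;                       /* stand-in for freeing one page */
		if (!(i % YIELD_EVERY))
			sched_yield();         /* give the scheduler a chance to run others */
	}
	printf("freed %lu entries\n", freed);
	return 0;
}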
Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1704061315270.80559@chino.kir.corp.google.com Signed-off-by: David Rientjes Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Vladimir Davydov Cc: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- mm/swap_cgroup.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 454d6d7509ed..3405b4ee1757 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -204,6 +204,8 @@ void swap_cgroup_swapoff(int type) struct page *page = map[i]; if (page) __free_page(page); + if (!(i % SWAP_CLUSTER_MAX)) + cond_resched(); } vfree(map); } -- GitLab From 3d4ac49a9538c36d9c3c121ddbcb4c3958dee5e9 Mon Sep 17 00:00:00 2001 From: Karl Beldan Date: Tue, 27 Jun 2017 19:22:16 +0000 Subject: [PATCH 563/786] MIPS: head: Reorder instructions missing a delay slot commit 25d8b92e0af75d72ce8b99e63e5a449cc0888efa upstream. In this sequence the 'move' is assumed in the delay slot of the 'beq', but head.S is in reorder mode and the former gets pushed one 'nop' farther by the assembler. The corrected behavior made booting with an UHI supplied dtb erratic. Fixes: 15f37e158892 ("MIPS: store the appended dtb address in a variable") Signed-off-by: Karl Beldan Reviewed-by: James Hogan Cc: Jonas Gorski Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/16614/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/head.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S index cf052204eb0a..d1bb506adc10 100644 --- a/arch/mips/kernel/head.S +++ b/arch/mips/kernel/head.S @@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point beq t0, t1, dtb_found #endif li t1, -2 - beq a0, t1, dtb_found move t2, a1 + beq a0, t1, dtb_found li t2, 0 dtb_found: -- GitLab From f7d3d40ea1242f633bbf093b63181acff2da319e Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 29 Jun 2017 15:05:04 +0100 Subject: [PATCH 564/786] MIPS: Avoid accidental raw backtrace commit 854236363370995a609a10b03e35fd3dc5e9e4a1 upstream. Since commit 81a76d7119f6 ("MIPS: Avoid using unwind_stack() with usermode") show_backtrace() invokes the raw backtracer when cp0_status & ST0_KSU indicates user mode to fix issues on EVA kernels where user and kernel address spaces overlap. However this is used by show_stack() which creates its own pt_regs on the stack and leaves cp0_status uninitialised in most of the code paths. This results in the non deterministic use of the raw back tracer depending on the previous stack content. show_stack() deals exclusively with kernel mode stacks anyway, so explicitly initialise regs.cp0_status to KSU_KERNEL (i.e. 0) to ensure we get a useful backtrace. 
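The hazard is generic: a stack-allocated struct whose unset field steers a branch makes the outcome depend on stale stack contents. A tiny illustration, where fake_regs and the 0x18 "user mode" mask are invented for the sketch (the real code initialises cp0_status to KSU_KERNEL):

#include <stdio.h>
#include <string.h>

struct fake_regs {
	unsigned long status;      /* decides which backtracer gets used */
	unsigned long sp;
};

static void show_backtrace(const struct fake_regs *r)
{
	if (r->status & 0x18)      /* pretend 0x18 is the user-mode mask */
		printf("raw backtrace path\n");
	else
		printf("unwinder path\n");
}

int main(void)
{
	struct fake_regs regs;

	/* Without this explicit init, 'status' would be whatever happened to
	 * be on the stack and the branch above would be non-deterministic. */
	memset(&regs, 0, sizeof(regs));
	regs.sp = 0x1234;
	show_backtrace(&regs);     /* now deterministically takes the unwinder path */
	return 0;
}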
Fixes: 81a76d7119f6 ("MIPS: Avoid using unwind_stack() with usermode") Signed-off-by: James Hogan Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/16656/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/traps.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index ec87ef93267b..b0b29cb6f3d8 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -199,6 +199,8 @@ void show_stack(struct task_struct *task, unsigned long *sp) { struct pt_regs regs; mm_segment_t old_fs = get_fs(); + + regs.cp0_status = KSU_KERNEL; if (sp) { regs.regs[29] = (unsigned long)sp; regs.regs[31] = 0; -- GitLab From e9e24faf823e58713115974ab50102319c33a34d Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 2 Mar 2017 14:02:40 -0800 Subject: [PATCH 565/786] MIPS: pm-cps: Drop manual cache-line alignment of ready_count commit 161c51ccb7a6faf45ffe09aa5cf1ad85ccdad503 upstream. We allocate memory for a ready_count variable per-CPU, which is accessed via a cached non-coherent TLB mapping to perform synchronisation between threads within the core using LL/SC instructions. In order to ensure that the variable is contained within its own data cache line we allocate 2 lines worth of memory & align the resulting pointer to a line boundary. This is however unnecessary, since kmalloc is guaranteed to return memory which is at least cache-line aligned (see ARCH_DMA_MINALIGN). Stop the redundant manual alignment. Besides cleaning up the code & avoiding needless work, this has the side effect of avoiding an arithmetic error found by Bryan on 64 bit systems due to the 32 bit size of the former dlinesz. This led the ready_count variable to have its upper 32b cleared erroneously for MIPS64 kernels, causing problems when ready_count was later used on MIPS64 via cpuidle. Signed-off-by: Paul Burton Fixes: 3179d37ee1ed ("MIPS: pm-cps: add PM state entry code for CPS systems") Reported-by: Bryan O'Donoghue Reviewed-by: Bryan O'Donoghue Tested-by: Bryan O'Donoghue Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15383/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/pm-cps.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index 7cf653e21423..60c4d4599639 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); * state. Actually per-core rather than per-CPU. 
*/ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); -static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); @@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu) { enum cps_pm_state state; unsigned core = cpu_data[cpu].core; - unsigned dlinesz = cpu_data[cpu].dcache.linesz; void *entry_fn, *core_rc; for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { @@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu) } if (!per_cpu(ready_count, core)) { - core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); + core_rc = kmalloc(sizeof(u32), GFP_KERNEL); if (!core_rc) { pr_err("Failed allocate core %u ready_count\n", core); return -ENOMEM; } - per_cpu(ready_count_alloc, core) = core_rc; - - /* Ensure ready_count is aligned to a cacheline boundary */ - core_rc += dlinesz - 1; - core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); per_cpu(ready_count, core) = core_rc; } -- GitLab From dad3135e762bdb66318fe5ab902db5c4fbb1ad2f Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Fri, 3 Mar 2017 15:26:05 -0800 Subject: [PATCH 566/786] MIPS: Fix IRQ tracing & lockdep when rescheduling commit d8550860d910c6b7b70f830f59003b33daaa52c9 upstream. When the scheduler sets TIF_NEED_RESCHED & we call into the scheduler from arch/mips/kernel/entry.S we disable interrupts. This is true regardless of whether we reach work_resched from syscall_exit_work, resume_userspace or by looping after calling schedule(). Although we disable interrupts in these paths we don't call trace_hardirqs_off() before calling into C code which may acquire locks, and we therefore leave lockdep with an inconsistent view of whether interrupts are disabled or not when CONFIG_PROVE_LOCKING & CONFIG_DEBUG_LOCKDEP are both enabled. Without tracing this interrupt state lockdep will print warnings such as the following once a task returns from a syscall via syscall_exit_partial with TIF_NEED_RESCHED set: [ 49.927678] ------------[ cut here ]------------ [ 49.934445] WARNING: CPU: 0 PID: 1 at kernel/locking/lockdep.c:3687 check_flags.part.41+0x1dc/0x1e8 [ 49.946031] DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled) [ 49.946355] CPU: 0 PID: 1 Comm: init Not tainted 4.10.0-00439-gc9fd5d362289-dirty #197 [ 49.963505] Stack : 0000000000000000 ffffffff81bb5d6a 0000000000000006 ffffffff801ce9c4 [ 49.974431] 0000000000000000 0000000000000000 0000000000000000 000000000000004a [ 49.985300] ffffffff80b7e487 ffffffff80a24498 a8000000ff160000 ffffffff80ede8b8 [ 49.996194] 0000000000000001 0000000000000000 0000000000000000 0000000077c8030c [ 50.007063] 000000007fd8a510 ffffffff801cd45c 0000000000000000 a8000000ff127c88 [ 50.017945] 0000000000000000 ffffffff801cf928 0000000000000001 ffffffff80a24498 [ 50.028827] 0000000000000000 0000000000000001 0000000000000000 0000000000000000 [ 50.039688] 0000000000000000 a8000000ff127bd0 0000000000000000 ffffffff805509bc [ 50.050575] 00000000140084e0 0000000000000000 0000000000000000 0000000000040a00 [ 50.061448] 0000000000000000 ffffffff8010e1b0 0000000000000000 ffffffff805509bc [ 50.072327] ... 
[ 50.076087] Call Trace: [ 50.079869] [] show_stack+0x80/0xa8 [ 50.086577] [] dump_stack+0x10c/0x190 [ 50.093498] [] __warn+0xf0/0x108 [ 50.099889] [] warn_slowpath_fmt+0x3c/0x48 [ 50.107241] [] check_flags.part.41+0x1dc/0x1e8 [ 50.114961] [] lock_is_held_type+0x8c/0xb0 [ 50.122291] [] __schedule+0x8c0/0x10f8 [ 50.129221] [] schedule+0x30/0x98 [ 50.135659] [] work_resched+0x8/0x34 [ 50.142397] ---[ end trace 0cb4f6ef5b99fe21 ]--- [ 50.148405] possible reason: unannotated irqs-off. [ 50.154600] irq event stamp: 400463 [ 50.159566] hardirqs last enabled at (400463): [] _raw_spin_unlock_irqrestore+0x40/0xa8 [ 50.171981] hardirqs last disabled at (400462): [] _raw_spin_lock_irqsave+0x30/0xb0 [ 50.183897] softirqs last enabled at (400450): [] __do_softirq+0x4ac/0x6a8 [ 50.195015] softirqs last disabled at (400425): [] irq_exit+0x110/0x128 Fix this by using the TRACE_IRQS_OFF macro to call trace_hardirqs_off() when CONFIG_TRACE_IRQFLAGS is enabled. This is done before invoking schedule() following the work_resched label because: 1) Interrupts are disabled regardless of the path we take to reach work_resched() & schedule(). 2) Performing the tracing here avoids the need to do it in paths which disable interrupts but don't call out to C code before hitting a path which uses the RESTORE_SOME macro that will call trace_hardirqs_on() or trace_hardirqs_off() as appropriate. We call trace_hardirqs_on() using the TRACE_IRQS_ON macro before calling syscall_trace_leave() for similar reasons, ensuring that lockdep has a consistent view of state after we re-enable interrupts. Signed-off-by: Paul Burton Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/15385/ Signed-off-by: Ralf Baechle Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/entry.S | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 7791840cf22c..db07793f7b43 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -137,6 +138,7 @@ work_pending: andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS beqz t0, work_notifysig work_resched: + TRACE_IRQS_OFF jal schedule local_irq_disable # make sure need_resched and @@ -173,6 +175,7 @@ syscall_exit_work: beqz t0, work_pending # trace bit set? local_irq_enable # could let syscall_trace_leave() # call schedule() instead + TRACE_IRQS_ON move a0, sp jal syscall_trace_leave b resume_userspace -- GitLab From 093750c3dec46a1d440098341e531e3e7c17a96d Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 28 Jun 2017 12:02:02 +0200 Subject: [PATCH 567/786] ALSA: hda - Fix endless loop of codec configure commit d94815f917da770d42c377786dc428f542e38f71 upstream. azx_codec_configure() loops over the codecs found on the given controller via a linked list. The code used to work in the past, but in the current version, this may lead to an endless loop when a codec binding returns an error. The culprit is that the snd_hda_codec_configure() unregisters the device upon error, and this eventually deletes the given codec object from the bus. Since the list is initialized via list_del_init(), the next object points to the same device itself. This behavior change was introduced at splitting the HD-audio code code, and forgotten to adapt it here. For fixing this bug, just use a *_safe() version of list iteration. 
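The underlying hazard is the usual one with intrusive lists: after list_del_init() the removed entry's next pointer refers back to itself, so an iterator that re-reads it after the loop body never advances. A self-contained userspace sketch of the _safe pattern, assuming simplified list helpers and a printf in place of snd_hda_codec_configure() (stand-ins, not the <linux/list.h> or HDA code):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel's <linux/list.h> helpers. */
struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n;
	n->prev = n;	/* the self link that traps a plain iterator */
}

struct codec { int addr; struct list_head list; };
#define to_codec(p) ((struct codec *)((char *)(p) - offsetof(struct codec, list)))

int main(void)
{
	struct list_head bus = { &bus, &bus };
	struct codec a = { .addr = 0 }, b = { .addr = 1 };
	struct list_head *p, *n;

	list_add_tail(&a.list, &bus);
	list_add_tail(&b.list, &bus);

	/* The _safe shape: cache the successor before the body can delete p. */
	for (p = bus.next, n = p->next; p != &bus; p = n, n = p->next) {
		printf("configure codec %d\n", to_codec(p)->addr);
		list_del_init(p);	/* what a failed configure may trigger */
	}
	return 0;
}

Caching n before the body runs is the whole trick: with a plain iterator, reading p->next after the delete yields p itself, and the loop keeps handing back the entry that was just removed.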
Fixes: d068ebc25e6e ("ALSA: hda - Move some codes up to hdac_bus struct") Reported-by: Daniel Vetter Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/hda_codec.h | 2 ++ sound/pci/hda/hda_controller.c | 8 ++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h index 373fcad840ea..776dffa88aee 100644 --- a/sound/pci/hda/hda_codec.h +++ b/sound/pci/hda/hda_codec.h @@ -294,6 +294,8 @@ struct hda_codec { #define list_for_each_codec(c, bus) \ list_for_each_entry(c, &(bus)->core.codec_list, core.list) +#define list_for_each_codec_safe(c, n, bus) \ + list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list) /* snd_hda_codec_read/write optional flags */ #define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0) diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index 500878556578..0af1132a869e 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -1333,8 +1333,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs); /* configure each codec instance */ int azx_codec_configure(struct azx *chip) { - struct hda_codec *codec; - list_for_each_codec(codec, &chip->bus) { + struct hda_codec *codec, *next; + + /* use _safe version here since snd_hda_codec_configure() deregisters + * the device upon error and deletes itself from the bus list. + */ + list_for_each_codec_safe(codec, next, &chip->bus) { snd_hda_codec_configure(codec); } return 0; -- GitLab From 7d0e27fe24c55dda16ad579db5a0234b3ff97770 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Wed, 28 Jun 2017 08:59:16 +0800 Subject: [PATCH 568/786] ALSA: hda - set input_path bitmap to zero after moving it to new place commit a8f20fd25bdce81a8e41767c39f456d346b63427 upstream. Recently we met a problem, the codec has valid adcs and input pins, and they can form valid input paths, but the driver does not build valid controls for them like "Mic boost", "Capture Volume" and "Capture Switch". Through debugging, I found the driver needs to shrink the invalid adcs and input paths for this machine, so it will move the whole column bitmap value to the previous column, after moving it, the driver forgets to set the original column bitmap value to zero, as a result, the driver will invalidate the path whose index value is the original colume bitmap value. After executing this function, all valid input paths are invalidated by a mistake, there are no any valid input paths, so the driver won't build controls for them. Fixes: 3a65bcdc577a ("ALSA: hda - Fix inconsistent input_paths after ADC reduction") Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/hda_generic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index e7c8f4f076d5..b0bd29003b5d 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -3169,6 +3169,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec) spec->input_paths[i][nums]); spec->input_paths[i][nums] = spec->input_paths[i][n]; + spec->input_paths[i][n] = 0; } } nums++; -- GitLab From cb2c6fdf620f4802c31d6577ff34391fdd949cc6 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 27 Jun 2017 17:33:38 -0400 Subject: [PATCH 569/786] NFSv4.1: Fix a race in nfs4_proc_layoutget commit bd171930e6a3de4f5cffdafbb944e50093dfb59b upstream. 
If the task calling layoutget is signalled, then it is possible for the calls to nfs4_sequence_free_slot() and nfs4_layoutget_prepare() to race, in which case we leak a slot. The fix is to move the call to nfs4_sequence_free_slot() into the nfs4_layoutget_release() so that it gets called at task teardown time. Fixes: 2e80dbe7ac51 ("NFSv4.1: Close callback races for OPEN, LAYOUTGET...") Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/nfs4proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4a64fa04f80a..401ea6e4cab8 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -8429,6 +8429,7 @@ static void nfs4_layoutget_release(void *calldata) size_t max_pages = max_response_pages(server); dprintk("--> %s\n", __func__); + nfs4_sequence_free_slot(&lgp->res.seq_res); nfs4_free_pages(lgp->args.layout.pages, max_pages); pnfs_put_layout_hdr(NFS_I(inode)->layout); put_nfs_open_context(lgp->args.ctx); @@ -8503,7 +8504,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags) /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */ if (status == 0 && lgp->res.layoutp->len) lseg = pnfs_layout_process(lgp); - nfs4_sequence_free_slot(&lgp->res.seq_res); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); if (status) -- GitLab From 78c4244f8bdbf3cefa1e01bfcfe7a53bcc45c0f3 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 23 Jun 2017 13:45:16 +0200 Subject: [PATCH 570/786] gpiolib: fix filtering out unwanted events commit ad537b822577fcc143325786cd6ad50d7b9df31c upstream. GPIOEVENT_REQUEST_BOTH_EDGES is not a single flag, but a binary OR of GPIOEVENT_REQUEST_RISING_EDGE and GPIOEVENT_REQUEST_FALLING_EDGE. The expression 'le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES' we'll get evaluated to true even if only one event type was requested. Fix it by checking both RISING & FALLING flags explicitly. Fixes: 61f922db7221 ("gpio: userspace ABI for reading GPIO line events") Signed-off-by: Bartosz Golaszewski Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/gpio/gpiolib.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 92159313361b..f2bb5122d2c2 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -707,7 +707,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) ge.timestamp = ktime_get_real_ns(); - if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) { + if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE + && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { int level = gpiod_get_value_cansleep(le->desc); if (level) -- GitLab From 466877f2d25758e5ad007b292c1f225520f8c877 Mon Sep 17 00:00:00 2001 From: Deepak Rawat Date: Mon, 26 Jun 2017 14:39:08 +0200 Subject: [PATCH 571/786] drm/vmwgfx: Free hash table allocated by cmdbuf managed res mgr commit 82fcee526ba8ca2c5d378bdf51b21b7eb058fe3a upstream. The hash table created during vmw_cmdbuf_res_man_create was never freed. This causes memory leak in context creation. Added the corresponding drm_ht_remove in vmw_cmdbuf_res_man_destroy. Tested for memory leak by running piglit overnight and kernel memory is not inflated which earlier was. 
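The leak is the familiar create/destroy asymmetry: vmw_cmdbuf_res_man_create() sets up an embedded hash table, but the destroy path only freed the list entries and the manager itself. A hedged userspace analogue of the pattern, with plain calloc()/free() standing in for drm_ht_create()/drm_ht_remove() and hypothetical fake_man/fake_ht types:

#include <stdlib.h>

/* Hypothetical stand-ins for the manager and its embedded hash table. */
struct fake_ht { void **buckets; };

static int ht_create(struct fake_ht *ht, unsigned int order)
{
	ht->buckets = calloc(1UL << order, sizeof(*ht->buckets));
	return ht->buckets ? 0 : -1;
}

static void ht_remove(struct fake_ht *ht)
{
	free(ht->buckets);
	ht->buckets = NULL;
}

struct fake_man { struct fake_ht resources; };

static struct fake_man *man_create(void)
{
	struct fake_man *man = malloc(sizeof(*man));

	if (man && ht_create(&man->resources, 12)) {
		free(man);
		man = NULL;
	}
	return man;
}

static void man_destroy(struct fake_man *man)
{
	ht_remove(&man->resources);	/* the step the patch adds via drm_ht_remove() */
	free(man);
}

int main(void)
{
	struct fake_man *man = man_create();

	if (man)
		man_destroy(man);
	return 0;
}

Running the analogue under valgrind or ASan shows the bucket allocation outliving the manager until the teardown call is added, mirroring the memory inflation observed in the overnight piglit run.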
Signed-off-by: Deepak Rawat Reviewed-by: Sinclair Yeh Signed-off-by: Thomas Hellstrom Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c index 13db8a2851ed..1f013d45c9e9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man) list_for_each_entry_safe(entry, next, &man->list, head) vmw_cmdbuf_res_free(man, entry); + drm_ht_remove(&man->resources); kfree(man); } -- GitLab From 1c0fa383b3391f1e5528b264bd9b3ca9209054cf Mon Sep 17 00:00:00 2001 From: Vallish Vaidyeshwara Date: Fri, 23 Jun 2017 18:53:06 +0000 Subject: [PATCH 572/786] dm thin: do not queue freed thin mapping for next stage processing commit 00a0ea33b495ee6149bf5a77ac5807ce87323abb upstream. process_prepared_discard_passdown_pt1() should cleanup dm_thin_new_mapping in cases of error. dm_pool_inc_data_range() can fail trying to get a block reference: metadata operation 'dm_pool_inc_data_range' failed: error = -61 When dm_pool_inc_data_range() fails, dm thin aborts current metadata transaction and marks pool as PM_READ_ONLY. Memory for thin mapping is released as well. However, current thin mapping will be queued onto next stage as part of queue_passdown_pt2() or passdown_endio(). This dangling thin mapping memory when processed and accessed in next stage will lead to device mapper crashing. Code flow without fix: -> process_prepared_discard_passdown_pt1(m) -> dm_thin_remove_range() -> discard passdown --> passdown_endio(m) queues m onto next stage -> dm_pool_inc_data_range() fails, frees memory m but does not remove it from next stage queue -> process_prepared_discard_passdown_pt2(m) -> processes freed memory m and crashes One such stack: Call Trace: [] dm_cell_release_no_holder+0x2f/0x70 [dm_bio_prison] [] cell_defer_no_holder+0x3c/0x80 [dm_thin_pool] [] process_prepared_discard_passdown_pt2+0x4b/0x90 [dm_thin_pool] [] process_prepared+0x81/0xa0 [dm_thin_pool] [] do_worker+0xc5/0x820 [dm_thin_pool] [] ? __schedule+0x244/0x680 [] ? pwq_activate_delayed_work+0x42/0xb0 [] process_one_work+0x153/0x3f0 [] worker_thread+0x12b/0x4b0 [] ? rescuer_thread+0x350/0x350 [] kthread+0xca/0xe0 [] ? kthread_park+0x60/0x60 [] ret_from_fork+0x25/0x30 The fix is to first take the block ref count for discarded block and then do a passdown discard of this block. If block ref count fails, then bail out aborting current metadata transaction, mark pool as PM_READ_ONLY and also free current thin mapping memory (existing error handling code) without queueing this thin mapping onto next stage of processing. If block ref count succeeds, then passdown discard of this block. Discard callback of passdown_endio() will queue this thin mapping onto next stage of processing. 
Code flow with fix: -> process_prepared_discard_passdown_pt1(m) -> dm_thin_remove_range() -> dm_pool_inc_data_range() --> if fails, free memory m and bail out -> discard passdown --> passdown_endio(m) queues m onto next stage Reviewed-by: Eduardo Valentin Reviewed-by: Cristian Gafton Reviewed-by: Anchal Agarwal Signed-off-by: Vallish Vaidyeshwara Reviewed-by: Joe Thornber Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-thin.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index be869a990e38..0b678b5da4c4 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -1095,6 +1095,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) return; } + /* + * Increment the unmapped blocks. This prevents a race between the + * passdown io and reallocation of freed blocks. + */ + r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); + if (r) { + metadata_operation_failed(pool, "dm_pool_inc_data_range", r); + bio_io_error(m->bio); + cell_defer_no_holder(tc, m->cell); + mempool_free(m, pool->mapping_pool); + return; + } + discard_parent = bio_alloc(GFP_NOIO, 1); if (!discard_parent) { DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.", @@ -1115,19 +1128,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) end_discard(&op, r); } } - - /* - * Increment the unmapped blocks. This prevents a race between the - * passdown io and reallocation of freed blocks. - */ - r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); - if (r) { - metadata_operation_failed(pool, "dm_pool_inc_data_range", r); - bio_io_error(m->bio); - cell_defer_no_holder(tc, m->cell); - mempool_free(m, pool->mapping_pool); - return; - } } static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m) -- GitLab From ed96148d7f8e900b61d90de79bf3b273887ffa70 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Thu, 4 May 2017 10:25:47 +0800 Subject: [PATCH 573/786] x86/mm: Fix boot crash caused by incorrect loop count calculation in sync_global_pgds() commit fc5f9d5f151c9fff21d3d1d2907b888a5aec3ff7 upstream. Jeff Moyer reported that on his system with two memory regions 0~64G and 1T~1T+192G, and kernel option "memmap=192G!1024G" added, enabling KASLR will make the system hang intermittently during boot. While adding 'nokaslr' won't. The back trace is: Oops: 0000 [#1] SMP RIP: memcpy_erms() [ .... ] Call Trace: pmem_rw_page() bdev_read_page() do_mpage_readpage() mpage_readpages() blkdev_readpages() __do_page_cache_readahead() force_page_cache_readahead() page_cache_sync_readahead() generic_file_read_iter() blkdev_read_iter() __vfs_read() vfs_read() SyS_read() entry_SYSCALL_64_fastpath() This crash happens because the for loop count calculation in sync_global_pgds() is not correct. When a mapping area crosses PGD entries, we should calculate the starting address of region which next PGD covers and assign it to next for loop count, but not add PGDIR_SIZE directly. The old code works right only if the mapping area is an exact multiple of PGDIR_SIZE, otherwize the end region could be skipped so that it can't be synchronized to all other processes from kernel PGD init_mm.pgd. In Jeff's system, emulated pmem area [1024G, 1216G) is smaller than PGDIR_SIZE. While 'nokaslr' works because PAGE_OFFSET is 1T aligned, it makes this area be mapped inside one PGD entry. 
With KASLR enabled, this area could cross two PGD entries, then the next PGD entry won't be synced to all other processes. That is why we saw empty PGD. Fix it. Reported-by: Jeff Moyer Signed-off-by: Baoquan He Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dan Williams Cc: Dave Hansen Cc: Dave Young Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Jinbum Park Cc: Josh Poimboeuf Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Garnier Cc: Thomas Gleixner Cc: Yasuaki Ishimatsu Cc: Yinghai Lu Link: http://lkml.kernel.org/r/1493864747-8506-1-git-send-email-bhe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Dan Williams Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/init_64.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 14b9dd71d9e8..9a324fc8bed8 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -94,10 +94,10 @@ __setup("noexec32=", nonx32_setup); */ void sync_global_pgds(unsigned long start, unsigned long end, int removed) { - unsigned long address; + unsigned long addr; - for (address = start; address <= end; address += PGDIR_SIZE) { - const pgd_t *pgd_ref = pgd_offset_k(address); + for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) { + const pgd_t *pgd_ref = pgd_offset_k(addr); struct page *page; /* @@ -113,7 +113,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed) pgd_t *pgd; spinlock_t *pgt_lock; - pgd = (pgd_t *)page_address(page) + pgd_index(address); + pgd = (pgd_t *)page_address(page) + pgd_index(addr); /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); -- GitLab From d2da8d394147526a28c6d5bb83a72635e7f0a288 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 8 Dec 2016 19:55:22 +0800 Subject: [PATCH 574/786] usb: gadget: f_fs: Fix possibe deadlock commit b3ce3ce02d146841af012d08506b4071db8ffde3 upstream. When system try to close /dev/usb-ffs/adb/ep0 on one core, at the same time another core try to attach new UDC, which will cause deadlock as below scenario. Thus we should release ffs lock before issuing unregister_gadget_item(). [ 52.642225] c1 ====================================================== [ 52.642228] c1 [ INFO: possible circular locking dependency detected ] [ 52.642236] c1 4.4.6+ #1 Tainted: G W O [ 52.642241] c1 ------------------------------------------------------- [ 52.642245] c1 usb ffs open/2808 is trying to acquire lock: [ 52.642270] c0 (udc_lock){+.+.+.}, at: [] usb_gadget_unregister_driver+0x3c/0xc8 [ 52.642272] c1 but task is already holding lock: [ 52.642283] c0 (ffs_lock){+.+.+.}, at: [] ffs_data_clear+0x30/0x140 [ 52.642285] c1 which lock already depends on the new lock. 
[ 52.642287] c1 the existing dependency chain (in reverse order) is: [ 52.642295] c0 -> #1 (ffs_lock){+.+.+.}: [ 52.642307] c0 [] __lock_acquire+0x20f0/0x2238 [ 52.642314] c0 [] lock_acquire+0xe4/0x298 [ 52.642322] c0 [] mutex_lock_nested+0x7c/0x3cc [ 52.642328] c0 [] ffs_func_bind+0x504/0x6e8 [ 52.642334] c0 [] usb_add_function+0x84/0x184 [ 52.642340] c0 [] configfs_composite_bind+0x264/0x39c [ 52.642346] c0 [] udc_bind_to_driver+0x58/0x11c [ 52.642352] c0 [] usb_udc_attach_driver+0x90/0xc8 [ 52.642358] c0 [] gadget_dev_desc_UDC_store+0xd4/0x128 [ 52.642369] c0 [] configfs_write_file+0xd0/0x13c [ 52.642376] c0 [] vfs_write+0xb8/0x214 [ 52.642381] c0 [] SyS_write+0x54/0xb0 [ 52.642388] c0 [] el0_svc_naked+0x24/0x28 [ 52.642395] c0 -> #0 (udc_lock){+.+.+.}: [ 52.642401] c0 [] print_circular_bug+0x84/0x2e4 [ 52.642407] c0 [] __lock_acquire+0x2138/0x2238 [ 52.642412] c0 [] lock_acquire+0xe4/0x298 [ 52.642420] c0 [] mutex_lock_nested+0x7c/0x3cc [ 52.642427] c0 [] usb_gadget_unregister_driver+0x3c/0xc8 [ 52.642432] c0 [] unregister_gadget_item+0x28/0x44 [ 52.642439] c0 [] ffs_data_clear+0x138/0x140 [ 52.642444] c0 [] ffs_data_reset+0x20/0x6c [ 52.642450] c0 [] ffs_data_closed+0xac/0x12c [ 52.642454] c0 [] ffs_ep0_release+0x20/0x2c [ 52.642460] c0 [] __fput+0xb0/0x1f4 [ 52.642466] c0 [] ____fput+0x20/0x2c [ 52.642473] c0 [] task_work_run+0xb4/0xe8 [ 52.642482] c0 [] do_exit+0x360/0xb9c [ 52.642487] c0 [] do_group_exit+0x4c/0xb0 [ 52.642494] c0 [] get_signal+0x380/0x89c [ 52.642501] c0 [] do_signal+0x154/0x518 [ 52.642507] c0 [] do_notify_resume+0x70/0x78 [ 52.642512] c0 [] work_pending+0x1c/0x20 [ 52.642514] c1 other info that might help us debug this: [ 52.642517] c1 Possible unsafe locking scenario: [ 52.642518] c1 CPU0 CPU1 [ 52.642520] c1 ---- ---- [ 52.642525] c0 lock(ffs_lock); [ 52.642529] c0 lock(udc_lock); [ 52.642533] c0 lock(ffs_lock); [ 52.642537] c0 lock(udc_lock); [ 52.642539] c1 *** DEADLOCK *** [ 52.642543] c1 1 lock held by usb ffs open/2808: [ 52.642555] c0 #0: (ffs_lock){+.+.+.}, at: [] ffs_data_clear+0x30/0x140 [ 52.642557] c1 stack backtrace: [ 52.642563] c1 CPU: 1 PID: 2808 Comm: usb ffs open Tainted: G [ 52.642565] c1 Hardware name: Spreadtrum SP9860g Board (DT) [ 52.642568] c1 Call trace: [ 52.642573] c1 [] dump_backtrace+0x0/0x170 [ 52.642577] c1 [] show_stack+0x20/0x28 [ 52.642583] c1 [] dump_stack+0xa8/0xe0 [ 52.642587] c1 [] print_circular_bug+0x1fc/0x2e4 [ 52.642591] c1 [] __lock_acquire+0x2138/0x2238 [ 52.642595] c1 [] lock_acquire+0xe4/0x298 [ 52.642599] c1 [] mutex_lock_nested+0x7c/0x3cc [ 52.642604] c1 [] usb_gadget_unregister_driver+0x3c/0xc8 [ 52.642608] c1 [] unregister_gadget_item+0x28/0x44 [ 52.642613] c1 [] ffs_data_clear+0x138/0x140 [ 52.642618] c1 [] ffs_data_reset+0x20/0x6c [ 52.642621] c1 [] ffs_data_closed+0xac/0x12c [ 52.642625] c1 [] ffs_ep0_release+0x20/0x2c [ 52.642629] c1 [] __fput+0xb0/0x1f4 [ 52.642633] c1 [] ____fput+0x20/0x2c [ 52.642636] c1 [] task_work_run+0xb4/0xe8 [ 52.642640] c1 [] do_exit+0x360/0xb9c [ 52.642644] c1 [] do_group_exit+0x4c/0xb0 [ 52.642647] c1 [] get_signal+0x380/0x89c [ 52.642651] c1 [] do_signal+0x154/0x518 [ 52.642656] c1 [] do_notify_resume+0x70/0x78 [ 52.642659] c1 [] work_pending+0x1c/0x20 Acked-by: Michal Nazarewicz Signed-off-by: Baolin Wang Signed-off-by: Felipe Balbi Cc: Jerry Zhang Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_fs.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 
04ffd7640c33..f9c99803a43d 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3688,6 +3688,7 @@ static void ffs_closed(struct ffs_data *ffs) { struct ffs_dev *ffs_obj; struct f_fs_opts *opts; + struct config_item *ci; ENTER(); ffs_dev_lock(); @@ -3711,8 +3712,11 @@ static void ffs_closed(struct ffs_data *ffs) || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) goto done; - unregister_gadget_item(ffs_obj->opts-> - func_inst.group.cg_item.ci_parent->ci_parent); + ci = opts->func_inst.group.cg_item.ci_parent->ci_parent; + ffs_dev_unlock(); + + unregister_gadget_item(ci); + return; done: ffs_dev_unlock(); } -- GitLab From 6539c4f991c28e82d1eb0385d35cc9662985f61a Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:25 +0200 Subject: [PATCH 575/786] l2tp: fix race in l2tp_recv_common() commit 61b9a047729bb230978178bca6729689d0c50ca2 upstream. Taking a reference on sessions in l2tp_recv_common() is racy; this has to be done by the callers. To this end, a new function is required (l2tp_session_get()) to atomically lookup a session and take a reference on it. Callers then have to manually drop this reference. Fixes: fd558d186df2 ("l2tp: Split pppol2tp patch into separate l2tp and ppp parts") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- net/l2tp/l2tp_core.c | 73 ++++++++++++++++++++++++++++++++++++-------- net/l2tp/l2tp_core.h | 3 ++ net/l2tp/l2tp_ip.c | 17 ++++++++--- net/l2tp/l2tp_ip6.c | 18 ++++++++--- 4 files changed, 88 insertions(+), 23 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index e702cb95b89b..046a5ba0ebd2 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -278,6 +278,55 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn } EXPORT_SYMBOL_GPL(l2tp_session_find); +/* Like l2tp_session_find() but takes a reference on the returned session. + * Optionally calls session->ref() too if do_ref is true. + */ +struct l2tp_session *l2tp_session_get(struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref) +{ + struct hlist_head *session_list; + struct l2tp_session *session; + + if (!tunnel) { + struct l2tp_net *pn = l2tp_pernet(net); + + session_list = l2tp_session_id_hash_2(pn, session_id); + + rcu_read_lock_bh(); + hlist_for_each_entry_rcu(session, session_list, global_hlist) { + if (session->session_id == session_id) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); + rcu_read_unlock_bh(); + + return session; + } + } + rcu_read_unlock_bh(); + + return NULL; + } + + session_list = l2tp_session_id_hash(tunnel, session_id); + read_lock_bh(&tunnel->hlist_lock); + hlist_for_each_entry(session, session_list, hlist) { + if (session->session_id == session_id) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); + read_unlock_bh(&tunnel->hlist_lock); + + return session; + } + } + read_unlock_bh(&tunnel->hlist_lock); + + return NULL; +} +EXPORT_SYMBOL_GPL(l2tp_session_get); + struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, bool do_ref) { @@ -637,6 +686,9 @@ static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb) * a data (not control) frame before coming here. Fields up to the * session-id have already been parsed and ptr points to the data * after the session-id. 
+ * + * session->ref() must have been called prior to l2tp_recv_common(). + * session->deref() will be called automatically after skb is processed. */ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, @@ -646,14 +698,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, int offset; u32 ns, nr; - /* The ref count is increased since we now hold a pointer to - * the session. Take care to decrement the refcnt when exiting - * this function from now on... - */ - l2tp_session_inc_refcount(session); - if (session->ref) - (*session->ref)(session); - /* Parse and check optional cookie */ if (session->peer_cookie_len > 0) { if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) { @@ -806,8 +850,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, /* Try to dequeue as many skbs from reorder_q as we can. */ l2tp_recv_dequeue(session); - l2tp_session_dec_refcount(session); - return; discard: @@ -816,8 +858,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, if (session->deref) (*session->deref)(session); - - l2tp_session_dec_refcount(session); } EXPORT_SYMBOL(l2tp_recv_common); @@ -924,8 +964,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, } /* Find the session context */ - session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id); + session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true); if (!session || !session->recv_skb) { + if (session) { + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + } + /* Not found? Pass to userspace to deal with */ l2tp_info(tunnel, L2TP_MSG_DATA, "%s: no session found (%u/%u). Passing up.\n", @@ -934,6 +980,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, } l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook); + l2tp_session_dec_refcount(session); return 0; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index e7233bad65e0..1d020505bf06 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -240,6 +240,9 @@ static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk) return tunnel; } +struct l2tp_session *l2tp_session_get(struct net *net, + struct l2tp_tunnel *tunnel, + u32 session_id, bool do_ref); struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 20669537816e..3468d5635d0a 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. 
*/ - session = l2tp_session_find(net, NULL, session_id); - if (session == NULL) + session = l2tp_session_get(net, NULL, session_id, true); + if (!session) goto discard; tunnel = session->tunnel; - if (tunnel == NULL) - goto discard; + if (!tunnel) + goto discard_sess; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) - goto discard; + goto discard_sess; /* Point to L2TP header */ optr = ptr = skb->data; @@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) } l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); + l2tp_session_dec_refcount(session); return 0; @@ -203,6 +204,12 @@ static int l2tp_ip_recv(struct sk_buff *skb) return sk_receive_skb(sk, skb, 1); +discard_sess: + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + goto discard; + discard_put: sock_put(sk); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index a4b0c9232bf1..b10abef6b0a0 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. */ - session = l2tp_session_find(net, NULL, session_id); - if (session == NULL) + session = l2tp_session_get(net, NULL, session_id, true); + if (!session) goto discard; tunnel = session->tunnel; - if (tunnel == NULL) - goto discard; + if (!tunnel) + goto discard_sess; /* Trace packet contents, if enabled */ if (tunnel->debug & L2TP_MSG_DATA) { length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) - goto discard; + goto discard_sess; /* Point to L2TP header */ optr = ptr = skb->data; @@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb) l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook); + l2tp_session_dec_refcount(session); + return 0; pass_up: @@ -216,6 +218,12 @@ static int l2tp_ip6_recv(struct sk_buff *skb) return sk_receive_skb(sk, skb, 1); +discard_sess: + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + goto discard; + discard_put: sock_put(sk); -- GitLab From 806e98835683694cbb9e74c28641df8042792e27 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:26 +0200 Subject: [PATCH 576/786] l2tp: ensure session can't get removed during pppol2tp_session_ioctl() commit 57377d63547861919ee634b845c7caa38de4a452 upstream. Holding a reference on session is required before calling pppol2tp_session_ioctl(). The session could get freed while processing the ioctl otherwise. Since pppol2tp_session_ioctl() uses the session's socket, we also need to take a reference on it in l2tp_session_get(). Fixes: fd558d186df2 ("l2tp: Split pppol2tp patch into separate l2tp and ppp parts") Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- net/l2tp/l2tp_ppp.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 1387f547a09e..c1c9a9e08d08 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1141,11 +1141,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel, if (stats.session_id != 0) { /* resend to session ioctl handler */ struct l2tp_session *session = - l2tp_session_find(sock_net(sk), tunnel, stats.session_id); - if (session != NULL) - err = pppol2tp_session_ioctl(session, cmd, arg); - else + l2tp_session_get(sock_net(sk), tunnel, + stats.session_id, true); + + if (session) { + err = pppol2tp_session_ioctl(session, cmd, + arg); + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + } else { err = -EBADR; + } break; } #ifdef CONFIG_XFRM -- GitLab From d9face6fc62a73059f0fc3a3de4dfe8f53536aa7 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:27 +0200 Subject: [PATCH 577/786] l2tp: fix duplicate session creation commit dbdbc73b44782e22b3b4b6e8b51e7a3d245f3086 upstream. l2tp_session_create() relies on its caller for checking for duplicate sessions. This is racy since a session can be concurrently inserted after the caller's verification. Fix this by letting l2tp_session_create() verify sessions uniqueness upon insertion. Callers need to be adapted to check for l2tp_session_create()'s return code instead of calling l2tp_session_find(). pppol2tp_connect() is a bit special because it has to work on existing sessions (if they're not connected) or to create a new session if none is found. When acting on a preexisting session, a reference must be held or it could go away on us. So we have to use l2tp_session_get() instead of l2tp_session_find() and drop the reference before exiting. Fixes: d9e31d17ceba ("l2tp: Add L2TP ethernet pseudowire support") Fixes: fd558d186df2 ("l2tp: Split pppol2tp patch into separate l2tp and ppp parts") Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- net/l2tp/l2tp_core.c | 70 +++++++++++++++++++++++++++++++++----------- net/l2tp/l2tp_eth.c | 10 ++----- net/l2tp/l2tp_ppp.c | 60 ++++++++++++++++++------------------- 3 files changed, 84 insertions(+), 56 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 046a5ba0ebd2..f29911ab3b80 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -378,6 +378,48 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) } EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); +static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, + struct l2tp_session *session) +{ + struct l2tp_session *session_walk; + struct hlist_head *g_head; + struct hlist_head *head; + struct l2tp_net *pn; + + head = l2tp_session_id_hash(tunnel, session->session_id); + + write_lock_bh(&tunnel->hlist_lock); + hlist_for_each_entry(session_walk, head, hlist) + if (session_walk->session_id == session->session_id) + goto exist; + + if (tunnel->version == L2TP_HDR_VER_3) { + pn = l2tp_pernet(tunnel->l2tp_net); + g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net), + session->session_id); + + spin_lock_bh(&pn->l2tp_session_hlist_lock); + hlist_for_each_entry(session_walk, g_head, global_hlist) + if (session_walk->session_id == session->session_id) + goto exist_glob; + + hlist_add_head_rcu(&session->global_hlist, g_head); + spin_unlock_bh(&pn->l2tp_session_hlist_lock); + } + + hlist_add_head(&session->hlist, head); + write_unlock_bh(&tunnel->hlist_lock); + + return 0; + +exist_glob: + spin_unlock_bh(&pn->l2tp_session_hlist_lock); +exist: + write_unlock_bh(&tunnel->hlist_lock); + + return -EEXIST; +} + /* Lookup a tunnel by id */ struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id) @@ -1787,6 +1829,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct l2tp_session *session; + int err; session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); if (session != NULL) { @@ -1842,6 +1885,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn l2tp_session_set_header_len(session, tunnel->version); + err = l2tp_session_add_to_tunnel(tunnel, session); + if (err) { + kfree(session); + + return ERR_PTR(err); + } + /* Bump the reference count. The session context is deleted * only when this drops to zero. 
*/ @@ -1851,28 +1901,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn /* Ensure tunnel socket isn't deleted */ sock_hold(tunnel->sock); - /* Add session to the tunnel's hash list */ - write_lock_bh(&tunnel->hlist_lock); - hlist_add_head(&session->hlist, - l2tp_session_id_hash(tunnel, session_id)); - write_unlock_bh(&tunnel->hlist_lock); - - /* And to the global session list if L2TPv3 */ - if (tunnel->version != L2TP_HDR_VER_2) { - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); - - spin_lock_bh(&pn->l2tp_session_hlist_lock); - hlist_add_head_rcu(&session->global_hlist, - l2tp_session_id_hash_2(pn, session_id)); - spin_unlock_bh(&pn->l2tp_session_hlist_lock); - } - /* Ignore management session in session count value */ if (session->session_id != 0) atomic_inc(&l2tp_session_count); + + return session; } - return session; + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(l2tp_session_create); diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 965f7e344cef..eecc64e138de 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -223,12 +223,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p goto out; } - session = l2tp_session_find(net, tunnel, session_id); - if (session) { - rc = -EEXIST; - goto out; - } - if (cfg->ifname) { dev = dev_get_by_name(net, cfg->ifname); if (dev) { @@ -242,8 +236,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, peer_session_id, cfg); - if (!session) { - rc = -ENOMEM; + if (IS_ERR(session)) { + rc = PTR_ERR(session); goto out; } diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index c1c9a9e08d08..1696f1fd5877 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -583,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, int error = 0; u32 tunnel_id, peer_tunnel_id; u32 session_id, peer_session_id; + bool drop_refcnt = false; int ver = 2; int fd; @@ -684,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (tunnel->peer_tunnel_id == 0) tunnel->peer_tunnel_id = peer_tunnel_id; - /* Create session if it doesn't already exist. We handle the - * case where a session was previously created by the netlink - * interface by checking that the session doesn't already have - * a socket and its tunnel socket are what we expect. If any - * of those checks fail, return EEXIST to the caller. - */ - session = l2tp_session_find(sock_net(sk), tunnel, session_id); - if (session == NULL) { - /* Default MTU must allow space for UDP/L2TP/PPP - * headers. + session = l2tp_session_get(sock_net(sk), tunnel, session_id, false); + if (session) { + drop_refcnt = true; + ps = l2tp_session_priv(session); + + /* Using a pre-existing session is fine as long as it hasn't + * been connected yet. */ - cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD; + if (ps->sock) { + error = -EEXIST; + goto end; + } - /* Allocate and initialize a new session context. 
*/ - session = l2tp_session_create(sizeof(struct pppol2tp_session), - tunnel, session_id, - peer_session_id, &cfg); - if (session == NULL) { - error = -ENOMEM; + /* consistency checks */ + if (ps->tunnel_sock != tunnel->sock) { + error = -EEXIST; goto end; } } else { - ps = l2tp_session_priv(session); - error = -EEXIST; - if (ps->sock != NULL) - goto end; + /* Default MTU must allow space for UDP/L2TP/PPP headers */ + cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; + cfg.mru = cfg.mtu; - /* consistency checks */ - if (ps->tunnel_sock != tunnel->sock) + session = l2tp_session_create(sizeof(struct pppol2tp_session), + tunnel, session_id, + peer_session_id, &cfg); + if (IS_ERR(session)) { + error = PTR_ERR(session); goto end; + } } /* Associate session with its PPPoL2TP socket */ @@ -778,6 +779,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, session->name); end: + if (drop_refcnt) + l2tp_session_dec_refcount(session); release_sock(sk); return error; @@ -805,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i if (tunnel->sock == NULL) goto out; - /* Check that this session doesn't already exist */ - error = -EEXIST; - session = l2tp_session_find(net, tunnel, session_id); - if (session != NULL) - goto out; - /* Default MTU values. */ if (cfg->mtu == 0) cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD; @@ -818,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i cfg->mru = cfg->mtu; /* Allocate and initialize a new session context. */ - error = -ENOMEM; session = l2tp_session_create(sizeof(struct pppol2tp_session), tunnel, session_id, peer_session_id, cfg); - if (session == NULL) + if (IS_ERR(session)) { + error = PTR_ERR(session); goto out; + } ps = l2tp_session_priv(session); ps->tunnel_sock = tunnel->sock; -- GitLab From 599e6f038777c6733eef244d4aac192edb612aa6 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:29 +0200 Subject: [PATCH 578/786] l2tp: hold session while sending creation notifications commit 5e6a9e5a3554a5b3db09cdc22253af1849c65dff upstream. l2tp_session_find() doesn't take any reference on the returned session. Therefore, the session may disappear while sending the notification. Use l2tp_session_get() instead and decrement session's refcount once the notification is sent. Fixes: 33f72e6f0c67 ("l2tp : multicast notification to the registered listeners") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- net/l2tp/l2tp_netlink.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 9f66272b163b..8c0f48350027 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -634,10 +634,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf session_id, peer_session_id, &cfg); if (ret >= 0) { - session = l2tp_session_find(net, tunnel, session_id); - if (session) + session = l2tp_session_get(net, tunnel, session_id, false); + if (session) { ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_CREATE); + l2tp_session_dec_refcount(session); + } } out: -- GitLab From 08cb8e5f83fd2d4f6327173cc01322bc842806f1 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Fri, 31 Mar 2017 13:02:30 +0200 Subject: [PATCH 579/786] l2tp: take a reference on sessions used in genetlink handlers commit 2777e2ab5a9cf2b4524486c6db1517a6ded25261 upstream. 
Callers of l2tp_nl_session_find() need to hold a reference on the returned session since there's no guarantee that it isn't going to disappear from under them. Relying on the fact that no l2tp netlink message may be processed concurrently isn't enough: sessions can be deleted by other means (e.g. by closing the PPPOL2TP socket of a ppp pseudowire). l2tp_nl_cmd_session_delete() is a bit special: it runs a callback function that may require a previous call to session->ref(). In particular, for ppp pseudowires, the callback is l2tp_session_delete(), which then calls pppol2tp_session_close() and dereferences the PPPOL2TP socket. The socket might already be gone at the moment l2tp_session_delete() calls session->ref(), so we need to take a reference during the session lookup. So we need to pass the do_ref variable down to l2tp_session_get() and l2tp_session_get_by_ifname(). Since all callers have to be updated, l2tp_session_find_by_ifname() and l2tp_nl_session_find() are renamed to reflect their new behaviour. Fixes: 309795f4bec2 ("l2tp: Add netlink control API for L2TP") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- net/l2tp/l2tp_core.c | 9 +++++++-- net/l2tp/l2tp_core.h | 3 ++- net/l2tp/l2tp_netlink.c | 39 ++++++++++++++++++++++++++------------- 3 files changed, 35 insertions(+), 16 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f29911ab3b80..3bce65183c95 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -356,7 +356,8 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_nth); /* Lookup a session by interface name. * This is very inefficient but is only used by management interfaces. */ -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, + bool do_ref) { struct l2tp_net *pn = l2tp_pernet(net); int hash; @@ -366,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { if (!strcmp(session->ifname, ifname)) { + l2tp_session_inc_refcount(session); + if (do_ref && session->ref) + session->ref(session); rcu_read_unlock_bh(); + return session; } } @@ -376,7 +381,7 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname) return NULL; } -EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname); +EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname); static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel, struct l2tp_session *session) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 1d020505bf06..0095012509ac 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -248,7 +248,8 @@ struct l2tp_session *l2tp_session_find(struct net *net, u32 session_id); struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth, bool do_ref); -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname, + bool do_ref); struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 8c0f48350027..1ccd310d01a5 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -55,7 +55,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 
portid, u32 seq, /* Accessed under genl lock */ static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX]; -static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) +static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info, + bool do_ref) { u32 tunnel_id; u32 session_id; @@ -66,14 +67,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info) if (info->attrs[L2TP_ATTR_IFNAME]) { ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]); - session = l2tp_session_find_by_ifname(net, ifname); + session = l2tp_session_get_by_ifname(net, ifname, do_ref); } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) && (info->attrs[L2TP_ATTR_CONN_ID])) { tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); tunnel = l2tp_tunnel_find(net, tunnel_id); if (tunnel) - session = l2tp_session_find(net, tunnel, session_id); + session = l2tp_session_get(net, tunnel, session_id, + do_ref); } return session; @@ -652,7 +654,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf struct l2tp_session *session; u16 pw_type; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, true); if (session == NULL) { ret = -ENODEV; goto out; @@ -666,6 +668,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); + if (session->deref) + session->deref(session); + l2tp_session_dec_refcount(session); + out: return ret; } @@ -675,7 +681,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf int ret = 0; struct l2tp_session *session; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; goto out; @@ -710,6 +716,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_MODIFY); + l2tp_session_dec_refcount(session); + out: return ret; } @@ -805,29 +813,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) struct sk_buff *msg; int ret; - session = l2tp_nl_session_find(info); + session = l2tp_nl_session_get(info, false); if (session == NULL) { ret = -ENODEV; - goto out; + goto err; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; - goto out; + goto err_ref; } ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq, 0, session, L2TP_CMD_SESSION_GET); if (ret < 0) - goto err_out; + goto err_ref_msg; - return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); + ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); -err_out: - nlmsg_free(msg); + l2tp_session_dec_refcount(session); -out: + return ret; + +err_ref_msg: + nlmsg_free(msg); +err_ref: + l2tp_session_dec_refcount(session); +err: return ret; } -- GitLab From 2aa6d036b716c9242222e054d4ef34905ad45fd3 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 16 Jun 2017 14:02:34 -0700 Subject: [PATCH 580/786] mm: numa: avoid waiting on freed migrated pages commit 3c226c637b69104f6b9f1c6ec5b08d7b741b3229 upstream. In do_huge_pmd_numa_page(), we attempt to handle a migrating thp pmd by waiting until the pmd is unlocked before we return and retry. 
However, we can race with migrate_misplaced_transhuge_page(): // do_huge_pmd_numa_page // migrate_misplaced_transhuge_page() // Holds 0 refs on page // Holds 2 refs on page vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); /* ... */ if (pmd_trans_migrating(*vmf->pmd)) { page = pmd_page(*vmf->pmd); spin_unlock(vmf->ptl); ptl = pmd_lock(mm, pmd); if (page_count(page) != 2)) { /* roll back */ } /* ... */ mlock_migrate_page(new_page, page); /* ... */ spin_unlock(ptl); put_page(page); put_page(page); // page freed here wait_on_page_locked(page); goto out; } This can result in the freed page having its waiters flag set unexpectedly, which trips the PAGE_FLAGS_CHECK_AT_PREP checks in the page alloc/free functions. This has been observed on arm64 KVM guests. We can avoid this by having do_huge_pmd_numa_page() take a reference on the page before dropping the pmd lock, mirroring what we do in __migration_entry_wait(). When we hit the race, migrate_misplaced_transhuge_page() will see the reference and abort the migration, as it may do today in other cases. Fixes: b8916634b77bffb2 ("mm: Prevent parallel splits during THP migration") Link: http://lkml.kernel.org/r/1497349722-6731-2-git-send-email-will.deacon@arm.com Signed-off-by: Mark Rutland Signed-off-by: Will Deacon Acked-by: Steve Capper Acked-by: Kirill A. Shutemov Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- mm/huge_memory.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index d5b2b759f76f..e7d5db958538 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1227,8 +1227,11 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) */ if (unlikely(pmd_trans_migrating(*fe->pmd))) { page = pmd_page(*fe->pmd); + if (!get_page_unless_zero(page)) + goto out_unlock; spin_unlock(fe->ptl); wait_on_page_locked(page); + put_page(page); goto out; } @@ -1260,8 +1263,11 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd) /* Migration could have started since the pmd_trans_migrating check */ if (!page_locked) { + if (!get_page_unless_zero(page)) + goto out_unlock; spin_unlock(fe->ptl); wait_on_page_locked(page); + put_page(page); page_nid = -1; goto out; } -- GitLab From 41172b772da4b9d875ed3fb90fe0e1a86742dc2a Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 23 May 2017 21:54:10 -0400 Subject: [PATCH 581/786] sparc64: Handle PIO & MEM non-resumable errors. [ Upstream commit 047487241ff59374fded8c477f21453681f5995c ] User processes trying to access an invalid memory address via PIO will receive a SIGBUS signal instead of causing a panic. Memory errors will receive a SIGKILL since a SIGBUS may result in a coredump which may attempt to repeat the faulting access. Signed-off-by: Liam R. Howlett Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/sparc/kernel/traps_64.c | 73 ++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 496fa926e1e0..d44fb806bbd7 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs) atomic_inc(&sun4v_resum_oflow_cnt); } +/* Given a set of registers, get the virtual addressi that was being accessed + * by the faulting instructions at tpc. 
+ */ +static unsigned long sun4v_get_vaddr(struct pt_regs *regs) +{ + unsigned int insn; + + if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) { + return compute_effective_address(regs, insn, + (insn >> 25) & 0x1f); + } + return 0; +} + +/* Attempt to handle non-resumable errors generated from userspace. + * Returns true if the signal was handled, false otherwise. + */ +bool sun4v_nonresum_error_user_handled(struct pt_regs *regs, + struct sun4v_error_entry *ent) { + + unsigned int attrs = ent->err_attrs; + + if (attrs & SUN4V_ERR_ATTRS_MEMORY) { + unsigned long addr = ent->err_raddr; + siginfo_t info; + + if (addr == ~(u64)0) { + /* This seems highly unlikely to ever occur */ + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n"); + } else { + unsigned long page_cnt = DIV_ROUND_UP(ent->err_size, + PAGE_SIZE); + + /* Break the unfortunate news. */ + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n", + addr); + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n", + page_cnt); + + while (page_cnt-- > 0) { + if (pfn_valid(addr >> PAGE_SHIFT)) + get_page(pfn_to_page(addr >> PAGE_SHIFT)); + addr += PAGE_SIZE; + } + } + info.si_signo = SIGKILL; + info.si_errno = 0; + info.si_trapno = 0; + force_sig_info(info.si_signo, &info, current); + + return true; + } + if (attrs & SUN4V_ERR_ATTRS_PIO) { + siginfo_t info; + + info.si_signo = SIGBUS; + info.si_code = BUS_ADRERR; + info.si_addr = (void __user *)sun4v_get_vaddr(regs); + force_sig_info(info.si_signo, &info, current); + + return true; + } + + /* Default to doing nothing */ + return false; +} + /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate. * Log the event, clear the first word of the entry, and die. */ @@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset) put_cpu(); + if (!(regs->tstate & TSTATE_PRIV) && + sun4v_nonresum_error_user_handled(regs, &local_copy)) { + /* DON'T PANIC: This userspace error was handled. */ + return; + } + #ifdef CONFIG_PCI /* Check for the special PCI poke sequence. */ if (pci_poke_in_progress && pci_poke_cpu == cpu) { -- GitLab From 8886196a73204d167b7f8797eb6ebf61e76794d6 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Tue, 23 May 2017 21:54:11 -0400 Subject: [PATCH 582/786] sparc64: Zero pages on allocation for mondo and error queues. [ Upstream commit 7a7dc961a28b965a0d0303c2e989df17b411708b ] Error queues use a non-zero first word to detect if the queues are full. Using pages that have not been zeroed may result in false positive overflow events. These queues are set up once during boot so zeroing all mondo and error queue pages is safe. Note that the false positive overflow does not always occur because the page allocation for these queues is so early in the boot cycle that higher number CPUs get fresh pages. It is only when traps are serviced with lower number CPUs who were given already used pages that this issue is exposed. Signed-off-by: Liam R. Howlett Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/sparc/kernel/irq_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index e1b1ce63a328..5cbf03c14981 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask) unsigned long order = get_order(size); unsigned long p; - p = __get_free_pages(GFP_KERNEL, order); + p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!p) { prom_printf("SUN4V: Error, cannot allocate queue.\n"); prom_halt(); -- GitLab From 0e8eca987e27077fc2ade85aa402dbc177fdb026 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Sat, 28 Jan 2017 22:53:28 +0300 Subject: [PATCH 583/786] net: ethtool: add support for 2500BaseT and 5000BaseT link modes [ Upstream commit 94842b4fc4d6b1691cfc86c6f5251f299d27f4ba ] This patch introduce support for 2500BaseT and 5000BaseT link modes. These modes are included in the new IEEE 802.3bz standard. Signed-off-by: Pavel Belous Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/ethtool.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 8e547231c1b7..5c22e8cab24b 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1368,6 +1368,8 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45, ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46, + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit @@ -1377,7 +1379,7 @@ enum ethtool_link_mode_bit_indices { */ __ETHTOOL_LINK_MODE_LAST - = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, + = ETHTOOL_LINK_MODE_5000baseT_Full_BIT, }; #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ -- GitLab From 97ace183074d306942b903a148aebd5d061758f0 Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 28 Nov 2016 10:46:46 +0100 Subject: [PATCH 584/786] net: phy: add an option to disable EEE advertisement MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit d853d145ea3e63387a2ac759aa41d5e43876e561 ] This patch adds an option to disable EEE advertisement in the generic PHY by providing a mask of prohibited modes corresponding to the value found in the MDIO_AN_EEE_ADV register. On some platforms, PHY Low power idle seems to be causing issues, even breaking the link some cases. The patch provides a convenient way for these platforms to disable EEE advertisement and work around the issue. Signed-off-by: Jerome Brunet Tested-by: Yegor Yefremov Tested-by: Andreas Färber Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/phy.c | 3 ++ drivers/net/phy/phy_device.c | 80 ++++++++++++++++++++++++++++++++---- include/linux/phy.h | 3 ++ 3 files changed, 77 insertions(+), 9 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index a9be26f1f677..edd30ebbf275 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1384,6 +1384,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); + /* Mask prohibited EEE modes */ + val &= ~phydev->eee_broken_modes; + phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val); return 0; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 14d57d0d1c04..b14fcf6e11f6 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1145,6 +1145,43 @@ static int genphy_config_advert(struct phy_device *phydev) return changed; } +/** + * genphy_config_eee_advert - disable unwanted eee mode advertisement + * @phydev: target phy_device struct + * + * Description: Writes MDIO_AN_EEE_ADV after disabling unsupported energy + * efficent ethernet modes. Returns 0 if the PHY's advertisement hasn't + * changed, and 1 if it has changed. + */ +static int genphy_config_eee_advert(struct phy_device *phydev) +{ + u32 broken = phydev->eee_broken_modes; + u32 old_adv, adv; + + /* Nothing to disable */ + if (!broken) + return 0; + + /* If the following call fails, we assume that EEE is not + * supported by the phy. If we read 0, EEE is not advertised + * In both case, we don't need to continue + */ + adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN); + if (adv <= 0) + return 0; + + old_adv = adv; + adv &= ~broken; + + /* Advertising remains unchanged with the broken mask */ + if (old_adv == adv) + return 0; + + phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, adv); + + return 1; +} + /** * genphy_setup_forced - configures/forces speed/duplex from @phydev * @phydev: target phy_device struct @@ -1203,15 +1240,20 @@ EXPORT_SYMBOL(genphy_restart_aneg); */ int genphy_config_aneg(struct phy_device *phydev) { - int result; + int err, changed; + + changed = genphy_config_eee_advert(phydev); if (AUTONEG_ENABLE != phydev->autoneg) return genphy_setup_forced(phydev); - result = genphy_config_advert(phydev); - if (result < 0) /* error */ - return result; - if (result == 0) { + err = genphy_config_advert(phydev); + if (err < 0) /* error */ + return err; + + changed |= err; + + if (changed == 0) { /* Advertisement hasn't changed, but maybe aneg was never on to * begin with? Or maybe phy was isolated? */ @@ -1221,16 +1263,16 @@ int genphy_config_aneg(struct phy_device *phydev) return ctl; if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE)) - result = 1; /* do restart aneg */ + changed = 1; /* do restart aneg */ } /* Only restart aneg if we are advertising something different * than we were before. 
*/ - if (result > 0) - result = genphy_restart_aneg(phydev); + if (changed > 0) + return genphy_restart_aneg(phydev); - return result; + return 0; } EXPORT_SYMBOL(genphy_config_aneg); @@ -1588,6 +1630,21 @@ static void of_set_phy_supported(struct phy_device *phydev) __set_phy_supported(phydev, max_speed); } +static void of_set_phy_eee_broken(struct phy_device *phydev) +{ + struct device_node *node = phydev->mdio.dev.of_node; + u32 broken; + + if (!IS_ENABLED(CONFIG_OF_MDIO)) + return; + + if (!node) + return; + + if (!of_property_read_u32(node, "eee-broken-modes", &broken)) + phydev->eee_broken_modes = broken; +} + /** * phy_probe - probe and init a PHY device * @dev: device to probe and init @@ -1625,6 +1682,11 @@ static int phy_probe(struct device *dev) of_set_phy_supported(phydev); phydev->advertising = phydev->supported; + /* Get the EEE modes we want to prohibit. We will ask + * the PHY stop advertising these mode later on + */ + of_set_phy_eee_broken(phydev); + /* Set the state to READY by default */ phydev->state = PHY_READY; diff --git a/include/linux/phy.h b/include/linux/phy.h index bd22670e2182..6c9b1e0006ee 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -401,6 +401,9 @@ struct phy_device { u32 advertising; u32 lp_advertising; + /* Energy efficient ethernet modes which should be prohibited */ + u32 eee_broken_modes; + int autoneg; int link_timeout; -- GitLab From 752ba680eb70ebc1e235b2ac1087ce471e2c800d Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 28 Nov 2016 10:46:47 +0100 Subject: [PATCH 585/786] dt-bindings: net: add EEE capability constants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 1fc31357ad194fb98691f3d122bcd47e59239e83 ] Signed-off-by: Jerome Brunet Tested-by: Yegor Yefremov Tested-by: Andreas Färber Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/dt-bindings/net/mdio.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 include/dt-bindings/net/mdio.h diff --git a/include/dt-bindings/net/mdio.h b/include/dt-bindings/net/mdio.h new file mode 100644 index 000000000000..99c6d903d439 --- /dev/null +++ b/include/dt-bindings/net/mdio.h @@ -0,0 +1,19 @@ +/* + * This header provides generic constants for ethernet MDIO bindings + */ + +#ifndef _DT_BINDINGS_NET_MDIO_H +#define _DT_BINDINGS_NET_MDIO_H + +/* + * EEE capability Advertisement + */ + +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ + +#endif -- GitLab From 40373d91a0f764c8ba5c56ea3dc88896faa4510d Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 19 Dec 2016 16:05:36 +0100 Subject: [PATCH 586/786] net: phy: fix sign type error in genphy_config_eee_advert [ Upstream commit 3bb9ab63276696988d8224f52db20e87194deb4b ] In genphy_config_eee_advert, the return value of phy_read_mmd_indirect is checked to know if the register could be accessed but the result is assigned to a 'u32'. Changing to 'int' to correctly get errors from phy_read_mmd_indirect. Fixes: d853d145ea3e ("net: phy: add an option to disable EEE advertisement") Reported-by: Julia Lawall Signed-off-by: Jerome Brunet Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/phy_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index b14fcf6e11f6..d9ec74895e42 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1155,8 +1155,8 @@ static int genphy_config_advert(struct phy_device *phydev) */ static int genphy_config_eee_advert(struct phy_device *phydev) { - u32 broken = phydev->eee_broken_modes; - u32 old_adv, adv; + int broken = phydev->eee_broken_modes; + int old_adv, adv; /* Nothing to disable */ if (!broken) -- GitLab From 3897ae12b706bfc47c07a1eef58fe6ce328784cf Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 19 Dec 2016 16:05:37 +0100 Subject: [PATCH 587/786] net: phy: use boolean dt properties for eee broken modes [ Upstream commit 57f3986231bb2c69a55ccab1d2b30a00818027ac ] The patches regarding eee-broken-modes was merged before all people involved could find an agreement on the best way to move forward. While we agreed on having a DT property to mark particular modes as broken, the value used for eee-broken-modes mapped the phy register in very direct way. Because of this, the concern is that it could be used to implement configuration policies instead of describing a broken HW. In the end, having a boolean property for each mode seems to be preferred over one bit field value mapping the register (too) directly. Cc: Florian Fainelli Signed-off-by: Jerome Brunet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/phy_device.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index d9ec74895e42..32b555a72e13 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1633,7 +1633,7 @@ static void of_set_phy_supported(struct phy_device *phydev) static void of_set_phy_eee_broken(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; - u32 broken; + u32 broken = 0; if (!IS_ENABLED(CONFIG_OF_MDIO)) return; @@ -1641,8 +1641,20 @@ static void of_set_phy_eee_broken(struct phy_device *phydev) if (!node) return; - if (!of_property_read_u32(node, "eee-broken-modes", &broken)) - phydev->eee_broken_modes = broken; + if (of_property_read_bool(node, "eee-broken-100tx")) + broken |= MDIO_EEE_100TX; + if (of_property_read_bool(node, "eee-broken-1000t")) + broken |= MDIO_EEE_1000T; + if (of_property_read_bool(node, "eee-broken-10gt")) + broken |= MDIO_EEE_10GT; + if (of_property_read_bool(node, "eee-broken-1000kx")) + broken |= MDIO_EEE_1000KX; + if (of_property_read_bool(node, "eee-broken-10gkx4")) + broken |= MDIO_EEE_10GKX4; + if (of_property_read_bool(node, "eee-broken-10gkr")) + broken |= MDIO_EEE_10GKR; + + phydev->eee_broken_modes = broken; } /** -- GitLab From 8bface142a8d4bc5766bc71c94a618f234ed2bc6 Mon Sep 17 00:00:00 2001 From: jbrunet Date: Mon, 19 Dec 2016 16:05:38 +0100 Subject: [PATCH 588/786] dt: bindings: net: use boolean dt properties for eee broken modes [ Upstream commit 308d3165d8b2b98d3dc3d97d6662062735daea67 ] The patches regarding eee-broken-modes was merged before all people involved could find an agreement on the best way to move forward. While we agreed on having a DT property to mark particular modes as broken, the value used for eee-broken-modes mapped the phy register in very direct way. 
Because of this, the concern is that it could be used to implement configuration policies instead of describing a broken HW. In the end, having a boolean property for each mode seems to be preferred over one bit field value mapping the register (too) directly. Cc: Florian Fainelli Signed-off-by: Jerome Brunet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/net/phy.txt | 9 +++++++++ include/dt-bindings/net/mdio.h | 19 ------------------- 2 files changed, 9 insertions(+), 19 deletions(-) delete mode 100644 include/dt-bindings/net/mdio.h diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index bc1c3c8bf8fa..62bdc5f2bf16 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt @@ -35,6 +35,15 @@ Optional Properties: - broken-turn-around: If set, indicates the PHY device does not correctly release the turn around line low at the end of a MDIO transaction. +- eee-broken-100tx: +- eee-broken-1000t: +- eee-broken-10gt: +- eee-broken-1000kx: +- eee-broken-10gkx4: +- eee-broken-10gkr: + Mark the corresponding energy efficient ethernet mode as broken and + request the ethernet to stop advertising it. + Example: ethernet-phy@0 { diff --git a/include/dt-bindings/net/mdio.h b/include/dt-bindings/net/mdio.h deleted file mode 100644 index 99c6d903d439..000000000000 --- a/include/dt-bindings/net/mdio.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * This header provides generic constants for ethernet MDIO bindings - */ - -#ifndef _DT_BINDINGS_NET_MDIO_H -#define _DT_BINDINGS_NET_MDIO_H - -/* - * EEE capability Advertisement - */ - -#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ -#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ -#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ -#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ -#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ -#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ - -#endif -- GitLab From 13fa36f9fbc84c47cef6673d5e2f3a20693d6eff Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Fri, 20 Jan 2017 08:20:24 -0800 Subject: [PATCH 589/786] ARM64: dts: meson-gxbb-odroidc2: fix GbE tx link breakage [ Upstream commit feb3cbea0946c67060e2d5bcb7499b0a6f6700fe ] OdroidC2 GbE link breaks under heavy tx transfer. This happens even if the MAC does not enable Energy Efficient Ethernet (No Low Power state Idle on the Tx path). The problem seems to come from the phy Rx path, entering the LPI state. Disabling EEE advertisement on the phy prevent this feature to be negociated with the link partner and solve the issue. 
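A note for readers following the EEE changes above: the eee-broken-1000t property used by this DTS fix feeds a bit into the mask that the PHY core clears from the MDIO_AN_EEE_ADV advertisement before restarting autonegotiation. The fragment below is a minimal, stand-alone sketch of that masking step, not the kernel code: the MDIO_EEE_* values match the constants shown earlier in this series, while fake_eee_adv and mask_eee_adv() are hypothetical stand-ins for the phy_read_mmd_indirect()/phy_write_mmd_indirect() accessors used by the real genphy_config_eee_advert():

#include <stdint.h>
#include <stdio.h>

/* EEE advertisement bits, matching the MDIO_AN_EEE_ADV layout. */
#define MDIO_EEE_100TX 0x0002
#define MDIO_EEE_1000T 0x0004

/* Hypothetical stand-in for the PHY register contents; the kernel goes
 * through phy_read_mmd_indirect()/phy_write_mmd_indirect() instead. */
static uint16_t fake_eee_adv = MDIO_EEE_100TX | MDIO_EEE_1000T;

/* Clear every advertisement bit listed in 'broken'; return 1 if the
 * advertisement changed, i.e. autonegotiation would need a restart. */
static int mask_eee_adv(uint16_t broken)
{
        uint16_t adv = fake_eee_adv;

        if ((adv & ~broken) == adv)
                return 0;
        fake_eee_adv = adv & ~broken;
        return 1;
}

int main(void)
{
        /* e.g. the eee-broken-1000t boolean property maps to MDIO_EEE_1000T */
        int changed = mask_eee_adv(MDIO_EEE_1000T);

        printf("adv=0x%04x changed=%d\n", (unsigned int)fake_eee_adv, changed);
        return 0;
}

Compiled on its own this prints adv=0x0002 changed=1: the 1000BASE-T EEE bit is dropped from the advertisement and autonegotiation is restarted, which is exactly the effect the Odroid-C2 change relies on.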
Signed-off-by: Jerome Brunet Signed-off-by: Kevin Hilman Signed-off-by: Arnd Bergmann Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index e6e3491d48a5..f150a4c63efe 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts @@ -85,6 +85,18 @@ status = "okay"; pinctrl-0 = <ð_pins>; pinctrl-names = "default"; + phy-handle = <ð_phy0>; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + eth_phy0: ethernet-phy@0 { + reg = <0>; + eee-broken-1000t; + }; + }; }; &ir { -- GitLab From afaee3ef513650b2f6cb9e2c860b9210875a8135 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Thu, 18 May 2017 17:28:48 +0200 Subject: [PATCH 590/786] xen/blkback: don't free be structure too early MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 71df1d7ccad1c36f7321d6b3b48f2ea42681c363 upstream. The be structure must not be freed when freeing the blkif structure isn't done. Otherwise a use-after-free of be when unmapping the ring used for communicating with the frontend will occur in case of a late call of xenblk_disconnect() (e.g. due to an I/O still active when trying to disconnect). Signed-off-by: Juergen Gross Tested-by: Steven Haigh Acked-by: Roger Pau Monné Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Greg Kroah-Hartman --- drivers/block/xen-blkback/xenbus.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index d8fc9c58e2a3..5dfe6e8af140 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -315,8 +315,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif) static void xen_blkif_free(struct xen_blkif *blkif) { - xen_blkif_disconnect(blkif); + WARN_ON(xen_blkif_disconnect(blkif)); xen_vbd_free(&blkif->vbd); + kfree(blkif->be->mode); + kfree(blkif->be); /* Make sure everything is drained before shutting down */ kmem_cache_free(xen_blkif_cachep, blkif); @@ -511,8 +513,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev) /* Put the reference we set in xen_blkif_alloc(). */ xen_blkif_put(be->blkif); - kfree(be->mode); - kfree(be); return 0; } -- GitLab From 80b1a1180e4e72fed893e5aba73fe7ccea7aa30e Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 17 Jan 2017 14:51:04 +0100 Subject: [PATCH 591/786] KVM: x86: fix fixing of hypercalls MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit ce2e852ecc9a42e4b8dabb46025cfef63209234a ] emulator_fix_hypercall() replaces hypercall with vmcall instruction, but it does not handle GP exception properly when writes the new instruction. It can return X86EMUL_PROPAGATE_FAULT without setting exception information. 
This leads to incorrect emulation and triggers WARN_ON(ctxt->exception.vector > 0x1f) in x86_emulate_insn() as discovered by syzkaller fuzzer: WARNING: CPU: 2 PID: 18646 at arch/x86/kvm/emulate.c:5558 Call Trace: warn_slowpath_null+0x2c/0x40 kernel/panic.c:582 x86_emulate_insn+0x16a5/0x4090 arch/x86/kvm/emulate.c:5572 x86_emulate_instruction+0x403/0x1cc0 arch/x86/kvm/x86.c:5618 emulate_instruction arch/x86/include/asm/kvm_host.h:1127 [inline] handle_exception+0x594/0xfd0 arch/x86/kvm/vmx.c:5762 vmx_handle_exit+0x2b7/0x38b0 arch/x86/kvm/vmx.c:8625 vcpu_enter_guest arch/x86/kvm/x86.c:6888 [inline] vcpu_run arch/x86/kvm/x86.c:6947 [inline] Set exception information when write in emulator_fix_hypercall() fails. Signed-off-by: Dmitry Vyukov Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Wanpeng Li Cc: kvm@vger.kernel.org Cc: syzkaller@googlegroups.com Signed-off-by: Radim Krčmář Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 62cde4f67c72..ab3f00399cbb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6111,7 +6111,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt) kvm_x86_ops->patch_hypercall(vcpu, instruction); - return emulator_write_emulated(ctxt, rip, instruction, 3, NULL); + return emulator_write_emulated(ctxt, rip, instruction, 3, + &ctxt->exception); } static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) -- GitLab From ee4494c6bda8ac530f85756e619c1727d2539b6c Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Thu, 12 Jan 2017 15:25:10 +0900 Subject: [PATCH 592/786] scsi: sd: Fix wrong DPOFUA disable in sd_read_cache_type [ Upstream commit 26f2819772af891dee2843e1f8662c58e5129d5f ] Zoned block devices force the use of READ/WRITE(16) commands by setting sdkp->use_16_for_rw and clearing sdkp->use_10_for_rw. This result in DPOFUA always being disabled for these drives as the assumed use of the deprecated READ/WRITE(6) commands only looks at sdkp->use_10_for_rw. Strenghten the test by also checking that sdkp->use_16_for_rw is false. Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/sd.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 931af0793951..13ac7e57a35d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2572,7 +2572,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) if (sdp->broken_fua) { sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); sdkp->DPOFUA = 0; - } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw && + !sdkp->device->use_16_for_rw) { sd_first_printk(KERN_NOTICE, sdkp, "Uses READ/WRITE(6), disabling FUA\n"); sdkp->DPOFUA = 0; -- GitLab From 7782ab228f64e7da4c47a90b40fbb80920ce722b Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 17 Jan 2017 12:23:21 +0100 Subject: [PATCH 593/786] stmmac: add missing of_node_put [ Upstream commit a249708bc2aa1fe3ddf15dfac22bee519d15996b ] The function stmmac_dt_phy provides several possibilities for initializing plat->mdio_node, all of which have the effect of increasing the reference count of the assigned value. This field is not updated elsewhere, so the value is live until the end of the lifetime of plat (devm_allocated), just after the end of stmmac_remove_config_dt. 
Thus, add an of_node_put on plat->mdio_node in stmmac_remove_config_dt. It is possible that the field mdio_node is never initialized, but of_node_put is NULL-safe, so it is also safe to call of_node_put in that case. Signed-off-by: Julia Lawall Acked-by: Alexandre TORGUE Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index ac3d39c69509..890e4b083f4f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -346,6 +346,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev, if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(plat->phy_node); + of_node_put(plat->mdio_node); } #else struct plat_stmmacenet_data * -- GitLab From 42a1d5b47594eb846f709f6558082919dabc7344 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Tue, 10 Jan 2017 12:05:54 +0100 Subject: [PATCH 594/786] scsi: lpfc: Set elsiocb contexts to NULL after freeing it [ Upstream commit 8667f515952feefebb3c0f8d9a9266c91b101a46 ] Set the elsiocb contexts to NULL after freeing as others depend on it. Signed-off-by: Johannes Thumshirn Acked-by: Dick Kennedy Signed-off-by: Martin K. Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_els.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index b7d54bfb1df9..7b696d108112 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb) } else { buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2; lpfc_els_free_data(phba, buf_ptr1); + elsiocb->context2 = NULL; } } if (elsiocb->context3) { buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3; lpfc_els_free_bpl(phba, buf_ptr); + elsiocb->context3 = NULL; } lpfc_sli_release_iocbq(phba, elsiocb); return 0; -- GitLab From 8cfcaa2899f322fa602e903e983389fd1de36fe8 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 23 Dec 2016 18:06:11 -0800 Subject: [PATCH 595/786] qla2xxx: Terminate exchange if corrupted [ Upstream commit 5f35509db179ca7ed1feaa4b14f841adb06ed220 ] Corrupted ATIO is defined as length of fcp_header & fcp_cmd payload is less than 0x38. It's the minimum size for a frame to carry 8..16 bytes SCSI CDB. The exchange will be dropped or terminated if corrupted. Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Reviewed-by: Christoph Hellwig [ bvanassche: Fixed spelling in patch title ] Signed-off-by: Bart Van Assche Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/qla2xxx/qla_def.h | 3 ++- drivers/scsi/qla2xxx/qla_target.c | 23 ++++++++++++++++++++--- drivers/scsi/qla2xxx/qla_target.h | 22 +++++++++++++++++++++- 3 files changed, 43 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 8e63a7b90277..91ec0689c714 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1555,7 +1555,8 @@ typedef struct { struct atio { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. 
*/ - uint8_t data[58]; + __le16 attr_n_length; + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ }; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index feab7ea8e823..ee450b6965f3 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -6463,12 +6463,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) if (!vha->flags.online) return; - while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) { + while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || + fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) { pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; cnt = pkt->u.raw.entry_count; - qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt, - ha_locked); + if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) { + /* + * This packet is corrupted. The header + payload + * can not be trusted. There is no point in passing + * it further up. + */ + ql_log(ql_log_warn, vha, 0xffff, + "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", + pkt->u.isp24.fcp_hdr.s_id, + be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), + le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); + + adjust_corrupted_atio(pkt); + qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0); + } else { + qlt_24xx_atio_pkt_all_vps(vha, + (struct atio_from_isp *)pkt, ha_locked); + } for (i = 0; i < cnt; i++) { ha->tgt.atio_ring_index++; diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index f26c5f60eedd..0824a8164a24 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -427,13 +427,33 @@ struct atio_from_isp { struct { uint8_t entry_type; /* Entry type. */ uint8_t entry_count; /* Entry count. */ - uint8_t data[58]; + __le16 attr_n_length; +#define FCP_CMD_LENGTH_MASK 0x0fff +#define FCP_CMD_LENGTH_MIN 0x38 + uint8_t data[56]; uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ } raw; } u; } __packed; +static inline int fcpcmd_is_corrupted(struct atio *atio) +{ + if (atio->entry_type == ATIO_TYPE7 && + (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < + FCP_CMD_LENGTH_MIN)) + return 1; + else + return 0; +} + +/* adjust corrupted atio so we won't trip over the same entry again. */ +static inline void adjust_corrupted_atio(struct atio_from_isp *atio) +{ + atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN); + atio->u.isp24.fcp_cmnd.add_cdb_len = 0; +} + #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ /* -- GitLab From 0c9626619777f76a4a6761a259dcad263b1902d3 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 23 Dec 2016 18:06:13 -0800 Subject: [PATCH 596/786] qla2xxx: Fix erroneous invalid handle message [ Upstream commit 4f060736f29a960aba8e781a88837464756200a8 ] Termination of Immediate Notify IOCB was using wrong IOCB handle. IOCB completion code was unable to find appropriate code path due to wrong handle. Following message is seen in the logs. "Error entry - invalid handle/queue (ffff)." 
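To make the 0x38 threshold in the "qla2xxx: Terminate exchange if corrupted" patch above easier to follow: the low 12 bits of attr_n_length carry the frame length, and a frame shorter than 0x38 bytes cannot hold the FCP header plus an 8..16 byte SCSI CDB. The fragment below re-derives that check on its own; it assumes host byte order (the driver converts the field from little endian first) and omits the surrounding atio structure:

#include <stdint.h>
#include <stdio.h>

#define FCP_CMD_LENGTH_MASK 0x0fff  /* low 12 bits = frame length */
#define FCP_CMD_LENGTH_MIN  0x38    /* FCP header + 8..16 byte CDB */

static int frame_too_short(uint16_t attr_n_length)
{
        return (attr_n_length & FCP_CMD_LENGTH_MASK) < FCP_CMD_LENGTH_MIN;
}

int main(void)
{
        printf("length 0x020 too short:   %d\n", frame_too_short(0x1020)); /* prints 1 */
        printf("length 0x038 long enough: %d\n", frame_too_short(0x1038)); /* prints 0 */
        return 0;
}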
Signed-off-by: Quinn Tran Signed-off-by: Himanshu Madhani Reviewed-by: Christoph Hellwig [ bvanassche: Fixed word order in patch title ] Signed-off-by: Bart Van Assche Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/qla2xxx/qla_isr.c | 4 ++++ drivers/scsi/qla2xxx/qla_target.c | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 068c4e47fac9..bddaabb288d4 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2487,6 +2487,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) if (pkt->entry_status & RF_BUSY) res = DID_BUS_BUSY << 16; + if (pkt->entry_type == NOTIFY_ACK_TYPE && + pkt->handle == QLA_TGT_SKIP_HANDLE) + return; + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { sp->done(ha, sp, res); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index ee450b6965f3..91f5f55a8a9b 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -3067,7 +3067,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, pkt->entry_type = NOTIFY_ACK_TYPE; pkt->entry_count = 1; - pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; + pkt->handle = QLA_TGT_SKIP_HANDLE; nack = (struct nack_to_isp *)pkt; nack->ox_id = ntfy->ox_id; -- GitLab From 9f2a36a7504c994f89a1fe8e4d94b8d43423816f Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Jan 2017 20:03:59 +0800 Subject: [PATCH 597/786] drm/amdgpu: fix program vce instance logic error. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 50a1ebc70a2803deb7811fc73fb55d70e353bc34 ] need to clear bit31-29 in GRBM_GFX_INDEX, then the program can be valid. 
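For the register arithmetic behind this VCE fix: GET_VCE_INSTANCE(i) places the instance number at bit 4 and sets the all-pipes bits, which leaves bits 31-29 (the broadcast selects the commit message refers to) cleared, while mmGRBM_GFX_INDEX_DEFAULT (0xE0000000) sets them again once the per-instance programming is done. A stand-alone sketch using only the constants from the diff below; the printf harness is illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Constants as introduced in vce_v3_0.c by this patch. */
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_ALL_PIPE        0x07
#define mmGRBM_GFX_INDEX_DEFAULT            0xE0000000u

#define GET_VCE_INSTANCE(i) (((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT) | \
                             GRBM_GFX_INDEX__VCE_ALL_PIPE)

int main(void)
{
        /* Selecting a specific instance keeps bits 31-29 cleared ... */
        printf("instance 0 -> 0x%08x\n", (unsigned int)GET_VCE_INSTANCE(0)); /* 0x00000007 */
        printf("instance 1 -> 0x%08x\n", (unsigned int)GET_VCE_INSTANCE(1)); /* 0x00000017 */
        /* ... and restoring the default sets them again for broadcast. */
        printf("default    -> 0x%08x\n", (unsigned int)mmGRBM_GFX_INDEX_DEFAULT); /* 0xe0000000 */
        return 0;
}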
Signed-off-by: Rex Zhu Acked-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 6feed726e299..50f0cf2788b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -43,9 +43,13 @@ #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 +#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07 + #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 +#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000 + #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02 #define VCE_V3_0_FW_SIZE (384 * 1024) @@ -54,6 +58,9 @@ #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8)) +#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \ + | GRBM_GFX_INDEX__VCE_ALL_PIPE) + static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); @@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); vce_v3_0_mc_resume(adev, idx); WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); @@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) } } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) if (adev->vce.harvest_config & (1 << idx)) continue; - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); if (adev->asic_type >= CHIP_STONEY) WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001); @@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev) vce_v3_0_set_vce_sw_clock_gating(adev, false); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle) * VCE team suggest use bit 3--bit 6 for busy status check */ mutex_lock(&adev->grbm_idx_mutex); - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) { srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); } - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); mutex_unlock(&adev->grbm_idx_mutex); if (srbm_soft_reset) { @@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, if (adev->vce.harvest_config & (1 << i)) continue; - 
WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i); + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i)); if (enable) { /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */ @@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, vce_v3_0_set_vce_sw_clock_gating(adev, enable); } - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0); + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); mutex_unlock(&adev->grbm_idx_mutex); return 0; -- GitLab From 10c24e89b2b86907fc9588db1fa7300e9a1a194a Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Jan 2017 15:06:58 -0500 Subject: [PATCH 598/786] drm/amdgpu: add support for new hainan variants [ Upstream commit 17324b6add82d6c0bf119f1d1944baef392a4e39 ] New hainan parts require updated smc firmware. Cc: Sonny Jiang Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 6f3c89178b6a..4cb347e88cf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin"); MODULE_FIRMWARE("radeon/oland_k_smc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/banks_k_2_smc.bin"); union power_info { struct _ATOM_POWERPLAY_INFO info; @@ -7721,10 +7722,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev) ((adev->pdev->device == 0x6660) || (adev->pdev->device == 0x6663) || (adev->pdev->device == 0x6665) || - (adev->pdev->device == 0x6667))) || - ((adev->pdev->revision == 0xc3) && - (adev->pdev->device == 0x6665))) + (adev->pdev->device == 0x6667)))) chip_name = "hainan_k"; + else if ((adev->pdev->revision == 0xc3) && + (adev->pdev->device == 0x6665)) + chip_name = "banks_k_2"; else chip_name = "hainan"; break; -- GitLab From 3eeb3459b7e6ec77d0ca2ae1bc82ecefe16d4c50 Mon Sep 17 00:00:00 2001 From: "Alvaro G. M" Date: Tue, 17 Jan 2017 09:08:16 +0100 Subject: [PATCH 599/786] net: phy: dp83848: add DP83620 PHY support [ Upstream commit 93b43fd137cd8865adf9978ab9870a344365d3af ] This PHY with fiber support is register compatible with DP83848, so add support for it. Signed-off-by: Alvaro Gamez Machado Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/dp83848.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 800b39f06279..a10d0e7fc5f7 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -17,6 +17,7 @@ #include #define TI_DP83848C_PHY_ID 0x20005ca0 +#define TI_DP83620_PHY_ID 0x20005ce0 #define NS_DP83848C_PHY_ID 0x20005c90 #define TLK10X_PHY_ID 0x2000a210 #define TI_DP83822_PHY_ID 0x2000a240 @@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev) static struct mdio_device_id __maybe_unused dp83848_tbl[] = { { TI_DP83848C_PHY_ID, 0xfffffff0 }, { NS_DP83848C_PHY_ID, 0xfffffff0 }, + { TI_DP83620_PHY_ID, 0xfffffff0 }, { TLK10X_PHY_ID, 0xfffffff0 }, { TI_DP83822_PHY_ID, 0xfffffff0 }, { } @@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); static struct phy_driver dp83848_driver[] = { DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"), DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"), }; -- GitLab From fded17be01abfefe7218a72df703d8fe6b28206f Mon Sep 17 00:00:00 2001 From: Zhou Chengming Date: Mon, 16 Jan 2017 11:21:11 +0800 Subject: [PATCH 600/786] perf/x86/intel: Handle exclusive threadid correctly on CPU hotplug [ Upstream commit 4e71de7986386d5fd3765458f27d612931f27f5e ] The CPU hotplug function intel_pmu_cpu_starting() sets cpu_hw_events.excl_thread_id unconditionally to 1 when the shared exclusive counters data structure is already availabe for the sibling thread. This works during the boot process because the first sibling gets threadid 0 assigned and the second sibling which shares the data structure gets 1. But when the first thread of the core is offlined and onlined again it shares the data structure with the second thread and gets exclusive thread id 1 assigned as well. Prevent this by checking the threadid of the already online thread. 
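The hotplug scenario in this changelog can be replayed with a toy model. The fragment below is plain C rather than the kernel's cpu_hw_events/intel_excl_cntrs structures, and starting_old()/starting_new() are invented names for the pre-fix and post-fix assignment rules: two hyperthread siblings share one exclusive-counter structure and must end up with distinct ids, the old rule ("whoever finds the shared structure takes id 1") collides after the first sibling is offlined and onlined again, and the new rule checks the id already taken by the online sibling:

#include <stdio.h>

/* Toy model of two HT siblings of one core sharing excl_cntrs. */
static int online[2];
static int excl_thread_id[2];

/* Pre-fix rule: finding the shared structure at an online sibling
 * always assigns exclusive thread id 1. */
static void starting_old(int cpu)
{
        int sib = !cpu;

        excl_thread_id[cpu] = online[sib] ? 1 : 0;
        online[cpu] = 1;
}

/* Post-fix rule: only take id 1 if the online sibling is not using it. */
static void starting_new(int cpu)
{
        int sib = !cpu;

        excl_thread_id[cpu] = (online[sib] && !excl_thread_id[sib]) ? 1 : 0;
        online[cpu] = 1;
}

int main(void)
{
        /* Boot CPU0 then CPU1, then offline/online CPU0 again. */
        starting_old(0); starting_old(1);
        online[0] = 0; starting_old(0);
        printf("old rule: cpu0=%d cpu1=%d (collision)\n",
               excl_thread_id[0], excl_thread_id[1]);

        online[0] = online[1] = 0;
        starting_new(0); starting_new(1);
        online[0] = 0; starting_new(0);
        printf("new rule: cpu0=%d cpu1=%d (distinct)\n",
               excl_thread_id[0], excl_thread_id[1]);
        return 0;
}

Running it prints "old rule: cpu0=1 cpu1=1" and "new rule: cpu0=0 cpu1=1", matching the failure mode and the fix described above.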
[ tglx: Rewrote changelog ] Signed-off-by: Zhou Chengming Cc: NuoHan Qiao Cc: ak@linux.intel.com Cc: peterz@infradead.org Cc: kan.liang@intel.com Cc: dave.hansen@linux.intel.com Cc: eranian@google.com Cc: qiaonuohan@huawei.com Cc: davidcc@google.com Cc: guohanjun@huawei.com Link: http://lkml.kernel.org/r/1484536871-3131-1-git-send-email-zhouchengming1@huawei.com Signed-off-by: Thomas Gleixner Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/core.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 3bdb917716b1..24a6cd24fac4 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3164,13 +3164,16 @@ static void intel_pmu_cpu_starting(int cpu) if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { for_each_cpu(i, topology_sibling_cpumask(cpu)) { + struct cpu_hw_events *sibling; struct intel_excl_cntrs *c; - c = per_cpu(cpu_hw_events, i).excl_cntrs; + sibling = &per_cpu(cpu_hw_events, i); + c = sibling->excl_cntrs; if (c && c->core_id == core_id) { cpuc->kfree_on_online[1] = cpuc->excl_cntrs; cpuc->excl_cntrs = c; - cpuc->excl_thread_id = 1; + if (!sibling->excl_thread_id) + cpuc->excl_thread_id = 1; break; } } -- GitLab From ea7b808165a5161fcd148c8b41fa03e79e65cb82 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 23 Dec 2016 19:56:56 -0800 Subject: [PATCH 601/786] net: korina: Fix NAPI versus resources freeing commit e6afb1ad88feddf2347ea779cfaf4d03d3cd40b6 upstream. Commit beb0babfb77e ("korina: disable napi on close and restart") introduced calls to napi_disable() that were missing before, unfortunately this leaves a small window during which NAPI has a chance to run, yet we just freed resources since korina_free_ring() has been called: Fix this by disabling NAPI first then freeing resource, and make sure that we also cancel the restart task before doing the resource freeing. Fixes: beb0babfb77e ("korina: disable napi on close and restart") Reported-by: Alexandros C. Couloumbis Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Amit Pundir Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/korina.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 1799fe1415df..c051987aab83 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work) DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR, &lp->rx_dma_regs->dmasm); - korina_free_ring(dev); - napi_disable(&lp->napi); + korina_free_ring(dev); + if (korina_init(dev) < 0) { printk(KERN_ERR "%s: cannot restart device\n", dev->name); return; @@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev) tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR; writel(tmp, &lp->rx_dma_regs->dmasm); - korina_free_ring(dev); - napi_disable(&lp->napi); cancel_work_sync(&lp->restart_task); + korina_free_ring(dev); + free_irq(lp->rx_irq, dev); free_irq(lp->tx_irq, dev); free_irq(lp->ovr_irq, dev); -- GitLab From 6e315b2b10b65022ce07e6ed3e2decf7678d58c2 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Fri, 6 Jan 2017 10:39:49 +1100 Subject: [PATCH 602/786] powerpc/eeh: Enable IO path on permanent error [ Upstream commit 387bbc974f6adf91aa635090f73434ed10edd915 ] We give up recovery on permanent error, simply shutdown the affected devices and remove them. 
If the devices can't be put into quiet state, they spew more traffic that is likely to cause another unexpected EEH error. This was observed on "p8dtu2u" machine: 0002:00:00.0 PCI bridge: IBM Device 03dc 0002:01:00.0 Ethernet controller: Intel Corporation \ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02) 0002:01:00.1 Ethernet controller: Intel Corporation \ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02) 0002:01:00.2 Ethernet controller: Intel Corporation \ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02) 0002:01:00.3 Ethernet controller: Intel Corporation \ Ethernet Controller X710/X557-AT 10GBASE-T (rev 02) On P8 PowerNV platform, the IO path is frozen when shutdowning the devices, meaning the memory registers are inaccessible. It is why the devices can't be put into quiet state before removing them. This fixes the issue by enabling IO path prior to putting the devices into quiet state. Reported-by: Pridhiviraj Paidipeddi Signed-off-by: Gavin Shan Acked-by: Russell Currey Signed-off-by: Michael Ellerman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/eeh.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index f25731627d7f..e5bfbf62827a 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) * * For pHyp, we have to enable IO for log retrieval. Otherwise, * 0xFF's is always returned from PCI config space. + * + * When the @severity is EEH_LOG_PERM, the PE is going to be + * removed. Prior to that, the drivers for devices included in + * the PE will be closed. The drivers rely on working IO path + * to bring the devices to quiet state. Otherwise, PCI traffic + * from those devices after they are removed is like to cause + * another unexpected EEH error. */ if (!(pe->type & EEH_PE_PHB)) { - if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) + if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) || + severity == EEH_LOG_PERM) eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); /* -- GitLab From 5dcd085942761174f6ff1271fe707e4e2308d64c Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Tue, 17 Jan 2017 19:19:17 +0200 Subject: [PATCH 603/786] net: ethtool: Initialize buffer when querying device channel settings [ Upstream commit 31a86d137219373c3222ca5f4f912e9a4d8065bb ] Ethtool channels respond struct was uninitialized when querying device channel boundaries settings. As a result, unreported fields by the driver hold garbage. This may cause sending unsupported params to driver. Fixes: 8bf368620486 ('ethtool: ensure channel counts are within bounds ...') Signed-off-by: Eran Ben Elisha Signed-off-by: Tariq Toukan CC: John W. Linville Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/core/ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 072c1f4998c9..e9989b835a66 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -1704,7 +1704,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev, static noinline_for_stack int ethtool_set_channels(struct net_device *dev, void __user *useraddr) { - struct ethtool_channels channels, max; + struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS }; u32 max_rx_in_use = 0; if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels) -- GitLab From 7bdccaa5da12f294636de312c73d7d33dfaa947c Mon Sep 17 00:00:00 2001 From: Igor Druzhinin Date: Tue, 17 Jan 2017 20:49:37 +0000 Subject: [PATCH 604/786] xen-netback: fix memory leaks on XenBus disconnect [ Upstream commit 9a6cdf52b85ea5fb21d2bb31e4a7bc61b79923a7 ] Eliminate memory leaks introduced several years ago by cleaning the queue resources which are allocated on XenBus connection event. Namely, queue structure array and pages used for IO rings. Signed-off-by: Igor Druzhinin Reviewed-by: Paul Durrant Acked-by: Wei Liu Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/xen-netback/xenbus.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 8674e188b697..abffdbded5f2 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -493,11 +493,20 @@ static int backend_create_xenvif(struct backend_info *be) static void backend_disconnect(struct backend_info *be) { if (be->vif) { + unsigned int queue_index; + xen_unregister_watchers(be->vif); #ifdef CONFIG_DEBUG_FS xenvif_debugfs_delif(be->vif); #endif /* CONFIG_DEBUG_FS */ xenvif_disconnect_data(be->vif); + for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) + xenvif_deinit_queue(&be->vif->queues[queue_index]); + + vfree(be->vif->queues); + be->vif->num_queues = 0; + be->vif->queues = NULL; + xenvif_disconnect_ctrl(be->vif); } } @@ -1040,6 +1049,8 @@ static void connect(struct backend_info *be) err: if (be->vif->num_queues > 0) xenvif_disconnect_data(be->vif); /* Clean up existing queues */ + for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) + xenvif_deinit_queue(&be->vif->queues[queue_index]); vfree(be->vif->queues); be->vif->queues = NULL; be->vif->num_queues = 0; -- GitLab From da805bc788b0dfce728b22d2595e569d2ee9769e Mon Sep 17 00:00:00 2001 From: Igor Druzhinin Date: Tue, 17 Jan 2017 20:49:38 +0000 Subject: [PATCH 605/786] xen-netback: protect resource cleaning on XenBus disconnect [ Upstream commit f16f1df65f1cf139ff9e9f84661e6573d6bb27fc ] vif->lock is used to protect statistics gathering agents from using the queue structure during cleaning. Signed-off-by: Igor Druzhinin Acked-by: Wei Liu Reviewed-by: Paul Durrant Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/xen-netback/interface.c | 6 ++++-- drivers/net/xen-netback/xenbus.c | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 74dc2bf71428..b009d7966b46 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) { struct xenvif *vif = netdev_priv(dev); struct xenvif_queue *queue = NULL; - unsigned int num_queues = vif->num_queues; unsigned long rx_bytes = 0; unsigned long rx_packets = 0; unsigned long tx_bytes = 0; unsigned long tx_packets = 0; unsigned int index; + spin_lock(&vif->lock); if (vif->queues == NULL) goto out; /* Aggregate tx and rx stats from each queue */ - for (index = 0; index < num_queues; ++index) { + for (index = 0; index < vif->num_queues; ++index) { queue = &vif->queues[index]; rx_bytes += queue->stats.rx_bytes; rx_packets += queue->stats.rx_packets; @@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev) } out: + spin_unlock(&vif->lock); + vif->dev->stats.rx_bytes = rx_bytes; vif->dev->stats.rx_packets = rx_packets; vif->dev->stats.tx_bytes = tx_bytes; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index abffdbded5f2..b44f37fff890 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -503,9 +503,11 @@ static void backend_disconnect(struct backend_info *be) for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index) xenvif_deinit_queue(&be->vif->queues[queue_index]); + spin_lock(&be->vif->lock); vfree(be->vif->queues); be->vif->num_queues = 0; be->vif->queues = NULL; + spin_unlock(&be->vif->lock); xenvif_disconnect_ctrl(be->vif); } -- GitLab From a7a2a6d34fe78261945a5eb5eeca6c4fa3ad800e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Tue, 17 Jan 2017 22:07:19 -0500 Subject: [PATCH 606/786] bnxt_en: Fix "uninitialized variable" bug in TPA code path. [ Upstream commit 719ca8111402aa6157bd83a3c966d184db0d8956 ] In the TPA GRO code path, initialize the tcp_opt_len variable to 0 so that it will be correct for packets without TCP timestamps. The bug caused the SKB fields to be incorrectly set up for packets without TCP timestamps, leading to these packets being rejected by the stack. Reported-by: Andy Gospodarek Acked-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5cc0f8cfec87..20e569bd978a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1097,7 +1097,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, { #ifdef CONFIG_INET struct tcphdr *th; - int len, nw_off, tcp_opt_len; + int len, nw_off, tcp_opt_len = 0; if (tcp_ts) tcp_opt_len = 12; -- GitLab From 251d00bf1309c65316f5bd3850b2ca523b46921c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 18 Jan 2017 15:14:17 +0100 Subject: [PATCH 607/786] bpf: don't trigger OOM killer under pressure with map alloc [ Upstream commit d407bd25a204bd66b7346dde24bd3d37ef0e0b05 ] This patch adds two helpers, bpf_map_area_alloc() and bpf_map_area_free(), that are to be used for map allocations. Using kmalloc() for very large allocations can cause excessive work within the page allocator, so i) fall back earlier to vmalloc() when the attempt is considered costly anyway, and even more importantly ii) don't trigger OOM killer with any of the allocators. Since this is based on a user space request, for example, when creating maps with element pre-allocation, we really want such requests to fail instead of killing other user space processes. Also, don't spam the kernel log with warnings should any of the allocations fail under pressure. Given that, we can make backend selection in bpf_map_area_alloc() generic, and convert all maps over to use this API for spots with potentially large allocation requests. Note, replacing the one kmalloc_array() is fine as overflow checks happen earlier in htab_map_alloc(), since it must also protect the multiplication for vmalloc() should kmalloc_array() fail. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- include/linux/bpf.h | 2 ++ kernel/bpf/arraymap.c | 18 +++++++----------- kernel/bpf/hashtab.c | 22 +++++++++------------- kernel/bpf/stackmap.c | 20 ++++++++------------ kernel/bpf/syscall.c | 26 ++++++++++++++++++++++++++ 5 files changed, 52 insertions(+), 36 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c201017b5730..97498be2ca2e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -243,6 +243,8 @@ struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); +void *bpf_map_area_alloc(size_t size); +void bpf_map_area_free(void *base); extern int sysctl_unprivileged_bpf_disabled; diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index a2ac051c342f..f3721e150d94 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -11,7 +11,6 @@ */ #include #include -#include #include #include #include @@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) if (array_size >= U32_MAX - PAGE_SIZE) return ERR_PTR(-ENOMEM); - /* allocate all map elements and zero-initialize them */ - array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); - if (!array) { - array = vzalloc(array_size); - if (!array) - return ERR_PTR(-ENOMEM); - } + array = bpf_map_area_alloc(array_size); + if (!array) + return ERR_PTR(-ENOMEM); /* copy mandatory map attributes */ array->map.map_type = attr->map_type; @@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) if (array_size >= U32_MAX - PAGE_SIZE || elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { - kvfree(array); + bpf_map_area_free(array); return ERR_PTR(-ENOMEM); } out: @@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map) if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) bpf_array_free_percpu(array); - kvfree(array); + bpf_map_area_free(array); } static const struct bpf_map_ops array_ops = { @@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map) /* make sure it's empty */ for (i = 0; i < array->map.max_entries; i++) BUG_ON(array->ptrs[i] != NULL); - kvfree(array); + + bpf_map_area_free(array); } static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index ad1bc67aff1b..ad2f0ed75471 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -13,7 +13,6 @@ #include #include #include -#include #include "percpu_freelist.h" struct bucket { @@ -84,14 +83,15 @@ static void htab_free_elems(struct bpf_htab *htab) free_percpu(pptr); } free_elems: - vfree(htab->elems); + bpf_map_area_free(htab->elems); } static int prealloc_elems_and_freelist(struct bpf_htab *htab) { int err = -ENOMEM, i; - htab->elems = vzalloc(htab->elem_size * htab->map.max_entries); + htab->elems = bpf_map_area_alloc(htab->elem_size * + htab->map.max_entries); if (!htab->elems) return -ENOMEM; @@ -227,14 +227,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) goto free_htab; err = -ENOMEM; - htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket), - GFP_USER | __GFP_NOWARN); - - if (!htab->buckets) { - htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket)); - if (!htab->buckets) - goto free_htab; - } + htab->buckets = bpf_map_area_alloc(htab->n_buckets * + sizeof(struct bucket)); + if (!htab->buckets) + goto free_htab; for (i = 0; i < htab->n_buckets; 
i++) { INIT_HLIST_HEAD(&htab->buckets[i].head); @@ -258,7 +254,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) free_extra_elems: free_percpu(htab->extra_elems); free_buckets: - kvfree(htab->buckets); + bpf_map_area_free(htab->buckets); free_htab: kfree(htab); return ERR_PTR(err); @@ -715,7 +711,7 @@ static void htab_map_free(struct bpf_map *map) pcpu_freelist_destroy(&htab->freelist); } free_percpu(htab->extra_elems); - kvfree(htab->buckets); + bpf_map_area_free(htab->buckets); kfree(htab); } diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 732ae16d12b7..be8519148c25 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include "percpu_freelist.h" @@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; int err; - smap->elems = vzalloc(elem_size * smap->map.max_entries); + smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries); if (!smap->elems) return -ENOMEM; @@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) return 0; free_elems: - vfree(smap->elems); + bpf_map_area_free(smap->elems); return err; } @@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) if (cost >= U32_MAX - PAGE_SIZE) return ERR_PTR(-E2BIG); - smap = kzalloc(cost, GFP_USER | __GFP_NOWARN); - if (!smap) { - smap = vzalloc(cost); - if (!smap) - return ERR_PTR(-ENOMEM); - } + smap = bpf_map_area_alloc(cost); + if (!smap) + return ERR_PTR(-ENOMEM); err = -E2BIG; cost += n_buckets * (value_size + sizeof(struct stack_map_bucket)); @@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) put_buffers: put_callchain_buffers(); free_smap: - kvfree(smap); + bpf_map_area_free(smap); return ERR_PTR(err); } @@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map) /* wait for bpf programs to complete before freeing stack map */ synchronize_rcu(); - vfree(smap->elems); + bpf_map_area_free(smap->elems); pcpu_freelist_destroy(&smap->freelist); - kvfree(smap); + bpf_map_area_free(smap); put_callchain_buffers(); } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 237f3d6a7ddc..72ea91df71c9 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -12,6 +12,8 @@ #include #include #include +#include +#include #include #include #include @@ -48,6 +50,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl) list_add(&tl->list_node, &bpf_map_types); } +void *bpf_map_area_alloc(size_t size) +{ + /* We definitely need __GFP_NORETRY, so OOM killer doesn't + * trigger under memory pressure as we really just want to + * fail instead. 
+ */ + const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO; + void *area; + + if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { + area = kmalloc(size, GFP_USER | flags); + if (area != NULL) + return area; + } + + return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags, + PAGE_KERNEL); +} + +void bpf_map_area_free(void *area) +{ + kvfree(area); +} + int bpf_map_precharge_memlock(u32 pages) { struct user_struct *user = get_current_user(); -- GitLab From c48a862c47d481838b26f5d6cd5c29e2064339da Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Wed, 18 Jan 2017 14:29:21 +0100 Subject: [PATCH 608/786] objtool: Fix IRET's opcode [ Upstream commit b5b46c4740aed1538544f0fa849c5b76c7823469 ] The IRET opcode is 0xcf according to the Intel manual and also to objdump of my vmlinux: 1ea8: 48 cf iretq Fix the opcode in arch_decode_instruction(). The previous value (0xc5) seems to correspond to LDS. Signed-off-by: Jiri Slaby Acked-by: Josh Poimboeuf Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170118132921.19319-1-jslaby@suse.cz Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/objtool/arch/x86/decode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 5e0dea2cdc01..039636ffb6c8 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, *type = INSN_RETURN; break; - case 0xc5: /* iret */ case 0xca: /* retf */ case 0xcb: /* retf */ + case 0xcf: /* iret */ *type = INSN_CONTEXT_SWITCH; break; -- GitLab From 83571e9ef7c91ef6e249aae374de068b30963551 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 18 Jan 2017 19:44:42 -0800 Subject: [PATCH 609/786] gianfar: Do not reuse pages from emergency reserve [ Upstream commit 69fed99baac186013840ced3524562841296034f ] A driver using dev_alloc_page() must not reuse a page that had to use emergency memory reserve. Otherwise all packets using this page will be immediately dropped, unless for very specific sockets having SOCK_MEMALLOC bit set. This issue might be hard to debug, because only a fraction of the RX ring buffer would suffer from drops. Fixes: 75354148ce69 ("gianfar: Add paged allocation and Rx S/G") Signed-off-by: Eric Dumazet Cc: Claudiu Manoil Acked-by: Claudiu Manoil Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/freescale/gianfar.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index d391beebe591..3f4e71148808 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2951,7 +2951,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, } /* try reuse page */ - if (unlikely(page_count(page) != 1)) + if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page))) return false; /* change offset to the other half */ -- GitLab From e8b5068b64d0505fe138e3db243e6e3385ae1a15 Mon Sep 17 00:00:00 2001 From: Chandan Rajendra Date: Fri, 23 Dec 2016 15:00:18 +0530 Subject: [PATCH 610/786] Btrfs: Fix deadlock between direct IO and fast fsync [ Upstream commit 97dcdea076ecef41ea4aaa23d4397c2f622e4265 ] The following deadlock is seen when executing generic/113 test, ---------------------------------------------------------+---------------------------------------------------- Direct I/O task Fast fsync task ---------------------------------------------------------+---------------------------------------------------- btrfs_direct_IO __blockdev_direct_IO do_blockdev_direct_IO do_direct_IO btrfs_get_blocks_direct while (blocks needs to written) get_more_blocks (first iteration) btrfs_get_blocks_direct btrfs_create_dio_extent down_read(&BTRFS_I(inode) >dio_sem) Create and add extent map and ordered extent up_read(&BTRFS_I(inode) >dio_sem) btrfs_sync_file btrfs_log_dentry_safe btrfs_log_inode_parent btrfs_log_inode btrfs_log_changed_extents down_write(&BTRFS_I(inode) >dio_sem) Collect new extent maps and ordered extents wait for ordered extent completion get_more_blocks (second iteration) btrfs_get_blocks_direct btrfs_create_dio_extent down_read(&BTRFS_I(inode) >dio_sem) -------------------------------------------------------------------------------------------------------------- In the above description, Btrfs direct I/O code path has not yet started submitting bios for file range covered by the initial ordered extent. Meanwhile, The fast fsync task obtains the write semaphore and waits for I/O on the ordered extent to get completed. However, the Direct I/O task is now blocked on obtaining the read semaphore. To resolve the deadlock, this commit modifies the Direct I/O code path to obtain the read semaphore before invoking __blockdev_direct_IO(). The semaphore is then given up after __blockdev_direct_IO() returns. This allows the Direct I/O code to complete I/O on all the ordered extents it creates. 
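In outline, the reordered lock scope in btrfs_direct_IO() looks like the sketch below (write path only; unrelated arguments and error handling elided, see the full hunk that follows):

	if (iov_iter_rw(iter) == WRITE) {
		...
		current->journal_info = &dio_data;
		down_read(&BTRFS_I(inode)->dio_sem);	/* taken once, before any extent is created */
	}

	ret = __blockdev_direct_IO(iocb, inode, ..., iter,
				   btrfs_get_blocks_direct, NULL,
				   btrfs_submit_direct, flags);

	if (iov_iter_rw(iter) == WRITE) {
		up_read(&BTRFS_I(inode)->dio_sem);	/* dropped only after all ordered extents are submitted */
		current->journal_info = NULL;
		...
	}
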
Signed-off-by: Chandan Rajendra Reviewed-by: Filipe Manana Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index bddbae796941..cada3f977baf 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7235,7 +7235,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, struct extent_map *em = NULL; int ret; - down_read(&BTRFS_I(inode)->dio_sem); if (type != BTRFS_ORDERED_NOCOW) { em = create_pinned_em(inode, start, len, orig_start, block_start, block_len, orig_block_len, @@ -7254,7 +7253,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, em = ERR_PTR(ret); } out: - up_read(&BTRFS_I(inode)->dio_sem); return em; } @@ -8707,6 +8705,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) dio_data.unsubmitted_oe_range_start = (u64)offset; dio_data.unsubmitted_oe_range_end = (u64)offset; current->journal_info = &dio_data; + down_read(&BTRFS_I(inode)->dio_sem); } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags)) { inode_dio_end(inode); @@ -8719,6 +8718,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) iter, btrfs_get_blocks_direct, NULL, btrfs_submit_direct, flags); if (iov_iter_rw(iter) == WRITE) { + up_read(&BTRFS_I(inode)->dio_sem); current->journal_info = NULL; if (ret < 0 && ret != -EIOCBQUEUED) { if (dio_data.reserve) -- GitLab From c3eab85ff11a8cd4def8cf2b4cc0610f6b47a8cd Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Thu, 1 Dec 2016 13:43:31 -0800 Subject: [PATCH 611/786] Btrfs: fix truncate down when no_holes feature is enabled [ Upstream commit 91298eec05cd8d4e828cf7ee5d4a6334f70cf69a ] For such a file mapping, [0-4k][hole][8k-12k] In NO_HOLES mode, we don't have the [hole] extent any more. Commit c1aa45759e90 ("Btrfs: fix shrinking truncate when the no_holes feature is enabled") fixed disk isize not being updated in NO_HOLES mode when data is not flushed. However, even if data has been flushed, we can still have trouble in updating disk isize since we updated disk isize to 'start' of the last evicted extent. Reviewed-by: Chris Mason Signed-off-by: Liu Bo Signed-off-by: David Sterba Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/inode.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index cada3f977baf..a2a014b19f18 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4480,8 +4480,19 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, if (found_type > min_type) { del_item = 1; } else { - if (item_end < new_size) + if (item_end < new_size) { + /* + * With NO_HOLES mode, for the following mapping + * + * [0-4k][hole][8k-12k] + * + * if truncating isize down to 6k, it ends up + * isize being 8k. + */ + if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) + last_size = new_size; break; + } if (found_key.offset >= new_size) del_item = 1; else -- GitLab From 884baf2abf6dd0273b821a1f9e06023438528a52 Mon Sep 17 00:00:00 2001 From: "G. Campana" Date: Thu, 19 Jan 2017 23:37:46 +0200 Subject: [PATCH 612/786] virtio_console: fix a crash in config_work_handler [ Upstream commit 8379cadf71c3ee8173a1c6fc1ea7762a9638c047 ] Using control_work instead of config_work as the 3rd argument to container_of results in an invalid portdev pointer. 
Indeed, the work structure is initialized as below: INIT_WORK(&portdev->config_work, &config_work_handler); It leads to a crash when portdev->vdev is dereferenced later. This bug is triggered when the guest uses a virtio-console without multiport feature and receives a config_changed virtio interrupt. Signed-off-by: G. Campana Reviewed-by: Amit Shah Signed-off-by: Michael S. Tsirkin Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/char/virtio_console.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 471a301d63e3..8f890c1aca57 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1870,7 +1870,7 @@ static void config_work_handler(struct work_struct *work) { struct ports_device *portdev; - portdev = container_of(work, struct ports_device, control_work); + portdev = container_of(work, struct ports_device, config_work); if (!use_multiport(portdev)) { struct virtio_device *vdev; struct port *port; -- GitLab From 72191c7d82e7a559ef05b1b89e6365911a8726aa Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Thu, 19 Jan 2017 10:39:09 -0800 Subject: [PATCH 613/786] swiotlb-xen: update dev_addr after swapping pages [ Upstream commit f1225ee4c8fcf09afaa199b8b1f0450f38b8cd11 ] In xen_swiotlb_map_page and xen_swiotlb_map_sg_attrs, if the original page is not suitable, we swap it for another page from the swiotlb pool. In these cases, we don't update the previously calculated dma address for the page before calling xen_dma_map_page. Thus, we end up calling xen_dma_map_page passing the wrong dev_addr, resulting in xen_dma_map_page mistakenly assuming that the page is foreign when it is local. Fix the bug by updating dev_addr appropriately. This change has no effect on x86, because xen_dma_map_page is a stub there. 
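The core of the change, pulled out of the hunks below for readability (a condensed sketch, not the full function):

	/* map: physical address of the (possibly substituted) bounce page */
	dev_addr = xen_phys_to_bus(map);	/* recompute from the new page first */
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);

Previously dev_addr was refreshed only after the xen_dma_map_page() call, so the helper was handed the stale address of the original page. The scatter-gather path gets the same treatment, with sg->dma_address set from the recomputed dev_addr.
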
Signed-off-by: Stefano Stabellini Signed-off-by: Pooya Keshavarzi Tested-by: Pooya Keshavarzi Reviewed-by: Boris Ostrovsky Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/xen/swiotlb-xen.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 8e7a3d646531..679f79f68182 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -409,9 +409,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, if (map == SWIOTLB_MAP_ERROR) return DMA_ERROR_CODE; + dev_addr = xen_phys_to_bus(map); xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, size, dir, attrs); - dev_addr = xen_phys_to_bus(map); /* * Ensure that the address returned is DMA'ble @@ -567,13 +567,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, sg_dma_len(sgl) = 0; return 0; } + dev_addr = xen_phys_to_bus(map); xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), dev_addr, map & ~PAGE_MASK, sg->length, dir, attrs); - sg->dma_address = xen_phys_to_bus(map); + sg->dma_address = dev_addr; } else { /* we are not interested in the dma_addr returned by * xen_dma_map_page, only in the potential cache flushes executed -- GitLab From 5d5c293af8348b540ef721d810f7549ac3ab81c2 Mon Sep 17 00:00:00 2001 From: Vineeth Remanan Pillai Date: Thu, 19 Jan 2017 08:35:39 -0800 Subject: [PATCH 614/786] xen-netfront: Fix Rx stall during network stress and OOM [ Upstream commit 90c311b0eeead647b708a723dbdde1eda3dcad05 ] During an OOM scenario, request slots could not be created as skb allocation fails. So the netback cannot pass in packets and netfront wrongly assumes that there is no more work to be done and it disables polling. This causes Rx to stall. The issue is with the retry logic which schedules the timer if the created slots are less than NET_RX_SLOTS_MIN. The count of new request slots to be pushed are calculated as a difference between new req_prod and rsp_cons which could be more than the actual slots, if there are unconsumed responses. The fix is to calculate the count of newly created slots as the difference between new req_prod and old req_prod. Signed-off-by: Vineeth Remanan Pillai Reviewed-by: Juergen Gross Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/xen-netfront.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 0cdcb2169083..599cf5090030 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) queue->rx.req_prod_pvt = req_prod; /* Not enough requests? Try again later. */ - if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { + if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) { mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); return; } -- GitLab From 322baf72eed51cef55a61f5d4ac1b51bd7824c1a Mon Sep 17 00:00:00 2001 From: Eric Farman Date: Fri, 13 Jan 2017 12:48:06 -0500 Subject: [PATCH 615/786] scsi: virtio_scsi: Reject commands when virtqueue is broken [ Upstream commit 773c7220e22d193e5667c352fcbf8d47eefc817f ] In the case of a graceful set of detaches, where the virtio-scsi-ccw disk is removed from the guest prior to the controller, the guest behaves quite normally. 
Specifically, the detach gets us into sd_sync_cache to issue a Synchronize Cache(10) command, which immediately fails (and is retried a couple of times) because the device has been removed. Later, the removal of the controller sees two CRWs presented, but there's no further indication of the removal from the guest viewpoint. [ 17.217458] sd 0:0:0:0: [sda] Synchronizing SCSI cache [ 17.219257] sd 0:0:0:0: [sda] Synchronize Cache(10) failed: Result: hostbyte=DID_BAD_TARGET driverbyte=DRIVER_OK [ 21.449400] crw_info : CRW reports slct=0, oflw=0, chn=1, rsc=3, anc=0, erc=4, rsid=2 [ 21.449406] crw_info : CRW reports slct=0, oflw=0, chn=0, rsc=3, anc=0, erc=4, rsid=0 However, on s390, the SCSI disks can be removed "by surprise" when an entire controller (host) is removed and all associated disks are removed via the loop in scsi_forget_host. The same call to sd_sync_cache is made, but because the controller has already been removed, the Synchronize Cache(10) command is neither issued (and then failed) nor rejected. That the I/O isn't returned means the guest cannot have other devices added nor removed, and other tasks (such as shutdown or reboot) issued by the guest will not complete either. The virtio ring has already been marked as broken (via virtio_break_device in virtio_ccw_remove), but we still attempt to queue the command only to have it remain there. The calling sequence provides a bit of distinction for us: virtscsi_queuecommand() -> virtscsi_kick_cmd() -> virtscsi_add_cmd() -> virtqueue_add_sgs() -> virtqueue_add() if success return 0 elseif vq->broken or vring_mapping_error() return -EIO else return -ENOSPC A return of ENOSPC is generally a temporary condition, so returning "host busy" from virtscsi_queuecommand makes sense here, to have it redriven in a moment or two. But the EIO return code is more of a permanent error and so it would be wise to return the I/O itself and allow the calling thread to finish gracefully. The result is these four kernel messages in the guest (the fourth one does not occur prior to this patch): [ 22.921562] crw_info : CRW reports slct=0, oflw=0, chn=1, rsc=3, anc=0, erc=4, rsid=2 [ 22.921580] crw_info : CRW reports slct=0, oflw=0, chn=0, rsc=3, anc=0, erc=4, rsid=0 [ 22.921978] sd 0:0:0:0: [sda] Synchronizing SCSI cache [ 22.921993] sd 0:0:0:0: [sda] Synchronize Cache(10) failed: Result: hostbyte=DID_BAD_TARGET driverbyte=DRIVER_OK I opted to fill in the same response data that is returned from the more graceful device detach, where the disk device is removed prior to the controller device. Signed-off-by: Eric Farman Reviewed-by: Fam Zheng Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/virtio_scsi.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index ec91bd07f00a..c680d7641311 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, { struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + unsigned long flags; int req_size; + int ret; BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); @@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi, req_size = sizeof(cmd->req.cmd); } - if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) + ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); + if (ret == -EIO) { + cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + spin_lock_irqsave(&req_vq->vq_lock, flags); + virtscsi_complete_cmd(vscsi, cmd); + spin_unlock_irqrestore(&req_vq->vq_lock, flags); + } else if (ret != 0) { return SCSI_MLQUEUE_HOST_BUSY; + } return 0; } -- GitLab From e9a87e0f5bbb3f3fd28048b923b9941687c6233f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Jan 2017 14:22:24 -0800 Subject: [PATCH 616/786] iwlwifi: fix kernel crash when unregistering thermal zone [ Upstream commit 92549cdc288f47f3a98cf80ac5890c91f5876a06 ] A recent firmware change seems to have enabled thermal zones on the iwlwifi driver. Unfortunately, my device fails when registering the thermal zone. This doesn't stop the driver from attempting to unregister the thermal zone at unload time, triggering a NULL pointer deference in strlen() off the thermal_zone_device_unregister() path. Don't unregister if name is NULL, for that case we failed registering. Do the same for the cooling zone. Signed-off-by: Jens Axboe Signed-off-by: Kalle Valo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 63a051be832e..bec7d9c46087 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) return; IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); - thermal_zone_device_unregister(mvm->tz_device.tzone); - mvm->tz_device.tzone = NULL; + if (mvm->tz_device.tzone) { + thermal_zone_device_unregister(mvm->tz_device.tzone); + mvm->tz_device.tzone = NULL; + } } static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) @@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) return; IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); - thermal_cooling_device_unregister(mvm->cooling_dev.cdev); - mvm->cooling_dev.cdev = NULL; + if (mvm->cooling_dev.cdev) { + thermal_cooling_device_unregister(mvm->cooling_dev.cdev); + mvm->cooling_dev.cdev = NULL; + } } #endif /* CONFIG_THERMAL */ -- GitLab From e99d86d76eed4f4bccc01e58e0bb3c96fbe88f67 Mon Sep 17 00:00:00 2001 From: Zach Ploskey Date: Sun, 22 Jan 2017 00:47:19 -0800 Subject: [PATCH 617/786] platform/x86: ideapad-laptop: handle ACPI event 1 [ Upstream commit cfee5d63767b2e7997c1f36420d008abbe61565c ] On Ideapad laptops, ACPI event 1 is currently not handled. 
Many models log "ideapad_laptop: Unknown event: 1" every 20 seconds or so while running on battery power. Some convertible laptops receive this event when switching in and out of tablet mode. This adds and additional case for event 1 in ideapad_acpi_notify to call ideapad_input_report(priv, vpc_bit), so that the event is reported to userspace and we avoid unnecessary logging. Fixes bug #107481 (https://bugzilla.kernel.org/show_bug.cgi?id=107481) Fixes bug #65751 (https://bugzilla.kernel.org/show_bug.cgi?id=65751) Signed-off-by: Zach Ploskey Signed-off-by: Andy Shevchenko Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/ideapad-laptop.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index a7614fc542b5..2f1615e00cb4 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) case 8: case 7: case 6: + case 1: ideapad_input_report(priv, vpc_bit); break; case 5: -- GitLab From c6f284899e01f9ea095d0e5d7aa2f3814915def1 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Fri, 20 Jan 2017 12:14:13 -0600 Subject: [PATCH 618/786] amd-xgbe: Check xgbe_init() return code [ Upstream commit 738f7f647371ff4cfc9646c99dba5b58ad142db3 ] The xgbe_init() routine returns a return code indicating success or failure, but the return code is not checked. Add code to xgbe_init() to issue a message when failures are seen and add code to check the xgbe_init() return code. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4 +++- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index ca106d4275cc..3424435a39dd 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -2825,8 +2825,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata) /* Flush Tx queues */ ret = xgbe_flush_tx_queues(pdata); - if (ret) + if (ret) { + netdev_err(pdata->netdev, "error flushing TX queues\n"); return ret; + } /* * Initialize DMA related features diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 0f0f30149e5a..1e4e8b245cd5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata) DBGPR("-->xgbe_start\n"); - hw_if->init(pdata); + ret = hw_if->init(pdata); + if (ret) + return ret; ret = phy_if->phy_start(pdata); if (ret) -- GitLab From e5a2ba9af818cf214f2a0a1e431fb2b1102883c0 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Jan 2017 16:05:05 -0800 Subject: [PATCH 619/786] net: dsa: Check return value of phy_connect_direct() [ Upstream commit 4078b76cac68e50ccf1f76a74e7d3d5788aec3fe ] We need to check the return value of phy_connect_direct() in dsa_slave_phy_connect() otherwise we may be continuing the initialization of a slave network device with a PHY that already attached somewhere else and which will soon be in error because the PHY device is in error. 
The conditions for such an error to occur are that we have a port of our switch that is not disabled, and has the same port number as a PHY address (say both 5) that can be probed using the DSA slave MII bus. We end-up having this slave network device find a PHY at the same address as our port number, and we try to attach to it. A slave network (e.g: port 0) has already attached to our PHY device, and we try to re-attach it with a different network device, but since we ignore the error we would end-up initializating incorrect device references by the time the slave network interface is opened. The code has been (re)organized several times, making it hard to provide an exact Fixes tag, this is a bugfix nonetheless. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/dsa/slave.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 3ff9d97cf56b..079d76bc204c 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1103,10 +1103,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, /* Use already configured phy mode */ if (p->phy_interface == PHY_INTERFACE_MODE_NA) p->phy_interface = p->phy->interface; - phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, - p->phy_interface); - - return 0; + return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, + p->phy_interface); } static int dsa_slave_phy_setup(struct dsa_slave_priv *p, -- GitLab From 8895ef4e5357fa54e614c5654eb4416623c2feb6 Mon Sep 17 00:00:00 2001 From: Ding Pixel Date: Wed, 18 Jan 2017 17:26:38 +0800 Subject: [PATCH 620/786] drm/amdgpu: check ring being ready before using MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit c5f21c9f878b8dcd54d0b9739c025ca73cb4c091 ] Return success when the ring is properly initialized, otherwise return failure. Tonga SRIOV VF doesn't have UVD and VCE engines, the initialization of these IPs is bypassed. The system crashes if application submit IB to their rings which are not ready to use. It could be a common issue if IP having ring buffer is disabled for some reason on specific ASIC, so it should check the ring being ready to use. Bug: amdgpu_test crashes system on Tonga VF. Signed-off-by: Ding Pixel Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 82dc8d20e28a..bfb4b91869e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, } break; } + + if (!(*out_ring && (*out_ring)->adev)) { + DRM_ERROR("Ring %d is not initialized on IP %d\n", + ring, ip_type); + return -EINVAL; + } + return 0; } -- GitLab From ff3b1dd026bb1f9df6f345ec91b9a754d363306f Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Tue, 24 Jan 2017 17:50:26 +0100 Subject: [PATCH 621/786] vfio/spapr: fail tce_iommu_attach_group() when iommu_data is null [ Upstream commit bd00fdf198e2da475a2f4265a83686ab42d998a8 ] The recently added mediated VFIO driver doesn't know about powerpc iommu. It thus doesn't register a struct iommu_table_group in the iommu group upon device creation. The iommu_data pointer hence remains null. 
This causes a kernel oops when userspace tries to set the iommu type of a container associated with a mediated device to VFIO_SPAPR_TCE_v2_IOMMU. [ 82.585440] mtty mtty: MDEV: Registered [ 87.655522] iommu: Adding device 83b8f4f2-509f-382f-3c1e-e6bfe0fa1001 to group 10 [ 87.655527] vfio_mdev 83b8f4f2-509f-382f-3c1e-e6bfe0fa1001: MDEV: group_id = 10 [ 116.297184] Unable to handle kernel paging request for data at address 0x00000030 [ 116.297389] Faulting instruction address: 0xd000000007870524 [ 116.297465] Oops: Kernel access of bad area, sig: 11 [#1] [ 116.297611] SMP NR_CPUS=2048 [ 116.297611] NUMA [ 116.297627] PowerNV ... [ 116.297954] CPU: 33 PID: 7067 Comm: qemu-system-ppc Not tainted 4.10.0-rc5-mdev-test #8 [ 116.297993] task: c000000e7718b680 task.stack: c000000e77214000 [ 116.298025] NIP: d000000007870524 LR: d000000007870518 CTR: 0000000000000000 [ 116.298064] REGS: c000000e77217990 TRAP: 0300 Not tainted (4.10.0-rc5-mdev-test) [ 116.298103] MSR: 9000000000009033 [ 116.298107] CR: 84004444 XER: 00000000 [ 116.298154] CFAR: c00000000000888c DAR: 0000000000000030 DSISR: 40000000 SOFTE: 1 GPR00: d000000007870518 c000000e77217c10 d00000000787b0ed c000000eed2103c0 GPR04: 0000000000000000 0000000000000000 c000000eed2103e0 0000000f24320000 GPR08: 0000000000000104 0000000000000001 0000000000000000 d0000000078729b0 GPR12: c00000000025b7e0 c00000000fe08400 0000000000000001 000001002d31d100 GPR16: 000001002c22c850 00003ffff315c750 0000000043145680 0000000043141bc0 GPR20: ffffffffffffffed fffffffffffff000 0000000020003b65 d000000007706018 GPR24: c000000f16cf0d98 d000000007706000 c000000003f42980 c000000003f42980 GPR28: c000000f1575ac00 c000000003f429c8 0000000000000000 c000000eed2103c0 [ 116.298504] NIP [d000000007870524] tce_iommu_attach_group+0x10c/0x360 [vfio_iommu_spapr_tce] [ 116.298555] LR [d000000007870518] tce_iommu_attach_group+0x100/0x360 [vfio_iommu_spapr_tce] [ 116.298601] Call Trace: [ 116.298610] [c000000e77217c10] [d000000007870518] tce_iommu_attach_group+0x100/0x360 [vfio_iommu_spapr_tce] (unreliable) [ 116.298671] [c000000e77217cb0] [d0000000077033a0] vfio_fops_unl_ioctl+0x278/0x3e0 [vfio] [ 116.298713] [c000000e77217d40] [c0000000002a3ebc] do_vfs_ioctl+0xcc/0x8b0 [ 116.298745] [c000000e77217de0] [c0000000002a4700] SyS_ioctl+0x60/0xc0 [ 116.298782] [c000000e77217e30] [c00000000000b220] system_call+0x38/0xfc [ 116.298812] Instruction dump: [ 116.298828] 7d3f4b78 409effc8 3d220000 e9298020 3c800140 38a00018 608480c0 e8690028 [ 116.298869] 4800249d e8410018 7c7f1b79 41820230 2fa90000 419e0114 e9090020 [ 116.298914] ---[ end trace 1e10b0ced08b9120 ]--- This patch fixes the oops. 
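The guard itself is small; in outline (a sketch of the relevant part of tce_iommu_attach_group(), unwinding through the existing unlock_exit label):

	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		/* no powerpc iommu_table_group registered, e.g. a mediated device */
		ret = -ENODEV;
		goto unlock_exit;
	}
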
Reported-by: Vaibhav Jain Signed-off-by: Greg Kurz Signed-off-by: Alex Williamson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/vfio/vfio_iommu_spapr_tce.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 79ddcb05d126..85d3e648bdea 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -1292,6 +1292,10 @@ static int tce_iommu_attach_group(void *iommu_data, /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n", iommu_group_id(iommu_group), iommu_group); */ table_group = iommu_group_get_iommudata(iommu_group); + if (!table_group) { + ret = -ENODEV; + goto unlock_exit; + } if (tce_groups_attached(container) && (!table_group->ops || !table_group->ops->take_ownership || -- GitLab From a6c3e01bf32e82494fb634801982e31f257f25cc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 23 Jan 2017 11:11:42 +0100 Subject: [PATCH 622/786] mlxsw: spectrum_router: Correctly reallocate adjacency entries [ Upstream commit a59b7e0246774e28193126fe7fdbbd0ae9c67dcc ] mlxsw_sp_nexthop_group_mac_update() is called in one of two cases: 1) When the MAC of a nexthop needs to be updated 2) When the size of a nexthop group has changed In the second case the adjacency entries for the nexthop group need to be reallocated from the adjacency table. In this case we must write to the entries the MAC addresses of all the nexthops that should be offloaded and not only those whose MAC changed. Otherwise, these entries would be filled with garbage data, resulting in packet loss. Fixes: a7ff87acd995 ("mlxsw: spectrum_router: Implement next-hop routing") Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 690563099313..9e31a3390154 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1178,7 +1178,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, static int mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp) + struct mlxsw_sp_nexthop_group *nh_grp, + bool reallocate) { u32 adj_index = nh_grp->adj_index; /* base */ struct mlxsw_sp_nexthop *nh; @@ -1193,7 +1194,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, continue; } - if (nh->update) { + if (nh->update || reallocate) { err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, adj_index, nh); if (err) @@ -1254,7 +1255,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, /* Nothing was added or removed, so no need to reallocate. Just * update MAC on existing adjacency indexes. 
*/ - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, + false); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; @@ -1282,7 +1284,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, nh_grp->adj_index_valid = 1; nh_grp->adj_index = adj_index; nh_grp->ecmp_size = ecmp_size; - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); goto set_trap; -- GitLab From 7fdc81f6e1a9b3f520e40cfc4ebccc94858da62d Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 23 Jan 2017 21:37:52 +0200 Subject: [PATCH 623/786] virtio_net: fix PAGE_SIZE > 64k [ Upstream commit d0fa28f00052391b5df328f502fbbdd4444938b7 ] I don't have any guests with PAGE_SIZE > 64k but the code seems to be clearly broken in that case as PAGE_SIZE / MERGEABLE_BUFFER_ALIGN will need more than 8 bit and so the code in mergeable_ctx_to_buf_address does not give us the actual true size. Cc: John Fastabend Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/virtio_net.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7ca99899972e..1568aedddfc9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -47,8 +47,16 @@ module_param(gso, bool, 0444); */ DECLARE_EWMA(pkt_len, 1, 64) +/* With mergeable buffers we align buffer address and use the low bits to + * encode its true size. Buffer size is up to 1 page so we need to align to + * square root of page size to ensure we reserve enough bits to encode the true + * size. + */ +#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2) + /* Minimum alignment for mergeable packet buffers. */ -#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256) +#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \ + 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT) #define VIRTNET_DRIVER_VERSION "1.0.0" -- GitLab From b07bf2364605dc7d78401b7eb02a533b0b6ddc05 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 23 Jan 2017 16:43:05 -0800 Subject: [PATCH 624/786] ip6_tunnel: must reload ipv6h in ip6ip6_tnl_xmit() [ Upstream commit 21b995a9cb093fff33ec91d7cb3822b882a90a1e ] Since ip6_tnl_parse_tlv_enc_lim() can call pskb_may_pull(), we must reload any pointer that was related to skb->head (or skb->data), or risk use after free. Fixes: c12b395a4664 ("gre: Support GRE over IPv6") Signed-off-by: Eric Dumazet Cc: Dmitry Kozlov Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_gre.c | 3 +++ net/ipv6/ip6_tunnel.c | 2 ++ 2 files changed, 5 insertions(+) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 0a5922055da2..a5fdc1aa7a9a 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -582,6 +582,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) return -1; offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); + /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ + ipv6h = ipv6_hdr(skb); + if (offset > 0) { struct ipv6_tlv_tnl_enc_lim *tel; tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 116b4da06820..63fad2467a7e 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1313,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowlabel = key->label; } else { offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); + /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ + ipv6h = ipv6_hdr(skb); if (offset > 0) { struct ipv6_tlv_tnl_enc_lim *tel; -- GitLab From 32bd4d2ed9d8355edc2263947286c8039c6bf171 Mon Sep 17 00:00:00 2001 From: Balakrishnan Raman Date: Mon, 23 Jan 2017 20:44:33 -0800 Subject: [PATCH 625/786] vxlan: do not age static remote mac entries [ Upstream commit efb5f68f32995c146944a9d4257c3cf8eae2c4a1 ] Mac aging is applicable only for dynamically learnt remote mac entries. Check for user configured static remote mac entries and skip aging. Signed-off-by: Balakrishnan Raman Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/vxlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 55c4408892be..963e5339a4d7 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2285,7 +2285,7 @@ static void vxlan_cleanup(unsigned long arg) = container_of(p, struct vxlan_fdb, hlist); unsigned long timeout; - if (f->state & NUD_PERMANENT) + if (f->state & (NUD_PERMANENT | NUD_NOARP)) continue; timeout = f->used + vxlan->cfg.age_interval * HZ; -- GitLab From 00f468f51dd5182390b4e859dced75f22e89034e Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Tue, 24 Jan 2017 07:28:41 +0100 Subject: [PATCH 626/786] ibmveth: Add a proper check for the availability of the checksum features [ Upstream commit 23d28a859fb847fd7fcfbd31acb3b160abb5d6ae ] When using the ibmveth driver in a KVM/QEMU based VM, it currently always prints out a scary error message like this when it is started: ibmveth 71000003 (unregistered net_device): unable to change checksum offload settings. 1 rc=-2 ret_attr=71000003 This happens because the driver always tries to enable the checksum offloading without checking for the availability of this feature first. QEMU does not support checksum offloading for the spapr-vlan device, thus we always get the error message here. According to the LoPAPR specification, the "ibm,illan-options" property of the corresponding device tree node should be checked first to see whether the H_ILLAN_ATTRIUBTES hypercall and thus the checksum offloading feature is available. Thus let's do this in the ibmveth driver, too, so that the error message is really only limited to cases where something goes wrong, and does not occur if the feature is just missing. Signed-off-by: Thomas Huth Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmveth.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 03dca732e4c6..b375ae9f98ef 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1604,8 +1604,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netdev->netdev_ops = &ibmveth_netdev_ops; netdev->ethtool_ops = &netdev_ethtool_ops; SET_NETDEV_DEV(netdev, &dev->dev); - netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + netdev->hw_features = NETIF_F_SG; + if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) { + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM; + } netdev->features |= netdev->hw_features; -- GitLab From c5c8743642aee21300b99540643549054edbf17f Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Tue, 24 Jan 2017 15:18:29 -0800 Subject: [PATCH 627/786] kernel/panic.c: add missing \n [ Upstream commit ff7a28a074ccbea999dadbb58c46212cf90984c6 ] When a system panics, the "Rebooting in X seconds.." message is never printed because it lacks a new line. Fix it. Link: http://lkml.kernel.org/r/20170119114751.2724-1-jslaby@suse.cz Signed-off-by: Jiri Slaby Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/panic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/panic.c b/kernel/panic.c index e6480e20379e..dbec387099b1 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -249,7 +249,7 @@ void panic(const char *fmt, ...) * Delay timeout seconds before rebooting the machine. * We can't use the "normal" timers since we just panicked. */ - pr_emerg("Rebooting in %d seconds..", panic_timeout); + pr_emerg("Rebooting in %d seconds..\n", panic_timeout); for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) { touch_nmi_watchdog(); -- GitLab From 2bc8fcd633d8e7d59a242eb4d86fbebb8cf7ff61 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 25 Jan 2017 09:20:54 +0100 Subject: [PATCH 628/786] Documentation: devicetree: change the mediatek ethernet compatible string [ Upstream commit 61976fff20f92aceecc3670f6168bfc57a79e047 ] When the binding was defined, I was not aware that mt2701 was an earlier version of the SoC. For sake of consistency, the ethernet driver should use mt2701 inside the compat string as this is the earliest SoC with the ethernet core. The ethernet driver is currently of no real use until we finish and upstream the DSA driver. There are no users of this binding yet. It should be safe to fix this now before it is too late and we need to provide backward compatibility for the mt7623-eth compat string. Reported-by: Sean Wang Signed-off-by: John Crispin Reviewed-by: Matthias Brugger Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/net/mediatek-net.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt index c010fafc66a8..c7194e87d5f4 100644 --- a/Documentation/devicetree/bindings/net/mediatek-net.txt +++ b/Documentation/devicetree/bindings/net/mediatek-net.txt @@ -7,7 +7,7 @@ have dual GMAC each represented by a child node.. 
* Ethernet controller node Required properties: -- compatible: Should be "mediatek,mt7623-eth" +- compatible: Should be "mediatek,mt2701-eth" - reg: Address and length of the register set for the device - interrupts: Should contain the three frame engines interrupts in numeric order. These are fe_int0, fe_int1 and fe_int2. -- GitLab From b8c5e7b1241362a131a2364fd166f8c8fdd9b363 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Mon, 12 Dec 2016 16:15:17 +0100 Subject: [PATCH 629/786] drm/etnaviv: trick drm_mm into giving out a low IOVA [ Upstream commit 3546fb0cdac25a79c89d87020566fab52b92867d ] After rollover of the IOVA space, we want to get a low IOVA address, otherwise the the games we play by remembering the last IOVA are pointless. When we search for a free hole with DRM_MM_SEARCH_DEFAULT, drm_mm will pop the next entry from the free holes stack, which will likely be a high IOVA. By using DRM_MM_SEARCH_BELOW we can trick drm_mm into reversing the search and provide us with a low IOVA. Signed-off-by: Lucas Stach Reviewed-by: Wladimir van der Laan Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/etnaviv/etnaviv_mmu.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 169ac96e8f08..fe0e85b41310 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, struct list_head list; bool found; + /* + * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick + * drm_mm into giving out a low IOVA after address space + * rollover. This needs a proper fix. + */ ret = drm_mm_insert_node_in_range(&mmu->mm, node, size, 0, mmu->last_iova, ~0UL, - DRM_MM_SEARCH_DEFAULT); + mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW); if (ret != -ENOSPC) break; -- GitLab From 3a6edbc95ba0df871e1eb72a411c0fa06644785e Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Thu, 5 Jan 2017 10:09:25 -0500 Subject: [PATCH 630/786] perf/x86/intel/uncore: Fix hardcoded socket 0 assumption in the Haswell init code [ Upstream commit 6d6daa20945f3f598e56e18d1f926c08754f5801 ] hswep_uncore_cpu_init() uses a hardcoded physical package id 0 for the boot cpu. This works as long as the boot CPU is actually on the physical package 0, which is normaly the case after power on / reboot. But it fails with a NULL pointer dereference when a kdump kernel is started on a secondary socket which has a different physical package id because the locigal package translation for physical package 0 does not exist. Use the logical package id of the boot cpu instead of hard coded 0. [ tglx: Rewrote changelog once more ] Fixes: cf6d445f6897 ("perf/x86/uncore: Track packages, not per CPU data") Signed-off-by: Prarit Bhargava Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: H. 
Peter Anvin Cc: Harish Chegondi Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/1483628965-2890-1-git-send-email-prarit@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/uncore_snbep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 272427700d48..afe8024e9e95 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = { void hswep_uncore_cpu_init(void) { - int pkg = topology_phys_to_logical_pkg(0); + int pkg = boot_cpu_data.logical_proc_id; if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; -- GitLab From 849f2d0665e049c21dbac8c0fa566a8ac04fead5 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 2 Jan 2017 14:07:22 +0200 Subject: [PATCH 631/786] pinctrl: intel: Set pin direction properly [ Upstream commit 17fab473693e8357a9aa6fee4fbed6c13a34bd81 ] There are two bits in the PADCFG0 register to configure direction, one per TX/RX buffers. For now we wrongly assume that the GPIO is always requested before it is being used, which is not true when the GPIO is used through irqchip. In this case the GPIO is never requested and we never enable RX buffer for it. Fix this by setting both bits accordingly. Reported-by: Jarkko Nikula Signed-off-by: Andy Shevchenko Signed-off-by: Linus Walleij Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/intel/pinctrl-intel.c | 30 +++++++++++++++++---------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 01443762e570..b40a074822cf 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, return 0; } +static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) +{ + u32 value; + + value = readl(padcfg0); + if (input) { + value &= ~PADCFG0_GPIORXDIS; + value |= PADCFG0_GPIOTXDIS; + } else { + value &= ~PADCFG0_GPIOTXDIS; + value |= PADCFG0_GPIORXDIS; + } + writel(value, padcfg0); +} + static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned pin) @@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, /* Disable SCI/SMI/NMI generation */ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); - /* Disable TX buffer and enable RX (this will be input) */ - value &= ~PADCFG0_GPIORXDIS; - value |= PADCFG0_GPIOTXDIS; writel(value, padcfg0); + /* Disable TX buffer and enable RX (this will be input) */ + __intel_gpio_set_direction(padcfg0, true); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; @@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev, struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); void __iomem *padcfg0; unsigned long flags; - u32 value; raw_spin_lock_irqsave(&pctrl->lock, flags); padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); - - value = readl(padcfg0); - if (input) - 
value |= PADCFG0_GPIOTXDIS; - else - value &= ~PADCFG0_GPIOTXDIS; - writel(value, padcfg0); + __intel_gpio_set_direction(padcfg0, input); raw_spin_unlock_irqrestore(&pctrl->lock, flags); -- GitLab From d48cb21fd50bf6bea379ad04dc2baced20cf5275 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 10 Jan 2017 23:13:45 +0000 Subject: [PATCH 632/786] net: phy: marvell: fix Marvell 88E1512 used in SGMII mode [ Upstream commit a13c06525ab9ff442924e67df9393a5efa914c56 ] When an Marvell 88E1512 PHY is connected to a nic in SGMII mode, the fiber page is used for the SGMII host-side connection. The PHY driver notices that SUPPORTED_FIBRE is set, so it tries reading the fiber page for the link status, and ends up reading the MAC-side status instead of the outgoing (copper) link. This leads to incorrect results reported via ethtool. If the PHY is connected via SGMII to the host, ignore the fiber page. However, continue to allow the existing power management code to suspend and resume the fiber page. Fixes: 6cfb3bcc0641 ("Marvell phy: check link status in case of fiber link.") Signed-off-by: Russell King Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/marvell.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2f70f80de27f..c60c147708c4 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1200,7 +1200,8 @@ static int marvell_read_status(struct phy_device *phydev) int err; /* Check the fiber mode first */ - if (phydev->supported & SUPPORTED_FIBRE) { + if (phydev->supported & SUPPORTED_FIBRE && + phydev->interface != PHY_INTERFACE_MODE_SGMII) { err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER); if (err < 0) goto error; -- GitLab From 6130fac994818eb0fbc9dfc95056292e71fb3791 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 20 Oct 2016 08:52:50 +0200 Subject: [PATCH 633/786] mac80211: recalculate min channel width on VHT opmode changes [ Upstream commit d2941df8fbd9708035d66d889ada4d3d160170ce ] When an associated station changes its VHT operating mode this can/will affect the bandwidth it's using, and consequently we must recalculate the minimum bandwidth we need to use. 
Failure to do so can lead to one of two scenarios: 1) we use a too high bandwidth, this is benign 2) we use a too narrow bandwidth, causing rate control and actual PHY configuration to be out of sync, which can in turn cause problems/crashes Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/mac80211/iface.c | 21 +++++++++++++++++++++ net/mac80211/rx.c | 9 +-------- net/mac80211/vht.c | 4 +++- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8d7747e98fdb..37bec0f864b7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -6,6 +6,7 @@ * Copyright (c) 2006 Jiri Benc * Copyright 2008, Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -1307,6 +1308,26 @@ static void ieee80211_iface_work(struct work_struct *work) } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_VHT) { switch (mgmt->u.action.u.vht_group_notif.action_code) { + case WLAN_VHT_ACTION_OPMODE_NOTIF: { + struct ieee80211_rx_status *status; + enum nl80211_band band; + u8 opmode; + + status = IEEE80211_SKB_RXCB(skb); + band = status->band; + opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; + + mutex_lock(&local->sta_mtx); + sta = sta_info_get_bss(sdata, mgmt->sa); + + if (sta) + ieee80211_vht_handle_opmode(sdata, sta, + opmode, + band); + + mutex_unlock(&local->sta_mtx); + break; + } case WLAN_VHT_ACTION_GROUPID_MGMT: ieee80211_process_mu_groups(sdata, mgmt); break; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c45a0fcfb3e7..439e597fd374 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2923,17 +2923,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) switch (mgmt->u.action.u.vht_opmode_notif.action_code) { case WLAN_VHT_ACTION_OPMODE_NOTIF: { - u8 opmode; - /* verify opmode is present */ if (len < IEEE80211_MIN_ACTION_SIZE + 2) goto invalid; - - opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; - - ieee80211_vht_handle_opmode(rx->sdata, rx->sta, - opmode, status->band); - goto handled; + goto queue; } case WLAN_VHT_ACTION_GROUPID_MGMT: { if (len < IEEE80211_MIN_ACTION_SIZE + 25) diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 6832bf6ab69f..43e45bb660bc 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -527,8 +527,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band); - if (changed > 0) + if (changed > 0) { + ieee80211_recalc_min_chandef(sdata); rate_control_rate_update(local, sband, sta, changed); + } } void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, -- GitLab From c78b8de5c05c73ff451b7c5a085766b421920ccd Mon Sep 17 00:00:00 2001 From: Colin King Date: Wed, 11 Jan 2017 11:43:10 +0000 Subject: [PATCH 634/786] perf/x86/intel: Use ULL constant to prevent undefined shift behaviour [ Upstream commit ad5013d5699d30ded0cdbbc68b93b2aa28222c6e ] When x86_pmu.num_counters is 32 the shift of the integer constant 1 is exceeding 32bit and therefor undefined behaviour. Fix this by shifting 1ULL instead of 1. 
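As a stand-alone illustration of the C rule involved (hypothetical demo, not driver code): the literal 1 has type int, so shifting it by 32 or more bits is undefined; widening it to unsigned long long first keeps the shift count inside the operand's width.

	unsigned int num_counters = 32;

	/* old form: (1 << num_counters) - 1
	 * 1 is a 32-bit int, so a shift count of 32 is undefined behaviour */

	unsigned long long mask = (1ULL << num_counters) - 1;	/* 0xffffffff */
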
Reported-by: CoverityScan CID#1192105 ("Bad bit shift operation") Signed-off-by: Colin Ian King Cc: Andi Kleen Cc: Peter Zijlstra Cc: Kan Liang Cc: Stephane Eranian Cc: Alexander Shishkin Link: http://lkml.kernel.org/r/20170111114310.17928-1-colin.king@canonical.com Signed-off-by: Thomas Gleixner Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/intel/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 24a6cd24fac4..f0f197f459b5 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3978,7 +3978,7 @@ __init int intel_pmu_init(void) x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC); x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC; } - x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; + x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1; if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) { WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!", -- GitLab From 582c1ca0ea1d13a9e2912c5a7530f0728b3c3d1c Mon Sep 17 00:00:00 2001 From: Brendan McGrath Date: Sat, 7 Jan 2017 08:01:38 +1100 Subject: [PATCH 635/786] HID: i2c-hid: Add sleep between POWER ON and RESET [ Upstream commit a89af4abdf9b353cdd6f61afc0eaaac403304873 ] Support for the Asus Touchpad was recently added. It turns out this device can fail initialisation (and become unusable) when the RESET command is sent too soon after the POWER ON command. Unfortunately the i2c-hid specification does not specify the need for a delay between these two commands. But it was discovered the Windows driver has a 1ms delay. As a result, this patch modifies the i2c-hid module to add a sleep inbetween the POWER ON and RESET commands which lasts between 1ms and 5ms. See https://github.com/vlasenko/hid-asus-dkms/issues/24 for further details. Signed-off-by: Brendan McGrath Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/hid/i2c-hid/i2c-hid.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index b1bce804fe97..8008e06b7efe 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -427,6 +427,15 @@ static int i2c_hid_hwreset(struct i2c_client *client) if (ret) goto out_unlock; + /* + * The HID over I2C specification states that if a DEVICE needs time + * after the PWR_ON request, it should utilise CLOCK stretching. + * However, it has been observered that the Windows driver provides a + * 1ms sleep between the PWR_ON and RESET requests and that some devices + * rely on this. + */ + usleep_range(1000, 5000); + i2c_hid_dbg(ihid, "resetting...\n"); ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0); -- GitLab From c32462d0b5232712f8a2a1d6cedb731115ba6f7b Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 11 Jan 2017 11:06:42 +0100 Subject: [PATCH 636/786] scsi: lpfc: avoid double free of resource identifiers [ Upstream commit cd60be4916ae689387d04b86b6fc15931e4c95ae ] Set variables initialized in lpfc_sli4_alloc_resource_identifiers() to NULL if an error occurred. Otherwise, lpfc_sli4_driver_resource_unset() attempts to free the memory again. Signed-off-by: Roberto Sassu Signed-off-by: Johannes Thumshirn Acked-by: James Smart Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/lpfc/lpfc_sli.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 49b4c798de18..2d4f4b58dcfa 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -5951,18 +5951,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) free_vfi_bmask: kfree(phba->sli4_hba.vfi_bmask); + phba->sli4_hba.vfi_bmask = NULL; free_xri_ids: kfree(phba->sli4_hba.xri_ids); + phba->sli4_hba.xri_ids = NULL; free_xri_bmask: kfree(phba->sli4_hba.xri_bmask); + phba->sli4_hba.xri_bmask = NULL; free_vpi_ids: kfree(phba->vpi_ids); + phba->vpi_ids = NULL; free_vpi_bmask: kfree(phba->vpi_bmask); + phba->vpi_bmask = NULL; free_rpi_ids: kfree(phba->sli4_hba.rpi_ids); + phba->sli4_hba.rpi_ids = NULL; free_rpi_bmask: kfree(phba->sli4_hba.rpi_bmask); + phba->sli4_hba.rpi_bmask = NULL; err_exit: return rc; } -- GitLab From aabb797b4c1204b2e8518538b2616e476f2bac92 Mon Sep 17 00:00:00 2001 From: Kevin Hilman Date: Wed, 11 Jan 2017 18:18:40 -0800 Subject: [PATCH 637/786] spi: davinci: use dma_mapping_error() [ Upstream commit c5a2a394835f473ae23931eda5066d3771d7b2f8 ] The correct error checking for dma_map_single() is to use dma_mapping_error(). Signed-off-by: Kevin Hilman Signed-off-by: Mark Brown Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi-davinci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index d36c11b73a35..02fb96797ac8 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = t->rx_buf; t->rx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_FROM_DEVICE); - if (!t->rx_dma) { + if (dma_mapping_error(&spi->dev, !t->rx_dma)) { ret = -EFAULT; goto err_rx_map; } @@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = (void *)t->tx_buf; t->tx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_TO_DEVICE); - if (!t->tx_dma) { + if (dma_mapping_error(&spi->dev, t->tx_dma)) { ret = -EFAULT; goto err_tx_map; } -- GitLab From f88f06e1831878ecdd5fa78090a45ea8ff77f38f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Wed, 11 Jan 2017 14:54:53 +0000 Subject: [PATCH 638/786] arm64: assembler: make adr_l work in modules under KASLR [ Upstream commit 41c066f2c4d436c535616fe182331766c57838f0 ] When CONFIG_RANDOMIZE_MODULE_REGION_FULL=y, the offset between loaded modules and the core kernel may exceed 4 GB, putting symbols exported by the core kernel out of the reach of the ordinary adrp/add instruction pairs used to generate relative symbol references. So make the adr_l macro emit a movz/movk sequence instead when executing in module context. While at it, remove the pointless special case for the stack pointer. 
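A rough sketch of the two expansions, with x0 and sym as placeholders (the real macro is in the assembler.h hunk below); the adrp/add pair only reaches +/- 4 GiB because adrp encodes a signed 21-bit page offset (2^20 pages * 4 KiB):

	/* core kernel: PC-relative pair, reaches +/- 4 GiB of the PC */
	adrp	x0, sym
	add	x0, x0, :lo12:sym

	/*
	 * module under CONFIG_RANDOMIZE_MODULE_REGION_FULL: build the
	 * absolute 64-bit address in four 16-bit chunks instead
	 */
	movz	x0, #:abs_g3:sym
	movk	x0, #:abs_g2_nc:sym
	movk	x0, #:abs_g1_nc:sym
	movk	x0, #:abs_g0_nc:sym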
Acked-by: Mark Rutland Acked-by: Will Deacon Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/assembler.h | 36 ++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 28bfe6132eb6..851290d2bfe3 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -155,22 +155,25 @@ lr .req x30 // link register /* * Pseudo-ops for PC-relative adr/ldr/str , where - * is within the range +/- 4 GB of the PC. + * is within the range +/- 4 GB of the PC when running + * in core kernel context. In module context, a movz/movk sequence + * is used, since modules may be loaded far away from the kernel + * when KASLR is in effect. */ /* * @dst: destination register (64 bit wide) * @sym: name of the symbol - * @tmp: optional scratch register to be used if == sp, which - * is not allowed in an adrp instruction */ - .macro adr_l, dst, sym, tmp= - .ifb \tmp + .macro adr_l, dst, sym +#ifndef MODULE adrp \dst, \sym add \dst, \dst, :lo12:\sym - .else - adrp \tmp, \sym - add \dst, \tmp, :lo12:\sym - .endif +#else + movz \dst, #:abs_g3:\sym + movk \dst, #:abs_g2_nc:\sym + movk \dst, #:abs_g1_nc:\sym + movk \dst, #:abs_g0_nc:\sym +#endif .endm /* @@ -181,6 +184,7 @@ lr .req x30 // link register * the address */ .macro ldr_l, dst, sym, tmp= +#ifndef MODULE .ifb \tmp adrp \dst, \sym ldr \dst, [\dst, :lo12:\sym] @@ -188,6 +192,15 @@ lr .req x30 // link register adrp \tmp, \sym ldr \dst, [\tmp, :lo12:\sym] .endif +#else + .ifb \tmp + adr_l \dst, \sym + ldr \dst, [\dst] + .else + adr_l \tmp, \sym + ldr \dst, [\tmp] + .endif +#endif .endm /* @@ -197,8 +210,13 @@ lr .req x30 // link register * while needs to be preserved. */ .macro str_l, src, sym, tmp +#ifndef MODULE adrp \tmp, \sym str \src, [\tmp, :lo12:\sym] +#else + adr_l \tmp, \sym + str \src, [\tmp] +#endif .endm /* -- GitLab From 4ae8dc6acb710419c8766c290b7fb5eac2f1ed68 Mon Sep 17 00:00:00 2001 From: Vadim Lomovtsev Date: Thu, 12 Jan 2017 07:28:06 -0800 Subject: [PATCH 639/786] net: thunderx: acpi: fix LMAC initialization [ Upstream commit 7aa4865506a26c607e00bd9794a85785b55ebca7 ] While probing BGX we requesting appropriate QLM for it's configuration and get LMAC count by that request. Then, while reading configured MAC values from SSDT table we need to save them in proper mapping: BGX[i]->lmac[j].mac = to later provide for initialization stuff. In order to fill such mapping properly we need to add lmac index to be used while acpi initialization since at this moment bgx->lmac_count already contains actual value. Signed-off-by: Vadim Lomovtsev Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 679679a4ccb2..e858b1af788d 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -48,8 +48,9 @@ struct lmac { struct bgx { u8 bgx_id; struct lmac lmac[MAX_LMAC_PER_BGX]; - int lmac_count; + u8 lmac_count; u8 max_lmac; + u8 acpi_lmac_idx; void __iomem *reg_base; struct pci_dev *pdev; bool is_dlm; @@ -1159,13 +1160,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, if (acpi_bus_get_device(handle, &adev)) goto out; - acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); + acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); + SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); - bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; + bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx; + bgx->acpi_lmac_idx++; /* move to next LMAC */ out: - bgx->lmac_count++; return AE_OK; } -- GitLab From 77e82094a3c9d3ca8308a48a4b11037c6234a262 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Dec 2016 16:35:50 -0500 Subject: [PATCH 640/786] drm/radeon/si: load special ucode for certain MC configs [ Upstream commit ef736d394e85b1bf1fd65ba5e5257b85f6c82325 ] Special MC ucode is required for these memory configurations. Acked-by: Edward O'Callaghan Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/radeon/si.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 3333e8a45933..b75d809c292e 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -115,6 +115,8 @@ MODULE_FIRMWARE("radeon/hainan_rlc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); +MODULE_FIRMWARE("radeon/si58_mc.bin"); + static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); static void si_program_aspm(struct radeon_device *rdev); @@ -1650,6 +1652,7 @@ static int si_init_microcode(struct radeon_device *rdev) int err; int new_fw = 0; bool new_smc = false; + bool si58_fw = false; DRM_DEBUG("\n"); @@ -1742,6 +1745,10 @@ static int si_init_microcode(struct radeon_device *rdev) default: BUG(); } + /* this memory configuration requires special firmware */ + if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58) + si58_fw = true; + DRM_INFO("Loading %s Microcode\n", new_chip_name); snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name); @@ -1845,7 +1852,10 @@ static int si_init_microcode(struct radeon_device *rdev) } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); + if (si58_fw) + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin"); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name); err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev); if (err) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name); -- GitLab From f275ac7fc5d2b6013980864f14d1ced016211349 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Jan 2017 19:26:49 +0800 Subject: [PATCH 641/786] drm/amd/powerplay: fix 
vce cg logic error on CZ/St. [ Upstream commit 3731d12dce83d47b357753ffc450ce03f1b49688 ] can fix Bug 191281: vce ib test failed. when vce idle, set vce clock gate, so the clock in vce domain will be disabled. when need to encode, disable vce clock gate, enable the clocks to vce engine. Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 2028980f1ed4..5b261c1dcc9f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_CG_STATE_UNGATE); + AMD_CG_STATE_GATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, @@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) cgs_set_clockgating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); + AMD_PG_STATE_UNGATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; -- GitLab From 25319ae8e8a72a3fcdac7c964d267ca3c4e7c0a0 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Jan 2017 15:47:50 +0800 Subject: [PATCH 642/786] drm/amd/powerplay: refine vce dpm update code on Cz. [ Upstream commit ab8db87b8256e13a62f10af1d32f5fc233c398cc ] Program HardMin based on the vce_arbiter.ecclk if ecclk is 0, disable ECLK DPM 0. Otherwise VCE could hang if switching SCLK from DPM 0 to 6/7 Signed-off-by: Rex Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- .../gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 960424913496..189ec94c6ff9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr) cz_hwmgr->vce_dpm.hard_min_clk, PPSMC_MSG_SetEclkHardMin)); } else { - /*EPR# 419220 -HW limitation to to */ - cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, - PPSMC_MSG_SetEclkHardMin, - cz_get_eclk_level(hwmgr, - cz_hwmgr->vce_dpm.hard_min_clk, - PPSMC_MSG_SetEclkHardMin)); - + /*Program HardMin based on the vce_arbiter.ecclk */ + if (hwmgr->vce_arbiter.ecclk == 0) { + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, 0); + /* disable ECLK DPM 0. 
Otherwise VCE could hang if + * switching SCLK from DPM 0 to 6/7 */ + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkSoftMin, 1); + } else { + cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk; + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetEclkHardMin, + cz_get_eclk_level(hwmgr, + cz_hwmgr->vce_dpm.hard_min_clk, + PPSMC_MSG_SetEclkHardMin)); + } } return 0; } -- GitLab From 8eaaf66d41adf7b9b31486f03d93de3a1013e28d Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 5 Jan 2017 10:05:46 +0000 Subject: [PATCH 643/786] pmem: return EIO on read_pmem() failure [ Upstream commit d47d1d27fd6206c18806440f6ebddf51a806be4f ] The read_pmem() function uses memcpy_mcsafe() on x86 where an EFAULT error code indicates a failed read. Block I/O should use EIO to indicate failure. Other pmem code paths (like bad blocks) already use EIO so let's be consistent. This fixes compatibility with consumers like btrfs that try to parse the specific error code rather than treat all errors the same. Reviewed-by: Jeff Moyer Signed-off-by: Stefan Hajnoczi Signed-off-by: Dan Williams Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/nvdimm/pmem.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index b4808590870c..3456f532077c 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -87,7 +87,9 @@ static int read_pmem(struct page *page, unsigned int off, rc = memcpy_from_pmem(mem + off, pmem_addr, len); kunmap_atomic(mem); - return rc; + if (rc) + return -EIO; + return 0; } static int pmem_do_bvec(struct pmem_device *pmem, struct page *page, -- GitLab From 6baa8c92dab9a43f0b363f1b7d7bd269d5efcf8d Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 13 Jan 2017 11:28:25 +0100 Subject: [PATCH 644/786] mac80211: initialize SMPS field in HT capabilities [ Upstream commit 43071d8fb3b7f589d72663c496a6880fb097533c ] ibss and mesh modes copy the ht capabilites from the band without overriding the SMPS state. Unfortunately the default value 0 for the SMPS field means static SMPS instead of disabled. This results in HT ibss and mesh setups using only single-stream rates, even though SMPS is not supposed to be active. Initialize SMPS to disabled for all bands on ieee80211_hw_register to ensure that the value is sane where it is not overriden with the real SMPS state. 
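For reference, the SM power save subfield occupies two bits of ht_cap.cap and encodes 0 = static, 1 = dynamic, 3 = disabled, so a capability word copied with that field left at zero advertises static SMPS. A stand-alone sketch of the decode and of the fix's intent (the defines below are local stand-ins for the ieee80211.h constants):

#include <stdio.h>

#define HT_CAP_SM_PS		0x000C	/* 2-bit SM power save subfield */
#define HT_CAP_SM_PS_SHIFT	2
enum { SM_PS_STATIC = 0, SM_PS_DYNAMIC = 1, SM_PS_DISABLED = 3 };

int main(void)
{
	unsigned short cap = 0;	/* HT cap copied without touching SMPS */

	/* a zeroed field decodes as "static", not "disabled" */
	printf("smps=%u\n",
	       (unsigned)((cap & HT_CAP_SM_PS) >> HT_CAP_SM_PS_SHIFT));

	/*
	 * what the fix does once at registration time: force "disabled";
	 * no masking needed since SM_PS_DISABLED sets both bits
	 */
	cap |= SM_PS_DISABLED << HT_CAP_SM_PS_SHIFT;
	printf("smps=%u\n",
	       (unsigned)((cap & HT_CAP_SM_PS) >> HT_CAP_SM_PS_SHIFT));
	return 0;
}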
Reported-by: Elektra Wagenrad Signed-off-by: Felix Fietkau [move VHT TODO comment to a better place] Signed-off-by: Johannes Berg Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/mac80211/main.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 1075ac24c8c5..2bb6899854d4 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -908,12 +908,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) supp_ht = supp_ht || sband->ht_cap.ht_supported; supp_vht = supp_vht || sband->vht_cap.vht_supported; - if (sband->ht_cap.ht_supported) - local->rx_chains = - max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), - local->rx_chains); + if (!sband->ht_cap.ht_supported) + continue; /* TODO: consider VHT for RX chains, hopefully it's the same */ + local->rx_chains = + max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs), + local->rx_chains); + + /* no need to mask, SM_PS_DISABLED has all bits set */ + sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED << + IEEE80211_HT_CAP_SM_PS_SHIFT; } /* if low-level driver supports AP, we also support VLAN */ -- GitLab From 283994074501393b67590220ec8015f60ee670a8 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Fri, 13 Jan 2017 01:11:18 -0500 Subject: [PATCH 645/786] x86/tsc: Add the Intel Denverton Processor to native_calibrate_tsc() [ Upstream commit 695085b4bc7603551db0b3da897b8bf9893ca218 ] The Intel Denverton microserver uses a 25 MHz TSC crystal, so we can derive its exact [*] TSC frequency using CPUID and some arithmetic, eg.: TSC: 1800 MHz (25000000 Hz * 216 / 3 / 1000000) [*] 'exact' is only as good as the crystal, which should be +/- 20ppm Signed-off-by: Len Brown Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/306899f94804aece6d8fa8b4223ede3b48dbb59c.1484287748.git.len.brown@intel.com Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/tsc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index eea88fe5d969..6e57edf33d75 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void) crystal_khz = 24000; /* 24.0 MHz */ break; case INTEL_FAM6_SKYLAKE_X: + case INTEL_FAM6_ATOM_DENVERTON: crystal_khz = 25000; /* 25.0 MHz */ break; case INTEL_FAM6_ATOM_GOLDMONT: -- GitLab From 48131dd0f2b19dd297147c23dc634432fecee638 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 12 Jan 2017 16:53:11 +0100 Subject: [PATCH 646/786] x86/mpx: Use compatible types in comparison to fix sparse error [ Upstream commit 453828625731d0ba7218242ef6ec88f59408f368 ] info->si_addr is of type void __user *, so it should be compared against something from the same address space. 
This fixes the following sparse error: arch/x86/mm/mpx.c:296:27: error: incompatible types in comparison expression (different address spaces) Signed-off-by: Tobias Klauser Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/mpx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index e4f800999b32..3e7c489e1f55 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs) * We were not able to extract an address from the instruction, * probably because there was something invalid in it. */ - if (info->si_addr == (void *)-1) { + if (info->si_addr == (void __user *)-1) { err = -EINVAL; goto err_out; } -- GitLab From 1c68633329d230dc350bc8c521689be4703f6016 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 9 Dec 2016 14:59:00 +0100 Subject: [PATCH 647/786] perf/core: Fix sys_perf_event_open() vs. hotplug [ Upstream commit 63cae12bce9861cec309798d34701cf3da20bc71 ] There is problem with installing an event in a task that is 'stuck' on an offline CPU. Blocked tasks are not dis-assosciated from offlined CPUs, after all, a blocked task doesn't run and doesn't require a CPU etc.. Only on wakeup do we ammend the situation and place the task on a available CPU. If we hit such a task with perf_install_in_context() we'll loop until either that task wakes up or the CPU comes back online, if the task waking depends on the event being installed, we're stuck. While looking into this issue, I also spotted another problem, if we hit a task with perf_install_in_context() that is in the middle of being migrated, that is we observe the old CPU before sending the IPI, but run the IPI (on the old CPU) while the task is already running on the new CPU, things also go sideways. Rework things to rely on task_curr() -- outside of rq->lock -- which is rather tricky. Imagine the following scenario where we're trying to install the first event into our task 't': CPU0 CPU1 CPU2 (current == t) t->perf_event_ctxp[] = ctx; smp_mb(); cpu = task_cpu(t); switch(t, n); migrate(t, 2); switch(p, t); ctx = t->perf_event_ctxp[]; // must not be NULL smp_function_call(cpu, ..); generic_exec_single() func(); spin_lock(ctx->lock); if (task_curr(t)) // false add_event_to_ctx(); spin_unlock(ctx->lock); perf_event_context_sched_in(); spin_lock(ctx->lock); // sees event So its CPU0's store of t->perf_event_ctxp[] that must not go 'missing'. Because if CPU2's load of that variable were to observe NULL, it would not try to schedule the ctx and we'd have a task running without its counter, which would be 'bad'. As long as we observe !NULL, we'll acquire ctx->lock. If we acquire it first and not see the event yet, then CPU0 must observe task_curr() and retry. If the install happens first, then we must see the event on sched-in and all is well. I think we can translate the first part (until the 'must not be NULL') of the scenario to a litmus test like: C C-peterz { } P0(int *x, int *y) { int r1; WRITE_ONCE(*x, 1); smp_mb(); r1 = READ_ONCE(*y); } P1(int *y, int *z) { WRITE_ONCE(*y, 1); smp_store_release(z, 1); } P2(int *x, int *z) { int r1; int r2; r1 = smp_load_acquire(z); smp_mb(); r2 = READ_ONCE(*x); } exists (0:r1=0 /\ 2:r1=1 /\ 2:r2=0) Where: x is perf_event_ctxp[], y is our tasks's CPU, and z is our task being placed on the rq of CPU2. 
The P0 smp_mb() is the one added by this patch, ordering the store to perf_event_ctxp[] from find_get_context() and the load of task_cpu() in task_function_call(). The smp_store_release/smp_load_acquire model the RCpc locking of the rq->lock and the smp_mb() of P2 is the context switch switching from whatever CPU2 was running to our task 't'. This litmus test evaluates into: Test C-peterz Allowed States 7 0:r1=0; 2:r1=0; 2:r2=0; 0:r1=0; 2:r1=0; 2:r2=1; 0:r1=0; 2:r1=1; 2:r2=1; 0:r1=1; 2:r1=0; 2:r2=0; 0:r1=1; 2:r1=0; 2:r2=1; 0:r1=1; 2:r1=1; 2:r2=0; 0:r1=1; 2:r1=1; 2:r2=1; No Witnesses Positive: 0 Negative: 7 Condition exists (0:r1=0 /\ 2:r1=1 /\ 2:r2=0) Observation C-peterz Never 0 7 Hash=e427f41d9146b2a5445101d3e2fcaa34 And the strong and weak model agree. Reported-by: Mark Rutland Tested-by: Mark Rutland Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: Will Deacon Cc: jeremy.linton@arm.com Link: http://lkml.kernel.org/r/20161209135900.GU3174@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- kernel/events/core.c | 70 ++++++++++++++++++++++++++++++-------------- 1 file changed, 48 insertions(+), 22 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 11cc1d83c770..30ccc7029d18 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -2272,7 +2272,7 @@ static int __perf_install_in_context(void *info) struct perf_event_context *ctx = event->ctx; struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); struct perf_event_context *task_ctx = cpuctx->task_ctx; - bool activate = true; + bool reprogram = true; int ret = 0; raw_spin_lock(&cpuctx->ctx.lock); @@ -2280,27 +2280,26 @@ static int __perf_install_in_context(void *info) raw_spin_lock(&ctx->lock); task_ctx = ctx; - /* If we're on the wrong CPU, try again */ - if (task_cpu(ctx->task) != smp_processor_id()) { - ret = -ESRCH; - goto unlock; - } + reprogram = (ctx->task == current); /* - * If we're on the right CPU, see if the task we target is - * current, if not we don't have to activate the ctx, a future - * context switch will do that for us. + * If the task is running, it must be running on this CPU, + * otherwise we cannot reprogram things. + * + * If its not running, we don't care, ctx->lock will + * serialize against it becoming runnable. */ - if (ctx->task != current) - activate = false; - else - WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx); + if (task_curr(ctx->task) && !reprogram) { + ret = -ESRCH; + goto unlock; + } + WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); } else if (task_ctx) { raw_spin_lock(&task_ctx->lock); } - if (activate) { + if (reprogram) { ctx_sched_out(ctx, cpuctx, EVENT_TIME); add_event_to_ctx(event, ctx); ctx_resched(cpuctx, task_ctx); @@ -2351,13 +2350,36 @@ perf_install_in_context(struct perf_event_context *ctx, /* * Installing events is tricky because we cannot rely on ctx->is_active * to be set in case this is the nr_events 0 -> 1 transition. + * + * Instead we use task_curr(), which tells us if the task is running. + * However, since we use task_curr() outside of rq::lock, we can race + * against the actual state. This means the result can be wrong. + * + * If we get a false positive, we retry, this is harmless. 
+ * + * If we get a false negative, things are complicated. If we are after + * perf_event_context_sched_in() ctx::lock will serialize us, and the + * value must be correct. If we're before, it doesn't matter since + * perf_event_context_sched_in() will program the counter. + * + * However, this hinges on the remote context switch having observed + * our task->perf_event_ctxp[] store, such that it will in fact take + * ctx::lock in perf_event_context_sched_in(). + * + * We do this by task_function_call(), if the IPI fails to hit the task + * we know any future context switch of task must see the + * perf_event_ctpx[] store. */ -again: + /* - * Cannot use task_function_call() because we need to run on the task's - * CPU regardless of whether its current or not. + * This smp_mb() orders the task->perf_event_ctxp[] store with the + * task_cpu() load, such that if the IPI then does not find the task + * running, a future context switch of that task must observe the + * store. */ - if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event)) + smp_mb(); +again: + if (!task_function_call(task, __perf_install_in_context, event)) return; raw_spin_lock_irq(&ctx->lock); @@ -2371,12 +2393,16 @@ perf_install_in_context(struct perf_event_context *ctx, raw_spin_unlock_irq(&ctx->lock); return; } - raw_spin_unlock_irq(&ctx->lock); /* - * Since !ctx->is_active doesn't mean anything, we must IPI - * unconditionally. + * If the task is not running, ctx->lock will avoid it becoming so, + * thus we can safely install the event. */ - goto again; + if (task_curr(task)) { + raw_spin_unlock_irq(&ctx->lock); + goto again; + } + add_event_to_ctx(event, ctx); + raw_spin_unlock_irq(&ctx->lock); } /* -- GitLab From 82835fb33ce54820206c14580eb1a149c473c50c Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 3 Jan 2017 15:24:54 +0100 Subject: [PATCH 648/786] perf/x86: Reject non sampling events with precise_ip [ Upstream commit 18e7a45af91acdde99d3aa1372cc40e1f8142f7b ] As Peter suggested [1] rejecting non sampling PEBS events, because they dont make any sense and could cause bugs in the NMI handler [2]. [1] http://lkml.kernel.org/r/20170103094059.GC3093@worktop [2] http://lkml.kernel.org/r/1482931866-6018-3-git-send-email-jolsa@kernel.org Signed-off-by: Jiri Olsa Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: Vince Weaver Link: http://lkml.kernel.org/r/20170103142454.GA26251@krava Signed-off-by: Ingo Molnar Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/events/core.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 38623e219816..9604b2574d6c 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event) if (event->attr.precise_ip > precise) return -EOPNOTSUPP; + + /* There's no sense in having PEBS for non sampling events: */ + if (!is_sampling_event(event)) + return -EINVAL; } /* * check that PEBS LBR correction does not conflict with -- GitLab From d21816c24591060a0af9fd258f85a1e5c04fba0f Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Tue, 13 Dec 2016 12:09:56 -0800 Subject: [PATCH 649/786] aio: fix lock dep warning [ Upstream commit a12f1ae61c489076a9aeb90bddca7722bf330df3 ] lockdep reports a warnning. 
file_start_write/file_end_write only acquire/release the lock for regular files. So checking the files in aio side too. [ 453.532141] ------------[ cut here ]------------ [ 453.533011] WARNING: CPU: 1 PID: 1298 at ../kernel/locking/lockdep.c:3514 lock_release+0x434/0x670 [ 453.533011] DEBUG_LOCKS_WARN_ON(depth <= 0) [ 453.533011] Modules linked in: [ 453.533011] CPU: 1 PID: 1298 Comm: fio Not tainted 4.9.0+ #964 [ 453.533011] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.9.0-1.fc24 04/01/2014 [ 453.533011] ffff8803a24b7a70 ffffffff8196cffb ffff8803a24b7ae8 0000000000000000 [ 453.533011] ffff8803a24b7ab8 ffffffff81091ee1 ffff8803a5dba700 00000dba00000008 [ 453.533011] ffffed0074496f59 ffff8803a5dbaf54 ffff8803ae0f8488 fffffffffffffdef [ 453.533011] Call Trace: [ 453.533011] [] dump_stack+0x67/0x9c [ 453.533011] [] __warn+0x111/0x130 [ 453.533011] [] warn_slowpath_fmt+0x97/0xb0 [ 453.533011] [] ? __warn+0x130/0x130 [ 453.533011] [] ? blk_finish_plug+0x29/0x60 [ 453.533011] [] lock_release+0x434/0x670 [ 453.533011] [] ? import_single_range+0xd4/0x110 [ 453.533011] [] ? rw_verify_area+0x65/0x140 [ 453.533011] [] ? aio_write+0x1f6/0x280 [ 453.533011] [] aio_write+0x229/0x280 [ 453.533011] [] ? aio_complete+0x640/0x640 [ 453.533011] [] ? debug_check_no_locks_freed+0x1a0/0x1a0 [ 453.533011] [] ? debug_lockdep_rcu_enabled.part.2+0x1a/0x30 [ 453.533011] [] ? debug_lockdep_rcu_enabled+0x35/0x40 [ 453.533011] [] ? __might_fault+0x7e/0xf0 [ 453.533011] [] do_io_submit+0x94c/0xb10 [ 453.533011] [] ? do_io_submit+0x23e/0xb10 [ 453.533011] [] ? SyS_io_destroy+0x270/0x270 [ 453.533011] [] ? mark_held_locks+0x23/0xc0 [ 453.533011] [] ? trace_hardirqs_on_thunk+0x1a/0x1c [ 453.533011] [] SyS_io_submit+0x10/0x20 [ 453.533011] [] entry_SYSCALL_64_fastpath+0x18/0xad [ 453.533011] [] ? trace_hardirqs_off_caller+0xc0/0x110 [ 453.533011] ---[ end trace b2fbe664d1cc0082 ]--- Cc: Dmitry Monakhov Cc: Jan Kara Cc: Christoph Hellwig Cc: Al Viro Reviewed-by: Christoph Hellwig Signed-off-by: Shaohua Li Signed-off-by: Al Viro Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/aio.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/aio.c b/fs/aio.c index 428484f2f841..0fcb49ad67d4 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1085,7 +1085,8 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2) * Tell lockdep we inherited freeze protection from submission * thread. */ - __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE); + if (S_ISREG(file_inode(file)->i_mode)) + __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE); file_end_write(file); } @@ -1492,7 +1493,8 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, * by telling it the lock got released so that it doesn't * complain about held lock when we return to userspace. */ - __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); + if (S_ISREG(file_inode(file)->i_mode)) + __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE); } kfree(iovec); return ret; -- GitLab From 68a5dc38573586ad47befe5b91c62d7c2cb8141d Mon Sep 17 00:00:00 2001 From: Dave Kleikamp Date: Wed, 11 Jan 2017 13:25:00 -0600 Subject: [PATCH 650/786] coredump: Ensure proper size of sparse core files [ Upstream commit 4d22c75d4c7b5c5f4bd31054f09103ee490878fd ] If the last section of a core file ends with an unmapped or zero page, the size of the file does not correspond with the last dump_skip() call. gdb complains that the file is truncated and can be confusing to users. 
After all of the vma sections are written, make sure that the file size is no smaller than the current file position. This problem can be demonstrated with gdb's bigcore testcase on the sparc architecture. Signed-off-by: Dave Kleikamp Cc: Alexander Viro Cc: linux-fsdevel@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Al Viro Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/binfmt_elf.c | 1 + fs/coredump.c | 18 ++++++++++++++++++ include/linux/coredump.h | 1 + 3 files changed, 20 insertions(+) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 2472af2798c7..cfd724f98332 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2296,6 +2296,7 @@ static int elf_core_dump(struct coredump_params *cprm) goto end_coredump; } } + dump_truncate(cprm); if (!elf_core_write_extra_data(cprm)) goto end_coredump; diff --git a/fs/coredump.c b/fs/coredump.c index eb9c92c9b20f..4407e27beca9 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -833,3 +833,21 @@ int dump_align(struct coredump_params *cprm, int align) return mod ? dump_skip(cprm, align - mod) : 1; } EXPORT_SYMBOL(dump_align); + +/* + * Ensures that file size is big enough to contain the current file + * postion. This prevents gdb from complaining about a truncated file + * if the last "write" to the file was dump_skip. + */ +void dump_truncate(struct coredump_params *cprm) +{ + struct file *file = cprm->file; + loff_t offset; + + if (file->f_op->llseek && file->f_op->llseek != no_llseek) { + offset = file->f_op->llseek(file, 0, SEEK_CUR); + if (i_size_read(file->f_mapping->host) < offset) + do_truncate(file->f_path.dentry, offset, 0, file); + } +} +EXPORT_SYMBOL(dump_truncate); diff --git a/include/linux/coredump.h b/include/linux/coredump.h index d016a121a8c4..28ffa94aed6b 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -14,6 +14,7 @@ struct coredump_params; extern int dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); +extern void dump_truncate(struct coredump_params *cprm); #ifdef CONFIG_COREDUMP extern void do_coredump(const siginfo_t *siginfo); #else -- GitLab From 9d00195bc0afa0252b9cdb157eb4ed1e13631bc6 Mon Sep 17 00:00:00 2001 From: Nikita Yushchenko Date: Wed, 11 Jan 2017 21:56:31 +0300 Subject: [PATCH 651/786] swiotlb: ensure that page-sized mappings are page-aligned [ Upstream commit 602d9858f07c72eab64f5f00e2fae55f9902cfbe ] Some drivers do depend on page mappings to be page aligned. Swiotlb already enforces such alignment for mappings greater than page, extend that to page-sized mappings as well. Without this fix, nvme hits BUG() in nvme_setup_prps(), because that routine assumes page-aligned mappings. Signed-off-by: Nikita Yushchenko Reviewed-by: Christoph Hellwig Reviewed-by: Sagi Grimberg Signed-off-by: Konrad Rzeszutek Wilk Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- lib/swiotlb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/swiotlb.c b/lib/swiotlb.c index ad1d2962d129..b7812df04437 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -456,11 +456,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); /* - * For mappings greater than a page, we limit the stride (and - * hence alignment) to a page size. + * For mappings greater than or equal to a page, we limit the stride + * (and hence alignment) to a page size. 
*/ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; - if (size > PAGE_SIZE) + if (size >= PAGE_SIZE) stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); else stride = 1; -- GitLab From 0e9867b7113c56b367f2e753cd411cf7cef0d2ec Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 28 Dec 2016 11:33:48 +0100 Subject: [PATCH 652/786] s390/ctl_reg: make __ctl_load a full memory barrier [ Upstream commit e991c24d68b8c0ba297eeb7af80b1e398e98c33f ] We have quite a lot of code that depends on the order of the __ctl_load inline assemby and subsequent memory accesses, like e.g. disabling lowcore protection and the writing to lowcore. Since the __ctl_load macro does not have memory barrier semantics, nor any other dependencies the compiler is, theoretically, free to shuffle code around. Or in other words: storing to lowcore could happen before lowcore protection is disabled. In order to avoid this class of potential bugs simply add a full memory barrier to the __ctl_load macro. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/s390/include/asm/ctl_reg.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h index d7697ab802f6..8e136b88cdf4 100644 --- a/arch/s390/include/asm/ctl_reg.h +++ b/arch/s390/include/asm/ctl_reg.h @@ -15,7 +15,9 @@ BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\ asm volatile( \ " lctlg %1,%2,%0\n" \ - : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\ + : \ + : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \ + : "memory"); \ } #define __ctl_store(array, low, high) { \ -- GitLab From 5f54c4e1e2afd0a437e24c0b9689728c1afc1591 Mon Sep 17 00:00:00 2001 From: Amelie Delaunay Date: Thu, 12 Jan 2017 16:09:44 +0100 Subject: [PATCH 653/786] usb: dwc2: gadget: Fix GUSBCFG.USBTRDTIM value [ Upstream commit ca02954ada711b08e5b0d84590a631fd63ed39f9 ] USBTrdTim must be programmed to 0x5 when phy has a UTMI+ 16-bit wide interface or 0x9 when it has a 8-bit wide interface. GUSBCFG reset value (Value After Reset: 0x1400) sets USBTrdTim to 0x5. In case of 8-bit UTMI+, without clearing GUSBCFG.USBTRDTIM mask, USBTrdTim results in 0xD (0x5 | 0x9). That's why we need to clear GUSBCFG.USBTRDTIM mask before setting USBTrdTim value, to ensure USBTrdTim is correctly set in case of 8-bit UTMI+. Signed-off-by: Amelie Delaunay Signed-off-by: Felipe Balbi Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc2/gadget.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 24fbebc9b409..cfdd5c3da236 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -2532,7 +2532,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, /* keep other bits untouched (so e.g. forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); /* set the PLL on, remove the HNP/SRP and set the PHY */ val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; @@ -3403,7 +3403,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) /* keep other bits untouched (so e.g. 
forced modes are not lost) */ usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | - GUSBCFG_HNPCAP); + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK); /* set the PLL on, remove the HNP/SRP and set the PHY */ trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; -- GitLab From fa1dbf505aefe87cb3adbe279c3eaac087d5790d Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Fri, 13 Jan 2017 22:38:27 +0100 Subject: [PATCH 654/786] be2net: fix status check in be_cmd_pmac_add() [ Upstream commit fe68d8bfe59c561664aa87d827aa4b320eb08895 ] Return value from be_mcc_notify_wait() contains a base completion status together with an additional status. The base_status() macro need to be used to access base status. Fixes: e3a7ae2 be2net: Changing MAC Address of a VF was broken Cc: Sathya Perla Cc: Ajit Khaparde Cc: Sriharsha Basavapatna Cc: Somnath Kotur Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/emulex/benet/be_cmds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 0e74529a4209..30e855004c57 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, err: mutex_unlock(&adapter->mcc_lock); - if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) + if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; return status; -- GitLab From 02434def6fd0df57a5c4b1309b7d16f985234a7d Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Fri, 13 Jan 2017 22:38:28 +0100 Subject: [PATCH 655/786] be2net: don't delete MAC on close on unprivileged BE3 VFs [ Upstream commit 6d928ae590c8d58cfd5cca997d54394de139cbb7 ] BE3 VFs without FILTMGMT privilege are not allowed to modify its MAC, VLAN table and UC/MC lists. So don't try to delete MAC on such VFs. Cc: Sathya Perla Cc: Ajit Khaparde Cc: Sriharsha Basavapatna Cc: Somnath Kotur Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/emulex/benet/be_main.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 9711ca4510fa..a25d35a1b03d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3630,7 +3630,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) static void be_disable_if_filters(struct be_adapter *adapter) { - be_dev_mac_del(adapter, adapter->pmac_id[0]); + /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) + be_dev_mac_del(adapter, adapter->pmac_id[0]); + be_clear_uc_list(adapter); be_clear_mc_list(adapter); -- GitLab From cc439964fab1a58f5f7d9041845228bdd6ddfa6c Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Fri, 13 Jan 2017 22:38:29 +0100 Subject: [PATCH 656/786] be2net: fix MAC addr setting on privileged BE3 VFs [ Upstream commit 34393529163af7163ef8459808e3cf2af7db7f16 ] During interface opening MAC address stored in netdev->dev_addr is programmed in the HW with exception of BE3 VFs where the initial MAC is programmed by parent PF. 
This is OK when MAC address is not changed when an interfaces is down. In this case the requested MAC is stored to netdev->dev_addr and later is stored into HW during opening. But this is not done for all BE3 VFs so the NIC HW does not know anything about this change and all traffic is filtered. This is the case of bonding if fail_over_mac == 0 where the MACs of the slaves are changed while they are down. The be2net behavior is too restrictive because if a BE3 VF has the FILTMGMT privilege then it is able to modify its MAC without any restriction. To solve the described problem the driver should take care about these privileged BE3 VFs so the MAC is programmed during opening. And by contrast unpriviled BE3 VFs should not be allowed to change its MAC in any case. Cc: Sathya Perla Cc: Ajit Khaparde Cc: Sriharsha Basavapatna Cc: Somnath Kotur Signed-off-by: Ivan Vecera Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/emulex/benet/be_main.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a25d35a1b03d..b3c9cbef766e 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -319,6 +319,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p) if (ether_addr_equal(addr->sa_data, adapter->dev_mac)) return 0; + /* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC + * address + */ + if (BEx_chip(adapter) && be_virtfn(adapter) && + !check_privilege(adapter, BE_PRIV_FILTMGMT)) + return -EPERM; + /* if device is not running, copy MAC to netdev->dev_addr */ if (!netif_running(netdev)) goto done; @@ -3787,8 +3794,9 @@ static int be_enable_if_filters(struct be_adapter *adapter) if (status) return status; - /* For BE3 VFs, the PF programs the initial MAC address */ - if (!(BEx_chip(adapter) && be_virtfn(adapter))) { + /* Don't add MAC on BE3 VFs without FILTMGMT privilege */ + if (!BEx_chip(adapter) || !be_virtfn(adapter) || + check_privilege(adapter, BE_PRIV_FILTMGMT)) { status = be_dev_mac_add(adapter, adapter->netdev->dev_addr); if (status) return status; -- GitLab From e1eac347d971b59f3b7de732d488ef00e087e2f8 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 11 Jan 2017 14:59:38 +0900 Subject: [PATCH 657/786] perf probe: Fix to show correct locations for events on modules [ Upstream commit d2d4edbebe07ddb77980656abe7b9bc7a9e0cdf7 ] Fix to show correct locations for events on modules by relocating given address instead of retrying after failure. This happens when the module text size is big enough, bigger than sh_addr, because the original code retries with given address + sh_addr if it failed to find CU DIE at the given address. Any address smaller than sh_addr always fails and it retries with the correct address, but addresses bigger than sh_addr will get a CU DIE which is on the given address (not adjusted by sh_addr). In my environment(x86-64), the sh_addr of ".text" section is 0x10030. Since i915 is a huge kernel module, we can see this issue as below. $ grep "[Tt] .*\[i915\]" /proc/kallsyms | sort | head -n1 ffffffffc0270000 t i915_switcheroo_can_switch [i915] ffffffffc0270000 + 0x10030 = ffffffffc0280030, so we'll check symbols cross this boundary. 
$ grep "[Tt] .*\[i915\]" /proc/kallsyms | grep -B1 ^ffffffffc028\ | head -n 2 ffffffffc027ff80 t haswell_init_clock_gating [i915] ffffffffc0280110 t valleyview_init_clock_gating [i915] So setup probes on both function and see what happen. $ sudo ./perf probe -m i915 -a haswell_init_clock_gating \ -a valleyview_init_clock_gating Added new events: probe:haswell_init_clock_gating (on haswell_init_clock_gating in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating in i915) You can now use it in all perf tools, such as: perf record -e probe:valleyview_init_clock_gating -aR sleep 1 $ sudo ./perf probe -l probe:haswell_init_clock_gating (on haswell_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) probe:valleyview_init_clock_gating (on i915_vga_set_decode:4@gpu/drm/i915/i915_drv.c in i915) As you can see, haswell_init_clock_gating is correctly shown, but valleyview_init_clock_gating is not. With this patch, both events are shown correctly. $ sudo ./perf probe -l probe:haswell_init_clock_gating (on haswell_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) Committer notes: In my case: # perf probe -m i915 -a haswell_init_clock_gating -a valleyview_init_clock_gating Added new events: probe:haswell_init_clock_gating (on haswell_init_clock_gating in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating in i915) You can now use it in all perf tools, such as: perf record -e probe:valleyview_init_clock_gating -aR sleep 1 # perf probe -l probe:haswell_init_clock_gating (on i915_getparam+432@gpu/drm/i915/i915_drv.c in i915) probe:valleyview_init_clock_gating (on __i915_printk+240@gpu/drm/i915/i915_drv.c in i915) # # readelf -SW /lib/modules/4.9.0+/build/vmlinux | egrep -w '.text|Name' [Nr] Name Type Address Off Size ES Flg Lk Inf Al [ 1] .text PROGBITS ffffffff81000000 200000 822fd3 00 AX 0 0 4096 # So both are b0rked, now with the fix: # perf probe -m i915 -a haswell_init_clock_gating -a valleyview_init_clock_gating Added new events: probe:haswell_init_clock_gating (on haswell_init_clock_gating in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating in i915) You can now use it in all perf tools, such as: perf record -e probe:valleyview_init_clock_gating -aR sleep 1 # perf probe -l probe:haswell_init_clock_gating (on haswell_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) probe:valleyview_init_clock_gating (on valleyview_init_clock_gating@gpu/drm/i915/intel_pm.c in i915) # Both looks correct. 
Signed-off-by: Masami Hiramatsu Tested-by: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/148411436777.9978.1440275861947194930.stgit@devbox Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/perf/util/probe-finder.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index df4debe564da..0278fe1a4cc6 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1543,16 +1543,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, Dwarf_Addr _addr = 0, baseaddr = 0; const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp; int baseline = 0, lineno = 0, ret = 0; - bool reloc = false; -retry: + /* We always need to relocate the address for aranges */ + if (debuginfo__get_text_offset(dbg, &baseaddr) == 0) + addr += baseaddr; /* Find cu die */ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { - if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) { - addr += baseaddr; - reloc = true; - goto retry; - } pr_warning("Failed to find debug information for address %lx\n", addr); ret = -EINVAL; -- GitLab From 18b200e0c8ee07e7e3f2b1bd7a5552b58457452f Mon Sep 17 00:00:00 2001 From: "Karicheri, Muralidharan" Date: Fri, 13 Jan 2017 09:32:34 -0500 Subject: [PATCH 658/786] net: phy: dp83867: allow RGMII_TXID/RGMII_RXID interface types [ Upstream commit 34c55cf2fc75f8bf6ba87df321038c064cf2d426 ] Currently dp83867 driver returns error if phy interface type PHY_INTERFACE_MODE_RGMII_RXID is used to set the rx only internal delay. Similarly issue happens for PHY_INTERFACE_MODE_RGMII_TXID. Fix this by checking also the interface type if a particular delay value is missing in the phy dt bindings. Also update the DT document accordingly. Signed-off-by: Murali Karicheri Signed-off-by: Sekhar Nori Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- Documentation/devicetree/bindings/net/ti,dp83867.txt | 6 ++++-- drivers/net/phy/dp83867.c | 8 ++++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt index 5d21141a68b5..75bcaa355880 100644 --- a/Documentation/devicetree/bindings/net/ti,dp83867.txt +++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt @@ -3,9 +3,11 @@ Required properties: - reg - The ID number for the phy, usually a small integer - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h - for applicable values + for applicable values. Required only if interface type is + PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h - for applicable values + for applicable values. 
Required only if interface type is + PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID - ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h for applicable values diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 91177a4a32ad..4cad95552cf1 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -113,12 +113,16 @@ static int dp83867_of_init(struct phy_device *phydev) ret = of_property_read_u32(of_node, "ti,rx-internal-delay", &dp83867->rx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)) return ret; ret = of_property_read_u32(of_node, "ti,tx-internal-delay", &dp83867->tx_id_delay); - if (ret) + if (ret && + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) return ret; return of_property_read_u32(of_node, "ti,fifo-depth", -- GitLab From 9f8ffe4e09520e209f41d01c73a29598414123b1 Mon Sep 17 00:00:00 2001 From: Parthasarathy Bhuvaragan Date: Fri, 13 Jan 2017 15:46:25 +0100 Subject: [PATCH 659/786] tipc: allocate user memory with GFP_KERNEL flag [ Upstream commit 57d5f64d83ab5b5a5118b1597386dd76eaf4340d ] Until now, we allocate memory always with GFP_ATOMIC flag. When the system is under memory pressure and a user tries to send, the send fails due to low memory. However, the user application can wait for free memory if we allocate it using GFP_KERNEL flag. In this commit, we use allocate memory with GFP_KERNEL for all user allocation. Reported-by: Rune Torgersen Acked-by: Jon Maloy Signed-off-by: Parthasarathy Bhuvaragan Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/tipc/discover.c | 4 ++-- net/tipc/link.c | 2 +- net/tipc/msg.c | 16 ++++++++-------- net/tipc/msg.h | 2 +- net/tipc/name_distr.c | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/net/tipc/discover.c b/net/tipc/discover.c index 6b109a808d4c..02462d67d191 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb, /* Send response, if necessary */ if (respond && (mtyp == DSC_REQ_MSG)) { - rskb = tipc_buf_acquire(MAX_H_SIZE); + rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC); if (!rskb) return; tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer); @@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b, req = kmalloc(sizeof(*req), GFP_ATOMIC); if (!req) return -ENOMEM; - req->buf = tipc_buf_acquire(MAX_H_SIZE); + req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC); if (!req->buf) { kfree(req); return -ENOMEM; diff --git a/net/tipc/link.c b/net/tipc/link.c index bda89bf9f4ff..4e8647aef01c 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1395,7 +1395,7 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl, msg_set_seqno(hdr, seqno++); pktlen = msg_size(hdr); msg_set_size(&tnlhdr, pktlen + INT_H_SIZE); - tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE); + tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC); if (!tnlskb) { pr_warn("%sunable to send packet\n", link_co_err); return; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 1bd9817be13a..56ea0adcd285 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -58,12 +58,12 @@ static unsigned int align(unsigned int i) * NOTE: Headroom is reserved to allow prepending of a data link header. * There may also be unrequested tailroom present at the buffer's end. 
*/ -struct sk_buff *tipc_buf_acquire(u32 size) +struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp) { struct sk_buff *skb; unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; - skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); + skb = alloc_skb_fclone(buf_size, gfp); if (skb) { skb_reserve(skb, BUF_HEADROOM); skb_put(skb, size); @@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type, struct tipc_msg *msg; struct sk_buff *buf; - buf = tipc_buf_acquire(hdr_sz + data_sz); + buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC); if (unlikely(!buf)) return NULL; @@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, /* No fragmentation needed? */ if (likely(msz <= pktmax)) { - skb = tipc_buf_acquire(msz); + skb = tipc_buf_acquire(msz, GFP_KERNEL); if (unlikely(!skb)) return -ENOMEM; skb_orphan(skb); @@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, msg_set_importance(&pkthdr, msg_importance(mhdr)); /* Prepare first fragment */ - skb = tipc_buf_acquire(pktmax); + skb = tipc_buf_acquire(pktmax, GFP_KERNEL); if (!skb) return -ENOMEM; skb_orphan(skb); @@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, pktsz = drem + INT_H_SIZE; else pktsz = pktmax; - skb = tipc_buf_acquire(pktsz); + skb = tipc_buf_acquire(pktsz, GFP_KERNEL); if (!skb) { rc = -ENOMEM; goto error; @@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg, if (msz > (max / 2)) return false; - _skb = tipc_buf_acquire(max); + _skb = tipc_buf_acquire(max, GFP_ATOMIC); if (!_skb) return false; @@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err) /* Never return SHORT header; expand by replacing buffer if necessary */ if (msg_short(hdr)) { - *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen); + *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC); if (!*skb) goto exit; memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen); diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 50a739860d37..6c0455caf302 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -820,7 +820,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr) return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG); } -struct sk_buff *tipc_buf_acquire(u32 size); +struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp); bool tipc_msg_validate(struct sk_buff *skb); bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err); void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type, diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index c1cfd92de17a..23f8899e0f8c 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size, u32 dest) { struct tipc_net *tn = net_generic(net, tipc_net_id); - struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); + struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC); struct tipc_msg *msg; if (buf != NULL) { -- GitLab From b6f75b986a7f7b79953b94f9778de295a253c624 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 11 Jan 2017 15:01:57 +0900 Subject: [PATCH 660/786] perf probe: Fix to probe on gcc generated functions in modules [ Upstream commit 613f050d68a8ed3c0b18b9568698908ef7bbc1f7 ] Fix to probe on gcc generated functions on modules. Since probing on a module is based on its symbol name, it should be adjusted on actual symbols. E.g. without this fix, perf probe shows probe definition on non-exist symbol as below. 
$ perf probe -m build-x86_64/net/netfilter/nf_nat.ko -F in_range* in_range.isra.12 $ perf probe -m build-x86_64/net/netfilter/nf_nat.ko -D in_range p:probe/in_range nf_nat:in_range+0 With this fix, perf probe correctly shows a probe on gcc-generated symbol. $ perf probe -m build-x86_64/net/netfilter/nf_nat.ko -D in_range p:probe/in_range nf_nat:in_range.isra.12+0 This also fixes same problem on online module as below. $ perf probe -m i915 -D assert_plane p:probe/assert_plane i915:assert_plane.constprop.134+0 Signed-off-by: Masami Hiramatsu Tested-by: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/148411450673.9978.14905987549651656075.stgit@devbox Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/perf/util/probe-event.c | 45 ++++++++++++++++++++++------------ tools/perf/util/probe-finder.c | 7 ++++-- tools/perf/util/probe-finder.h | 3 +++ 3 files changed, 37 insertions(+), 18 deletions(-) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index d281ae2b54e8..1d9c02bc00f1 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -645,18 +645,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, return ret; } -static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, - int ntevs, const char *module) +static int +post_process_module_probe_trace_events(struct probe_trace_event *tevs, + int ntevs, const char *module, + struct debuginfo *dinfo) { + Dwarf_Addr text_offs = 0; int i, ret = 0; char *mod_name = NULL; + struct map *map; if (!module) return 0; - mod_name = find_module_name(module); + map = get_target_map(module, false); + if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) { + pr_warning("Failed to get ELF symbols for %s\n", module); + return -EINVAL; + } + mod_name = find_module_name(module); for (i = 0; i < ntevs; i++) { + ret = post_process_probe_trace_point(&tevs[i].point, + map, (unsigned long)text_offs); + if (ret < 0) + break; tevs[i].point.module = strdup(mod_name ? mod_name : module); if (!tevs[i].point.module) { @@ -666,6 +679,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs, } free(mod_name); + map__put(map); + return ret; } @@ -722,7 +737,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse static int post_process_probe_trace_events(struct perf_probe_event *pev, struct probe_trace_event *tevs, int ntevs, const char *module, - bool uprobe) + bool uprobe, struct debuginfo *dinfo) { int ret; @@ -730,7 +745,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev, ret = add_exec_to_probe_trace_events(tevs, ntevs, module); else if (module) /* Currently ref_reloc_sym based probe is not for drivers */ - ret = add_module_to_probe_trace_events(tevs, ntevs, module); + ret = post_process_module_probe_trace_events(tevs, ntevs, + module, dinfo); else ret = post_process_kernel_probe_trace_events(tevs, ntevs); @@ -774,30 +790,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, } } - debuginfo__delete(dinfo); - if (ntevs > 0) { /* Succeeded to find trace events */ pr_debug("Found %d probe_trace_events.\n", ntevs); ret = post_process_probe_trace_events(pev, *tevs, ntevs, - pev->target, pev->uprobes); + pev->target, pev->uprobes, dinfo); if (ret < 0 || ret == ntevs) { + pr_debug("Post processing failed or all events are skipped. 
(%d)\n", ret); clear_probe_trace_events(*tevs, ntevs); zfree(tevs); + ntevs = 0; } - if (ret != ntevs) - return ret < 0 ? ret : ntevs; - ntevs = 0; - /* Fall through */ } + debuginfo__delete(dinfo); + if (ntevs == 0) { /* No error but failed to find probe point. */ pr_warning("Probe point '%s' not found.\n", synthesize_perf_probe_point(&pev->point)); return -ENOENT; - } - /* Error path : ntevs < 0 */ - pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); - if (ntevs < 0) { + } else if (ntevs < 0) { + /* Error path : ntevs < 0 */ + pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); if (ntevs == -EBADF) pr_warning("Warning: No dwarf info found in the vmlinux - " "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n"); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 0278fe1a4cc6..0d9d6e0803b8 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg, } /* For the kernel module, we need a special code to get a DIE */ -static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs) +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, + bool adjust_offset) { int n, i; Elf32_Word shndx; @@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs) if (!shdr) return -ENOENT; *offs = shdr->sh_addr; + if (adjust_offset) + *offs -= shdr->sh_offset; } } return 0; @@ -1545,7 +1548,7 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, int baseline = 0, lineno = 0, ret = 0; /* We always need to relocate the address for aranges */ - if (debuginfo__get_text_offset(dbg, &baseaddr) == 0) + if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0) addr += baseaddr; /* Find cu die */ if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) { diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index f1d8558f498e..2956c5198652 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr, struct perf_probe_point *ppt); +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, + bool adjust_offset); + /* Find a line range */ int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr); -- GitLab From 399566f8a4fb1ea442046942640e37d9ea9fa0d6 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Mon, 16 Jan 2017 18:31:39 +0200 Subject: [PATCH 661/786] net/mlx4_core: Eliminate warning messages for SRQ_LIMIT under SRIOV [ Upstream commit 9577b174cd0323d287c994ef0891db71666d0765 ] When running SRIOV, warnings for SRQ LIMIT events flood the Hypervisor's message log when (correct, normally operating) apps use SRQ LIMIT events as a trigger to post WQEs to SRQs. Add more information to the existing debug printout for SRQ_LIMIT, and output the warning messages only for the SRQ CATAS ERROR event. Fixes: acba2420f9d2 ("mlx4_core: Add wrapper functions and comm channel and slave event support to EQs") Fixes: e0debf9cb50d ("mlx4_core: Reduce warning message for SRQ_LIMIT event to debug level") Signed-off-by: Jack Morgenstein Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx4/eq.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index cd3638e6fe25..0509996957d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; case MLX4_EVENT_TYPE_SRQ_LIMIT: - mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", - __func__); + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n", + __func__, be32_to_cpu(eqe->event.srq.srqn), + eq->eqn); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ @@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) eq->eqn, eq->cons_index, ret); break; } - mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", - __func__, slave, - be32_to_cpu(eqe->event.srq.srqn), - eqe->type, eqe->subtype); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", + __func__, slave, + be32_to_cpu(eqe->event.srq.srqn), + eqe->type, eqe->subtype); if (!ret && slave != dev->caps.function) { - mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", - __func__, eqe->type, - eqe->subtype, slave); + if (eqe->type == + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) + mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", + __func__, eqe->type, + eqe->subtype, slave); mlx4_slave_event(dev, slave, eqe); break; } -- GitLab From 168bd51ec5efbb92eb9bcdefb1327ef22e4898a9 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 7 Feb 2017 20:56:08 +0800 Subject: [PATCH 662/786] sctp: check af before verify address in sctp_addr_id2transport [ Upstream commit 912964eacb111551db73429719eb5fadcab0ff8a ] Commit 6f29a1306131 ("sctp: sctp_addr_id2transport should verify the addr before looking up assoc") invoked sctp_verify_addr to verify the addr. But it didn't check af variable beforehand, once users pass an address with family = 0 through sockopt, sctp_get_af_specific will return NULL and NULL pointer dereference will be caused by af->sockaddr_len. This patch is to fix it by returning NULL if af variable is NULL. Fixes: 6f29a1306131 ("sctp: sctp_addr_id2transport should verify the addr before looking up assoc") Signed-off-by: Xin Long Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 487c127f4928..9647e314d4fc 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, union sctp_addr *laddr = (union sctp_addr *)addr; struct sctp_transport *transport; - if (sctp_verify_addr(sk, laddr, af->sockaddr_len)) + if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len)) return NULL; addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, -- GitLab From adfe95fe5b4290693a57f1682fcf3c4f61951086 Mon Sep 17 00:00:00 2001 From: Peter Dawson Date: Fri, 26 May 2017 06:35:18 +1000 Subject: [PATCH 663/786] ip6_tunnel, ip6_gre: fix setting of DSCP on encapsulated packets [ Upstream commit 0e9a709560dbcfbace8bf4019dc5298619235891 ] This fix addresses two problems in the way the DSCP field is formulated on the encapsulating header of IPv6 tunnels. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=195661 1) The IPv6 tunneling code was manipulating the DSCP field of the encapsulating packet using the 32b flowlabel. Since the flowlabel is only the lower 20b it was incorrect to assume that the upper 12b containing the DSCP and ECN fields would remain intact when formulating the encapsulating header. This fix handles the 'inherit' and 'fixed-value' DSCP cases explicitly using the extant dsfield u8 variable. 2) The use of INET_ECN_encapsulate(0, dsfield) in ip6_tnl_xmit was incorrect and resulted in the DSCP value always being set to 0. Commit 90427ef5d2a4 ("ipv6: fix flow labels when the traffic class is non-0") caused the regression by masking out the flowlabel which exposed the incorrect handling of the DSCP portion of the flowlabel in ip6_tunnel and ip6_gre. Fixes: 90427ef5d2a4 ("ipv6: fix flow labels when the traffic class is non-0") Signed-off-by: Peter Dawson Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ip6_gre.c | 13 +++++++------ net/ipv6/ip6_tunnel.c | 21 +++++++++++++-------- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index a5fdc1aa7a9a..d2844ee469cb 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -542,11 +542,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev) memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - dsfield = ipv4_get_dsfield(iph); - if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) - fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) - & IPV6_TCLASS_MASK; + dsfield = ipv4_get_dsfield(iph); + else + dsfield = ip6_tclass(t->parms.flowinfo); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; @@ -599,9 +598,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev) memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); - dsfield = ipv6_get_dsfield(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) - fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK); + dsfield = ipv6_get_dsfield(ipv6h); + else + dsfield = ip6_tclass(t->parms.flowinfo); + if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) fl6.flowlabel |= ip6_flowlabel(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 63fad2467a7e..1fc9daa7b1d6 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1196,7 +1196,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); ipv6h = ipv6_hdr(skb); - ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), + ip6_flow_hdr(ipv6h, dsfield, ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6)); ipv6h->hop_limit = hop_limit; ipv6h->nexthdr = proto; @@ -1231,8 +1231,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (tproto != IPPROTO_IPIP && tproto != 0) return -1; - dsfield = ipv4_get_dsfield(iph); - if (t->parms.collect_md) { struct ip_tunnel_info *tun_info; const struct ip_tunnel_key *key; @@ -1246,6 +1244,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = IPPROTO_IPIP; fl6.daddr = key->u.ipv6.dst; fl6.flowlabel = key->label; + dsfield = ip6_tclass(key->label); } else { if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) encap_limit = t->parms.encap_limit; @@ -1254,8 +1253,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = IPPROTO_IPIP; if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) - fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) - & IPV6_TCLASS_MASK; + dsfield = ipv4_get_dsfield(iph); + else + dsfield = ip6_tclass(t->parms.flowinfo); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) fl6.flowi6_mark = skb->mark; } @@ -1263,6 +1263,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) return -1; + dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph)); + skb_set_inner_ipproto(skb, IPPROTO_IPIP); err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, @@ -1296,8 +1298,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) ip6_tnl_addr_conflict(t, ipv6h)) return -1; - dsfield = ipv6_get_dsfield(ipv6h); - if (t->parms.collect_md) { struct ip_tunnel_info *tun_info; const struct ip_tunnel_key *key; @@ -1311,6 +1311,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = 
IPPROTO_IPV6; fl6.daddr = key->u.ipv6.dst; fl6.flowlabel = key->label; + dsfield = ip6_tclass(key->label); } else { offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ @@ -1333,7 +1334,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) fl6.flowi6_proto = IPPROTO_IPV6; if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) - fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK); + dsfield = ipv6_get_dsfield(ipv6h); + else + dsfield = ip6_tclass(t->parms.flowinfo); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) fl6.flowlabel |= ip6_flowlabel(ipv6h); if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) @@ -1343,6 +1346,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) return -1; + dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h)); + skb_set_inner_ipproto(skb, IPPROTO_IPV6); err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, -- GitLab From f9f73c58feefa8a5dda019df9c549c6e355e15be Mon Sep 17 00:00:00 2001 From: Eugeniu Rosca Date: Tue, 6 Jun 2017 00:08:10 +0200 Subject: [PATCH 664/786] ravb: Fix use-after-free on `ifconfig eth0 down` [ Upstream commit 79514ef670e9e575a1fe36922268c439d0f0ca8a ] Commit a47b70ea86bd ("ravb: unmap descriptors when freeing rings") has introduced the issue seen in [1] reproduced on H3ULCB board. Fix this by relocating the RX skb ringbuffer free operation, so that swiotlb page unmapping can be done first. Freeing of aligned TX buffers is not relevant to the issue seen in [1]. Still, reposition TX free calls as well, to have all kfree() operations performed consistently _after_ dma_unmap_*()/dma_free_*(). [1] Console screenshot with the problem reproduced: salvator-x login: root root@salvator-x:~# ifconfig eth0 up Micrel KSZ9031 Gigabit PHY e6800000.ethernet-ffffffff:00: \ attached PHY driver [Micrel KSZ9031 Gigabit PHY] \ (mii_bus:phy_addr=e6800000.ethernet-ffffffff:00, irq=235) IPv6: ADDRCONF(NETDEV_UP): eth0: link is not ready root@salvator-x:~# root@salvator-x:~# ifconfig eth0 down ================================================================== BUG: KASAN: use-after-free in swiotlb_tbl_unmap_single+0xc4/0x35c Write of size 1538 at addr ffff8006d884f780 by task ifconfig/1649 CPU: 0 PID: 1649 Comm: ifconfig Not tainted 4.12.0-rc4-00004-g112eb07287d1 #32 Hardware name: Renesas H3ULCB board based on r8a7795 (DT) Call trace: [] dump_backtrace+0x0/0x3a4 [] show_stack+0x14/0x1c [] dump_stack+0xf8/0x150 [] print_address_description+0x7c/0x330 [] kasan_report+0x2e0/0x2f4 [] check_memory_region+0x20/0x14c [] memcpy+0x48/0x68 [] swiotlb_tbl_unmap_single+0xc4/0x35c [] unmap_single+0x90/0xa4 [] swiotlb_unmap_page+0xc/0x14 [] __swiotlb_unmap_page+0xcc/0xe4 [] ravb_ring_free+0x514/0x870 [] ravb_close+0x288/0x36c [] __dev_close_many+0x14c/0x174 [] __dev_close+0xc8/0x144 [] __dev_change_flags+0xd8/0x194 [] dev_change_flags+0x60/0xb0 [] devinet_ioctl+0x484/0x9d4 [] inet_ioctl+0x190/0x194 [] sock_do_ioctl+0x78/0xa8 [] sock_ioctl+0x110/0x3c4 [] vfs_ioctl+0x90/0xa0 [] do_vfs_ioctl+0x148/0xc38 [] SyS_ioctl+0x44/0x74 [] el0_svc_naked+0x24/0x28 The buggy address belongs to the page: page:ffff7e001b6213c0 count:0 mapcount:0 mapping: (null) index:0x0 flags: 0x4000000000000000() raw: 4000000000000000 0000000000000000 0000000000000000 00000000ffffffff raw: 0000000000000000 ffff7e001b6213e0 0000000000000000 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: 
ffff8006d884f680: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ffff8006d884f700: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff >ffff8006d884f780: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ^ ffff8006d884f800: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ffff8006d884f880: ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ================================================================== Disabling lock debugging due to kernel taint root@salvator-x:~# Fixes: a47b70ea86bd ("ravb: unmap descriptors when freeing rings") Signed-off-by: Eugeniu Rosca Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/renesas/ravb_main.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 510ff62584d6..11623aad0e8e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -229,18 +229,6 @@ static void ravb_ring_free(struct net_device *ndev, int q) int ring_size; int i; - /* Free RX skb ringbuffer */ - if (priv->rx_skb[q]) { - for (i = 0; i < priv->num_rx_ring[q]; i++) - dev_kfree_skb(priv->rx_skb[q][i]); - } - kfree(priv->rx_skb[q]); - priv->rx_skb[q] = NULL; - - /* Free aligned TX buffers */ - kfree(priv->tx_align[q]); - priv->tx_align[q] = NULL; - if (priv->rx_ring[q]) { for (i = 0; i < priv->num_rx_ring[q]; i++) { struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; @@ -269,6 +257,18 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_ring[q] = NULL; } + /* Free RX skb ringbuffer */ + if (priv->rx_skb[q]) { + for (i = 0; i < priv->num_rx_ring[q]; i++) + dev_kfree_skb(priv->rx_skb[q][i]); + } + kfree(priv->rx_skb[q]); + priv->rx_skb[q] = NULL; + + /* Free aligned TX buffers */ + kfree(priv->tx_align[q]); + priv->tx_align[q] = NULL; + /* Free TX skb ringbuffer. * SKBs are freed by ravb_tx_free() call above. */ -- GitLab From 647f605276c0b5e3019fcf8ad302d217d87adedc Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Fri, 23 Jun 2017 15:08:41 -0700 Subject: [PATCH 665/786] mm/vmalloc.c: huge-vmap: fail gracefully on unexpected huge vmap mappings commit 029c54b09599573015a5c18dbe59cbdf42742237 upstream. Existing code that uses vmalloc_to_page() may assume that any address for which is_vmalloc_addr() returns true may be passed into vmalloc_to_page() to retrieve the associated struct page. This is not un unreasonable assumption to make, but on architectures that have CONFIG_HAVE_ARCH_HUGE_VMAP=y, it no longer holds, and we need to ensure that vmalloc_to_page() does not go off into the weeds trying to dereference huge PUDs or PMDs as table entries. Given that vmalloc() and vmap() themselves never create huge mappings or deal with compound pages at all, there is no correct answer in this case, so return NULL instead, and issue a warning. When reading /proc/kcore on arm64, you will hit an oops as soon as you hit the huge mappings used for the various segments that make up the mapping of vmlinux. With this patch applied, you will no longer hit the oops, but the kcore contents willl be incorrect (these regions will be zeroed out) We are fixing this for kcore specifically, so it avoids vread() for those regions. At least one other problematic user exists, i.e., /dev/kmem, but that is currently broken on arm64 for other reasons. 
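As an aside for readers following the reasoning above, the caller pattern that motivates returning NULL can be sketched as follows. This is an illustrative fragment in kernel style, not code from this patch; read_vmalloc_page() is an invented helper name.

	#include <linux/mm.h>
	#include <linux/vmalloc.h>
	#include <linux/string.h>

	/*
	 * Illustrative only: a typical consumer of vmalloc_to_page().  On
	 * CONFIG_HAVE_ARCH_HUGE_VMAP=y kernels an address can satisfy
	 * is_vmalloc_addr() yet live inside a huge (PMD/PUD) mapping, so
	 * after this change vmalloc_to_page() returns NULL there and the
	 * caller must cope, e.g. by skipping the region the way the kcore
	 * vread() path does.
	 */
	static int read_vmalloc_page(const void *addr, void *buf)
	{
		struct page *page;

		if (!is_vmalloc_addr(addr))
			return -EINVAL;

		page = vmalloc_to_page(addr);	/* may now be NULL for huge mappings */
		if (!page)
			return -EFAULT;

		memcpy(buf, page_address(page), PAGE_SIZE);
		return 0;
	}

The hunk that follows implements exactly this contract on the vmalloc_to_page() side by refusing to dereference bad PUD/PMD entries.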
Link: http://lkml.kernel.org/r/20170609082226.26152-1-ard.biesheuvel@linaro.org Signed-off-by: Ard Biesheuvel Acked-by: Mark Rutland Reviewed-by: Laura Abbott Cc: Michal Hocko Cc: zhong jiang Cc: Dave Hansen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds [ardb: non-trivial backport to v4.9] Signed-off-by: Ard Biesheuvel Signed-off-by: Greg Kroah-Hartman --- mm/vmalloc.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index f2481cb4e6b2..195de42bea1f 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -244,11 +244,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) */ VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr)); + /* + * Don't dereference bad PUD or PMD (below) entries. This will also + * identify huge mappings, which we may encounter on architectures + * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be + * identified as vmalloc addresses by is_vmalloc_addr(), but are + * not [unambiguously] associated with a struct page, so there is + * no correct value to return for them. + */ if (!pgd_none(*pgd)) { pud_t *pud = pud_offset(pgd, addr); - if (!pud_none(*pud)) { + WARN_ON_ONCE(pud_bad(*pud)); + if (!pud_none(*pud) && !pud_bad(*pud)) { pmd_t *pmd = pmd_offset(pud, addr); - if (!pmd_none(*pmd)) { + WARN_ON_ONCE(pmd_bad(*pmd)); + if (!pmd_none(*pmd) && !pmd_bad(*pmd)) { pte_t *ptep, pte; ptep = pte_offset_map(pmd, addr); -- GitLab From 1e1666257cb69022e7a6fe61b1cf041a852ce1bc Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Wed, 3 May 2017 16:43:19 +0200 Subject: [PATCH 666/786] xfrm: fix stack access out of bounds with CONFIG_XFRM_SUB_POLICY commit 9b3eb54106cf6acd03f07cf0ab01c13676a226c2 upstream. When CONFIG_XFRM_SUB_POLICY=y, xfrm_dst stores a copy of the flowi for that dst. Unfortunately, the code that allocates and fills this copy doesn't care about what type of flowi (flowi, flowi4, flowi6) gets passed. In multiple code paths (from raw_sendmsg, from TCP when replying to a FIN, in vxlan, geneve, and gre), the flowi that gets passed to xfrm is actually an on-stack flowi4, so we end up reading stuff from the stack past the end of the flowi4 struct. Since xfrm_dst->origin isn't used anywhere following commit ca116922afa8 ("xfrm: Eliminate "fl" and "pol" args to xfrm_bundle_ok()."), just get rid of it. xfrm_dst->partner isn't used either, so get rid of that too. 
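To make the over-read concrete: the removed code copied sizeof(struct flowi) bytes out of storage that only held a struct flowi4. A minimal sketch of that pre-patch pattern, with an invented function name, mirroring what xfrm_dst_update_origin() effectively did and what the hunks that follow delete:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <net/flow.h>

	/*
	 * Illustrative only -- the shape of the bug being removed.  Callers
	 * such as raw_sendmsg() build a struct flowi4 on the stack; struct
	 * flowi is the larger container wrapping flowi4/flowi6, so copying
	 * sizeof(struct flowi) bytes reads past the end of that stack slot.
	 */
	static void stack_overread_sketch(void **origin, struct flowi4 *fl4)
	{
		const struct flowi *fl = flowi4_to_flowi(fl4);	/* same pointer, bigger type */

		*origin = kmalloc(sizeof(*fl), GFP_ATOMIC);	/* what xfrm_dst_alloc_copy() allocated */
		if (*origin)
			memcpy(*origin, fl, sizeof(*fl));	/* over-reads the on-stack flowi4 */
	}

Since neither xfrm_dst->origin nor xfrm_dst->partner has a remaining user, deleting the copies is simpler than teaching them about the flowi variants.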
Fixes: 9d6ec938019c ("ipv4: Use flowi4 in public route lookup interfaces.") Signed-off-by: Sabrina Dubroca Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- include/net/xfrm.h | 10 --------- net/xfrm/xfrm_policy.c | 47 ------------------------------------------ 2 files changed, 57 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 31947b9c21d6..835c30e491c8 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -944,10 +944,6 @@ struct xfrm_dst { struct flow_cache_object flo; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; int num_pols, num_xfrms; -#ifdef CONFIG_XFRM_SUB_POLICY - struct flowi *origin; - struct xfrm_selector *partner; -#endif u32 xfrm_genid; u32 policy_genid; u32 route_mtu_cached; @@ -963,12 +959,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) dst_release(xdst->route); if (likely(xdst->u.dst.xfrm)) xfrm_state_put(xdst->u.dst.xfrm); -#ifdef CONFIG_XFRM_SUB_POLICY - kfree(xdst->origin); - xdst->origin = NULL; - kfree(xdst->partner); - xdst->partner = NULL; -#endif } #endif diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index e0437a7aa1a2..8da67f7c9c5a 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1808,43 +1808,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, goto out; } -#ifdef CONFIG_XFRM_SUB_POLICY -static int xfrm_dst_alloc_copy(void **target, const void *src, int size) -{ - if (!*target) { - *target = kmalloc(size, GFP_ATOMIC); - if (!*target) - return -ENOMEM; - } - - memcpy(*target, src, size); - return 0; -} -#endif - -static int xfrm_dst_update_parent(struct dst_entry *dst, - const struct xfrm_selector *sel) -{ -#ifdef CONFIG_XFRM_SUB_POLICY - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - return xfrm_dst_alloc_copy((void **)&(xdst->partner), - sel, sizeof(*sel)); -#else - return 0; -#endif -} - -static int xfrm_dst_update_origin(struct dst_entry *dst, - const struct flowi *fl) -{ -#ifdef CONFIG_XFRM_SUB_POLICY - struct xfrm_dst *xdst = (struct xfrm_dst *)dst; - return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); -#else - return 0; -#endif -} - static int xfrm_expand_policies(const struct flowi *fl, u16 family, struct xfrm_policy **pols, int *num_pols, int *num_xfrms) @@ -1916,16 +1879,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, xdst = (struct xfrm_dst *)dst; xdst->num_xfrms = err; - if (num_pols > 1) - err = xfrm_dst_update_parent(dst, &pols[1]->selector); - else - err = xfrm_dst_update_origin(dst, fl); - if (unlikely(err)) { - dst_free(dst); - XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); - return ERR_PTR(err); - } - xdst->num_pols = num_pols; memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); xdst->policy_genid = atomic_read(&pols[0]->genid); -- GitLab From c460f2beb6f081fa22eb7291db49c13c266ffd86 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Jun 2017 13:35:37 +0300 Subject: [PATCH 667/786] xfrm: NULL dereference on allocation failure commit e747f64336fc15e1c823344942923195b800aa1e upstream. The default error code in pfkey_msg2xfrm_state() is -ENOBUFS. We added a new call to security_xfrm_state_alloc() which sets "err" to zero so there several places where we can return ERR_PTR(0) if kmalloc() fails. The caller is expecting error pointers so it leads to a NULL dereference. 
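The underlying pattern is worth spelling out, since the next patch in this series applies the same cure before each allocation: an error variable starts out holding a real code, an intermediate call resets it to 0 on success, and a later allocation failure then exits with ERR_PTR(0), which is NULL. A self-contained sketch, where struct foo and foo_security_init() are invented stand-ins:

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int dummy; };

	static int foo_security_init(void)
	{
		return 0;	/* stands in for security_xfrm_state_alloc() succeeding */
	}

	static struct foo *foo_create_sketch(gfp_t gfp)
	{
		struct foo *f;
		int err = -ENOBUFS;		/* default error code */

		err = foo_security_init();	/* success leaves err == 0 */
		if (err)
			goto out_err;

		err = -ENOBUFS;			/* the fix: re-arm before the next failure point */
		f = kzalloc(sizeof(*f), gfp);
		if (!f)
			goto out_err;		/* without the re-arm this returned ERR_PTR(0) == NULL */

		return f;

	out_err:
		return ERR_PTR(err);
	}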
Fixes: df71837d5024 ("[LSM-IPSec]: Security association restriction.") Signed-off-by: Dan Carpenter Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/key/af_key.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/key/af_key.c b/net/key/af_key.c index f9c9ecb0cdd3..4f5992966924 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, goto out; } + err = -ENOBUFS; key = ext_hdrs[SADB_EXT_KEY_AUTH - 1]; if (sa->sadb_sa_auth) { int keysize = 0; -- GitLab From ac2730234cc1454b901656ed7f59ca1b519cdaf1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 14 Jun 2017 13:34:05 +0300 Subject: [PATCH 668/786] xfrm: Oops on error in pfkey_msg2xfrm_state() commit 1e3d0c2c70cd3edb5deed186c5f5c75f2b84a633 upstream. There are some missing error codes here so we accidentally return NULL instead of an error pointer. It results in a NULL pointer dereference. Fixes: df71837d5024 ("[LSM-IPSec]: Security association restriction.") Signed-off-by: Dan Carpenter Signed-off-by: Steffen Klassert Signed-off-by: Greg Kroah-Hartman --- net/key/af_key.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/net/key/af_key.c b/net/key/af_key.c index 4f5992966924..e67c28e614b9 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1147,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, if (key) keysize = (key->sadb_key_bits + 7) / 8; x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL); - if (!x->aalg) + if (!x->aalg) { + err = -ENOMEM; goto out; + } strcpy(x->aalg->alg_name, a->name); x->aalg->alg_key_len = 0; if (key) { @@ -1167,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, goto out; } x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL); - if (!x->calg) + if (!x->calg) { + err = -ENOMEM; goto out; + } strcpy(x->calg->alg_name, a->name); x->props.calgo = sa->sadb_sa_encrypt; } else { @@ -1182,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, if (key) keysize = (key->sadb_key_bits + 7) / 8; x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL); - if (!x->ealg) + if (!x->ealg) { + err = -ENOMEM; goto out; + } strcpy(x->ealg->alg_name, a->name); x->ealg->alg_key_len = 0; if (key) { @@ -1228,8 +1234,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, struct xfrm_encap_tmpl *natt; x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL); - if (!x->encap) + if (!x->encap) { + err = -ENOMEM; goto out; + } natt = x->encap; n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1]; -- GitLab From 4211442b2088554f1c99a72b0476f967c0509a0e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 17 Feb 2017 08:39:28 +0100 Subject: [PATCH 669/786] netfilter: use skb_to_full_sk in ip_route_me_harder commit 29e09229d9f26129a39462fae0ddabc4d9533989 upstream. inet_sk(skb->sk) is illegal in case skb is attached to request socket. 
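The idiom in the hunk below can be summarised on its own: on the output path, skb->sk must not be fed straight into inet_sk(), because it may still be a request_sock mini-socket; resolve it to the full socket first. A distilled sketch, with an invented helper name:

	#include <linux/skbuff.h>
	#include <net/inet_sock.h>

	/*
	 * Illustrative only: while a connection sits in SYN_RECV, skb->sk can
	 * point at a request_sock, which is not a full struct inet_sock.
	 * skb_to_full_sk() hands back the corresponding full/listener socket
	 * (or NULL), which is safe to pass to inet_sk_flowi_flags() and the
	 * routing helpers.
	 */
	static __u8 flowi_flags_sketch(const struct sk_buff *skb)
	{
		const struct sock *sk = skb_to_full_sk(skb);

		return sk ? inet_sk_flowi_flags(sk) : 0;
	}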
Fixes: ca6fb0651883 ("tcp: attach SYNACK messages to request sockets instead of listener") Reported by: Daniel J Blueman Signed-off-by: Florian Westphal Tested-by: Daniel J Blueman Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- net/ipv4/netfilter.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index b3cc1335adbc..c0cc6aa8cfaa 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -23,7 +23,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; - __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; + const struct sock *sk = skb_to_full_sk(skb); + __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0; struct net_device *dev = skb_dst(skb)->dev; unsigned int hh_len; @@ -40,7 +41,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t fl4.daddr = iph->daddr; fl4.saddr = saddr; fl4.flowi4_tos = RT_TOS(iph->tos); - fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; + fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0; if (!fl4.flowi4_oif) fl4.flowi4_oif = l3mdev_master_ifindex(dev); fl4.flowi4_mark = skb->mark; @@ -61,7 +62,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { struct dst_entry *dst = skb_dst(skb); skb_dst_set(skb, NULL); - dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0); + dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0); if (IS_ERR(dst)) return PTR_ERR(dst); skb_dst_set(skb, dst); -- GitLab From eea0261db8efda7a5b3732c0d9a76e9b06bf040d Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Thu, 27 Apr 2017 18:02:32 -0700 Subject: [PATCH 670/786] watchdog: bcm281xx: Fix use of uninitialized spinlock. commit fedf266f9955d9a019643cde199a2fd9a0259f6f upstream. The bcm_kona_wdt_set_resolution_reg() call takes the spinlock, so initialize it earlier. Fixes a warning at boot with lock debugging enabled. Fixes: 6adb730dc208 ("watchdog: bcm281xx: Watchdog Driver") Signed-off-by: Eric Anholt Reviewed-by: Florian Fainelli Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Greg Kroah-Hartman --- drivers/watchdog/bcm_kona_wdt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c index e0c98423f2c9..11a72bc2c71b 100644 --- a/drivers/watchdog/bcm_kona_wdt.c +++ b/drivers/watchdog/bcm_kona_wdt.c @@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) if (!wdt) return -ENOMEM; + spin_lock_init(&wdt->lock); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); wdt->base = devm_ioremap_resource(dev, res); if (IS_ERR(wdt->base)) @@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) return ret; } - spin_lock_init(&wdt->lock); platform_set_drvdata(pdev, wdt); watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt); bcm_kona_wdt_wdd.parent = &pdev->dev; -- GitLab From 478273e11521915b7a0fd977b4d43587997ec7b2 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 17 Feb 2017 12:07:30 +0000 Subject: [PATCH 671/786] sched/loadavg: Avoid loadavg spikes caused by delayed NO_HZ accounting commit 6e5f32f7a43f45ee55c401c0b9585eb01f9629a8 upstream. 
If we crossed a sample window while in NO_HZ we will add LOAD_FREQ to the pending sample window time on exit, setting the next update not one window into the future, but two. This situation on exiting NO_HZ is described by: this_rq->calc_load_update < jiffies < calc_load_update In this scenario, what we should be doing is: this_rq->calc_load_update = calc_load_update [ next window ] But what we actually do is: this_rq->calc_load_update = calc_load_update + LOAD_FREQ [ next+1 window ] This has the effect of delaying load average updates for potentially up to ~9seconds. This can result in huge spikes in the load average values due to per-cpu uninterruptible task counts being out of sync when accumulated across all CPUs. It's safe to update the per-cpu active count if we wake between sample windows because any load that we left in 'calc_load_idle' will have been zero'd when the idle load was folded in calc_global_load(). This issue is easy to reproduce before, commit 9d89c257dfb9 ("sched/fair: Rewrite runnable load and utilization average tracking") just by forking short-lived process pipelines built from ps(1) and grep(1) in a loop. I'm unable to reproduce the spikes after that commit, but the bug still seems to be present from code review. Signed-off-by: Matt Fleming Signed-off-by: Peter Zijlstra (Intel) Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Mike Galbraith Cc: Mike Galbraith Cc: Morten Rasmussen Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Fixes: commit 5167e8d ("sched/nohz: Rewrite and fix load-avg computation -- again") Link: http://lkml.kernel.org/r/20170217120731.11868-2-matt@codeblueprint.co.uk Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- kernel/sched/loadavg.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index a2d6eb71f06b..ec91fcc09bfe 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -201,8 +201,9 @@ void calc_load_exit_idle(void) struct rq *this_rq = this_rq(); /* - * If we're still before the sample window, we're done. + * If we're still before the pending sample window, we're done. */ + this_rq->calc_load_update = calc_load_update; if (time_before(jiffies, this_rq->calc_load_update)) return; @@ -211,7 +212,6 @@ void calc_load_exit_idle(void) * accounted through the nohz accounting, so skip the entire deal and * sync up for the next window. */ - this_rq->calc_load_update = calc_load_update; if (time_before(jiffies, this_rq->calc_load_update + 10)) this_rq->calc_load_update += LOAD_FREQ; } -- GitLab From c52829f60f5f6e228a70162717df199e874898a8 Mon Sep 17 00:00:00 2001 From: Daniel Kurtz Date: Fri, 27 Jan 2017 00:21:53 +0800 Subject: [PATCH 672/786] spi: When no dma_chan map buffers with spi_master's parent commit 88b0aa544af58ce3be125a1845a227264ec9ab89 upstream. Back before commit 1dccb598df54 ("arm64: simplify dma_get_ops"), for arm64, devices for which dma_ops were not explicitly set were automatically configured to use swiotlb_dma_ops, since this was hard-coded as the global "dma_ops" in arm64_dma_init(). Now that global "dma_ops" has been removed, all devices much have their dma_ops explicitly set by a call to arch_setup_dma_ops(), otherwise the device is assigned dummy_dma_ops, and thus calls to map_sg for such a device will fail (return 0). Mediatek SPI uses DMA but does not use a dma channel. 
Support for this was added by commit c37f45b5f1cd ("spi: support spi without dma channel to use can_dma()"), which uses the master_spi dev to DMA map buffers. The master_spi device is not a platform device, rather it is created in spi_alloc_device(), and therefore its dma_ops are never set. Therefore, when the mediatek SPI driver when it does DMA (for large SPI transactions > 32 bytes), SPI will use spi_map_buf()->dma_map_sg() to map the buffer for use in DMA. But dma_map_sg()->dma_map_sg_attrs() returns 0, because ops->map_sg is dummy_dma_ops->__dummy_map_sg, and hence spi_map_buf() returns -ENOMEM (-12). Fix this by using the real spi_master's parent device which should be a real physical device with DMA properties. Signed-off-by: Daniel Kurtz Fixes: c37f45b5f1cd ("spi: support spi without dma channel to use can_dma()") Cc: Leilk Liu Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 24d4492d0168..ddc39b4fe3d8 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -797,12 +797,12 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) if (master->dma_tx) tx_dev = master->dma_tx->device->dev; else - tx_dev = &master->dev; + tx_dev = master->dev.parent; if (master->dma_rx) rx_dev = master->dma_rx->device->dev; else - rx_dev = &master->dev; + rx_dev = master->dev.parent; list_for_each_entry(xfer, &msg->transfers, transfer_list) { if (!master->can_dma(master, msg->spi, xfer)) @@ -844,12 +844,12 @@ static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg) if (master->dma_tx) tx_dev = master->dma_tx->device->dev; else - tx_dev = &master->dev; + tx_dev = master->dev.parent; if (master->dma_rx) rx_dev = master->dma_rx->device->dev; else - rx_dev = &master->dev; + rx_dev = master->dev.parent; list_for_each_entry(xfer, &msg->transfers, transfer_list) { if (!master->can_dma(master, msg->spi, xfer)) -- GitLab From 9846c67974d6af64f665707bb4f68ae458684faa Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Mon, 30 Jan 2017 17:47:05 +0100 Subject: [PATCH 673/786] spi: fix device-node leaks commit 8324147f38019865b29d03baf28412d2ec0bd828 upstream. Make sure to release the device-node reference taken in of_register_spi_device() on errors and when deregistering the device. Fixes: 284b01897340 ("spi: Add OF binding support for SPI busses") Signed-off-by: Johan Hovold Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/spi/spi.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ddc39b4fe3d8..6db80635ace8 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -621,8 +621,10 @@ void spi_unregister_device(struct spi_device *spi) if (!spi) return; - if (spi->dev.of_node) + if (spi->dev.of_node) { of_node_clear_flag(spi->dev.of_node, OF_POPULATED); + of_node_put(spi->dev.of_node); + } if (ACPI_COMPANION(&spi->dev)) acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev)); device_unregister(&spi->dev); @@ -1589,11 +1591,13 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc) if (rc) { dev_err(&master->dev, "spi_device register error %s\n", nc->full_name); - goto err_out; + goto err_of_node_put; } return spi; +err_of_node_put: + of_node_put(nc); err_out: spi_dev_put(spi); return ERR_PTR(rc); -- GitLab From 88baad2e715967f237396bea47c496830d82a9c2 Mon Sep 17 00:00:00 2001 From: "Andrew F. 
Davis" Date: Fri, 10 Feb 2017 11:55:46 -0600 Subject: [PATCH 674/786] regulator: tps65086: Fix expected switch DT node names commit 1c47f7c316de38c30b481e1886cc6352c9efdcc1 upstream. The three load switches are called SWA1, SWB1, and SWB2. The node names describing properties for these are expected to be the same, but due to a typo they are not. Fix this here. Fixes: d2a2e729a666 ("regulator: tps65086: Add regulator driver for the TPS65086 PMIC") Reported-by: Steven Kipisz Signed-off-by: Andrew F. Davis Tested-by: Steven Kipisz Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/regulator/tps65086-regulator.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c index caf174ffa316..0e75f575203e 100644 --- a/drivers/regulator/tps65086-regulator.c +++ b/drivers/regulator/tps65086-regulator.c @@ -156,8 +156,8 @@ static struct tps65086_regulator regulators[] = { VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0), tps65086_ldoa23_ranges, 0, 0), TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)), - TPS65086_SWITCH("SWB1", "swa2", SWB1, TPS65086_SWVTT_EN, BIT(6)), - TPS65086_SWITCH("SWB2", "swa3", SWB2, TPS65086_SWVTT_EN, BIT(7)), + TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)), + TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_SWVTT_EN, BIT(7)), TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)), }; -- GitLab From e57aa416ca4ce2af2570f3b776d738c04d9a8e3e Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Fri, 10 Feb 2017 11:55:47 -0600 Subject: [PATCH 675/786] regulator: tps65086: Fix DT node referencing in of_parse_cb commit 6308f1787fb85bc98b7241a08a9f7f33b47f8b61 upstream. When we check for additional DT properties in the current node we use the device_node passed in with the configuration data, this will not point to the correct DT node, use the one passed in for this purpose. Fixes: d2a2e729a666 ("regulator: tps65086: Add regulator driver for the TPS65086 PMIC") Reported-by: Steven Kipisz Signed-off-by: Andrew F. 
Davis Tested-by: Steven Kipisz Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- drivers/regulator/tps65086-regulator.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c index 0e75f575203e..6dbf3cf3951e 100644 --- a/drivers/regulator/tps65086-regulator.c +++ b/drivers/regulator/tps65086-regulator.c @@ -161,14 +161,14 @@ static struct tps65086_regulator regulators[] = { TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)), }; -static int tps65086_of_parse_cb(struct device_node *dev, +static int tps65086_of_parse_cb(struct device_node *node, const struct regulator_desc *desc, struct regulator_config *config) { int ret; /* Check for 25mV step mode */ - if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) { + if (of_property_read_bool(node, "ti,regulator-step-size-25mv")) { switch (desc->id) { case BUCK1: case BUCK2: @@ -192,7 +192,7 @@ static int tps65086_of_parse_cb(struct device_node *dev, } /* Check for decay mode */ - if (desc->id <= BUCK6 && of_property_read_bool(config->of_node, "ti,regulator-decay")) { + if (desc->id <= BUCK6 && of_property_read_bool(node, "ti,regulator-decay")) { ret = regmap_write_bits(config->regmap, regulators[desc->id].decay_reg, regulators[desc->id].decay_mask, -- GitLab From 07bb2c7e7ea369f03a8893e445639324726680a5 Mon Sep 17 00:00:00 2001 From: Dave Gerlach Date: Thu, 30 Mar 2017 14:58:18 -0500 Subject: [PATCH 676/786] ARM: OMAP2+: omap_device: Sync omap_device and pm_runtime after probe defer commit 04abaf07f6d5cdf22b7a478a86e706dfeeeef960 upstream. Starting from commit 5de85b9d57ab ("PM / runtime: Re-init runtime PM states at probe error and driver unbind") pm_runtime core now changes device runtime_status back to after RPM_SUSPENDED after a probe defer. Certain OMAP devices make use of "ti,no-idle-on-init" flag which causes omap_device_enable to be called during the BUS_NOTIFY_ADD_DEVICE event during probe, along with pm_runtime_set_active. This call to pm_runtime_set_active typically will prevent a call to pm_runtime_get in a driver probe function from re-enabling the omap_device. However, in the case of a probe defer that happens before the driver probe function is able to run, such as a missing pinctrl states defer, pm_runtime_reinit will set the device as RPM_SUSPENDED and then once driver probe is actually able to run, pm_runtime_get will see the device as suspended and call through to the omap_device layer, attempting to enable the already enabled omap_device and causing errors like this: omap-gpmc 50000000.gpmc: omap_device: omap_device_enable() called from invalid state 1 omap-gpmc 50000000.gpmc: use pm_runtime_put_sync_suspend() in driver? We can avoid this error by making sure the pm_runtime status of a device matches the omap_device state before a probe attempt. By extending the omap_device bus notifier to act on the BUS_NOTIFY_BIND_DRIVER event we can check if a device is enabled in omap_device but with a pm_runtime status of RPM_SUSPENDED and once again mark the device as RPM_ACTIVE to avoid a second incorrect call to omap_device_enable. Fixes: 5de85b9d57ab ("PM / runtime: Re-init runtime PM states at probe error and driver unbind") Tested-by: Franklin S Cooper Jr. 
Signed-off-by: Dave Gerlach Signed-off-by: Tony Lindgren Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-omap2/omap_device.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index e920dd83e443..f989145480c8 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb, dev_err(dev, "failed to idle\n"); } break; + case BUS_NOTIFY_BIND_DRIVER: + od = to_omap_device(pdev); + if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && + pm_runtime_status_suspended(dev)) { + od->_driver_status = BUS_NOTIFY_BIND_DRIVER; + pm_runtime_set_active(dev); + } + break; case BUS_NOTIFY_ADD_DEVICE: if (pdev->dev.of_node) omap_device_build_from_dt(pdev); -- GitLab From 4efe34b500a740016e5eabb8114ceeb395af771e Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Mon, 6 Mar 2017 12:56:55 -0600 Subject: [PATCH 677/786] ARM: dts: OMAP3: Fix MFG ID EEPROM commit 06e1a5cc570703796ff1bd3a712e8e3b15c6bb0d upstream. The manufacturing information is stored in the EEPROM. This chip is an AT24C64 not not (nor has it ever been) 24C02. This patch will correctly address the EEPROM to read the entire contents and not just 256 bytes (of 0xff). Fixes: 5e3447a29a38 ("ARM: dts: LogicPD Torpedo: Add AT24 EEPROM Support") Signed-off-by: Adam Ford Signed-off-by: Tony Lindgren Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/logicpd-torpedo-som.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 8f9a69ca818c..efe53998c961 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -121,7 +121,7 @@ &i2c3 { clock-frequency = <400000>; at24@50 { - compatible = "at24,24c02"; + compatible = "atmel,24c64"; readonly; reg = <0x50>; }; -- GitLab From 7661b19687b2399783de2c00cf88981c93bc8383 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 26 May 2017 17:40:02 +0100 Subject: [PATCH 678/786] ARM64/ACPI: Fix BAD_MADT_GICC_ENTRY() macro implementation commit cb7cf772d83d2d4e6995c5bb9e0fb59aea8f7080 upstream. The BAD_MADT_GICC_ENTRY() macro checks if a GICC MADT entry passes muster from an ACPI specification standpoint. Current macro detects the MADT GICC entry length through ACPI firmware version (it changed from 76 to 80 bytes in the transition from ACPI 5.1 to ACPI 6.0 specification) but always uses (erroneously) the ACPICA (latest) struct (ie struct acpi_madt_generic_interrupt - that is 80-bytes long) length to check if the current GICC entry memory record exceeds the MADT table end in memory as defined by the MADT table header itself, which may result in false negatives depending on the ACPI firmware version and how the MADT entries are laid out in memory (ie on ACPI 5.1 firmware MADT GICC entries are 76 bytes long, so by adding 80 to a GICC entry start address in memory the resulting address may well be past the actual MADT end, triggering a false negative). Fix the BAD_MADT_GICC_ENTRY() macro by reshuffling the condition checks and update them to always use the firmware version specific MADT GICC entry length in order to carry out boundary checks. 
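A worked example makes the false negative easier to see. With ACPI 5.1 firmware the last GICC entry occupies bytes [end - 76, end); the old check added sizeof(struct acpi_madt_generic_interrupt) == 80 to the entry start and concluded the entry ran past the table. A hedged sketch of the corrected predicate follows; gicc_entry_ok() is an invented name, the real code is the macro in the hunk below:

	#include <linux/types.h>

	/*
	 * Illustrative only: both the length comparison and the end-of-table
	 * comparison must use the firmware-specific entry length (76 bytes on
	 * ACPI 5.1, 80 bytes on ACPI 6.0+), not the size of the latest ACPICA
	 * struct.  For a valid last 5.1 entry: entry + 76 == end is fine,
	 * while entry + 80 > end, which is the false negative being fixed.
	 */
	static bool gicc_entry_ok(unsigned long entry, unsigned long end,
				  u8 hdr_len, unsigned long fw_len)
	{
		return entry && hdr_len == fw_len && entry + fw_len <= end;
	}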
Fixes: b6cfb277378e ("ACPI / ARM64: add BAD_MADT_GICC_ENTRY() macro") Reported-by: Julien Grall Acked-by: Will Deacon Acked-by: Marc Zyngier Signed-off-by: Lorenzo Pieralisi Cc: Julien Grall Cc: Hanjun Guo Cc: Al Stone Signed-off-by: Catalin Marinas Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/asm/acpi.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index e517088d635f..de04879bc8b8 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -22,9 +22,9 @@ #define ACPI_MADT_GICC_LENGTH \ (acpi_gbl_FADT.header.revision < 6 ? 76 : 80) -#define BAD_MADT_GICC_ENTRY(entry, end) \ - (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \ - (entry)->header.length != ACPI_MADT_GICC_LENGTH) +#define BAD_MADT_GICC_ENTRY(entry, end) \ + (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \ + (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end)) /* Basic configuration for ACPI */ #ifdef CONFIG_ACPI -- GitLab From a2c222bef08f1ada42f85f12114f482a0682ea56 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Thu, 29 Jun 2017 18:41:36 +0100 Subject: [PATCH 679/786] ARM: 8685/1: ensure memblock-limit is pmd-aligned commit 9e25ebfe56ece7541cd10a20d715cbdd148a2e06 upstream. The pmd containing memblock_limit is cleared by prepare_page_table() which creates the opportunity for early_alloc() to allocate unmapped memory if memblock_limit is not pmd aligned causing a boot-time hang. Commit 965278dcb8ab ("ARM: 8356/1: mm: handle non-pmd-aligned end of RAM") attempted to resolve this problem, but there is a path through the adjust_lowmem_bounds() routine where if all memory regions start and end on pmd-aligned addresses the memblock_limit will be set to arm_lowmem_limit. Since arm_lowmem_limit can be affected by the vmalloc early parameter, the value of arm_lowmem_limit may not be pmd-aligned. This commit corrects this oversight such that memblock_limit is always rounded down to pmd-alignment. Fixes: 965278dcb8ab ("ARM: 8356/1: mm: handle non-pmd-aligned end of RAM") Signed-off-by: Doug Berger Suggested-by: Mark Rutland Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/mm/mmu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 5cbfd9f86412..f7c741358f37 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1211,15 +1211,15 @@ void __init adjust_lowmem_bounds(void) high_memory = __va(arm_lowmem_limit - 1) + 1; + if (!memblock_limit) + memblock_limit = arm_lowmem_limit; + /* * Round the memblock limit down to a pmd size. This * helps to ensure that we will allocate memory from the * last full pmd, which should be mapped. */ - if (memblock_limit) - memblock_limit = round_down(memblock_limit, PMD_SIZE); - if (!memblock_limit) - memblock_limit = arm_lowmem_limit; + memblock_limit = round_down(memblock_limit, PMD_SIZE); if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { if (memblock_end_of_DRAM() > arm_lowmem_limit) { -- GitLab From 15541e64163c0c5a2d2e3e8d1b73057888170f62 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 24 Apr 2017 11:58:54 -0300 Subject: [PATCH 680/786] tools arch: Sync arch/x86/lib/memcpy_64.S with the kernel commit e883d09c9eb2ffddfd057c17e6a0cef446ec8c9b upstream. 
Just a minor fix done in: Fixes: 26a37ab319a2 ("x86/mce: Fix copy/paste error in exception table entries") Cc: Tony Luck Link: http://lkml.kernel.org/n/tip-ni9jzdd5yxlail6pq8cuexw2@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Greg Kroah-Hartman --- tools/arch/x86/lib/memcpy_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S index 49e6ebac7e73..98dcc112b363 100644 --- a/tools/arch/x86/lib/memcpy_64.S +++ b/tools/arch/x86/lib/memcpy_64.S @@ -286,7 +286,7 @@ ENDPROC(memcpy_mcsafe_unrolled) _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail) - _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) + _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail) _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail) -- GitLab From b287ade87c9192b4ae6fe525eaa66fd25455bfb1 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Tue, 27 Jun 2017 20:39:06 +0800 Subject: [PATCH 681/786] x86/boot/KASLR: Fix kexec crash due to 'virt_addr' calculation bug commit 8eabf42ae5237e6b699aeac687b5b629e3537c8d upstream. Kernel text KASLR is separated into physical address and virtual address randomization. And for virtual address randomization, we only randomiza to get an offset between 16M and KERNEL_IMAGE_SIZE. So the initial value of 'virt_addr' should be LOAD_PHYSICAL_ADDR, but not the original kernel loading address 'output'. The bug will cause kernel boot failure if kernel is loaded at a different position than the address, 16M, which is decided at compiled time. Kexec/kdump is such practical case. To fix it, just assign LOAD_PHYSICAL_ADDR to virt_addr as initial value. Tested-by: Dave Young Signed-off-by: Baoquan He Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 8391c73 ("x86/KASLR: Randomize virtual address separately") Link: http://lkml.kernel.org/r/1498567146-11990-3-git-send-email-bhe@redhat.com Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/boot/compressed/kaslr.c | 3 --- arch/x86/boot/compressed/misc.c | 4 ++-- arch/x86/boot/compressed/misc.h | 2 -- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index a66854d99ee1..6de58f1bd7ec 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -430,9 +430,6 @@ void choose_random_location(unsigned long input, { unsigned long random_addr, min_addr; - /* By default, keep output position unchanged. */ - *virt_addr = *output; - if (cmdline_find_option_bool("nokaslr")) { warn("KASLR disabled: 'nokaslr' on cmdline."); return; diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index b3c5a5f030ce..c945acd8fa33 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -338,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, unsigned long output_len) { const unsigned long kernel_total_size = VO__end - VO__text; - unsigned long virt_addr = (unsigned long)output; + unsigned long virt_addr = LOAD_PHYSICAL_ADDR; /* Retain x86 boot parameters pointer passed from startup_32/64. 
*/ boot_params = rmode; @@ -397,7 +397,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, #ifndef CONFIG_RELOCATABLE if ((unsigned long)output != LOAD_PHYSICAL_ADDR) error("Destination address does not match LOAD_PHYSICAL_ADDR"); - if ((unsigned long)output != virt_addr) + if (virt_addr != LOAD_PHYSICAL_ADDR) error("Destination virtual address changed when not relocatable"); #endif diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 1c8355eadbd1..766a5211f827 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -81,8 +81,6 @@ static inline void choose_random_location(unsigned long input, unsigned long output_size, unsigned long *virt_addr) { - /* No change from existing output location. */ - *virt_addr = *output; } #endif -- GitLab From 3667dafd6c04b46a827398b62fa97b9cf73d32f5 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 6 Apr 2017 16:19:22 +0200 Subject: [PATCH 682/786] x86/mpx: Correctly report do_mpx_bt_fault() failures to user-space commit 5ed386ec09a5d75bcf073967e55e895c2607a5c3 upstream. When this function fails it just sends a SIGSEGV signal to user-space using force_sig(). This signal is missing essential information about the cause, e.g. the trap_nr or an error code. Fix this by propagating the error to the only caller of mpx_handle_bd_fault(), do_bounds(), which sends the correct SIGSEGV signal to the process. Signed-off-by: Joerg Roedel Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: fe3d197f84319 ('x86, mpx: On-demand kernel allocation of bounds tables') Link: http://lkml.kernel.org/r/1491488362-27198-1-git-send-email-joro@8bytes.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/mpx.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 3e7c489e1f55..a75103e7f963 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void) if (!kernel_managing_mpx_tables(current->mm)) return -EINVAL; - if (do_mpx_bt_fault()) { - force_sig(SIGSEGV, current); - /* - * The force_sig() is essentially "handling" this - * exception, so we do not pass up the error - * from do_mpx_bt_fault(). - */ - } - return 0; + return do_mpx_bt_fault(); } /* -- GitLab From 8af88a950b4207f589b210657edc7c94b86b48e8 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Sat, 22 Apr 2017 00:01:22 -0700 Subject: [PATCH 683/786] x86/mm: Fix flush_tlb_page() on Xen commit dbd68d8e84c606673ebbcf15862f8c155fa92326 upstream. flush_tlb_page() passes a bogus range to flush_tlb_others() and expects the latter to fix it up. native_flush_tlb_others() has the fixup but Xen's version doesn't. Move the fixup to flush_tlb_others(). AFAICS the only real effect is that, without this fix, Xen would flush everything instead of just the one page on remote vCPUs in when flush_tlb_page() was called. Signed-off-by: Andy Lutomirski Reviewed-by: Boris Ostrovsky Cc: Andrew Morton Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Hansen Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Juergen Gross Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Michal Hocko Cc: Nadav Amit Cc: Peter Zijlstra Cc: Rik van Riel Cc: Thomas Gleixner Fixes: e7b52ffd45a6 ("x86/flush_tlb: try flush_tlb_single one by one in flush_tlb_range") Link: http://lkml.kernel.org/r/10ed0e4dfea64daef10b87fb85df1746999b4dba.1492844372.git.luto@kernel.org Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/tlb.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index a7655f6caf7d..75fb01109f94 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -263,8 +263,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask, { struct flush_tlb_info info; - if (end == 0) - end = start + PAGE_SIZE; info.flush_mm = mm; info.flush_start = start; info.flush_end = end; @@ -393,7 +391,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start) } if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) - flush_tlb_others(mm_cpumask(mm), mm, start, 0UL); + flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE); preempt_enable(); } -- GitLab From d5c5e8ba5d9d7b3378cf08274c86c8a340110b05 Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Wed, 3 May 2017 14:51:41 -0700 Subject: [PATCH 684/786] ocfs2: o2hb: revert hb threshold to keep compatible commit 33496c3c3d7b88dcbe5e55aa01288b05646c6aca upstream. Configfs is the interface for ocfs2-tools to set configure to kernel and $configfs_dir/cluster/$clustername/heartbeat/dead_threshold is the one used to configure heartbeat dead threshold. Kernel has a default value of it but user can set O2CB_HEARTBEAT_THRESHOLD in /etc/sysconfig/o2cb to override it. Commit 45b997737a80 ("ocfs2/cluster: use per-attribute show and store methods") changed heartbeat dead threshold name while ocfs2-tools did not, so ocfs2-tools won't set this configurable and the default value is always used. So revert it. 
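The rename works because the CONFIGFS_ATTR() wrapper derives both the C symbol and the user-visible file name from the identifier it is given. Below is a rough userspace imitation of that token-pasting mechanism; the struct and macro are simplified stand-ins, not the real configfs API.

#include <stdio.h>

struct fake_attr { const char *name; };

/* paste the prefix onto the symbol name and stringify the attribute name */
#define FAKE_CONFIGFS_ATTR(prefix, attr) \
	static struct fake_attr prefix##attr##_def = { .name = #attr }

FAKE_CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);

int main(void)
{
	/* ocfs2-tools looks for a configfs file with exactly this name */
	printf("attribute file: %s\n",
	       o2hb_heartbeat_group_dead_threshold_def.name);
	return 0;
}

Renaming the show/store pair and the CONFIGFS_ATTR() argument back to dead_threshold therefore restores the file name that ocfs2-tools expects.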
Fixes: 45b997737a80 ("ocfs2/cluster: use per-attribute show and store methods") Link: http://lkml.kernel.org/r/1490665245-15374-1-git-send-email-junxiao.bi@oracle.com Signed-off-by: Junxiao Bi Acked-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/ocfs2/cluster/heartbeat.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 636abcbd4650..5e8709aa1e7e 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -2242,13 +2242,13 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, spin_unlock(&o2hb_live_lock); } -static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item, +static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item, char *page) { return sprintf(page, "%u\n", o2hb_dead_threshold); } -static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item, +static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item, const char *page, size_t count) { unsigned long tmp; @@ -2297,11 +2297,11 @@ static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item, } -CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold); +CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold); CONFIGFS_ATTR(o2hb_heartbeat_group_, mode); static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = { - &o2hb_heartbeat_group_attr_threshold, + &o2hb_heartbeat_group_attr_dead_threshold, &o2hb_heartbeat_group_attr_mode, NULL, }; -- GitLab From c19bfc6765d44847a3880333474e2c992d63802f Mon Sep 17 00:00:00 2001 From: David Dillow Date: Mon, 30 Jan 2017 19:11:11 -0800 Subject: [PATCH 685/786] iommu/vt-d: Don't over-free page table directories commit f7116e115acdd74bc75a4daf6492b11d43505125 upstream. dma_pte_free_level() recurses down the IOMMU page tables and frees directory pages that are entirely contained in the given PFN range. Unfortunately, it incorrectly calculates the starting address covered by the PTE under consideration, which can lead to it clearing an entry that is still in use. This occurs if we have a scatterlist with an entry that has a length greater than 1026 MB and is aligned to 2 MB for both the IOMMU and physical addresses. For example, if __domain_mapping() is asked to map a two-entry scatterlist with 2 MB and 1028 MB segments to PFN 0xffff80000, it will ask if dma_pte_free_pagetable() is asked to PFNs from 0xffff80200 to 0xffffc05ff, it will also incorrectly clear the PFNs from 0xffff80000 to 0xffff801ff because of this issue. The current code will set level_pfn to 0xffff80200, and 0xffff80200-0xffffc01ff fits inside the range being cleared. Properly setting the level_pfn for the current level under consideration catches that this PTE is outside of the range being cleared. This patch also changes the value passed into dma_pte_free_level() when it recurses. This only affects the first PTE of the range being cleared, and is handled by the existing code that ensures we start our cursor no lower than start_pfn. This was found when using dma_map_sg() to map large chunks of contiguous memory, which immediatedly led to faults on the first access of the erroneously-deleted mappings. 
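The arithmetic is easiest to check with the PFNs quoted above. The following standalone sketch re-derives the driver's 9-bits-per-level masks and the "free the whole directory" containment test; only the calculation is reproduced here, not the page-table walk.

#include <stdio.h>

#define LEVEL_STRIDE		9
#define level_to_offset_bits(l)	(((l) - 1) * LEVEL_STRIDE)
#define level_mask(l)		(~0ULL << level_to_offset_bits(l))
#define level_size(l)		(1ULL << level_to_offset_bits(l))

static void check(const char *tag, unsigned long long level_pfn, int level,
		  unsigned long long start, unsigned long long last)
{
	unsigned long long end = level_pfn + level_size(level) - 1;
	/* the "range covers the entire page table" test from dma_pte_free_level() */
	int contained = !(start > level_pfn || last < end);

	printf("%s: entry covers %#llx..%#llx -> %s\n",
	       tag, level_pfn, end, contained ? "freed" : "kept");
}

int main(void)
{
	int level = 3;				/* entries mapping 2^18 PFNs each */
	unsigned long long pfn = 0xffff80200ULL;	/* cursor, clamped to start_pfn */
	unsigned long long start = 0xffff80200ULL, last = 0xffffc05ffULL;

	check("old (level_mask(level - 1))", pfn & level_mask(level - 1), level, start, last);
	check("new (level_mask(level))    ", pfn & level_mask(level), level, start, last);
	return 0;
}

With level_mask(level - 1) the entry appears to start at the cursor PFN and looks fully contained in the range being cleared; with level_mask(level) it starts at 0xffff80000, which lies below start_pfn, so the directory is kept.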
Fixes: 3269ee0bd668 ("intel-iommu: Fix leaks in pagetable freeing") Reviewed-by: Benjamin Serebrin Signed-off-by: David Dillow Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/intel-iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 87fcbf71b85a..002f8a421efa 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1144,7 +1144,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, if (!dma_pte_present(pte) || dma_pte_superpage(pte)) goto next; - level_pfn = pfn & level_mask(level - 1); + level_pfn = pfn & level_mask(level); level_pte = phys_to_virt(dma_pte_addr(pte)); if (level > 2) -- GitLab From d7fcb303d1ee4416a6e4772735cfacc36e86bff7 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 16 Jan 2017 12:58:07 +0000 Subject: [PATCH 686/786] iommu: Handle default domain attach failure commit 797a8b4d768c58caac58ee3e8cb36a164d1b7751 upstream. We wouldn't normally expect ops->attach_dev() to fail, but on IOMMUs with limited hardware resources, or generally misconfigured systems, it is certainly possible. We report failure correctly from the external iommu_attach_device() interface, but do not do so in iommu_group_add() when attaching to the default domain. The result of failure there is that the device, group and domain all get left in a broken, part-configured state which leads to weird errors and misbehaviour down the line when IOMMU API calls sort-of-but-don't-quite work. Check the return value of __iommu_attach_device() on the default domain, and refactor the error handling paths to cope with its failure and clean up correctly in such cases. Fixes: e39cb8a3aa98 ("iommu: Make sure a device is always attached to a domain") Reported-by: Punit Agrawal Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/iommu.c | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 9a2f1960873b..87d3060f8609 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -383,36 +383,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) device->dev = dev; ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); - if (ret) { - kfree(device); - return ret; - } + if (ret) + goto err_free_device; device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); rename: if (!device->name) { - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return -ENOMEM; + ret = -ENOMEM; + goto err_remove_link; } ret = sysfs_create_link_nowarn(group->devices_kobj, &dev->kobj, device->name); if (ret) { - kfree(device->name); if (ret == -EEXIST && i >= 0) { /* * Account for the slim chance of collision * and append an instance to the name. 
*/ + kfree(device->name); device->name = kasprintf(GFP_KERNEL, "%s.%d", kobject_name(&dev->kobj), i++); goto rename; } - - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return ret; + goto err_free_name; } kobject_get(group->devices_kobj); @@ -424,8 +418,10 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); if (group->domain) - __iommu_attach_device(group->domain, dev); + ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); + if (ret) + goto err_put_group; /* Notify any listeners about change to group. */ blocking_notifier_call_chain(&group->notifier, @@ -436,6 +432,21 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); return 0; + +err_put_group: + mutex_lock(&group->mutex); + list_del(&device->list); + mutex_unlock(&group->mutex); + dev->iommu_group = NULL; + kobject_put(group->devices_kobj); +err_free_name: + kfree(device->name); +err_remove_link: + sysfs_remove_link(&dev->kobj, "iommu_group"); +err_free_device: + kfree(device); + pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret); + return ret; } EXPORT_SYMBOL_GPL(iommu_group_add_device); -- GitLab From f0c31c674abdf563d2ad5d9ecfcad8d237f939f0 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 16 Mar 2017 17:00:17 +0000 Subject: [PATCH 687/786] iommu/dma: Don't reserve PCI I/O windows commit 938f1bbe35e3a7cb07e1fa7c512e2ef8bb866bdf upstream. Even if a host controller's CPU-side MMIO windows into PCI I/O space do happen to leak into PCI memory space such that it might treat them as peer addresses, trying to reserve the corresponding I/O space addresses doesn't do anything to help solve that problem. Stop doing a silly thing. Fixes: fade1ec055dc ("iommu/dma: Avoid PCI host bridge windows") Reviewed-by: Eric Auger Signed-off-by: Robin Murphy Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/dma-iommu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index c5ab8667e6f2..1520e7f02c2f 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -112,8 +112,7 @@ static void iova_reserve_pci_windows(struct pci_dev *dev, unsigned long lo, hi; resource_list_for_each_entry(window, &bridge->windows) { - if (resource_type(window->res) != IORESOURCE_MEM && - resource_type(window->res) != IORESOURCE_IO) + if (resource_type(window->res) != IORESOURCE_MEM) continue; lo = iova_pfn(iovad, window->res->start - window->offset); -- GitLab From 0e55856b8f2918f3a6b8caf3c72867ee88f816dd Mon Sep 17 00:00:00 2001 From: Pan Bian Date: Sun, 23 Apr 2017 18:23:21 +0800 Subject: [PATCH 688/786] iommu/amd: Fix incorrect error handling in amd_iommu_bind_pasid() commit 73dbd4a4230216b6a5540a362edceae0c9b4876b upstream. In function amd_iommu_bind_pasid(), the control flow jumps to label out_free when pasid_state->mm and mm is NULL. And mmput(mm) is called. In function mmput(mm), mm is referenced without validation. This will result in a NULL dereference bug. This patch fixes the bug. 
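The group-add and PASID-bind fixes above both come down to the same error-path discipline: release only what was actually acquired, in reverse order of acquisition. Here is a generic, self-contained sketch of that goto-unwind idiom; the resources are invented and only the structure mirrors the drivers.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct widget { char *name; char *buf; };

static int widget_init(struct widget *w, const char *name, int fail_buf)
{
	int ret = -1;

	w->name = strdup(name);
	if (!w->name)
		goto err;			/* nothing else to undo yet */

	w->buf = fail_buf ? NULL : malloc(64);	/* simulate an allocation failure */
	if (!w->buf)
		goto err_free_name;		/* undo only the successful strdup() */

	return 0;

err_free_name:
	free(w->name);
	w->name = NULL;
err:
	return ret;
}

int main(void)
{
	struct widget w;

	printf("init (buf alloc fails): %d\n", widget_init(&w, "demo", 1));
	printf("init (all succeeds):    %d\n", widget_init(&w, "demo", 0));
	free(w.name);
	free(w.buf);
	return 0;
}

Keeping one label per acquired resource is the property the amd_iommu change restores by moving mmput() to the label that is only reached once the mm reference is actually held.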
Signed-off-by: Pan Bian Fixes: f0aac63b873b ('iommu/amd: Don't hold a reference to mm_struct') Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/amd_iommu_v2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index f8ed8c95b685..a0b4ac64b9ff 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -695,9 +695,9 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid, out_unregister: mmu_notifier_unregister(&pasid_state->mn, mm); + mmput(mm); out_free: - mmput(mm); free_pasid_state(pasid_state); out: -- GitLab From 1781a29b31faee2cae9e7f353d8ab99ceb619c15 Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Mon, 26 Jun 2017 04:28:04 -0500 Subject: [PATCH 689/786] iommu/amd: Fix interrupt remapping when disable guest_mode commit 84a21dbdef0b96d773599c33c2afbb002198d303 upstream. Pass-through devices to VM guest can get updated IRQ affinity information via irq_set_affinity() when not running in guest mode. Currently, AMD IOMMU driver in GA mode ignores the updated information if the pass-through device is setup to use vAPIC regardless of guest_mode. This could cause invalid interrupt remapping. Also, the guest_mode bit should be set and cleared only when SVM updates posted-interrupt interrupt remapping information. Signed-off-by: Suravee Suthikulpanit Cc: Joerg Roedel Fixes: d98de49a53e48 ('iommu/amd: Enable vAPIC interrupt remapping mode by default') Signed-off-by: Joerg Roedel Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/amd_iommu.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 11a13b5be73a..41800b6d492e 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3857,11 +3857,9 @@ static void irte_ga_prepare(void *entry, u8 vector, u32 dest_apicid, int devid) { struct irte_ga *irte = (struct irte_ga *) entry; - struct iommu_dev_data *dev_data = search_dev_data(devid); irte->lo.val = 0; irte->hi.val = 0; - irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0; irte->lo.fields_remap.int_type = delivery_mode; irte->lo.fields_remap.dm = dest_mode; irte->hi.fields.vector = vector; @@ -3917,10 +3915,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index, struct irte_ga *irte = (struct irte_ga *) entry; struct iommu_dev_data *dev_data = search_dev_data(devid); - if (!dev_data || !dev_data->use_vapic) { + if (!dev_data || !dev_data->use_vapic || + !irte->lo.fields_remap.guest_mode) { irte->hi.fields.vector = vector; irte->lo.fields_remap.destination = dest_apicid; - irte->lo.fields_remap.guest_mode = 0; modify_irte_ga(devid, index, irte, NULL); } } -- GitLab From 8a6f400a374c2366ae2e0a3e528a2c9791b1dcd1 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 7 Feb 2017 16:19:06 +0300 Subject: [PATCH 690/786] cpufreq: s3c2416: double free on driver init error path commit a69261e4470d680185a15f748d9cdafb37c57a33 upstream. The "goto err_armclk;" error path already does a clk_put(s3c_freq->hclk); so this is a double free. Fixes: 34ee55075265 ([CPUFREQ] Add S3C2416/S3C2450 cpufreq driver) Signed-off-by: Dan Carpenter Reviewed-by: Krzysztof Kozlowski Acked-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/cpufreq/s3c2416-cpufreq.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index d6d425773fa4..5b2db3c6568f 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) rate = clk_get_rate(s3c_freq->hclk); if (rate < 133 * 1000 * 1000) { pr_err("cpufreq: HCLK not at 133MHz\n"); - clk_put(s3c_freq->hclk); ret = -EINVAL; goto err_armclk; } -- GitLab From 92e66676523a9f921dfaa383e37d3a4e2edf15df Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Fri, 6 Jan 2017 12:34:30 +0000 Subject: [PATCH 691/786] clk: scpi: don't add cpufreq device if the scpi dvfs node is disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 67bcc2c5f1da8c5bb58e72354274ea5c59a3950a upstream. Currently we add the virtual cpufreq device unconditionally even when the SCPI DVFS clock provider node is disabled. This will cause cpufreq driver to throw errors when it gets initailised on boot/modprobe and also when the CPUs are hot-plugged back in. This patch fixes the issue by adding the virtual cpufreq device only if the SCPI DVFS clock provider is available and registered. Fixes: 9490f01e2471 ("clk: scpi: add support for cpufreq virtual device") Reported-by: Michał Zegan Cc: Neil Armstrong Signed-off-by: Sudeep Holla Tested-by: Michał Zegan Signed-off-by: Stephen Boyd Signed-off-by: Greg Kroah-Hartman --- drivers/clk/clk-scpi.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c index 2a3e9d8e88b0..96d37175d0ad 100644 --- a/drivers/clk/clk-scpi.c +++ b/drivers/clk/clk-scpi.c @@ -290,13 +290,15 @@ static int scpi_clocks_probe(struct platform_device *pdev) of_node_put(child); return ret; } - } - /* Add the virtual cpufreq device */ - cpufreq_dev = platform_device_register_simple("scpi-cpufreq", - -1, NULL, 0); - if (IS_ERR(cpufreq_dev)) - pr_warn("unable to register cpufreq device"); + if (match->data != &scpi_dvfs_ops) + continue; + /* Add the virtual cpufreq device if it's DVFS clock provider */ + cpufreq_dev = platform_device_register_simple("scpi-cpufreq", + -1, NULL, 0); + if (IS_ERR(cpufreq_dev)) + pr_warn("unable to register cpufreq device"); + } return 0; } -- GitLab From 3e51ccbadd15aa4a0e0a64535ec0566749361938 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 2 Mar 2017 16:57:23 -0600 Subject: [PATCH 692/786] objtool: Fix another GCC jump table detection issue commit 5c51f4ae84df0f9df33ac08aa5be50061a8b4242 upstream. Arnd Bergmann reported a (false positive) objtool warning: drivers/infiniband/sw/rxe/rxe_resp.o: warning: objtool: rxe_responder()+0xfe: sibling call from callable instruction with changed frame pointer The issue is in find_switch_table(). It tries to find a switch statement's jump table by walking backwards from an indirect jump instruction, looking for a relocation to the .rodata section. In this case it stopped walking prematurely: the first .rodata relocation it encountered was for a variable (resp_state_name) instead of a jump table, so it just assumed there wasn't a jump table. The fix is to ignore any .rodata relocation which refers to an ELF object symbol. This works because the jump tables are anonymous and have no symbols associated with them. 
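The new objtool check boils down to a single question: does the .rodata offset fall inside a named data object? Below is a userspace sketch of that containment test with a made-up symbol table; the real implementation walks ELF symbols, only the logic is shown.

#include <stdio.h>
#include <stddef.h>

struct sym { const char *name; unsigned long offset, len; };

/* pretend .rodata symbol table: one named lookup array, the rest anonymous */
static const struct sym rodata_syms[] = {
	{ "resp_state_name", 0x120, 0x80 },
};

static const struct sym *find_symbol_containing(unsigned long offset)
{
	for (size_t i = 0; i < sizeof(rodata_syms) / sizeof(rodata_syms[0]); i++)
		if (offset >= rodata_syms[i].offset &&
		    offset < rodata_syms[i].offset + rodata_syms[i].len)
			return &rodata_syms[i];
	return NULL;
}

int main(void)
{
	unsigned long candidates[] = { 0x140, 0x400 };

	for (size_t i = 0; i < 2; i++) {
		const struct sym *s = find_symbol_containing(candidates[i]);
		printf(".rodata+%#lx: %s\n", candidates[i],
		       s ? "named object, keep searching for the jump table"
			 : "anonymous data, treat as the jump table");
	}
	return 0;
}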
Reported-by: Arnd Bergmann Tested-by: Arnd Bergmann Signed-off-by: Josh Poimboeuf Cc: Denys Vlasenko Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 3732710ff6f2 ("objtool: Improve rare switch jump table pattern detection") Link: http://lkml.kernel.org/r/20170302225723.3ndbsnl4hkqbne7a@treble Signed-off-by: Ingo Molnar Signed-off-by: Greg Kroah-Hartman --- tools/objtool/builtin-check.c | 15 ++++++++++++--- tools/objtool/elf.c | 12 ++++++++++++ tools/objtool/elf.h | 1 + 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index e8a1f699058a..b8dadb050d2b 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c @@ -757,11 +757,20 @@ static struct rela *find_switch_table(struct objtool_file *file, insn->jump_dest->offset > orig_insn->offset)) break; + /* look for a relocation which references .rodata */ text_rela = find_rela_by_dest_range(insn->sec, insn->offset, insn->len); - if (text_rela && text_rela->sym == file->rodata->sym) - return find_rela_by_dest(file->rodata, - text_rela->addend); + if (!text_rela || text_rela->sym != file->rodata->sym) + continue; + + /* + * Make sure the .rodata address isn't associated with a + * symbol. gcc jump tables are anonymous data. + */ + if (find_symbol_containing(file->rodata, text_rela->addend)) + continue; + + return find_rela_by_dest(file->rodata, text_rela->addend); } return NULL; diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 0d7983ac63ef..d897702ce742 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -85,6 +85,18 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset) return NULL; } +struct symbol *find_symbol_containing(struct section *sec, unsigned long offset) +{ + struct symbol *sym; + + list_for_each_entry(sym, &sec->symbol_list, list) + if (sym->type != STT_SECTION && + offset >= sym->offset && offset < sym->offset + sym->len) + return sym; + + return NULL; +} + struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, unsigned int len) { diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index aa1ff6596684..731973e1a3f5 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -79,6 +79,7 @@ struct elf { struct elf *elf_open(const char *name); struct section *find_section_by_name(struct elf *elf, const char *name); struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset); +struct symbol *find_symbol_containing(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest(struct section *sec, unsigned long offset); struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset, unsigned int len); -- GitLab From 65fc82cea84f38ce918553b557f3a24c8d8c9649 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 24 Mar 2017 23:02:48 +0100 Subject: [PATCH 693/786] infiniband: hns: avoid gcc-7.0.1 warning for uninitialized data commit 5b0ff9a00755d4d9c209033a77f1ed8f3186fe5c upstream. 
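The hns_roce change just below is a one-line initialization; as a standalone illustration of the bug class it addresses, here is a set-bit helper that read-modify-writes a local, with a simplified macro standing in for roce_set_bit().

#include <stdio.h>
#include <stdint.h>

/* simplified stand-in for roce_set_bit(): read-modify-write of one bit */
#define set_bit_in(word, shift, val) \
	((word) = ((word) & ~(1U << (shift))) | ((uint32_t)!!(val) << (shift)))

int main(void)
{
	uint32_t doorbell[2];

	doorbell[0] = 0x1234;	/* every bit assigned: fine */
	doorbell[1] = 0;	/* the fix: without this, the read half of the
				 * read-modify-write below sees stack garbage */
	set_bit_in(doorbell[1], 17, 1);

	printf("doorbell[1] = %#x\n", (unsigned int)doorbell[1]);
	return 0;
}

Without the explicit zero, the helper only defines a few bits and the rest keep whatever happened to be on the stack, which is exactly what the new gcc warning points at.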
hns_roce_v1_cq_set_ci() calls roce_set_bit() on an uninitialized field, which will then change only a few of its bits, causing a warning with the latest gcc: infiniband/hw/hns/hns_roce_hw_v1.c: In function 'hns_roce_v1_cq_set_ci': infiniband/hw/hns/hns_roce_hw_v1.c:1854:23: error: 'doorbell[1]' is used uninitialized in this function [-Werror=uninitialized] roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); The code is actually correct since we always set all bits of the port_vlan field, but gcc correctly points out that the first access does contain uninitialized data. This initializes the field to zero first before setting the individual bits. Fixes: 9a4435375cd1 ("IB/hns: Add driver files for hns RoCE driver") Signed-off-by: Arnd Bergmann Signed-off-by: Doug Ledford Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 71232e5fabf6..20ec34761b39 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1267,6 +1267,7 @@ void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) u32 doorbell[2]; doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1); + doorbell[1] = 0; roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); -- GitLab From 8ee785016d5a05afa9ddd872ae7befa11798bfbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Wed, 4 Jan 2017 12:09:41 +0100 Subject: [PATCH 694/786] brcmfmac: avoid writing channel out of allocated array MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 77c0d0cd10e793989d1e8b835a9a09694182cb39 upstream. Our code was assigning number of channels to the index variable by default. If firmware reported channel we didn't predict this would result in using that initial index value and writing out of array. This never happened so far (we got a complete list of supported channels) but it means possible memory corruption so we should handle it anyway. This patch simply detects unexpected channel and ignores it. As we don't try to create new entry now, it's also safe to drop hw_value and center_freq assignment. For known channels we have these set anyway. I decided to fix this issue by assigning NULL or a target channel to the channel variable. This was one of possible ways, I prefefred this one as it also avoids using channel[index] over and over. 
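A condensed sketch of the lookup pattern the patch moves to: search the band by hw_value and treat "not found" as NULL rather than as an index equal to n_channels, which would later be written one element past the array. The channel numbers here are invented.

#include <stdio.h>
#include <stddef.h>

struct chan { int hw_value; unsigned int flags; };

static struct chan *lookup(struct chan *chans, size_t n, int hw_value)
{
	for (size_t i = 0; i < n; i++)
		if (chans[i].hw_value == hw_value)
			return &chans[i];
	return NULL;	/* unknown firmware channel: the caller skips it */
}

int main(void)
{
	struct chan band[] = { { 1, 0 }, { 6, 0 }, { 11, 0 } };
	int reported[] = { 6, 14 };	/* 14 is the "unexpected" channel here */

	for (size_t i = 0; i < 2; i++) {
		struct chan *c = lookup(band, 3, reported[i]);

		if (!c) {
			printf("ignoring unexpected channel %d\n", reported[i]);
			continue;
		}
		c->flags |= 0x1;
		printf("updated channel %d\n", c->hw_value);
	}
	return 0;
}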
Fixes: 58de92d2f95e ("brcmfmac: use static superset of channels for wiphy bands") Signed-off-by: Rafał Miłecki Acked-by: Arend van Spriel Signed-off-by: Kalle Valo Signed-off-by: Greg Kroah-Hartman --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 32 ++++++++++--------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 78d9966a3957..0f5dde1f2248 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5913,7 +5913,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, u32 i, j; u32 total; u32 chaninfo; - u32 index; pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); @@ -5961,33 +5960,36 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, ch.bw == BRCMU_CHAN_BW_80) continue; - channel = band->channels; - index = band->n_channels; + channel = NULL; for (j = 0; j < band->n_channels; j++) { - if (channel[j].hw_value == ch.control_ch_num) { - index = j; + if (band->channels[j].hw_value == ch.control_ch_num) { + channel = &band->channels[j]; break; } } - channel[index].center_freq = - ieee80211_channel_to_frequency(ch.control_ch_num, - band->band); - channel[index].hw_value = ch.control_ch_num; + if (!channel) { + /* It seems firmware supports some channel we never + * considered. Something new in IEEE standard? + */ + brcmf_err("Ignoring unexpected firmware channel %d\n", + ch.control_ch_num); + continue; + } /* assuming the chanspecs order is HT20, * HT40 upper, HT40 lower, and VHT80. */ if (ch.bw == BRCMU_CHAN_BW_80) { - channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ; + channel->flags &= ~IEEE80211_CHAN_NO_80MHZ; } else if (ch.bw == BRCMU_CHAN_BW_40) { - brcmf_update_bw40_channel_flag(&channel[index], &ch); + brcmf_update_bw40_channel_flag(channel, &ch); } else { /* enable the channel and disable other bandwidths * for now as mentioned order assure they are enabled * for subsequent chanspecs. */ - channel[index].flags = IEEE80211_CHAN_NO_HT40 | - IEEE80211_CHAN_NO_80MHZ; + channel->flags = IEEE80211_CHAN_NO_HT40 | + IEEE80211_CHAN_NO_80MHZ; ch.bw = BRCMU_CHAN_BW_20; cfg->d11inf.encchspec(&ch); chaninfo = ch.chspec; @@ -5995,11 +5997,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, &chaninfo); if (!err) { if (chaninfo & WL_CHAN_RADAR) - channel[index].flags |= + channel->flags |= (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR); if (chaninfo & WL_CHAN_PASSIVE) - channel[index].flags |= + channel->flags |= IEEE80211_CHAN_NO_IR; } } -- GitLab From de5862335ed7c465b0900774fbd869bf91a23c58 Mon Sep 17 00:00:00 2001 From: Jaedon Shin Date: Fri, 3 Mar 2017 10:55:03 +0900 Subject: [PATCH 695/786] i2c: brcmstb: Fix START and STOP conditions commit 2de3ec4f1d4ba6ee380478055104eb918bd50cce upstream. The BSC data buffers to send and receive data are each of size 32 bytes or 8 bytes 'xfersz' depending on SoC. The problem observed for all the combined message transfer was if length of data transfer was a multiple of 'xfersz' a repeated START was being transmitted by BSC driver. Fixed this by appropriately setting START/STOP conditions for such transfers. 
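The intended bus behaviour is easier to see in a simplified model than in the driver's condition flags: each message begins with a (repeated) START, intermediate FIFO-sized chunks carry neither START nor STOP, and only the final chunk of the final message ends with STOP. Below is a rough simulation with an invented FIFO size; it models the intent, not the brcmstb register encoding.

#include <stdio.h>
#include <stdbool.h>

#define XFERSZ 32	/* FIFO size: lengths that are multiples of this were the problem */

static void xfer(int msg, int len, bool last_msg)
{
	int off = 0;

	while (len > 0) {
		int chunk = len > XFERSZ ? XFERSZ : len;
		bool first_chunk = (off == 0);
		bool last_chunk = (len <= XFERSZ);

		printf("msg %d bytes %3d..%3d: %s%s\n", msg, off, off + chunk - 1,
		       first_chunk ? "START " : "",
		       (last_chunk && last_msg) ? "STOP" : "");
		off += chunk;
		len -= chunk;
	}
}

int main(void)
{
	/* two-message combined transfer, second length a multiple of XFERSZ */
	xfer(0, 2, false);
	xfer(1, 64, true);
	return 0;
}

The problem case is the 64-byte message: its length is an exact multiple of the FIFO size, and the old code could emit a spurious repeated START at the chunk boundary instead of continuing the transfer.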
Fixes: dd1aa2524bc5 ("i2c: brcmstb: Add Broadcom settop SoC i2c controller driver") Signed-off-by: Jaedon Shin Acked-by: Kamal Dasu Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-brcmstb.c | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 0652281662a8..78792b4d6437 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c @@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, u8 *tmp_buf; int len = 0; int xfersz = brcmstb_i2c_get_xfersz(dev); + u32 cond, cond_per_msg; if (dev->is_suspended) return -EBUSY; @@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, pmsg->buf ? pmsg->buf[0] : '0', pmsg->len); if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART)) - brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP)); + cond = ~COND_START_STOP; else - brcmstb_set_i2c_start_stop(dev, - COND_RESTART | COND_NOSTOP); + cond = COND_RESTART | COND_NOSTOP; + + brcmstb_set_i2c_start_stop(dev, cond); /* Send slave address */ if (!(pmsg->flags & I2C_M_NOSTART)) { @@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, } } + cond_per_msg = cond; + /* Perform data transfer */ while (len) { bytes_to_xfer = min(len, xfersz); - if (len <= xfersz && i == (num - 1)) - brcmstb_set_i2c_start_stop(dev, - ~(COND_START_STOP)); + if (len <= xfersz) { + if (i == (num - 1)) + cond_per_msg = cond_per_msg & + ~(COND_RESTART | COND_NOSTOP); + else + cond_per_msg = cond; + } else { + cond_per_msg = (cond_per_msg & ~COND_RESTART) | + COND_NOSTOP; + } + + brcmstb_set_i2c_start_stop(dev, cond_per_msg); rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf, bytes_to_xfer, pmsg); @@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter, len -= bytes_to_xfer; tmp_buf += bytes_to_xfer; + + cond_per_msg = COND_NOSTART | COND_NOSTOP; } } -- GitLab From a4bfcab30928b1ef1a19b379f8d08efe10853a42 Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Fri, 3 Mar 2017 16:16:53 -0500 Subject: [PATCH 696/786] mtd: nand: brcmnand: Check flash #WP pin status before nand erase/program commit 9d2ee0a60b8bd9bef2a0082c533736d6a7b39873 upstream. On brcmnand controller v6.x and v7.x, the #WP pin is controlled through the NAND_WP bit in CS_SELECT register. The driver currently assumes that toggling the #WP pin is instantaneously enabling/disabling write-protection, but it actually takes some time to propagate the new state to the internal NAND chip logic. This behavior is sometime causing data corruptions when an erase/program operation is executed before write-protection has really been disabled. 
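The core addition is a bounded status poll. Here is a userspace sketch of the same pattern, polling a fake status word until the masked bits match or a deadline passes, with POSIX CLOCK_MONOTONIC standing in for jiffies.

#include <stdio.h>
#include <time.h>

static unsigned int fake_status_read(void)
{
	static int calls;
	return ++calls >= 4 ? 0xC0000000U : 0;	/* becomes "ready" after a few reads */
}

static int poll_status(unsigned int mask, unsigned int expected, long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		unsigned int val = fake_status_read();

		if ((val & mask) == expected)
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
				  (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms > timeout_ms)
			return -1;	/* the driver returns -ETIMEDOUT here */
	}
}

int main(void)
{
	printf("poll result: %d\n",
	       poll_status(0xC0000000U, 0xC0000000U, 100));
	return 0;
}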
Fixes: 27c5b17cd1b1 ("mtd: nand: add NAND driver "library" for Broadcom STB NAND controller") Signed-off-by: Kamal Dasu Signed-off-by: Boris Brezillon Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/brcmnand/brcmnand.c | 61 ++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c index 9d2424bfdbf5..d9fab2222eb3 100644 --- a/drivers/mtd/nand/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/brcmnand/brcmnand.c @@ -101,6 +101,9 @@ struct brcm_nand_dma_desc { #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024) #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024) +#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY) +#define NAND_POLL_STATUS_TIMEOUT_MS 100 + /* Controller feature flags */ enum { BRCMNAND_HAS_1K_SECTORS = BIT(0), @@ -765,6 +768,31 @@ enum { CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30), }; +static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, + u32 mask, u32 expected_val, + unsigned long timeout_ms) +{ + unsigned long limit; + u32 val; + + if (!timeout_ms) + timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS; + + limit = jiffies + msecs_to_jiffies(timeout_ms); + do { + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); + if ((val & mask) == expected_val) + return 0; + + cpu_relax(); + } while (time_after(limit, jiffies)); + + dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", + expected_val, val & mask); + + return -ETIMEDOUT; +} + static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en) { u32 val = en ? CS_SELECT_NAND_WP : 0; @@ -1024,12 +1052,39 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp) if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) { static int old_wp = -1; + int ret; if (old_wp != wp) { dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off"); old_wp = wp; } + + /* + * make sure ctrl/flash ready before and after + * changing state of #WP pin + */ + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY | + NAND_STATUS_READY, + NAND_CTRL_RDY | + NAND_STATUS_READY, 0); + if (ret) + return; + brcmnand_set_wp(ctrl, wp); + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */ + ret = bcmnand_ctrl_poll_status(ctrl, + NAND_CTRL_RDY | + NAND_STATUS_READY | + NAND_STATUS_WP, + NAND_CTRL_RDY | + NAND_STATUS_READY | + (wp ? 0 : NAND_STATUS_WP), 0); + + if (ret) + dev_err_ratelimited(&host->pdev->dev, + "nand #WP expected %s\n", + wp ? "on" : "off"); } } @@ -1157,15 +1212,15 @@ static irqreturn_t brcmnand_dma_irq(int irq, void *data) static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) { struct brcmnand_controller *ctrl = host->ctrl; - u32 intfc; + int ret; dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd, brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS)); BUG_ON(ctrl->cmd_pending != 0); ctrl->cmd_pending = cmd; - intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); - WARN_ON(!(intfc & INTFC_CTLR_READY)); + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); + WARN_ON(ret); mb(); /* flush previous writes */ brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, -- GitLab From 982d8d92f25613e88f3a34a8a57da484f68d4c1d Mon Sep 17 00:00:00 2001 From: Mark Salter Date: Fri, 24 Mar 2017 09:53:56 -0400 Subject: [PATCH 697/786] arm64: fix NULL dereference in have_cpu_die() commit 335d2c2d192266358c5dfa64953a4c162f46e464 upstream. 
Commit 5c492c3f5255 ("arm64: smp: Add function to determine if cpus are stuck in the kernel") added a helper function to determine if die() is supported in cpu_ops. This function assumes a cpu will have a valid cpu_ops entry, but that may not be the case for cpu0 is spin-table or parking protocol is used to boot secondary cpus. In that case, there is a NULL dereference if have_cpu_die() is called by cpu0. So add a check for a valid cpu_ops before dereferencing it. Fixes: 5c492c3f5255 ("arm64: smp: Add function to determine if cpus are stuck in the kernel") Signed-off-by: Mark Salter Signed-off-by: Will Deacon Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 8507703dabe4..a70f7d3361c4 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -934,7 +934,7 @@ static bool have_cpu_die(void) #ifdef CONFIG_HOTPLUG_CPU int any_cpu = raw_smp_processor_id(); - if (cpu_ops[any_cpu]->cpu_die) + if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die) return true; #endif return false; -- GitLab From 1eeb7942633225baad2f8465dd93a4fb72b4ec7f Mon Sep 17 00:00:00 2001 From: Ladi Prosek Date: Tue, 25 Apr 2017 16:42:44 +0200 Subject: [PATCH 698/786] KVM: x86: fix emulation of RSM and IRET instructions commit 6ed071f051e12cf7baa1b69d3becb8f232fdfb7b upstream. On AMD, the effect of set_nmi_mask called by emulate_iret_real and em_rsm on hflags is reverted later on in x86_emulate_instruction where hflags are overwritten with ctxt->emul_flags (the kvm_set_hflags call). This manifests as a hang when rebooting Windows VMs with QEMU, OVMF, and >1 vcpu. Instead of trying to merge ctxt->emul_flags into vcpu->arch.hflags after an instruction is emulated, this commit deletes emul_flags altogether and makes the emulator access vcpu->arch.hflags using two new accessors. This way all changes, on the emulator side as well as in functions called from the emulator and accessing vcpu state with emul_to_vcpu, are preserved. More details on the bug and its manifestation with Windows and OVMF: It's a KVM bug in the interaction between SMI/SMM and NMI, specific to AMD. I believe that the SMM part explains why we started seeing this only with OVMF. KVM masks and unmasks NMI when entering and leaving SMM. When KVM emulates the RSM instruction in em_rsm, the set_nmi_mask call doesn't stick because later on in x86_emulate_instruction we overwrite arch.hflags with ctxt->emul_flags, effectively reverting the effect of the set_nmi_mask call. The AMD-specific hflag of interest here is HF_NMI_MASK. When rebooting the system, Windows sends an NMI IPI to all but the current cpu to shut them down. Only after all of them are parked in HLT will the initiating cpu finish the restart. If NMI is masked, other cpus never get the memo and the initiating cpu spins forever, waiting for hal!HalpInterruptProcessorsStarted to drop. That's the symptom we observe. 
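Stripped of the KVM details, the bug is a stale snapshot being written back over state that a callee already updated. The toy model below shows the before and after schemes; the structures are invented and only the data flow mirrors the patch.

#include <stdio.h>

#define NMI_MASK 0x1

struct vcpu { unsigned int hflags; };

static void set_nmi_mask(struct vcpu *v, int masked)	/* side effect on the vcpu */
{
	if (masked)
		v->hflags |= NMI_MASK;
	else
		v->hflags &= ~NMI_MASK;
}

int main(void)
{
	struct vcpu v = { .hflags = NMI_MASK };

	/* old scheme: snapshot the flags, emulate, then write the snapshot back */
	unsigned int emul_flags = v.hflags;
	set_nmi_mask(&v, 0);		/* e.g. em_rsm() unmasking NMI */
	v.hflags = emul_flags;		/* write-back silently reverts the unmask */
	printf("old scheme: NMI still masked? %s\n",
	       (v.hflags & NMI_MASK) ? "yes (bug)" : "no");

	/* new scheme: no snapshot, read and write the flags in place */
	v.hflags = NMI_MASK;
	set_nmi_mask(&v, 0);
	printf("new scheme: NMI still masked? %s\n",
	       (v.hflags & NMI_MASK) ? "yes" : "no");
	return 0;
}

The patch removes the snapshot (emul_flags) entirely and has the emulator read and write hflags through the new get_hflags/set_hflags callbacks, so changes made along the way are preserved.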
Fixes: a584539b24b8 ("KVM: x86: pass the whole hflags field to emulator and back") Signed-off-by: Ladi Prosek Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/kvm_emulate.h | 4 +++- arch/x86/kvm/emulate.c | 16 +++++++++------- arch/x86/kvm/x86.c | 15 ++++++++++++--- 3 files changed, 24 insertions(+), 11 deletions(-) diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index e9cd7befcb76..19d14ac23ef9 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -221,6 +221,9 @@ struct x86_emulate_ops { void (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked); + + unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt); + void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags); }; typedef u32 __attribute__((vector_size(16))) sse128_t; @@ -290,7 +293,6 @@ struct x86_emulate_ctxt { /* interruptibility state, as a result of execution of STI or MOV SS */ int interruptibility; - int emul_flags; bool perm_ok; /* do not check permissions if true */ bool ud; /* inject an #UD if host doesn't support insn */ diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 9f676adcdfc2..de36660751b5 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2543,7 +2543,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) u64 smbase; int ret; - if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0) + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0) return emulate_ud(ctxt); /* @@ -2592,11 +2592,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt) return X86EMUL_UNHANDLEABLE; } - if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0) ctxt->ops->set_nmi_mask(ctxt, false); - ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK; - ctxt->emul_flags &= ~X86EMUL_SMM_MASK; + ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) & + ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK)); return X86EMUL_CONTINUE; } @@ -5312,6 +5312,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) const struct x86_emulate_ops *ops = ctxt->ops; int rc = X86EMUL_CONTINUE; int saved_dst_type = ctxt->dst.type; + unsigned emul_flags; ctxt->mem_read.pos = 0; @@ -5326,6 +5327,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; } + emul_flags = ctxt->ops->get_hflags(ctxt); if (unlikely(ctxt->d & (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || @@ -5359,7 +5361,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) fetch_possible_mmx_operand(ctxt, &ctxt->dst); } - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_PRE_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5388,7 +5390,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) goto done; } - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_EXCEPT); if (rc != X86EMUL_CONTINUE) @@ -5442,7 +5444,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) special_insn: - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { + if (unlikely(emul_flags & 
X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) { rc = emulator_check_intercept(ctxt, ctxt->intercept, X86_ICPT_POST_MEMACCESS); if (rc != X86EMUL_CONTINUE) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ab3f00399cbb..e1c1003f1f93 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5154,6 +5154,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked) kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked); } +static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt) +{ + return emul_to_vcpu(ctxt)->arch.hflags; +} + +static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags) +{ + kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags); +} + static const struct x86_emulate_ops emulate_ops = { .read_gpr = emulator_read_gpr, .write_gpr = emulator_write_gpr, @@ -5193,6 +5203,8 @@ static const struct x86_emulate_ops emulate_ops = { .intercept = emulator_intercept, .get_cpuid = emulator_get_cpuid, .set_nmi_mask = emulator_set_nmi_mask, + .get_hflags = emulator_get_hflags, + .set_hflags = emulator_set_hflags, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) @@ -5245,7 +5257,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK); BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); - ctxt->emul_flags = vcpu->arch.hflags; init_decode_cache(ctxt); vcpu->arch.emulate_regs_need_sync_from_vcpu = false; @@ -5636,8 +5647,6 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); toggle_interruptibility(vcpu, ctxt->interruptibility); vcpu->arch.emulate_regs_need_sync_to_vcpu = false; - if (vcpu->arch.hflags != ctxt->emul_flags) - kvm_set_hflags(vcpu, ctxt->emul_flags); kvm_rip_write(vcpu, ctxt->eip); if (r == EMULATE_DONE) kvm_vcpu_check_singlestep(vcpu, rflags, &r); -- GitLab From f3c3ec96e5fb40b453693421577d446b5b22fc52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Thu, 18 May 2017 19:37:31 +0200 Subject: [PATCH 699/786] KVM: x86/vPMU: fix undefined shift in intel_pmu_refresh() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 34b0dadbdf698f9b277a31b2747b625b9a75ea1f upstream. Static analysis noticed that pmu->nr_arch_gp_counters can be 32 (INTEL_PMC_MAX_GENERIC) and therefore cannot be used to shift 'int'. I didn't add BUILD_BUG_ON for it as we have a better checker. 
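The underlying C rule: shifting a 32-bit int by 32 or more bits is undefined behaviour. A short standalone check using the 32-counter maximum quoted in the report:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int nr_counters = 32;	/* INTEL_PMC_MAX_GENERIC */

	/* (1 << nr_counters) - 1 would shift an int by its full width: undefined */
	uint64_t mask = (1ull << nr_counters) - 1;	/* 64-bit shift, well defined */

	printf("global_ctrl bits for %d counters: %#llx\n",
	       nr_counters, (unsigned long long)mask);
	return 0;
}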
Reported-by: Dan Carpenter Fixes: 25462f7f5295 ("KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch") Reviewed-by: Paolo Bonzini Reviewed-by: David Hildenbrand Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/pmu_intel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c index 9d4a8504a95a..5ab4a364348e 100644 --- a/arch/x86/kvm/pmu_intel.c +++ b/arch/x86/kvm/pmu_intel.c @@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu) ((u64)1 << edx.split.bit_width_fixed) - 1; } - pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | + pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) | (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); pmu->global_ctrl_mask = ~pmu->global_ctrl; -- GitLab From d1d3756f07da10505699d1d3a1227b5201da3ab8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Thu, 18 May 2017 19:37:30 +0200 Subject: [PATCH 700/786] KVM: x86: zero base3 of unusable segments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit f0367ee1d64d27fa08be2407df5c125442e885e3 upstream. Static checker noticed that base3 could be used uninitialized if the segment was not present (useable). Random stack values probably would not pass VMCS entry checks. Reported-by: Dan Carpenter Fixes: 1aa366163b8b ("KVM: x86 emulator: consolidate segment accessors") Reviewed-by: Paolo Bonzini Reviewed-by: David Hildenbrand Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/x86.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e1c1003f1f93..3dbcb09c19cf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4999,6 +4999,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector, if (var.unusable) { memset(desc, 0, sizeof(*desc)); + if (base3) + *base3 = 0; return false; } -- GitLab From a29fd27ca26832fe03341a7fec75ea3b4b86fb51 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 5 Jun 2017 05:19:09 -0700 Subject: [PATCH 701/786] KVM: nVMX: Fix exception injection MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit d4912215d1031e4fb3d1038d2e1857218dba0d0a upstream. WARNING: CPU: 3 PID: 2840 at arch/x86/kvm/vmx.c:10966 nested_vmx_vmexit+0xdcd/0xde0 [kvm_intel] CPU: 3 PID: 2840 Comm: qemu-system-x86 Tainted: G OE 4.12.0-rc3+ #23 RIP: 0010:nested_vmx_vmexit+0xdcd/0xde0 [kvm_intel] Call Trace: ? kvm_check_async_pf_completion+0xef/0x120 [kvm] ? rcu_read_lock_sched_held+0x79/0x80 vmx_queue_exception+0x104/0x160 [kvm_intel] ? vmx_queue_exception+0x104/0x160 [kvm_intel] kvm_arch_vcpu_ioctl_run+0x1171/0x1ce0 [kvm] ? kvm_arch_vcpu_load+0x47/0x240 [kvm] ? kvm_arch_vcpu_load+0x62/0x240 [kvm] kvm_vcpu_ioctl+0x384/0x7b0 [kvm] ? kvm_vcpu_ioctl+0x384/0x7b0 [kvm] ? __fget+0xf3/0x210 do_vfs_ioctl+0xa4/0x700 ? __fget+0x114/0x210 SyS_ioctl+0x79/0x90 do_syscall_64+0x81/0x220 entry_SYSCALL64_slow_path+0x25/0x25 This is triggered occasionally by running both win7 and win2016 in L2, in addition, EPT is disabled on both L1 and L2. It can't be reproduced easily. Commit 0b6ac343fc (KVM: nVMX: Correct handling of exception injection) mentioned that "KVM wants to inject page-faults which it got to the guest. This function assumes it is called with the exit reason in vmcs02 being a #PF exception". 
Commit e011c663 (KVM: nVMX: Check all exceptions for intercept during delivery to L2) allows to check all exceptions for intercept during delivery to L2. However, there is no guarantee the exit reason is exception currently, when there is an external interrupt occurred on host, maybe a time interrupt for host which should not be injected to guest, and somewhere queues an exception, then the function nested_vmx_check_exception() will be called and the vmexit emulation codes will try to emulate the "Acknowledge interrupt on exit" behavior, the warning is triggered. Reusing the exit reason from the L2->L0 vmexit is wrong in this case, the reason must always be EXCEPTION_NMI when injecting an exception into L1 as a nested vmexit. Cc: Paolo Bonzini Cc: Radim Krčmář Signed-off-by: Wanpeng Li Fixes: e011c663b9c7 ("KVM: nVMX: Check all exceptions for intercept during delivery to L2") Signed-off-by: Radim Krčmář Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 89b98e07211f..04e6bbbd8736 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2455,7 +2455,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr) if (!(vmcs12->exception_bitmap & (1u << nr))) return 0; - nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason, + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, vmcs_read32(VM_EXIT_INTR_INFO), vmcs_readl(EXIT_QUALIFICATION)); return 1; -- GitLab From 9f86f302ec0e37e84617481c587e11c47a397e3f Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 5 Jul 2017 14:40:44 +0200 Subject: [PATCH 702/786] Linux 4.9.36 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0a8d47465f97..4263dca12f07 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 9 -SUBLEVEL = 35 +SUBLEVEL = 36 EXTRAVERSION = NAME = Roaring Lionus -- GitLab From 13e7cdfeb405c120ae6a4e42455c92f5f1dee86c Mon Sep 17 00:00:00 2001 From: Yuanyuan Liu Date: Tue, 13 Jun 2017 11:48:00 -0700 Subject: [PATCH 703/786] cnss_utils: Add support of cnss_utils for WLAN Add support of cnss_utils for WLAN. Export APIs to WLAN host driver which are used to set/get WLAN related information. These information could be retrived later when WLAN driver is reloaded. CRs-Fixed: 2060693 Change-Id: I2c8c36602ec8af133946ff00c41ce648a2628041 Signed-off-by: Yuanyuan Liu --- drivers/net/wireless/Kconfig | 2 + drivers/net/wireless/Makefile | 2 + drivers/net/wireless/cnss_utils/Kconfig | 6 + drivers/net/wireless/cnss_utils/Makefile | 1 + drivers/net/wireless/cnss_utils/cnss_utils.c | 310 +++++++++++++++++++ drivers/soc/qcom/Kconfig | 1 + include/net/cnss_utils.h | 40 +++ 7 files changed, 362 insertions(+) create mode 100644 drivers/net/wireless/cnss_utils/Kconfig create mode 100644 drivers/net/wireless/cnss_utils/Makefile create mode 100644 drivers/net/wireless/cnss_utils/cnss_utils.c create mode 100644 include/net/cnss_utils.h diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 030d849eea84..d092d34ecf34 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -121,4 +121,6 @@ config CLD_LL_CORE Select Y to compile the driver in order to have WLAN functionality support. 
+source "drivers/net/wireless/cnss_utils/Kconfig" + endif # WLAN diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 91594de787de..005523c6b6eb 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -27,3 +27,5 @@ obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/ + +obj-$(CONFIG_CNSS_UTILS) += cnss_utils/ diff --git a/drivers/net/wireless/cnss_utils/Kconfig b/drivers/net/wireless/cnss_utils/Kconfig new file mode 100644 index 000000000000..5f43e4872d65 --- /dev/null +++ b/drivers/net/wireless/cnss_utils/Kconfig @@ -0,0 +1,6 @@ +config CNSS_UTILS + bool "CNSS utilities support" + ---help--- + Add CNSS utilities support for the WLAN driver module. + This feature enable wlan driver to use CNSS utilities APIs to set + and get wlan related information. \ No newline at end of file diff --git a/drivers/net/wireless/cnss_utils/Makefile b/drivers/net/wireless/cnss_utils/Makefile new file mode 100644 index 000000000000..0d1ed7ae939e --- /dev/null +++ b/drivers/net/wireless/cnss_utils/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_CNSS_UTILS) += cnss_utils.o diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c new file mode 100644 index 000000000000..a452900868c4 --- /dev/null +++ b/drivers/net/wireless/cnss_utils/cnss_utils.c @@ -0,0 +1,310 @@ +/* Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "cnss_utils: " fmt + +#include +#include +#include +#include + +#define CNSS_MAX_CH_NUM 45 +struct cnss_unsafe_channel_list { + u16 unsafe_ch_count; + u16 unsafe_ch_list[CNSS_MAX_CH_NUM]; +}; + +struct cnss_dfs_nol_info { + void *dfs_nol_info; + u16 dfs_nol_info_len; +}; + +#define MAX_NO_OF_MAC_ADDR 4 +struct cnss_wlan_mac_addr { + u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN]; + u32 no_of_mac_addr_set; +}; + +static struct cnss_utils_priv { + struct cnss_unsafe_channel_list unsafe_channel_list; + struct cnss_dfs_nol_info dfs_nol_info; + /* generic mutex for unsafe channel */ + struct mutex unsafe_channel_list_lock; + /* generic spin-lock for dfs_nol info */ + spinlock_t dfs_nol_info_lock; + int driver_load_cnt; + bool is_wlan_mac_set; + struct cnss_wlan_mac_addr wlan_mac_addr; + enum cnss_utils_cc_src cc_source; +} *cnss_utils_priv; + +int cnss_utils_set_wlan_unsafe_channel(struct device *dev, + u16 *unsafe_ch_list, u16 ch_count) +{ + struct cnss_utils_priv *priv = cnss_utils_priv; + + if (!priv) + return -EINVAL; + + mutex_lock(&priv->unsafe_channel_list_lock); + if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) { + mutex_unlock(&priv->unsafe_channel_list_lock); + return -EINVAL; + } + + priv->unsafe_channel_list.unsafe_ch_count = ch_count; + + if (ch_count == 0) + goto end; + + memcpy(priv->unsafe_channel_list.unsafe_ch_list, + unsafe_ch_list, ch_count * sizeof(u16)); + +end: + mutex_unlock(&priv->unsafe_channel_list_lock); + + return 0; +} +EXPORT_SYMBOL(cnss_utils_set_wlan_unsafe_channel); + +int cnss_utils_get_wlan_unsafe_channel(struct device *dev, + u16 *unsafe_ch_list, + u16 *ch_count, u16 buf_len) +{ + struct cnss_utils_priv *priv = cnss_utils_priv; + + if (!priv) + return -EINVAL; + + mutex_lock(&priv->unsafe_channel_list_lock); + if (!unsafe_ch_list || !ch_count) { + mutex_unlock(&priv->unsafe_channel_list_lock); + return -EINVAL; + } + + if (buf_len < + (priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16))) { + mutex_unlock(&priv->unsafe_channel_list_lock); + return -ENOMEM; + } + + *ch_count = priv->unsafe_channel_list.unsafe_ch_count; + memcpy(unsafe_ch_list, priv->unsafe_channel_list.unsafe_ch_list, + priv->unsafe_channel_list.unsafe_ch_count * sizeof(u16)); + mutex_unlock(&priv->unsafe_channel_list_lock); + + return 0; +} +EXPORT_SYMBOL(cnss_utils_get_wlan_unsafe_channel); + +int cnss_utils_wlan_set_dfs_nol(struct device *dev, + const void *info, u16 info_len) +{ + void *temp; + void *old_nol_info; + struct cnss_dfs_nol_info *dfs_info; + struct cnss_utils_priv *priv = cnss_utils_priv; + + if (!priv) + return -EINVAL; + + if (!info || !info_len) + return -EINVAL; + + temp = kmalloc(info_len, GFP_ATOMIC); + if (!temp) + return -ENOMEM; + + memcpy(temp, info, info_len); + spin_lock_bh(&priv->dfs_nol_info_lock); + dfs_info = &priv->dfs_nol_info; + old_nol_info = dfs_info->dfs_nol_info; + dfs_info->dfs_nol_info = temp; + dfs_info->dfs_nol_info_len = info_len; + spin_unlock_bh(&priv->dfs_nol_info_lock); + kfree(old_nol_info); + + return 0; +} +EXPORT_SYMBOL(cnss_utils_wlan_set_dfs_nol); + +int cnss_utils_wlan_get_dfs_nol(struct device *dev, + void *info, u16 info_len) +{ + int len; + struct cnss_dfs_nol_info *dfs_info; + struct cnss_utils_priv *priv = cnss_utils_priv; + + if (!priv) + return -EINVAL; + + if (!info || !info_len) + return -EINVAL; + + spin_lock_bh(&priv->dfs_nol_info_lock); + + dfs_info = &priv->dfs_nol_info; + if (!dfs_info->dfs_nol_info || + dfs_info->dfs_nol_info_len == 0) { + spin_unlock_bh(&priv->dfs_nol_info_lock); + return 
-ENOENT; + } + + len = min(info_len, dfs_info->dfs_nol_info_len); + memcpy(info, dfs_info->dfs_nol_info, len); + spin_unlock_bh(&priv->dfs_nol_info_lock); + + return len; +} +EXPORT_SYMBOL(cnss_utils_wlan_get_dfs_nol); + +void cnss_utils_increment_driver_load_cnt(struct device *dev) +{ + struct cnss_utils_priv *priv = cnss_utils_priv; + + if (!priv) + return; + + ++(priv->driver_load_cnt); +} +EXPORT_SYMBOL(cnss_utils_increment_driver_load_cnt); + +int cnss_utils_get_driver_load_cnt(struct device *dev) +{ + struct cnss_utils_priv *priv = cnss_utils_priv; + + if (!priv) + return -EINVAL; + + return priv->driver_load_cnt; +} +EXPORT_SYMBOL(cnss_utils_get_driver_load_cnt); + +int cnss_utils_set_wlan_mac_address(const u8 *in, const uint32_t len) +{ + struct cnss_utils_priv *priv = cnss_utils_priv; + u32 no_of_mac_addr; + struct cnss_wlan_mac_addr *addr = NULL; + int iter; + u8 *temp = NULL; + + if (!priv) + return -EINVAL; + + if (priv->is_wlan_mac_set) { + pr_debug("WLAN MAC address is already set\n"); + return 0; + } + + if (len == 0 || (len % ETH_ALEN) != 0) { + pr_err("Invalid length %d\n", len); + return -EINVAL; + } + + no_of_mac_addr = len / ETH_ALEN; + if (no_of_mac_addr > MAX_NO_OF_MAC_ADDR) { + pr_err("Exceed maximum supported MAC address %u %u\n", + MAX_NO_OF_MAC_ADDR, no_of_mac_addr); + return -EINVAL; + } + + priv->is_wlan_mac_set = true; + addr = &priv->wlan_mac_addr; + addr->no_of_mac_addr_set = no_of_mac_addr; + temp = &addr->mac_addr[0][0]; + + for (iter = 0; iter < no_of_mac_addr; + ++iter, temp += ETH_ALEN, in += ETH_ALEN) { + ether_addr_copy(temp, in); + pr_debug("MAC_ADDR:%02x:%02x:%02x:%02x:%02x:%02x\n", + temp[0], temp[1], temp[2], + temp[3], temp[4], temp[5]); + } + + return 0; +} +EXPORT_SYMBOL(cnss_utils_set_wlan_mac_address); + +u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num) +{ + struct cnss_utils_priv *priv = cnss_utils_priv; + struct cnss_wlan_mac_addr *addr = NULL; + + if (!priv) + goto out; + + if (!priv->is_wlan_mac_set) { + pr_debug("WLAN MAC address is not set\n"); + goto out; + } + + addr = &priv->wlan_mac_addr; + *num = addr->no_of_mac_addr_set; + return &addr->mac_addr[0][0]; +out: + *num = 0; + return NULL; +} +EXPORT_SYMBOL(cnss_utils_get_wlan_mac_address); + +void cnss_utils_set_cc_source(struct device *dev, + enum cnss_utils_cc_src cc_source) +{ + struct cnss_utils_priv *priv = cnss_utils_priv; + + if (!priv) + return; + + priv->cc_source = cc_source; +} +EXPORT_SYMBOL(cnss_utils_set_cc_source); + +enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev) +{ + struct cnss_utils_priv *priv = cnss_utils_priv; + + if (!priv) + return -EINVAL; + + return priv->cc_source; +} +EXPORT_SYMBOL(cnss_utils_get_cc_source); + +static int __init cnss_utils_init(void) +{ + struct cnss_utils_priv *priv = NULL; + + priv = kmalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->cc_source = CNSS_UTILS_SOURCE_CORE; + + mutex_init(&priv->unsafe_channel_list_lock); + spin_lock_init(&priv->dfs_nol_info_lock); + + cnss_utils_priv = priv; + + return 0; +} + +static void __exit cnss_utils_exit(void) +{ + kfree(cnss_utils_priv); + cnss_utils_priv = NULL; +} + +module_init(cnss_utils_init); +module_exit(cnss_utils_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION(DEVICE "CNSS Utilities Driver"); diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 121fa342406d..bf1747b8176a 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -506,6 +506,7 @@ config MSM_PIL_MSS_QDSP6V5 config ICNSS 
tristate "Platform driver for Q6 integrated connectivity" + select CNSS_UTILS ---help--- This module adds support for Q6 integrated WLAN connectivity subsystem. This module is responsible for communicating WLAN on/off diff --git a/include/net/cnss_utils.h b/include/net/cnss_utils.h new file mode 100644 index 000000000000..6ff0fd0907f8 --- /dev/null +++ b/include/net/cnss_utils.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CNSS_UTILS_H_ +#define _CNSS_UTILS_H_ + +enum cnss_utils_cc_src { + CNSS_UTILS_SOURCE_CORE, + CNSS_UTILS_SOURCE_11D, + CNSS_UTILS_SOURCE_USER +}; + +extern int cnss_utils_set_wlan_unsafe_channel(struct device *dev, + u16 *unsafe_ch_list, + u16 ch_count); +extern int cnss_utils_get_wlan_unsafe_channel(struct device *dev, + u16 *unsafe_ch_list, + u16 *ch_count, u16 buf_len); +extern int cnss_utils_wlan_set_dfs_nol(struct device *dev, + const void *info, u16 info_len); +extern int cnss_utils_wlan_get_dfs_nol(struct device *dev, + void *info, u16 info_len); +extern int cnss_utils_get_driver_load_cnt(struct device *dev); +extern void cnss_utils_increment_driver_load_cnt(struct device *dev); +extern int cnss_utils_set_wlan_mac_address(const u8 *in, uint32_t len); +extern u8 *cnss_utils_get_wlan_mac_address(struct device *dev, uint32_t *num); +extern void cnss_utils_set_cc_source(struct device *dev, + enum cnss_utils_cc_src cc_source); +extern enum cnss_utils_cc_src cnss_utils_get_cc_source(struct device *dev); + +#endif -- GitLab From c934228c2865800deba55dcd98a4870be9fdd263 Mon Sep 17 00:00:00 2001 From: Sudarshan Rajagopalan Date: Thu, 18 May 2017 00:11:06 -0700 Subject: [PATCH 704/786] msm: secure_buffer: Add SPSS_SP vmid Add support for content protection for ION memory allocated to SPSS through user process. Change-Id: I1aa37717c02a5b96468d16ec1a26bb2ee7020e74 Signed-off-by: Sudarshan Rajagopalan --- drivers/soc/qcom/secure_buffer.c | 4 +++- drivers/staging/android/ion/msm/msm_ion.c | 7 +++++-- drivers/staging/android/uapi/msm_ion.h | 1 + include/soc/qcom/secure_buffer.h | 3 ++- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c index f1e7347d5a0c..2791d7229ba8 100644 --- a/drivers/soc/qcom/secure_buffer.c +++ b/drivers/soc/qcom/secure_buffer.c @@ -1,6 +1,6 @@ /* * Copyright (C) 2011 Google, Inc - * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -412,6 +412,8 @@ const char *msm_secure_vmid_to_string(int secure_vmid) return "VMID_WLAN_CE"; case VMID_CP_CAMERA_PREVIEW: return "VMID_CP_CAMERA_PREVIEW"; + case VMID_CP_SPSS_SP: + return "VMID_CP_SPSS_SP"; case VMID_INVAL: return "VMID_INVAL"; default: diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c index ae9bf5f6a7f5..146aeef3ba80 100644 --- a/drivers/staging/android/ion/msm/msm_ion.c +++ b/drivers/staging/android/ion/msm/msm_ion.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -618,7 +618,8 @@ bool is_secure_vmid_valid(int vmid) vmid == VMID_CP_CAMERA || vmid == VMID_CP_SEC_DISPLAY || vmid == VMID_CP_APP || - vmid == VMID_CP_CAMERA_PREVIEW); + vmid == VMID_CP_CAMERA_PREVIEW || + vmid == VMID_CP_SPSS_SP); } int get_secure_vmid(unsigned long flags) @@ -639,6 +640,8 @@ int get_secure_vmid(unsigned long flags) return VMID_CP_APP; if (flags & ION_FLAG_CP_CAMERA_PREVIEW) return VMID_CP_CAMERA_PREVIEW; + if (flags & ION_FLAG_CP_SPSS_SP) + return VMID_CP_SPSS_SP; return -EINVAL; } /* fix up the cases where the ioctl direction bits are incorrect */ diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h index cc77674bbcc3..40dbfb00cac5 100644 --- a/drivers/staging/android/uapi/msm_ion.h +++ b/drivers/staging/android/uapi/msm_ion.h @@ -84,6 +84,7 @@ enum cp_mem_usage { #define ION_FLAG_CP_NON_PIXEL ION_BIT(20) #define ION_FLAG_CP_CAMERA ION_BIT(21) #define ION_FLAG_CP_HLOS ION_BIT(22) +#define ION_FLAG_CP_SPSS_SP ION_BIT(23) #define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25) #define ION_FLAG_CP_APP ION_BIT(26) #define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27) diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h index f0f81a930e04..ac468ba1567f 100644 --- a/include/soc/qcom/secure_buffer.h +++ b/include/soc/qcom/secure_buffer.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -37,6 +37,7 @@ enum vmid { VMID_CP_APP = 0x12, VMID_WLAN = 0x18, VMID_WLAN_CE = 0x19, + VMID_CP_SPSS_SP = 0x1A, VMID_CP_CAMERA_PREVIEW = 0x1D, VMID_LAST, VMID_INVAL = -1 -- GitLab From 33ae04368e1e626444c65b7805f0f68774013283 Mon Sep 17 00:00:00 2001 From: Sudarshan Rajagopalan Date: Thu, 18 May 2017 00:12:53 -0700 Subject: [PATCH 705/786] msm: ion: Multiple vmids ION secure allocation Support for ION allocations through secure cma heap for multiple vmids. 
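The core of the change is that every content-protection flag set in the allocation flags now contributes one destination VMID for hyp_assign_table(), instead of a single VMID derived from a single flag. The standalone sketch below is illustrative only, not the driver code; it builds as a plain userspace program and models the expansion using ION_FLAG_CP_* and VMID values that appear elsewhere in this series.

/*
 * Illustrative sketch, not the ION driver code: expand a CP-flags
 * bitmask into the list of destination VMIDs that would be handed to
 * hyp_assign_table().  Flag and VMID values are taken from this series.
 */
#include <stdio.h>

#define ION_BIT(nr)			(1UL << (nr))
#define ION_FLAG_CP_SPSS_SP		ION_BIT(23)
#define ION_FLAG_CP_CAMERA_PREVIEW	ION_BIT(27)
#define ION_FLAGS_CP_MASK		0x7FFF0000UL

#define VMID_CP_SPSS_SP			0x1A
#define VMID_CP_CAMERA_PREVIEW		0x1D

static int flag_to_vmid(unsigned long flag)
{
	switch (flag) {
	case ION_FLAG_CP_SPSS_SP:		return VMID_CP_SPSS_SP;
	case ION_FLAG_CP_CAMERA_PREVIEW:	return VMID_CP_CAMERA_PREVIEW;
	default:				return -1; /* flag not modelled here */
	}
}

int main(void)
{
	unsigned long flags = ION_FLAG_CP_SPSS_SP | ION_FLAG_CP_CAMERA_PREVIEW;
	unsigned long cp_flags = flags & ION_FLAGS_CP_MASK;
	int vmids[32];
	int nelems = 0;
	int bit, vmid, i;

	/* one destination VMID per CP flag set in the allocation flags */
	for (bit = 0; bit < 32; bit++) {
		if (!(cp_flags & ION_BIT(bit)))
			continue;

		vmid = flag_to_vmid(ION_BIT(bit));
		if (vmid < 0)
			return 1;
		vmids[nelems++] = vmid;
	}

	printf("%d destination VMID(s):", nelems);
	for (i = 0; i < nelems; i++)
		printf(" 0x%02x", vmids[i]);
	printf("\n");

	return 0;
}

The driver-side helpers count_set_bits() and populate_vm_list() in the diff below perform the same walk over the flag bits.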
Change-Id: I326e04155604c746f005b6b9bd53a5faf8a22cda Signed-off-by: Sudarshan Rajagopalan --- drivers/staging/android/ion/ion_cma_heap.c | 70 ++++++++++++++++------ drivers/staging/android/ion/ion_priv.h | 4 +- drivers/staging/android/ion/msm/msm_ion.c | 24 ++++++++ drivers/staging/android/ion/msm/msm_ion.h | 4 +- 4 files changed, 81 insertions(+), 21 deletions(-) diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index 323bb0cbd703..ff6436f5a6d3 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -4,7 +4,7 @@ * Copyright (C) Linaro 2012 * Author: for ST-Ericsson. * - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -242,28 +242,37 @@ void ion_cma_heap_destroy(struct ion_heap *heap) static void ion_secure_cma_free(struct ion_buffer *buffer) { int ret = 0; - u32 source_vm; + int *source_vm_list; + int source_nelems; int dest_vmid; int dest_perms; struct ion_cma_buffer_info *info = buffer->priv_virt; - source_vm = get_secure_vmid(buffer->flags); - if (source_vm < 0) { - pr_err("%s: Failed to get secure vmid\n", __func__); + source_nelems = count_set_bits(buffer->flags & ION_FLAGS_CP_MASK); + source_vm_list = kcalloc(source_nelems, sizeof(*source_vm_list), + GFP_KERNEL); + if (!source_vm_list) return; + ret = populate_vm_list(buffer->flags, source_vm_list, source_nelems); + if (ret) { + pr_err("%s: Failed to get secure vmids\n", __func__); + goto out_free_source; } + dest_vmid = VMID_HLOS; dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC; - ret = hyp_assign_table(info->table, &source_vm, 1, + ret = hyp_assign_table(info->table, source_vm_list, source_nelems, &dest_vmid, &dest_perms, 1); if (ret) { pr_err("%s: Not freeing memory since assign failed\n", __func__); - return; + goto out_free_source; } ion_cma_free(buffer); +out_free_source: + kfree(source_vm_list); } static int ion_secure_cma_allocate( @@ -272,41 +281,64 @@ static int ion_secure_cma_allocate( unsigned long align, unsigned long flags) { int ret = 0; + int count; int source_vm; - int dest_vm; - int dest_perms; + int *dest_vm_list = NULL; + int *dest_perms = NULL; + int dest_nelems; struct ion_cma_buffer_info *info; source_vm = VMID_HLOS; - dest_vm = get_secure_vmid(flags); - if (dest_vm < 0) { - pr_err("%s: Failed to get secure vmid\n", __func__); - return -EINVAL; + dest_nelems = count_set_bits(flags & ION_FLAGS_CP_MASK); + dest_vm_list = kcalloc(dest_nelems, sizeof(*dest_vm_list), GFP_KERNEL); + if (!dest_vm_list) { + ret = -ENOMEM; + goto out; + } + dest_perms = kcalloc(dest_nelems, sizeof(*dest_perms), GFP_KERNEL); + if (!dest_perms) { + ret = -ENOMEM; + goto out_free_dest_vm; + } + ret = populate_vm_list(flags, dest_vm_list, dest_nelems); + if (ret) { + pr_err("%s: Failed to get secure vmid(s)\n", __func__); + goto out_free_dest; } - if (dest_vm == VMID_CP_SEC_DISPLAY) - dest_perms = PERM_READ; - else - dest_perms = PERM_READ | PERM_WRITE; + for (count = 0; count < dest_nelems; count++) { + if (dest_vm_list[count] == VMID_CP_SEC_DISPLAY) + dest_perms[count] = PERM_READ; + else + dest_perms[count] = PERM_READ | PERM_WRITE; + } ret = ion_cma_allocate(heap, buffer, len, align, flags); if (ret) { dev_err(heap->priv, "Unable to allocate cma buffer"); - return ret; + goto out_free_dest; } info = 
buffer->priv_virt; ret = hyp_assign_table(info->table, &source_vm, 1, - &dest_vm, &dest_perms, 1); + dest_vm_list, dest_perms, dest_nelems); if (ret) { pr_err("%s: Assign call failed\n", __func__); goto err; } + + kfree(dest_vm_list); + kfree(dest_perms); return ret; err: ion_secure_cma_free(buffer); +out_free_dest: + kfree(dest_perms); +out_free_dest_vm: + kfree(dest_vm_list); +out: return ret; } diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h index 49d947e6646a..ba92ed9c147e 100644 --- a/drivers/staging/android/ion/ion_priv.h +++ b/drivers/staging/android/ion/ion_priv.h @@ -2,7 +2,7 @@ * drivers/staging/android/ion/ion_priv.h * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -294,6 +294,8 @@ long msm_ion_custom_ioctl(struct ion_client *client, int ion_heap_is_system_secure_heap_type(enum ion_heap_type type); int get_secure_vmid(unsigned long flags); bool is_secure_vmid_valid(int vmid); +unsigned int count_set_bits(unsigned long val); +int populate_vm_list(unsigned long flags, unsigned int *vm_list, int nelems); /** * Functions to help assign/unassign sg_table for System Secure Heap diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c index 146aeef3ba80..e6af81ddd306 100644 --- a/drivers/staging/android/ion/msm/msm_ion.c +++ b/drivers/staging/android/ion/msm/msm_ion.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include "../ion_priv.h" @@ -622,6 +623,29 @@ bool is_secure_vmid_valid(int vmid) vmid == VMID_CP_SPSS_SP); } +unsigned int count_set_bits(unsigned long val) +{ + return ((unsigned int)bitmap_weight(&val, BITS_PER_LONG)); +} + +int populate_vm_list(unsigned long flags, unsigned int *vm_list, + int nelems) +{ + unsigned int itr = 0; + int vmid; + + flags = flags & ION_FLAGS_CP_MASK; + for_each_set_bit(itr, &flags, BITS_PER_LONG) { + vmid = get_secure_vmid(0x1UL << itr); + if (vmid < 0 || !nelems) + return -EINVAL; + + vm_list[nelems - 1] = vmid; + nelems--; + } + return 0; +} + int get_secure_vmid(unsigned long flags) { if (flags & ION_FLAG_CP_TOUCH) diff --git a/drivers/staging/android/ion/msm/msm_ion.h b/drivers/staging/android/ion/msm/msm_ion.h index 55b02b6b5de6..741d0170815e 100644 --- a/drivers/staging/android/ion/msm/msm_ion.h +++ b/drivers/staging/android/ion/msm/msm_ion.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,6 +22,8 @@ enum ion_permission_type { IPT_TYPE_MDP_WRITEBACK = 2, }; +#define ION_FLAGS_CP_MASK 0x7FFF0000 + /* * This flag allows clients when mapping into the IOMMU to specify to * defer un-mapping from the IOMMU until the buffer memory is freed. -- GitLab From 6c3e44477e39e1365fb448e63c949cd77b4756d4 Mon Sep 17 00:00:00 2001 From: Satyajit Desai Date: Thu, 22 Jun 2017 17:56:03 -0700 Subject: [PATCH 706/786] coresight: tmc: Fix use after free issue with tmc read Fix race condition seen between reading tmc buffer and enabling the device. 
The race condition can result in a use after free issue if the buffer is released while a read is in progress. Change-Id: Ie6cc9a89d4c8f0cf75e8c705a537679516aec99a Signed-off-by: Satyajit Desai --- drivers/hwtracing/coresight/coresight-tmc-etr.c | 2 ++ drivers/hwtracing/coresight/coresight-tmc.c | 9 ++++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index d0ae889c46b7..9e6f443a9bfb 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -933,6 +933,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR)) return -EINVAL; + mutex_lock(&drvdata->mem_lock); spin_lock_irqsave(&drvdata->spinlock, flags); if (drvdata->reading) { ret = -EBUSY; @@ -964,6 +965,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata) drvdata->reading = true; out: spin_unlock_irqrestore(&drvdata->spinlock, flags); + mutex_unlock(&drvdata->mem_lock); return ret; } diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c index 012c56e4be45..98fcd01dbf8f 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.c +++ b/drivers/hwtracing/coresight/coresight-tmc.c @@ -142,7 +142,11 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, { struct tmc_drvdata *drvdata = container_of(file->private_data, struct tmc_drvdata, miscdev); - char *bufp = drvdata->buf + *ppos; + char *bufp; + + mutex_lock(&drvdata->mem_lock); + + bufp = drvdata->buf + *ppos; if (*ppos + len > drvdata->len) len = drvdata->len - *ppos; @@ -165,6 +169,7 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, if (copy_to_user(data, bufp, len)) { dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__); + mutex_unlock(&drvdata->mem_lock); return -EFAULT; } @@ -172,6 +177,8 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len, dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n", __func__, len, (int)(drvdata->len - *ppos)); + + mutex_unlock(&drvdata->mem_lock); return len; } -- GitLab From 429bfebaea2013a5dfee999522da3b0f0cbfa442 Mon Sep 17 00:00:00 2001 From: Kyle Yan Date: Wed, 5 Jul 2017 13:19:16 -0700 Subject: [PATCH 707/786] defconfig: Disable hardlockup panic As of commit "2506cb532897" , HARDLOCKUP_DETECTOR_OTHER_CPU has been temporarily removed. Remove hardlockup detection for sdm845 from the defconfigs accordingly. Change-Id: I695fa6b2930cb529fa49c283a2fd29292a9b160f Signed-off-by: Kyle Yan --- arch/arm64/configs/sdm845_defconfig | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index 30d0d4bbeb22..b8e54a907967 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -601,7 +601,6 @@ CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_LOCKUP_DETECTOR=y -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y CONFIG_WQ_WATCHDOG=y CONFIG_PANIC_TIMEOUT=5 CONFIG_PANIC_ON_SCHED_BUG=y -- GitLab From cda0d1c69536f0313ebe487c323aa12feed048cf Mon Sep 17 00:00:00 2001 From: Ashay Jaiswal Date: Mon, 15 May 2017 17:15:29 +0530 Subject: [PATCH 708/786] qpnp-smb2: Fix QC_PULSE_COUNT reading logic QC_PULSE_COUNT register offset differs between PMI8998 and PM660. 
Use common function "smblib_get_pulse_cnt" to read the pulse count instead of directly reading the register. While at it, update the "smblib_get_pulse_cnt" function to return software based pulse count if HW INOV is disabled. Change-Id: Iab935b352dd75365d1f9862d7a7986cd1c476f66 Signed-off-by: Ashay Jaiswal --- drivers/power/supply/qcom/smb-lib.c | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index 6ead52266133..53f56f7aa847 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -736,7 +736,7 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg) return 0; } -static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count) +static int smblib_get_hw_pulse_cnt(struct smb_charger *chg, int *count) { int rc; u8 val[2]; @@ -770,6 +770,24 @@ static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count) return 0; } +static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count) +{ + int rc; + + /* Use software based pulse count if HW INOV is disabled */ + if (get_effective_result(chg->hvdcp_hw_inov_dis_votable) > 0) { + *count = chg->pulse_cnt; + return 0; + } + + /* Use h/w pulse count if autonomous mode is enabled */ + rc = smblib_get_hw_pulse_cnt(chg, count); + if (rc < 0) + smblib_err(chg, "failed to read h/w pulse count rc=%d\n", rc); + + return rc; +} + #define USBIN_25MA 25000 #define USBIN_100MA 100000 #define USBIN_150MA 150000 @@ -1126,7 +1144,7 @@ static int smblib_hvdcp_hw_inov_dis_vote_callback(struct votable *votable, * the pulse count register get zeroed when autonomous mode is * disabled. Track that in variables before disabling */ - rc = smblib_get_pulse_cnt(chg, &chg->pulse_cnt); + rc = smblib_get_hw_pulse_cnt(chg, &chg->pulse_cnt); if (rc < 0) { pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n", rc); @@ -2309,7 +2327,6 @@ int smblib_get_prop_input_voltage_settled(struct smb_charger *chg, { const struct apsd_result *apsd_result = smblib_get_apsd_result(chg); int rc, pulses; - u8 stat; val->intval = MICRO_5V; if (apsd_result == NULL) { @@ -2319,13 +2336,12 @@ int smblib_get_prop_input_voltage_settled(struct smb_charger *chg, switch (apsd_result->pst) { case POWER_SUPPLY_TYPE_USB_HVDCP_3: - rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat); + rc = smblib_get_pulse_cnt(chg, &pulses); if (rc < 0) { smblib_err(chg, "Couldn't read QC_PULSE_COUNT rc=%d\n", rc); return 0; } - pulses = (stat & QC_PULSE_COUNT_MASK); val->intval = MICRO_5V + HVDCP3_STEP_UV * pulses; break; default: @@ -3302,13 +3318,12 @@ static void smblib_hvdcp_adaptive_voltage_change(struct smb_charger *chg) } if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_HVDCP_3) { - rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, &stat); + rc = smblib_get_pulse_cnt(chg, &pulses); if (rc < 0) { smblib_err(chg, "Couldn't read QC_PULSE_COUNT rc=%d\n", rc); return; } - pulses = (stat & QC_PULSE_COUNT_MASK); if (pulses < QC3_PULSES_FOR_6V) smblib_set_opt_freq_buck(chg, -- GitLab From a17a69bb5a0d339ce75b3e84d6d2ab9e94cd8ac7 Mon Sep 17 00:00:00 2001 From: Subbaraman Narayanamurthy Date: Wed, 7 Jun 2017 17:51:30 -0700 Subject: [PATCH 709/786] power: qpnp-fg-gen3: fix an unbalanced irq disable for delta_bsoc irq Currently, rerun_election() is called on delta_bsoc_irq_en votable to disable delta_bsoc interrupt during probe. However, it doesn't set the internal variable for votable to not allow a similar vote again. 
When a vote is made later to disable the interrupt again, it can end up in invoking the callback again. This leads to an unbalanced interrupt disable warning. Fix it. CRs-Fixed: 2058754 Change-Id: I30c35be5275b5b310cf9b17ea4660a3e2e89c59a Signed-off-by: Subbaraman Narayanamurthy --- drivers/power/supply/qcom/qpnp-fg-gen3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index 75e79bbf9c7c..4416f1dd3f2f 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -4480,7 +4480,7 @@ static int fg_gen3_probe(struct platform_device *pdev) disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq); /* Keep BSOC_DELTA_IRQ irq disabled until we require it */ - rerun_election(chip->delta_bsoc_irq_en_votable); + vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0); rc = fg_debugfs_create(chip); if (rc < 0) { -- GitLab From 86a5e176969284e8947a3d96a345e5476c9c3ed2 Mon Sep 17 00:00:00 2001 From: Abhijeet Dharmapurikar Date: Wed, 5 Jul 2017 13:58:04 -0700 Subject: [PATCH 710/786] power: qpnp-fg-gen3: Qualify aborting capacity learning Currently, capacity learning algorithm is aborted when the charging status goes to not charging. This can happen with qnovo enabled charging where stopping the pulsing can lead to charging status change. Qualify aborting capacity learning based on the qnovo enable status and input presence. While at it, abort the capacity learning when the charging status goes to discharging and charger is removed. Change-Id: I4546e8880be0658748157cb13f048610eee932a3 Signed-off-by: Subbaraman Narayanamurthy Signed-off-by: Abhijeet Dharmapurikar --- drivers/power/supply/qcom/fg-core.h | 1 + drivers/power/supply/qcom/fg-util.c | 41 +++++++++++++++++++----- drivers/power/supply/qcom/qpnp-fg-gen3.c | 34 ++++++++++++++------ 3 files changed, 59 insertions(+), 17 deletions(-) diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index 7e6a4e8fa089..526d93f9a801 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -467,6 +467,7 @@ extern void dump_sram(u8 *buf, int addr, int len); extern int64_t twos_compliment_extend(int64_t val, int s_bit_pos); extern s64 fg_float_decode(u16 val); extern bool is_input_present(struct fg_chip *chip); +extern bool is_qnovo_en(struct fg_chip *chip); extern void fg_circ_buf_add(struct fg_circ_buf *buf, int val); extern void fg_circ_buf_clr(struct fg_circ_buf *buf); extern int fg_circ_buf_avg(struct fg_circ_buf *buf, int *avg); diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c index 839a7718ba77..38d9594031cf 100644 --- a/drivers/power/supply/qcom/fg-util.c +++ b/drivers/power/supply/qcom/fg-util.c @@ -106,14 +106,17 @@ static struct fg_dbgfs dbgfs_data = { static bool is_usb_present(struct fg_chip *chip) { union power_supply_propval pval = {0, }; + int rc; if (!chip->usb_psy) chip->usb_psy = power_supply_get_by_name("usb"); - if (chip->usb_psy) - power_supply_get_property(chip->usb_psy, - POWER_SUPPLY_PROP_PRESENT, &pval); - else + if (!chip->usb_psy) + return false; + + rc = power_supply_get_property(chip->usb_psy, + POWER_SUPPLY_PROP_PRESENT, &pval); + if (rc < 0) return false; return pval.intval != 0; @@ -122,14 +125,17 @@ static bool is_usb_present(struct fg_chip *chip) static bool is_dc_present(struct fg_chip *chip) { union power_supply_propval pval = {0, }; + int rc; if (!chip->dc_psy) chip->dc_psy = 
power_supply_get_by_name("dc"); - if (chip->dc_psy) - power_supply_get_property(chip->dc_psy, - POWER_SUPPLY_PROP_PRESENT, &pval); - else + if (!chip->dc_psy) + return false; + + rc = power_supply_get_property(chip->dc_psy, + POWER_SUPPLY_PROP_PRESENT, &pval); + if (rc < 0) return false; return pval.intval != 0; @@ -140,6 +146,25 @@ bool is_input_present(struct fg_chip *chip) return is_usb_present(chip) || is_dc_present(chip); } +bool is_qnovo_en(struct fg_chip *chip) +{ + union power_supply_propval pval = {0, }; + int rc; + + if (!chip->batt_psy) + chip->batt_psy = power_supply_get_by_name("battery"); + + if (!chip->batt_psy) + return false; + + rc = power_supply_get_property(chip->batt_psy, + POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE, &pval); + if (rc < 0) + return false; + + return pval.intval != 0; +} + #define EXPONENT_SHIFT 11 #define EXPONENT_OFFSET -9 #define MANTISSA_SIGN_BIT 10 diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index 4416f1dd3f2f..606d9fba8f30 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -1402,6 +1402,7 @@ static int fg_cap_learning_done(struct fg_chip *chip) static void fg_cap_learning_update(struct fg_chip *chip) { int rc, batt_soc, batt_soc_msb; + bool input_present = is_input_present(chip); mutex_lock(&chip->cl.lock); @@ -1442,11 +1443,29 @@ static void fg_cap_learning_update(struct fg_chip *chip) chip->cl.init_cc_uah = 0; } + if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) { + if (!input_present) { + fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n", + batt_soc_msb); + chip->cl.active = false; + chip->cl.init_cc_uah = 0; + } + } + if (chip->charge_status == POWER_SUPPLY_STATUS_NOT_CHARGING) { - fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n", - batt_soc_msb); - chip->cl.active = false; - chip->cl.init_cc_uah = 0; + if (is_qnovo_en(chip) && input_present) { + /* + * Don't abort the capacity learning when qnovo + * is enabled and input is present where the + * charging status can go to "not charging" + * intermittently. + */ + } else { + fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n", + batt_soc_msb); + chip->cl.active = false; + chip->cl.init_cc_uah = 0; + } } } @@ -1981,7 +2000,7 @@ static int fg_esr_fcc_config(struct fg_chip *chip) { union power_supply_propval prop = {0, }; int rc; - bool parallel_en = false, qnovo_en = false; + bool parallel_en = false, qnovo_en; if (is_parallel_charger_available(chip)) { rc = power_supply_get_property(chip->parallel_psy, @@ -1994,10 +2013,7 @@ static int fg_esr_fcc_config(struct fg_chip *chip) parallel_en = prop.intval; } - rc = power_supply_get_property(chip->batt_psy, - POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE, &prop); - if (!rc) - qnovo_en = prop.intval; + qnovo_en = is_qnovo_en(chip); fg_dbg(chip, FG_POWER_SUPPLY, "chg_sts: %d par_en: %d qnov_en: %d esr_fcc_ctrl_en: %d\n", chip->charge_status, parallel_en, qnovo_en, -- GitLab From ecc0602554d7d544e6b3d0e30762a0f84a01483b Mon Sep 17 00:00:00 2001 From: Subbaraman Narayanamurthy Date: Tue, 4 Apr 2017 16:10:35 -0700 Subject: [PATCH 711/786] qpnp-fg-gen3: Move getting battery id and profile to profile_load_work Workaround to re-enable BMD while getting battery id was required when driver is probing. This was done to mitigate driver probe failures because of failed SRAM access. 
Getting battery_id from RR_ADC driver itself takes ~200ms and re-enabling BMD takes 200ms which increases driver probe time. With the recent changes in reading batt_id channel from RR_ADC driver, re-enabling BMD is not required anymore when the driver probes. Hence move getting battery_id and battery profile to profile_load_work. This way, there won't be any delay when the driver probes. CRs-Fixed: 2062261 Change-Id: Ifeb361e0f82fca65f9570fd1f425c7360445d01f Signed-off-by: Subbaraman Narayanamurthy --- drivers/power/supply/qcom/fg-core.h | 6 +- drivers/power/supply/qcom/qpnp-fg-gen3.c | 90 +++++++++++++++--------- 2 files changed, 63 insertions(+), 33 deletions(-) diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h index 526d93f9a801..cdd09dd13612 100644 --- a/drivers/power/supply/qcom/fg-core.h +++ b/drivers/power/supply/qcom/fg-core.h @@ -51,9 +51,12 @@ #define PROFILE_LOAD "fg_profile_load" #define DELTA_SOC "fg_delta_soc" -/* Delta BSOC votable reasons */ +/* Delta BSOC irq votable reasons */ #define DELTA_BSOC_IRQ_VOTER "fg_delta_bsoc_irq" +/* Battery missing irq votable reasons */ +#define BATT_MISS_IRQ_VOTER "fg_batt_miss_irq" + #define DEBUG_PRINT_BUFFER_SIZE 64 /* 3 byte address + 1 space character */ #define ADDR_LEN 4 @@ -361,6 +364,7 @@ struct fg_chip { struct fg_irq_info *irqs; struct votable *awake_votable; struct votable *delta_bsoc_irq_en_votable; + struct votable *batt_miss_irq_en_votable; struct fg_sram_param *sp; struct fg_alg_flag *alg_flags; int *debug_mask; diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c index 606d9fba8f30..e5a3a077cd87 100644 --- a/drivers/power/supply/qcom/qpnp-fg-gen3.c +++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c @@ -904,6 +904,7 @@ static int fg_get_batt_id(struct fg_chip *chip) return ret; } + vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, true, 0); return rc; } @@ -1103,6 +1104,25 @@ static void fg_notify_charger(struct fg_chip *chip) fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n"); } +static int fg_batt_miss_irq_en_cb(struct votable *votable, void *data, + int enable, const char *client) +{ + struct fg_chip *chip = data; + + if (!chip->irqs[BATT_MISSING_IRQ].irq) + return 0; + + if (enable) { + enable_irq(chip->irqs[BATT_MISSING_IRQ].irq); + enable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq); + } else { + disable_irq_wake(chip->irqs[BATT_MISSING_IRQ].irq); + disable_irq(chip->irqs[BATT_MISSING_IRQ].irq); + } + + return 0; +} + static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data, int enable, const char *client) { @@ -2514,6 +2534,23 @@ static void profile_load_work(struct work_struct *work) int rc; vote(chip->awake_votable, PROFILE_LOAD, true, 0); + + rc = fg_get_batt_id(chip); + if (rc < 0) { + pr_err("Error in getting battery id, rc:%d\n", rc); + goto out; + } + + rc = fg_get_batt_profile(chip); + if (rc < 0) { + pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n", + chip->batt_id_ohms / 1000, rc); + goto out; + } + + if (!chip->profile_available) + goto out; + if (!is_profile_load_required(chip)) goto done; @@ -2578,9 +2615,9 @@ static void profile_load_work(struct work_struct *work) batt_psy_initialized(chip); fg_notify_charger(chip); chip->profile_loaded = true; - chip->soc_reporting_ready = true; fg_dbg(chip, FG_STATUS, "profile loaded successfully"); out: + chip->soc_reporting_ready = true; vote(chip->awake_votable, PROFILE_LOAD, false, 0); } @@ -3566,20 +3603,6 @@ static irqreturn_t 
fg_batt_missing_irq_handler(int irq, void *data) return IRQ_HANDLED; } - rc = fg_get_batt_id(chip); - if (rc < 0) { - chip->soc_reporting_ready = true; - pr_err("Error in getting battery id, rc:%d\n", rc); - return IRQ_HANDLED; - } - - rc = fg_get_batt_profile(chip); - if (rc < 0) { - chip->soc_reporting_ready = true; - pr_err("Error in getting battery profile, rc:%d\n", rc); - return IRQ_HANDLED; - } - clear_battery_profile(chip); schedule_delayed_work(&chip->profile_load_work, 0); @@ -4346,6 +4369,9 @@ static void fg_cleanup(struct fg_chip *chip) if (chip->delta_bsoc_irq_en_votable) destroy_votable(chip->delta_bsoc_irq_en_votable); + if (chip->batt_miss_irq_en_votable) + destroy_votable(chip->batt_miss_irq_en_votable); + if (chip->batt_id_chan) iio_channel_release(chip->batt_id_chan); @@ -4403,6 +4429,7 @@ static int fg_gen3_probe(struct platform_device *pdev) chip); if (IS_ERR(chip->awake_votable)) { rc = PTR_ERR(chip->awake_votable); + chip->awake_votable = NULL; goto exit; } @@ -4411,6 +4438,16 @@ static int fg_gen3_probe(struct platform_device *pdev) fg_delta_bsoc_irq_en_cb, chip); if (IS_ERR(chip->delta_bsoc_irq_en_votable)) { rc = PTR_ERR(chip->delta_bsoc_irq_en_votable); + chip->delta_bsoc_irq_en_votable = NULL; + goto exit; + } + + chip->batt_miss_irq_en_votable = create_votable("FG_BATT_MISS_IRQ", + VOTE_SET_ANY, + fg_batt_miss_irq_en_cb, chip); + if (IS_ERR(chip->batt_miss_irq_en_votable)) { + rc = PTR_ERR(chip->batt_miss_irq_en_votable); + chip->batt_miss_irq_en_votable = NULL; goto exit; } @@ -4435,19 +4472,6 @@ static int fg_gen3_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&chip->batt_avg_work, batt_avg_work); INIT_DELAYED_WORK(&chip->sram_dump_work, sram_dump_work); - rc = fg_get_batt_id(chip); - if (rc < 0) { - pr_err("Error in getting battery id, rc:%d\n", rc); - goto exit; - } - - rc = fg_get_batt_profile(chip); - if (rc < 0) { - chip->soc_reporting_ready = true; - pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n", - chip->batt_id_ohms / 1000, rc); - } - rc = fg_memif_init(chip); if (rc < 0) { dev_err(chip->dev, "Error in initializing FG_MEMIF, rc:%d\n", @@ -4491,13 +4515,16 @@ static int fg_gen3_probe(struct platform_device *pdev) goto exit; } - /* Keep SOC_UPDATE irq disabled until we require it */ + /* Keep SOC_UPDATE_IRQ disabled until we require it */ if (fg_irqs[SOC_UPDATE_IRQ].irq) disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq); - /* Keep BSOC_DELTA_IRQ irq disabled until we require it */ + /* Keep BSOC_DELTA_IRQ disabled until we require it */ vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER, false, 0); + /* Keep BATT_MISSING_IRQ disabled until we require it */ + vote(chip->batt_miss_irq_en_votable, BATT_MISS_IRQ_VOTER, false, 0); + rc = fg_debugfs_create(chip); if (rc < 0) { dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n", @@ -4521,8 +4548,7 @@ static int fg_gen3_probe(struct platform_device *pdev) } device_init_wakeup(chip->dev, true); - if (chip->profile_available) - schedule_delayed_work(&chip->profile_load_work, 0); + schedule_delayed_work(&chip->profile_load_work, 0); pr_debug("FG GEN3 driver probed successfully\n"); return 0; -- GitLab From 4e56cab09239ea633cb53b38ea42c04c731f9a08 Mon Sep 17 00:00:00 2001 From: Abhijeet Dharmapurikar Date: Wed, 5 Jul 2017 14:00:52 -0700 Subject: [PATCH 712/786] power: power_supply_sysfs: Add support for Floating charger Add support to register a Floating type charger. 
Change-Id: Ib65eff52a42d639a90f162488337a554deab4bfa Signed-off-by: Ashay Jaiswal Signed-off-by: Abhijeet Dharmapurikar --- drivers/power/supply/power_supply_sysfs.c | 2 +- include/linux/power_supply.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 8c43c4e0fa56..08e1505c4c0a 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -46,7 +46,7 @@ static ssize_t power_supply_show_property(struct device *dev, static char *type_text[] = { "Unknown", "Battery", "UPS", "Mains", "USB", "USB_DCP", "USB_CDP", "USB_ACA", "USB_HVDCP", "USB_HVDCP_3", "USB_PD", - "Wireless", "BMS", "Parallel", "Main", "Wipower", + "Wireless", "USB_FLOAT", "BMS", "Parallel", "Main", "Wipower", "TYPEC", "TYPEC_UFP", "TYPEC_DFP" }; static char *status_text[] = { diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 438157082539..8e7a4317df26 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -271,6 +271,7 @@ enum power_supply_type { POWER_SUPPLY_TYPE_USB_HVDCP_3, /* Efficient High Voltage DCP */ POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery */ POWER_SUPPLY_TYPE_WIRELESS, /* Accessory Charger Adapters */ + POWER_SUPPLY_TYPE_USB_FLOAT, /* Floating charger */ POWER_SUPPLY_TYPE_BMS, /* Battery Monitor System */ POWER_SUPPLY_TYPE_PARALLEL, /* Parallel Path */ POWER_SUPPLY_TYPE_MAIN, /* Main Path */ -- GitLab From b9b2d2f87911556e6036bc1106a20f981f3a887c Mon Sep 17 00:00:00 2001 From: Ashay Jaiswal Date: Wed, 21 Jun 2017 12:08:38 +0530 Subject: [PATCH 713/786] smb-lib: add support for float charger reporting Report adapter type as USB_FLOAT if charger hardware detects a floating charger this will allow USB driver to try/start communication over Tx/Rx line during compliance testing. Change-Id: I1d1e91e99806423a3e995876e9db4582ed0f1b08 Signed-off-by: Ashay Jaiswal --- drivers/power/supply/qcom/smb-lib.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index 53f56f7aa847..b5f66b17dca0 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -260,7 +260,7 @@ static const struct apsd_result const smblib_apsd_results[] = { [FLOAT] = { .name = "FLOAT", .bit = FLOAT_CHARGER_BIT, - .pst = POWER_SUPPLY_TYPE_USB_DCP + .pst = POWER_SUPPLY_TYPE_USB_FLOAT }, [HVDCP2] = { .name = "HVDCP2", @@ -3446,6 +3446,7 @@ static void smblib_force_legacy_icl(struct smb_charger *chg, int pst) vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000); break; case POWER_SUPPLY_TYPE_USB_DCP: + case POWER_SUPPLY_TYPE_USB_FLOAT: vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000); break; case POWER_SUPPLY_TYPE_USB_HVDCP: -- GitLab From 36feefef496cb797026fe7c48c4c953f666a73cc Mon Sep 17 00:00:00 2001 From: Anirudh Ghayal Date: Fri, 26 May 2017 09:41:25 +0530 Subject: [PATCH 714/786] power: smb-lib: Rearrange BOOST_BACK voting logic The BOOST_BACK USB-ICL voter needs to be removed in the following conditions - 1. VBUS falling path during PD hard-reset 2. typeC removal 3. False boost-back detected For (1) and (2) - remove the boost_back vote in the usbin_handler and typec_removal path. For (3) add a worker which removes the boost_back vote after the boost-back condition is detected. The delay is sufficient to recover from both a valid and an incorrectly detected boost-back condition. 
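For condition (3) the recovery is a standard delayed-work debounce: the vote that suspends the input is taken immediately, and a worker scheduled BOOST_BACK_UNVOTE_DELAY_MS (750 ms in this patch) later drops it again, so a spurious detection cannot leave the input suspended forever. A minimal sketch of the pattern follows; the names and the context struct are illustrative only, not the smb-lib code, and the bool stands in for the BOOST_BACK_VOTER vote on the usb_icl votable.

/*
 * Minimal sketch of the "vote now, un-vote after a delay" pattern used
 * to recover from a falsely detected boost-back condition.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define RECOVERY_DELAY_MS	750

struct chg_ctx {
	struct delayed_work	removal_work;
	bool			input_suspended;
};

static void removal_work_fn(struct work_struct *work)
{
	struct chg_ctx *ctx = container_of(work, struct chg_ctx,
					   removal_work.work);

	/* equivalent of vote(usb_icl_votable, BOOST_BACK_VOTER, false, 0) */
	ctx->input_suspended = false;
}

static void on_reverse_boost_detected(struct chg_ctx *ctx)
{
	/* equivalent of voting 0 mA to suspend the input */
	ctx->input_suspended = true;

	/* recover automatically if the detection turns out to be spurious */
	schedule_delayed_work(&ctx->removal_work,
			      msecs_to_jiffies(RECOVERY_DELAY_MS));
}

static void chg_ctx_init(struct chg_ctx *ctx)
{
	ctx->input_suspended = false;
	INIT_DELAYED_WORK(&ctx->removal_work, removal_work_fn);
}

In the actual change the same worker also drops the BOOST_BACK vote on the awake votable, so the device can suspend once the input vote is restored.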
CRs-Fixed: 2051908 Change-Id: I9d1d04f392bb6040b0565510ff7d1032bb036de2 Signed-off-by: Anirudh Ghayal --- drivers/power/supply/qcom/smb-lib.c | 49 +++++++++++++++++++++++++++-- drivers/power/supply/qcom/smb-lib.h | 1 + 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index b5f66b17dca0..06429e8fc9c8 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -631,6 +631,17 @@ static void smblib_uusb_removal(struct smb_charger *chg) int rc; cancel_delayed_work_sync(&chg->pl_enable_work); + + if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) { + smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n"); + rc = regulator_disable(chg->dpdm_reg); + if (rc < 0) + smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n", + rc); + } + + if (chg->wa_flags & BOOST_BACK_WA) + vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0); vote(chg->awake_votable, PL_DELAY_VOTER, false, 0); @@ -3142,10 +3153,13 @@ void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg) vbus_rising = (bool)(stat & USBIN_PLUGIN_RT_STS_BIT); - if (vbus_rising) + if (vbus_rising) { smblib_cc2_sink_removal_exit(chg); - else + } else { smblib_cc2_sink_removal_enter(chg); + if (chg->wa_flags & BOOST_BACK_WA) + vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); + } power_supply_changed(chg->usb_psy); smblib_dbg(chg, PR_INTERRUPT, "IRQ: usbin-plugin %s\n", @@ -3582,6 +3596,17 @@ static void smblib_handle_typec_removal(struct smb_charger *chg) chg->cc2_detach_wa_active = false; + if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) { + smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n"); + rc = regulator_disable(chg->dpdm_reg); + if (rc < 0) + smblib_err(chg, "Couldn't disable dpdm regulator rc=%d\n", + rc); + } + + if (chg->wa_flags & BOOST_BACK_WA) + vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); + /* reset APSD voters */ vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0); vote(chg->apsd_disable_votable, PD_VOTER, false, 0); @@ -3802,6 +3827,16 @@ irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data) return IRQ_HANDLED; } +static void smblib_bb_removal_work(struct work_struct *work) +{ + struct smb_charger *chg = container_of(work, struct smb_charger, + bb_removal_work.work); + + vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); + vote(chg->awake_votable, BOOST_BACK_VOTER, false, 0); +} + +#define BOOST_BACK_UNVOTE_DELAY_MS 750 irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data) { struct smb_irq_data *irq_data = data; @@ -3829,6 +3864,14 @@ irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data) if (is_storming(&irq_data->storm_data)) { smblib_err(chg, "Reverse boost detected: voting 0mA to suspend input\n"); vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0); + vote(chg->awake_votable, BOOST_BACK_VOTER, true, 0); + /* + * Remove the boost-back vote after a delay, to avoid + * permanently suspending the input if the boost-back condition + * is unintentionally hit. 
+ */ + schedule_delayed_work(&chg->bb_removal_work, + msecs_to_jiffies(BOOST_BACK_UNVOTE_DELAY_MS)); } return IRQ_HANDLED; @@ -4483,6 +4526,7 @@ int smblib_init(struct smb_charger *chg) INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work); INIT_WORK(&chg->legacy_detection_work, smblib_legacy_detection_work); INIT_DELAYED_WORK(&chg->uusb_otg_work, smblib_uusb_otg_work); + INIT_DELAYED_WORK(&chg->bb_removal_work, smblib_bb_removal_work); chg->fake_capacity = -EINVAL; chg->fake_input_current_limited = -EINVAL; @@ -4538,6 +4582,7 @@ int smblib_deinit(struct smb_charger *chg) cancel_delayed_work_sync(&chg->pl_enable_work); cancel_work_sync(&chg->legacy_detection_work); cancel_delayed_work_sync(&chg->uusb_otg_work); + cancel_delayed_work_sync(&chg->bb_removal_work); power_supply_unreg_notifier(&chg->nb); smblib_destroy_votables(chg); qcom_batt_deinit(); diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h index f39f2c9d0a3b..81db1c92efe2 100644 --- a/drivers/power/supply/qcom/smb-lib.h +++ b/drivers/power/supply/qcom/smb-lib.h @@ -292,6 +292,7 @@ struct smb_charger { struct delayed_work pl_enable_work; struct work_struct legacy_detection_work; struct delayed_work uusb_otg_work; + struct delayed_work bb_removal_work; /* cached status */ int voltage_min_uv; -- GitLab From 55381219437742c6cc595b9de0ab4875a6f9ffad Mon Sep 17 00:00:00 2001 From: Praneeth Paladugu Date: Wed, 5 Jul 2017 15:02:44 -0700 Subject: [PATCH 715/786] ARM: dts: msm: Update Venus clock values on SDM845 v2 Update the Venus clock frequency entries to reflect the supported clock levels on SDM845 V2. Change-Id: I6c20f0aa5dd088aea497ddd07841e303c13fc916 CRs-Fixed: 2012520 Signed-off-by: Praneeth Paladugu --- arch/arm64/boot/dts/qcom/sdm845-v2.dtsi | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi index bf72741c492a..95ee14ce28e2 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-v2.dtsi @@ -36,3 +36,8 @@ &clock_videocc { compatible = "qcom,video_cc-sdm845-v2"; }; + +&msm_vidc { + qcom,allowed-clock-rates = <100000000 200000000 330000000 + 404000000 444000000 533000000>; +}; -- GitLab From 81e181ef8484d3423473b8f3e602ba15420692a7 Mon Sep 17 00:00:00 2001 From: Deepak Katragadda Date: Wed, 5 Jul 2017 15:06:25 -0700 Subject: [PATCH 716/786] clk: qcom: clk-cpu-osm: Cleanup the OSM clock driver Since cpufreq registration and callbacks have been integrated in the OSM clock driver, some of the functions in the driver are redundant. Remove these. 
Change-Id: I882ed151dbd0e0e603072270e2038dc7ebb1e346 Signed-off-by: Deepak Katragadda --- drivers/clk/qcom/clk-cpu-osm.c | 103 +++------------------------------ 1 file changed, 8 insertions(+), 95 deletions(-) diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c index 78e0ae52983c..f12f03dad72d 100644 --- a/drivers/clk/qcom/clk-cpu-osm.c +++ b/drivers/clk/qcom/clk-cpu-osm.c @@ -573,55 +573,11 @@ static int clk_osm_enable(struct clk_hw *hw) } const struct clk_ops clk_ops_cpu_osm = { - .enable = clk_osm_enable, .round_rate = clk_osm_round_rate, .list_rate = clk_osm_list_rate, .debug_init = clk_debug_measure_add, }; -static struct clk_ops clk_ops_core; - -static int cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) -{ - struct clk_osm *cpuclk = to_clk_osm(hw); - struct clk_hw *p_hw = clk_hw_get_parent(hw); - struct clk_osm *parent = to_clk_osm(p_hw); - int index = 0; - unsigned long r_rate; - - if (!cpuclk || !parent) - return -EINVAL; - - r_rate = clk_osm_round_rate(p_hw, rate, NULL); - - if (rate != r_rate) { - pr_err("invalid requested rate=%ld\n", rate); - return -EINVAL; - } - - /* Convert rate to table index */ - index = clk_osm_search_table(parent->osm_table, - parent->num_entries, r_rate); - if (index < 0) { - pr_err("cannot set %s to %lu\n", clk_hw_get_name(hw), rate); - return -EINVAL; - } - pr_debug("rate: %lu --> index %d\n", rate, index); - /* - * Choose index and send request to OSM hardware. - * TODO: Program INACTIVE_OS_REQUEST if needed. - */ - clk_osm_write_reg(parent, index, - DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num), - OSM_BASE); - - /* Make sure the write goes through before proceeding */ - clk_osm_mb(parent, OSM_BASE); - - return 0; -} - static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { @@ -657,38 +613,6 @@ static int l3_clk_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) -{ - struct clk_hw *parent_hw = clk_hw_get_parent(hw); - - if (!parent_hw) - return -EINVAL; - - return clk_hw_round_rate(parent_hw, rate); -} - -static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) -{ - struct clk_osm *cpuclk = to_clk_osm(hw); - struct clk_hw *p_hw = clk_hw_get_parent(hw); - struct clk_osm *parent = to_clk_osm(p_hw); - int index = 0; - - if (!cpuclk || !parent) - return -EINVAL; - - index = clk_osm_read_reg(parent, - DCVS_PERF_STATE_DESIRED_REG(cpuclk->core_num)); - - pr_debug("%s: Index %d, freq %ld\n", __func__, index, - parent->osm_table[index].frequency); - - /* Convert index to frequency */ - return parent->osm_table[index].frequency; -} - static unsigned long l3_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -759,7 +683,7 @@ static struct clk_osm cpu0_pwrcl_clk = { .name = "cpu0_pwrcl_clk", .parent_names = (const char *[]){ "pwrcl_clk" }, .num_parents = 1, - .ops = &clk_ops_core, + .ops = &clk_dummy_ops, }, }; @@ -771,8 +695,7 @@ static struct clk_osm cpu1_pwrcl_clk = { .name = "cpu1_pwrcl_clk", .parent_names = (const char *[]){ "pwrcl_clk" }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_ops_core, + .ops = &clk_dummy_ops, }, }; @@ -784,8 +707,7 @@ static struct clk_osm cpu2_pwrcl_clk = { .name = "cpu2_pwrcl_clk", .parent_names = (const char *[]){ "pwrcl_clk" }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_ops_core, + .ops = &clk_dummy_ops, }, }; @@ -797,8 +719,7 @@ 
static struct clk_osm cpu3_pwrcl_clk = { .name = "cpu3_pwrcl_clk", .parent_names = (const char *[]){ "pwrcl_clk" }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_ops_core, + .ops = &clk_dummy_ops, }, }; @@ -816,7 +737,7 @@ static struct clk_osm cpu4_perfcl_clk = { .name = "cpu4_perfcl_clk", .parent_names = (const char *[]){ "perfcl_clk" }, .num_parents = 1, - .ops = &clk_ops_core, + .ops = &clk_dummy_ops, }, }; @@ -828,8 +749,7 @@ static struct clk_osm cpu5_perfcl_clk = { .name = "cpu5_perfcl_clk", .parent_names = (const char *[]){ "perfcl_clk" }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_ops_core, + .ops = &clk_dummy_ops, }, }; @@ -841,8 +761,7 @@ static struct clk_osm cpu6_perfcl_clk = { .name = "cpu6_perfcl_clk", .parent_names = (const char *[]){ "perfcl_clk" }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_ops_core, + .ops = &clk_dummy_ops, }, }; @@ -854,8 +773,7 @@ static struct clk_osm cpu7_perfcl_clk = { .name = "cpu7_perfcl_clk", .parent_names = (const char *[]){ "perfcl_clk" }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT, - .ops = &clk_ops_core, + .ops = &clk_dummy_ops, }, }; @@ -3348,11 +3266,6 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev) clk_osm_write_reg(&perfcl_clk, val, CORE_DCVS_CTRL, OSM_BASE); } - clk_ops_core = clk_dummy_ops; - clk_ops_core.set_rate = cpu_clk_set_rate; - clk_ops_core.round_rate = cpu_clk_round_rate; - clk_ops_core.recalc_rate = cpu_clk_recalc_rate; - rc = clk_osm_acd_init(&l3_clk); if (rc) { pr_err("failed to initialize ACD for L3, rc=%d\n", rc); -- GitLab From 9a33914e3bfbda667e6503a2598da3b311014c85 Mon Sep 17 00:00:00 2001 From: David Collins Date: Thu, 20 Apr 2017 16:34:46 -0700 Subject: [PATCH 717/786] spmi: spmi-pmic-arb-debug: add clock management support Add support to enable and disable the clock used by the SPMI PMIC arbiter debug bus. This is needed to avoid unclocked accesses. Change-Id: If9eee1317a88c143452d8b46b89aff89d1e956b7 Signed-off-by: David Collins --- .../spmi/qcom,spmi-pmic-arb-debug.txt | 15 +++++++++++ drivers/spmi/spmi-pmic-arb-debug.c | 27 +++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt index ceac719878e7..2131c33237f0 100644 --- a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt +++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt @@ -35,6 +35,19 @@ Supported Properties: the corresponding addresses are specified in the reg property. +- clocks + Usage: optional + Value type: + Definition: Clock tuple consisting of a phandle to a clock controller + device and the clock ID number for the SPMI debug controller + clock. + +- clock-names + Usage: required if clocks property is specified + Value type: + Definition: Defines the name of the clock defined in the "clocks" + property. This must be "core_clk". 
+ - #address-cells Usage: required Value type: @@ -57,6 +70,8 @@ qcom,spmi-debug@6b22000 { compatible = "qcom,spmi-pmic-arb-debug"; reg = <0x6b22000 0x60>, <0x7820A8 4>; reg-names = "core", "fuse"; + clocks = <&clock_aop QDSS_CLK>; + clock-names = "core_clk"; qcom,fuse-disable-bit = <12>; #address-cells = <2>; #size-cells = <0>; diff --git a/drivers/spmi/spmi-pmic-arb-debug.c b/drivers/spmi/spmi-pmic-arb-debug.c index c5a31a9d84eb..2c90bef1224f 100644 --- a/drivers/spmi/spmi-pmic-arb-debug.c +++ b/drivers/spmi/spmi-pmic-arb-debug.c @@ -11,6 +11,7 @@ * GNU General Public License for more details. */ +#include #include #include #include @@ -69,6 +70,7 @@ enum pmic_arb_cmd_op_code { struct spmi_pmic_arb_debug { void __iomem *addr; raw_spinlock_t lock; + struct clk *clock; }; static inline void pmic_arb_debug_write(struct spmi_pmic_arb_debug *pa, @@ -181,6 +183,12 @@ static int pmic_arb_debug_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, else return -EINVAL; + rc = clk_prepare_enable(pa->clock); + if (rc) { + pr_err("%s: failed to enable core clock, rc=%d\n", + __func__, rc); + return rc; + } raw_spin_lock_irqsave(&pa->lock, flags); rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len); @@ -192,6 +200,7 @@ static int pmic_arb_debug_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, buf[i] = pmic_arb_debug_read(pa, PMIC_ARB_DEBUG_RDATA(i)); done: raw_spin_unlock_irqrestore(&pa->lock, flags); + clk_disable_unprepare(pa->clock); return rc; } @@ -221,6 +230,12 @@ static int pmic_arb_debug_write_cmd(struct spmi_controller *ctrl, u8 opc, else return -EINVAL; + rc = clk_prepare_enable(pa->clock); + if (rc) { + pr_err("%s: failed to enable core clock, rc=%d\n", + __func__, rc); + return rc; + } raw_spin_lock_irqsave(&pa->lock, flags); /* Write data to FIFO */ @@ -230,6 +245,7 @@ static int pmic_arb_debug_write_cmd(struct spmi_controller *ctrl, u8 opc, rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len); raw_spin_unlock_irqrestore(&pa->lock, flags); + clk_disable_unprepare(pa->clock); return rc; } @@ -293,6 +309,17 @@ static int spmi_pmic_arb_debug_probe(struct platform_device *pdev) goto err_put_ctrl; } + if (of_find_property(pdev->dev.of_node, "clock-names", NULL)) { + pa->clock = devm_clk_get(&pdev->dev, "core_clk"); + if (IS_ERR(pa->clock)) { + rc = PTR_ERR(pa->clock); + if (rc != -EPROBE_DEFER) + dev_err(&pdev->dev, "unable to request core clock, rc=%d\n", + rc); + goto err_put_ctrl; + } + } + platform_set_drvdata(pdev, ctrl); raw_spin_lock_init(&pa->lock); -- GitLab From af9978ff9f081df473745e336ee15dc87e42e2ea Mon Sep 17 00:00:00 2001 From: David Collins Date: Thu, 8 Jun 2017 17:09:12 -0700 Subject: [PATCH 718/786] mfd: qcom-spmi-pmic: add support for slow SPMI busses Add device tree support for configuring if mutexes or spinlocks should be used in the regmap configuration (i.e. the fast_io element value). This ensures that qcom-spmi-pmic slave devices can be used with SPMI busses that must operate in process context. 
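The practical difference is the locking regmap selects for register accesses: fast_io uses a spinlock, so every access must stay atomic, whereas clearing fast_io makes regmap take a mutex and lets the underlying bus sleep, which the SPMI debug arbiter does in clk_prepare_enable() per the earlier patch in this series. A condensed sketch of the selection is shown below; the helper name is made up and this is not the driver code itself.

/*
 * Condensed sketch, not the qcom-spmi-pmic driver code: pick a
 * mutex-based regmap config when the DT node says transactions may
 * sleep, and the default spinlock-based one otherwise.
 */
#include <linux/of.h>
#include <linux/regmap.h>

static const struct regmap_config atomic_cfg = {
	.reg_bits = 16,
	.val_bits = 8,
	.fast_io = true,	/* regmap protects accesses with a spinlock */
};

static const struct regmap_config sleeping_cfg = {
	.reg_bits = 16,
	.val_bits = 8,
	.fast_io = false,	/* regmap uses a mutex; callers may sleep */
};

static const struct regmap_config *pick_regmap_config(struct device_node *np)
{
	return of_property_read_bool(np, "qcom,can-sleep") ?
			&sleeping_cfg : &atomic_cfg;
}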
Change-Id: I3abdee36935457db497ce6ff2a242755fc3aff90 Signed-off-by: David Collins --- .../devicetree/bindings/mfd/qcom,spmi-pmic.txt | 2 ++ drivers/mfd/qcom-spmi-pmic.c | 15 +++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt index 6ac06c1b9aec..5b6bd970c95e 100644 --- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt +++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.txt @@ -42,6 +42,8 @@ Optional properties for peripheral child nodes: see: Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt - interrupt-names: Corresponding interrupt name to the interrupts property +- qcom,can-sleep: Boolean flag indicating that processes waiting on SPMI + transactions may sleep Each child node of SPMI slave id represents a function of the PMIC. In the example below the rtc device node represents a peripheral of pm8941 diff --git a/drivers/mfd/qcom-spmi-pmic.c b/drivers/mfd/qcom-spmi-pmic.c index 228077097a17..09775637592f 100644 --- a/drivers/mfd/qcom-spmi-pmic.c +++ b/drivers/mfd/qcom-spmi-pmic.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -118,12 +118,23 @@ static const struct regmap_config spmi_regmap_config = { .fast_io = true, }; +static const struct regmap_config spmi_regmap_can_sleep_config = { + .reg_bits = 16, + .val_bits = 8, + .max_register = 0xffff, + .fast_io = false, +}; + static int pmic_spmi_probe(struct spmi_device *sdev) { struct device_node *root = sdev->dev.of_node; struct regmap *regmap; - regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config); + if (of_property_read_bool(root, "qcom,can-sleep")) + regmap = devm_regmap_init_spmi_ext(sdev, + &spmi_regmap_can_sleep_config); + else + regmap = devm_regmap_init_spmi_ext(sdev, &spmi_regmap_config); if (IS_ERR(regmap)) return PTR_ERR(regmap); -- GitLab From db91361135ee774e6afa2e1b40b3ec5ea67a19e1 Mon Sep 17 00:00:00 2001 From: David Collins Date: Thu, 8 Jun 2017 17:13:28 -0700 Subject: [PATCH 719/786] ARM: dts: msm: mark SPMI debug slave devices as can-sleep for SDM845 Specify the qcom,can-sleep property for each of the slave devices on the SPMI debug bus since the debug bus cannot operate in atomic context. 
Change-Id: I89c059725f9dad82a9627e9c2994b211fa7099d2 Signed-off-by: David Collins --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 5208152d3ca8..731ca7ae4f90 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -770,6 +770,7 @@ reg = <0x0 SPMI_USID>; #address-cells = <2>; #size-cells = <0>; + qcom,can-sleep; }; qcom,pm8998-debug@1 { @@ -777,6 +778,7 @@ reg = <0x1 SPMI_USID>; #address-cells = <2>; #size-cells = <0>; + qcom,can-sleep; }; qcom,pmi8998-debug@2 { @@ -784,6 +786,7 @@ reg = <0x2 SPMI_USID>; #address-cells = <2>; #size-cells = <0>; + qcom,can-sleep; }; qcom,pmi8998-debug@3 { @@ -791,6 +794,7 @@ reg = <0x3 SPMI_USID>; #address-cells = <2>; #size-cells = <0>; + qcom,can-sleep; }; qcom,pm8005-debug@4 { @@ -798,6 +802,7 @@ reg = <0x4 SPMI_USID>; #address-cells = <2>; #size-cells = <0>; + qcom,can-sleep; }; qcom,pm8005-debug@5 { @@ -805,6 +810,7 @@ reg = <0x5 SPMI_USID>; #address-cells = <2>; #size-cells = <0>; + qcom,can-sleep; }; }; -- GitLab From 42936de529dc5a566c87c12c8ce3b13ebd91753a Mon Sep 17 00:00:00 2001 From: David Collins Date: Thu, 8 Jun 2017 14:52:43 -0700 Subject: [PATCH 720/786] ARM: dts: msm: specify QDSS clock for SPMI debug bus on SDM845 The QDSS clock must be enabled in order for the SPMI debug bus to operate successfully on SDM845. Specify this clock in the SPMI debug bus device tree node so that it is enabled during SPMI transactions. Change-Id: Ia62c5ced3bcb3991566ab500750956d1a87b7997 Signed-off-by: David Collins --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 731ca7ae4f90..78d01d0a0a48 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -760,6 +760,8 @@ compatible = "qcom,spmi-pmic-arb-debug"; reg = <0x6b22000 0x60>, <0x7820A8 4>; reg-names = "core", "fuse"; + clocks = <&clock_aop QDSS_CLK>; + clock-names = "core_clk"; qcom,fuse-disable-bit = <12>; #address-cells = <2>; #size-cells = <0>; -- GitLab From a65aafed427ef2bf2ec9486e57ccb2dbd02637c3 Mon Sep 17 00:00:00 2001 From: Vikram Mulukutla Date: Mon, 5 Jun 2017 13:37:45 -0700 Subject: [PATCH 721/786] sched: walt: Provide the most recent window_start value to cpufreq cpufreq may require rq->window_start for various reasons. Also switch the timestamp passed to the governor from sched_clock() to sched_ktime_clock() in order to ensure that cpufreq can compare the timestamp to rq->window_start. 
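As an illustration only, not part of this patch: a hypothetical governor callback could use the new ws field roughly as sketched below. The callback name, the zero-initialized load struct and the elapsed-time computation are assumptions; only cpu_util_freq(), struct sched_walt_cpu_load and the sched_ktime_clock() timestamp come from this change.

/* Hypothetical governor-side sketch: walt_load->ws is rq->window_start and
 * the callback timestamp now comes from sched_ktime_clock(), so the two
 * values are directly comparable. */
static void example_walt_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct sched_walt_cpu_load wl = {};
	unsigned long util = cpu_util_freq(smp_processor_id(), &wl);
	u64 in_window = time - wl.ws;	/* time elapsed in the current window */

	/* ... pick a frequency from util, wl.nl, wl.pl and in_window ... */
	(void)util;
	(void)in_window;
}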
Change-Id: Icb78f8923a678f724b3e1101ef2d4248e7b2a796 Signed-off-by: Vikram Mulukutla --- kernel/sched/sched.h | 14 ++++++++++++-- kernel/sched/walt.h | 7 ------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 566e10393028..d846a66ab8eb 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1779,6 +1779,7 @@ struct sched_walt_cpu_load { unsigned long prev_window_util; unsigned long nl; unsigned long pl; + u64 ws; }; static inline unsigned long cpu_util_cum(int cpu, int delta) @@ -1828,6 +1829,7 @@ cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load) walt_load->prev_window_util = util; walt_load->nl = nl; walt_load->pl = 0; + walt_load->ws = rq->window_start; } } #endif @@ -2207,6 +2209,15 @@ static inline u64 irq_time_read(int cpu) } #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ +#ifdef CONFIG_SCHED_WALT +u64 sched_ktime_clock(void); +#else /* CONFIG_SCHED_WALT */ +static inline u64 sched_ktime_clock(void) +{ + return 0; +} +#endif /* CONFIG_SCHED_WALT */ + #ifdef CONFIG_CPU_FREQ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data); @@ -2251,7 +2262,7 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, cpu_of(rq))); if (data) - data->func(data, sched_clock(), flags); + data->func(data, sched_ktime_clock(), flags); } static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) @@ -2336,7 +2347,6 @@ extern unsigned int __read_mostly sched_downmigrate; extern unsigned int __read_mostly sysctl_sched_spill_nr_run; extern unsigned int __read_mostly sched_load_granule; -extern u64 sched_ktime_clock(void); extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb); extern void reset_cpu_hmp_stats(int cpu, int reset_cra); extern int update_preferred_cluster(struct related_thread_group *grp, diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h index f15333259e87..887933f1aa37 100644 --- a/kernel/sched/walt.h +++ b/kernel/sched/walt.h @@ -194,8 +194,6 @@ static inline int exiting_task(struct task_struct *p) return (p->ravg.sum_history[0] == EXITING_TASK_MARKER); } -extern u64 sched_ktime_clock(void); - static inline struct sched_cluster *cpu_cluster(int cpu) { return cpu_rq(cpu)->cluster; @@ -335,11 +333,6 @@ static inline void mark_task_starting(struct task_struct *p) { } static inline void set_window_start(struct rq *rq) { } static inline int sched_cpu_high_irqload(int cpu) { return 0; } -static inline u64 sched_ktime_clock(void) -{ - return 0; -} - static inline void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock) { -- GitLab From f6686697bcdb8f5ec1c58cf5db7674bb34848678 Mon Sep 17 00:00:00 2001 From: Vikram Mulukutla Date: Tue, 6 Jun 2017 11:58:27 -0700 Subject: [PATCH 722/786] sched: cpufreq: Limit governor updates to WALT changes alone It's not necessary to keep reporting load to the governor if it doesn't change in a window. Limit updates to when we expect load changes - after window rollover and when we send updates related to intercluster migrations. 
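A minimal sketch of the resulting call-site convention; the helper below is an assumed example and not part of the patch:

/* Assumed example call site: WALT-driven load reports must carry
 * SCHED_CPUFREQ_WALT or cpufreq_update_util() will now drop them. */
static inline void example_report_walt_load(struct rq *rq, bool intercluster_mig)
{
	unsigned int flags = SCHED_CPUFREQ_WALT;

	if (intercluster_mig)
		flags |= SCHED_CPUFREQ_INTERCLUSTER_MIG;

	cpufreq_update_util(rq, flags);
}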
Change-Id: I3232d40f3d54b0b81cfafdcdb99b534df79327bf Signed-off-by: Vikram Mulukutla --- include/linux/sched.h | 1 + kernel/sched/sched.h | 4 +++- kernel/sched/walt.c | 8 +++++--- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 9e7ab0584f04..03447214a769 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -3905,6 +3905,7 @@ static inline unsigned long rlimit_max(unsigned int limit) #define SCHED_CPUFREQ_DL (1U << 1) #define SCHED_CPUFREQ_IOWAIT (1U << 2) #define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3) +#define SCHED_CPUFREQ_WALT (1U << 4) #define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d846a66ab8eb..2524954be95e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2250,8 +2250,10 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) #ifdef CONFIG_SCHED_WALT /* * Skip if we've already reported, but not if this is an inter-cluster - * migration + * migration. Also only allow WALT update sites. */ + if (!(flags & SCHED_CPUFREQ_WALT)) + return; if (!sched_disable_window_stats && (rq->load_reported_window == rq->window_start) && !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG)) diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c index 50f889b1197e..0416f4080d06 100644 --- a/kernel/sched/walt.c +++ b/kernel/sched/walt.c @@ -871,8 +871,10 @@ void fixup_busy_time(struct task_struct *p, int new_cpu) migrate_top_tasks(p, src_rq, dest_rq); if (!same_freq_domain(new_cpu, task_cpu(p))) { - cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG); - cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG); + cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG | + SCHED_CPUFREQ_WALT); + cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG | + SCHED_CPUFREQ_WALT); } if (p == src_rq->ed_task) { @@ -3040,7 +3042,7 @@ void walt_irq_work(struct irq_work *irq_work) for_each_sched_cluster(cluster) for_each_cpu(cpu, &cluster->cpus) - cpufreq_update_util(cpu_rq(cpu), 0); + cpufreq_update_util(cpu_rq(cpu), SCHED_CPUFREQ_WALT); for_each_cpu(cpu, cpu_possible_mask) raw_spin_unlock(&cpu_rq(cpu)->lock); -- GitLab From 9a0563b90f40f531ecbe7da0df1ab006d1ac01b9 Mon Sep 17 00:00:00 2001 From: Shrenuj Bansal Date: Thu, 15 Jun 2017 14:45:15 -0700 Subject: [PATCH 723/786] msm: kgsl: Add scm call for SMMU aperture programming For truly dynamic context bank allocation/mapping to work, we need to set the aperture register CP utilizes to the SMMU context bank we receive. This aperture register is protected so we use a new scm call for trustzone to do this for us. 
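For reference, a worked example of the argument packing introduced below; the context bank number is made up and the snippet is illustrative only:

#include <stdio.h>

int main(void)
{
	/* CP_APERTURE_REG is 0 in this patch; context bank 5 is assumed. */
	unsigned int aperture_reg = 0, cb = 5;
	unsigned int arg0 = 0xFFFF0000 | ((aperture_reg & 0xff) << 8) | (cb & 0xff);

	printf("args[0] = 0x%08X\n", arg0);	/* 0xFFFF0005; args[1..3] stay 0xFFFFFFFF */
	return 0;
}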
Change-Id: I50ba3d8f4287f3cc251bbce14db4702e1c8f7ebc Signed-off-by: Shrenuj Bansal --- drivers/gpu/msm/kgsl_iommu.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c index 73c0d71873e3..c02046aca370 100644 --- a/drivers/gpu/msm/kgsl_iommu.c +++ b/drivers/gpu/msm/kgsl_iommu.c @@ -34,6 +34,8 @@ #include "kgsl_trace.h" #include "kgsl_pwrctrl.h" +#define CP_APERTURE_REG 0 + #define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu)) #define ADDR_IN_GLOBAL(_a) \ @@ -1220,6 +1222,19 @@ void _enable_gpuhtw_llc(struct kgsl_mmu *mmu, struct kgsl_iommu_pt *iommu_pt) "System cache not enabled for GPU pagetable walks: %d\n", ret); } +static int program_smmu_aperture(unsigned int cb, unsigned int aperture_reg) +{ + struct scm_desc desc = {0}; + + desc.args[0] = 0xFFFF0000 | ((aperture_reg & 0xff) << 8) | (cb & 0xff); + desc.args[1] = 0xFFFFFFFF; + desc.args[2] = 0xFFFFFFFF; + desc.args[3] = 0xFFFFFFFF; + desc.arginfo = SCM_ARGS(4); + + return scm_call2(SCM_SIP_FNID(SCM_SVC_MP, 0x1B), &desc); +} + static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) { int ret = 0; @@ -1260,6 +1275,15 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt) goto done; } + if (!MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE)) { + ret = program_smmu_aperture(cb_num, CP_APERTURE_REG); + if (ret) { + pr_err("SMMU aperture programming call failed with error %d\n", + ret); + return ret; + } + } + ctx->cb_num = cb_num; ctx->regbase = iommu->regbase + KGSL_IOMMU_CB0_OFFSET + (cb_num << KGSL_IOMMU_CB_SHIFT); -- GitLab From cd18751fd8cf9d457c0b89121a05b880cce26e92 Mon Sep 17 00:00:00 2001 From: Abhinav Kumar Date: Fri, 30 Jun 2017 01:02:36 -0700 Subject: [PATCH 724/786] drm/msm: add support for parsing YUV 420 deep color Current upstream parser only handles RGB deep color modes. Add support in the SDE EDID parser module to parse HDMI VSDB block and indicate support for YUV 420 deep color modes in the sink. 
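A hypothetical consumer of the parsed result, not part of this patch, might gate deep-color YUV 4:2:0 output on the bits merged into display_info, for example:

/* Assumed example: check whether the sink advertised 10-bit YCbCr 4:2:0. */
static bool example_sink_supports_y420_30bit(struct drm_connector *connector)
{
	return !!(connector->display_info.edid_hdmi_dc_modes &
		  DRM_EDID_YCBCR420_DC_30);
}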
Change-Id: If6c007263094e7716a29cae503d3e3471ae04306 Signed-off-by: Abhinav Kumar --- drivers/gpu/drm/msm/sde_edid_parser.c | 73 +++++++++++++++++++++++++++ include/drm/drm_edid.h | 5 ++ 2 files changed, 78 insertions(+) diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c index 12165e8b99e3..1c8c3e1f92b8 100644 --- a/drivers/gpu/drm/msm/sde_edid_parser.c +++ b/drivers/gpu/drm/msm/sde_edid_parser.c @@ -92,6 +92,21 @@ for ((i) = (start); \ (i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \ (i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1) +static bool sde_cea_db_is_hdmi_hf_vsdb(const u8 *db) +{ + int hdmi_id; + + if (sde_cea_db_tag(db) != VENDOR_SPECIFIC_DATA_BLOCK) + return false; + + if (sde_cea_db_payload_len(db) < 7) + return false; + + hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16); + + return hdmi_id == HDMI_IEEE_OUI_HF; +} + static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id) { u8 *db = NULL; @@ -338,6 +353,63 @@ struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl) SDE_EDID_DEBUG("%s -\n", __func__); } +static void _sde_edid_update_dc_modes( +struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl) +{ + int i, start, end; + u8 *edid_ext, *hdmi; + struct drm_display_info *disp_info; + u32 hdmi_dc_yuv_modes = 0; + + SDE_EDID_DEBUG("%s +\n", __func__); + + if (!connector || !edid_ctrl) { + SDE_ERROR("invalid input\n"); + return; + } + + disp_info = &connector->display_info; + + edid_ext = sde_find_cea_extension(edid_ctrl->edid); + + if (!edid_ext) { + SDE_ERROR("no cea extension\n"); + return; + } + + if (sde_cea_db_offsets(edid_ext, &start, &end)) + return; + + sde_for_each_cea_db(edid_ext, i, start, end) { + if (sde_cea_db_is_hdmi_hf_vsdb(&edid_ext[i])) { + + hdmi = &edid_ext[i]; + + if (sde_cea_db_payload_len(hdmi) < 7) + continue; + + if (hdmi[7] & DRM_EDID_YCBCR420_DC_30) { + hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_30; + SDE_EDID_DEBUG("Y420 30-bit supported\n"); + } + + if (hdmi[7] & DRM_EDID_YCBCR420_DC_36) { + hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36; + SDE_EDID_DEBUG("Y420 36-bit supported\n"); + } + + if (hdmi[7] & DRM_EDID_YCBCR420_DC_48) { + hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36; + SDE_EDID_DEBUG("Y420 48-bit supported\n"); + } + } + } + + disp_info->edid_hdmi_dc_modes |= hdmi_dc_yuv_modes; + + SDE_EDID_DEBUG("%s -\n", __func__); +} + static void _sde_edid_extract_audio_data_blocks( struct sde_edid_ctrl *edid_ctrl) { @@ -475,6 +547,7 @@ int _sde_edid_update_modes(struct drm_connector *connector, rc = drm_add_edid_modes(connector, edid_ctrl->edid); sde_edid_set_mode_format(connector, edid_ctrl); + _sde_edid_update_dc_modes(connector, edid_ctrl); SDE_EDID_DEBUG("%s -", __func__); return rc; } diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 32bd1042e0e3..1bcf8f7dd19c 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -212,6 +212,11 @@ struct detailed_timing { #define DRM_EDID_HDMI_DC_30 (1 << 4) #define DRM_EDID_HDMI_DC_Y444 (1 << 3) +/* YCBCR 420 deep color modes */ +#define DRM_EDID_YCBCR420_DC_48 (1 << 2) +#define DRM_EDID_YCBCR420_DC_36 (1 << 1) +#define DRM_EDID_YCBCR420_DC_30 (1 << 0) + /* ELD Header Block */ #define DRM_ELD_HEADER_BLOCK_SIZE 4 -- GitLab From 1b742de7408a576b70914a978f9672f8de17744a Mon Sep 17 00:00:00 2001 From: Vikram Mulukutla Date: Mon, 19 Jun 2017 11:31:25 -0700 Subject: [PATCH 725/786] soc: qcom: ramdump: Use cdev devices instead of misc devices Misc devices are usually meant for really miscellaneous 
devices that cannot 'fit' in any class. Since we have quite a few subsystems requiring a ramdump nowadays, let's move to using character devices directly. Change-Id: I387fc4f3f59d61fab2ba89437bc3d3d6df24b69f Signed-off-by: Vikram Mulukutla Signed-off-by: Kyle Yan --- drivers/soc/qcom/ramdump.c | 111 ++++++++++++++++++++++++++++--------- 1 file changed, 86 insertions(+), 25 deletions(-) diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c index d5b051e17e4f..dd770628a9ce 100644 --- a/drivers/soc/qcom/ramdump.c +++ b/drivers/soc/qcom/ramdump.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -24,10 +23,20 @@ #include #include #include +#include #include #include #include + +#define RAMDUMP_NUM_DEVICES 256 +#define RAMDUMP_NAME "ramdump" + +static struct class *ramdump_class; +static dev_t ramdump_dev; +static DEFINE_MUTEX(rd_minor_mutex); +static DEFINE_IDA(rd_minor_id); +static bool ramdump_devnode_inited; #define RAMDUMP_WAIT_MSECS 120000 struct ramdump_device { @@ -38,7 +47,8 @@ struct ramdump_device { int ramdump_status; struct completion ramdump_complete; - struct miscdevice device; + struct cdev cdev; + struct device *dev; wait_queue_head_t dump_wait_q; int nsegments; @@ -51,17 +61,19 @@ struct ramdump_device { static int ramdump_open(struct inode *inode, struct file *filep) { - struct ramdump_device *rd_dev = container_of(filep->private_data, - struct ramdump_device, device); + struct ramdump_device *rd_dev = container_of(inode->i_cdev, + struct ramdump_device, cdev); rd_dev->consumer_present = 1; rd_dev->ramdump_status = 0; + filep->private_data = rd_dev; return 0; } static int ramdump_release(struct inode *inode, struct file *filep) { - struct ramdump_device *rd_dev = container_of(filep->private_data, - struct ramdump_device, device); + + struct ramdump_device *rd_dev = container_of(inode->i_cdev, + struct ramdump_device, cdev); rd_dev->consumer_present = 0; rd_dev->data_ready = 0; complete(&rd_dev->ramdump_complete); @@ -105,8 +117,7 @@ static unsigned long offset_translate(loff_t user_offset, static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count, loff_t *pos) { - struct ramdump_device *rd_dev = container_of(filep->private_data, - struct ramdump_device, device); + struct ramdump_device *rd_dev = filep->private_data; void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL; unsigned long data_left = 0, bytes_before, bytes_after; unsigned long addr = 0; @@ -154,7 +165,7 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count, rd_dev->attrs = 0; rd_dev->attrs |= DMA_ATTR_SKIP_ZEROING; - device_mem = vaddr ?: dma_remap(rd_dev->device.parent, NULL, addr, + device_mem = vaddr ?: dma_remap(rd_dev->dev->parent, NULL, addr, copy_size, rd_dev->attrs); origdevice_mem = device_mem; @@ -206,7 +217,7 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count, kfree(finalbuf); if (!vaddr && origdevice_mem) - dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size); + dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size); *pos += copy_size; @@ -217,7 +228,7 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count, ramdump_done: if (!vaddr && origdevice_mem) - dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size); + dma_unremap(rd_dev->dev->parent, origdevice_mem, copy_size); kfree(finalbuf); rd_dev->data_ready = 0; @@ -229,8 +240,7 @@ static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count, static 
unsigned int ramdump_poll(struct file *filep, struct poll_table_struct *wait) { - struct ramdump_device *rd_dev = container_of(filep->private_data, - struct ramdump_device, device); + struct ramdump_device *rd_dev = filep->private_data; unsigned int mask = 0; if (rd_dev->data_ready) @@ -247,9 +257,26 @@ static const struct file_operations ramdump_file_ops = { .poll = ramdump_poll }; -void *create_ramdump_device(const char *dev_name, struct device *parent) +static int ramdump_devnode_init(void) { int ret; + + ramdump_class = class_create(THIS_MODULE, RAMDUMP_NAME); + ret = alloc_chrdev_region(&ramdump_dev, 0, RAMDUMP_NUM_DEVICES, + RAMDUMP_NAME); + if (ret < 0) { + pr_warn("%s: unable to allocate major\n", __func__); + return ret; + } + + ramdump_devnode_inited = true; + + return 0; +} + +void *create_ramdump_device(const char *dev_name, struct device *parent) +{ + int ret, minor; struct ramdump_device *rd_dev; if (!dev_name) { @@ -257,6 +284,14 @@ void *create_ramdump_device(const char *dev_name, struct device *parent) return NULL; } + mutex_lock(&rd_minor_mutex); + if (!ramdump_devnode_inited) { + ret = ramdump_devnode_init(); + if (ret) + return ERR_PTR(ret); + } + mutex_unlock(&rd_minor_mutex); + rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL); if (!rd_dev) { @@ -265,15 +300,20 @@ void *create_ramdump_device(const char *dev_name, struct device *parent) return NULL; } + /* get a minor number */ + minor = ida_simple_get(&rd_minor_id, 0, RAMDUMP_NUM_DEVICES, + GFP_KERNEL); + if (minor < 0) { + pr_err("%s: No more minor numbers left! rc:%d\n", __func__, + minor); + ret = -ENODEV; + goto fail_out_of_minors; + } + snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s", dev_name); init_completion(&rd_dev->ramdump_complete); - - rd_dev->device.minor = MISC_DYNAMIC_MINOR; - rd_dev->device.name = rd_dev->name; - rd_dev->device.fops = &ramdump_file_ops; - rd_dev->device.parent = parent; if (parent) { rd_dev->complete_ramdump = of_property_read_bool( parent->of_node, "qcom,complete-ramdump"); @@ -284,27 +324,48 @@ void *create_ramdump_device(const char *dev_name, struct device *parent) init_waitqueue_head(&rd_dev->dump_wait_q); - ret = misc_register(&rd_dev->device); + rd_dev->dev = device_create(ramdump_class, parent, + MKDEV(MAJOR(ramdump_dev), minor), + rd_dev, rd_dev->name); + if (IS_ERR(rd_dev->dev)) { + ret = PTR_ERR(rd_dev->dev); + pr_err("%s: device_create failed for %s (%d)", __func__, + dev_name, ret); + goto fail_return_minor; + } + + cdev_init(&rd_dev->cdev, &ramdump_file_ops); - if (ret) { - pr_err("%s: misc_register failed for %s (%d)", __func__, + ret = cdev_add(&rd_dev->cdev, MKDEV(MAJOR(ramdump_dev), minor), 1); + if (ret < 0) { + pr_err("%s: cdev_add failed for %s (%d)", __func__, dev_name, ret); - kfree(rd_dev); - return NULL; + goto fail_cdev_add; } return (void *)rd_dev; + +fail_cdev_add: + device_unregister(rd_dev->dev); +fail_return_minor: + ida_simple_remove(&rd_minor_id, minor); +fail_out_of_minors: + kfree(rd_dev); + return ERR_PTR(ret); } EXPORT_SYMBOL(create_ramdump_device); void destroy_ramdump_device(void *dev) { struct ramdump_device *rd_dev = dev; + int minor = MINOR(rd_dev->cdev.dev); if (IS_ERR_OR_NULL(rd_dev)) return; - misc_deregister(&rd_dev->device); + cdev_del(&rd_dev->cdev); + device_unregister(rd_dev->dev); + ida_simple_remove(&rd_minor_id, minor); kfree(rd_dev); } EXPORT_SYMBOL(destroy_ramdump_device); -- GitLab From 249679d8ecec9c8ba109ea7bc70cd56ac6cc6509 Mon Sep 17 00:00:00 2001 From: Praneeth Paladugu Date: Mon, 3 Jul 2017 13:35:10 
-0700 Subject: [PATCH 726/786] msm: vidc: Fine tune PMS with on-target profiling In Performance Monitoring System, before making clock decision always follow the below order. - If client is starving for buffers, Increase clock. - If HW pending count is less, Decrease clock. - If none of above, vote for required clock. Along with this, sanitize CF values from HW for better PMS algo. CRs-Fixed: 2012520 Change-Id: I31822918013aebe60d7fdac9f91d4cca7bc92345 Signed-off-by: Praneeth Paladugu --- .../media/platform/msm/vidc/msm_vidc_clocks.c | 52 ++++++++++++++----- 1 file changed, 40 insertions(+), 12 deletions(-) diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c index cb3c526ec172..86dc973ea11c 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c @@ -16,8 +16,8 @@ #include "msm_vidc_debug.h" #include "msm_vidc_clocks.h" -#define MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR 1 -#define MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR 4 +#define MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR (1 << 16) +#define MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR (4 << 16) static inline unsigned long int get_ubwc_compression_ratio( struct ubwc_cr_stats_info_type ubwc_stats_info) @@ -104,8 +104,12 @@ static int fill_recon_stats(struct msm_vidc_inst *inst, max_cf = max(max_cf, binfo->CF); } mutex_unlock(&inst->reconbufs.lock); - vote_data->compression_ratio = CR; + /* Sanitize CF values from HW . */ + max_cf = min_t(u32, max_cf, MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR); + min_cf = max_t(u32, min_cf, MSM_VIDC_MIN_UBWC_COMPLEXITY_FACTOR); + + vote_data->compression_ratio = CR; vote_data->complexity_factor = max_cf; vote_data->use_dpb_read = false; if (inst->clk_data.load <= inst->clk_data.load_norm) { @@ -114,7 +118,7 @@ static int fill_recon_stats(struct msm_vidc_inst *inst, } dprintk(VIDC_DBG, - "Complression Ratio = %d Complexity Factor = %d\n", + "Compression Ratio = %d Complexity Factor = %d\n", vote_data->compression_ratio, vote_data->complexity_factor); @@ -160,7 +164,8 @@ int msm_comm_vote_bus(struct msm_vidc_core *core) list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) { if (temp->vvb.vb2_buf.type == - V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + temp->deferred) { filled_len = max(filled_len, temp->vvb.vb2_buf.planes[0].bytesused); device_addr = temp->smem[0].device_addr; @@ -260,6 +265,7 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst) int rc = 0; int fw_pending_bufs = 0; int total_output_buf = 0; + int min_output_buf = 0; int buffers_outside_fw = 0; struct msm_vidc_core *core; struct hal_buffer_requirements *output_buf_req; @@ -294,16 +300,37 @@ static int msm_dcvs_scale_clocks(struct msm_vidc_inst *inst) /* Total number of output buffers */ total_output_buf = output_buf_req->buffer_count_actual; + min_output_buf = output_buf_req->buffer_count_min; + /* Buffers outside FW are with display */ buffers_outside_fw = total_output_buf - fw_pending_bufs; dprintk(VIDC_PROF, - "Counts : total_output_buf = %d fw_pending_bufs = %d buffers_outside_fw = %d\n", - total_output_buf, fw_pending_bufs, buffers_outside_fw); + "Counts : total_output_buf = %d Min buffers = %d fw_pending_bufs = %d buffers_outside_fw = %d\n", + total_output_buf, min_output_buf, fw_pending_bufs, + buffers_outside_fw); - if (buffers_outside_fw >= dcvs->min_threshold) - dcvs->load = dcvs->load_low; - else if (buffers_outside_fw <= dcvs->max_threshold) + /* + * PMS decides clock level based on 
below algo + + * Limits : + * max_threshold : Client extra allocated buffers. Client + * reserves these buffers for it's smooth flow. + * min_output_buf : HW requested buffers for it's smooth + * flow of buffers. + * min_threshold : Driver requested extra buffers for PMS. + + * 1) When buffers outside FW are reaching client's extra buffers, + * FW is slow and will impact pipeline, Increase clock. + * 2) When pending buffers with FW are same as FW requested, + * pipeline has cushion to absorb FW slowness, Decrease clocks. + * 3) When none of 1) or 2) FW is just fast enough to maintain + * pipeline, request Right Clocks. + */ + + if (buffers_outside_fw <= dcvs->max_threshold) dcvs->load = dcvs->load_high; + else if (fw_pending_bufs <= min_output_buf) + dcvs->load = dcvs->load_low; else dcvs->load = dcvs->load_norm; @@ -393,7 +420,7 @@ static unsigned long msm_vidc_max_freq(struct msm_vidc_core *core) allowed_clks_tbl = core->resources.allowed_clks_tbl; freq = allowed_clks_tbl[0].clock_rate; - dprintk(VIDC_PROF, "Max rate = %lu", freq); + dprintk(VIDC_PROF, "Max rate = %lu\n", freq); return freq; } @@ -571,7 +598,8 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst) mutex_lock(&inst->registeredbufs.lock); list_for_each_entry_safe(temp, next, &inst->registeredbufs.list, list) { if (temp->vvb.vb2_buf.type == - V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { + V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && + temp->deferred) { filled_len = max(filled_len, temp->vvb.vb2_buf.planes[0].bytesused); device_addr = temp->smem[0].device_addr; -- GitLab From 7fa2271b071f4f96d5a12faac5186808e0c94561 Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Mon, 3 Apr 2017 18:29:21 -0700 Subject: [PATCH 727/786] msm: ipa: add support for new uC opcode Support new uC opcode IPA_HDR_PROC_L2TP_HEADER_ADD and IPA_HDR_PROC_L2TP_HEADER_REMOVE for L2TP. 
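To give a sense of how a client fills the extended descriptor, here is a hypothetical usage sketch; the handle value and the commit-path comment are assumptions, only the structure and field names come from this patch:

#include <linux/msm_ipa.h>

/* Assumed example: describe an L2TP header-addition processing context. */
static struct ipa_hdr_proc_ctx_add example_l2tp_add_ctx(uint32_t hdr_hdl)
{
	struct ipa_hdr_proc_ctx_add pctx = { 0 };

	pctx.type = IPA_HDR_PROC_L2TP_HEADER_ADD;
	pctx.hdr_hdl = hdr_hdl;	/* handle of a previously added template header */
	pctx.l2tp_params.hdr_add_param.eth_hdr_retained = 1;	/* keep Ethernet header */
	pctx.l2tp_params.hdr_add_param.input_ip_version = 0;	/* inner packet is IPv4 */
	pctx.l2tp_params.hdr_add_param.output_ip_version = 1;	/* outer header is IPv6 */

	/* The entry would then be committed through the existing
	 * processing-context add path. */
	return pctx;
}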
Change-Id: Id0caddf76d06085f7ddaf52c7a48bd04a65f9069 Acked-by: Shihuan Liu Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_api.c | 1 + drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 2 + drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 6 +- drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 4 +- drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 49 ++++++++++++-- .../platform/msm/ipa/ipa_v3/ipahal/ipahal.c | 65 +++++++++++++++++-- .../platform/msm/ipa/ipa_v3/ipahal/ipahal.h | 4 +- .../platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h | 50 ++++++++++++++ include/uapi/linux/msm_ipa.h | 47 +++++++++++++- 9 files changed, 212 insertions(+), 16 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c index 38264d908b81..23103daaa170 100644 --- a/drivers/platform/msm/ipa/ipa_api.c +++ b/drivers/platform/msm/ipa/ipa_api.c @@ -172,6 +172,7 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = { __stringify(IPA_CLIENT_TEST3_CONS), __stringify(IPA_CLIENT_TEST4_PROD), __stringify(IPA_CLIENT_TEST4_CONS), + __stringify(IPA_CLIENT_DUMMY_CONS), }; /** diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index 2a7b9771baa7..fb44f965695d 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -78,6 +78,8 @@ const char *ipa3_hdr_proc_type_name[] = { __stringify(IPA_HDR_PROC_ETHII_TO_802_3), __stringify(IPA_HDR_PROC_802_3_TO_ETHII), __stringify(IPA_HDR_PROC_802_3_TO_802_3), + __stringify(IPA_HDR_PROC_L2TP_HEADER_ADD), + __stringify(IPA_HDR_PROC_L2TP_HEADER_REMOVE), }; static struct dentry *dent; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c index 6e51472c5d1e..14d776e1210e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c @@ -13,7 +13,7 @@ #include "ipa_i.h" #include "ipahal/ipahal.h" -static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60}; +static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 64}; static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64}; #define HDR_TYPE_IS_VALID(type) \ @@ -78,7 +78,8 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem, entry->hdr->is_hdr_proc_ctx, entry->hdr->phys_base, hdr_base_addr, - entry->hdr->offset_entry); + entry->hdr->offset_entry, + entry->l2tp_params); if (ret) return ret; } @@ -353,6 +354,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx, entry->type = proc_ctx->type; entry->hdr = hdr_entry; + entry->l2tp_params = proc_ctx->l2tp_params; if (add_ref_hdr) hdr_entry->ref_cnt++; entry->cookie = IPA_COOKIE; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h index c6d5c6e02cf3..6a5ec3118468 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h @@ -322,7 +322,8 @@ struct ipa3_hdr_proc_ctx_offset_entry { /** * struct ipa3_hdr_proc_ctx_entry - IPA processing context header table entry * @link: entry's link in global header table entries list - * @type: + * @type: header processing context type + * @l2tp_params: L2TP parameters * @offset_entry: entry's offset * @hdr: the header * @cookie: cookie used for validity check @@ -333,6 +334,7 @@ struct ipa3_hdr_proc_ctx_offset_entry { struct ipa3_hdr_proc_ctx_entry { struct list_head link; enum ipa_hdr_proc_type type; + union ipa_l2tp_hdr_proc_ctx_params l2tp_params; struct 
ipa3_hdr_proc_ctx_offset_entry *offset_entry; struct ipa3_hdr_entry *hdr; u32 cookie; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 3d1af57b8c27..61889b631c6e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -631,7 +631,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, { 29, 14, 8, 8, IPA_EE_AP } }, - + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_0][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_0_GROUP_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, /* IPA_3_5 */ [IPA_3_5][IPA_CLIENT_WLAN1_PROD] = { @@ -778,6 +783,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_PCIE, { 19, 13, 8, 8, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_5][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 31, 31, 8, 8, IPA_EE_AP } }, /* IPA_3_5_MHI */ [IPA_3_5_MHI][IPA_CLIENT_USB_PROD] = { @@ -928,6 +939,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_PCIE, { 19, 13, 8, 8, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_5_MHI][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_5_MHI_GROUP_DMA, false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_PCIE, + { 31, 31, 8, 8, IPA_EE_AP } }, /* IPA_3_5_1 */ [IPA_3_5_1][IPA_CLIENT_WLAN1_PROD] = { @@ -1073,7 +1090,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR, { 11, 2, 4, 6, IPA_EE_AP } }, - + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_3_5_1][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v3_5_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, /* IPA_4_0 */ [IPA_4_0][IPA_CLIENT_WLAN1_PROD] = { @@ -1273,6 +1296,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_PCIE, { 21, 15, 9, 9, IPA_EE_AP } }, + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_0][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, /* IPA_4_0_MHI */ [IPA_4_0_MHI][IPA_CLIENT_USB_PROD] = { @@ -1452,8 +1482,13 @@ static const struct ipa_ep_configuration ipa3_ep_mapping IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_PCIE, { 21, 15, 9, 9, IPA_EE_AP } }, - - + /* Dummy consumer (pipe 31) is used in L2TP rt rule */ + [IPA_4_0_MHI][IPA_CLIENT_DUMMY_CONS] = { + true, IPA_v4_0_GROUP_UL_DL, + false, + IPA_DPS_HPS_SEQ_TYPE_INVALID, + QMB_MASTER_SELECT_DDR, + { 31, 31, 8, 8, IPA_EE_AP } }, }; static struct msm_bus_vectors ipa_init_vectors_v3_0[] = { @@ -2129,7 +2164,8 @@ int ipa3_get_ep_mapping(enum ipa_client_type client) ipa_ep_idx = ipa3_ep_mapping[ipa3_get_hw_type_index()][client]. 
ipa_gsi_ep_info.ipa_ep_num; - if (ipa_ep_idx < 0 || ipa_ep_idx >= IPA3_MAX_NUM_PIPES) + if (ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES + && client != IPA_CLIENT_DUMMY_CONS)) return IPA_EP_NOT_ALLOCATED; return ipa_ep_idx; @@ -2896,7 +2932,8 @@ int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode) if (ep_mode->mode == IPA_DMA) type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY; else - type = IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP; + type = + IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP; IPADBG(" set sequencers to sequance 0x%x, ep = %d\n", type, clnt_hdl); diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c index 6f46ebf09e7b..ae1b989e3a3c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c @@ -1153,12 +1153,14 @@ static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset, * @phys_base: memory location in DDR * @hdr_base_addr: base address in table * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters */ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type, void *const base, u32 offset, u32 hdr_len, bool is_hdr_proc_ctx, dma_addr_t phys_base, u32 hdr_base_addr, - struct ipa_hdr_offset_entry *offset_entry){ + struct ipa_hdr_offset_entry *offset_entry, + union ipa_l2tp_hdr_proc_ctx_params l2tp_params){ if (type == IPA_HDR_PROC_NONE) { struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx; @@ -1174,6 +1176,58 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type, ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; ctx->end.length = 0; ctx->end.value = 0; + } else if (type == IPA_HDR_PROC_L2TP_HEADER_ADD) { + struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x\n", + ctx->hdr_add.hdr_addr); + ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->l2tp_params.tlv.length = 1; + ctx->l2tp_params.tlv.value = + IPA_HDR_UCP_L2TP_HEADER_ADD; + ctx->l2tp_params.l2tp_params.eth_hdr_retained = + l2tp_params.hdr_add_param.eth_hdr_retained; + ctx->l2tp_params.l2tp_params.input_ip_version = + l2tp_params.hdr_add_param.input_ip_version; + ctx->l2tp_params.l2tp_params.output_ip_version = + l2tp_params.hdr_add_param.output_ip_version; + + IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; + } else if (type == IPA_HDR_PROC_L2TP_HEADER_REMOVE) { + struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *ctx; + + ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *) + (base + offset); + ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD; + ctx->hdr_add.tlv.length = 1; + ctx->hdr_add.tlv.value = hdr_len; + ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? 
phys_base : + hdr_base_addr + offset_entry->offset; + IPAHAL_DBG("header address 0x%x length %d\n", + ctx->hdr_add.hdr_addr, ctx->hdr_add.tlv.value); + ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD; + ctx->l2tp_params.tlv.length = 1; + ctx->l2tp_params.tlv.value = + IPA_HDR_UCP_L2TP_HEADER_REMOVE; + ctx->l2tp_params.l2tp_params.hdr_len_remove = + l2tp_params.hdr_remove_param.hdr_len_remove; + ctx->l2tp_params.l2tp_params.eth_hdr_retained = + l2tp_params.hdr_remove_param.eth_hdr_retained; + + IPAHAL_DBG("command id %d\n", ctx->l2tp_params.tlv.value); + ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END; + ctx->end.length = 0; + ctx->end.value = 0; } else { struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx; @@ -1242,7 +1296,8 @@ struct ipahal_hdr_funcs { void *const base, u32 offset, u32 hdr_len, bool is_hdr_proc_ctx, dma_addr_t phys_base, u32 hdr_base_addr, - struct ipa_hdr_offset_entry *offset_entry); + struct ipa_hdr_offset_entry *offset_entry, + union ipa_l2tp_hdr_proc_ctx_params l2tp_params); int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type); }; @@ -1307,11 +1362,13 @@ void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr, * @phys_base: memory location in DDR * @hdr_base_addr: base address in table * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters */ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, void *const base, u32 offset, u32 hdr_len, bool is_hdr_proc_ctx, dma_addr_t phys_base, - u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry) + u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry, + union ipa_l2tp_hdr_proc_ctx_params l2tp_params) { IPAHAL_DBG( "type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n" @@ -1332,7 +1389,7 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset, hdr_len, is_hdr_proc_ctx, phys_base, - hdr_base_addr, offset_entry); + hdr_base_addr, offset_entry, l2tp_params); } /* diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h index f8bdc2cb6d14..724cdec18285 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h @@ -625,12 +625,14 @@ void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len); * @phys_base: memory location in DDR * @hdr_base_addr: base address in table * @offset_entry: offset from hdr_base_addr in table + * @l2tp_params: l2tp parameters */ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type, void *base, u32 offset, u32 hdr_len, bool is_hdr_proc_ctx, dma_addr_t phys_base, u32 hdr_base_addr, - struct ipa_hdr_offset_entry *offset_entry); + struct ipa_hdr_offset_entry *offset_entry, + union ipa_l2tp_hdr_proc_ctx_params l2tp_params); /* * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h index c023082622bd..1c4b28728e82 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h @@ -555,6 +555,8 @@ struct ipa_pkt_status_hw { #define IPA_HDR_UCP_802_3_TO_ETHII 7 #define IPA_HDR_UCP_ETHII_TO_802_3 8 #define IPA_HDR_UCP_ETHII_TO_ETHII 9 +#define IPA_HDR_UCP_L2TP_HEADER_ADD 10 +#define IPA_HDR_UCP_L2TP_HEADER_REMOVE 11 /* Processing context TLV type */ #define IPA_PROC_CTX_TLV_TYPE_END 0 @@ -595,6 
+597,28 @@ struct ipa_hw_hdr_proc_ctx_hdr_add { u32 hdr_addr; }; +/** + * struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr - + * HW structure of IPA processing context - add l2tp header tlv + * @tlv: IPA processing context TLV + * @l2tp_params: l2tp parameters + */ +struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr { + struct ipa_hw_hdr_proc_ctx_tlv tlv; + struct ipa_l2tp_header_add_procparams l2tp_params; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr - + * HW structure of IPA processing context - remove l2tp header tlv + * @tlv: IPA processing context TLV + * @l2tp_params: l2tp parameters + */ +struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr { + struct ipa_hw_hdr_proc_ctx_tlv tlv; + struct ipa_l2tp_header_remove_procparams l2tp_params; +}; + /** * struct ipa_hw_hdr_proc_ctx_add_hdr_seq - * IPA processing context header - add header sequence @@ -619,6 +643,32 @@ struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq { struct ipa_hw_hdr_proc_ctx_tlv end; }; +/** + * struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add header command + * @l2tp_params: l2tp params for header addition + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr l2tp_params; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + +/** + * struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq - + * IPA processing context header - process command sequence + * @hdr_add: add header command + * @l2tp_params: l2tp params for header removal + * @end: tlv end command (cmd.type must be 0) + */ +struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq { + struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add; + struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr l2tp_params; + struct ipa_hw_hdr_proc_ctx_tlv end; +}; + /* IPA HW DPS/HPS image memory sizes */ #define IPA_HW_DPS_IMG_MEM_SIZE_V3_0 128 #define IPA_HW_HPS_IMG_MEM_SIZE_V3_0 320 diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 48cfe3165540..5410ec958e30 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -250,9 +250,12 @@ enum ipa_client_type { IPA_CLIENT_TEST4_PROD = 70, IPA_CLIENT_TEST4_CONS = 71, + + /* RESERVERD PROD = 72, */ + IPA_CLIENT_DUMMY_CONS = 73 }; -#define IPA_CLIENT_MAX (IPA_CLIENT_TEST4_CONS + 1) +#define IPA_CLIENT_MAX (IPA_CLIENT_DUMMY_CONS + 1) #define IPA_CLIENT_IS_APPS_CONS(client) \ ((client) == IPA_CLIENT_APPS_LAN_CONS || \ @@ -784,8 +787,10 @@ enum ipa_hdr_proc_type { IPA_HDR_PROC_ETHII_TO_802_3, IPA_HDR_PROC_802_3_TO_ETHII, IPA_HDR_PROC_802_3_TO_802_3, + IPA_HDR_PROC_L2TP_HEADER_ADD, + IPA_HDR_PROC_L2TP_HEADER_REMOVE }; -#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_802_3_TO_802_3 + 1) +#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_L2TP_HEADER_REMOVE + 1) /** * struct ipa_rt_rule - attributes of a routing rule @@ -855,11 +860,46 @@ struct ipa_ioc_add_hdr { struct ipa_hdr_add hdr[0]; }; +/** + * struct ipa_l2tp_header_add_procparams - + * @eth_hdr_retained: Specifies if Ethernet header is retained or not + * @input_ip_version: Specifies if Input header is IPV4(0) or IPV6(1) + * @output_ip_version: Specifies if template header is IPV4(0) or IPV6(1) + */ +struct ipa_l2tp_header_add_procparams { + uint32_t eth_hdr_retained:1; + uint32_t input_ip_version:1; + uint32_t output_ip_version:1; + uint32_t reserved:29; +}; + +/** + * struct ipa_l2tp_header_remove_procparams - + * @hdr_len_remove: Specifies how much of the header needs to + be removed in bytes + * 
@eth_hdr_retained: Specifies if Ethernet header is retained or not + */ +struct ipa_l2tp_header_remove_procparams { + uint32_t hdr_len_remove:8; + uint32_t eth_hdr_retained:1; + uint32_t reserved:23; +}; + +/** + * union ipa_l2tp_hdr_proc_ctx_params - + * @hdr_add_param: parameters for header add + * @hdr_remove_param: parameters for header remove + */ +union ipa_l2tp_hdr_proc_ctx_params { + struct ipa_l2tp_header_add_procparams hdr_add_param; + struct ipa_l2tp_header_remove_procparams hdr_remove_param; +}; /** * struct ipa_hdr_proc_ctx_add - processing context descriptor includes * in and out parameters * @type: processing context type * @hdr_hdl: in parameter, handle to header + * @l2tp_params: l2tp parameters * @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0 * @status: out parameter, status of header add operation, * 0 for success, @@ -870,8 +910,11 @@ struct ipa_hdr_proc_ctx_add { uint32_t hdr_hdl; uint32_t proc_ctx_hdl; int status; + union ipa_l2tp_hdr_proc_ctx_params l2tp_params; }; +#define IPA_L2TP_HDR_PROC_SUPPORT + /** * struct ipa_ioc_add_hdr - processing context addition parameters (support * multiple processing context and commit) -- GitLab From 65a4a415531fa2625ec3915a5365f2092d5688d0 Mon Sep 17 00:00:00 2001 From: Shihuan Liu Date: Mon, 22 May 2017 18:30:28 -0700 Subject: [PATCH 728/786] msm: ipa: add new IPA filtering bitmap Add new IPA filtering bitmap IPA_FLT_MAC_DST_ADDR_L2TP. Change-Id: Ia31b8d437d51cb7142cccef3d37630f4912064ab Acked-by: Shihuan Liu Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 3 +- .../msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c | 132 ++++++++++++++++++ include/uapi/linux/msm_ipa.h | 1 + 3 files changed, 135 insertions(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index 2a7b9771baa7..1215d4ef1446 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -524,7 +524,8 @@ static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib, } if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) || - (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) { + (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) || + (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP)) { pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr); } diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c index 4f20e0ff9b2d..2253b4bab5a1 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c @@ -797,6 +797,38 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule, ihl_ofst_meq32++; } + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate first ihl meq eq */ + extra = ipa_write_8(8, extra); + rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest); + rest = ipa_write_8(attrib->dst_mac_addr[3], rest); + rest = 
ipa_write_8(attrib->dst_mac_addr[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr[0], rest); + /* populate second ihl meq eq */ + extra = ipa_write_8(12, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr[4], rest); + ihl_ofst_meq32 += 2; + } + if (attrib->attrib_mask & IPA_FLT_META_DATA) { *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE); rest = ipa_write_32(attrib->meta_data_mask, rest); @@ -1103,6 +1135,38 @@ static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule, ihl_ofst_meq32++; } + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + goto err; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate first ihl meq eq */ + extra = ipa_write_8(8, extra); + rest = ipa_write_8(attrib->dst_mac_addr_mask[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[0], rest); + rest = ipa_write_8(attrib->dst_mac_addr[3], rest); + rest = ipa_write_8(attrib->dst_mac_addr[2], rest); + rest = ipa_write_8(attrib->dst_mac_addr[1], rest); + rest = ipa_write_8(attrib->dst_mac_addr[0], rest); + /* populate second ihl meq eq */ + extra = ipa_write_8(12, extra); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr_mask[4], rest); + rest = ipa_write_16(0, rest); + rest = ipa_write_8(attrib->dst_mac_addr[5], rest); + rest = ipa_write_8(attrib->dst_mac_addr[4], rest); + ihl_ofst_meq32 += 2; + } + if (attrib->attrib_mask & IPA_FLT_META_DATA) { *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE); rest = ipa_write_32(attrib->meta_data_mask, rest); @@ -1613,6 +1677,40 @@ static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip, ofst_meq128++; } + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate the first ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + (attrib->dst_mac_addr_mask[3] & 0xFF) | + ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + (attrib->dst_mac_addr[3] & 0xFF) | + ((attrib->dst_mac_addr[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[0] << 24) & 0xFF000000); + /* populate the second ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask = + 
((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value = + ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[4] << 24) & 0xFF000000); + ihl_ofst_meq32 += 2; + } + if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) { if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { IPAHAL_ERR("ran out of meq32 eq\n"); @@ -1976,6 +2074,40 @@ static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip, ofst_meq128++; } + if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_L2TP) { + if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32, + ihl_ofst_meq32) || IPA_IS_RAN_OUT_OF_EQ( + ipa3_0_ihl_ofst_meq32, ihl_ofst_meq32 + 1)) { + IPAHAL_ERR("ran out of ihl_meq32 eq\n"); + return -EPERM; + } + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]); + *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN( + ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32 + 1]); + /* populate the first ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 8; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = + (attrib->dst_mac_addr_mask[3] & 0xFF) | + ((attrib->dst_mac_addr_mask[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr_mask[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[0] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value = + (attrib->dst_mac_addr[3] & 0xFF) | + ((attrib->dst_mac_addr[2] << 8) & 0xFF00) | + ((attrib->dst_mac_addr[1] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[0] << 24) & 0xFF000000); + /* populate the second ihl meq 32 eq */ + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].offset = 12; + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].mask = + ((attrib->dst_mac_addr_mask[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr_mask[4] << 24) & 0xFF000000); + eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32 + 1].value = + ((attrib->dst_mac_addr[5] << 16) & 0xFF0000) | + ((attrib->dst_mac_addr[4] << 24) & 0xFF000000); + ihl_ofst_meq32 += 2; + } + if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) { if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) { IPAHAL_ERR("ran out of meq32 eq\n"); diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 48cfe3165540..21936c51a94b 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -127,6 +127,7 @@ #define IPA_FLT_MAC_SRC_ADDR_802_3 (1ul << 19) #define IPA_FLT_MAC_DST_ADDR_802_3 (1ul << 20) #define IPA_FLT_MAC_ETHER_TYPE (1ul << 21) +#define IPA_FLT_MAC_DST_ADDR_L2TP (1ul << 22) /** * maximal number of NAT PDNs in the PDN config table -- GitLab From 213bad475a5dec476bc3ca97e656af5b26018bfe Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 5 Jul 2017 13:04:12 +0800 Subject: [PATCH 729/786] Revert "mmc: enable MMC/SD/SDIO device to suspend/resume asynchronously" Commit ec076cd226c3 ("mmc: enable MMC/SD/SDIO device to suspend/resume asynchronously") enables async suspend of mmc card device. Kernel shall bring mmc card device into suspend asynchronously from mmc host and sdhci msm host. During system suspend, kernel power management framework makes sure a parent device wait for its direct children devices, which in the device power management (dpm) list and have async suspend enabled, to finish their suspending before it starts suspending. In our case, ideally, sdhci msm host waits for mmc host, while mmc host waits for mmc card. However, mmc host is not added into the dpm list since it does not have pm functions implemented. 
Then the dependency is broken as sdhci msm host does not wait for mmc card. Sdhci msm host may start to suspend and disable IRQ before mmc card finishes suspending, which causes command interrupt timeout during system suspend. This change reverts it. Change-Id: I779afcc3f9d06b541e5f3314edf746f73a2cb7dd Signed-off-by: Can Guo --- drivers/mmc/core/bus.c | 2 -- drivers/mmc/core/sdio_bus.c | 1 - 2 files changed, 3 deletions(-) diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index e3696c5c2dac..a531cb467c30 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -398,8 +398,6 @@ int mmc_add_card(struct mmc_card *card) mmc_hostname(card->host), __func__, ret); } - device_enable_async_suspend(&card->dev); - ret = device_add(&card->dev); if (ret) return ret; diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 1499d5333c79..e32ed3d28b06 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -333,7 +333,6 @@ int sdio_add_func(struct sdio_func *func) sdio_set_of_node(func); sdio_acpi_set_handle(func); - device_enable_async_suspend(&func->dev); ret = device_add(&func->dev); if (ret == 0) sdio_func_set_present(func); -- GitLab From facf3928ab37164f9488a34325d0b076f8ed8f80 Mon Sep 17 00:00:00 2001 From: Anurag Chouhan Date: Thu, 8 Jun 2017 18:26:56 +0530 Subject: [PATCH 730/786] icnss: Change MSA permissions to collect dump In certain scenarios, such as an error fatal or a watchdog (WDOG) bite in the modem, the WLAN hardware is still alive. While trying to collect the dump, the platform driver removes the MSA permissions from MSS and the WLAN hardware and hands the region to HLOS; if the WLAN hardware accesses the MSA region at the same time, this results in an SNOC error. To avoid such scenarios, instead of removing the MSS and WLAN permissions, the MSA is additionally assigned HLOS permissions, and once the ramdump is collected the HLOS permissions are removed.
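Condensed, the dump-collection sequence implemented below becomes (names are the ones this patch introduces; error handling trimmed):

/* On SUBSYS_AFTER_SHUTDOWN: add HLOS read access alongside the WLAN/MSS
 * mappings, collect the dump, then drop the HLOS access again. */
ret = icnss_assign_msa_perm_all(priv, ICNSS_MSA_PERM_DUMP_COLLECT);
if (!ret) {
	icnss_msa0_ramdump(priv);
	icnss_assign_msa_perm_all(priv, ICNSS_MSA_PERM_WLAN_HW_RW);
}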
Change-Id: Ic71e0fa8c064fd70dad9958187244909cbb80c0a CRs-fixed: 2048531 Signed-off-by: Anurag Chouhan --- drivers/soc/qcom/icnss.c | 293 +++++++++++++++++++++++---------------- include/soc/qcom/icnss.h | 7 - 2 files changed, 170 insertions(+), 130 deletions(-) diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c index b5bb719df848..28f89bfac7c6 100644 --- a/drivers/soc/qcom/icnss.c +++ b/drivers/soc/qcom/icnss.c @@ -168,6 +168,76 @@ enum icnss_driver_event_type { ICNSS_DRIVER_EVENT_MAX, }; +enum icnss_msa_perm { + ICNSS_MSA_PERM_HLOS_ALL = 0, + ICNSS_MSA_PERM_WLAN_HW_RW = 1, + ICNSS_MSA_PERM_DUMP_COLLECT = 2, + ICNSS_MSA_PERM_MAX, +}; + +#define ICNSS_MAX_VMIDS 4 + +struct icnss_mem_region_info { + uint64_t reg_addr; + uint32_t size; + uint8_t secure_flag; + enum icnss_msa_perm perm; +}; + +struct icnss_msa_perm_list_t { + int vmids[ICNSS_MAX_VMIDS]; + int perms[ICNSS_MAX_VMIDS]; + int nelems; +}; + +struct icnss_msa_perm_list_t msa_perm_secure_list[ICNSS_MSA_PERM_MAX] = { + [ICNSS_MSA_PERM_HLOS_ALL] = { + .vmids = {VMID_HLOS}, + .perms = {PERM_READ | PERM_WRITE | PERM_EXEC}, + .nelems = 1, + }, + + [ICNSS_MSA_PERM_WLAN_HW_RW] = { + .vmids = {VMID_MSS_MSA, VMID_WLAN}, + .perms = {PERM_READ | PERM_WRITE, + PERM_READ | PERM_WRITE}, + .nelems = 2, + }, + + [ICNSS_MSA_PERM_DUMP_COLLECT] = { + .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_HLOS}, + .perms = {PERM_READ | PERM_WRITE, + PERM_READ | PERM_WRITE, + PERM_READ}, + .nelems = 3, + }, +}; + +struct icnss_msa_perm_list_t msa_perm_list[ICNSS_MSA_PERM_MAX] = { + [ICNSS_MSA_PERM_HLOS_ALL] = { + .vmids = {VMID_HLOS}, + .perms = {PERM_READ | PERM_WRITE | PERM_EXEC}, + .nelems = 1, + }, + + [ICNSS_MSA_PERM_WLAN_HW_RW] = { + .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE}, + .perms = {PERM_READ | PERM_WRITE, + PERM_READ | PERM_WRITE, + PERM_READ | PERM_WRITE}, + .nelems = 3, + }, + + [ICNSS_MSA_PERM_DUMP_COLLECT] = { + .vmids = {VMID_MSS_MSA, VMID_WLAN, VMID_WLAN_CE, VMID_HLOS}, + .perms = {PERM_READ | PERM_WRITE, + PERM_READ | PERM_WRITE, + PERM_READ | PERM_WRITE, + PERM_READ}, + .nelems = 4, + }, +}; + struct icnss_event_pd_service_down_data { bool crashed; bool fw_rejuvenate; @@ -375,6 +445,84 @@ static void icnss_ignore_qmi_timeout(bool ignore) static void icnss_ignore_qmi_timeout(bool ignore) { } #endif +static int icnss_assign_msa_perm(struct icnss_mem_region_info + *mem_region, enum icnss_msa_perm new_perm) +{ + int ret = 0; + phys_addr_t addr; + u32 size; + u32 i = 0; + u32 source_vmids[ICNSS_MAX_VMIDS]; + u32 source_nelems; + u32 dest_vmids[ICNSS_MAX_VMIDS]; + u32 dest_perms[ICNSS_MAX_VMIDS]; + u32 dest_nelems; + enum icnss_msa_perm cur_perm = mem_region->perm; + struct icnss_msa_perm_list_t *new_perm_list, *old_perm_list; + + addr = mem_region->reg_addr; + size = mem_region->size; + + if (mem_region->secure_flag) { + new_perm_list = &msa_perm_secure_list[new_perm]; + old_perm_list = &msa_perm_secure_list[cur_perm]; + } else { + new_perm_list = &msa_perm_list[new_perm]; + old_perm_list = &msa_perm_list[cur_perm]; + } + + source_nelems = old_perm_list->nelems; + dest_nelems = new_perm_list->nelems; + + for (i = 0; i < source_nelems; ++i) + source_vmids[i] = old_perm_list->vmids[i]; + + for (i = 0; i < dest_nelems; ++i) { + dest_vmids[i] = new_perm_list->vmids[i]; + dest_perms[i] = new_perm_list->perms[i]; + } + + ret = hyp_assign_phys(addr, size, source_vmids, source_nelems, + dest_vmids, dest_perms, dest_nelems); + if (ret) { + icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n", + &addr, size, ret); + goto out; + } + 
+ icnss_pr_dbg("Hypervisor map for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x," + "source[3]=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x, dest[3]=%x\n", + source_nelems, source_vmids[0], source_vmids[1], + source_vmids[2], source_vmids[3], dest_nelems, + dest_vmids[0], dest_vmids[1], dest_vmids[2], + dest_vmids[3]); +out: + return ret; +} + +static int icnss_assign_msa_perm_all(struct icnss_priv *priv, + enum icnss_msa_perm new_perm) +{ + int ret; + int i; + enum icnss_msa_perm old_perm; + + for (i = 0; i < priv->nr_mem_region; i++) { + old_perm = priv->mem_region[i].perm; + ret = icnss_assign_msa_perm(&priv->mem_region[i], new_perm); + if (ret) + goto err_unmap; + priv->mem_region[i].perm = new_perm; + } + return 0; + +err_unmap: + for (i--; i >= 0; i--) { + icnss_assign_msa_perm(&priv->mem_region[i], old_perm); + } + return ret; +} + static void icnss_pm_stay_awake(struct icnss_priv *priv) { if (atomic_inc_return(&priv->pm_count) != 1) @@ -980,119 +1128,6 @@ int icnss_power_off(struct device *dev) } EXPORT_SYMBOL(icnss_power_off); -static int icnss_map_msa_permissions(struct icnss_mem_region_info *mem_region) -{ - int ret = 0; - phys_addr_t addr; - u32 size; - u32 source_vmlist[1] = {VMID_HLOS}; - int dest_vmids[3] = {VMID_MSS_MSA, VMID_WLAN, 0}; - int dest_perms[3] = {PERM_READ|PERM_WRITE, - PERM_READ|PERM_WRITE, - PERM_READ|PERM_WRITE}; - int source_nelems = sizeof(source_vmlist)/sizeof(u32); - int dest_nelems = 0; - - addr = mem_region->reg_addr; - size = mem_region->size; - - if (!mem_region->secure_flag) { - dest_vmids[2] = VMID_WLAN_CE; - dest_nelems = 3; - } else { - dest_vmids[2] = 0; - dest_nelems = 2; - } - ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems, - dest_vmids, dest_perms, dest_nelems); - if (ret) { - icnss_pr_err("Hyperviser map failed for PA=%pa size=%u err=%d\n", - &addr, size, ret); - goto out; - } - - icnss_pr_dbg("Hypervisor map for source=%x, dest_nelems=%d, dest[0]=%x, dest[1]=%x, dest[2]=%x\n", - source_vmlist[0], dest_nelems, dest_vmids[0], - dest_vmids[1], dest_vmids[2]); -out: - return ret; - -} - -static int icnss_unmap_msa_permissions(struct icnss_mem_region_info *mem_region) -{ - int ret = 0; - phys_addr_t addr; - u32 size; - u32 dest_vmids[1] = {VMID_HLOS}; - int source_vmlist[3] = {VMID_MSS_MSA, VMID_WLAN, 0}; - int dest_perms[1] = {PERM_READ|PERM_WRITE|PERM_EXEC}; - int source_nelems = 0; - int dest_nelems = sizeof(dest_vmids)/sizeof(u32); - - addr = mem_region->reg_addr; - size = mem_region->size; - - if (!mem_region->secure_flag) { - source_vmlist[2] = VMID_WLAN_CE; - source_nelems = 3; - } else { - source_vmlist[2] = 0; - source_nelems = 2; - } - - ret = hyp_assign_phys(addr, size, source_vmlist, source_nelems, - dest_vmids, dest_perms, dest_nelems); - if (ret) { - icnss_pr_err("Hyperviser unmap failed for PA=%pa size=%u err=%d\n", - &addr, size, ret); - goto out; - } - icnss_pr_dbg("Hypervisor unmap for source_nelems=%d, source[0]=%x, source[1]=%x, source[2]=%x, dest=%x\n", - source_nelems, source_vmlist[0], source_vmlist[1], - source_vmlist[2], dest_vmids[0]); -out: - return ret; -} - -static int icnss_setup_msa_permissions(struct icnss_priv *priv) -{ - int ret; - int i; - - if (test_bit(ICNSS_MSA0_ASSIGNED, &priv->state)) - return 0; - - for (i = 0; i < priv->nr_mem_region; i++) { - - ret = icnss_map_msa_permissions(&priv->mem_region[i]); - if (ret) - goto err_unmap; - } - - set_bit(ICNSS_MSA0_ASSIGNED, &priv->state); - - return 0; - -err_unmap: - for (i--; i >= 0; i--) - 
icnss_unmap_msa_permissions(&priv->mem_region[i]); - return ret; -} - -static void icnss_remove_msa_permissions(struct icnss_priv *priv) -{ - int i; - - if (!test_bit(ICNSS_MSA0_ASSIGNED, &priv->state)) - return; - - for (i = 0; i < priv->nr_mem_region; i++) - icnss_unmap_msa_permissions(&priv->mem_region[i]); - - clear_bit(ICNSS_MSA0_ASSIGNED, &priv->state); -} - static int wlfw_msa_mem_info_send_sync_msg(void) { int ret; @@ -1898,9 +1933,12 @@ static int icnss_driver_event_server_arrive(void *data) if (ret < 0) goto err_power_on; - ret = icnss_setup_msa_permissions(penv); - if (ret < 0) - goto err_power_on; + if (!test_bit(ICNSS_MSA0_ASSIGNED, &penv->state)) { + ret = icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_WLAN_HW_RW); + if (ret < 0) + goto err_power_on; + set_bit(ICNSS_MSA0_ASSIGNED, &penv->state); + } ret = wlfw_msa_ready_send_sync_msg(); if (ret < 0) @@ -1918,7 +1956,7 @@ static int icnss_driver_event_server_arrive(void *data) return ret; err_setup_msa: - icnss_remove_msa_permissions(penv); + icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL); err_power_on: icnss_hw_power_off(penv); fail: @@ -2333,14 +2371,22 @@ static int icnss_modem_notifier_nb(struct notifier_block *nb, struct icnss_priv *priv = container_of(nb, struct icnss_priv, modem_ssr_nb); struct icnss_uevent_fw_down_data fw_down_data; + int ret = 0; icnss_pr_vdbg("Modem-Notify: event %lu\n", code); - if (code == SUBSYS_AFTER_SHUTDOWN && - notif->crashed == CRASH_STATUS_ERR_FATAL) { - icnss_remove_msa_permissions(priv); - icnss_pr_info("Collecting msa0 segment dump\n"); - icnss_msa0_ramdump(priv); + if (code == SUBSYS_AFTER_SHUTDOWN) { + ret = icnss_assign_msa_perm_all(priv, + ICNSS_MSA_PERM_DUMP_COLLECT); + if (!ret) { + icnss_pr_info("Collecting msa0 segment dump\n"); + icnss_msa0_ramdump(priv); + icnss_assign_msa_perm_all(priv, + ICNSS_MSA_PERM_WLAN_HW_RW); + } else { + icnss_pr_err("Not able to Collect msa0 segment dump" + "Apps permissions not assigned %d\n", ret); + } return NOTIFY_OK; } @@ -4307,7 +4353,8 @@ static int icnss_remove(struct platform_device *pdev) icnss_hw_power_off(penv); - icnss_remove_msa_permissions(penv); + icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL); + clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state); dev_set_drvdata(&pdev->dev, NULL); diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h index 549cb84b592c..3527c3526a86 100644 --- a/include/soc/qcom/icnss.h +++ b/include/soc/qcom/icnss.h @@ -77,13 +77,6 @@ struct icnss_wlan_enable_cfg { struct icnss_shadow_reg_cfg *shadow_reg_cfg; }; -/* MSA Memory Regions Information */ -struct icnss_mem_region_info { - uint64_t reg_addr; - uint32_t size; - uint8_t secure_flag; -}; - /* driver modes */ enum icnss_driver_mode { ICNSS_MISSION, -- GitLab From b668b4ef09b7481d3c5e3e6c3d2b5f6d357419de Mon Sep 17 00:00:00 2001 From: Deepak Kumar Date: Wed, 25 Jan 2017 18:53:55 +0530 Subject: [PATCH 731/786] adreno_tz: Correct tz_buf pointer type to correct pointer arithmetic Current tz_buf pointer type is causing the end address calcultion of tz_buf to go wrong. "end_addr = tz_buf + PAGE_ALIGN(size)" is resulting in an end address way beyond the allocated range because tz_buf is of type unsigned int. This change changes the tz_buf pointer type to u8 to fix this issue. 
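
To make the arithmetic concrete, here is a minimal standalone sketch (userspace,
illustrative buffer size only, not driver code) showing why an end address computed
from an unsigned int pointer lands well past the allocation while a byte pointer
gives the intended end:

  /* Pointer arithmetic scales by the pointee size, so ptr + size overshoots
   * by sizeof(unsigned int) when ptr has type unsigned int *.
   */
  #include <stdio.h>
  #include <stddef.h>

  int main(void)
  {
          static char buf[4096];
          size_t size = sizeof(buf);
          unsigned int *as_uint = (unsigned int *)buf;   /* old tz_buf type */
          unsigned char *as_byte = (unsigned char *)buf; /* new tz_buf type (u8) */

          /* as_uint + size advances size * sizeof(unsigned int) bytes */
          printf("unsigned int *: end is %td bytes past start\n",
                 (char *)(as_uint + size) - buf);        /* 16384, past the buffer */

          /* as_byte + size advances exactly size bytes */
          printf("u8 *:           end is %td bytes past start\n",
                 (char *)(as_byte + size) - buf);        /* 4096, the intended end */
          return 0;
  }
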
Change-Id: I16db09c565801b56c0c0ee8a8184f6e276512fa3 Signed-off-by: Deepak Kumar --- drivers/devfreq/governor_msm_adreno_tz.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c index e8bfff2a259d..3c50c4e26c0e 100644 --- a/drivers/devfreq/governor_msm_adreno_tz.c +++ b/drivers/devfreq/governor_msm_adreno_tz.c @@ -236,7 +236,7 @@ static int tz_init_ca(struct devfreq_msm_adreno_tz_data *priv) { unsigned int tz_ca_data[2]; struct scm_desc desc = {0}; - unsigned int *tz_buf; + u8 *tz_buf; int ret; /* Set data for TZ */ @@ -281,7 +281,7 @@ static int tz_init(struct devfreq_msm_adreno_tz_data *priv, scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) && scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) { struct scm_desc desc = {0}; - unsigned int *tz_buf; + u8 *tz_buf; if (!is_scm_armv8()) { ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID_64, -- GitLab From 43779b0f9eeb21a30f79d2e61de49cc5462419e5 Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Wed, 28 Jun 2017 14:34:02 -0600 Subject: [PATCH 732/786] drivers: qcom: rpmh: Do not send active votes in solver mode When the controller is in solver mode, the RSC sequencer takes over the control and s/w cannot send requests and receive interrupt response. However, it is okay to write the sleep and wake votes. Check for the state for the controller and the request in hand to see if its okay to pass the request to the controller. Also, make the check for all variants of the API, not just write_passthru. Change-Id: I803ea0fe1c85df17763cc9700cc87816572470f3 Signed-off-by: Lina Iyer --- drivers/soc/qcom/rpmh.c | 46 ++++++++++++++++++++++++++++++++--------- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c index 306510f700d6..9a980636dd8c 100644 --- a/drivers/soc/qcom/rpmh.c +++ b/drivers/soc/qcom/rpmh.c @@ -227,6 +227,21 @@ static struct rpmh_req *cache_rpm_request(struct rpmh_client *rc, return req; } +static int check_ctrlr_state(struct rpmh_client *rc, enum rpmh_state state) +{ + struct rpmh_mbox *rpm = rc->rpmh; + unsigned long flags; + int ret = 0; + + /* Do not allow setting active votes when in solver mode */ + spin_lock_irqsave(&rpm->lock, flags); + if (rpm->in_solver_mode && state == RPMH_AWAKE_STATE) + ret = -EBUSY; + spin_unlock_irqrestore(&rpm->lock, flags); + + return ret; +} + /** * __rpmh_write: Cache and send the RPMH request * @@ -282,6 +297,7 @@ int rpmh_write_single_async(struct rpmh_client *rc, enum rpmh_state state, u32 addr, u32 data) { struct rpmh_msg *rpm_msg; + int ret; if (IS_ERR_OR_NULL(rc)) return -EINVAL; @@ -289,6 +305,10 @@ int rpmh_write_single_async(struct rpmh_client *rc, enum rpmh_state state, if (rpmh_standalone) return 0; + ret = check_ctrlr_state(rc, state); + if (ret) + return ret; + rpm_msg = get_msg_from_pool(rc); if (!rpm_msg) return -ENOMEM; @@ -333,6 +353,10 @@ int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state, if (rpmh_standalone) return 0; + ret = check_ctrlr_state(rc, state); + if (ret) + return ret; + rpm_msg.cmd[0].addr = addr; rpm_msg.cmd[0].data = data; rpm_msg.msg.num_payload = 1; @@ -385,10 +409,15 @@ int rpmh_write_async(struct rpmh_client *rc, enum rpmh_state state, struct tcs_cmd *cmd, int n) { struct rpmh_msg *rpm_msg; + int ret; if (rpmh_standalone) return 0; + ret = check_ctrlr_state(rc, state); + if (ret) + return ret; + rpm_msg = __get_rpmh_msg_async(rc, state, cmd, n); if (IS_ERR(rpm_msg)) return PTR_ERR(rpm_msg); @@ 
-429,6 +458,10 @@ int rpmh_write(struct rpmh_client *rc, enum rpmh_state state, if (rpmh_standalone) return 0; + ret = check_ctrlr_state(rc, state); + if (ret) + return ret; + memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd)); rpm_msg.msg.num_payload = n; @@ -467,8 +500,6 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state, int count = 0; int ret, i, j, k; bool complete_set; - unsigned long flags; - struct rpmh_mbox *rpm; if (IS_ERR_OR_NULL(rc) || !cmd || !n) return -EINVAL; @@ -476,14 +507,9 @@ int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state, if (rpmh_standalone) return 0; - /* Do not allow setting wake votes when in solver mode */ - rpm = rc->rpmh; - spin_lock_irqsave(&rpm->lock, flags); - if (rpm->in_solver_mode && state == RPMH_WAKE_ONLY_STATE) { - spin_unlock_irqrestore(&rpm->lock, flags); - return -EIO; - } - spin_unlock_irqrestore(&rpm->lock, flags); + ret = check_ctrlr_state(rc, state); + if (ret) + return ret; while (n[count++]) ; -- GitLab From af541e19b6329d4d27630a5d51472f7daf9885e7 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Wed, 28 Jun 2017 20:16:15 -0700 Subject: [PATCH 733/786] cpufreq: schedutil: Add freq_to_util helper function Refactor repetitive code into a function. Change-Id: I30d53adf0cb0c4ce099f7f1c25cbc8c2f6473769 Signed-off-by: Saravana Kannan --- kernel/sched/cpufreq_schedutil.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 0a0e9aa9526a..2d1be332fd70 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -225,6 +225,13 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util, *util = max(*util, sg_cpu->walt_load.pl); } +static unsigned long freq_to_util(struct sugov_policy *sg_policy, + unsigned int freq) +{ + return mult_frac(sg_policy->max, freq, + sg_policy->policy->cpuinfo.max_freq); +} + static void sugov_update_single(struct update_util_data *hook, u64 time, unsigned int flags) { @@ -322,12 +329,11 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time, raw_spin_lock(&sg_policy->update_lock); if (sg_policy->max != max) { - hs_util = mult_frac(max, - sg_policy->tunables->hispeed_freq, - sg_policy->policy->cpuinfo.max_freq); + sg_policy->max = max; + hs_util = freq_to_util(sg_policy, + sg_policy->tunables->hispeed_freq); hs_util = mult_frac(hs_util, TARGET_LOAD, 100); sg_policy->hispeed_util = hs_util; - sg_policy->max = max; } sg_cpu->util = util; @@ -438,9 +444,8 @@ static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set, tunables->hispeed_freq = val; list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) { - hs_util = mult_frac(sg_policy->max, - sg_policy->tunables->hispeed_freq, - sg_policy->policy->cpuinfo.max_freq); + hs_util = freq_to_util(sg_policy, + sg_policy->tunables->hispeed_freq); hs_util = mult_frac(hs_util, TARGET_LOAD, 100); sg_policy->hispeed_util = hs_util; } -- GitLab From 0f34ee93d05607a405449f3124a7c2c6a92fbe77 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Wed, 28 Jun 2017 21:44:14 -0700 Subject: [PATCH 734/786] cpufreq: schedutil: Keep track of average policy capacity Average policy capacity will be used in future patches to improve detection of hispeed load condition. 
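
For intuition, a worked example of the window-averaged capacity computed below
(all numbers are hypothetical; the real window length comes from sched_ravg_window
and the frequencies from cpufreq):

  window             = 20 ms
  10 ms at 1.6 GHz   = 16,000,000 cycles
  10 ms at 0.8 GHz   =  8,000,000 cycles
  avg_freq           = 24,000,000 cycles / 20 ms = 1.2 GHz
  avg_cap            = max_capacity * 1.2 GHz / cpuinfo.max_freq

That is, avg_cap reflects the time-weighted average frequency over the last window
rather than the instantaneous frequency.
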
Change-Id: Icab992243b83eb5feaae619d16b22510010f54c5 Signed-off-by: Saravana Kannan --- kernel/sched/cpufreq_schedutil.c | 69 ++++++++++++++++++++++++++++---- 1 file changed, 62 insertions(+), 7 deletions(-) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 2d1be332fd70..c7f953c2adf3 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -36,6 +36,10 @@ struct sugov_policy { raw_spinlock_t update_lock; /* For shared policies */ u64 last_freq_update_time; s64 freq_update_delay_ns; + u64 last_ws; + u64 curr_cycles; + u64 last_cyc_update_time; + unsigned long avg_cap; unsigned int next_freq; unsigned int cached_raw_freq; unsigned long hispeed_util; @@ -199,6 +203,51 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util, sg_cpu->iowait_boost >>= 1; } +static unsigned long freq_to_util(struct sugov_policy *sg_policy, + unsigned int freq) +{ + return mult_frac(sg_policy->max, freq, + sg_policy->policy->cpuinfo.max_freq); +} + +#define KHZ 1000 +static void sugov_track_cycles(struct sugov_policy *sg_policy, + unsigned int prev_freq, + u64 upto) +{ + u64 delta_ns, cycles; + /* Track cycles in current window */ + delta_ns = upto - sg_policy->last_cyc_update_time; + cycles = (prev_freq * delta_ns) / (NSEC_PER_SEC / KHZ); + sg_policy->curr_cycles += cycles; + sg_policy->last_cyc_update_time = upto; +} + +static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws, + unsigned int prev_freq) +{ + u64 last_ws = sg_policy->last_ws; + unsigned int avg_freq; + + WARN_ON(curr_ws < last_ws); + if (curr_ws <= last_ws) + return; + + /* If we skipped some windows */ + if (curr_ws > (last_ws + sched_ravg_window)) { + avg_freq = prev_freq; + /* Reset tracking history */ + sg_policy->last_cyc_update_time = curr_ws; + } else { + sugov_track_cycles(sg_policy, prev_freq, curr_ws); + avg_freq = sg_policy->curr_cycles; + avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ); + } + sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq); + sg_policy->curr_cycles = 0; + sg_policy->last_ws = curr_ws; +} + #define NL_RATIO 75 #define HISPEED_LOAD 90 static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util, @@ -225,13 +274,6 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util, *util = max(*util, sg_cpu->walt_load.pl); } -static unsigned long freq_to_util(struct sugov_policy *sg_policy, - unsigned int freq) -{ - return mult_frac(sg_policy->max, freq, - sg_policy->policy->cpuinfo.max_freq); -} - static void sugov_update_single(struct update_util_data *hook, u64 time, unsigned int flags) { @@ -254,6 +296,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, } else { sugov_get_util(&util, &max, sg_cpu->cpu); sugov_iowait_boost(sg_cpu, &util, &max); + sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws, + sg_policy->policy->cur); sugov_walt_adjust(sg_cpu, &util, &max); next_f = get_next_freq(sg_policy, util, max); } @@ -343,6 +387,9 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time, sugov_set_iowait_boost(sg_cpu, time, flags); sg_cpu->last_update = time; + sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws, + sg_policy->policy->cur); + trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, max, sg_cpu->walt_load.nl, sg_cpu->walt_load.pl, flags); @@ -360,6 +407,10 @@ static void sugov_work(struct kthread_work *work) struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work); mutex_lock(&sg_policy->work_lock); + 
raw_spin_lock(&sg_policy->update_lock); + sugov_track_cycles(sg_policy, sg_policy->policy->cur, + sched_ktime_clock()); + raw_spin_unlock(&sg_policy->update_lock); __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq, CPUFREQ_RELATION_L); mutex_unlock(&sg_policy->work_lock); @@ -730,6 +781,10 @@ static void sugov_limits(struct cpufreq_policy *policy) if (!policy->fast_switch_enabled) { mutex_lock(&sg_policy->work_lock); + raw_spin_lock(&sg_policy->update_lock); + sugov_track_cycles(sg_policy, sg_policy->policy->cur, + sched_ktime_clock()); + raw_spin_unlock(&sg_policy->update_lock); cpufreq_policy_apply_limits(policy); mutex_unlock(&sg_policy->work_lock); } -- GitLab From 36faa2889a93b1093183f7550c3f65ab75c31c8f Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Wed, 28 Jun 2017 21:44:56 -0700 Subject: [PATCH 735/786] cpufreq: schedutil: Update hispeed load condition Checking the utilization against current capacity can cause the hispeed load condition to be unreliable. This can happen if the hispeed condition is reevaluated after some other event (say, migration) causes the current frequency (and thereby current capacity) to change. So, instead of checking against current capacity, check against the average capacity that is less temperamental. Change-Id: Ic1277908f7d42848ded5dd450146d1d04572eaab Signed-off-by: Saravana Kannan --- kernel/sched/cpufreq_schedutil.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index c7f953c2adf3..d14f35bc87c6 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -254,13 +254,12 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util, unsigned long *max) { struct sugov_policy *sg_policy = sg_cpu->sg_policy; - unsigned long cap_cur = capacity_curr_of(sg_cpu->cpu); bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG; unsigned long nl = sg_cpu->walt_load.nl; unsigned long cpu_util = sg_cpu->util; bool is_hiload; - is_hiload = (cpu_util >= mult_frac(cap_cur, + is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap, HISPEED_LOAD, 100)); -- GitLab From 7493065acc1836561e63182d4ae7e476a8ade733 Mon Sep 17 00:00:00 2001 From: Saravana Kannan Date: Fri, 30 Jun 2017 14:36:20 -0700 Subject: [PATCH 736/786] cpufreq: schedutil: Fix race condition in computing hispeed_util hispeed_util can be changed in the context of the store function and scheduler notifications. So, we need to grab the update_lock (used to protect members of sg_policy) before changing it to avoid any race conditions. 
Change-Id: I0c6336bab0ec265d900d2e16df1fe95824d7b2e8 Signed-off-by: Saravana Kannan --- kernel/sched/cpufreq_schedutil.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index d14f35bc87c6..e756b833ba68 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -494,10 +494,12 @@ static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set, tunables->hispeed_freq = val; list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) { + raw_spin_lock(&sg_policy->update_lock); hs_util = freq_to_util(sg_policy, sg_policy->tunables->hispeed_freq); hs_util = mult_frac(hs_util, TARGET_LOAD, 100); sg_policy->hispeed_util = hs_util; + raw_spin_unlock(&sg_policy->update_lock); } return count; -- GitLab From 2f3b0c11ca9695185cb93b0727506ef985a89b59 Mon Sep 17 00:00:00 2001 From: Ashay Jaiswal Date: Wed, 14 Jun 2017 16:04:45 +0530 Subject: [PATCH 737/786] power: smb-lib: Add support to detect weak charger A weak charger might trigger switcher_power_ok interrupt storm which gets incorrectly detected as a boost_back condition and input gets suspended. Add a logic to distinguish a weak charger and the boost_back condition by first reducing the ICL current to a lower value (500mA by default) on detecting a switcher_power_ok storm. If the switcher_ok storm disappears then its indeed a weak charger and if the storm continues its possibly a boost_back condition. Change-Id: I46b406e403aa16a502e6da149b180545848fc906 Signed-off-by: Ashay Jaiswal Signed-off-by: Abhijeet Dharmapurikar --- drivers/power/supply/qcom/qpnp-smb2.c | 7 +- drivers/power/supply/qcom/smb-lib.c | 99 ++++++++++++++++++++----- drivers/power/supply/qcom/smb-lib.h | 4 + drivers/power/supply/qcom/storm-watch.c | 10 +++ drivers/power/supply/qcom/storm-watch.h | 1 + 5 files changed, 102 insertions(+), 19 deletions(-) diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c index becce31ad2ed..4f75ef36cae7 100644 --- a/drivers/power/supply/qcom/qpnp-smb2.c +++ b/drivers/power/supply/qcom/qpnp-smb2.c @@ -266,6 +266,10 @@ module_param_named( debug_mask, __debug_mask, int, 0600 ); +static int __weak_chg_icl_ua = 500000; +module_param_named( + weak_chg_icl_ua, __weak_chg_icl_ua, int, 0600); + #define MICRO_1P5A 1500000 #define MICRO_P1A 100000 #define OTG_DEFAULT_DEGLITCH_TIME_MS 50 @@ -2113,7 +2117,7 @@ static struct smb_irq_info smb2_irqs[] = { [SWITCH_POWER_OK_IRQ] = { .name = "switcher-power-ok", .handler = smblib_handle_switcher_power_ok, - .storm_data = {true, 1000, 3}, + .storm_data = {true, 1000, 8}, }, }; @@ -2307,6 +2311,7 @@ static int smb2_probe(struct platform_device *pdev) chg->dev = &pdev->dev; chg->param = v1_params; chg->debug_mask = &__debug_mask; + chg->weak_chg_icl_ua = &__weak_chg_icl_ua; chg->mode = PARALLEL_MASTER; chg->irq_info = smb2_irqs; chg->name = "PMI"; diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index 06429e8fc9c8..84f1e55e616b 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -629,6 +629,8 @@ int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param, static void smblib_uusb_removal(struct smb_charger *chg) { int rc; + struct smb_irq_data *data; + struct storm_watch *wdata; cancel_delayed_work_sync(&chg->pl_enable_work); @@ -640,8 +642,16 @@ static void smblib_uusb_removal(struct smb_charger *chg) rc); } - if (chg->wa_flags & BOOST_BACK_WA) - vote(chg->usb_icl_votable, 
BOOST_BACK_VOTER, false, 0); + if (chg->wa_flags & BOOST_BACK_WA) { + data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data; + if (data) { + wdata = &data->storm_data; + update_storm_count(wdata, WEAK_CHG_STORM_COUNT); + vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); + vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER, + false, 0); + } + } vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0); vote(chg->awake_votable, PL_DELAY_VOTER, false, 0); @@ -3144,6 +3154,8 @@ void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg) int rc; u8 stat; bool vbus_rising; + struct smb_irq_data *data; + struct storm_watch *wdata; rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat); if (rc < 0) { @@ -3157,8 +3169,18 @@ void smblib_usb_plugin_hard_reset_locked(struct smb_charger *chg) smblib_cc2_sink_removal_exit(chg); } else { smblib_cc2_sink_removal_enter(chg); - if (chg->wa_flags & BOOST_BACK_WA) - vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); + if (chg->wa_flags & BOOST_BACK_WA) { + data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data; + if (data) { + wdata = &data->storm_data; + update_storm_count(wdata, + WEAK_CHG_STORM_COUNT); + vote(chg->usb_icl_votable, BOOST_BACK_VOTER, + false, 0); + vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER, + false, 0); + } + } } power_supply_changed(chg->usb_psy); @@ -3172,6 +3194,8 @@ void smblib_usb_plugin_locked(struct smb_charger *chg) int rc; u8 stat; bool vbus_rising; + struct smb_irq_data *data; + struct storm_watch *wdata; rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat); if (rc < 0) { @@ -3208,8 +3232,18 @@ void smblib_usb_plugin_locked(struct smb_charger *chg) schedule_delayed_work(&chg->pl_enable_work, msecs_to_jiffies(PL_DELAY_MS)); } else { - if (chg->wa_flags & BOOST_BACK_WA) - vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); + if (chg->wa_flags & BOOST_BACK_WA) { + data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data; + if (data) { + wdata = &data->storm_data; + update_storm_count(wdata, + WEAK_CHG_STORM_COUNT); + vote(chg->usb_icl_votable, BOOST_BACK_VOTER, + false, 0); + vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER, + false, 0); + } + } if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) { smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n"); @@ -3593,6 +3627,8 @@ static void typec_sink_removal(struct smb_charger *chg) static void smblib_handle_typec_removal(struct smb_charger *chg) { int rc; + struct smb_irq_data *data; + struct storm_watch *wdata; chg->cc2_detach_wa_active = false; @@ -3604,8 +3640,16 @@ static void smblib_handle_typec_removal(struct smb_charger *chg) rc); } - if (chg->wa_flags & BOOST_BACK_WA) - vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); + if (chg->wa_flags & BOOST_BACK_WA) { + data = chg->irq_info[SWITCH_POWER_OK_IRQ].irq_data; + if (data) { + wdata = &data->storm_data; + update_storm_count(wdata, WEAK_CHG_STORM_COUNT); + vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0); + vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER, + false, 0); + } + } /* reset APSD voters */ vote(chg->apsd_disable_votable, PD_HARD_RESET_VOTER, false, 0); @@ -3837,10 +3881,13 @@ static void smblib_bb_removal_work(struct work_struct *work) } #define BOOST_BACK_UNVOTE_DELAY_MS 750 +#define BOOST_BACK_STORM_COUNT 3 +#define WEAK_CHG_STORM_COUNT 8 irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data) { struct smb_irq_data *irq_data = data; struct smb_charger *chg = irq_data->parent_data; + struct storm_watch *wdata = &irq_data->storm_data; int rc, usb_icl; u8 stat; @@ -3862,16 +3909,32 
@@ irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data) return IRQ_HANDLED; if (is_storming(&irq_data->storm_data)) { - smblib_err(chg, "Reverse boost detected: voting 0mA to suspend input\n"); - vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0); - vote(chg->awake_votable, BOOST_BACK_VOTER, true, 0); - /* - * Remove the boost-back vote after a delay, to avoid - * permanently suspending the input if the boost-back condition - * is unintentionally hit. - */ - schedule_delayed_work(&chg->bb_removal_work, - msecs_to_jiffies(BOOST_BACK_UNVOTE_DELAY_MS)); + /* This could be a weak charger reduce ICL */ + if (!is_client_vote_enabled(chg->usb_icl_votable, + WEAK_CHARGER_VOTER)) { + smblib_err(chg, + "Weak charger detected: voting %dmA ICL\n", + *chg->weak_chg_icl_ua / 1000); + vote(chg->usb_icl_votable, WEAK_CHARGER_VOTER, + true, *chg->weak_chg_icl_ua); + /* + * reset storm data and set the storm threshold + * to 3 for reverse boost detection. + */ + update_storm_count(wdata, BOOST_BACK_STORM_COUNT); + } else { + smblib_err(chg, + "Reverse boost detected: voting 0mA to suspend input\n"); + vote(chg->usb_icl_votable, BOOST_BACK_VOTER, true, 0); + vote(chg->awake_votable, BOOST_BACK_VOTER, true, 0); + /* + * Remove the boost-back vote after a delay, to avoid + * permanently suspending the input if the boost-back + * condition is unintentionally hit. + */ + schedule_delayed_work(&chg->bb_removal_work, + msecs_to_jiffies(BOOST_BACK_UNVOTE_DELAY_MS)); + } } return IRQ_HANDLED; diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h index 81db1c92efe2..a2168f081615 100644 --- a/drivers/power/supply/qcom/smb-lib.h +++ b/drivers/power/supply/qcom/smb-lib.h @@ -64,9 +64,12 @@ enum print_reason { #define BATT_PROFILE_VOTER "BATT_PROFILE_VOTER" #define OTG_DELAY_VOTER "OTG_DELAY_VOTER" #define USBIN_I_VOTER "USBIN_I_VOTER" +#define WEAK_CHARGER_VOTER "WEAK_CHARGER_VOTER" #define VCONN_MAX_ATTEMPTS 3 #define OTG_MAX_ATTEMPTS 3 +#define BOOST_BACK_STORM_COUNT 3 +#define WEAK_CHG_STORM_COUNT 8 enum smb_mode { PARALLEL_MASTER = 0, @@ -230,6 +233,7 @@ struct smb_charger { struct smb_chg_freq chg_freq; int smb_version; int otg_delay_ms; + int *weak_chg_icl_ua; /* locks */ struct mutex lock; diff --git a/drivers/power/supply/qcom/storm-watch.c b/drivers/power/supply/qcom/storm-watch.c index 5275079c53e0..21ac669f2ec9 100644 --- a/drivers/power/supply/qcom/storm-watch.c +++ b/drivers/power/supply/qcom/storm-watch.c @@ -64,3 +64,13 @@ void reset_storm_count(struct storm_watch *data) data->storm_count = 0; mutex_unlock(&data->storm_lock); } + +void update_storm_count(struct storm_watch *data, int max_count) +{ + if (!data) + return; + + mutex_lock(&data->storm_lock); + data->max_storm_count = max_count; + mutex_unlock(&data->storm_lock); +} diff --git a/drivers/power/supply/qcom/storm-watch.h b/drivers/power/supply/qcom/storm-watch.h index ff05c4a661c3..5275d73613d4 100644 --- a/drivers/power/supply/qcom/storm-watch.h +++ b/drivers/power/supply/qcom/storm-watch.h @@ -37,4 +37,5 @@ struct storm_watch { bool is_storming(struct storm_watch *data); void reset_storm_count(struct storm_watch *data); +void update_storm_count(struct storm_watch *data, int max_count); #endif -- GitLab From bf25d39d36f56b8a0b4fec23cc7846eba37f1705 Mon Sep 17 00:00:00 2001 From: Fenglin Wu Date: Thu, 22 Jun 2017 12:30:33 +0800 Subject: [PATCH 738/786] power: qpnp-smb2: Unset ONLINE for UNKNOWN USB charger type Currently, USB power_supply POWER_SUPPLY_PROP_ONLINE property will be set if the charger 
type is POWER_SUPPLY_TYPE_UNKNOWN which is not expected. Unset ONLINE for UNKNOWN USB charger type. CRs-Fixed: 2065139 Change-Id: Ibc40ee62b2d95c319a7fe34b17813c2b57518f67 Signed-off-by: Fenglin Wu --- drivers/power/supply/qcom/qpnp-smb2.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c index 4f75ef36cae7..e017caaf4ee7 100644 --- a/drivers/power/supply/qcom/qpnp-smb2.c +++ b/drivers/power/supply/qcom/qpnp-smb2.c @@ -465,6 +465,8 @@ static int smb2_usb_get_prop(struct power_supply *psy, val->intval = 0; else val->intval = 1; + if (chg->real_charger_type == POWER_SUPPLY_TYPE_UNKNOWN) + val->intval = 0; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN: val->intval = chg->voltage_min_uv; -- GitLab From 1d046d91c78d2e925457ddd8bd03729a2f8181b8 Mon Sep 17 00:00:00 2001 From: Ashay Jaiswal Date: Wed, 21 Jun 2017 14:11:41 +0530 Subject: [PATCH 739/786] qpnp-smb2: configure DRP mode after interrupt request During hardware initialization charger is configured in DRP mode from force UFP mode and after mode configuration hardware takes ~300/400 msec for UFP/DFP detection. In case if the delay between moving to DRP mode and software enabling interrupts is more than hardware detection delay then software will miss the detection interrupt. Fix this by moving DRP configuration after interrupt request this ensures software will receive interrupt once hardware detection completes. CRs-Fixed: 2065296 Change-Id: I55c59ed558e8db40a7b1af7638832da1f9547222 Signed-off-by: Ashay Jaiswal --- drivers/power/supply/qcom/qpnp-smb2.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c index e017caaf4ee7..e94873c5252e 100644 --- a/drivers/power/supply/qcom/qpnp-smb2.c +++ b/drivers/power/supply/qcom/qpnp-smb2.c @@ -1472,15 +1472,6 @@ static int smb2_configure_typec(struct smb_charger *chg) return rc; } - /* configure power role for dual-role */ - rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, - TYPEC_POWER_ROLE_CMD_MASK, 0); - if (rc < 0) { - dev_err(chg->dev, - "Couldn't configure power role for DRP rc=%d\n", rc); - return rc; - } - /* * disable Type-C factory mode and stay in Attached.SRC state when VCONN * over-current happens @@ -1858,6 +1849,16 @@ static int smb2_init_hw(struct smb2 *chip) static int smb2_post_init(struct smb2 *chip) { struct smb_charger *chg = &chip->chg; + int rc; + + /* configure power role for dual-role */ + rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG, + TYPEC_POWER_ROLE_CMD_MASK, 0); + if (rc < 0) { + dev_err(chg->dev, + "Couldn't configure power role for DRP rc=%d\n", rc); + return rc; + } rerun_election(chg->usb_irq_enable_votable); @@ -2425,7 +2426,11 @@ static int smb2_probe(struct platform_device *pdev) goto cleanup; } - smb2_post_init(chip); + rc = smb2_post_init(chip); + if (rc < 0) { + pr_err("Failed in post init rc=%d\n", rc); + goto cleanup; + } smb2_create_debugfs(chip); -- GitLab From dd08c6205680c7d7ef8e7cc595fecc76deb40813 Mon Sep 17 00:00:00 2001 From: Ashay Jaiswal Date: Thu, 29 Jun 2017 16:25:23 +0530 Subject: [PATCH 740/786] power: qcom: smb-lib: handle dynamic Rp change The legacy detection workaround change restricts DCP to 1.5A draw, regardless of legacy/non-legacy type. This was to prevent a legacy (non-compliant) cable, incorrectly detected as non-legacy cable from drawing more than 1.5A. 
The issue with legacy bit is that hardware could end up reporting non-legacy for a legacy cable (not the other way round). We want to ensure that for non-compliant legacy cable SDP/CDP current limits are honoured and that for a Rp = 10k legacy cable VBUS shouldn't go higher than 5V. This cap of 1.5A on DCP is not necessary. Moreover AICL will limit the current if needed. To realize this force the Rp based current whenever DCP or float is seen via the LEGACY_UNKNOWN_VOTER. LEGACY_UNKNOWN_VOTER is not removed unless a confirmed hvdcp or pd is seen. Change-Id: I89505e9db4f045aaf71ab0ee534de783ea4d2df3 Signed-off-by: Ashay Jaiswal Signed-off-by: Abhijeet Dharmapurikar --- drivers/power/supply/qcom/smb-lib.c | 81 +++++++++++++++++++++++------ 1 file changed, 66 insertions(+), 15 deletions(-) diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c index 84f1e55e616b..4c67c803c0d2 100644 --- a/drivers/power/supply/qcom/smb-lib.c +++ b/drivers/power/supply/qcom/smb-lib.c @@ -2867,13 +2867,13 @@ int smblib_get_prop_fcc_delta(struct smb_charger *chg, * USB MAIN PSY SETTERS * ************************/ -#define SDP_CURRENT_MA 500000 -#define CDP_CURRENT_MA 1500000 -#define DCP_CURRENT_MA 1500000 -#define HVDCP_CURRENT_MA 3000000 -#define TYPEC_DEFAULT_CURRENT_MA 900000 -#define TYPEC_MEDIUM_CURRENT_MA 1500000 -#define TYPEC_HIGH_CURRENT_MA 3000000 +#define SDP_CURRENT_UA 500000 +#define CDP_CURRENT_UA 1500000 +#define DCP_CURRENT_UA 1500000 +#define HVDCP_CURRENT_UA 3000000 +#define TYPEC_DEFAULT_CURRENT_UA 900000 +#define TYPEC_MEDIUM_CURRENT_UA 1500000 +#define TYPEC_HIGH_CURRENT_UA 3000000 int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua) { @@ -2907,19 +2907,19 @@ int smblib_get_charge_current(struct smb_charger *chg, /* QC 2.0/3.0 adapter */ if (apsd_result->bit & (QC_3P0_BIT | QC_2P0_BIT)) { - *total_current_ua = HVDCP_CURRENT_MA; + *total_current_ua = HVDCP_CURRENT_UA; return 0; } if (non_compliant) { switch (apsd_result->bit) { case CDP_CHARGER_BIT: - current_ua = CDP_CURRENT_MA; + current_ua = CDP_CURRENT_UA; break; case DCP_CHARGER_BIT: case OCP_CHARGER_BIT: case FLOAT_CHARGER_BIT: - current_ua = DCP_CURRENT_MA; + current_ua = DCP_CURRENT_UA; break; default: current_ua = 0; @@ -2934,7 +2934,7 @@ int smblib_get_charge_current(struct smb_charger *chg, case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT: switch (apsd_result->bit) { case CDP_CHARGER_BIT: - current_ua = CDP_CURRENT_MA; + current_ua = CDP_CURRENT_UA; break; case DCP_CHARGER_BIT: case OCP_CHARGER_BIT: @@ -2947,10 +2947,10 @@ int smblib_get_charge_current(struct smb_charger *chg, } break; case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM: - current_ua = TYPEC_MEDIUM_CURRENT_MA; + current_ua = TYPEC_MEDIUM_CURRENT_UA; break; case POWER_SUPPLY_TYPEC_SOURCE_HIGH: - current_ua = TYPEC_HIGH_CURRENT_MA; + current_ua = TYPEC_HIGH_CURRENT_UA; break; case POWER_SUPPLY_TYPEC_NON_COMPLIANT: case POWER_SUPPLY_TYPEC_NONE: @@ -3472,8 +3472,29 @@ static void smblib_handle_hvdcp_detect_done(struct smb_charger *chg, rising ? 
"rising" : "falling"); } +static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode) +{ + int rp_ua; + + switch (typec_mode) { + case POWER_SUPPLY_TYPEC_SOURCE_HIGH: + rp_ua = TYPEC_HIGH_CURRENT_UA; + break; + case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM: + case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT: + /* fall through */ + default: + rp_ua = DCP_CURRENT_UA; + } + + return rp_ua; +} + static void smblib_force_legacy_icl(struct smb_charger *chg, int pst) { + int typec_mode; + int rp_ua; + /* while PD is active it should have complete ICL control */ if (chg->pd_active) return; @@ -3495,7 +3516,9 @@ static void smblib_force_legacy_icl(struct smb_charger *chg, int pst) break; case POWER_SUPPLY_TYPE_USB_DCP: case POWER_SUPPLY_TYPE_USB_FLOAT: - vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000); + typec_mode = smblib_get_prop_typec_mode(chg); + rp_ua = get_rp_based_dcp_current(chg, typec_mode); + vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua); break; case POWER_SUPPLY_TYPE_USB_HVDCP: case POWER_SUPPLY_TYPE_USB_HVDCP_3: @@ -3780,12 +3803,40 @@ static void smblib_handle_typec_insertion(struct smb_charger *chg) typec_sink_removal(chg); } +static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode) +{ + int rp_ua; + const struct apsd_result *apsd = smblib_get_apsd_result(chg); + + if ((apsd->pst != POWER_SUPPLY_TYPE_USB_DCP) + && (apsd->pst != POWER_SUPPLY_TYPE_USB_FLOAT)) + return; + + /* + * handle Rp change for DCP/FLOAT/OCP. + * Update the current only if the Rp is different from + * the last Rp value. + */ + smblib_dbg(chg, PR_MISC, "CC change old_mode=%d new_mode=%d\n", + chg->typec_mode, typec_mode); + + rp_ua = get_rp_based_dcp_current(chg, typec_mode); + vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, rp_ua); +} + static void smblib_handle_typec_cc_state_change(struct smb_charger *chg) { + int typec_mode; + if (chg->pr_swap_in_progress) return; - chg->typec_mode = smblib_get_prop_typec_mode(chg); + typec_mode = smblib_get_prop_typec_mode(chg); + if (chg->typec_present && (typec_mode != chg->typec_mode)) + smblib_handle_rp_change(chg, typec_mode); + + chg->typec_mode = typec_mode; + if (!chg->typec_present && chg->typec_mode != POWER_SUPPLY_TYPEC_NONE) { chg->typec_present = true; smblib_dbg(chg, PR_MISC, "TypeC %s insertion\n", -- GitLab From d4314f03b4f8a5b60f2aeb30d1f6af864f779448 Mon Sep 17 00:00:00 2001 From: Pavankumar Kondeti Date: Fri, 7 Jul 2017 10:02:19 +0530 Subject: [PATCH 741/786] sched: compile sched_avg.c only for SMP The stats exported by sched_avg.c like running average of runnable tasks and CPU load are used to manage the number of active CPUs in the system. There is no use case for this on UP system. 
Change-Id: I09c454e742e78813ddcf4ac98858b77970eeaa1f Signed-off-by: Pavankumar Kondeti --- include/linux/sched.h | 15 +++++++++++++++ kernel/sched/Makefile | 4 ++-- kernel/sched/sched_avg.c | 6 ++---- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 9e7ab0584f04..75b464895a1f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -177,11 +177,26 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); extern u64 nr_running_integral(unsigned int cpu); #endif +#ifdef CONFIG_SMP extern void sched_update_nr_prod(int cpu, long delta, bool inc); extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg, unsigned int *max_nr, unsigned int *big_max_nr); extern unsigned int sched_get_cpu_util(int cpu); +#else +static inline void sched_update_nr_prod(int cpu, long delta, bool inc) +{ +} +static inline void sched_get_nr_running_avg(int *avg, int *iowait_avg, + int *big_avg, unsigned int *max_nr, + unsigned int *big_max_nr) +{ +} +static inline unsigned int sched_get_cpu_util(int cpu) +{ + return 0; +} +#endif extern void calc_global_load(unsigned long ticks); diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 27a757422abe..3d12ce8da244 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -17,9 +17,9 @@ endif obj-y += core.o loadavg.o clock.o cputime.o obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o -obj-y += wait.o swait.o completion.o idle.o sched_avg.o +obj-y += wait.o swait.o completion.o idle.o obj-$(CONFIG_SCHED_HMP) += hmp.o boost.o -obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o +obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o energy.o sched_avg.o obj-$(CONFIG_SCHED_WALT) += walt.o boost.o obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o obj-$(CONFIG_SCHEDSTATS) += stats.o diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c index 7f86c0bad3ec..42389244c1bb 100644 --- a/kernel/sched/sched_avg.c +++ b/kernel/sched/sched_avg.c @@ -162,16 +162,14 @@ EXPORT_SYMBOL(sched_update_nr_prod); unsigned int sched_get_cpu_util(int cpu) { struct rq *rq = cpu_rq(cpu); - u64 util = 0; - unsigned long capacity = SCHED_CAPACITY_SCALE, flags; + u64 util; + unsigned long capacity, flags; unsigned int busy; raw_spin_lock_irqsave(&rq->lock, flags); -#ifdef CONFIG_SMP util = rq->cfs.avg.util_avg; capacity = capacity_orig_of(cpu); -#endif #ifdef CONFIG_SCHED_WALT if (!walt_disabled && sysctl_sched_use_walt_cpu_util) { -- GitLab From 9892ba14d0cc379c3907a2274d157c2816c60153 Mon Sep 17 00:00:00 2001 From: Deepak Kumar Date: Fri, 7 Jul 2017 14:51:11 +0530 Subject: [PATCH 742/786] msm: kgsl: correctly disable SP clock before hwcg settings Correct the mask value passed to kgsl_gmu_regrmw to disable the SP clock before programming hwcg registers. 
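
A short standalone sketch of the point being fixed, assuming the usual
read-modify-write convention for the (mask, bits) arguments, where mask selects
the bits to clear and bits are then OR-ed in; the register value and bit
position here are illustrative only:

  #include <stdint.h>
  #include <stdio.h>

  /* Generic read-modify-write helper following the assumed convention. */
  static uint32_t regrmw(uint32_t reg, uint32_t mask, uint32_t bits)
  {
          return (reg & ~mask) | bits;
  }

  int main(void)
  {
          uint32_t reg = 0x1;     /* clock-enable bit currently set */

          /* mask = 0: nothing is cleared, the clock stays enabled */
          printf("mask=0: 0x%x\n", regrmw(reg, 0, 0));   /* prints 0x1 */

          /* mask = 1: bit 0 is cleared, the clock is actually disabled */
          printf("mask=1: 0x%x\n", regrmw(reg, 1, 0));   /* prints 0x0 */
          return 0;
  }
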
Change-Id: Iea82e5e41644ba3c18a7038455681159615adc76 Signed-off-by: Deepak Kumar --- drivers/gpu/msm/adreno_a6xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 30ada8f57008..25cbc416b14a 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -339,7 +339,7 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on) regs = a6xx_hwcg_registers[i].regs; /* Disable SP clock before programming HWCG registers */ - kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 0); + kgsl_gmu_regrmw(device, A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0); for (j = 0; j < a6xx_hwcg_registers[i].count; j++) kgsl_regwrite(device, regs[j].off, on ? regs[j].val : 0); -- GitLab From d313ea8168feda27f4ed551d8a2a3e6cb2b9c327 Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Wed, 14 Jun 2017 13:10:52 +0530 Subject: [PATCH 743/786] ARM: dts: msm: Add sleep driver for sdm670 Add sleep driver to allow the CPUSS to go different low power modes. Change-Id: Ibcb63184279cb633933856783e2d13b883901545 Signed-off-by: Maulik Shah --- arch/arm64/boot/dts/qcom/sdm670-pm.dtsi | 192 ++++++++++++++++++++++++ arch/arm64/boot/dts/qcom/sdm670.dtsi | 1 + 2 files changed, 193 insertions(+) create mode 100644 arch/arm64/boot/dts/qcom/sdm670-pm.dtsi diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi new file mode 100644 index 000000000000..ee17ebf920ca --- /dev/null +++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi @@ -0,0 +1,192 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +&soc { + qcom,lpm-levels { + compatible = "qcom,lpm-levels"; + #address-cells = <1>; + #size-cells = <0>; + + qcom,pm-cluster@0 { + reg = <0>; + #address-cells = <1>; + #size-cells = <0>; + label = "L3"; + qcom,psci-mode-shift = <4>; + qcom,psci-mode-mask = <0xfff>; + + qcom,pm-cluster-level@0 { /* D1 */ + reg = <0>; + label = "l3-wfi"; + qcom,psci-mode = <0x1>; + qcom,latency-us = <51>; + qcom,ss-power = <452>; + qcom,energy-overhead = <69355>; + qcom,time-overhead = <99>; + }; + + qcom,pm-cluster-level@1 { /* D2 */ + reg = <1>; + label = "l3-dyn-ret"; + qcom,psci-mode = <0x2>; + qcom,latency-us = <659>; + qcom,ss-power = <434>; + qcom,energy-overhead = <465725>; + qcom,time-overhead = <976>; + qcom,min-child-idx = <1>; + }; + + qcom,pm-cluster-level@2 { /* D4, D3 is not supported */ + reg = <2>; + label = "l3-pc"; + qcom,psci-mode = <0x4>; + qcom,latency-us = <3201>; + qcom,ss-power = <408>; + qcom,energy-overhead = <2421840>; + qcom,time-overhead = <5376>; + qcom,min-child-idx = <2>; + qcom,is-reset; + }; + + qcom,pm-cluster-level@3 { /* Cx off */ + reg = <3>; + label = "cx-off"; + qcom,psci-mode = <0x224>; + qcom,latency-us = <5562>; + qcom,ss-power = <308>; + qcom,energy-overhead = <2521840>; + qcom,time-overhead = <6376>; + qcom,min-child-idx = <3>; + qcom,is-reset; + qcom,notify-rpm; + }; + + qcom,pm-cluster-level@4 { /* AOSS sleep */ + reg = <4>; + label = "llcc-off"; + qcom,psci-mode = <0xC24>; + qcom,latency-us = <6562>; + qcom,ss-power = <108>; + qcom,energy-overhead = <2621840>; + qcom,time-overhead = <7376>; + qcom,min-child-idx = <3>; + qcom,is-reset; + qcom,notify-rpm; + }; + + qcom,pm-cpu@0 { + #address-cells = <1>; + #size-cells = <0>; + qcom,psci-mode-shift = <0>; + qcom,psci-mode-mask = <0xf>; + qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3 &CPU4 + &CPU5>; + + qcom,pm-cpu-level@0 { /* C1 */ + reg = <0>; + qcom,spm-cpu-mode = "wfi"; + qcom,psci-cpu-mode = <0x1>; + qcom,latency-us = <43>; + qcom,ss-power = <454>; + qcom,energy-overhead = <38639>; + qcom,time-overhead = <83>; + }; + + qcom,pm-cpu-level@1 { /* C2D */ + reg = <1>; + qcom,spm-cpu-mode = "ret"; + qcom,psci-cpu-mode = <0x2>; + qcom,latency-us = <119>; + qcom,ss-power = <449>; + qcom,energy-overhead = <78456>; + qcom,time-overhead = <167>; + }; + + qcom,pm-cpu-level@2 { /* C3 */ + reg = <2>; + qcom,spm-cpu-mode = "pc"; + qcom,psci-cpu-mode = <0x3>; + qcom,latency-us = <461>; + qcom,ss-power = <436>; + qcom,energy-overhead = <418225>; + qcom,time-overhead = <885>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + + qcom,pm-cpu-level@3 { /* C4 */ + reg = <3>; + qcom,spm-cpu-mode = "rail-pc"; + qcom,psci-cpu-mode = <0x4>; + qcom,latency-us = <531>; + qcom,ss-power = <400>; + qcom,energy-overhead = <428225>; + qcom,time-overhead = <1000>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + }; + + qcom,pm-cpu@1 { + #address-cells = <1>; + #size-cells = <0>; + qcom,psci-mode-shift = <0>; + qcom,psci-mode-mask = <0xf>; + qcom,cpu = <&CPU6 &CPU7>; + + qcom,pm-cpu-level@0 { /* C1 */ + reg = <0>; + qcom,spm-cpu-mode = "wfi"; + qcom,psci-cpu-mode = <0x1>; + qcom,latency-us = <43>; + qcom,ss-power = <454>; + qcom,energy-overhead = <38639>; + qcom,time-overhead = <83>; + }; + + qcom,pm-cpu-level@1 { /* C2D */ + reg = <1>; + qcom,spm-cpu-mode = "ret"; + qcom,psci-cpu-mode = <0x2>; + qcom,latency-us = <116>; + qcom,ss-power = <449>; + qcom,energy-overhead = <78456>; + qcom,time-overhead = <167>; + }; + + qcom,pm-cpu-level@2 { /* C3 */ + reg = <2>; + qcom,spm-cpu-mode = "pc"; + qcom,psci-cpu-mode = <0x3>; + qcom,latency-us = 
<621>; + qcom,ss-power = <436>; + qcom,energy-overhead = <418225>; + qcom,time-overhead = <885>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + + qcom,pm-cpu-level@3 { /* C4 */ + reg = <3>; + qcom,spm-cpu-mode = "rail-pc"; + qcom,psci-cpu-mode = <0x4>; + qcom,latency-us = <1061>; + qcom,ss-power = <400>; + qcom,energy-overhead = <428225>; + qcom,time-overhead = <1000>; + qcom,is-reset; + qcom,use-broadcast-timer; + }; + }; + }; + }; +}; diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index b7df3201c614..5194142edb19 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -1113,6 +1113,7 @@ #include "sdm670-pinctrl.dtsi" #include "msm-arm-smmu-sdm670.dtsi" #include "msm-gdsc-sdm845.dtsi" +#include "sdm670-pm.dtsi" &usb30_prim_gdsc { status = "ok"; -- GitLab From 682f981c8b13d1bdf474c15ab137534d1820c512 Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Thu, 15 Jun 2017 09:56:07 +0530 Subject: [PATCH 744/786] ARM: dts: msm: Add device bindings for RPM stats for sdm670 RPM stats exports counters that indicates the number of times, deeper low power modes were achieved. Change-Id: I928281f19633229d57d8d9548d0ea320a9312f26 Signed-off-by: Maulik Shah --- arch/arm64/boot/dts/qcom/sdm670-pm.dtsi | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi index ee17ebf920ca..8501d80de461 100644 --- a/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670-pm.dtsi @@ -189,4 +189,10 @@ }; }; }; + + qcom,rpm-stats@c300000 { + compatible = "qcom,rpm-stats"; + reg = <0xc300000 0x1000>, <0xc3f0004 0x4>; + reg-names = "phys_addr_base", "offset_addr"; + }; }; -- GitLab From 210773d0baddb4f9ee9ce4da2ac33b5c747c30e8 Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Thu, 15 Jun 2017 09:49:12 +0530 Subject: [PATCH 745/786] ARM: dts: msm: Add command DB node for sdm670 Command DB driver is required by system drivers to query system resource parameters. Change-Id: I5aa54255c96fb88b3bd85dabdabb1d360eefb5e8 Signed-off-by: Maulik Shah --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index b7df3201c614..ef5296f44964 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -990,6 +990,11 @@ }; }; + cmd_db: qcom,cmd-db@c3f000c { + compatible = "qcom,cmd-db"; + reg = <0xc3f000c 0x8>; + }; + dcc: dcc_v2@10a2000 { compatible = "qcom,dcc_v2"; reg = <0x10a2000 0x1000>, -- GitLab From c77d1d271afcf390156617e49c95039f16ea2c3d Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Thu, 15 Jun 2017 14:04:50 +0530 Subject: [PATCH 746/786] ARM: dts: msm: Add APPS RSC device bindings for sdm670 Add device bindings for the apps processor's RSC. The RSC contains the TCS that will be used to send sleep/wake/active requests to the RPMH blocks for the APPS processor. 
Change-Id: I24327e69fac38a9ee08349a4435fc0727137a011 Signed-off-by: Maulik Shah --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index ef5296f44964..96795679db7e 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -19,6 +19,7 @@ #include #include #include +#include #include / { @@ -995,6 +996,19 @@ reg = <0xc3f000c 0x8>; }; + apps_rsc: mailbox@179e0000 { + compatible = "qcom,tcs-drv"; + label = "apps_rsc"; + reg = <0x179e0000 0x100>, <0x179e0d00 0x3000>; + interrupts = <0 5 0>; + #mbox-cells = <1>; + qcom,drv-id = <2>; + qcom,tcs-config = , + , + , + ; + }; + dcc: dcc_v2@10a2000 { compatible = "qcom,dcc_v2"; reg = <0x10a2000 0x1000>, -- GitLab From 0dd203fb3eaaccee06040f112711136169232921 Mon Sep 17 00:00:00 2001 From: Maulik Shah Date: Thu, 15 Jun 2017 09:44:59 +0530 Subject: [PATCH 747/786] ARM: dts: msm: Add system_pm device bindings for sdm670 Add system_pm mailbox client to send sleep and active votes when entering system low power modes. Change-Id: I84cb5d64a65173163fdf7dc2ab565fc66f871206 Signed-off-by: Maulik Shah --- arch/arm64/boot/dts/qcom/sdm670.dtsi | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index 96795679db7e..b84be4ecda16 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -1009,6 +1009,11 @@ ; }; + system_pm { + compatible = "qcom,system-pm"; + mboxes = <&apps_rsc 0>; + }; + dcc: dcc_v2@10a2000 { compatible = "qcom,dcc_v2"; reg = <0x10a2000 0x1000>, -- GitLab From fc6fdf30be729d1202f971829bacf81e3eaa3b92 Mon Sep 17 00:00:00 2001 From: Manaf Meethalavalappu Pallikunhi Date: Thu, 22 Jun 2017 21:47:45 +0530 Subject: [PATCH 748/786] thermal: qti_qmi_cooling: Add remote subsystem cooling devices QTI chipsets need to mitigate remote subsystem at different temperature range by sending mitigation action over QMI. It includes high temperature remote subsystem passive cooling, voltage floor restriction for remote subsystem supply rail at low temperature etc. This driver will add individual cooling devices for different mitigation functionalities of each remote subsystem and themal-core can mitigate them. Change-Id: I9a2e01a130b20e55af96c7f65ba0e13789091e77 Signed-off-by: Manaf Meethalavalappu Pallikunhi --- .../bindings/thermal/qti-qmi-cdev.txt | 113 +++ drivers/thermal/qcom/Kconfig | 11 + drivers/thermal/qcom/Makefile | 1 + drivers/thermal/qcom/qmi_cooling.c | 681 ++++++++++++++++++ .../thermal_mitigation_device_service_v01.c | 359 +++++++++ .../thermal_mitigation_device_service_v01.h | 128 ++++ 6 files changed, 1293 insertions(+) create mode 100644 Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt create mode 100644 drivers/thermal/qcom/qmi_cooling.c create mode 100644 drivers/thermal/qcom/thermal_mitigation_device_service_v01.c create mode 100644 drivers/thermal/qcom/thermal_mitigation_device_service_v01.h diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt new file mode 100644 index 000000000000..51c5eac18113 --- /dev/null +++ b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt @@ -0,0 +1,113 @@ +QMI thermal mitigation(TMD) cooling devices. 
+ +The QMI TMD cooling device, will be used for various mitigations for remote +subsystem including remote processor mitigation, rail voltage restriction etc. +This cooling device uses kernel qti QMI interface to send the message to +remote subsystem. + +Each child node of the QMI TMD devicetree node represents each remote +subsystem and each child of this subsystem represents separate cooling +devices. It requires minimum one remote subsystem node and each subsystem +node requires minimum one cooling device node. + +Properties: + +- compatible: + Usage: required + Value type: + Definition: should be "qcom,qmi_cooling_devices" + + +Subsystem properties: +- qcom,instance-id: + Usage: required + Value type: + Definition: Remote subsystem QMI server instance id to be used for + communicating with QMI. + + Minimum one child node is required. Child node name and its alias are + used as cooling device name and phandle for that cooling device. + + cooling device node properties: + -qcom,qmi-dev-name: + Usage: required + Value type: + Definition: Remote subsystem device identifier. Below strings + are the only acceptable device names, + "pa" -> for pa cooling device, + "cpuv_restriction_cold" -> for vdd restriction, + "cx_vdd_limit" -> for vdd limit, + "modem" -> for processor passive cooling device, + "modem_current" -> for current limiting device, + "modem_bw" -> for bus bandwidth limiting device, + "cpr_cold" -> for cpr restriction. + + -#cooling-cells: + Usage: required + Value type: + Definition: Must be 2. Needed for of_thermal as cooling device + identifier. Please refer to + for more + details. +Example: + + qmi-tmd-devices { + compatible = "qcom,qmi_cooling_devices"; + + modem { + qcom,instance-id = <0x0>; + + modem_pa: modem_pa { + qcom,qmi-dev-name = "pa"; + #cooling-cells = <2>; + }; + + modem_proc: modem_proc { + qcom,qmi-dev-name = "modem"; + #cooling-cells = <2>; + }; + + modem_vdd: modem_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + + modem_current: modem_current { + qcom,qmi-dev-name = "modem_current"; + #cooling-cells = <2>; + }; + + modem_cpr_cold: modem_cpr_cold { + qcom,qmi-dev-name = "cpr_cold"; + #cooling-cells = <2>; + }; + }; + + adsp { + qcom,instance-id = <0x1>; + + adsp_vdd: adsp_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + + cdsp { + qcom,instance-id = <0x43>; + + cdsp_vdd: cdsp_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + + slpi { + qcom,instance-id = <0x53>; + + slpi_vdd: slpi_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + }; + diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig index 38d5b932976f..be337256f604 100644 --- a/drivers/thermal/qcom/Kconfig +++ b/drivers/thermal/qcom/Kconfig @@ -49,3 +49,14 @@ config QTI_REG_COOLING_DEVICE will be used by QTI chipset to place a floor voltage restriction at low temperatures. The regulator cooling device will message the AOP using mail box to establish the floor voltage. + +config QTI_QMI_COOLING_DEVICE + bool "QTI QMI cooling devices" + depends on MSM_QMI_INTERFACE && THERMAL_OF + help + This enables the QTI remote subsystem cooling devices. These cooling + devices will be used by QTI chipset to place various remote + subsystem mitigations like remote processor passive mitigation, + remote subsystem voltage restriction at low temperatures etc. + The QMI cooling device will interface with remote subsystem + using QTI QMI interface. 
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile index 2ba84c67e84c..000c6e74985f 100644 --- a/drivers/thermal/qcom/Makefile +++ b/drivers/thermal/qcom/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o obj-$(CONFIG_QTI_THERMAL_LIMITS_DCVS) += msm_lmh_dcvs.o lmh_dbg.o obj-$(CONFIG_QTI_VIRTUAL_SENSOR) += qti_virtual_sensor.o obj-$(CONFIG_QTI_REG_COOLING_DEVICE) += regulator_cooling.o +obj-$(CONFIG_QTI_QMI_COOLING_DEVICE) += thermal_mitigation_device_service_v01.o qmi_cooling.o diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c new file mode 100644 index 000000000000..af82030194f7 --- /dev/null +++ b/drivers/thermal/qcom/qmi_cooling.c @@ -0,0 +1,681 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ + +#include +#include +#include +#include +#include +#include +#include + +#include "thermal_mitigation_device_service_v01.h" + +#define QMI_CDEV_DRIVER "qmi-cooling-device" +#define QMI_TMD_RESP_TOUT_MSEC 50 +#define QMI_CLIENT_NAME_LENGTH 40 + +enum qmi_device_type { + QMI_CDEV_MAX_LIMIT_TYPE, + QMI_CDEV_MIN_LIMIT_TYPE, + QMI_CDEV_TYPE_NR, +}; + +struct qmi_cooling_device { + struct device_node *np; + char cdev_name[THERMAL_NAME_LENGTH]; + char qmi_name[QMI_CLIENT_NAME_LENGTH]; + bool connection_active; + enum qmi_device_type type; + struct list_head qmi_node; + struct thermal_cooling_device *cdev; + unsigned int mtgn_state; + unsigned int max_level; + struct qmi_tmd_instance *tmd; +}; + +struct qmi_tmd_instance { + struct device *dev; + struct qmi_handle *handle; + struct mutex mutex; + struct work_struct work_svc_arrive; + struct work_struct work_svc_exit; + struct work_struct work_rcv_msg; + struct notifier_block nb; + uint32_t inst_id; + struct list_head tmd_cdev_list; +}; + +struct qmi_dev_info { + char *dev_name; + enum qmi_device_type type; +}; + +static struct workqueue_struct *qmi_tmd_wq; +static struct qmi_tmd_instance *tmd_instances; +static int tmd_inst_cnt; + +static struct qmi_dev_info device_clients[] = { + { + .dev_name = "pa", + .type = QMI_CDEV_MAX_LIMIT_TYPE, + }, + { + .dev_name = "cx_vdd_limit", + .type = QMI_CDEV_MAX_LIMIT_TYPE, + }, + { + .dev_name = "modem", + .type = QMI_CDEV_MAX_LIMIT_TYPE, + }, + { + .dev_name = "modem_current", + .type = QMI_CDEV_MAX_LIMIT_TYPE, + }, + { + .dev_name = "modem_bw", + .type = QMI_CDEV_MAX_LIMIT_TYPE, + }, + { + .dev_name = "cpuv_restriction_cold", + .type = QMI_CDEV_MIN_LIMIT_TYPE, + }, + { + .dev_name = "cpr_cold", + .type = QMI_CDEV_MIN_LIMIT_TYPE, + } +}; + +static int qmi_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct qmi_cooling_device *qmi_cdev = cdev->devdata; + + if (!qmi_cdev) + return -EINVAL; + + *state = qmi_cdev->max_level; + + return 0; +} + +static int qmi_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct qmi_cooling_device *qmi_cdev = cdev->devdata; + + if (!qmi_cdev) + return -EINVAL; + + if (qmi_cdev->type == 
QMI_CDEV_MIN_LIMIT_TYPE) { + *state = 0; + return 0; + } + *state = qmi_cdev->mtgn_state; + + return 0; +} + +static int qmi_tmd_send_state_request(struct qmi_cooling_device *qmi_cdev, + uint8_t state) +{ + int ret = 0; + struct tmd_set_mitigation_level_req_msg_v01 req; + struct tmd_set_mitigation_level_resp_msg_v01 tmd_resp; + struct msg_desc req_desc, resp_desc; + struct qmi_tmd_instance *tmd = qmi_cdev->tmd; + + memset(&req, 0, sizeof(req)); + memset(&tmd_resp, 0, sizeof(tmd_resp)); + + strlcpy(req.mitigation_dev_id.mitigation_dev_id, qmi_cdev->qmi_name, + QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01); + req.mitigation_level = state; + + req_desc.max_msg_len = TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01; + req_desc.ei_array = tmd_set_mitigation_level_req_msg_v01_ei; + + resp_desc.max_msg_len = + TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01; + resp_desc.ei_array = tmd_set_mitigation_level_resp_msg_v01_ei; + + mutex_lock(&tmd->mutex); + ret = qmi_send_req_wait(tmd->handle, + &req_desc, &req, sizeof(req), + &resp_desc, &tmd_resp, sizeof(tmd_resp), + QMI_TMD_RESP_TOUT_MSEC); + if (ret < 0) { + pr_err("qmi set state:%d failed for %s ret:%d\n", + state, qmi_cdev->cdev_name, ret); + goto qmi_send_exit; + } + + if (tmd_resp.resp.result != QMI_RESULT_SUCCESS_V01) { + ret = tmd_resp.resp.result; + pr_err("qmi set state:%d NOT success for %s ret:%d\n", + state, qmi_cdev->cdev_name, ret); + goto qmi_send_exit; + } + pr_debug("Requested qmi state:%d for %s\n", state, qmi_cdev->cdev_name); + +qmi_send_exit: + mutex_unlock(&tmd->mutex); + return ret; +} + +static int qmi_set_cur_or_min_state(struct qmi_cooling_device *qmi_cdev, + unsigned long state) +{ + int ret = 0; + struct qmi_tmd_instance *tmd = qmi_cdev->tmd; + + if (!tmd) + return -EINVAL; + + if (qmi_cdev->mtgn_state == state) + return ret; + + /* save it and return if server exit */ + if (!qmi_cdev->connection_active) { + qmi_cdev->mtgn_state = state; + pr_debug("Pending request:%ld for %s\n", state, + qmi_cdev->cdev_name); + return ret; + } + + /* It is best effort to save state even if QMI fail */ + ret = qmi_tmd_send_state_request(qmi_cdev, (uint8_t)state); + + qmi_cdev->mtgn_state = state; + + return ret; +} + +static int qmi_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + struct qmi_cooling_device *qmi_cdev = cdev->devdata; + + if (!qmi_cdev) + return -EINVAL; + + if (qmi_cdev->type == QMI_CDEV_MIN_LIMIT_TYPE) + return 0; + + if (state > qmi_cdev->max_level) + state = qmi_cdev->max_level; + + return qmi_set_cur_or_min_state(qmi_cdev, state); +} + +static int qmi_set_min_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + struct qmi_cooling_device *qmi_cdev = cdev->devdata; + + if (!qmi_cdev) + return -EINVAL; + + if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE) + return 0; + + if (state > qmi_cdev->max_level) + state = qmi_cdev->max_level; + + /* Convert state into QMI client expects for min state */ + state = qmi_cdev->max_level - state; + + return qmi_set_cur_or_min_state(qmi_cdev, state); +} + +static int qmi_get_min_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct qmi_cooling_device *qmi_cdev = cdev->devdata; + + if (!qmi_cdev) + return -EINVAL; + + if (qmi_cdev->type == QMI_CDEV_MAX_LIMIT_TYPE) { + *state = 0; + return 0; + } + *state = qmi_cdev->max_level - qmi_cdev->mtgn_state; + + return 0; +} + +static struct 
thermal_cooling_device_ops qmi_device_ops = { + .get_max_state = qmi_get_max_state, + .get_cur_state = qmi_get_cur_state, + .set_cur_state = qmi_set_cur_state, + .set_min_state = qmi_set_min_state, + .get_min_state = qmi_get_min_state, +}; + +static int qmi_register_cooling_device(struct qmi_cooling_device *qmi_cdev) +{ + qmi_cdev->cdev = thermal_of_cooling_device_register( + qmi_cdev->np, + qmi_cdev->cdev_name, + qmi_cdev, + &qmi_device_ops); + if (IS_ERR(qmi_cdev->cdev)) { + pr_err("Cooling register failed for %s, ret:%ld\n", + qmi_cdev->cdev_name, PTR_ERR(qmi_cdev->cdev)); + return PTR_ERR(qmi_cdev->cdev); + } + pr_debug("Cooling register success for %s\n", qmi_cdev->cdev_name); + + return 0; +} + +static int verify_devices_and_register(struct qmi_tmd_instance *tmd) +{ + struct tmd_get_mitigation_device_list_req_msg_v01 req; + struct tmd_get_mitigation_device_list_resp_msg_v01 *tmd_resp; + struct msg_desc req_desc, resp_desc; + int ret = 0, i; + + memset(&req, 0, sizeof(req)); + /* size of tmd_resp is very high, use heap memory rather than stack */ + tmd_resp = kzalloc(sizeof(*tmd_resp), GFP_KERNEL); + if (!tmd_resp) + return -ENOMEM; + + req_desc.max_msg_len = + TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN; + req_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01; + req_desc.ei_array = tmd_get_mitigation_device_list_req_msg_v01_ei; + + resp_desc.max_msg_len = + TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN; + resp_desc.msg_id = QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01; + resp_desc.ei_array = tmd_get_mitigation_device_list_resp_msg_v01_ei; + + mutex_lock(&tmd->mutex); + ret = qmi_send_req_wait(tmd->handle, + &req_desc, &req, sizeof(req), + &resp_desc, tmd_resp, sizeof(*tmd_resp), + 0); + if (ret < 0) { + pr_err("qmi get device list failed for inst_id:0x%x ret:%d\n", + tmd->inst_id, ret); + goto reg_exit; + } + + if (tmd_resp->resp.result != QMI_RESULT_SUCCESS_V01) { + ret = tmd_resp->resp.result; + pr_err("Get device list NOT success for inst_id:0x%x ret:%d\n", + tmd->inst_id, ret); + goto reg_exit; + } + mutex_unlock(&tmd->mutex); + + for (i = 0; i < tmd_resp->mitigation_device_list_len; i++) { + struct qmi_cooling_device *qmi_cdev = NULL; + + list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list, + qmi_node) { + struct tmd_mitigation_dev_list_type_v01 *device = + &tmd_resp->mitigation_device_list[i]; + + if ((strncasecmp(qmi_cdev->qmi_name, + device->mitigation_dev_id.mitigation_dev_id, + QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01))) + continue; + + qmi_cdev->connection_active = true; + qmi_cdev->max_level = device->max_mitigation_level; + /* + * It is better to set current state + * initially or during restart + */ + qmi_tmd_send_state_request(qmi_cdev, + qmi_cdev->mtgn_state); + if (!qmi_cdev->cdev) + ret = qmi_register_cooling_device(qmi_cdev); + break; + } + } + + kfree(tmd_resp); + return ret; + +reg_exit: + mutex_unlock(&tmd->mutex); + kfree(tmd_resp); + + return ret; +} + +static void qmi_tmd_rcv_msg(struct work_struct *work) +{ + int rc; + struct qmi_tmd_instance *tmd = container_of(work, + struct qmi_tmd_instance, + work_rcv_msg); + + do { + pr_debug("Notified about a Receive Event\n"); + } while ((rc = qmi_recv_msg(tmd->handle)) == 0); + + if (rc != -ENOMSG) + pr_err("Error receiving message for SVC:0x%x, ret:%d\n", + tmd->inst_id, rc); +} + +static void qmi_tmd_clnt_notify(struct qmi_handle *handle, + enum qmi_event_type event, void *priv_data) +{ + struct qmi_tmd_instance *tmd = + (struct qmi_tmd_instance *)priv_data; + + if (!tmd) { + pr_debug("tmd 
is NULL\n"); + return; + } + + switch (event) { + case QMI_RECV_MSG: + queue_work(qmi_tmd_wq, &tmd->work_rcv_msg); + break; + default: + break; + } +} + +static void qmi_tmd_svc_arrive(struct work_struct *work) +{ + int ret = 0; + struct qmi_tmd_instance *tmd = container_of(work, + struct qmi_tmd_instance, + work_svc_arrive); + + mutex_lock(&tmd->mutex); + tmd->handle = qmi_handle_create(qmi_tmd_clnt_notify, tmd); + if (!tmd->handle) { + pr_err("QMI TMD client handle alloc failed for 0x%x\n", + tmd->inst_id); + goto arrive_exit; + } + + ret = qmi_connect_to_service(tmd->handle, TMD_SERVICE_ID_V01, + TMD_SERVICE_VERS_V01, + tmd->inst_id); + if (ret < 0) { + pr_err("Could not connect handle to service for 0x%x, ret:%d\n", + tmd->inst_id, ret); + qmi_handle_destroy(tmd->handle); + tmd->handle = NULL; + goto arrive_exit; + } + mutex_unlock(&tmd->mutex); + + verify_devices_and_register(tmd); + + return; + +arrive_exit: + mutex_unlock(&tmd->mutex); +} + +static void qmi_tmd_svc_exit(struct work_struct *work) +{ + struct qmi_tmd_instance *tmd = container_of(work, + struct qmi_tmd_instance, + work_svc_exit); + struct qmi_cooling_device *qmi_cdev; + + mutex_lock(&tmd->mutex); + qmi_handle_destroy(tmd->handle); + tmd->handle = NULL; + + list_for_each_entry(qmi_cdev, &tmd->tmd_cdev_list, qmi_node) + qmi_cdev->connection_active = false; + + mutex_unlock(&tmd->mutex); +} + +static int qmi_tmd_svc_event_notify(struct notifier_block *this, + unsigned long event, + void *data) +{ + struct qmi_tmd_instance *tmd = container_of(this, + struct qmi_tmd_instance, + nb); + + if (!tmd) { + pr_debug("tmd is NULL\n"); + return -EINVAL; + } + + switch (event) { + case QMI_SERVER_ARRIVE: + schedule_work(&tmd->work_svc_arrive); + break; + case QMI_SERVER_EXIT: + schedule_work(&tmd->work_svc_exit); + break; + default: + break; + } + return 0; +} + +static void qmi_tmd_cleanup(void) +{ + int idx = 0; + struct qmi_tmd_instance *tmd = tmd_instances; + struct qmi_cooling_device *qmi_cdev, *c_next; + + for (; idx < tmd_inst_cnt; idx++) { + mutex_lock(&tmd[idx].mutex); + list_for_each_entry_safe(qmi_cdev, c_next, + &tmd[idx].tmd_cdev_list, qmi_node) { + if (qmi_cdev->cdev) + thermal_cooling_device_unregister( + qmi_cdev->cdev); + + list_del(&qmi_cdev->qmi_node); + } + if (tmd[idx].handle) + qmi_handle_destroy(tmd[idx].handle); + + if (tmd[idx].nb.notifier_call) + qmi_svc_event_notifier_unregister(TMD_SERVICE_ID_V01, + TMD_SERVICE_VERS_V01, + tmd[idx].inst_id, + &tmd[idx].nb); + mutex_unlock(&tmd[idx].mutex); + } + + if (qmi_tmd_wq) { + destroy_workqueue(qmi_tmd_wq); + qmi_tmd_wq = NULL; + } +} + +static int of_get_qmi_tmd_platform_data(struct device *dev) +{ + int ret = 0, idx = 0, i = 0, subsys_cnt = 0; + struct device_node *np = dev->of_node; + struct device_node *subsys_np, *cdev_np; + struct qmi_tmd_instance *tmd; + struct qmi_cooling_device *qmi_cdev; + + subsys_cnt = of_get_available_child_count(np); + if (!subsys_cnt) { + dev_err(dev, "No child node to process\n"); + return -EFAULT; + } + + tmd = devm_kcalloc(dev, subsys_cnt, sizeof(*tmd), GFP_KERNEL); + if (!tmd) + return -ENOMEM; + + for_each_available_child_of_node(np, subsys_np) { + if (idx >= subsys_cnt) + break; + + ret = of_property_read_u32(subsys_np, "qcom,instance-id", + &tmd[idx].inst_id); + if (ret) { + dev_err(dev, "error reading qcom,insance-id. 
ret:%d\n", + ret); + return ret; + } + + tmd[idx].dev = dev; + mutex_init(&tmd[idx].mutex); + INIT_LIST_HEAD(&tmd[idx].tmd_cdev_list); + + for_each_available_child_of_node(subsys_np, cdev_np) { + const char *qmi_name; + + qmi_cdev = devm_kzalloc(dev, sizeof(*qmi_cdev), + GFP_KERNEL); + if (!qmi_cdev) { + ret = -ENOMEM; + return ret; + } + + strlcpy(qmi_cdev->cdev_name, cdev_np->name, + THERMAL_NAME_LENGTH); + + if (!of_property_read_string(cdev_np, + "qcom,qmi-dev-name", + &qmi_name)) { + strlcpy(qmi_cdev->qmi_name, qmi_name, + QMI_CLIENT_NAME_LENGTH); + } else { + dev_err(dev, "Fail to parse dev name for %s\n", + cdev_np->name); + break; + } + /* Check for supported qmi dev*/ + for (i = 0; i < ARRAY_SIZE(device_clients); i++) { + if (strcmp(device_clients[i].dev_name, + qmi_cdev->qmi_name) == 0) + break; + } + + if (i >= ARRAY_SIZE(device_clients)) { + dev_err(dev, "Not supported dev name for %s\n", + cdev_np->name); + break; + } + qmi_cdev->type = device_clients[i].type; + qmi_cdev->tmd = &tmd[idx]; + qmi_cdev->np = cdev_np; + qmi_cdev->mtgn_state = 0; + list_add(&qmi_cdev->qmi_node, &tmd[idx].tmd_cdev_list); + } + idx++; + } + tmd_instances = tmd; + tmd_inst_cnt = subsys_cnt; + + return 0; +} + +static int qmi_device_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int ret = 0, idx = 0; + + ret = of_get_qmi_tmd_platform_data(dev); + if (ret) + goto probe_err; + + if (!tmd_instances || !tmd_inst_cnt) { + dev_err(dev, "Empty tmd instances\n"); + return -EINVAL; + } + + qmi_tmd_wq = create_singlethread_workqueue("qmi_tmd_wq"); + if (!qmi_tmd_wq) { + dev_err(dev, "Failed to create single thread workqueue\n"); + ret = -EFAULT; + goto probe_err; + } + + for (; idx < tmd_inst_cnt; idx++) { + struct qmi_tmd_instance *tmd = &tmd_instances[idx]; + + if (list_empty(&tmd->tmd_cdev_list)) + continue; + + tmd->nb.notifier_call = qmi_tmd_svc_event_notify; + INIT_WORK(&tmd->work_svc_arrive, qmi_tmd_svc_arrive); + INIT_WORK(&tmd->work_svc_exit, qmi_tmd_svc_exit); + INIT_WORK(&tmd->work_rcv_msg, qmi_tmd_rcv_msg); + + ret = qmi_svc_event_notifier_register(TMD_SERVICE_ID_V01, + TMD_SERVICE_VERS_V01, + tmd->inst_id, + &tmd->nb); + if (ret < 0) { + dev_err(dev, "QMI register failed for 0x%x, ret:%d\n", + tmd->inst_id, ret); + goto probe_err; + } + } + + return 0; + +probe_err: + qmi_tmd_cleanup(); + return ret; +} + +static int qmi_device_remove(struct platform_device *pdev) +{ + qmi_tmd_cleanup(); + + return 0; +} + +static const struct of_device_id qmi_device_match[] = { + {.compatible = "qcom,qmi_cooling_devices"}, + {} +}; + +static struct platform_driver qmi_device_driver = { + .probe = qmi_device_probe, + .remove = qmi_device_remove, + .driver = { + .name = "QMI_CDEV_DRIVER", + .owner = THIS_MODULE, + .of_match_table = qmi_device_match, + }, +}; + +static int __init qmi_device_init(void) +{ + return platform_driver_register(&qmi_device_driver); +} +module_init(qmi_device_init); + +static void __exit qmi_device_exit(void) +{ + platform_driver_unregister(&qmi_device_driver); +} +module_exit(qmi_device_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("QTI QMI cooling device driver"); diff --git a/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c new file mode 100644 index 000000000000..af020eb1eaff --- /dev/null +++ b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.c @@ -0,0 +1,359 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include + +#include + +#include "thermal_mitigation_device_service_v01.h" + +static struct elem_info tmd_mitigation_dev_id_type_v01_ei[] = { + { + .data_type = QMI_STRING, + .elem_len = QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1, + .elem_size = sizeof(char), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof( + struct tmd_mitigation_dev_id_type_v01, + mitigation_dev_id), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info tmd_mitigation_dev_list_type_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof( + struct tmd_mitigation_dev_list_type_v01, + mitigation_dev_id), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof( + struct tmd_mitigation_dev_list_type_v01, + max_mitigation_level), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_get_mitigation_device_list_req_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_get_mitigation_device_list_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct tmd_get_mitigation_device_list_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct tmd_get_mitigation_device_list_resp_msg_v01, + mitigation_device_list_valid), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct tmd_get_mitigation_device_list_resp_msg_v01, + mitigation_device_list_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_TMD_MITIGATION_DEV_LIST_MAX_V01, + .elem_size = sizeof( + struct tmd_mitigation_dev_list_type_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct tmd_get_mitigation_device_list_resp_msg_v01, + mitigation_device_list), + .ei_array = tmd_mitigation_dev_list_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_set_mitigation_level_req_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct tmd_set_mitigation_level_req_msg_v01, + mitigation_dev_id), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = 
sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct tmd_set_mitigation_level_req_msg_v01, + mitigation_level), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_set_mitigation_level_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct tmd_set_mitigation_level_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_get_mitigation_level_req_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct tmd_get_mitigation_level_req_msg_v01, + mitigation_device), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_get_mitigation_level_resp_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct tmd_get_mitigation_level_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct tmd_get_mitigation_level_resp_msg_v01, + current_mitigation_level_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x10, + .offset = offsetof( + struct tmd_get_mitigation_level_resp_msg_v01, + current_mitigation_level), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct tmd_get_mitigation_level_resp_msg_v01, + requested_mitigation_level_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x11, + .offset = offsetof( + struct tmd_get_mitigation_level_resp_msg_v01, + requested_mitigation_level), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_register_notification_mitigation_level_req_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct tmd_register_notification_mitigation_level_req_msg_v01, + mitigation_device), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_register_notification_mitigation_level_resp_msg_v01_ei[] + = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct tmd_register_notification_mitigation_level_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info 
tmd_deregister_notification_mitigation_level_req_msg_v01_ei[] + = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct + tmd_deregister_notification_mitigation_level_req_msg_v01, + mitigation_device), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_deregister_notification_mitigation_level_resp_msg_v01_ei[] + = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct qmi_response_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof(struct + tmd_deregister_notification_mitigation_level_resp_msg_v01, + resp), + .ei_array = get_qmi_response_type_v01_ei(), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info tmd_mitigation_level_report_ind_msg_v01_ei[] = { + { + .data_type = QMI_STRUCT, + .elem_len = 1, + .elem_size = sizeof(struct tmd_mitigation_dev_id_type_v01), + .is_array = NO_ARRAY, + .tlv_type = 0x01, + .offset = offsetof( + struct tmd_mitigation_level_report_ind_msg_v01, + mitigation_device), + .ei_array = tmd_mitigation_dev_id_type_v01_ei, + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(uint8_t), + .is_array = NO_ARRAY, + .tlv_type = 0x02, + .offset = offsetof( + struct tmd_mitigation_level_report_ind_msg_v01, + current_mitigation_level), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + diff --git a/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h new file mode 100644 index 000000000000..c2d120193348 --- /dev/null +++ b/drivers/thermal/qcom/thermal_mitigation_device_service_v01.h @@ -0,0 +1,128 @@ +/* Copyright (c) 2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#ifndef THERMAL_MITIGATION_DEVICE_SERVICE_V01_H +#define THERMAL_MITIGATION_DEVICE_SERVICE_V01_H + +#define TMD_SERVICE_ID_V01 0x18 +#define TMD_SERVICE_VERS_V01 0x01 + +#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_RESP_V01 0x0020 +#define QMI_TMD_GET_MITIGATION_LEVEL_REQ_V01 0x0022 +#define QMI_TMD_GET_SUPPORTED_MSGS_REQ_V01 0x001E +#define QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01 0x0021 +#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0023 +#define QMI_TMD_GET_SUPPORTED_MSGS_RESP_V01 0x001E +#define QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01 0x0021 +#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_V01 0x0024 +#define QMI_TMD_MITIGATION_LEVEL_REPORT_IND_V01 0x0025 +#define QMI_TMD_GET_MITIGATION_LEVEL_RESP_V01 0x0022 +#define QMI_TMD_GET_SUPPORTED_FIELDS_REQ_V01 0x001F +#define QMI_TMD_GET_MITIGATION_DEVICE_LIST_REQ_V01 0x0020 +#define QMI_TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0023 +#define QMI_TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_V01 0x0024 +#define QMI_TMD_GET_SUPPORTED_FIELDS_RESP_V01 0x001F + +#define QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 32 +#define QMI_TMD_MITIGATION_DEV_LIST_MAX_V01 32 + +struct tmd_mitigation_dev_id_type_v01 { + char mitigation_dev_id[QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01 + 1]; +}; + +struct tmd_mitigation_dev_list_type_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id; + uint8_t max_mitigation_level; +}; + +struct tmd_get_mitigation_device_list_req_msg_v01 { + char placeholder; +}; +#define TMD_GET_MITIGATION_DEVICE_LIST_REQ_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info tmd_get_mitigation_device_list_req_msg_v01_ei[]; + +struct tmd_get_mitigation_device_list_resp_msg_v01 { + struct qmi_response_type_v01 resp; + uint8_t mitigation_device_list_valid; + uint32_t mitigation_device_list_len; + struct tmd_mitigation_dev_list_type_v01 + mitigation_device_list[QMI_TMD_MITIGATION_DEV_LIST_MAX_V01]; +}; +#define TMD_GET_MITIGATION_DEVICE_LIST_RESP_MSG_V01_MAX_MSG_LEN 1099 +extern struct elem_info tmd_get_mitigation_device_list_resp_msg_v01_ei[]; + +struct tmd_set_mitigation_level_req_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_dev_id; + uint8_t mitigation_level; +}; +#define TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 40 +extern struct elem_info tmd_set_mitigation_level_req_msg_v01_ei[]; + +struct tmd_set_mitigation_level_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; +#define TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info tmd_set_mitigation_level_resp_msg_v01_ei[]; + +struct tmd_get_mitigation_level_req_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_device; +}; +#define TMD_GET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36 +extern struct elem_info tmd_get_mitigation_level_req_msg_v01_ei[]; + +struct tmd_get_mitigation_level_resp_msg_v01 { + struct qmi_response_type_v01 resp; + uint8_t current_mitigation_level_valid; + uint8_t current_mitigation_level; + uint8_t requested_mitigation_level_valid; + uint8_t requested_mitigation_level; +}; +#define TMD_GET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 15 +extern struct elem_info tmd_get_mitigation_level_resp_msg_v01_ei[]; + +struct tmd_register_notification_mitigation_level_req_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_device; +}; +#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36 +extern struct elem_info + tmd_register_notification_mitigation_level_req_msg_v01_ei[]; + +struct tmd_register_notification_mitigation_level_resp_msg_v01 { 
+ struct qmi_response_type_v01 resp; +}; +#define TMD_REGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info + tmd_register_notification_mitigation_level_resp_msg_v01_ei[]; + +struct tmd_deregister_notification_mitigation_level_req_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_device; +}; +#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN 36 +extern struct elem_info + tmd_deregister_notification_mitigation_level_req_msg_v01_ei[]; + +struct tmd_deregister_notification_mitigation_level_resp_msg_v01 { + struct qmi_response_type_v01 resp; +}; +#define TMD_DEREGISTER_NOTIFICATION_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN 7 +extern struct elem_info + tmd_deregister_notification_mitigation_level_resp_msg_v01_ei[]; + +struct tmd_mitigation_level_report_ind_msg_v01 { + struct tmd_mitigation_dev_id_type_v01 mitigation_device; + uint8_t current_mitigation_level; +}; +#define TMD_MITIGATION_LEVEL_REPORT_IND_MSG_V01_MAX_MSG_LEN 40 +extern struct elem_info tmd_mitigation_level_report_ind_msg_v01_ei[]; + +#endif -- GitLab From 736c25cfb5ea5fb429d23f285f8de5ca79b14adb Mon Sep 17 00:00:00 2001 From: Mohammed Javid Date: Mon, 19 Jun 2017 13:23:18 +0530 Subject: [PATCH 749/786] ARM: dts: msm: Add IPA dtsi entries for sdm670 Add the sdm670 dtsi entries for ipa_hw, ipa_fws and rmnet-ipa. Change-Id: If7daa2537b27004ea24fc65ff2d18a681f0babe7 Acked-by: Ashok Vuyyuru Signed-off-by: Mohammed Javid --- arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi | 23 +++ arch/arm64/boot/dts/qcom/sdm670.dtsi | 164 +++++++++++++++++++++ 2 files changed, 187 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi index 022f705533f1..b790c048cee3 100644 --- a/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670-smp2p.dtsi @@ -199,4 +199,27 @@ interrupt-controller; #interrupt-cells = <2>; }; + + /* ipa - outbound entry to mss */ + smp2pgpio_ipa_1_out: qcom,smp2pgpio-ipa-1-out { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "ipa"; + qcom,remote-pid = <1>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; + + /* ipa - inbound entry from mss */ + smp2pgpio_ipa_1_in: qcom,smp2pgpio-ipa-1-in { + compatible = "qcom,smp2pgpio"; + qcom,entry-name = "ipa"; + qcom,remote-pid = <1>; + qcom,is-inbound; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi index b7df3201c614..b46be65e110f 100644 --- a/arch/arm64/boot/dts/qcom/sdm670.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi @@ -1108,6 +1108,170 @@ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>; status = "ok"; }; + + qcom,rmnet-ipa { + compatible = "qcom,rmnet-ipa3"; + qcom,rmnet-ipa-ssr; + qcom,ipa-loaduC; + qcom,ipa-advertise-sg-support; + qcom,ipa-napi-enable; + }; + + ipa_hw: qcom,ipa@01e00000 { + compatible = "qcom,ipa"; + reg = <0x1e00000 0x34000>, + <0x1e04000 0x2c000>; + reg-names = "ipa-base", "gsi-base"; + interrupts = + <0 311 0>, + <0 432 0>; + interrupt-names = "ipa-irq", "gsi-irq"; + qcom,ipa-hw-ver = <13>; /* IPA core version = IPAv3.5.1 */ + qcom,ipa-hw-mode = <1>; + qcom,ee = <0>; + qcom,use-ipa-tethering-bridge; + qcom,modem-cfg-emb-pipe-flt; + qcom,ipa-wdi2; + qcom,use-64-bit-dma-mask; + qcom,arm-smmu; + qcom,smmu-s1-bypass; + qcom,bandwidth-vote-for-ipa; + qcom,msm-bus,name = "ipa"; + qcom,msm-bus,num-cases = <4>; + 
qcom,msm-bus,num-paths = <4>; + qcom,msm-bus,vectors-KBps = + /* No vote */ + <90 512 0 0>, + <90 585 0 0>, + <1 676 0 0>, + <143 777 0 0>, + /* SVS */ + <90 512 80000 640000>, + <90 585 80000 640000>, + <1 676 80000 80000>, + <143 777 0 150000>, + /* NOMINAL */ + <90 512 206000 960000>, + <90 585 206000 960000>, + <1 676 206000 160000>, + <143 777 0 300000>, + /* TURBO */ + <90 512 206000 3600000>, + <90 585 206000 3600000>, + <1 676 206000 300000>, + <143 777 0 355333>; + qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO"; + + /* IPA RAM mmap */ + qcom,ipa-ram-mmap = < + 0x280 /* ofst_start; */ + 0x0 /* nat_ofst; */ + 0x0 /* nat_size; */ + 0x288 /* v4_flt_hash_ofst; */ + 0x78 /* v4_flt_hash_size; */ + 0x4000 /* v4_flt_hash_size_ddr; */ + 0x308 /* v4_flt_nhash_ofst; */ + 0x78 /* v4_flt_nhash_size; */ + 0x4000 /* v4_flt_nhash_size_ddr; */ + 0x388 /* v6_flt_hash_ofst; */ + 0x78 /* v6_flt_hash_size; */ + 0x4000 /* v6_flt_hash_size_ddr; */ + 0x408 /* v6_flt_nhash_ofst; */ + 0x78 /* v6_flt_nhash_size; */ + 0x4000 /* v6_flt_nhash_size_ddr; */ + 0xf /* v4_rt_num_index; */ + 0x0 /* v4_modem_rt_index_lo; */ + 0x7 /* v4_modem_rt_index_hi; */ + 0x8 /* v4_apps_rt_index_lo; */ + 0xe /* v4_apps_rt_index_hi; */ + 0x488 /* v4_rt_hash_ofst; */ + 0x78 /* v4_rt_hash_size; */ + 0x4000 /* v4_rt_hash_size_ddr; */ + 0x508 /* v4_rt_nhash_ofst; */ + 0x78 /* v4_rt_nhash_size; */ + 0x4000 /* v4_rt_nhash_size_ddr; */ + 0xf /* v6_rt_num_index; */ + 0x0 /* v6_modem_rt_index_lo; */ + 0x7 /* v6_modem_rt_index_hi; */ + 0x8 /* v6_apps_rt_index_lo; */ + 0xe /* v6_apps_rt_index_hi; */ + 0x588 /* v6_rt_hash_ofst; */ + 0x78 /* v6_rt_hash_size; */ + 0x4000 /* v6_rt_hash_size_ddr; */ + 0x608 /* v6_rt_nhash_ofst; */ + 0x78 /* v6_rt_nhash_size; */ + 0x4000 /* v6_rt_nhash_size_ddr; */ + 0x688 /* modem_hdr_ofst; */ + 0x140 /* modem_hdr_size; */ + 0x7c8 /* apps_hdr_ofst; */ + 0x0 /* apps_hdr_size; */ + 0x800 /* apps_hdr_size_ddr; */ + 0x7d0 /* modem_hdr_proc_ctx_ofst; */ + 0x200 /* modem_hdr_proc_ctx_size; */ + 0x9d0 /* apps_hdr_proc_ctx_ofst; */ + 0x200 /* apps_hdr_proc_ctx_size; */ + 0x0 /* apps_hdr_proc_ctx_size_ddr; */ + 0x0 /* modem_comp_decomp_ofst; diff */ + 0x0 /* modem_comp_decomp_size; diff */ + 0xbd8 /* modem_ofst; */ + 0x1024 /* modem_size; */ + 0x2000 /* apps_v4_flt_hash_ofst; */ + 0x0 /* apps_v4_flt_hash_size; */ + 0x2000 /* apps_v4_flt_nhash_ofst; */ + 0x0 /* apps_v4_flt_nhash_size; */ + 0x2000 /* apps_v6_flt_hash_ofst; */ + 0x0 /* apps_v6_flt_hash_size; */ + 0x2000 /* apps_v6_flt_nhash_ofst; */ + 0x0 /* apps_v6_flt_nhash_size; */ + 0x80 /* uc_info_ofst; */ + 0x200 /* uc_info_size; */ + 0x2000 /* end_ofst; */ + 0x2000 /* apps_v4_rt_hash_ofst; */ + 0x0 /* apps_v4_rt_hash_size; */ + 0x2000 /* apps_v4_rt_nhash_ofst; */ + 0x0 /* apps_v4_rt_nhash_size; */ + 0x2000 /* apps_v6_rt_hash_ofst; */ + 0x0 /* apps_v6_rt_hash_size; */ + 0x2000 /* apps_v6_rt_nhash_ofst; */ + 0x0 /* apps_v6_rt_nhash_size; */ + 0x1c00 /* uc_event_ring_ofst; */ + 0x400 /* uc_event_ring_size; */ + >; + + /* smp2p gpio information */ + qcom,smp2pgpio_map_ipa_1_out { + compatible = "qcom,smp2pgpio-map-ipa-1-out"; + gpios = <&smp2pgpio_ipa_1_out 0 0>; + }; + + qcom,smp2pgpio_map_ipa_1_in { + compatible = "qcom,smp2pgpio-map-ipa-1-in"; + gpios = <&smp2pgpio_ipa_1_in 0 0>; + }; + + ipa_smmu_ap: ipa_smmu_ap { + compatible = "qcom,ipa-smmu-ap-cb"; + iommus = <&apps_smmu 0x720 0x0>; + qcom,iova-mapping = <0x20000000 0x40000000>; + }; + + ipa_smmu_wlan: ipa_smmu_wlan { + compatible = "qcom,ipa-smmu-wlan-cb"; + iommus = <&apps_smmu 0x721 0x0>; + }; + + 
ipa_smmu_uc: ipa_smmu_uc { + compatible = "qcom,ipa-smmu-uc-cb"; + iommus = <&apps_smmu 0x722 0x0>; + qcom,iova-mapping = <0x40000000 0x20000000>; + }; + }; + + qcom,ipa_fws { + compatible = "qcom,pil-tz-generic"; + qcom,pas-id = <0xf>; + qcom,firmware-name = "ipa_fws"; + }; }; #include "sdm670-pinctrl.dtsi" -- GitLab From 56be4b83b9e35ffd0277b168af6791bbc6b1d36f Mon Sep 17 00:00:00 2001 From: Manaf Meethalavalappu Pallikunhi Date: Thu, 6 Jul 2017 19:50:11 +0530 Subject: [PATCH 750/786] defconfig: Enable remote cooling devices for sdm845 Enable remote QMI cooling devices for sdm845 to handle remote subsystem mitigations. Change-Id: Ia09f030027df603356bcd5c906467be87f2f3d52 Signed-off-by: Manaf Meethalavalappu Pallikunhi --- arch/arm64/configs/sdm845-perf_defconfig | 1 + arch/arm64/configs/sdm845_defconfig | 1 + 2 files changed, 2 insertions(+) diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig index 9ec1bebfc17d..9f9884119293 100644 --- a/arch/arm64/configs/sdm845-perf_defconfig +++ b/arch/arm64/configs/sdm845-perf_defconfig @@ -330,6 +330,7 @@ CONFIG_MSM_BCL_PERIPHERAL_CTL=y CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_QTI_REG_COOLING_DEVICE=y +CONFIG_QTI_QMI_COOLING_DEVICE=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_WCD9XXX_CODEC_CORE=y diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig index b8e54a907967..d9c73281fc5b 100644 --- a/arch/arm64/configs/sdm845_defconfig +++ b/arch/arm64/configs/sdm845_defconfig @@ -337,6 +337,7 @@ CONFIG_MSM_BCL_PERIPHERAL_CTL=y CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_QTI_REG_COOLING_DEVICE=y +CONFIG_QTI_QMI_COOLING_DEVICE=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y CONFIG_WCD9XXX_CODEC_CORE=y -- GitLab From 5849baefa2924590fc129df667a37142f05d6d18 Mon Sep 17 00:00:00 2001 From: Manaf Meethalavalappu Pallikunhi Date: Thu, 29 Jun 2017 15:47:17 +0530 Subject: [PATCH 751/786] ARM: dts: msm: Add QMI cooling devices for sdm845 Add remote QMI cooling devices like modem_pa, modem_proc, modem_current and vdd restriction for each remote subsystem including modem, adsp, cdsp and slpi for sdm845. 
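Each qcom,instance-id below (0x0 modem, 0x1 adsp, 0x43 cdsp, 0x53 slpi) selects the TMD QMI server instance the qmi_cooling driver connects to once that subsystem's service comes up. The connection step, condensed from qmi_tmd_svc_arrive() in the driver patch earlier in this series (the wrapper name here is illustrative):

	static int tmd_connect_instance(struct qmi_tmd_instance *tmd)
	{
		int ret;

		tmd->handle = qmi_handle_create(qmi_tmd_clnt_notify, tmd);
		if (!tmd->handle)
			return -ENOMEM;

		/* inst_id comes from the subsystem node's qcom,instance-id */
		ret = qmi_connect_to_service(tmd->handle, TMD_SERVICE_ID_V01,
					     TMD_SERVICE_VERS_V01,
					     tmd->inst_id);
		if (ret < 0) {
			qmi_handle_destroy(tmd->handle);
			tmd->handle = NULL;
		}
		return ret;
	}
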
Change-Id: Ibf93fa52baa0605edebb41ae5d4ce53d0f65e6a0 Signed-off-by: Manaf Meethalavalappu Pallikunhi --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 55 ++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 5208152d3ca8..5a6ad88d41f3 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -2964,6 +2964,61 @@ qcom,vdd-3.3-ch0-config = <3104000 3312000>; }; + qmi-tmd-devices { + compatible = "qcom,qmi_cooling_devices"; + + modem { + qcom,instance-id = <0x0>; + + modem_pa: modem_pa { + qcom,qmi-dev-name = "pa"; + #cooling-cells = <2>; + }; + + modem_proc: modem_proc { + qcom,qmi-dev-name = "modem"; + #cooling-cells = <2>; + }; + + modem_current: modem_current { + qcom,qmi-dev-name = "modem_current"; + #cooling-cells = <2>; + }; + + modem_vdd: modem_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + + adsp { + qcom,instance-id = <0x1>; + + adsp_vdd: adsp_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + + cdsp { + qcom,instance-id = <0x43>; + + cdsp_vdd: cdsp_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + + slpi { + qcom,instance-id = <0x53>; + + slpi_vdd: slpi_vdd { + qcom,qmi-dev-name = "cpuv_restriction_cold"; + #cooling-cells = <2>; + }; + }; + }; + thermal_zones: thermal-zones { aoss0-usr { polling-delay-passive = <0>; -- GitLab From 56f60de919c6824842443d13afd2500632258349 Mon Sep 17 00:00:00 2001 From: Ram Chandrasekar Date: Mon, 3 Jul 2017 16:26:18 -0600 Subject: [PATCH 752/786] ARM: dts: msm: Add remote subsystem mitigation for sdm845 Add modem, ADSP, CDSP and SLPI voltage restriction cooling devices to the existing vdd restriction thermal zone configuration for sdm845. 
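When one of these low-temperature trips activates a *_vdd cooling device, the request that reaches the remote subsystem is a TMD set-mitigation-level message for the "cpuv_restriction_cold" device. A condensed sketch of that transaction, taken from qmi_tmd_send_state_request() in the qmi_cooling driver added earlier in this series, with the mutex and error logging omitted and an illustrative function name:

	static int send_cpuv_restriction(struct qmi_tmd_instance *tmd,
					 uint8_t level)
	{
		struct tmd_set_mitigation_level_req_msg_v01 req = { 0 };
		struct tmd_set_mitigation_level_resp_msg_v01 resp = { 0 };
		struct msg_desc req_desc, resp_desc;

		strlcpy(req.mitigation_dev_id.mitigation_dev_id,
			"cpuv_restriction_cold",
			QMI_TMD_MITIGATION_DEV_ID_LENGTH_MAX_V01);
		req.mitigation_level = level;

		req_desc.max_msg_len =
			TMD_SET_MITIGATION_LEVEL_REQ_MSG_V01_MAX_MSG_LEN;
		req_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_REQ_V01;
		req_desc.ei_array = tmd_set_mitigation_level_req_msg_v01_ei;

		resp_desc.max_msg_len =
			TMD_SET_MITIGATION_LEVEL_RESP_MSG_V01_MAX_MSG_LEN;
		resp_desc.msg_id = QMI_TMD_SET_MITIGATION_LEVEL_RESP_V01;
		resp_desc.ei_array = tmd_set_mitigation_level_resp_msg_v01_ei;

		return qmi_send_req_wait(tmd->handle, &req_desc, &req,
					 sizeof(req), &resp_desc, &resp,
					 sizeof(resp), QMI_TMD_RESP_TOUT_MSEC);
	}
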
Change-Id: I7f42b6c753ff42c7f81ed533b40b06e393cd7c07 Signed-off-by: Ram Chandrasekar --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 336 +++++++++++++++++++++++++++ 1 file changed, 336 insertions(+) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 5a6ad88d41f3..8bf42b11bcac 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -3878,6 +3878,22 @@ trip = <&aoss0_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&aoss0_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -3919,6 +3935,22 @@ trip = <&cpu0_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&cpu0_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -3960,6 +3992,22 @@ trip = <&cpu1_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&cpu1_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4001,6 +4049,22 @@ trip = <&cpu2_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&cpu2_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4042,6 +4106,22 @@ trip = <&cpu3_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&cpu3_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4083,6 +4163,22 @@ trip = <&l3_0_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&l3_0_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4124,6 +4220,22 @@ trip = <&l3_1_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&l3_1_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4165,6 +4277,22 @@ trip = <&cpug0_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&cpug0_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpug0_trip>; + 
cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpug0_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&cpug0_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4206,6 +4334,22 @@ trip = <&cpug1_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&cpug1_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4247,6 +4391,22 @@ trip = <&cpug2_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&cpug2_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4288,6 +4448,22 @@ trip = <&cpug3_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&cpug3_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4329,6 +4505,22 @@ trip = <&gpu0_trip_l>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&gpu0_trip_l>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4370,6 +4562,22 @@ trip = <&gpu1_trip_l>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&gpu1_trip_l>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4411,6 +4619,22 @@ trip = <&aoss1_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&aoss1_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4452,6 +4676,22 @@ trip = <&dsp_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&dsp_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4493,6 +4733,22 @@ trip = <&ddr_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&ddr_trip>; + cooling-device = 
<&slpi_vdd 0 0>; + }; }; }; @@ -4534,6 +4790,22 @@ trip = <&wlan_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&wlan_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&wlan_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&wlan_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&wlan_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4575,6 +4847,22 @@ trip = <&hvx_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&hvx_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4616,6 +4904,22 @@ trip = <&camera_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&camera_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4657,6 +4961,22 @@ trip = <&mmss_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&mmss_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; @@ -4698,6 +5018,22 @@ trip = <&mdm_trip>; cooling-device = <&ebi_cdev 0 0>; }; + modem_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&modem_vdd 0 0>; + }; + adsp_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&adsp_vdd 0 0>; + }; + cdsp_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&cdsp_vdd 0 0>; + }; + slpi_vdd_cdev { + trip = <&mdm_trip>; + cooling-device = <&slpi_vdd 0 0>; + }; }; }; }; -- GitLab From 850be3ca1211f73364fe2b8800832f1241393689 Mon Sep 17 00:00:00 2001 From: Maheshwar Ajja Date: Thu, 6 Jul 2017 17:47:40 -0700 Subject: [PATCH 753/786] msm: vidc: Update controls and output buffers list check Update V4l2 controls with appropriate default values and add output buffers list check to avoid unnecessary logging. 
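The control update below relies on the last parameter of v4l2_ctrl_modify_range() being the control's new default value; the V4L2 controls framework declares it approximately as shown here, so passing ctrl->default_value preserves the original default instead of forcing it to the new minimum:

	/* Last argument is the new default for the control. */
	int v4l2_ctrl_modify_range(struct v4l2_ctrl *ctrl,
				   s64 min, s64 max, u64 step, s64 def);
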
CRs-Fixed: 2004899 Change-Id: I5cdc18a8b3b806091dd498e2756ac87bf10ae444 Signed-off-by: Maheshwar Ajja --- drivers/media/platform/msm/vidc/msm_venc.c | 17 +++++++++++++++++ .../media/platform/msm/vidc/msm_vidc_common.c | 8 +++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c index e2ea2bc124a5..90a38bb3d5a3 100644 --- a/drivers/media/platform/msm/vidc/msm_venc.c +++ b/drivers/media/platform/msm/vidc/msm_venc.c @@ -1897,6 +1897,23 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl) vui_timing_info.time_scale = NSEC_PER_SEC; break; } + case V4L2_CID_MPEG_VIDC_VIDEO_LTRMODE: + case V4L2_CID_MPEG_VIDC_VIDEO_LTRCOUNT: + case V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_WIDTH: + case V4L2_CID_MPEG_VIDC_VENC_PARAM_SAR_HEIGHT: + case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH: + case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_HEIGHT: + case V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID: + case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE: + case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN: + case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN: + case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN: + case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX: + case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX: + case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX: + dprintk(VIDC_DBG, "Set the control : %#x using ext ctrl\n", + ctrl->id); + break; default: dprintk(VIDC_ERR, "Unsupported index: %x\n", ctrl->id); rc = -ENOTSUPP; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index b103d735185b..902448ca9992 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -1246,7 +1246,7 @@ static int msm_vidc_comm_update_ctrl(struct msm_vidc_inst *inst, if (ctrl) { v4l2_ctrl_modify_range(ctrl, capability->min, capability->max, ctrl->step, - capability->min); + ctrl->default_value); dprintk(VIDC_DBG, "%s: Updated Range = %lld --> %lld Def value = %lld\n", ctrl->name, ctrl->minimum, ctrl->maximum, @@ -1783,6 +1783,12 @@ void msm_comm_validate_output_buffers(struct msm_vidc_inst *inst) return; } mutex_lock(&inst->outputbufs.lock); + if (list_empty(&inst->outputbufs.list)) { + dprintk(VIDC_DBG, "%s: no OUTPUT buffers allocated\n", + __func__); + mutex_unlock(&inst->outputbufs.lock); + return; + } list_for_each_entry(binfo, &inst->outputbufs.list, list) { if (binfo->buffer_ownership != DRIVER) { dprintk(VIDC_DBG, -- GitLab From 89029271fb86e50e008bf5abba92e54a029c7988 Mon Sep 17 00:00:00 2001 From: Maheshwar Ajja Date: Fri, 23 Jun 2017 17:16:44 -0700 Subject: [PATCH 754/786] msm: vidc: Don't kill the session in response handler thread Kill session will call abort to video hardware and waits for abort response. So calling kill session from within response handler thread would always results in wait timeout which in turn causes session error manipulated to sys error. So don't kill the session but send error client and client will close the session. 
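The failure mode being removed is a self-wait: the abort issued by the kill path can only be acknowledged by the same response-handler thread that is blocked waiting for it. A schematic of that pattern, with hypothetical helper names rather than the msm_vidc call chain:

	#include <linux/completion.h>
	#include <linux/jiffies.h>

	static DECLARE_COMPLETION(abort_done);

	/* Stand-in for issuing an HFI session abort to the firmware. */
	static void send_abort_to_hw(void) { }

	static void response_handler_thread(void)
	{
		/*
		 * Killing the session from here blocks the only thread
		 * that could later process the abort-done response and
		 * call complete(&abort_done), so this wait always times
		 * out.
		 */
		send_abort_to_hw();
		wait_for_completion_timeout(&abort_done,
					    msecs_to_jiffies(1000));
	}
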
CRs-Fixed: 2071607 Change-Id: Ic7b57724d57eaf84e90512a732fd9cfb05281b1b Signed-off-by: Maheshwar Ajja --- drivers/media/platform/msm/vidc/msm_vidc_common.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index b103d735185b..ec9777b479ce 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -5076,7 +5076,6 @@ int msm_vidc_check_session_supported(struct msm_vidc_inst *inst) rc = msm_vidc_load_supported(inst); if (rc) { change_inst_state(inst, MSM_VIDC_CORE_INVALID); - msm_comm_kill_session(inst); dprintk(VIDC_WARN, "%s: Hardware is overloaded\n", __func__); return rc; @@ -5126,7 +5125,6 @@ int msm_vidc_check_session_supported(struct msm_vidc_inst *inst) } if (rc) { change_inst_state(inst, MSM_VIDC_CORE_INVALID); - msm_comm_kill_session(inst); dprintk(VIDC_ERR, "%s: Resolution unsupported\n", __func__); } -- GitLab From 69639bd499d98ee0b77bfff8cb6e873507f22d69 Mon Sep 17 00:00:00 2001 From: Maheshwar Ajja Date: Wed, 5 Jul 2017 17:58:15 -0700 Subject: [PATCH 755/786] msm: vidc: Fix memory leak and array abounds exceeding issues Fix memory leak in error case and check for array bounds exceeding issues to avoid video failures. CRs-Fixed: 2004899 Change-Id: I723dc07f6e58651a3f669095dc23984741b6c295 Signed-off-by: Maheshwar Ajja --- .../msm/vidc/governors/msm_vidc_dyn_gov.c | 5 +++++ .../media/platform/msm/vidc/msm_vidc_common.c | 18 ++++++++++-------- .../platform/msm/vidc/msm_vidc_res_parse.c | 16 ++++++++-------- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c index 9daf0535aaa8..9f8b341c60aa 100644 --- a/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c +++ b/drivers/media/platform/msm/vidc/governors/msm_vidc_dyn_gov.c @@ -827,6 +827,11 @@ static unsigned long __calculate(struct vidc_bus_vote_data *d, [HAL_VIDEO_DOMAIN_DECODER] = __calculate_decoder, }; + if (d->domain >= ARRAY_SIZE(calc)) { + dprintk(VIDC_ERR, "%s: invalid domain %d\n", + __func__, d->domain); + return 0; + } return calc[d->domain](d, gm); } diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c index b103d735185b..8c4b30b13390 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_common.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c @@ -131,8 +131,10 @@ static struct v4l2_ctrl **get_super_cluster(struct msm_vidc_inst *inst, struct v4l2_ctrl **cluster = kmalloc(sizeof(struct v4l2_ctrl *) * num_ctrls, GFP_KERNEL); - if (!cluster || !inst) + if (!cluster || !inst) { + kfree(cluster); return NULL; + } for (c = 0; c < num_ctrls; c++) cluster[c] = inst->ctrls[c]; @@ -985,16 +987,16 @@ static void handle_sys_init_done(enum hal_command_response cmd, void *data) complete(&(core->completions[index])); } -static void put_inst(struct msm_vidc_inst *inst) +static void put_inst_helper(struct kref *kref) { - void put_inst_helper(struct kref *kref) - { - struct msm_vidc_inst *inst = container_of(kref, - struct msm_vidc_inst, kref); + struct msm_vidc_inst *inst = container_of(kref, + struct msm_vidc_inst, kref); - msm_vidc_destroy(inst); - } + msm_vidc_destroy(inst); +} +static void put_inst(struct msm_vidc_inst *inst) +{ if (!inst) return; diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c 
b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c index afb88936329b..845cb154e98e 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c @@ -371,20 +371,20 @@ static int msm_vidc_load_platform_version_table( return 0; } +/* A comparator to compare loads (needed later on) */ +static int cmp(const void *a, const void *b) +{ + /* want to sort in reverse so flip the comparison */ + return ((struct allowed_clock_rates_table *)b)->clock_rate - + ((struct allowed_clock_rates_table *)a)->clock_rate; +} + static int msm_vidc_load_allowed_clocks_table( struct msm_vidc_platform_resources *res) { int rc = 0; struct platform_device *pdev = res->pdev; - /* A comparator to compare loads (needed later on) */ - int cmp(const void *a, const void *b) - { - /* want to sort in reverse so flip the comparison */ - return ((struct allowed_clock_rates_table *)b)->clock_rate - - ((struct allowed_clock_rates_table *)a)->clock_rate; - } - if (!of_find_property(pdev->dev.of_node, "qcom,allowed-clock-rates", NULL)) { dprintk(VIDC_DBG, "qcom,allowed-clock-rates not found\n"); -- GitLab From 6d5bf543bf2bee1bc08827ef724e2840e0721ac8 Mon Sep 17 00:00:00 2001 From: Ping Li Date: Tue, 27 Jun 2017 11:40:28 -0700 Subject: [PATCH 756/786] drm: msm: Update and correct AD setting for suspend/resume Mark AD active property to dirty in suspend case so that those AD properties can be re-programed after device resumes. This change also corrects the code for AD interrupt and customer event handling. Change-Id: Iecf1a6ea8def5bd169e8ad040f8320176eed6904 Signed-off-by: Ping Li --- .../gpu/drm/msm/sde/sde_color_processing.c | 8 +++++-- drivers/gpu/drm/msm/sde/sde_crtc.c | 22 +++++++++---------- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c index 9c139917a450..e999a6ac3c19 100644 --- a/drivers/gpu/drm/msm/sde/sde_color_processing.c +++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c @@ -1033,6 +1033,12 @@ void sde_cp_crtc_suspend(struct drm_crtc *crtc) sde_cp_update_list(prop_node, sde_crtc, true); list_del_init(&prop_node->active_list); } + + list_for_each_entry_safe(prop_node, n, &sde_crtc->ad_active, + active_list) { + sde_cp_update_list(prop_node, sde_crtc, true); + list_del_init(&prop_node->active_list); + } } void sde_cp_crtc_resume(struct drm_crtc *crtc) @@ -1368,7 +1374,6 @@ int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en, return -EINVAL; } - mutex_lock(&crtc->crtc_lock); kms = get_kms(crtc_drm); num_mixers = crtc->num_mixers; @@ -1422,6 +1427,5 @@ int sde_cp_ad_interrupt(struct drm_crtc *crtc_drm, bool en, sde_core_irq_unregister_callback(kms, irq_idx, ad_irq); } exit: - mutex_unlock(&crtc->crtc_lock); return ret; } diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 923297f96082..3d28bd029949 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -2514,6 +2514,17 @@ static void sde_crtc_disable(struct drm_crtc *crtc) atomic_set(&sde_crtc->frame_pending, 0); } + spin_lock_irqsave(&sde_crtc->spin_lock, flags); + list_for_each_entry(node, &sde_crtc->user_event_list, list) { + ret = 0; + if (node->func) + ret = node->func(crtc, false, &node->irq); + if (ret) + SDE_ERROR("%s failed to disable event %x\n", + sde_crtc->name, node->event); + } + spin_unlock_irqrestore(&sde_crtc->spin_lock, flags); + sde_core_perf_crtc_update(crtc, 0, true); 
drm_for_each_encoder(encoder, crtc->dev) { @@ -2535,17 +2546,6 @@ static void sde_crtc_disable(struct drm_crtc *crtc) cstate->bw_control = false; cstate->bw_split_vote = false; - spin_lock_irqsave(&sde_crtc->spin_lock, flags); - list_for_each_entry(node, &sde_crtc->user_event_list, list) { - ret = 0; - if (node->func) - ret = node->func(crtc, false, &node->irq); - if (ret) - SDE_ERROR("%s failed to disable event %x\n", - sde_crtc->name, node->event); - } - spin_unlock_irqrestore(&sde_crtc->spin_lock, flags); - mutex_unlock(&sde_crtc->crtc_lock); } -- GitLab From 99131f0c035526c216d62e6dd96a2ee78c547eda Mon Sep 17 00:00:00 2001 From: Yuanyuan Liu Date: Thu, 6 Jul 2017 13:58:33 -0700 Subject: [PATCH 757/786] cnss_utils: Zero out cnss_utils priv structure during initalization Zero out cnss_utils priv structure during initalization to make sure it starts cleanly. CRs-Fixed: 2072421 Change-Id: I72caa5fb7e1df16b018165d277cd3a404b81c304 Signed-off-by: Yuanyuan Liu --- drivers/net/wireless/cnss_utils/cnss_utils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/cnss_utils/cnss_utils.c b/drivers/net/wireless/cnss_utils/cnss_utils.c index a452900868c4..d73846efbc4c 100644 --- a/drivers/net/wireless/cnss_utils/cnss_utils.c +++ b/drivers/net/wireless/cnss_utils/cnss_utils.c @@ -283,7 +283,7 @@ static int __init cnss_utils_init(void) { struct cnss_utils_priv *priv = NULL; - priv = kmalloc(sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; -- GitLab From 8e20679827ef58f2bfc0c09384fd42bfdbf14e29 Mon Sep 17 00:00:00 2001 From: Sudarshan Rajagopalan Date: Wed, 28 Jun 2017 17:45:57 -0700 Subject: [PATCH 758/786] msm: secure_buffer: Add SPSS_SP_SHARED vmid Add support for SPSS_SP_SHARED vmid. This vmid is equivalent to TZ VM, which can be used for sharing memory with TZ with the assign call. 
Change-Id: I10b98820796e8bab7b8275f5ddf44e8814dc0fb0 Signed-off-by: Sudarshan Rajagopalan --- drivers/soc/qcom/secure_buffer.c | 2 ++ drivers/staging/android/ion/msm/msm_ion.c | 5 ++++- drivers/staging/android/uapi/msm_ion.h | 1 + include/soc/qcom/secure_buffer.h | 1 + 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c index 2791d7229ba8..63cce5cae864 100644 --- a/drivers/soc/qcom/secure_buffer.c +++ b/drivers/soc/qcom/secure_buffer.c @@ -414,6 +414,8 @@ const char *msm_secure_vmid_to_string(int secure_vmid) return "VMID_CP_CAMERA_PREVIEW"; case VMID_CP_SPSS_SP: return "VMID_CP_SPSS_SP"; + case VMID_CP_SPSS_SP_SHARED: + return "VMID_CP_SPSS_SP_SHARED"; case VMID_INVAL: return "VMID_INVAL"; default: diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c index e6af81ddd306..52926f0f6ee9 100644 --- a/drivers/staging/android/ion/msm/msm_ion.c +++ b/drivers/staging/android/ion/msm/msm_ion.c @@ -620,7 +620,8 @@ bool is_secure_vmid_valid(int vmid) vmid == VMID_CP_SEC_DISPLAY || vmid == VMID_CP_APP || vmid == VMID_CP_CAMERA_PREVIEW || - vmid == VMID_CP_SPSS_SP); + vmid == VMID_CP_SPSS_SP || + vmid == VMID_CP_SPSS_SP_SHARED); } unsigned int count_set_bits(unsigned long val) @@ -666,6 +667,8 @@ int get_secure_vmid(unsigned long flags) return VMID_CP_CAMERA_PREVIEW; if (flags & ION_FLAG_CP_SPSS_SP) return VMID_CP_SPSS_SP; + if (flags & ION_FLAG_CP_SPSS_SP_SHARED) + return VMID_CP_SPSS_SP_SHARED; return -EINVAL; } /* fix up the cases where the ioctl direction bits are incorrect */ diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h index 40dbfb00cac5..7381ee9779da 100644 --- a/drivers/staging/android/uapi/msm_ion.h +++ b/drivers/staging/android/uapi/msm_ion.h @@ -85,6 +85,7 @@ enum cp_mem_usage { #define ION_FLAG_CP_CAMERA ION_BIT(21) #define ION_FLAG_CP_HLOS ION_BIT(22) #define ION_FLAG_CP_SPSS_SP ION_BIT(23) +#define ION_FLAG_CP_SPSS_SP_SHARED ION_BIT(24) #define ION_FLAG_CP_SEC_DISPLAY ION_BIT(25) #define ION_FLAG_CP_APP ION_BIT(26) #define ION_FLAG_CP_CAMERA_PREVIEW ION_BIT(27) diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h index ac468ba1567f..a08cfe1a7f18 100644 --- a/include/soc/qcom/secure_buffer.h +++ b/include/soc/qcom/secure_buffer.h @@ -39,6 +39,7 @@ enum vmid { VMID_WLAN_CE = 0x19, VMID_CP_SPSS_SP = 0x1A, VMID_CP_CAMERA_PREVIEW = 0x1D, + VMID_CP_SPSS_SP_SHARED = 0x22, VMID_LAST, VMID_INVAL = -1 }; -- GitLab From eb843726f6e71193aab448f2d6372183d657d693 Mon Sep 17 00:00:00 2001 From: Vikram Mulukutla Date: Thu, 6 Jul 2017 10:05:52 -0700 Subject: [PATCH 759/786] cpufreq: schedutil: Fix sugov_start versus sugov_update_shared race With a shared policy in place, when one of the CPUs in the policy is hotplugged out and then brought back online, sugov_stop and sugov_start are called in order. sugov_stop removes utilization hooks for each CPU in the policy and does nothing else in the for_each_cpu loop. sugov_start on the other hand iterates through the CPUs in the policy and re-initializes the per-cpu structure _and_ adds the utilization hook. This implies that the scheduler is allowed to invoke a CPU's utilization update hook when the rest of the per-cpu structures have yet to be re-inited. 
Apart from some strange values in tracepoints this doesn't cause a problem, but if we do end up accessing a pointer from the per-cpu sugov_cpu structure somewhere in the sugov_update_shared path, we will likely see crashes since the memset for another CPU in the policy is free to race with sugov_update_shared from the CPU that is ready to go. So let's fix this now to first init all per-cpu structures, and then add the per-cpu utilization update hooks all at once. Change-Id: I399e0e159b3db3ae3258843c9231f92312fe18ef Signed-off-by: Vikram Mulukutla --- kernel/sched/cpufreq_schedutil.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 0a0e9aa9526a..1250861583f4 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -695,6 +695,11 @@ static int sugov_start(struct cpufreq_policy *policy) sg_cpu->cpu = cpu; sg_cpu->flags = SCHED_CPUFREQ_RT; sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; + } + + for_each_cpu(cpu, policy->cpus) { + struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); + cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, policy_is_shared(policy) ? sugov_update_shared : -- GitLab From f077287c10e5d779177d61ab38f05e04f7485962 Mon Sep 17 00:00:00 2001 From: Skylar Chang Date: Thu, 6 Jul 2017 16:11:01 -0700 Subject: [PATCH 760/786] msm: ipa4: disable pa_mask_en IPA 4.0 HW requires a SW workaround to disable pa_mask_en bit in IPA_TX_CFG register in order to allow holb drop when configured on endpoint. Change-Id: I51436f706d2ca65a36fadbf1599da0c4404aeae1 CRs-Fixed: 2072492 Acked-by: Ady Abraham Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c index 3d1af57b8c27..aa387dae81dd 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c @@ -2056,8 +2056,15 @@ int ipa3_init_hw(void) ipahal_write_reg(IPA_BCR, val); - if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) + if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) { + struct ipahal_reg_tx_cfg cfg; + ipahal_write_reg(IPA_CLKON_CFG, IPA_CLKON_CFG_v4_0); + ipahal_read_reg_fields(IPA_TX_CFG, &cfg); + /* disable PA_MASK_EN to allow holb drop */ + cfg.pa_mask_en = 0; + ipahal_write_reg_fields(IPA_TX_CFG, &cfg); + } ipa3_cfg_qsb(); -- GitLab From d8e965284cb41931be5e5bbc652c8224e05c9089 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Mon, 13 Feb 2017 10:14:16 -0700 Subject: [PATCH 761/786] drm/msm: get an iova from the address space instead of an id In the future we won't have a fixed set of addresses spaces. Instead of going through the effort of assigning a ID for each address space just use the address space itself as a token for getting / putting an iova. This forces a few changes in the gem object however: instead of using a simple index into a list of domains, we need to maintain a list of them. Luckily the list will be pretty small; even with dynamic address spaces we wouldn't ever see more than two or three. 
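To make the data-structure change concrete, here is a small self-contained C sketch of the idea: the address-space pointer itself is the lookup key, and each buffer object carries a short list of (aspace, iova) pairs. The names here (struct mapping, obj_get_iova, and so on) are illustrative only; the actual patch keeps msm_gem_vma entries on a kernel list_head, as the diff below shows.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct aspace { const char *name; };    /* stands in for an address space */

    struct mapping {
            const struct aspace *aspace;    /* which aspace this iova belongs to */
            uint32_t iova;
            struct mapping *next;
    };

    struct buffer_object {
            struct mapping *mappings;       /* typically only two or three entries */
    };

    /* Look a mapping up by the address-space pointer, not by a fixed ID. */
    static struct mapping *obj_get_mapping(struct buffer_object *obj,
                                           const struct aspace *aspace)
    {
            struct mapping *m;

            for (m = obj->mappings; m; m = m->next)
                    if (m->aspace == aspace)
                            return m;
            return NULL;
    }

    /* Return the existing iova for this aspace, or record a new one on first use. */
    static uint32_t obj_get_iova(struct buffer_object *obj,
                                 const struct aspace *aspace, uint32_t new_iova)
    {
            struct mapping *m = obj_get_mapping(obj, aspace);

            if (m)
                    return m->iova;

            m = calloc(1, sizeof(*m));
            if (!m)
                    return 0;
            m->aspace = aspace;
            m->iova = new_iova;
            m->next = obj->mappings;
            obj->mappings = m;
            return m->iova;
    }

    int main(void)
    {
            struct aspace gpu = { "gpu" }, mdp = { "mdp" };
            struct buffer_object obj = { 0 };

            printf("gpu iova: 0x%x\n", (unsigned)obj_get_iova(&obj, &gpu, 0x1000));
            printf("mdp iova: 0x%x\n", (unsigned)obj_get_iova(&obj, &mdp, 0x2000));
            /* A second lookup in the gpu aspace reuses the existing mapping. */
            printf("gpu iova again: 0x%x\n", (unsigned)obj_get_iova(&obj, &gpu, 0xdead));
            return 0;
    }

This pointer-as-token approach is what allows msm_gem_get_iova()/msm_gem_put_iova() in the patch to take a struct msm_gem_address_space * instead of an int id.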
CRs-Fixed: 2050484 Change-Id: Ic0dedbad4495f02a21135217f3605b93f8b8dfea Signed-off-by: Jordan Crouse Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 7 +- drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c | 31 +++- drivers/gpu/drm/msm/dsi-staging/dsi_display.c | 15 +- drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 7 +- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 11 +- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 2 - drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 12 +- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 4 +- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 7 - drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 1 - drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 12 +- drivers/gpu/drm/msm/msm_drv.c | 38 +++-- drivers/gpu/drm/msm/msm_drv.h | 28 ++-- drivers/gpu/drm/msm/msm_fb.c | 15 +- drivers/gpu/drm/msm/msm_gem.c | 141 +++++++++++++----- drivers/gpu/drm/msm/msm_gem.h | 4 +- drivers/gpu/drm/msm/msm_gem_submit.c | 4 +- drivers/gpu/drm/msm/msm_gpu.c | 11 +- drivers/gpu/drm/msm/msm_gpu.h | 1 - drivers/gpu/drm/msm/msm_kms.h | 4 + drivers/gpu/drm/msm/sde/sde_connector.c | 32 ++-- drivers/gpu/drm/msm/sde/sde_connector.h | 6 +- drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 4 +- drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c | 39 +++-- drivers/gpu/drm/msm/sde/sde_formats.c | 18 +-- drivers/gpu/drm/msm/sde/sde_formats.h | 9 +- drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c | 14 +- drivers/gpu/drm/msm/sde/sde_kms.c | 45 ++++-- drivers/gpu/drm/msm/sde/sde_kms.h | 1 - drivers/gpu/drm/msm/sde/sde_plane.c | 38 +++-- drivers/gpu/drm/msm/sde/sde_plane.h | 4 +- 31 files changed, 360 insertions(+), 205 deletions(-) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index b468d2a2cdeb..961d47fa686f 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu) DBG("%s", gpu->name); - ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova); + ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova); if (ret) { gpu->rb_iova = 0; dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret); @@ -406,7 +406,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, return -ENOMEM; } - ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id, + ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace, &adreno_gpu->memptrs_iova); if (ret) { dev_err(drm->dev, "could not map memptrs: %d\n", ret); @@ -423,8 +423,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu) msm_gem_put_vaddr(gpu->memptrs_bo); if (gpu->memptrs_iova) - msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id); - + msm_gem_put_iova(gpu->memptrs_bo, gpu->base.aspace); drm_gem_object_unreference_unlocked(gpu->memptrs_bo); } release_firmware(gpu->pm4); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index e2a348d9f147..b2aef9cb7c1e 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -24,6 +24,7 @@ #include "msm_drv.h" #include "msm_kms.h" #include "msm_gpu.h" +#include "msm_mmu.h" #include "dsi_ctrl.h" #include "dsi_ctrl_hw.h" #include "dsi_clk.h" @@ -252,6 +253,16 @@ static int dsi_ctrl_debugfs_deinit(struct dsi_ctrl *dsi_ctrl) return 0; } +static inline struct msm_gem_address_space* +dsi_ctrl_get_aspace(struct dsi_ctrl *dsi_ctrl, + int domain) +{ + if (!dsi_ctrl || !dsi_ctrl->drm_dev) + return NULL; + + return msm_gem_smmu_address_space_get(dsi_ctrl->drm_dev, domain); +} + static int 
dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl, enum dsi_ctrl_driver_ops op, u32 op_state) @@ -1170,8 +1181,17 @@ static int dsi_ctrl_drv_state_init(struct dsi_ctrl *dsi_ctrl) static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl) { + struct msm_gem_address_space *aspace = NULL; + if (dsi_ctrl->tx_cmd_buf) { - msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, 0); + aspace = dsi_ctrl_get_aspace(dsi_ctrl, + MSM_SMMU_DOMAIN_UNSECURE); + if (!aspace) { + pr_err("failed to get address space\n"); + return -ENOMEM; + } + + msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, aspace); msm_gem_free_object(dsi_ctrl->tx_cmd_buf); dsi_ctrl->tx_cmd_buf = NULL; @@ -1184,6 +1204,13 @@ int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl) { int rc = 0; u32 iova = 0; + struct msm_gem_address_space *aspace = NULL; + + aspace = dsi_ctrl_get_aspace(dsi_ctrl, MSM_SMMU_DOMAIN_UNSECURE); + if (!aspace) { + pr_err("failed to get address space\n"); + return -ENOMEM; + } dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev, SZ_4K, @@ -1198,7 +1225,7 @@ int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl) dsi_ctrl->cmd_buffer_size = SZ_4K; - rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, 0, &iova); + rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, aspace, &iova); if (rc) { pr_err("failed to get iova, rc=%d\n", rc); (void)dsi_ctrl_buffer_deinit(dsi_ctrl); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index 52b1dcbec664..b61bfde4e201 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -20,6 +20,7 @@ #include "msm_drv.h" #include "sde_connector.h" +#include "msm_mmu.h" #include "dsi_display.h" #include "dsi_panel.h" #include "dsi_ctrl.h" @@ -1321,6 +1322,7 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, { struct dsi_display *display = to_dsi_display(host); struct dsi_display_ctrl *display_ctrl; + struct msm_gem_address_space *aspace = NULL; int rc = 0, cnt = 0; if (!host || !msg) { @@ -1363,7 +1365,16 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, pr_err("value of display->tx_cmd_buf is NULL"); goto error_disable_cmd_engine; } - rc = msm_gem_get_iova(display->tx_cmd_buf, 0, + + aspace = msm_gem_smmu_address_space_get(display->drm_dev, + MSM_SMMU_DOMAIN_UNSECURE); + if (!aspace) { + pr_err("failed to get aspace\n"); + rc = -EINVAL; + goto free_gem; + } + + rc = msm_gem_get_iova(display->tx_cmd_buf, aspace, &(display->cmd_buffer_iova)); if (rc) { pr_err("failed to get the iova rc %d\n", rc); @@ -1419,7 +1430,7 @@ static ssize_t dsi_host_transfer(struct mipi_dsi_host *host, } return rc; put_iova: - msm_gem_put_iova(display->tx_cmd_buf, 0); + msm_gem_put_iova(display->tx_cmd_buf, aspace); free_gem: msm_gem_free_object(display->tx_cmd_buf); error: diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 9527dafc3e69..75e98dc5c0e8 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -133,7 +133,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) container_of(work, struct mdp4_crtc, unref_cursor_work); struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base); - msm_gem_put_iova(val, mdp4_kms->id); + msm_gem_put_iova(val, mdp4_kms->aspace); drm_gem_object_unreference_unlocked(val); } @@ -378,7 +378,8 @@ static void update_cursor(struct drm_crtc *crtc) if (next_bo) { /* take a obj ref + iova ref when we start scanning out: */ drm_gem_object_reference(next_bo); - 
msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova); + msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace, + &iova); /* enable cursor: */ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma), @@ -435,7 +436,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, } if (cursor_bo) { - ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova); + ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova); if (ret) goto fail; } else { diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 80b49a1f88a0..acee5da6a3c1 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -184,7 +184,7 @@ static void mdp4_destroy(struct msm_kms *kms) } if (mdp4_kms->blank_cursor_iova) - msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); + msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace); drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); if (mdp4_kms->rpm_enabled) @@ -582,13 +582,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) aspace = NULL; } - mdp4_kms->id = msm_register_address_space(dev, aspace); - if (mdp4_kms->id < 0) { - ret = mdp4_kms->id; - dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret); - goto fail; - } - ret = modeset_init(mdp4_kms); if (ret) { dev_err(dev->dev, "modeset_init failed: %d\n", ret); @@ -605,7 +598,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } - ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id, + ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace, &mdp4_kms->blank_cursor_iova); if (ret) { dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret); diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 1fe35b23038c..f9dcadf51295 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -33,8 +33,6 @@ struct mdp4_kms { int rev; /* mapper-id used to request GEM buffer mapped for scanout: */ - int id; - void __iomem *mmio; struct regulator *vdd; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 3903dbcda763..934992e649d3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -109,7 +109,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane, return 0; DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id); - return msm_framebuffer_prepare(fb, mdp4_kms->id); + return msm_framebuffer_prepare(fb, mdp4_kms->aspace); } static void mdp4_plane_cleanup_fb(struct drm_plane *plane, @@ -123,7 +123,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane, return; DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id); - msm_framebuffer_cleanup(fb, mdp4_kms->id); + msm_framebuffer_cleanup(fb, mdp4_kms->aspace); } @@ -172,13 +172,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane, MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), - msm_framebuffer_iova(fb, mdp4_kms->id, 0)); + msm_framebuffer_iova(fb, mdp4_kms->aspace, 0)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe), - msm_framebuffer_iova(fb, mdp4_kms->id, 1)); + msm_framebuffer_iova(fb, mdp4_kms->aspace, 1)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe), - msm_framebuffer_iova(fb, mdp4_kms->id, 2)); + msm_framebuffer_iova(fb, mdp4_kms->aspace, 2)); mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe), - msm_framebuffer_iova(fb, mdp4_kms->id, 3)); + 
msm_framebuffer_iova(fb, mdp4_kms->aspace, 3)); plane->fb = fb; } diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index c205c360e16d..15e7da205531 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c @@ -171,7 +171,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) container_of(work, struct mdp5_crtc, unref_cursor_work); struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base); - msm_gem_put_iova(val, mdp5_kms->id); + msm_gem_put_iova(val, mdp5_kms->aspace); drm_gem_object_unreference_unlocked(val); } @@ -525,7 +525,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, if (!cursor_bo) return -ENOENT; - ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr); + ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr); if (ret) return -EINVAL; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c index f022967475b3..d97e4ef12ab3 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c @@ -640,13 +640,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) aspace = NULL; } - mdp5_kms->id = msm_register_address_space(dev, aspace); - if (mdp5_kms->id < 0) { - ret = mdp5_kms->id; - dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret); - goto fail; - } - ret = modeset_init(mdp5_kms); if (ret) { dev_err(&pdev->dev, "modeset_init failed: %d\n", ret); diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h index 623ac07c1970..f21e912ff3af 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h @@ -38,7 +38,6 @@ struct mdp5_kms { /* mapper-id used to request GEM buffer mapped for scanout: */ - int id; struct msm_gem_address_space *aspace; struct mdp5_smp *smp; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 5e67e8b2b685..88e5d06a9194 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -261,7 +261,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane, return 0; DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id); - return msm_framebuffer_prepare(fb, mdp5_kms->id); + return msm_framebuffer_prepare(fb, mdp5_kms->aspace); } static void mdp5_plane_cleanup_fb(struct drm_plane *plane, @@ -275,7 +275,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, return; DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id); - msm_framebuffer_cleanup(fb, mdp5_kms->id); + msm_framebuffer_cleanup(fb, mdp5_kms->aspace); } static int mdp5_plane_atomic_check(struct drm_plane *plane, @@ -398,13 +398,13 @@ static void set_scanout_locked(struct drm_plane *plane, MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), - msm_framebuffer_iova(fb, mdp5_kms->id, 0)); + msm_framebuffer_iova(fb, mdp5_kms->aspace, 0)); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), - msm_framebuffer_iova(fb, mdp5_kms->id, 1)); + msm_framebuffer_iova(fb, mdp5_kms->aspace, 1)); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), - msm_framebuffer_iova(fb, mdp5_kms->id, 2)); + msm_framebuffer_iova(fb, mdp5_kms->aspace, 2)); mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), - msm_framebuffer_iova(fb, mdp5_kms->id, 3)); + msm_framebuffer_iova(fb, mdp5_kms->aspace, 3)); plane->fb = fb; } diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c 
index 6a2d239c5638..9aebeb94bc8e 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -151,20 +151,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = { .atomic_commit = msm_atomic_commit, }; -int msm_register_address_space(struct drm_device *dev, - struct msm_gem_address_space *aspace) -{ - struct msm_drm_private *priv = dev->dev_private; - int idx = priv->num_aspaces++; - - if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace))) - return -EINVAL; - - priv->aspace[idx] = aspace; - - return idx; -} - #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING static bool reglog = false; MODULE_PARM_DESC(reglog, "Enable register read/write logging"); @@ -1933,6 +1919,30 @@ static int add_display_components(struct device *dev, return ret; } +struct msm_gem_address_space * +msm_gem_smmu_address_space_get(struct drm_device *dev, + unsigned int domain) +{ + struct msm_drm_private *priv = NULL; + struct msm_kms *kms; + const struct msm_kms_funcs *funcs; + + if ((!dev) || (!dev->dev_private)) + return NULL; + + priv = dev->dev_private; + kms = priv->kms; + if (!kms) + return NULL; + + funcs = kms->funcs; + + if ((!funcs) || (!funcs->get_address_space)) + return NULL; + + return funcs->get_address_space(priv->kms, domain); +} + /* * We don't know what's the best binding to link the gpu with the drm device. * Fow now, we just hunt for all the possible gpus that we support, and add them diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 665ed36527a2..b22337467247 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -642,8 +642,6 @@ int msm_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); void msm_gem_submit_free(struct msm_gem_submit *submit); -int msm_register_address_space(struct drm_device *dev, - struct msm_gem_address_space *aspace); void msm_gem_unmap_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma, struct sg_table *sgt, void *priv); @@ -662,6 +660,10 @@ struct msm_gem_address_space * msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu, const char *name); +struct msm_gem_address_space * +msm_gem_smmu_address_space_get(struct drm_device *dev, + unsigned int domain); + int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file); @@ -673,13 +675,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj, int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); -int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, - uint32_t *iova); -int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); -uint32_t msm_gem_iova(struct drm_gem_object *obj, int id); +int msm_gem_get_iova_locked(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint32_t *iova); +int msm_gem_get_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint32_t *iova); +uint32_t msm_gem_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace); struct page **msm_gem_get_pages(struct drm_gem_object *obj); void msm_gem_put_pages(struct drm_gem_object *obj); -void msm_gem_put_iova(struct drm_gem_object *obj, int id); +void msm_gem_put_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace); int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args); int msm_gem_dumb_map_offset(struct drm_file *file, 
struct drm_device *dev, @@ -716,9 +721,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt); void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable); -int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id); -void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id); -uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane); +int msm_framebuffer_prepare(struct drm_framebuffer *fb, + struct msm_gem_address_space *aspace); +void msm_framebuffer_cleanup(struct drm_framebuffer *fb, + struct msm_gem_address_space *aspace); +uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, + struct msm_gem_address_space *aspace, int plane); struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 918427a793f6..0a9f12d3920b 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -148,14 +148,15 @@ static void msm_framebuffer_kunmap(struct drm_framebuffer *fb) * should be fine, since only the scanout (mdpN) side of things needs * this, the gpu doesn't care about fb's. */ -int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id) +int msm_framebuffer_prepare(struct drm_framebuffer *fb, + struct msm_gem_address_space *aspace) { struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); int ret, i, n = drm_format_num_planes(fb->pixel_format); uint32_t iova; for (i = 0; i < n; i++) { - ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova); + ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova); DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret); if (ret) return ret; @@ -167,7 +168,8 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id) return 0; } -void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id) +void msm_framebuffer_cleanup(struct drm_framebuffer *fb, + struct msm_gem_address_space *aspace) { struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); int i, n = drm_format_num_planes(fb->pixel_format); @@ -176,15 +178,16 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id) msm_framebuffer_kunmap(fb); for (i = 0; i < n; i++) - msm_gem_put_iova(msm_fb->planes[i], id); + msm_gem_put_iova(msm_fb->planes[i], aspace); } -uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane) +uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, + struct msm_gem_address_space *aspace, int plane) { struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); if (!msm_fb->planes[plane]) return 0; - return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane]; + return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane]; } struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 43e2a266f8b8..a7d06d16000e 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -290,20 +290,61 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) return offset; } +static void obj_remove_domain(struct msm_gem_vma *domain) +{ + if (domain) { + list_del(&domain->list); + kfree(domain); + } +} + static void put_iova(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - struct msm_drm_private *priv = obj->dev->dev_private; struct 
msm_gem_object *msm_obj = to_msm_bo(obj); - int id; + struct msm_gem_vma *domain, *tmp; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { - msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id], - msm_obj->sgt, get_dmabuf_ptr(obj)); + list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) { + if (iommu_present(&platform_bus_type)) { + msm_gem_unmap_vma(domain->aspace, domain, + msm_obj->sgt, get_dmabuf_ptr(obj)); + } + + obj_remove_domain(domain); + } +} + +static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL); + + if (!domain) + return ERR_PTR(-ENOMEM); + + domain->aspace = aspace; + + list_add_tail(&domain->list, &msm_obj->domains); + + return domain; +} + +static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct msm_gem_vma *domain; + + list_for_each_entry(domain, &msm_obj->domains, list) { + if (domain->aspace == aspace) + return domain; } + + return NULL; } /* should be called under struct_mutex.. although it can be called @@ -313,51 +354,65 @@ put_iova(struct drm_gem_object *obj) * That means when I do eventually need to add support for unpinning * the refcnt counter needs to be atomic_t. */ -int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, - uint32_t *iova) +int msm_gem_get_iova_locked(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint32_t *iova) { struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct page **pages; + struct msm_gem_vma *domain; int ret = 0; - if (!msm_obj->domain[id].iova) { - struct msm_drm_private *priv = obj->dev->dev_private; - struct page **pages = get_pages(obj); + if (!iommu_present(&platform_bus_type)) { + pages = get_pages(obj); if (IS_ERR(pages)) return PTR_ERR(pages); - if (iommu_present(&platform_bus_type)) { - ret = msm_gem_map_vma(priv->aspace[id], - &msm_obj->domain[id], msm_obj->sgt, - get_dmabuf_ptr(obj), - msm_obj->flags); - } else { - msm_obj->domain[id].iova = physaddr(obj); + *iova = physaddr(obj); + return 0; + } + + domain = obj_get_domain(obj, aspace); + + if (!domain) { + domain = obj_add_domain(obj, aspace); + if (IS_ERR(domain)) + return PTR_ERR(domain); + + pages = get_pages(obj); + if (IS_ERR(pages)) { + obj_remove_domain(domain); + return PTR_ERR(pages); } + + ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt, + get_dmabuf_ptr(obj), + msm_obj->flags); } - if (!ret) - *iova = msm_obj->domain[id].iova; + if (!ret && domain) + *iova = domain->iova; + else + obj_remove_domain(domain); return ret; } /* get iova, taking a reference. 
Should have a matching put */ -int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) +int msm_gem_get_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace, uint32_t *iova) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct msm_gem_vma *domain; int ret; - /* this is safe right now because we don't unmap until the - * bo is deleted: - */ - if (msm_obj->domain[id].iova) { - *iova = msm_obj->domain[id].iova; + domain = obj_get_domain(obj, aspace); + if (domain) { + *iova = domain->iova; return 0; } mutex_lock(&obj->dev->struct_mutex); - ret = msm_gem_get_iova_locked(obj, id, iova); + ret = msm_gem_get_iova_locked(obj, aspace, iova); mutex_unlock(&obj->dev->struct_mutex); return ret; } @@ -365,14 +420,18 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) /* get iova without taking a reference, used in places where you have * already done a 'msm_gem_get_iova()'. */ -uint32_t msm_gem_iova(struct drm_gem_object *obj, int id) +uint32_t msm_gem_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); - WARN_ON(!msm_obj->domain[id].iova); - return msm_obj->domain[id].iova; + struct msm_gem_vma *domain = obj_get_domain(obj, aspace); + + WARN_ON(!domain); + + return domain ? domain->iova : 0; } -void msm_gem_put_iova(struct drm_gem_object *obj, int id) +void msm_gem_put_iova(struct drm_gem_object *obj, + struct msm_gem_address_space *aspace) { // XXX TODO .. // NOTE: probably don't need a _locked() version.. we wouldn't @@ -624,6 +683,7 @@ static void describe_fence(struct fence *fence, const char *type, void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) { struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct msm_gem_vma *domain; struct reservation_object *robj = msm_obj->resv; struct reservation_object_list *fobj; struct fence *fence; @@ -666,6 +726,12 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m) if (fence) describe_fence(fence, "Exclusive", m); rcu_read_unlock(); + + /* FIXME: we need to print the address space here too */ + list_for_each_entry(domain, &msm_obj->domains, list) + seq_printf(m, " %08llx", domain->iova); + + seq_puts(m, "\n"); } void msm_gem_describe_objects(struct list_head *list, struct seq_file *m) @@ -783,8 +849,13 @@ static int msm_gem_new_impl(struct drm_device *dev, if (!msm_obj) return -ENOMEM; - if (use_vram) - msm_obj->vram_node = &msm_obj->domain[0].node; + if (use_vram) { + struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, + NULL); + + if (!IS_ERR(domain)) + msm_obj->vram_node = &domain->node; + } msm_obj->flags = flags; msm_obj->madv = MSM_MADV_WILLNEED; @@ -797,6 +868,8 @@ static int msm_gem_new_impl(struct drm_device *dev, } INIT_LIST_HEAD(&msm_obj->submit_entry); + INIT_LIST_HEAD(&msm_obj->domains); + list_add_tail(&msm_obj->mm_list, &priv->inactive_list); *obj = &msm_obj->base; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index b176c11197f6..9d41a00fce07 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -44,7 +44,9 @@ struct msm_gem_address_space { struct msm_gem_vma { /* Node used by the GPU address space, but not the SDE address space */ struct drm_mm_node node; + struct msm_gem_address_space *aspace; uint64_t iova; + struct list_head list; }; struct msm_gem_object { @@ -84,7 +86,7 @@ struct msm_gem_object { struct sg_table *sgt; void *vaddr; - struct msm_gem_vma domain[NUM_DOMAINS]; + struct list_head 
domains; /* normally (resv == &_resv) except for imported bo's */ struct reservation_object *resv; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b6a0f37a65f3..8d727fe5c10e 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -157,7 +157,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) struct msm_gem_object *msm_obj = submit->bos[i].obj; if (submit->bos[i].flags & BO_PINNED) - msm_gem_put_iova(&msm_obj->base, submit->gpu->id); + msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace); if (submit->bos[i].flags & BO_LOCKED) ww_mutex_unlock(&msm_obj->resv->lock); @@ -245,7 +245,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) /* if locking succeeded, pin bo: */ ret = msm_gem_get_iova_locked(&msm_obj->base, - submit->gpu->id, &iova); + submit->gpu->aspace, &iova); if (ret) break; diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index ded4226311cb..49d9e104af31 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -457,7 +457,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) struct msm_gem_object *msm_obj = submit->bos[i].obj; /* move to inactive: */ msm_gem_move_to_inactive(&msm_obj->base); - msm_gem_put_iova(&msm_obj->base, gpu->id); + msm_gem_put_iova(&msm_obj->base, gpu->aspace); drm_gem_object_unreference(&msm_obj->base); } @@ -493,6 +493,8 @@ static void retire_worker(struct work_struct *work) msm_update_fence(gpu->fctx, fence); mutex_lock(&dev->struct_mutex); + retire_submits(gpu); + retire_submits(gpu); mutex_unlock(&dev->struct_mutex); @@ -538,8 +540,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* submit takes a reference to the bo and iova until retired: */ drm_gem_object_reference(&msm_obj->base); msm_gem_get_iova_locked(&msm_obj->base, - submit->gpu->id, &iova); - + submit->gpu->aspace, &iova); if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ) @@ -674,8 +675,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, } else { dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); } - gpu->id = msm_register_address_space(drm, gpu->aspace); - /* Create ringbuffer: */ mutex_lock(&drm->struct_mutex); @@ -706,7 +705,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) if (gpu->rb) { if (gpu->rb_iova) - msm_gem_put_iova(gpu->rb->bo, gpu->id); + msm_gem_put_iova(gpu->rb->bo, gpu->aspace); msm_ringbuffer_destroy(gpu->rb); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index c6bf5d6ebc20..13ecd72c09ab 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -99,7 +99,6 @@ struct msm_gpu { int irq; struct msm_gem_address_space *aspace; - int id; /* Power Control: */ struct regulator *gpu_reg, *gpu_cx; diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index eed0f1b6ae02..eb10d6bbcca4 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -93,6 +93,10 @@ struct msm_kms_funcs { struct drm_mode_object *obj, u32 event, bool en); /* cleanup: */ void (*destroy)(struct msm_kms *kms); + /* get address space */ + struct msm_gem_address_space *(*get_address_space)( + struct msm_kms *kms, + unsigned int domain); }; struct msm_kms { diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c 
b/drivers/gpu/drm/msm/sde/sde_connector.c index 2970b280814f..c3c5a1345aa8 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.c +++ b/drivers/gpu/drm/msm/sde/sde_connector.c @@ -327,8 +327,7 @@ static void _sde_connector_destroy_fb(struct sde_connector *c_conn, return; } - msm_framebuffer_cleanup(c_state->out_fb, - c_state->mmu_id); + msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace); drm_framebuffer_unreference(c_state->out_fb); c_state->out_fb = NULL; @@ -432,7 +431,7 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector) if (c_state->out_fb) { drm_framebuffer_reference(c_state->out_fb); rc = msm_framebuffer_prepare(c_state->out_fb, - c_state->mmu_id); + c_state->aspace); if (rc) SDE_ERROR("failed to prepare fb, %d\n", rc); } @@ -652,14 +651,14 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector, c_conn->fb_kmap); if (c_state->out_fb->flags & DRM_MODE_FB_SECURE) - c_state->mmu_id = - c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE]; + c_state->aspace = + c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE]; else - c_state->mmu_id = - c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE]; + c_state->aspace = + c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE]; rc = msm_framebuffer_prepare(c_state->out_fb, - c_state->mmu_id); + c_state->aspace); if (rc) SDE_ERROR("prep fb failed, %d\n", rc); } @@ -1010,18 +1009,17 @@ struct drm_connector *sde_connector_init(struct drm_device *dev, c_conn->lp_mode = 0; c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON; - /* cache mmu_id's for later */ sde_kms = to_sde_kms(priv->kms); if (sde_kms->vbif[VBIF_NRT]) { - c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] = - sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE]; - c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] = - sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE]; + c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] = + sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE]; + c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] = + sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE]; } else { - c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] = - sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE]; - c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] = - sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE]; + c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] = + sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]; + c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] = + sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE]; } if (ops) diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h index 497d0dba1879..2318756093bc 100644 --- a/drivers/gpu/drm/msm/sde/sde_connector.h +++ b/drivers/gpu/drm/msm/sde/sde_connector.h @@ -240,7 +240,7 @@ struct sde_connector { struct drm_panel *panel; void *display; - int mmu_id[SDE_IOMMU_DOMAIN_MAX]; + struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX]; char name[SDE_CONNECTOR_NAME_SIZE]; @@ -304,14 +304,14 @@ struct sde_connector { * struct sde_connector_state - private connector status structure * @base: Base drm connector structure * @out_fb: Pointer to output frame buffer, if applicable - * @mmu_id: MMU ID for accessing frame buffer objects, if applicable + * @aspace: Address space for accessing frame buffer objects, if applicable * @property_values: Local cache of current connector property values * @rois: Regions of interest structure for mapping CRTC to Connector output */ struct sde_connector_state { struct drm_connector_state base; struct drm_framebuffer *out_fb; - int mmu_id; + struct msm_gem_address_space *aspace; uint64_t property_values[CONNECTOR_PROP_COUNT]; struct msm_roi_list rois; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h 
b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h index b173876c875b..4b12651717fa 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h @@ -333,7 +333,7 @@ struct sde_encoder_phys_cmd { * @wb_fmt: Writeback pixel format * @frame_count: Counter of completed writeback operations * @kickoff_count: Counter of issued writeback operations - * @mmu_id: mmu identifier for non-secure/secure domain + * @aspace: address space identifier for non-secure/secure domain * @wb_dev: Pointer to writeback device * @start_time: Start time of writeback latest request * @end_time: End time of writeback latest request @@ -355,7 +355,7 @@ struct sde_encoder_phys_wb { const struct sde_format *wb_fmt; u32 frame_count; u32 kickoff_count; - int mmu_id[SDE_IOMMU_DOMAIN_MAX]; + struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX]; struct sde_wb_device *wb_dev; ktime_t start_time; ktime_t end_time; diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c index 54c1397392ba..875d99d584c3 100644 --- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c +++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c @@ -250,7 +250,8 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc, struct sde_hw_wb_cfg *wb_cfg; struct sde_hw_wb_cdp_cfg *cdp_cfg; const struct msm_format *format; - int ret, mmu_id; + int ret; + struct msm_gem_address_space *aspace; if (!phys_enc || !phys_enc->sde_kms || !phys_enc->sde_kms->catalog) { SDE_ERROR("invalid encoder\n"); @@ -264,9 +265,9 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc, wb_cfg->intf_mode = phys_enc->intf_mode; wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false; - mmu_id = (wb_cfg->is_secure) ? - wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] : - wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE]; + aspace = (wb_cfg->is_secure) ? 
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] : + wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE]; SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure); @@ -288,7 +289,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc, wb_cfg->roi = *wb_roi; if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) { - ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest); + ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest); if (ret) { SDE_DEBUG("failed to populate layout %d\n", ret); return; @@ -297,7 +298,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc, wb_cfg->dest.height = fb->height; wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes; } else { - ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi, + ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi, &wb_cfg->dest); if (ret) { /* this error should be detected during atomic_check */ @@ -914,12 +915,19 @@ static int _sde_encoder_phys_wb_init_internal_fb( struct drm_mode_fb_cmd2 mode_cmd; uint32_t size; int nplanes, i, ret; + struct msm_gem_address_space *aspace; if (!wb_enc || !wb_enc->base.parent || !wb_enc->base.sde_kms) { SDE_ERROR("invalid params\n"); return -EINVAL; } + aspace = wb_enc->base.sde_kms->aspace[SDE_IOMMU_DOMAIN_UNSECURE]; + if (!aspace) { + SDE_ERROR("invalid address space\n"); + return -EINVAL; + } + dev = wb_enc->base.sde_kms->dev; if (!dev) { SDE_ERROR("invalid dev\n"); @@ -974,8 +982,7 @@ static int _sde_encoder_phys_wb_init_internal_fb( } /* prepare the backing buffer now so that it's available later */ - ret = msm_framebuffer_prepare(fb, - wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE]); + ret = msm_framebuffer_prepare(fb, aspace); if (!ret) wb_enc->fb_disable = fb; return ret; @@ -1234,15 +1241,15 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init( phys_enc = &wb_enc->base; if (p->sde_kms->vbif[VBIF_NRT]) { - wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] = - p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE]; - wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] = - p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE]; + wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] = + p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE]; + wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] = + p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE]; } else { - wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] = - p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE]; - wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] = - p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE]; + wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] = + p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]; + wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] = + p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE]; } hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm); diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c index c3477b5e0e57..04c9e79dfc1a 100644 --- a/drivers/gpu/drm/msm/sde/sde_formats.c +++ b/drivers/gpu/drm/msm/sde/sde_formats.c @@ -818,7 +818,7 @@ uint32_t sde_format_get_framebuffer_size( } static int _sde_format_populate_addrs_ubwc( - int mmu_id, + struct msm_gem_address_space *aspace, struct drm_framebuffer *fb, struct sde_hw_fmt_layout *layout) { @@ -830,7 +830,7 @@ static int _sde_format_populate_addrs_ubwc( return -EINVAL; } - base_addr = msm_framebuffer_iova(fb, mmu_id, 0); + base_addr = msm_framebuffer_iova(fb, aspace, 0); if (!base_addr) { DRM_ERROR("failed to retrieve base addr\n"); return -EFAULT; @@ -909,7 +909,7 @@ static int _sde_format_populate_addrs_ubwc( } static int _sde_format_populate_addrs_linear( - int mmu_id, + struct msm_gem_address_space 
*aspace, struct drm_framebuffer *fb, struct sde_hw_fmt_layout *layout) { @@ -926,7 +926,7 @@ static int _sde_format_populate_addrs_linear( /* Populate addresses for simple formats here */ for (i = 0; i < layout->num_planes; ++i) { - layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i); + layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i); if (!layout->plane_addr[i]) { DRM_ERROR("failed to retrieve base addr\n"); return -EFAULT; @@ -937,7 +937,7 @@ static int _sde_format_populate_addrs_linear( } int sde_format_populate_layout( - int mmu_id, + struct msm_gem_address_space *aspace, struct drm_framebuffer *fb, struct sde_hw_fmt_layout *layout) { @@ -969,9 +969,9 @@ int sde_format_populate_layout( /* Populate the addresses given the fb */ if (SDE_FORMAT_IS_UBWC(layout->format) || SDE_FORMAT_IS_TILE(layout->format)) - ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout); + ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout); else - ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout); + ret = _sde_format_populate_addrs_linear(aspace, fb, layout); /* check if anything changed */ if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr))) @@ -1013,14 +1013,14 @@ static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source, } int sde_format_populate_layout_with_roi( - int mmu_id, + struct msm_gem_address_space *aspace, struct drm_framebuffer *fb, struct sde_rect *roi, struct sde_hw_fmt_layout *layout) { int ret; - ret = sde_format_populate_layout(mmu_id, fb, layout); + ret = sde_format_populate_layout(aspace, fb, layout); if (ret || !roi) return ret; diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h index 40aab228b8e9..2333a722664f 100644 --- a/drivers/gpu/drm/msm/sde/sde_formats.h +++ b/drivers/gpu/drm/msm/sde/sde_formats.h @@ -14,6 +14,7 @@ #define _SDE_FORMATS_H #include +#include "msm_gem.h" #include "sde_hw_mdss.h" /** @@ -103,7 +104,7 @@ int sde_format_check_modified_format( /** * sde_format_populate_layout - populate the given format layout based on * mmu, fb, and format found in the fb - * @mmu_id: mmu id handle + * @aspace: address space pointer * @fb: framebuffer pointer * @fmtl: format layout structure to populate * @@ -111,14 +112,14 @@ int sde_format_check_modified_format( * are the same as before or 0 if new addresses were populated */ int sde_format_populate_layout( - int mmu_id, + struct msm_gem_address_space *aspace, struct drm_framebuffer *fb, struct sde_hw_fmt_layout *fmtl); /** * sde_format_populate_layout_with_roi - populate the given format layout * based on mmu, fb, roi, and format found in the fb - * @mmu_id: mmu id handle + * @aspace: address space pointer * @fb: framebuffer pointer * @roi: region of interest (optional) * @fmtl: format layout structure to populate @@ -126,7 +127,7 @@ int sde_format_populate_layout( * Return: error code on failure, 0 on success */ int sde_format_populate_layout_with_roi( - int mmu_id, + struct msm_gem_address_space *aspace, struct drm_framebuffer *fb, struct sde_rect *roi, struct sde_hw_fmt_layout *fmtl); diff --git a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c index dbd435b1441e..9bc9837c5781 100644 --- a/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c +++ b/drivers/gpu/drm/msm/sde/sde_hw_reg_dma_v1.c @@ -13,6 +13,7 @@ #include "sde_hw_ctl.h" #include "sde_hw_reg_dma_v1.h" #include "msm_drv.h" +#include "msm_mmu.h" #define GUARD_BYTES (BIT(8) - 1) #define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES)) 
@@ -582,6 +583,7 @@ static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size) struct sde_reg_dma_buffer *dma_buf = NULL; u32 iova_aligned, offset; u32 rsize = size + GUARD_BYTES; + struct msm_gem_address_space *aspace = NULL; int rc = 0; if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) { @@ -602,7 +604,15 @@ static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size) goto fail; } - rc = msm_gem_get_iova(dma_buf->buf, 0, &dma_buf->iova); + aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev, + MSM_SMMU_DOMAIN_UNSECURE); + if (!aspace) { + DRM_ERROR("failed to get aspace\n"); + rc = -EINVAL; + goto free_gem; + } + + rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova); if (rc) { DRM_ERROR("failed to get the iova rc %d\n", rc); goto free_gem; @@ -625,7 +635,7 @@ static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size) return dma_buf; put_iova: - msm_gem_put_iova(dma_buf->buf, 0); + msm_gem_put_iova(dma_buf->buf, aspace); free_gem: msm_gem_free_object(dma_buf->buf); fail: diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index 78ea685a5dc2..abb378d5387d 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -1414,6 +1414,29 @@ static int sde_kms_atomic_check(struct msm_kms *kms, return drm_atomic_helper_check(dev, state); } +static struct msm_gem_address_space* +_sde_kms_get_address_space(struct msm_kms *kms, + unsigned int domain) +{ + struct sde_kms *sde_kms; + + if (!kms) { + SDE_ERROR("invalid kms\n"); + return NULL; + } + + sde_kms = to_sde_kms(kms); + if (!sde_kms) { + SDE_ERROR("invalid sde_kms\n"); + return NULL; + } + + if (domain >= MSM_SMMU_DOMAIN_MAX) + return NULL; + + return sde_kms->aspace[domain]; +} + static const struct msm_kms_funcs kms_funcs = { .hw_init = sde_kms_hw_init, .postinit = sde_kms_postinit, @@ -1436,6 +1459,7 @@ static const struct msm_kms_funcs kms_funcs = { .round_pixclk = sde_kms_round_pixclk, .destroy = sde_kms_destroy, .register_events = _sde_kms_register_events, + .get_address_space = _sde_kms_get_address_space, }; /* the caller api needs to turn on clock before calling it */ @@ -1449,17 +1473,17 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms) struct msm_mmu *mmu; int i; - for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) { - mmu = sde_kms->aspace[i]->mmu; - - if (!mmu) + for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) { + if (!sde_kms->aspace[i]) continue; + mmu = sde_kms->aspace[i]->mmu; + mmu->funcs->detach(mmu, (const char **)iommu_ports, ARRAY_SIZE(iommu_ports)); msm_gem_address_space_destroy(sde_kms->aspace[i]); - sde_kms->mmu_id[i] = 0; + sde_kms->aspace[i] = NULL; } return 0; @@ -1499,17 +1523,6 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms) goto fail; } - sde_kms->mmu_id[i] = msm_register_address_space(sde_kms->dev, - aspace); - if (sde_kms->mmu_id[i] < 0) { - ret = sde_kms->mmu_id[i]; - SDE_ERROR("failed to register sde iommu %d: %d\n", - i, ret); - mmu->funcs->detach(mmu, (const char **)iommu_ports, - ARRAY_SIZE(iommu_ports)); - msm_gem_address_space_destroy(aspace); - goto fail; - } } return 0; diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h index 0c5c286167af..d818fdf6497a 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.h +++ b/drivers/gpu/drm/msm/sde/sde_kms.h @@ -160,7 +160,6 @@ struct sde_kms { struct sde_mdss_cfg *catalog; struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX]; - int mmu_id[MSM_SMMU_DOMAIN_MAX]; struct sde_power_client *core_client; struct ion_client 
*iclient; diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 2a98af45d2f6..fb3523d3fc1a 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -115,6 +115,7 @@ enum sde_plane_sclcheck_state { /* * struct sde_plane - local sde plane structure + * @aspace: address space pointer * @csc_cfg: Decoded user configuration for csc * @csc_usr_ptr: Points to csc_cfg if valid user config available * @csc_ptr: Points to sde_csc_cfg structure to use for current @@ -129,7 +130,7 @@ enum sde_plane_sclcheck_state { struct sde_plane { struct drm_plane base; - int mmu_id; + struct msm_gem_address_space *aspace; struct mutex lock; @@ -888,7 +889,7 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane, return; } - ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout); + ret = sde_format_populate_layout(psde->aspace, fb, &pipe_cfg->layout); if (ret == -EAGAIN) SDE_DEBUG_PLANE(psde, "not updating same src addrs\n"); else if (ret) @@ -1801,7 +1802,7 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane, struct sde_hw_fmt_layout layout; memset(&layout, 0, sizeof(struct sde_hw_fmt_layout)); - sde_format_populate_layout(rstate->mmu_id, state->fb, + sde_format_populate_layout(rstate->aspace, state->fb, &layout); for (i = 0; i < ARRAY_SIZE(rot_cmd->src_iova); i++) { rot_cmd->src_iova[i] = layout.plane_addr[i]; @@ -1810,7 +1811,7 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane, rot_cmd->src_planes = layout.num_planes; memset(&layout, 0, sizeof(struct sde_hw_fmt_layout)); - sde_format_populate_layout(rstate->mmu_id, rstate->out_fb, + sde_format_populate_layout(rstate->aspace, rstate->out_fb, &layout); for (i = 0; i < ARRAY_SIZE(rot_cmd->dst_iova); i++) { rot_cmd->dst_iova[i] = layout.plane_addr[i]; @@ -1950,6 +1951,7 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, struct sde_plane_state *new_pstate = to_sde_plane_state(new_state); struct sde_plane_rot_state *new_rstate = &new_pstate->rot; struct drm_crtc_state *cstate; + struct sde_kms *kms = _sde_plane_get_kms(plane); int ret; SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n", @@ -1958,6 +1960,9 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, !!new_rstate->out_sbuf, !!new_rstate->rot_hw, sde_plane_crtc_enabled(new_state)); + if (!kms) + return -EINVAL; + if (!new_rstate->out_sbuf || !new_rstate->rot_hw) return 0; @@ -2005,9 +2010,11 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, new_rstate->sequence_id); if (new_state->fb->flags & DRM_MODE_FB_SECURE) - new_rstate->mmu_id = MSM_SMMU_DOMAIN_SECURE; + new_rstate->aspace = + kms->aspace[MSM_SMMU_DOMAIN_SECURE]; else - new_rstate->mmu_id = MSM_SMMU_DOMAIN_UNSECURE; + new_rstate->aspace = + kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]; /* check if out_fb is already attached to rotator */ new_rstate->out_fbo = sde_kms_fbo_alloc(plane->dev, fb_w, fb_h, @@ -2046,7 +2053,7 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, } /* prepare rotator input buffer */ - ret = msm_framebuffer_prepare(new_state->fb, new_rstate->mmu_id); + ret = msm_framebuffer_prepare(new_state->fb, new_rstate->aspace); if (ret) { SDE_ERROR("failed to prepare input framebuffer\n"); goto error_prepare_input_buffer; @@ -2058,7 +2065,7 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, new_rstate->sequence_id); ret = msm_framebuffer_prepare(new_rstate->out_fb, - new_rstate->mmu_id); + new_rstate->aspace); if (ret) { SDE_ERROR("failed to prepare 
inline framebuffer\n"); goto error_prepare_output_buffer; @@ -2068,7 +2075,7 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, return 0; error_prepare_output_buffer: - msm_framebuffer_cleanup(new_state->fb, new_rstate->mmu_id); + msm_framebuffer_cleanup(new_state->fb, new_rstate->aspace); error_prepare_input_buffer: sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB, (u64) &new_rstate->rot_hw->base); @@ -2124,7 +2131,7 @@ static void sde_plane_rot_cleanup_fb(struct drm_plane *plane, if (sde_plane_enabled(old_state)) { if (old_rstate->out_fb) { msm_framebuffer_cleanup(old_rstate->out_fb, - old_rstate->mmu_id); + old_rstate->aspace); sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB, (u64) &old_rstate->rot_hw->base); old_rstate->out_fb = NULL; @@ -2133,7 +2140,7 @@ static void sde_plane_rot_cleanup_fb(struct drm_plane *plane, old_rstate->out_fbo = NULL; } - msm_framebuffer_cleanup(old_state->fb, old_rstate->mmu_id); + msm_framebuffer_cleanup(old_state->fb, old_rstate->aspace); } } @@ -2163,6 +2170,7 @@ static int sde_plane_rot_atomic_check(struct drm_plane *plane, old_pstate = to_sde_plane_state(plane->state); rstate = &pstate->rot; old_rstate = &old_pstate->rot; + rstate->aspace = psde->aspace; /* cstate will be null if crtc is disconnected from plane */ cstate = _sde_plane_get_crtc_state(state); @@ -2657,14 +2665,14 @@ static int sde_plane_prepare_fb(struct drm_plane *plane, new_rstate = &to_sde_plane_state(new_state)->rot; - ret = msm_framebuffer_prepare(new_rstate->out_fb, new_rstate->mmu_id); + ret = msm_framebuffer_prepare(new_rstate->out_fb, psde->aspace); if (ret) { SDE_ERROR("failed to prepare framebuffer\n"); return ret; } /* validate framebuffer layout before commit */ - ret = sde_format_populate_layout(new_rstate->mmu_id, + ret = sde_format_populate_layout(psde->aspace, new_rstate->out_fb, &layout); if (ret) { SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret); @@ -2687,7 +2695,7 @@ static void sde_plane_cleanup_fb(struct drm_plane *plane, old_rstate = &to_sde_plane_state(old_state)->rot; - msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->mmu_id); + msm_framebuffer_cleanup(old_rstate->out_fb, old_rstate->aspace); sde_plane_rot_cleanup_fb(plane, old_state); } @@ -4481,7 +4489,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, /* cache local stuff for later */ plane = &psde->base; psde->pipe = pipe; - psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE]; + psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]; psde->is_virtual = (master_plane_id != 0); psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE; INIT_LIST_HEAD(&psde->mplane_list); diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h index f83a891d1994..46784e756fd4 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.h +++ b/drivers/gpu/drm/msm/sde/sde_plane.h @@ -34,7 +34,7 @@ * @rot90: true if rotation of 90 degree is required * @hflip: true if horizontal flip is required * @vflip: true if vertical flip is required - * @mmu_id: iommu identifier for input/output buffers + * @aspace: pointer address space for input/output buffers * @rot_cmd: rotator configuration command * @nplane: total number of drm plane attached to rotator * @in_fb: input fb attached to rotator @@ -64,7 +64,7 @@ struct sde_plane_rot_state { bool rot90; bool hflip; bool vflip; - u32 mmu_id; + struct msm_gem_address_space *aspace; struct sde_hw_rot_cmd rot_cmd; int nplane; /* input */ -- GitLab From 50d69440b7c2c535e0a90ce2b107f43e120c52e1 Mon Sep 17 00:00:00 2001 From: Abhijit Kulkarni 
Date: Tue, 11 Apr 2017 19:50:47 -0700 Subject: [PATCH 762/786] drm/msm/sde: add secure use case properties This change adds custom PLANE(fb_translation_mode) and CRTC(security_level) properties for supporting secure use case. Plane property identifies the translation requirement for the framebuffer. CRTC property specifies what planes could be attached to this CRTC. CRs-Fixed: 2050484 Change-Id: Iea59027d4bee536c8554e3955723982a6fc361dd Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/msm_drv.h | 2 ++ drivers/gpu/drm/msm/sde/sde_crtc.c | 9 +++++++ drivers/gpu/drm/msm/sde/sde_plane.c | 12 +++++++++ include/uapi/drm/sde_drm.h | 42 +++++++++++++++++++++++++++++ 4 files changed, 65 insertions(+) diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index b22337467247..0d1605dbcdbd 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -119,6 +119,7 @@ enum msm_mdp_plane_property { PLANE_PROP_ROTATION, PLANE_PROP_BLEND_OP, PLANE_PROP_SRC_CONFIG, + PLANE_PROP_FB_TRANSLATION_MODE, /* total # of properties */ PLANE_PROP_COUNT @@ -145,6 +146,7 @@ enum msm_mdp_crtc_property { CRTC_PROP_ROT_PREFILL_BW, CRTC_PROP_ROT_CLK, CRTC_PROP_ROI_V1, + CRTC_PROP_SECURITY_LEVEL, /* total # of properties */ CRTC_PROP_COUNT diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index 923297f96082..ed6178a4a98d 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -2998,6 +2998,10 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, struct drm_device *dev; struct sde_kms_info *info; struct sde_kms *sde_kms; + static const struct drm_prop_enum_list e_secure_level[] = { + {SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"}, + {SDE_DRM_SEC_ONLY, "sec_only"}, + }; SDE_DEBUG("\n"); @@ -3071,6 +3075,11 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, msm_property_install_volatile_range(&sde_crtc->property_info, "sde_drm_roi_v1", 0x0, 0, ~0, 0, CRTC_PROP_ROI_V1); + msm_property_install_enum(&sde_crtc->property_info, "security_level", + 0x0, 0, e_secure_level, + ARRAY_SIZE(e_secure_level), + CRTC_PROP_SECURITY_LEVEL); + sde_kms_info_reset(info); if (catalog->has_dim_layer) { diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index fb3523d3fc1a..1aa4ce6a7705 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -3512,6 +3512,12 @@ static void _sde_plane_install_properties(struct drm_plane *plane, static const struct drm_prop_enum_list e_src_config[] = { {SDE_DRM_DEINTERLACE, "deinterlace"} }; + static const struct drm_prop_enum_list e_fb_translation_mode[] = { + {SDE_DRM_FB_NON_SEC, "non_sec"}, + {SDE_DRM_FB_SEC, "sec"}, + {SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"}, + {SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"}, + }; const struct sde_format_extended *format_list; struct sde_format_extended *virt_format_list = NULL; struct sde_kms_info *info; @@ -3730,6 +3736,12 @@ static void _sde_plane_install_properties(struct drm_plane *plane, msm_property_install_blob(&psde->property_info, feature_name, 0, PLANE_PROP_FOLIAGE_COLOR); } + + msm_property_install_enum(&psde->property_info, "fb_translation_mode", + 0x0, + 0, e_fb_translation_mode, + ARRAY_SIZE(e_fb_translation_mode), + PLANE_PROP_FB_TRANSLATION_MODE); } static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr) diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h index 44b42a650c87..439a9256afde 
100644 --- a/include/uapi/drm/sde_drm.h +++ b/include/uapi/drm/sde_drm.h @@ -66,6 +66,48 @@ /* DRM bitmasks are restricted to 0..63 */ #define SDE_DRM_BITMASK_COUNT 64 +/** + * Framebuffer modes for "fb_translation_mode" PLANE property + * + * @SDE_DRM_FB_NON_SEC: IOMMU configuration for this framebuffer mode + * is non-secure domain and requires + * both stage I and stage II translations when + * this buffer is accessed by the display HW. + * This is the default mode of all frambuffers. + * @SDE_DRM_FB_SEC: IOMMU configuration for this framebuffer mode + * is secure domain and requires + * both stage I and stage II translations when + * this buffer is accessed by the display HW. + * @SDE_DRM_FB_NON_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode + * is non-secure domain and requires + * only stage II translation when + * this buffer is accessed by the display HW. + * @SDE_DRM_FB_SEC_DIR_TRANS: IOMMU configuration for this framebuffer mode + * is secure domain and requires + * only stage II translation when + * this buffer is accessed by the display HW. + */ + +#define SDE_DRM_FB_NON_SEC 0 +#define SDE_DRM_FB_SEC 1 +#define SDE_DRM_FB_NON_SEC_DIR_TRANS 2 +#define SDE_DRM_FB_SEC_DIR_TRANS 3 + +/** + * Secure levels for "security_level" CRTC property. + * CRTC property which specifies what plane types + * can be attached to this CRTC. Plane component + * derives the plane type based on the FB_MODE. + * @ SDE_DRM_SEC_NON_SEC: Both Secure and non-secure plane types can be + * attached to this CRTC. This is the default state of + * the CRTC. + * @ SDE_DRM_SEC_ONLY: Only secure planes can be added to this CRTC. If a + * CRTC is instructed to be in this mode it follows the + * platform dependent restrictions. + */ +#define SDE_DRM_SEC_NON_SEC 0 +#define SDE_DRM_SEC_ONLY 1 + /** * struct sde_drm_pix_ext_v1 - version 1 of pixel ext structure * @num_ext_pxls_lr: Number of total horizontal pixels -- GitLab From 1774dac096b267808980c3a75222ff05af8d54ef Mon Sep 17 00:00:00 2001 From: Abhijit Kulkarni Date: Mon, 1 May 2017 10:51:02 -0700 Subject: [PATCH 763/786] ARM: dts: msm: add secure smmu cb node for sdm845 This change adds the secure context bank child node in mdss driver, this is required for having both the secure and non-secure smmu context bank support in the driver. This change also renames the child nodes to avoid compilation issues on the builds, where fb driver mdss device file is also present. As a part of this change the smmu driver registration is called as a part of msm driver probe. CRs-Fixed: 2050484 Change-Id: I90cd31fd792b0fd9772689dc6561bb0b429e1e3a Signed-off-by: Abhijit Kulkarni --- .../devicetree/bindings/display/msm/sde.txt | 36 +++++++++++++------ arch/arm64/boot/dts/qcom/sdm845-sde.dtsi | 6 ++++ drivers/gpu/drm/msm/msm_drv.c | 2 ++ drivers/gpu/drm/msm/msm_mmu.h | 4 +++ drivers/gpu/drm/msm/msm_smmu.c | 14 ++++---- 5 files changed, 44 insertions(+), 18 deletions(-) diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt index 863a1696b8c2..b13a10aa397b 100644 --- a/Documentation/devicetree/bindings/display/msm/sde.txt +++ b/Documentation/devicetree/bindings/display/msm/sde.txt @@ -361,17 +361,23 @@ Bus Scaling Data: * Current values of src & dst are defined at include/linux/msm-bus-board.h +SMMU Subnodes: +- smmu_sde_****: Child nodes representing sde smmu virtual + devices + Subnode properties: -- compatible : Compatible name used in smmu v2. 
- smmu_v2 names should be: - "qcom,smmu-mdp-unsec" - smmu context bank device for - unsecure mdp domain. - "qcom,smmu-rot-unsec" - smmu context bank device for - unsecure rotation domain. - "qcom,smmu-mdp-sec" - smmu context bank device for - secure mdp domain. - "qcom,smmu-rot-sec" - smmu context bank device for - secure rotation domain. +- compatible: Compatible names used for smmu devices. + names should be: + "qcom,smmu_sde_unsec": smmu context bank device + for unsecure sde real time domain. + "qcom,smmu_sde_sec": smmu context bank device + for secure sde real time domain. + "qcom,smmu_sde_nrt_unsec": smmu context bank device + for unsecure sde non-real time domain. + "qcom,smmu_sde_nrt_sec": smmu context bank device + for secure sde non-real time domain. + + Please refer to ../../interrupt-controller/interrupts.txt for a general description of interrupt bindings. @@ -673,4 +679,14 @@ Example: <1 590 0 160000>, <1 590 0 320000>; }; + + smmu_kms_unsec: qcom,smmu_kms_unsec_cb { + compatible = "qcom,smmu_sde_unsec"; + iommus = <&mmss_smmu 0>; + }; + + smmu_kms_sec: qcom,smmu_kms_sec_cb { + compatible = "qcom,smmu_sde_sec"; + iommus = <&mmss_smmu 1>; + }; }; diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi index e31f8fdd1cd5..89f80d428656 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi @@ -207,6 +207,12 @@ }; }; + smmu_sde_sec: qcom,smmu_sde_sec_cb { + compatible = "qcom,smmu_sde_sec"; + iommus = <&apps_smmu 0x881 0x8>, + <&apps_smmu 0xc81 0x8>; + }; + /* data and reg bus scale settings */ qcom,sde-data-bus { qcom,msm-bus,name = "mdss_sde_mnoc"; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 9aebeb94bc8e..810d0d6eac16 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -2054,6 +2054,7 @@ void __exit adreno_unregister(void) static int __init msm_drm_register(void) { DBG("init"); + msm_smmu_driver_init(); msm_dsi_register(); msm_edp_register(); msm_hdmi_register(); @@ -2069,6 +2070,7 @@ static void __exit msm_drm_unregister(void) adreno_unregister(); msm_edp_unregister(); msm_dsi_unregister(); + msm_smmu_driver_cleanup(); } module_init(msm_drm_register); diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index dc7e5a6521e9..5af26e236019 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -63,4 +63,8 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain); struct msm_mmu *msm_smmu_new(struct device *dev, enum msm_mmu_domain_type domain); +/* SDE smmu driver initialize and cleanup functions */ +int __init msm_smmu_driver_init(void); +void __exit msm_smmu_driver_cleanup(void); + #endif /* __MSM_MMU_H__ */ diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c index e3d2e34663e9..9dbf4be45128 100644 --- a/drivers/gpu/drm/msm/msm_smmu.c +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -305,13 +305,13 @@ static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = { }; static const struct of_device_id msm_smmu_dt_match[] = { - { .compatible = "qcom,smmu-mdp-unsec", + { .compatible = "qcom,smmu_sde_unsec", .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] }, - { .compatible = "qcom,smmu-mdp-sec", + { .compatible = "qcom,smmu_sde_sec", .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] }, - { .compatible = "qcom,smmu-rot-unsec", + { .compatible = "qcom,smmu_sde_nrt_unsec", .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] }, - { 
.compatible = "qcom,smmu-rot-sec", + { .compatible = "qcom,smmu_sde_nrt_sec", .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] }, {} }; @@ -535,7 +535,7 @@ static struct platform_driver msm_smmu_driver = { }, }; -static int __init msm_smmu_driver_init(void) +int __init msm_smmu_driver_init(void) { int ret; @@ -545,13 +545,11 @@ static int __init msm_smmu_driver_init(void) return ret; } -module_init(msm_smmu_driver_init); -static void __exit msm_smmu_driver_cleanup(void) +void __exit msm_smmu_driver_cleanup(void) { platform_driver_unregister(&msm_smmu_driver); } -module_exit(msm_smmu_driver_cleanup); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("MSM SMMU driver"); -- GitLab From 333d6c081d1f09b55b5ad8fd004801a19a990ded Mon Sep 17 00:00:00 2001 From: Abhijit Kulkarni Date: Mon, 1 May 2017 18:32:18 -0700 Subject: [PATCH 764/786] drm/msm/sde: add support to select secure context bank Add support in the sde plane component to select the secure context bank based on the fb_mode plane property. This changes also sets the correct hw settings for the secure plane src address. CRs-Fixed: 2050484 Change-Id: Iacdfbb366b3ff56fcd5036fb9157547542095cde Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/sde/sde_plane.c | 114 ++++++++++++++++++++++------ drivers/gpu/drm/msm/sde/sde_plane.h | 3 + 2 files changed, 95 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index 1aa4ce6a7705..581b26e97eb4 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -130,8 +130,6 @@ enum sde_plane_sclcheck_state { struct sde_plane { struct drm_plane base; - struct msm_gem_address_space *aspace; - struct mutex lock; enum sde_sspp pipe; @@ -868,12 +866,62 @@ int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms) return ret; } +/** + * _sde_plane_get_aspace: gets the address space based on the + * fb_translation mode property + */ +static int _sde_plane_get_aspace( + struct sde_plane *psde, + struct sde_plane_state *pstate, + struct msm_gem_address_space **aspace) +{ + struct sde_kms *kms; + int mode; + + if (!psde || !pstate || !aspace) { + SDE_ERROR("invalid parameters\n"); + return -EINVAL; + } + + kms = _sde_plane_get_kms(&psde->base); + if (!kms) { + SDE_ERROR("invalid kms\n"); + return -EINVAL; + } + + mode = sde_plane_get_property(pstate, + PLANE_PROP_FB_TRANSLATION_MODE); + + switch (mode) { + case SDE_DRM_FB_NON_SEC: + *aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]; + if (!aspace) + return -EINVAL; + break; + case SDE_DRM_FB_SEC: + *aspace = kms->aspace[MSM_SMMU_DOMAIN_SECURE]; + if (!aspace) + return -EINVAL; + break; + case SDE_DRM_FB_SEC_DIR_TRANS: + case SDE_DRM_FB_NON_SEC_DIR_TRANS: + *aspace = NULL; + break; + default: + SDE_ERROR("invalid fb_translation mode:%d\n", mode); + return -EFAULT; + } + + return 0; +} + static inline void _sde_plane_set_scanout(struct drm_plane *plane, struct sde_plane_state *pstate, struct sde_hw_pipe_cfg *pipe_cfg, struct drm_framebuffer *fb) { struct sde_plane *psde; + struct msm_gem_address_space *aspace = NULL; int ret; if (!plane || !pstate || !pipe_cfg || !fb) { @@ -889,7 +937,13 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane, return; } - ret = sde_format_populate_layout(psde->aspace, fb, &pipe_cfg->layout); + ret = _sde_plane_get_aspace(psde, pstate, &aspace); + if (ret) { + SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", ret); + return; + } + + ret = sde_format_populate_layout(aspace, fb, &pipe_cfg->layout); if (ret 
== -EAGAIN) SDE_DEBUG_PLANE(psde, "not updating same src addrs\n"); else if (ret) @@ -1742,6 +1796,7 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane, struct drm_crtc_state *cstate; struct sde_crtc_state *sde_cstate; int ret, i; + int fb_mode; if (!plane || !state || !state->fb || !rstate->rot_hw) { SDE_ERROR("invalid parameters\n"); @@ -1765,7 +1820,14 @@ static int sde_plane_rot_submit_command(struct drm_plane *plane, rot_cmd->rot90 = rstate->rot90; rot_cmd->hflip = rstate->hflip; rot_cmd->vflip = rstate->vflip; - rot_cmd->secure = state->fb->flags & DRM_MODE_FB_SECURE ? true : false; + fb_mode = sde_plane_get_property(pstate, + PLANE_PROP_FB_TRANSLATION_MODE); + if ((fb_mode == SDE_DRM_FB_SEC) || + (fb_mode == SDE_DRM_FB_SEC_DIR_TRANS)) + rot_cmd->secure = true; + else + rot_cmd->secure = false; + rot_cmd->prefill_bw = sde_crtc_get_property(sde_cstate, CRTC_PROP_ROT_PREFILL_BW); rot_cmd->clkrate = sde_crtc_get_property(sde_cstate, @@ -1951,7 +2013,6 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, struct sde_plane_state *new_pstate = to_sde_plane_state(new_state); struct sde_plane_rot_state *new_rstate = &new_pstate->rot; struct drm_crtc_state *cstate; - struct sde_kms *kms = _sde_plane_get_kms(plane); int ret; SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n", @@ -1960,9 +2021,6 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, !!new_rstate->out_sbuf, !!new_rstate->rot_hw, sde_plane_crtc_enabled(new_state)); - if (!kms) - return -EINVAL; - if (!new_rstate->out_sbuf || !new_rstate->rot_hw) return 0; @@ -2008,13 +2066,7 @@ static int sde_plane_rot_prepare_fb(struct drm_plane *plane, SDE_DEBUG("plane%d.%d allocate fb/fbo\n", plane->base.id, new_rstate->sequence_id); - - if (new_state->fb->flags & DRM_MODE_FB_SECURE) - new_rstate->aspace = - kms->aspace[MSM_SMMU_DOMAIN_SECURE]; - else - new_rstate->aspace = - kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]; + new_rstate->aspace = new_pstate->aspace; /* check if out_fb is already attached to rotator */ new_rstate->out_fbo = sde_kms_fbo_alloc(plane->dev, fb_w, fb_h, @@ -2170,7 +2222,6 @@ static int sde_plane_rot_atomic_check(struct drm_plane *plane, old_pstate = to_sde_plane_state(plane->state); rstate = &pstate->rot; old_rstate = &old_pstate->rot; - rstate->aspace = psde->aspace; /* cstate will be null if crtc is disconnected from plane */ cstate = _sde_plane_get_crtc_state(state); @@ -2648,8 +2699,10 @@ static int sde_plane_prepare_fb(struct drm_plane *plane, { struct drm_framebuffer *fb = new_state->fb; struct sde_plane *psde = to_sde_plane(plane); + struct sde_plane_state *pstate = to_sde_plane_state(new_state); struct sde_plane_rot_state *new_rstate; struct sde_hw_fmt_layout layout; + struct msm_gem_address_space *aspace; int ret; if (!new_state->fb) @@ -2657,6 +2710,14 @@ static int sde_plane_prepare_fb(struct drm_plane *plane, SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id); + ret = _sde_plane_get_aspace(psde, pstate, &aspace); + if (ret) { + SDE_ERROR_PLANE(psde, "Failed to get aspace\n"); + return ret; + } + + /*cache aspace */ + pstate->aspace = aspace; ret = sde_plane_rot_prepare_fb(plane, new_state); if (ret) { SDE_ERROR("failed to prepare rot framebuffer\n"); @@ -2665,14 +2726,14 @@ static int sde_plane_prepare_fb(struct drm_plane *plane, new_rstate = &to_sde_plane_state(new_state)->rot; - ret = msm_framebuffer_prepare(new_rstate->out_fb, psde->aspace); + ret = msm_framebuffer_prepare(new_rstate->out_fb, pstate->aspace); if (ret) { SDE_ERROR("failed to prepare framebuffer\n"); return ret; 
} /* validate framebuffer layout before commit */ - ret = sde_format_populate_layout(psde->aspace, + ret = sde_format_populate_layout(pstate->aspace, new_rstate->out_fb, &layout); if (ret) { SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret); @@ -3142,7 +3203,7 @@ void sde_plane_flush(struct drm_plane *plane) static int sde_plane_sspp_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { - uint32_t nplanes, src_flags; + uint32_t nplanes, src_flags = 0x0; struct sde_plane *psde; struct drm_plane_state *state; struct sde_plane_state *pstate; @@ -3155,6 +3216,7 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane, const struct sde_rect *crtc_roi; bool q16_data = true; int idx; + int mode; if (!plane) { SDE_ERROR("invalid plane\n"); @@ -3236,6 +3298,9 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane, case PLANE_PROP_BLEND_OP: /* no special action required */ break; + case PLANE_PROP_FB_TRANSLATION_MODE: + pstate->dirty |= SDE_PLANE_DIRTY_FB_TRANSLATION_MODE; + break; case PLANE_PROP_PREFILL_SIZE: case PLANE_PROP_PREFILL_TIME: pstate->dirty |= SDE_PLANE_DIRTY_PERF; @@ -3283,6 +3348,12 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane, psde->is_rt_pipe = (sde_crtc_get_client_type(crtc) != NRT_CLIENT); _sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL); + /* update secure session flag */ + mode = sde_plane_get_property(pstate, PLANE_PROP_FB_TRANSLATION_MODE); + if ((mode == SDE_DRM_FB_SEC) || + (mode == SDE_DRM_FB_SEC_DIR_TRANS)) + src_flags |= SDE_SSPP_SECURE_OVERLAY_SESSION; + /* update roi config */ if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) { POPULATE_RECT(&src, rstate->out_src_x, rstate->out_src_y, @@ -3360,9 +3431,9 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane, pstate->multirect_mode); } - if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) && + if (((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) || + (src_flags & SDE_SSPP_SECURE_OVERLAY_SESSION)) && psde->pipe_hw->ops.setup_format) { - src_flags = 0x0; SDE_DEBUG_PLANE(psde, "rotation 0x%X\n", rstate->out_rotation); if (rstate->out_rotation & DRM_REFLECT_X) src_flags |= SDE_SSPP_FLIP_LR; @@ -4501,7 +4572,6 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, /* cache local stuff for later */ plane = &psde->base; psde->pipe = pipe; - psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]; psde->is_virtual = (master_plane_id != 0); psde->scaler_check_state = SDE_PLANE_SCLCHECK_NONE; INIT_LIST_HEAD(&psde->mplane_list); diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h index 46784e756fd4..ccbf0059458e 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.h +++ b/drivers/gpu/drm/msm/sde/sde_plane.h @@ -96,6 +96,7 @@ struct sde_plane_rot_state { #define SDE_PLANE_DIRTY_FORMAT 0x2 #define SDE_PLANE_DIRTY_SHARPEN 0x4 #define SDE_PLANE_DIRTY_PERF 0x8 +#define SDE_PLANE_DIRTY_FB_TRANSLATION_MODE 0x10 #define SDE_PLANE_DIRTY_ALL 0xFFFFFFFF /** @@ -103,6 +104,7 @@ struct sde_plane_rot_state { * @base: base drm plane state object * @property_values: cached plane property values * @property_blobs: blob properties + * @aspace: pointer to address space for input/output buffers * @input_fence: dereferenced input fence pointer * @stage: assigned by crtc blender * @excl_rect: exclusion rect values @@ -116,6 +118,7 @@ struct sde_plane_state { struct drm_plane_state base; uint64_t property_values[PLANE_PROP_COUNT]; struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT]; + struct msm_gem_address_space *aspace; void 
*input_fence; enum sde_stage stage; struct sde_rect excl_rect; -- GitLab From a8f2a99a43eeda253e81faafb7e28779cf0a9386 Mon Sep 17 00:00:00 2001 From: Abhijit Kulkarni Date: Tue, 30 May 2017 05:04:51 -0700 Subject: [PATCH 765/786] drm/msm: start secure domain va from non-zero address An address with all zeroes is considered an invalid address; hence, change the secure domain mapping space to start from a non-zero address. CRs-Fixed: 2050484 Change-Id: I27300845fb3839372bc38be1d322a7ad0cd0472d Signed-off-by: Abhijit Kulkarni --- drivers/gpu/drm/msm/msm_smmu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c index 9dbf4be45128..50b81bb6024a 100644 --- a/drivers/gpu/drm/msm/msm_smmu.c +++ b/drivers/gpu/drm/msm/msm_smmu.c @@ -286,8 +286,8 @@ static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = { }, [MSM_SMMU_DOMAIN_SECURE] = { .label = "mdp_s", - .va_start = 0, - .va_size = SZ_4G, + .va_start = SZ_128K, + .va_size = SZ_4G - SZ_128K, .secure = true, }, [MSM_SMMU_DOMAIN_NRT_UNSECURE] = { @@ -298,8 +298,8 @@ static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = { }, [MSM_SMMU_DOMAIN_NRT_SECURE] = { .label = "rot_s", - .va_start = 0, - .va_size = SZ_4G, + .va_start = SZ_128K, + .va_size = SZ_4G - SZ_128K, .secure = true, }, }; -- GitLab From c063f0f6e4c09f5601212a6ea6f9f539e8cd59b7 Mon Sep 17 00:00:00 2001 From: David Dai Date: Wed, 5 Jul 2017 11:21:21 -0700 Subject: [PATCH 766/786] ARM: dts: msm: Reduce IPA msm bus votes for sdm845 Values voted for are multiplied by 1000 in the bus driver, causing higher than expected IPA votes; divide the registered values to account for the unit. Change-Id: Ia884f4fec94723e752f142f1cb5bb9483020c86f Signed-off-by: David Dai --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 18202e796861..d01d0fab2df1 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -2761,17 +2761,17 @@ <90 512 80000 640000>, <90 585 80000 640000>, <1 676 80000 80000>, - <143 777 0 150000000>, + <143 777 0 150>, /* IB defined for IPA clk in MHz*/ /* NOMINAL */ <90 512 206000 960000>, <90 585 206000 960000>, <1 676 206000 160000>, - <143 777 0 300000000>, + <143 777 0 300>, /* IB defined for IPA clk in MHz*/ /* TURBO */ <90 512 206000 3600000>, <90 585 206000 3600000>, <1 676 206000 300000>, - <143 777 0 355333333>; + <143 777 0 355>; /* IB defined for IPA clk in MHz*/ qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO"; /* IPA RAM mmap */ -- GitLab From 9497220d8e508375a8df79149d8f37d3a0c8ced9 Mon Sep 17 00:00:00 2001 From: Lina Iyer Date: Wed, 28 Jun 2017 15:37:43 -0600 Subject: [PATCH 767/786] drivers: mailbox: fix excessive logging When the TCS controller finds the TCSes are busy, it returns to the mailbox framework to retry. If this keeps repeating, and with increased verbosity, we may see watchdog bites because of excessive logging. Rate limit the print.
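For illustration only, not part of the patch: a minimal sketch of the rate-limited logging pattern the one-line hunk below applies, so that a busy-retry loop cannot flood the kernel log. The helper name try_send_message() is hypothetical; only the pr_info_ratelimited() call mirrors the driver change.

	/* hypothetical retry path; the log rate is bounded by the printk ratelimit */
	static int send_with_bounded_logging(struct mbox_chan *chan, void *data)
	{
		int ret = try_send_message(chan, data);	/* hypothetical helper */

		if (ret == -EBUSY) {
			pr_info_ratelimited("TCS busy, retrying message send\n");
			ret = -EAGAIN;
		}
		return ret;
	}
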
Change-Id: I095d99f1a849453f0b944c1517fbf2df2f669805 Signed-off-by: Lina Iyer --- drivers/mailbox/qti-tcs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c index bde20b4e844d..be91a65089f2 100644 --- a/drivers/mailbox/qti-tcs.c +++ b/drivers/mailbox/qti-tcs.c @@ -909,7 +909,7 @@ static int chan_tcs_write(struct mbox_chan *chan, void *data) /* If we were just busy waiting for TCS, dump the state and return */ if (ret == -EBUSY) { - pr_info("TCS Busy, retrying RPMH message send\n"); + pr_info_ratelimited("TCS Busy, retrying RPMH message send\n"); dump_tcs_stats(drv); ret = -EAGAIN; } -- GitLab From 79f56a68fab3d8d9268d4de1050793a6d88b9380 Mon Sep 17 00:00:00 2001 From: Chinmay Sawarkar Date: Fri, 30 Jun 2017 12:46:39 -0700 Subject: [PATCH 768/786] msm: vidc: Fix hfi macro value of blur frame size HFI macro value for the property "blur frame size" has changed, and needs to be reflected in the driver. CRs-Fixed: 2072419 Change-Id: I384b2f343df5316f1b294ee7928329f5f2759363 Signed-off-by: Chinmay Sawarkar --- drivers/media/platform/msm/vidc/vidc_hfi_helper.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h index 8169a9b5b7e0..d5624ce7b741 100644 --- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h +++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h @@ -339,6 +339,8 @@ struct hfi_buffer_info { (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00E) #define HFI_PROPERTY_CONFIG_VENC_BASELAYER_PRIORITYID \ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x00F) +#define HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE \ + (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x010) #define HFI_PROPERTY_CONFIG_VENC_FRAME_QP \ (HFI_PROPERTY_CONFIG_VENC_COMMON_START + 0x012) @@ -349,8 +351,6 @@ struct hfi_buffer_info { #define HFI_PROPERTY_CONFIG_VPE_COMMON_START \ (HFI_DOMAIN_BASE_VPE + HFI_ARCH_COMMON_OFFSET + 0x8000) -#define HFI_PROPERTY_CONFIG_VENC_BLUR_FRAME_SIZE \ - (HFI_PROPERTY_CONFIG_COMMON_START + 0x010) struct hfi_pic_struct { u32 progressive_only; -- GitLab From c3174f568f70bfff20c7b8130e382e0387d09c23 Mon Sep 17 00:00:00 2001 From: Shihuan Liu Date: Thu, 4 May 2017 15:59:13 -0700 Subject: [PATCH 769/786] msm: ipa: add L2TP/VLAN messaging Add L2TP/VLAN messaging support in IPA driver Change-Id: Ifce9adb1eb1946a2000f54933f1990747fe7daef Acked-by: Shihuan Liu Signed-off-by: Skylar Chang --- drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 4 + drivers/platform/msm/ipa/ipa_v3/ipa.c | 112 ++++++++++++++++++ drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 4 + include/uapi/linux/msm_ipa.h | 58 ++++++++- 4 files changed, 175 insertions(+), 3 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c index e8710a65e813..2b517a183008 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c @@ -83,6 +83,10 @@ const char *ipa_event_name[] = { __stringify(IPA_QUOTA_REACH), __stringify(IPA_SSR_BEFORE_SHUTDOWN), __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(ADD_VLAN_IFACE), + __stringify(DEL_VLAN_IFACE), + __stringify(ADD_L2TP_VLAN_MAPPING), + __stringify(DEL_L2TP_VLAN_MAPPING) }; const char *ipa_hdr_l2_type_name[] = { diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c index 1c3995d46056..1e2b2001ce1c 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa.c +++ 
b/drivers/platform/msm/ipa/ipa_v3/ipa.c @@ -546,6 +546,90 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type) return 0; } +static void ipa3_vlan_l2tp_msg_free_cb(void *buff, u32 len, u32 type) +{ + if (!buff) { + IPAERR("Null buffer\n"); + return; + } + + if (type != ADD_VLAN_IFACE && + type != DEL_VLAN_IFACE && + type != ADD_L2TP_VLAN_MAPPING && + type != DEL_L2TP_VLAN_MAPPING) { + IPAERR("Wrong type given. buff %pK type %d\n", buff, type); + return; + } + + kfree(buff); +} + +static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type) +{ + int retval; + struct ipa_ioc_vlan_iface_info *vlan_info; + struct ipa_ioc_l2tp_vlan_mapping_info *mapping_info; + struct ipa_msg_meta msg_meta; + + if (msg_type == ADD_VLAN_IFACE || + msg_type == DEL_VLAN_IFACE) { + vlan_info = kzalloc(sizeof(struct ipa_ioc_vlan_iface_info), + GFP_KERNEL); + if (!vlan_info) { + IPAERR("no memory\n"); + return -ENOMEM; + } + + if (copy_from_user((u8 *)vlan_info, (void __user *)usr_param, + sizeof(struct ipa_ioc_vlan_iface_info))) { + kfree(vlan_info); + return -EFAULT; + } + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct ipa_ioc_vlan_iface_info); + retval = ipa3_send_msg(&msg_meta, vlan_info, + ipa3_vlan_l2tp_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d\n", retval); + kfree(vlan_info); + return retval; + } + } else if (msg_type == ADD_L2TP_VLAN_MAPPING || + msg_type == DEL_L2TP_VLAN_MAPPING) { + mapping_info = kzalloc(sizeof(struct + ipa_ioc_l2tp_vlan_mapping_info), GFP_KERNEL); + if (!mapping_info) { + IPAERR("no memory\n"); + return -ENOMEM; + } + + if (copy_from_user((u8 *)mapping_info, + (void __user *)usr_param, + sizeof(struct ipa_ioc_l2tp_vlan_mapping_info))) { + kfree(mapping_info); + return -EFAULT; + } + + memset(&msg_meta, 0, sizeof(msg_meta)); + msg_meta.msg_type = msg_type; + msg_meta.msg_len = sizeof(struct + ipa_ioc_l2tp_vlan_mapping_info); + retval = ipa3_send_msg(&msg_meta, mapping_info, + ipa3_vlan_l2tp_msg_free_cb); + if (retval) { + IPAERR("ipa3_send_msg failed: %d\n", retval); + kfree(mapping_info); + return retval; + } + } else { + IPAERR("Unexpected event\n"); + return -EFAULT; + } + + return 0; +} static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -1530,6 +1614,34 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; + case IPA_IOC_ADD_VLAN_IFACE: + if (ipa3_send_vlan_l2tp_msg(arg, ADD_VLAN_IFACE)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_VLAN_IFACE: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_VLAN_IFACE)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_ADD_L2TP_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, ADD_L2TP_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + + case IPA_IOC_DEL_L2TP_VLAN_MAPPING: + if (ipa3_send_vlan_l2tp_msg(arg, DEL_L2TP_VLAN_MAPPING)) { + retval = -EFAULT; + break; + } + break; + default: /* redundant, as cmd was checked against MAXNR */ IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); return -ENOTTY; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c index 1215d4ef1446..6a3363ae57ad 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c @@ -64,6 +64,10 @@ const char *ipa3_event_name[] = { __stringify(IPA_QUOTA_REACH), __stringify(IPA_SSR_BEFORE_SHUTDOWN), __stringify(IPA_SSR_AFTER_POWERUP), + __stringify(ADD_VLAN_IFACE), + 
__stringify(DEL_VLAN_IFACE), + __stringify(ADD_L2TP_VLAN_MAPPING), + __stringify(DEL_L2TP_VLAN_MAPPING) }; const char *ipa3_hdr_l2_type_name[] = { diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h index 21936c51a94b..4550a7566546 100644 --- a/include/uapi/linux/msm_ipa.h +++ b/include/uapi/linux/msm_ipa.h @@ -69,8 +69,12 @@ #define IPA_IOCTL_ADD_FLT_RULE_AFTER 44 #define IPA_IOCTL_GET_HW_VERSION 45 #define IPA_IOCTL_ADD_RT_RULE_EXT 46 -#define IPA_IOCTL_NAT_MODIFY_PDN 47 -#define IPA_IOCTL_MAX 48 +#define IPA_IOCTL_ADD_VLAN_IFACE 47 +#define IPA_IOCTL_DEL_VLAN_IFACE 48 +#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING 49 +#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING 50 +#define IPA_IOCTL_NAT_MODIFY_PDN 51 +#define IPA_IOCTL_MAX 52 /** * max size of the header to be inserted @@ -452,7 +456,16 @@ enum ipa_ssr_event { IPA_SSR_EVENT_MAX }; -#define IPA_EVENT_MAX_NUM ((int)IPA_SSR_EVENT_MAX) +enum ipa_vlan_l2tp_event { + ADD_VLAN_IFACE = IPA_SSR_EVENT_MAX, + DEL_VLAN_IFACE, + ADD_L2TP_VLAN_MAPPING, + DEL_L2TP_VLAN_MAPPING, + IPA_VLAN_L2TP_EVENT_MAX, +}; + +#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX) +#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM) /** * enum ipa_rm_resource_name - IPA RM clients identification names @@ -1445,6 +1458,30 @@ struct ipa_ioc_nat_pdn_entry { uint32_t dst_metadata; }; +/** + * struct ipa_ioc_vlan_iface_info - add vlan interface + * @name: interface name + * @vlan_id: VLAN ID + */ +struct ipa_ioc_vlan_iface_info { + char name[IPA_RESOURCE_NAME_MAX]; + uint8_t vlan_id; +}; + +/** + * struct ipa_ioc_l2tp_vlan_mapping_info - l2tp->vlan mapping info + * @iptype: l2tp tunnel IP type + * @l2tp_iface_name: l2tp interface name + * @l2tp_session_id: l2tp session id + * @vlan_iface_name: vlan interface name + */ +struct ipa_ioc_l2tp_vlan_mapping_info { + enum ipa_ip_type iptype; + char l2tp_iface_name[IPA_RESOURCE_NAME_MAX]; + uint8_t l2tp_session_id; + char vlan_iface_name[IPA_RESOURCE_NAME_MAX]; +}; + /** * struct ipa_msg_meta - Format of the message meta-data. * @msg_type: the type of the message @@ -1722,6 +1759,21 @@ enum ipacm_client_enum { IPA_IOCTL_GET_HW_VERSION, \ enum ipa_hw_type *) +#define IPA_IOC_ADD_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_VLAN_IFACE, \ + struct ipa_ioc_vlan_iface_info *) + +#define IPA_IOC_DEL_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_VLAN_IFACE, \ + struct ipa_ioc_vlan_iface_info *) + +#define IPA_IOC_ADD_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_ADD_L2TP_VLAN_MAPPING, \ + struct ipa_ioc_l2tp_vlan_mapping_info *) + +#define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \ + IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \ + struct ipa_ioc_l2tp_vlan_mapping_info *) /* * unique magic number of the Tethering bridge ioctls */ -- GitLab From d197bf614bd31141555b0b08550711b704332b8f Mon Sep 17 00:00:00 2001 From: Shrenuj Bansal Date: Fri, 7 Apr 2017 11:00:09 -0700 Subject: [PATCH 770/786] msm: kgsl: Add exceptions to snapshot based on GX and SPTP status It is possible that during a GPU/GMU hang either the entire GX is powered off or just the SPTPRAC headswitch is turned off by the GMU firmware. Therefore, while doing snapshot check if we have the blocks powered on before trying to dump them. In addition, mask all GMU interrupts prior to dumping snapshot in order to reduce instability. 
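For illustration only, not part of the patch: a minimal sketch of the guard pattern described above, reusing the register and bit names added by the hunks below; dump_gx_registers() is a hypothetical stand-in for the actual snapshot helpers.

	static void snapshot_gx_if_powered(struct kgsl_device *device)
	{
		unsigned int val;

		/* GX_GDSC_POWER_OFF is BIT(6), GX_CLK_OFF is BIT(7) per the patch */
		kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val);
		if (val & (GX_GDSC_POWER_OFF | GX_CLK_OFF))
			return;	/* GX rail or clock is off; skip the GX dumps */

		dump_gx_registers(device);	/* hypothetical dump helper */
	}
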
CRs-Fixed: 2062271 Change-Id: I7bf7d27bc6ebdc642e5675a4c9645957051273d5 Signed-off-by: Shrenuj Bansal Signed-off-by: George Shen --- drivers/gpu/msm/a6xx_reg.h | 1 + drivers/gpu/msm/adreno.h | 2 + drivers/gpu/msm/adreno_a6xx.c | 52 ++++++++++++++++++++++++++ drivers/gpu/msm/adreno_a6xx_snapshot.c | 31 ++++++++++----- drivers/gpu/msm/adreno_dispatch.c | 29 +++++++++++--- drivers/gpu/msm/adreno_snapshot.c | 13 +++++-- 6 files changed, 109 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h index f4552b61a4dc..32ebe0c0b709 100644 --- a/drivers/gpu/msm/a6xx_reg.h +++ b/drivers/gpu/msm/a6xx_reg.h @@ -875,6 +875,7 @@ #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x23B0C #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x23B0D #define A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x23B0E +#define A6XX_GMU_AO_AHB_FENCE_CTRL 0x23B10 #define A6XX_GMU_AHB_FENCE_STATUS 0x23B13 #define A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x23B15 #define A6XX_GMU_AO_SPARE_CNTL 0x23B16 diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h index da8951bf266c..4900b3a9b805 100644 --- a/drivers/gpu/msm/adreno.h +++ b/drivers/gpu/msm/adreno.h @@ -876,6 +876,8 @@ struct adreno_gpudev { unsigned int fsynr1); int (*reset)(struct kgsl_device *, int fault); int (*soft_reset)(struct adreno_device *); + bool (*gx_is_on)(struct adreno_device *); + bool (*sptprac_is_on)(struct adreno_device *); }; /** diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c index 30ada8f57008..a513cf8bf2c9 100644 --- a/drivers/gpu/msm/adreno_a6xx.c +++ b/drivers/gpu/msm/adreno_a6xx.c @@ -1188,6 +1188,56 @@ static int a6xx_hm_disable(struct adreno_device *adreno_dev) return regulator_disable(gmu->gx_gdsc); } +#define SPTPRAC_POWER_OFF BIT(2) +#define SP_CLK_OFF BIT(4) +#define GX_GDSC_POWER_OFF BIT(6) +#define GX_CLK_OFF BIT(7) + +/* + * a6xx_gx_is_on() - Check if GX is on using pwr status register + * @adreno_dev - Pointer to adreno_device + * This check should only be performed if the keepalive bit is set or it + * can be guaranteed that the power state of the GPU will remain unchanged + */ +static bool a6xx_gx_is_on(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + unsigned int val; + bool state; + + if (!kgsl_gmu_isenabled(device)) + return true; + + kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val); + state = !(val & (GX_GDSC_POWER_OFF | GX_CLK_OFF)); + + /* If GMU is holding on to the fence then we cannot dump any GX stuff */ + kgsl_gmu_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val); + if (val) + return false; + + return state; + +} + +/* + * a6xx_sptprac_is_on() - Check if SPTP is on using pwr status register + * @adreno_dev - Pointer to adreno_device + * This check should only be performed if the keepalive bit is set or it + * can be guaranteed that the power state of the GPU will remain unchanged + */ +static bool a6xx_sptprac_is_on(struct adreno_device *adreno_dev) +{ + struct kgsl_device *device = KGSL_DEVICE(adreno_dev); + unsigned int val; + + if (!kgsl_gmu_isenabled(device)) + return true; + + kgsl_gmu_regread(device, A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, &val); + return !(val & (SPTPRAC_POWER_OFF | SP_CLK_OFF)); +} + /* * a6xx_hm_sptprac_enable() - Turn on HM and SPTPRAC * @device: Pointer to KGSL device @@ -2778,4 +2828,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = { .preemption_set_marker = a6xx_preemption_set_marker, .preemption_context_init = a6xx_preemption_context_init, .preemption_context_destroy = 
a6xx_preemption_context_destroy, + .gx_is_on = a6xx_gx_is_on, + .sptprac_is_on = a6xx_sptprac_is_on, }; diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c index 216108354828..ed0129f57053 100644 --- a/drivers/gpu/msm/adreno_a6xx_snapshot.c +++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c @@ -1408,6 +1408,18 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, struct kgsl_device *device = KGSL_DEVICE(adreno_dev); struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev); struct adreno_snapshot_data *snap_data = gpudev->snapshot_data; + bool sptprac_on; + + /* GMU TCM data dumped through AHB */ + a6xx_snapshot_gmu(device, snapshot); + + sptprac_on = gpudev->sptprac_is_on(adreno_dev); + + /* Return if the GX is off */ + if (!gpudev->gx_is_on(adreno_dev)) { + pr_err("GX is off. Only dumping GMU data in snapshot\n"); + return; + } /* Dump the registers which get affected by crash dumper trigger */ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, @@ -1419,7 +1431,8 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, ARRAY_SIZE(a6xx_vbif_snapshot_registers)); /* Try to run the crash dumper */ - _a6xx_do_crashdump(device); + if (sptprac_on) + _a6xx_do_crashdump(device); kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot, a6xx_snapshot_registers, NULL); @@ -1451,19 +1464,19 @@ void a6xx_snapshot(struct adreno_device *adreno_dev, /* Mempool debug data */ a6xx_snapshot_mempool(device, snapshot); - /* Shader memory */ - a6xx_snapshot_shader(device, snapshot); + if (sptprac_on) { + /* Shader memory */ + a6xx_snapshot_shader(device, snapshot); - /* MVC register section */ - a6xx_snapshot_mvc_regs(device, snapshot); + /* MVC register section */ + a6xx_snapshot_mvc_regs(device, snapshot); - /* registers dumped through DBG AHB */ - a6xx_snapshot_dbgahb_regs(device, snapshot); + /* registers dumped through DBG AHB */ + a6xx_snapshot_dbgahb_regs(device, snapshot); + } a6xx_snapshot_debugbus(device, snapshot); - /* GMU TCM data dumped through AHB */ - a6xx_snapshot_gmu(device, snapshot); } static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset) diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c index e8b1c673ad9d..422c434f0cbb 100644 --- a/drivers/gpu/msm/adreno_dispatch.c +++ b/drivers/gpu/msm/adreno_dispatch.c @@ -2060,11 +2060,25 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) int ret, i; int fault; int halt; + bool gx_on = true; fault = atomic_xchg(&dispatcher->fault, 0); if (fault == 0) return 0; + /* Mask all GMU interrupts */ + if (kgsl_gmu_isenabled(device)) { + adreno_write_gmureg(adreno_dev, + ADRENO_REG_GMU_AO_HOST_INTERRUPT_MASK, + 0xFFFFFFFF); + adreno_write_gmureg(adreno_dev, + ADRENO_REG_GMU_GMU2HOST_INTR_MASK, + 0xFFFFFFFF); + } + + if (gpudev->gx_is_on) + gx_on = gpudev->gx_is_on(adreno_dev); + /* * In the very unlikely case that the power is off, do nothing - the * state will be reset on power up and everybody will be happy @@ -2084,7 +2098,8 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) * else return early to give the fault handler a chance to run. 
*/ if (!(fault & ADRENO_IOMMU_PAGE_FAULT) && - (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev))) { + (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) && + gx_on) { unsigned int val; mutex_lock(&device->mutex); @@ -2106,14 +2121,15 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) mutex_lock(&device->mutex); - adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE, - ADRENO_REG_CP_RB_BASE_HI, &base); + if (gx_on) + adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE, + ADRENO_REG_CP_RB_BASE_HI, &base); /* * Force the CP off for anything but a hard fault to make sure it is * good and stopped */ - if (!(fault & ADRENO_HARD_FAULT)) { + if (!(fault & ADRENO_HARD_FAULT) && gx_on) { adreno_readreg(adreno_dev, ADRENO_REG_CP_ME_CNTL, ®); if (adreno_is_a5xx(adreno_dev) || adreno_is_a6xx(adreno_dev)) reg |= 1 | (1 << 1); @@ -2149,8 +2165,9 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev) trace_adreno_cmdbatch_fault(cmdobj, fault); } - adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE, - ADRENO_REG_CP_IB1_BASE_HI, &base); + if (gx_on) + adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE, + ADRENO_REG_CP_IB1_BASE_HI, &base); do_header_and_snapshot(device, hung_rb, cmdobj); diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c index 92b541d6693b..0840aba77e61 100644 --- a/drivers/gpu/msm/adreno_snapshot.c +++ b/drivers/gpu/msm/adreno_snapshot.c @@ -840,6 +840,15 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot, setup_fault_process(device, snapshot, context ? context->proc_priv : NULL); + /* Add GPU specific sections - registers mainly, but other stuff too */ + if (gpudev->snapshot) + gpudev->snapshot(adreno_dev, snapshot); + + /* Dumping these buffers is useless if the GX is not on */ + if (gpudev->gx_is_on) + if (!gpudev->gx_is_on(adreno_dev)) + return; + adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE, ADRENO_REG_CP_IB1_BASE_HI, &snapshot->ib1base); adreno_readreg(adreno_dev, ADRENO_REG_CP_IB1_BUFSZ, &snapshot->ib1size); @@ -862,10 +871,6 @@ void adreno_snapshot(struct kgsl_device *device, struct kgsl_snapshot *snapshot, adreno_snapshot_ringbuffer(device, snapshot, adreno_dev->next_rb); - /* Add GPU specific sections - registers mainly, but other stuff too */ - if (gpudev->snapshot) - gpudev->snapshot(adreno_dev, snapshot); - /* Dump selected global buffers */ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_GPU_OBJECT_V2, snapshot, snapshot_global, &device->memstore); -- GitLab From b68eece4b6060db1e7e8b2f24e8bdb1bed1d54fe Mon Sep 17 00:00:00 2001 From: David Dai Date: Fri, 7 Jul 2017 12:07:05 -0700 Subject: [PATCH 771/786] msm: msm_bus: Add check for empty clist in commit Check for empty commit list in msm_bus_commit_data. 
Change-Id: I23b15f1e7544cf7b95df9a77dc10d47dba9b23de Signed-off-by: David Dai --- drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c index b331e74c2cb1..60664ea6ef15 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c @@ -555,6 +555,8 @@ int msm_bus_commit_data(struct list_head *clist) int cnt_sleep = 0; int i = 0; + if (!clist) + return ret; list_for_each_entry_safe(node, node_tmp, clist, link) { if (unlikely(node->node_info->defer_qos)) -- GitLab From aec312fa812ffb7c59a458cf25c2b46710420891 Mon Sep 17 00:00:00 2001 From: Siddartha Mohanadoss Date: Mon, 12 Jun 2017 13:01:48 -0700 Subject: [PATCH 772/786] iio: rradc: Update thermistor scaling Scale the result with scaling coefficient before performing an integer division to retain the resolution. Change-Id: I11480099996e16e90736c667691ff0f057c02261 Signed-off-by: Siddartha Mohanadoss --- drivers/iio/adc/qcom-rradc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c index b521df694c7a..b055ff61bd62 100644 --- a/drivers/iio/adc/qcom-rradc.c +++ b/drivers/iio/adc/qcom-rradc.c @@ -331,8 +331,8 @@ static int rradc_post_process_therm(struct rradc_chip *chip, int64_t temp; /* K = code/4 */ - temp = div64_s64(adc_code, FG_ADC_RR_BATT_THERM_LSB_K); - temp *= FG_ADC_SCALE_MILLI_FACTOR; + temp = ((int64_t)adc_code * FG_ADC_SCALE_MILLI_FACTOR); + temp = div64_s64(temp, FG_ADC_RR_BATT_THERM_LSB_K); *result_millidegc = temp - FG_ADC_KELVINMIL_CELSIUSMIL; return 0; -- GitLab From 9e564a3cac4281ef40860e63411e67c3b7002f85 Mon Sep 17 00:00:00 2001 From: Shivendra Kakrania Date: Fri, 7 Jul 2017 18:09:21 -0700 Subject: [PATCH 773/786] msm: vidc: Enable video system cache by default Enable video system cache by default after parsing dtsi entries for cache-slices. 
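The thermistor scaling change above relies on the order of operations in integer arithmetic: multiplying by the milli factor before dividing by the LSB constant keeps the sub-LSB part of the reading. A small standalone example with made-up values (code = 1001, LSB = 4, i.e. K = code/4):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int64_t code = 1001, lsb_k = 4, milli = 1000;	/* illustrative values only */

		int64_t divide_first   = (code / lsb_k) * milli;	/* 250000: fraction lost */
		int64_t multiply_first = (code * milli) / lsb_k;	/* 250250: millikelvin kept */

		printf("%lld vs %lld\n", (long long)divide_first, (long long)multiply_first);
		return 0;
	}
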
CRs-Fixed: 2073349 Change-Id: I6113778ace58348613c7f5cc42403c4879af5b0c Signed-off-by: Shivendra Kakrania --- drivers/media/platform/msm/vidc/msm_vidc_internal.h | 1 - drivers/media/platform/msm/vidc/msm_vidc_res_parse.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h index 373dbba33ce8..677ee89f8bff 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h +++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h @@ -261,7 +261,6 @@ struct clock_data { u32 opb_fourcc; enum hal_work_mode work_mode; bool low_latency_mode; - bool use_sys_cache; }; struct profile_data { diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c index afb88936329b..13595159f6e9 100644 --- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c +++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c @@ -276,12 +276,12 @@ static int msm_vidc_load_subcache_info(struct msm_vidc_platform_resources *res) "cache-slice-names", c, &vsc->name); } - res->sys_cache_enabled = true; + res->sys_cache_present = true; return 0; err_load_subcache_table_fail: - res->sys_cache_enabled = false; + res->sys_cache_present = false; subcaches->count = 0; subcaches->subcache_tbl = NULL; -- GitLab From 68c7250a78402a879a9a01ba9b9d4d24b5e34ca4 Mon Sep 17 00:00:00 2001 From: David Dai Date: Fri, 7 Jul 2017 19:56:01 -0700 Subject: [PATCH 774/786] msm: msm_bus: Include init_time flag during handoff locking Toggling of the init_time flag during another client's request can cause incorrect counting of active command sets and an incorrect index being used to set the commit bit. This may result in out-of-bounds accesses.
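For illustration only, not part of the patch: the call order the hunks below establish in the late-init path, so that no other bus client can observe init_time mid-transition. The function names follow the patch; the unlock at the end of commit_late_init_data() is assumed from the existing code.

	static int __init late_init_order_sketch(void)
	{
		commit_late_init_data(true);	/* take msm_bus_adhoc_lock, nothing else */
		init_time = false;		/* flip the flag while holding the lock */
		return commit_late_init_data(false);	/* drop handoff votes, unlock assumed at function end */
	}
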
Change-Id: I00525f37367edccf021189dfdb7fae34c9a1eea5 Signed-off-by: David Dai --- drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c | 9 +++++++-- drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c | 3 ++- drivers/soc/qcom/msm_bus/msm_bus_rpmh.h | 2 +- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c index e38c53e405cb..e90012de1369 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c @@ -854,10 +854,15 @@ static void commit_data(void) INIT_LIST_HEAD(&commit_list); } -int commit_late_init_data(void) +int commit_late_init_data(bool lock) { int rc; - rt_mutex_lock(&msm_bus_adhoc_lock); + + if (lock) { + rt_mutex_lock(&msm_bus_adhoc_lock); + return 0; + } + rc = bus_for_each_dev(&msm_bus_type, NULL, NULL, bcm_remove_handoff_req); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c index b331e74c2cb1..9fe40ac160ef 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c +++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c @@ -1807,9 +1807,10 @@ int __init msm_bus_device_init_driver(void) int __init msm_bus_device_late_init(void) { + commit_late_init_data(true); MSM_BUS_ERR("msm_bus_late_init: Remove handoff bw requests\n"); init_time = false; - return commit_late_init_data(); + return commit_late_init_data(false); } subsys_initcall(msm_bus_device_init_driver); late_initcall_sync(msm_bus_device_late_init); diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h index 17657e55bc8b..ad04feffe862 100644 --- a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h +++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h @@ -224,7 +224,7 @@ int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev, int throttle_en, uint64_t lim_bw); int msm_bus_commit_data(struct list_head *clist); int bcm_remove_handoff_req(struct device *dev, void *data); -int commit_late_init_data(void); +int commit_late_init_data(bool lock); int msm_bus_query_gen(struct list_head *qlist, struct msm_bus_tcs_usecase *tcs_usecase); void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size, -- GitLab From 6044b96e14f7ba83736b3cf5d1c75dbb8a74d6fa Mon Sep 17 00:00:00 2001 From: Lingutla Chandrasekhar Date: Mon, 10 Jul 2017 13:07:50 +0530 Subject: [PATCH 775/786] drm/msm: fix compilation error when bus_scaling disabled The function definition for 'sde_power_data_bus_parse' is not the same as the declaration, which leads to compilation errors when bus scaling is disabled. Update the mismatched function definition of 'sde_power_data_bus_parse'.
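The failure mode described above is a prototype and a stubbed-out definition disagreeing on their parameter lists, which only breaks builds that compile the stub. A minimal self-contained sketch with hypothetical names (not the sde code):

	/* header: the one prototype every configuration sees */
	int data_bus_parse(void *pdev, void *handle, const char *name);

	#ifdef CONFIG_BUS_SCALING
	int data_bus_parse(void *pdev, void *handle, const char *name)
	{
		return 0;	/* real parsing would live here */
	}
	#else
	/* stub drops the 'name' argument: a "conflicting types" error, seen only
	 * when CONFIG_BUS_SCALING is disabled */
	int data_bus_parse(void *pdev, void *handle)
	{
		return 0;
	}
	#endif
	/* fix: give the stub the same parameter list as the prototype */
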
Change-Id: I09ca6dc9cc5037bfdcb48f76d6cdab2161b67b96 Signed-off-by: Lingutla Chandrasekhar --- drivers/gpu/drm/msm/sde_power_handle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c index 242cd6498366..28a2d4da02b0 100644 --- a/drivers/gpu/drm/msm/sde_power_handle.c +++ b/drivers/gpu/drm/msm/sde_power_handle.c @@ -630,7 +630,7 @@ static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx) } #else static int sde_power_data_bus_parse(struct platform_device *pdev, - struct sde_power_data_bus_handle *pdbus) + struct sde_power_data_bus_handle *pdbus, const char *name) { return 0; } -- GitLab From e9d879f00b572b2af34e30a9f1dea45babb9f376 Mon Sep 17 00:00:00 2001 From: Lingutla Chandrasekhar Date: Mon, 10 Jul 2017 20:08:56 +0530 Subject: [PATCH 776/786] defconfig: sdm670: update re-ordered configurations Update reordered configurations to avoid compilation changes in defconfig. Change-Id: I35c3da1c6b565ad939b62f3c2f564260aafaf7d1 Signed-off-by: Lingutla Chandrasekhar --- arch/arm64/configs/sdm670_defconfig | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/arch/arm64/configs/sdm670_defconfig b/arch/arm64/configs/sdm670_defconfig index f28a9a62335b..14243fb125d4 100644 --- a/arch/arm64/configs/sdm670_defconfig +++ b/arch/arm64/configs/sdm670_defconfig @@ -265,7 +265,6 @@ CONFIG_NETDEVICES=y CONFIG_BONDING=y CONFIG_DUMMY=y CONFIG_TUN=y -CONFIG_RNDIS_IPA=y CONFIG_PPP=y CONFIG_PPP_BSDCOMP=y CONFIG_PPP_DEFLATE=y @@ -333,7 +332,6 @@ CONFIG_QTI_THERMAL_LIMITS_DCVS=y CONFIG_QTI_VIRTUAL_SENSOR=y CONFIG_MFD_I2C_PMIC=y CONFIG_MFD_SPMI_PMIC=y -CONFIG_WCD934X_CODEC=y CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_CPRH_KBSS=y CONFIG_REGULATOR_QPNP_LABIBB=y @@ -445,6 +443,7 @@ CONFIG_ION_MSM=y CONFIG_GSI=y CONFIG_IPA3=y CONFIG_RMNET_IPA3=y +CONFIG_RNDIS_IPA=y CONFIG_IPA_UT=y CONFIG_SPS=y CONFIG_SPS_SUPPORT_NDP_BAM=y @@ -573,7 +572,6 @@ CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_LOCKUP_DETECTOR=y -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y CONFIG_WQ_WATCHDOG=y CONFIG_PANIC_TIMEOUT=5 CONFIG_PANIC_ON_SCHED_BUG=y -- GitLab From b6befe64a4157dbfc50c39517ef8603e434aba70 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Mon, 15 May 2017 12:01:02 -0700 Subject: [PATCH 777/786] usb: gadget: f_cdev: Fix NULL pointer dereference in cser_free_inst If f_cdev_alloc() fails it frees the port context and set_inst_name() call back returns with error. As a result free_func_inst() call back is called which is dereferencing port context from f_cdev_opts context which results into NULL ptr dereference. Fix the issue by adding NULL check for port context pointer in f_cdev_opts context. Change-Id: I69828761be0a9f7df714eec34894c13f762dcc43 Signed-off-by: Hemant Kumar --- drivers/usb/gadget/function/f_cdev.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c index 920c08a1b6c4..5804840dc224 100644 --- a/drivers/usb/gadget/function/f_cdev.c +++ b/drivers/usb/gadget/function/f_cdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011, 2013-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2011, 2013-2017, The Linux Foundation. All rights reserved. * Linux Foundation chooses to take subject only to the GPLv2 license terms, * and distributes only under these terms. 
* @@ -826,8 +826,10 @@ static void cser_free_inst(struct usb_function_instance *fi) opts = container_of(fi, struct f_cdev_opts, func_inst); - device_destroy(fcdev_classp, MKDEV(major, opts->port->minor)); - cdev_del(&opts->port->fcdev_cdev); + if (opts->port) { + device_destroy(fcdev_classp, MKDEV(major, opts->port->minor)); + cdev_del(&opts->port->fcdev_cdev); + } usb_cser_chardev_deinit(); kfree(opts->func_name); kfree(opts->port); -- GitLab From 6cbadca63dacf7d9dc1d8ed11f303bdef77628a6 Mon Sep 17 00:00:00 2001 From: Rohit Gupta Date: Mon, 10 Jul 2017 16:29:46 -0700 Subject: [PATCH 778/786] ARM: dts: msm: Fix l3-cpu* devices' frequency maps for SDM845 Since l3 voter clocks get their frequency table from OPP, the frequency values are in Hz. But l3-cpu* devices still have l3 frequencies listed in kHz which causes their votes to be low. Change the l3 frequencies votes of the l3-cpu* devices to be in Hz to follow the same convention as OPP, so that they can vote correctly. Change-Id: I3287afd22aa03be559697dacdc66bd72bc63a577 Signed-off-by: Rohit Gupta --- arch/arm64/boot/dts/qcom/sdm845.dtsi | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 2c0373db1805..d921f1846796 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -969,13 +969,13 @@ qcom,target-dev = <&l3_cpu0>; qcom,cachemiss-ev = <0x17>; qcom,core-dev-table = - < 300000 300000 >, - < 748800 576000 >, - < 979200 652800 >, - < 1209600 806400 >, - < 1516800 883200 >, - < 1593600 960000 >, - < 1708800 1094400 >; + < 300000 300000000 >, + < 748800 576000000 >, + < 979200 652800000 >, + < 1209600 806400000 >, + < 1516800 883200000 >, + < 1593600 960000000 >, + < 1708800 1094400000 >; }; devfreq_l3lat_4: qcom,cpu4-l3lat-mon { @@ -984,12 +984,12 @@ qcom,target-dev = <&l3_cpu4>; qcom,cachemiss-ev = <0x17>; qcom,core-dev-table = - < 300000 300000 >, - < 1036800 576000 >, - < 1190400 806400 >, - < 1574400 883200 >, - < 1804800 960000 >, - < 2092800 1094400 >; + < 300000 300000000 >, + < 1036800 576000000 >, + < 1190400 806400000 >, + < 1574400 883200000 >, + < 1804800 960000000 >, + < 2092800 1094400000 >; }; cpu_pmu: cpu-pmu { -- GitLab From 2ecc53d57233a8f8c67ab693a1381d5d832aab84 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 19 Oct 2016 19:28:12 +0100 Subject: [PATCH 779/786] thread_info: factor out restart_block Since commit f56141e3e2d9aabf ("all arches, signal: move restart_block to struct task_struct"), thread_info and restart_block have been logically distinct, yet struct restart_block is still defined in . At least one architecture (erroneously) uses restart_block as part of its thread_info, and thus the definition of restart_block must come before the include of . Subsequent patches in this series need to shuffle the order of includes and definitions in , and will make this ordering fragile. This patch moves the definition of restart_block out to its own header. This serves as generic cleanup, logically separating thread_info and restart_block, and also makes it easier to avoid fragility. 
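To make the ordering constraint above concrete, consider a hypothetical architecture header that (erroneously) embeds restart_block in its thread_info: it only compiles if the full structure definition is visible first, which is exactly what moving the definition into its own header guarantees. Sketch only, not taken from the series:

	/* hypothetical arch asm/thread_info.h */
	#include <linux/restart_block.h>	/* full definition needed below */

	struct thread_info {
		unsigned long		flags;
		int			preempt_count;
		struct restart_block	restart_block;	/* embedded, so a forward declaration is not enough */
	};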
Change-Id: Ia9cf21549a307ce3543072fccf76cac174726ec3 Signed-off-by: Mark Rutland Reviewed-by: Andy Lutomirski Cc: Andrew Morton Cc: Heiko Carstens Cc: Kees Cook Signed-off-by: Catalin Marinas Git-commit: 53d74d056a4e306a72b8883d325b5d853c0618e6 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Signed-off-by: Olav Haugan --- include/linux/restart_block.h | 51 +++++++++++++++++++++++++++++++++++ include/linux/thread_info.h | 41 +--------------------------- 2 files changed, 52 insertions(+), 40 deletions(-) create mode 100644 include/linux/restart_block.h diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h new file mode 100644 index 000000000000..0d905d8ec553 --- /dev/null +++ b/include/linux/restart_block.h @@ -0,0 +1,51 @@ +/* + * Common syscall restarting data + */ +#ifndef __LINUX_RESTART_BLOCK_H +#define __LINUX_RESTART_BLOCK_H + +#include +#include + +struct timespec; +struct compat_timespec; +struct pollfd; + +/* + * System call restart block. + */ +struct restart_block { + long (*fn)(struct restart_block *); + union { + /* For futex_wait and futex_wait_requeue_pi */ + struct { + u32 __user *uaddr; + u32 val; + u32 flags; + u32 bitset; + u64 time; + u32 __user *uaddr2; + } futex; + /* For nanosleep */ + struct { + clockid_t clockid; + struct timespec __user *rmtp; +#ifdef CONFIG_COMPAT + struct compat_timespec __user *compat_rmtp; +#endif + u64 expires; + } nanosleep; + /* For poll */ + struct { + struct pollfd __user *ufds; + int nfds; + int has_timeout; + unsigned long tv_sec; + unsigned long tv_nsec; + } poll; + }; +}; + +extern long do_no_restart_syscall(struct restart_block *parm); + +#endif /* __LINUX_RESTART_BLOCK_H */ diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 2873baf5372a..c75c6ab364ca 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -9,51 +9,12 @@ #include #include - -struct timespec; -struct compat_timespec; +#include #ifdef CONFIG_THREAD_INFO_IN_TASK #define current_thread_info() ((struct thread_info *)current) #endif -/* - * System call restart block. - */ -struct restart_block { - long (*fn)(struct restart_block *); - union { - /* For futex_wait and futex_wait_requeue_pi */ - struct { - u32 __user *uaddr; - u32 val; - u32 flags; - u32 bitset; - u64 time; - u32 __user *uaddr2; - } futex; - /* For nanosleep */ - struct { - clockid_t clockid; - struct timespec __user *rmtp; -#ifdef CONFIG_COMPAT - struct compat_timespec __user *compat_rmtp; -#endif - u64 expires; - } nanosleep; - /* For poll */ - struct { - struct pollfd __user *ufds; - int nfds; - int has_timeout; - unsigned long tv_sec; - unsigned long tv_nsec; - } poll; - }; -}; - -extern long do_no_restart_syscall(struct restart_block *parm); - #include #include -- GitLab From 51096c4047e05d3eae288a4e7fcc0d43ef15bf02 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 19 Oct 2016 19:28:13 +0100 Subject: [PATCH 780/786] thread_info: include for THREAD_INFO_IN_TASK When CONFIG_THREAD_INFO_IN_TASK is selected, the current_thread_info() macro relies on current having been defined prior to its use. However, not all users of current_thread_info() include , and thus current is not guaranteed to be defined. When CONFIG_THREAD_INFO_IN_TASK is not selected, it's possible that get_current() / current are based upon current_thread_info(), and includes . Thus always including would result in circular dependences on some platforms. 
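The cycle being avoided can be sketched with a hypothetical !CONFIG_THREAD_INFO_IN_TASK platform whose current is derived from thread_info (illustrative names only, before reading the resolution that follows):

	/* hypothetical asm/current.h on such a platform */
	#include <asm/thread_info.h>			/* needs current_thread_info() */
	#define get_current()	(current_thread_info()->task)
	#define current		get_current()

	/*
	 * If the generic thread_info header always pulled in asm/current.h,
	 * this header would be included while the thread_info headers are
	 * still being processed, closing the cycle.  Guarding the include
	 * with CONFIG_THREAD_INFO_IN_TASK sidesteps it, because on those
	 * kernels current does not depend on thread_info at all.
	 */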
To ensure both cases work, this patch includes , but only when CONFIG_THREAD_INFO_IN_TASK is selected. Change-Id: Iab7cc3a7e7b25d78be5ebad98e310e31de339a94 Signed-off-by: Mark Rutland Acked-by: Heiko Carstens Reviewed-by: Andy Lutomirski Cc: Andrew Morton Cc: Kees Cook Signed-off-by: Catalin Marinas Git-commit: dc3d2a679cd8631b8a570fc8ca5f4712d7d25698 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Signed-off-by: Olav Haugan --- include/linux/thread_info.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index c75c6ab364ca..58373875e8ee 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -12,6 +12,12 @@ #include #ifdef CONFIG_THREAD_INFO_IN_TASK +/* + * For CONFIG_THREAD_INFO_IN_TASK kernels we need for the + * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels, + * including can cause a circular dependency on some platforms. + */ +#include #define current_thread_info() ((struct thread_info *)current) #endif -- GitLab From 17a00d3d2e0a278780b666072c27846f403ca004 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 3 Nov 2016 20:23:04 +0000 Subject: [PATCH 781/786] arm64: asm-offsets: remove unused definitions Subsequent patches will move the thread_info::{task,cpu} fields, and the current TI_{TASK,CPU} offset definitions are not used anywhere. This patch removes the redundant definitions. Change-Id: I94520a089e3aaed1ef91eb1a09473b8e86202ed3 Signed-off-by: Mark Rutland Tested-by: Laura Abbott Cc: James Morse Cc: Will Deacon Signed-off-by: Catalin Marinas Git-commit: 3fe12da4c7fa6491e0fb7c5371716ac7f8ea80a5 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git [ohaugan@codeaurora.org: Fix merge conflict] Signed-off-by: Olav Haugan --- arch/arm64/kernel/asm-offsets.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 5cdbc5557b68..fd1c4f6a0721 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -39,11 +39,6 @@ int main(void) DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); -#ifdef CONFIG_ARM64_SW_TTBR0_PAN - DEFINE(TI_TTBR0, offsetof(struct thread_info, ttbr0)); -#endif - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); #ifdef CONFIG_ARM64_SW_TTBR0_PAN DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0)); #endif -- GitLab From 67b88d7954e9c46ba9d0903091ce8c0074c597af Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 3 Nov 2016 20:23:05 +0000 Subject: [PATCH 782/786] arm64: factor out current_stack_pointer We define current_stack_pointer in , though other files and header relying upon it do not have this necessary include, and are thus fragile to changes in the header soup. Subsequent patches will affect the header soup such that directly including may result in a circular header include in some of these cases, so we can't simply include . Instead, factor current_thread_info into its own header, and have all existing users include this explicitly. 
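A sketch of the kind of user the commit message has in mind: code that reads the stack pointer and, after this change, states its dependency explicitly rather than relying on whatever the thread_info header happens to drag in. The function and parameter names here are illustrative, not from the series.

	#include <linux/types.h>
	#include <asm/stack_pointer.h>	/* explicit: provides current_stack_pointer */
	#include <asm/thread_info.h>	/* for THREAD_SIZE */

	/* Is the current stack pointer inside the given stack area? (sketch) */
	static inline bool sp_in_stack(unsigned long stack_base)
	{
		return current_stack_pointer >= stack_base &&
		       current_stack_pointer <  stack_base + THREAD_SIZE;
	}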
Change-Id: If434675e803479f53e6b72e2ca1cf71c420d6d2f Signed-off-by: Mark Rutland Tested-by: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas Git-commit: a9ea0017ebe8889dfa136cac2aa7ae0ee6915e1f Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Signed-off-by: Olav Haugan --- arch/arm64/include/asm/percpu.h | 2 ++ arch/arm64/include/asm/perf_event.h | 2 ++ arch/arm64/include/asm/stack_pointer.h | 9 +++++++++ arch/arm64/include/asm/thread_info.h | 6 +----- arch/arm64/kernel/return_address.c | 1 + arch/arm64/kernel/stacktrace.c | 1 + arch/arm64/kernel/traps.c | 1 + 7 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 arch/arm64/include/asm/stack_pointer.h diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 5394c8405e66..d7a3c6294224 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h @@ -16,6 +16,8 @@ #ifndef __ASM_PERCPU_H #define __ASM_PERCPU_H +#include + static inline void set_my_cpu_offset(unsigned long off) { asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 38b6a2b49d68..8d5cbec17d80 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -17,6 +17,8 @@ #ifndef __ASM_PERF_EVENT_H #define __ASM_PERF_EVENT_H +#include + #define ARMV8_PMU_MAX_COUNTERS 32 #define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h new file mode 100644 index 000000000000..ffcdf742cddf --- /dev/null +++ b/arch/arm64/include/asm/stack_pointer.h @@ -0,0 +1,9 @@ +#ifndef __ASM_STACK_POINTER_H +#define __ASM_STACK_POINTER_H + +/* + * how to get the current stack pointer from C + */ +register unsigned long current_stack_pointer asm ("sp"); + +#endif /* __ASM_STACK_POINTER_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 4ad25a5245c4..f3a016979b5a 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -36,6 +36,7 @@ struct task_struct; +#include #include typedef unsigned long mm_segment_t; @@ -66,11 +67,6 @@ struct thread_info { #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) -/* - * how to get the current stack pointer from C - */ -register unsigned long current_stack_pointer asm ("sp"); - /* * how to get the thread information struct from C */ diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c index 1718706fde83..12a87f2600f2 100644 --- a/arch/arm64/kernel/return_address.c +++ b/arch/arm64/kernel/return_address.c @@ -12,6 +12,7 @@ #include #include +#include #include struct return_address_data { diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index bedf97d7fe20..70b6c9c84055 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -22,6 +22,7 @@ #include #include +#include #include /* diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index d8253fb87caf..1710b78fb810 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include -- GitLab From 33bc9be625741d9d2f2a50d299eee6d5aea69ba7 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 3 Nov 2016 20:23:06 +0000 Subject: [PATCH 783/786] arm64: traps: simplify die() and __die() In 
arm64's die and __die routines we pass around a thread_info, and subsequently use this to determine the relevant task_struct, and the end of the thread's stack. Subsequent patches will decouple thread_info from the stack, and this approach will no longer work. To figure out the end of the stack, we can use the new generic end_of_stack() helper. As we only call __die() from die(), and die() always deals with the current task, we can remove the parameter and have both acquire current directly, which also makes it clear that __die can't be called for arbitrary tasks. Change-Id: Ia1a054760ac01a42207cdc933e6a0ada729ad1db Signed-off-by: Mark Rutland Tested-by: Laura Abbott Cc: Will Deacon Signed-off-by: Catalin Marinas Git-commit: 876e7a38e8788773aac768091aaa3b42e470c03b Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git [ohaugan@codeaurora.org: Fix merge conflicts] Signed-off-by: Olav Haugan --- arch/arm64/kernel/traps.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 1710b78fb810..cd6c4d91fffa 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -228,10 +228,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) #endif #define S_SMP " SMP" -static int __die(const char *str, int err, struct thread_info *thread, - struct pt_regs *regs) +static int __die(const char *str, int err, struct pt_regs *regs) { - struct task_struct *tsk = thread->task; + struct task_struct *tsk = current; static int die_counter; int ret; @@ -246,7 +245,8 @@ static int __die(const char *str, int err, struct thread_info *thread, print_modules(); __show_regs(regs); pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), + end_of_stack(tsk)); if (!user_mode(regs)) { dump_backtrace(regs, tsk); @@ -311,7 +311,6 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int notify) */ void die(const char *str, struct pt_regs *regs, int err) { - struct thread_info *thread = current_thread_info(); enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE; unsigned long flags = oops_begin(); int ret; @@ -321,7 +320,7 @@ void die(const char *str, struct pt_regs *regs, int err) if (bug_type != BUG_TRAP_TYPE_NONE && !strlen(str)) str = "Oops - BUG"; - ret = __die(str, err, thread, regs); + ret = __die(str, err, regs); oops_end(flags, regs, ret); } -- GitLab From 22cadfc4bca1089b33f1572d95dbff323d9d0715 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 3 Nov 2016 20:23:07 +0000 Subject: [PATCH 784/786] arm64: unexport walk_stackframe The walk_stackframe functions is architecture-specific, with a varying prototype, and common code should not use it directly. None of its current users can be built as modules. With THREAD_INFO_IN_TASK, users will also need to hold a stack reference before calling it. There's no reason for it to be exported, and it's very easy to misuse, so unexport it for now. 
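For the point above about holding a stack reference: with THREAD_INFO_IN_TASK the expected shape of an in-kernel caller is to pin the task's stack around the walk with the try_get_task_stack()/put_task_stack() helpers. A sketch follows; the callback and data names are illustrative, and seeding of the frame is omitted.

	if (try_get_task_stack(tsk)) {
		struct stackframe frame;

		/* seed frame from tsk's saved fp/sp/pc before walking (omitted) */
		walk_stackframe(tsk, &frame, report_frame_fn, data);
		put_task_stack(tsk);
	}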
Change-Id: Ieedba1e29091fb14cd57f69db7b94d5bcfb10af1 Signed-off-by: Mark Rutland Cc: Will Deacon Signed-off-by: Catalin Marinas Git-commit: 2020a5ae7c8c2c8504565004915017507b135c63 Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git Signed-off-by: Olav Haugan --- arch/arm64/kernel/stacktrace.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 70b6c9c84055..2e940b1a20c7 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c @@ -129,7 +129,6 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame, break; } } -EXPORT_SYMBOL(walk_stackframe); #ifdef CONFIG_STACKTRACE struct stack_trace_data { -- GitLab From 4de4d88d882b0aaf08ba4f438dc4bc6ec86527d4 Mon Sep 17 00:00:00 2001 From: Mohammed Javid Date: Wed, 5 Jul 2017 18:35:50 +0530 Subject: [PATCH 785/786] msm: ipa: fix wrong usage and cleanup dead code Couple of code cleanup - remove dead code - initialize local variables before use - validate gsi_ep_cfg_ptr return Change-Id: Ia2c2a5559fa2c5f89b33cc1ff897df0faa725e7e Signed-off-by: Mohammed Javid --- drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 6 ++++++ drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 5 +---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c index 2d0876749660..1af968e30e73 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c @@ -644,6 +644,12 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params, } gsi_ep_cfg_ptr = ipa3_get_gsi_ep_info(ep->client); + if (gsi_ep_cfg_ptr == NULL) { + IPAERR("Error ipa3_get_gsi_ep_info ret NULL\n"); + result = -EFAULT; + goto write_evt_scratch_fail; + } + params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl; params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num; gsi_res = gsi_alloc_channel(¶ms->chan_params, gsi_dev_hdl, diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c index 0dd86fa0fb44..b19ef8b35817 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c @@ -191,7 +191,7 @@ static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client, static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, int ipa_ep_idx, struct start_gsi_channel *params) { - int res; + int res = 0; struct gsi_evt_ring_props ev_props; struct ipa_mhi_msi_info *msi; struct gsi_chan_props ch_props; @@ -241,7 +241,6 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, if (res) { IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res); goto fail_alloc_evt; - return res; } IPA_MHI_DBG("client %d, caching event ring hdl %lu\n", client, @@ -259,7 +258,6 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n", params->ev_ctx_host->wp); goto fail_alloc_ch; - return res; } IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n", @@ -270,7 +268,6 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client, IPA_MHI_ERR("fail to ring evt ring db %d. 
hdl=%lu wp=0x%llx\n", res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp); goto fail_alloc_ch; - return res; } memset(&ch_props, 0, sizeof(ch_props)); -- GitLab From 60d8d0ad479e16a31f5ea76513f7b0c419cb42e4 Mon Sep 17 00:00:00 2001 From: Mohammed Javid Date: Thu, 29 Jun 2017 11:35:19 +0530 Subject: [PATCH 786/786] msm: ipa: Fix the problem with nested sleeping primitives prepare_to_wait() will enqueue the thread on the given queue and put it into the given execution state, which is TASK_INTERRUPTIBLE. Further processing in function, calls mutex_lock(), will go into a new version of the going-to-sleep code, changing the task state. That, of course, may well interfere with the outer sleeping code. So, nesting of sleeping primitives in this way is discouraged. And new warning was added to point out this kind of nesting. Fix the nesting of sleeping primitives with the new solution provide in linux kernel. Change-Id: Id1a5f64472cd2d63e679706c6482db98f89ec765 Signed-off-by: Mohammed Javid --- drivers/platform/msm/ipa/ipa_v2/ipa_intf.c | 8 ++++---- drivers/platform/msm/ipa/ipa_v3/ipa_intf.c | 10 ++++------ 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c index 4b62927ec289..dc276364bfb1 100644 --- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c @@ -516,15 +516,15 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, char __user *start; struct ipa_push_msg *msg = NULL; int ret; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int locked; start = buf; + add_wait_queue(&ipa_ctx->msg_waitq, &wait); while (1) { mutex_lock(&ipa_ctx->msg_lock); locked = 1; - prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE); if (!list_empty(&ipa_ctx->msg_list)) { msg = list_first_entry(&ipa_ctx->msg_list, struct ipa_push_msg, link); @@ -576,10 +576,10 @@ ssize_t ipa_read(struct file *filp, char __user *buf, size_t count, locked = 0; mutex_unlock(&ipa_ctx->msg_lock); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } - finish_wait(&ipa_ctx->msg_waitq, &wait); + remove_wait_queue(&ipa_ctx->msg_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c index da965e73c46c..fe6d245ee8fe 100644 --- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c +++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c @@ -522,17 +522,15 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, char __user *start; struct ipa3_push_msg *msg = NULL; int ret; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); int locked; start = buf; + add_wait_queue(&ipa3_ctx->msg_waitq, &wait); while (1) { mutex_lock(&ipa3_ctx->msg_lock); locked = 1; - prepare_to_wait(&ipa3_ctx->msg_waitq, - &wait, - TASK_INTERRUPTIBLE); if (!list_empty(&ipa3_ctx->msg_list)) { msg = list_first_entry(&ipa3_ctx->msg_list, @@ -585,10 +583,10 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count, locked = 0; mutex_unlock(&ipa3_ctx->msg_lock); - schedule(); + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); } - finish_wait(&ipa3_ctx->msg_waitq, &wait); + remove_wait_queue(&ipa3_ctx->msg_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; -- GitLab
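The wait_woken() shape used in both hunks above generalizes as follows. The queue, lock and condition names here are placeholders, and the real read() paths additionally copy the message out to user space and handle the non-blocking case; this is only a sketch of the sleeping pattern that avoids nesting mutex_lock() inside a changed task state.

	ssize_t ret = 0;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&waitq, &wait);
	for (;;) {
		mutex_lock(&lock);		/* safe: task state is untouched here */
		if (have_message()) {
			/* consume the message, then drop the lock */
			mutex_unlock(&lock);
			break;
		}
		mutex_unlock(&lock);

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* sleep without an explicit set_current_state()/schedule() pair */
		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&waitq, &wait);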