
[2/3] drm/amd: add ACP driver support [v6]

Message ID 1440101361-9912-2-git-send-email-alexander.deucher@amd.com (mailing list archive)

Commit Message

Alex Deucher Aug. 20, 2015, 8:09 p.m. UTC
From: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>

This adds the ACP (Audio CoProcessor) IP driver and wires
it up to the amdgpu driver.  The ACP block provides the DMA
engine and bus for the I2S codec, which is supported by an
ALSA driver.  This is required for audio on APUs that
use an I2S codec.

v2: integrate i2s/az check patch
v3: s/amd_acp/amdgpu_acp/
v4: update copyright notice
v5: squash multiple patches, convert to mfd
v6: squash in dynamic cell allocation from Maruthi

Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Reviewed-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Murali Krishna Vemuri <murali-krishna.vemuri@amd.com>
Signed-off-by: Maruthi Bayyavarapu <maruthi.bayyavarapu@amd.com>
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/Kconfig                      |    2 +
 drivers/gpu/drm/amd/acp/Kconfig              |   10 +
 drivers/gpu/drm/amd/acp/Makefile             |    9 +
 drivers/gpu/drm/amd/acp/acp_hw.c             | 1133 ++++++++++++++++++++++++++
 drivers/gpu/drm/amd/acp/acp_hw.h             |   91 +++
 drivers/gpu/drm/amd/acp/include/acp_gfx_if.h |   49 ++
 drivers/gpu/drm/amd/amdgpu/Makefile          |   13 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu.h          |    5 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c      |  208 +++++
 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h      |   40 +
 drivers/gpu/drm/amd/amdgpu/vi.c              |   12 +
 drivers/gpu/drm/amd/include/amd_shared.h     |    1 +
 include/linux/mfd/amd_acp.h                  |  211 +++++
 13 files changed, 1783 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/amd/acp/Kconfig
 create mode 100644 drivers/gpu/drm/amd/acp/Makefile
 create mode 100644 drivers/gpu/drm/amd/acp/acp_hw.c
 create mode 100644 drivers/gpu/drm/amd/acp/acp_hw.h
 create mode 100644 drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
 create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
 create mode 100644 include/linux/mfd/amd_acp.h

Patch

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index df99b01..2e3df0e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -161,6 +161,8 @@  config DRM_AMDGPU
 
 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
 
+source "drivers/gpu/drm/amd/acp/Kconfig"
+
 source "drivers/gpu/drm/nouveau/Kconfig"
 
 config DRM_I810
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
new file mode 100644
index 0000000..1de4fe7
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -0,0 +1,10 @@ 
+menu "ACP Configuration"
+
+config DRM_AMD_ACP
+       bool "Enable ACP IP support"
+       default y
+       depends on MFD_CORE
+       help
+	Choose this option to enable ACP IP support for AMD SOCs.
+
+endmenu
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
new file mode 100644
index 0000000..c8c3303
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -0,0 +1,9 @@ 
+#
+# Makefile for the ACP, which is a sub-component
+# of the AMDSOC/AMDGPU drm driver.
+# It provides the HW control for ACP-related functionality.
+
+ccflags-y += -Idrivers/gpu/drm/amd/include/asic_reg/acp
+subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
+
+AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/amd/acp/acp_hw.c b/drivers/gpu/drm/amd/acp/acp_hw.c
new file mode 100644
index 0000000..4178aa9
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/acp_hw.c
@@ -0,0 +1,1133 @@ 
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * NOTE:
+ * Certain pieces were reused from Synopsys I2S IP related code,
+ * which otherwise can also be found at:
+ * sound/soc/dwc/designware_i2s.c
+ *
+ * Copyright notice as appears in the above file:
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Rajeev Kumar <rajeevkumar.linux@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+
+#include "acp_gfx_if.h"
+#include "acp_hw.h"
+
+#include "acp_2_2_d.h"
+#include "acp_2_2_sh_mask.h"
+
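+/* IRQ source ID assigned to the ACP block in the VI interrupt vector
+ * table; used below with the cgs irq registration API.
+ */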
+#define VISLANDS30_IV_SRCID_ACP 0x000000a2
+
+/* Configure a given DMA channel's parameters: enable/disable,
+ * number of descriptors, and priority */
+static void config_acp_dma_channel(struct amd_acp_device *acp_dev, u8 ch_num,
+				   u16 dscr_strt_idx, u16 num_dscrs,
+				   enum acp_dma_priority_level priority_level)
+{
+	u32 dma_ctrl;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	/* disable the channel run field */
+	dma_ctrl = cgs_read_register(acp_prv->cgs_device,
+				     mmACP_DMA_CNTL_0 + ch_num);
+	dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRun_MASK;
+	cgs_write_register(acp_prv->cgs_device, (mmACP_DMA_CNTL_0 + ch_num),
+			   dma_ctrl);
+
+	/* program a DMA channel with first descriptor to be processed. */
+	cgs_write_register(acp_prv->cgs_device,
+			   (mmACP_DMA_DSCR_STRT_IDX_0 + ch_num),
+			   (ACP_DMA_DSCR_STRT_IDX_0__DMAChDscrStrtIdx_MASK &
+			    dscr_strt_idx));
+
+	/* program a DMA channel with the number of descriptors to be
+	 * processed in the transfer */
+	cgs_write_register(acp_prv->cgs_device,
+			   (mmACP_DMA_DSCR_CNT_0 + ch_num),
+			   (ACP_DMA_DSCR_CNT_0__DMAChDscrCnt_MASK & num_dscrs));
+
+	/* set DMA channel priority */
+	cgs_write_register(acp_prv->cgs_device,	(mmACP_DMA_PRIO_0 + ch_num),
+			   priority_level);
+}
+
+/* Initialize the location of the DMA descriptors in SRAM and the page size */
+static void acp_dma_descr_init(struct amd_acp_private *acp_prv)
+{
+	u32 sram_pte_offset = 0;
+
+	/* SRAM starts at 0x04000000.  From that offset, one page (4KB) is
+	 * reserved for DMA descriptors.  sram_pte_offset = 0x04001000 is
+	 * then used for mapping system RAM's physical pages and becomes
+	 * ALSA's ring buffer start address.
+	 */
+	sram_pte_offset = ACP_DAGB_GRP_SRAM_BASE_ADDRESS;
+
+	/* snoopable */
+	sram_pte_offset |= ACP_DAGB_BASE_ADDR_GRP_1__AXI2DAGBSnoopSel_MASK;
+	/* Memory is system memory */
+	sram_pte_offset |= ACP_DAGB_BASE_ADDR_GRP_1__AXI2DAGBTargetMemSel_MASK;
+	/* Page Enabled */
+	sram_pte_offset |= ACP_DAGB_BASE_ADDR_GRP_1__AXI2DAGBGrpEnable_MASK;
+
+	cgs_write_register(acp_prv->cgs_device,	mmACP_DAGB_BASE_ADDR_GRP_1,
+			   sram_pte_offset);
+	cgs_write_register(acp_prv->cgs_device,	mmACP_DAGB_PAGE_SIZE_GRP_1,
+			   PAGE_SIZE_4K_ENABLE);
+}
+
+/* Initialize a DMA descriptor in SRAM based on the descriptor
+ * information passed in */
+static void config_dma_descriptor_in_sram(struct amd_acp_device *acp_dev,
+					  u16 descr_idx,
+					  acp_dma_dscr_transfer_t *descr_info)
+{
+	u32 sram_offset;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	sram_offset = (descr_idx * sizeof(acp_dma_dscr_transfer_t));
+
+	/* program the source base address. */
+	cgs_write_register(acp_prv->cgs_device,	mmACP_SRBM_Targ_Idx_Addr,
+			   sram_offset);
+	cgs_write_register(acp_prv->cgs_device,	mmACP_SRBM_Targ_Idx_Data,
+			   descr_info->src);
+	/* program the destination base address. */
+	cgs_write_register(acp_prv->cgs_device,	mmACP_SRBM_Targ_Idx_Addr,
+			   (sram_offset + 4));
+	cgs_write_register(acp_prv->cgs_device,	mmACP_SRBM_Targ_Idx_Data,
+						descr_info->dest);
+
+	/* program the number of bytes to be transferred for this descriptor. */
+	cgs_write_register(acp_prv->cgs_device,	mmACP_SRBM_Targ_Idx_Addr,
+			   (sram_offset + 8));
+	cgs_write_register(acp_prv->cgs_device,	mmACP_SRBM_Targ_Idx_Data,
+			   descr_info->xfer_val);
+}
+
+/* Initialize the DMA descriptor information for transfer between
+ * system memory <-> ACP SRAM
+ */
+static void set_acp_sysmem_dma_descriptors(struct amd_acp_device *acp_dev,
+					   u32 size, int direction,
+					   u32 pte_offset)
+{
+	u16 num_descr;
+	u16 dma_dscr_idx = PLAYBACK_START_DMA_DESCR_CH12;
+	acp_dma_dscr_transfer_t dmadscr[2];
+
+	num_descr = 2;
+
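+	/* Two descriptors split the ring buffer into halves so the channel
+	 * ping-pongs through it one ALSA period at a time.  xfer_val packs
+	 * the IOC flag at bit 22, the DAGB routing attribute at bits 16+,
+	 * and the byte count in the low bits.
+	 */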
+	dmadscr[0].xfer_val = 0;
+	if (direction == STREAM_PLAYBACK) {
+		dma_dscr_idx = PLAYBACK_START_DMA_DESCR_CH12;
+		dmadscr[0].dest = ACP_SHARED_RAM_BANK_1_ADDRESS + (size / 2);
+		dmadscr[0].src = ACP_INTERNAL_APERTURE_WINDOW_0_ADDRESS +
+			(pte_offset * PAGE_SIZE_4K);
+		dmadscr[0].xfer_val |= (DISABLE << 22) |
+			(ACP_DMA_ATTRIBUTES_DAGB_ONION_TO_SHAREDMEM << 16) |
+			(size / 2);
+	} else {
+		dma_dscr_idx = CAPTURE_START_DMA_DESCR_CH14;
+		dmadscr[0].src = ACP_SHARED_RAM_BANK_5_ADDRESS;
+		dmadscr[0].dest = ACP_INTERNAL_APERTURE_WINDOW_0_ADDRESS +
+			(pte_offset * PAGE_SIZE_4K);
+		dmadscr[0].xfer_val |=
+			(ENABLE << 22) |
+			(ACP_DMA_ATTRIBUTES_SHAREDMEM_TO_DAGB_ONION << 16) |
+			(size / 2);
+	}
+
+	config_dma_descriptor_in_sram(acp_dev, dma_dscr_idx, &dmadscr[0]);
+
+	dmadscr[1].xfer_val = 0;
+	if (direction == STREAM_PLAYBACK) {
+		dma_dscr_idx = PLAYBACK_END_DMA_DESCR_CH12;
+		dmadscr[1].dest = ACP_SHARED_RAM_BANK_1_ADDRESS;
+		dmadscr[1].src = ACP_INTERNAL_APERTURE_WINDOW_0_ADDRESS +
+			(pte_offset * PAGE_SIZE_4K) + (size / 2);
+		dmadscr[1].xfer_val |= (DISABLE << 22) |
+			(ACP_DMA_ATTRIBUTES_DAGB_ONION_TO_SHAREDMEM << 16) |
+			(size / 2);
+	} else {
+		dma_dscr_idx = CAPTURE_END_DMA_DESCR_CH14;
+		dmadscr[1].dest = dmadscr[0].dest + (size / 2);
+		dmadscr[1].src = dmadscr[0].src + (size / 2);
+		dmadscr[1].xfer_val |= (ENABLE << 22) |
+			(ACP_DMA_ATTRIBUTES_SHAREDMEM_TO_DAGB_ONION << 16) |
+			(size / 2);
+	}
+
+	config_dma_descriptor_in_sram(acp_dev, dma_dscr_idx, &dmadscr[1]);
+
+	if (direction == STREAM_PLAYBACK) {
+		/* starting descriptor for this channel */
+		dma_dscr_idx = PLAYBACK_START_DMA_DESCR_CH12;
+		config_acp_dma_channel(acp_dev, SYSRAM_TO_ACP_CH_NUM,
+					dma_dscr_idx, num_descr,
+					ACP_DMA_PRIORITY_LEVEL_NORMAL);
+	} else {
+		/* starting descriptor for this channel */
+		dma_dscr_idx = CAPTURE_START_DMA_DESCR_CH14;
+		config_acp_dma_channel(acp_dev, ACP_TO_SYSRAM_CH_NUM,
+					dma_dscr_idx, num_descr,
+					ACP_DMA_PRIORITY_LEVEL_NORMAL);
+	}
+}
+
+/* Initialize the DMA descriptor information for transfer between
+ * ACP SRAM <-> I2S
+ */
+static void set_acp_to_i2s_dma_descriptors(struct amd_acp_device *acp_dev,
+					   u32 size, int direction)
+{
+	u16 num_descr;
+	u16 dma_dscr_idx = PLAYBACK_START_DMA_DESCR_CH13;
+	acp_dma_dscr_transfer_t dmadscr[2];
+
+	num_descr = 2;
+
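+	/* Same two-descriptor ping-pong layout as the sysmem channel above */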
+	dmadscr[0].xfer_val = 0;
+	if (direction == STREAM_PLAYBACK) {
+		dma_dscr_idx = PLAYBACK_START_DMA_DESCR_CH13;
+		dmadscr[0].src = ACP_SHARED_RAM_BANK_1_ADDRESS;
+		dmadscr[0].xfer_val |= (ENABLE << 22) | (TO_ACP_I2S_1 << 16) |
+					(size / 2);
+	} else {
+		dma_dscr_idx = CAPTURE_START_DMA_DESCR_CH15;
+		dmadscr[0].dest = ACP_SHARED_RAM_BANK_5_ADDRESS;
+		dmadscr[0].xfer_val |= (ENABLE << 22) |
+					(FROM_ACP_I2S_1 << 16) | (size / 2);
+	}
+
+	config_dma_descriptor_in_sram(acp_dev, dma_dscr_idx, &dmadscr[0]);
+
+	dmadscr[1].xfer_val = 0;
+	if (direction == STREAM_PLAYBACK) {
+		dma_dscr_idx = PLAYBACK_END_DMA_DESCR_CH13;
+		dmadscr[1].src = dmadscr[0].src + (size / 2);
+		dmadscr[1].xfer_val |= (ENABLE << 22) | (TO_ACP_I2S_1 << 16) |
+					(size / 2);
+	} else {
+		dma_dscr_idx = CAPTURE_END_DMA_DESCR_CH15;
+		dmadscr[1].dest = dmadscr[0].dest + (size / 2);
+		dmadscr[1].xfer_val |= (ENABLE << 22) |
+					(FROM_ACP_I2S_1 << 16) | (size / 2);
+	}
+
+	config_dma_descriptor_in_sram(acp_dev, dma_dscr_idx, &dmadscr[1]);
+
+	/* Configure the DMA channel with the above descriptors */
+	if (direction == STREAM_PLAYBACK) {
+		/* starting descriptor for this channel */
+		dma_dscr_idx = PLAYBACK_START_DMA_DESCR_CH13;
+		config_acp_dma_channel(acp_dev, ACP_TO_I2S_DMA_CH_NUM,
+					dma_dscr_idx, num_descr,
+					ACP_DMA_PRIORITY_LEVEL_NORMAL);
+	} else {
+		/* starting descriptor for this channel */
+		dma_dscr_idx = CAPTURE_START_DMA_DESCR_CH15;
+		config_acp_dma_channel(acp_dev, I2S_TO_ACP_DMA_CH_NUM,
+					dma_dscr_idx, num_descr,
+					ACP_DMA_PRIORITY_LEVEL_NORMAL);
+	}
+}
+
+static u16 get_dscr_idx(struct amd_acp_device *acp_dev, int direction)
+{
+	u16 dscr_idx;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
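+	/* Pick the sysmem-side descriptor matching the half of the ring
+	 * buffer that the I2S-side channel has just finished with.
+	 */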
+	if (direction == STREAM_PLAYBACK) {
+		dscr_idx = cgs_read_register(acp_prv->cgs_device,
+							mmACP_DMA_CUR_DSCR_13);
+		dscr_idx = (dscr_idx == PLAYBACK_START_DMA_DESCR_CH13) ?
+				PLAYBACK_START_DMA_DESCR_CH12 :
+				PLAYBACK_END_DMA_DESCR_CH12;
+	} else {
+		dscr_idx = cgs_read_register(acp_prv->cgs_device,
+							mmACP_DMA_CUR_DSCR_15);
+		dscr_idx = (dscr_idx == CAPTURE_START_DMA_DESCR_CH15) ?
+				CAPTURE_END_DMA_DESCR_CH14 :
+				CAPTURE_START_DMA_DESCR_CH14;
+	}
+
+	return dscr_idx;
+}
+
+/* Create page table entries in ACP SRAM for the allocated memory */
+static void acp_pte_config(struct amd_acp_device *acp_dev, struct page *pg,
+			   u16 num_of_pages, u32 pte_offset)
+{
+	u16 page_idx;
+	u64 addr;
+	u32 low;
+	u32 high;
+	u32 offset;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	offset	= ACP_DAGB_GRP_SRBM_SRAM_BASE_OFFSET + (pte_offset * 8);
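+	/* Each PTE occupies 8 bytes of SRAM: the low 32 bits of the page's
+	 * physical address, then the high bits with BIT(31) as page-enable.
+	 */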
+	for (page_idx = 0; page_idx < (num_of_pages); page_idx++) {
+		/* Load the low address of the page into ACP SRAM through SRBM */
+		cgs_write_register(acp_prv->cgs_device,
+				   mmACP_SRBM_Targ_Idx_Addr,
+				   (offset + (page_idx * 8)));
+		addr = page_to_phys(pg);
+
+		low = lower_32_bits(addr);
+		high = upper_32_bits(addr);
+
+		cgs_write_register(acp_prv->cgs_device,
+				   mmACP_SRBM_Targ_Idx_Data, low);
+
+		/* Load the high address of the page into ACP SRAM through SRBM */
+		cgs_write_register(acp_prv->cgs_device,
+				   mmACP_SRBM_Targ_Idx_Addr,
+				   (offset + (page_idx * 8) + 4));
+
+		/* page enable in ACP */
+		high |= BIT(31);
+		cgs_write_register(acp_prv->cgs_device,
+				   mmACP_SRBM_Targ_Idx_Data, high);
+
+		/* Move to the next physically contiguous page */
+		pg++;
+	}
+}
+
+/* enables/disables ACP's external interrupt */
+static void acp_enable_external_interrupts(struct amd_acp_device *acp_dev,
+					   int enable)
+{
+	u32 acp_ext_intr_enb;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	acp_ext_intr_enb = enable ?
+		ACP_EXTERNAL_INTR_ENB__ACPExtIntrEnb_MASK :
+		0;
+
+	/* Write the Software External Interrupt Enable register */
+	cgs_write_register(acp_prv->cgs_device,
+			   mmACP_EXTERNAL_INTR_ENB, acp_ext_intr_enb);
+}
+
+/* Clear (acknowledge) DMA 'Interrupt on Complete' (IOC) in ACP
+ * external interrupt status register
+ */
+static void acp_ext_stat_clear_dmaioc(struct amd_acp_device *acp_dev, u8 ch_num)
+{
+	u32 ext_intr_stat;
+	u32 chmask = BIT(ch_num);
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	ext_intr_stat = cgs_read_register(acp_prv->cgs_device,
+					  mmACP_EXTERNAL_INTR_STAT);
+	if (ext_intr_stat & (chmask <<
+			     ACP_EXTERNAL_INTR_STAT__DMAIOCStat__SHIFT)) {
+
+		ext_intr_stat &= (chmask <<
+				  ACP_EXTERNAL_INTR_STAT__DMAIOCAck__SHIFT);
+		cgs_write_register(acp_prv->cgs_device,
+				   mmACP_EXTERNAL_INTR_STAT, ext_intr_stat);
+	}
+}
+
+/* Check whether ACP DMA interrupt (IOC) is generated or not */
+static u16 acp_get_intr_flag(struct amd_acp_device *acp_dev)
+{
+	u32 ext_intr_status;
+	u32 intr_gen;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	ext_intr_status = cgs_read_register(acp_prv->cgs_device,
+					    mmACP_EXTERNAL_INTR_STAT);
+	intr_gen = (((ext_intr_status &
+		      ACP_EXTERNAL_INTR_STAT__DMAIOCStat_MASK) >>
+		     ACP_EXTERNAL_INTR_STAT__DMAIOCStat__SHIFT));
+
+	return intr_gen;
+}
+
+static int irq_set_source(void *private_data, unsigned src_id, unsigned type,
+								int enabled)
+{
+	struct amd_acp_device *acp_dev =
+		((struct acp_irq_prv *)private_data)->acp_dev;
+
+	if (src_id == VISLANDS30_IV_SRCID_ACP) {
+		acp_enable_external_interrupts(acp_dev, enabled);
+		return 0;
+	} else {
+		return -1;
+	}
+}
+
+static inline void i2s_clear_irqs(struct amd_acp_device *acp_dev,
+				  int direction)
+{
+	u32 i = 0;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
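+	/* Per-channel I2S register banks are spaced 0x10 registers apart */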
+	if (direction == STREAM_PLAYBACK) {
+		for (i = 0; i < 4; i++)
+			cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SSP_TOR0 + (0x10 * i)), 0);
+	} else {
+		for (i = 0; i < 4; i++)
+			cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SMICSP_ROR0 + (0x10 * i)), 0);
+	}
+}
+
+static void i2s_disable_channels(struct amd_acp_device *acp_dev,
+					u32 stream)
+{
+	u32 i = 0;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	if (stream == STREAM_PLAYBACK) {
+		for (i = 0; i < 4; i++)
+			cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SSP_TER0 + (0x10 * i)), 0);
+	} else {
+		for (i = 0; i < 4; i++)
+			cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SMICSP_RER0 + (0x10 * i)), 0);
+	}
+}
+
+static void configure_i2s_stream(struct amd_acp_device *acp_dev,
+				struct audio_substream_data *audio_config)
+{
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	if (audio_config->direction == STREAM_PLAYBACK) {
+		/* Transmit configuration register for data width */
+		cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SSP_TCR0 +
+					(0x10 *	audio_config->ch_reg)),
+					audio_config->xfer_resolution);
+		cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SSP_TFCR0 +
+					(0x10 * audio_config->ch_reg)), 0x02);
+
+		/* Read interrupt mask register */
+		audio_config->irq =
+			cgs_read_register(acp_prv->cgs_device,
+						(mmACP_I2SSP_IMR0 +
+						(0x10 * audio_config->ch_reg)));
+		/* Unmask TX FIFO Overrun, Empty interrupts */
+		cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SSP_IMR0 +
+					(0x10 * audio_config->ch_reg)),
+					(audio_config->irq & ~0x30));
+		/* Enable Transmit */
+		cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SSP_TER0 +
+					(0x10 *	audio_config->ch_reg)), 1);
+	} else {
+		/* Receive configuration register for data width */
+		cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SMICSP_RCR0 +
+					(0x10 * audio_config->ch_reg)),
+					audio_config->xfer_resolution);
+		cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SMICSP_RFCR0 +
+					(0x10 * audio_config->ch_reg)), 0x07);
+		/* Read interrupt mask register */
+		audio_config->irq = cgs_read_register(acp_prv->cgs_device,
+					(mmACP_I2SMICSP_IMR0 +
+					(0x10 * audio_config->ch_reg)));
+
+		/* Unmask RX FIFO Overrun, Data Available interrupts */
+		cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SMICSP_IMR0 +
+					(0x10 * audio_config->ch_reg)),
+					audio_config->irq & ~0x03);
+		/* Enable Receive */
+		cgs_write_register(acp_prv->cgs_device,
+					(mmACP_I2SMICSP_RER0 +
+					(0x10 * audio_config->ch_reg)), 1);
+	}
+}
+
+static void config_acp_dma(struct amd_acp_device *acp_dev,
+			   struct audio_substream_data *audio_config)
+{
+	u32 pte_offset;
+
+	if (audio_config->direction == STREAM_PLAYBACK)
+		pte_offset = PLAYBACK_PTE_OFFSET;
+	else
+		pte_offset = CAPTURE_PTE_OFFSET;
+
+	acp_pte_config(acp_dev, audio_config->pg, audio_config->num_of_pages,
+		       pte_offset);
+
+	/* Configure System memory <-> ACP SRAM DMA descriptors */
+	set_acp_sysmem_dma_descriptors(acp_dev, audio_config->size,
+				       audio_config->direction,
+				       pte_offset);
+
+	/* Configure ACP SRAM <-> I2S DMA descriptors */
+	set_acp_to_i2s_dma_descriptors(acp_dev, audio_config->size,
+					audio_config->direction);
+}
+
+/* Start a given DMA channel transfer */
+static int acp_dma_start(struct amd_acp_device *acp_dev,
+			 u16 ch_num, bool is_circular)
+{
+	int status;
+	u32 dma_ctrl;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	status = STATUS_UNSUCCESSFUL;
+
+	/* read the current dma control register value */
+	dma_ctrl = cgs_read_register(acp_prv->cgs_device,
+				     mmACP_DMA_CNTL_0 + ch_num);
+
+	/* Invalidate the DAGB cache */
+	cgs_write_register(acp_prv->cgs_device,	mmACP_DAGB_ATU_CTRL, ENABLE);
+
+	/* configure the DMA channel and start the DMA transfer
+	 * set dmachrun bit to start the transfer and enable the
+	 * interrupt on completion of the dma transfer
+	 */
+	dma_ctrl |= ACP_DMA_CNTL_0__DMAChRun_MASK;
+
+	if ((ch_num == ACP_TO_I2S_DMA_CH_NUM) ||
+	    (ch_num == ACP_TO_SYSRAM_CH_NUM) ||
+	    (ch_num == I2S_TO_ACP_DMA_CH_NUM)) {
+		dma_ctrl |= ACP_DMA_CNTL_0__DMAChIOCEn_MASK;
+	} else {
+		dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChIOCEn_MASK;
+	}
+
+	/* enable circular DMA for the ACP SRAM to/from I2S channels */
+	if (is_circular) {
+		dma_ctrl |= ACP_DMA_CNTL_0__Circular_DMA_En_MASK;
+		cgs_irq_get(acp_prv->cgs_device, VISLANDS30_IV_SRCID_ACP, 0);
+	} else {
+		dma_ctrl &= ~ACP_DMA_CNTL_0__Circular_DMA_En_MASK;
+	}
+
+	cgs_write_register(acp_prv->cgs_device,	(mmACP_DMA_CNTL_0 + ch_num),
+			   dma_ctrl);
+
+	status = STATUS_SUCCESS;
+	return status;
+}
+
+/* Stop a given DMA channel transfer */
+static int acp_dma_stop(struct amd_acp_device *acp_dev, u8 ch_num)
+{
+	int status = STATUS_UNSUCCESSFUL;
+	u32 dma_ctrl;
+	u32 dma_ch_sts;
+	u32 delay_time = ACP_DMA_RESET_TIME;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	if (acp_dev == NULL)
+		return status;
+
+	dma_ctrl = cgs_read_register(acp_prv->cgs_device,
+				     mmACP_DMA_CNTL_0 + ch_num);
+
+	/* clear the dma control register fields before writing zero
+	 * in reset bit
+	 */
+	dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRun_MASK;
+	dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChIOCEn_MASK;
+
+	cgs_write_register(acp_prv->cgs_device,
+			   (mmACP_DMA_CNTL_0 + ch_num), dma_ctrl);
+	dma_ch_sts = cgs_read_register(acp_prv->cgs_device, mmACP_DMA_CH_STS);
+
+	if (dma_ch_sts & BIT(ch_num)) {
+		/* set the reset bit for this channel
+		 * to stop the dma transfer
+		 */
+		dma_ctrl |= ACP_DMA_CNTL_0__DMAChRst_MASK;
+		cgs_write_register(acp_prv->cgs_device,
+				   (mmACP_DMA_CNTL_0 + ch_num), dma_ctrl);
+	}
+
+	/* if the channel transfer is not stopped within the time delay,
+	 * return this status
+	 */
+	status = -EBUSY;
+
+	/* check the channel status bit for some time and return the status */
+	while (0 < delay_time) {
+		dma_ch_sts = cgs_read_register(acp_prv->cgs_device,
+					       mmACP_DMA_CH_STS);
+		if (!(dma_ch_sts & BIT(ch_num))) {
+			/* clear the reset flag after successfully stopping
+			   the dma transfer and break from the loop */
+			dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK;
+
+			cgs_write_register(acp_prv->cgs_device,
+					   (mmACP_DMA_CNTL_0 + ch_num),
+					   dma_ctrl);
+			status = STATUS_SUCCESS;
+			break;
+		}
+		delay_time--;
+	}
+
+	if ((ch_num == ACP_TO_I2S_DMA_CH_NUM) ||
+	    (ch_num == I2S_TO_ACP_DMA_CH_NUM)) {
+		cgs_irq_put(acp_prv->cgs_device, VISLANDS30_IV_SRCID_ACP, 0);
+	}
+
+	return status;
+}
+
+/* ACP DMA irq handler routine for playback and capture use cases */
+static int dma_irq_handler(void *prv_data)
+{
+	u16 play_acp_i2s_intr, cap_i2s_acp_intr, cap_acp_sysram_intr;
+	u16 dscr_idx, intr_flag;
+	u32 ext_intr_status;
+	int priority_level = 0x0;
+	int dma_transfer_status = STATUS_UNSUCCESSFUL;
+	struct acp_irq_prv *idata = prv_data;
+	struct amd_acp_device *acp_dev = idata->acp_dev;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	intr_flag = acp_get_intr_flag(acp_dev);
+
+	play_acp_i2s_intr = (intr_flag & BIT(ACP_TO_I2S_DMA_CH_NUM));
+	cap_i2s_acp_intr = (intr_flag & BIT(I2S_TO_ACP_DMA_CH_NUM));
+	cap_acp_sysram_intr = (intr_flag & BIT(ACP_TO_SYSRAM_CH_NUM));
+
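+	/* The SRAM<->I2S channels run circular DMA; on each of their IOC
+	 * interrupts, kick the matching sysmem<->SRAM channel once
+	 * (non-circular) to move the next period, then notify ALSA.
+	 */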
+	if (!play_acp_i2s_intr && !cap_i2s_acp_intr && !cap_acp_sysram_intr) {
+		/* We registered for DMA Interrupt-On-Complete interrupts only.
+		 * If we hit here, log it and return. */
+		ext_intr_status = cgs_read_register(acp_prv->cgs_device,
+					    mmACP_EXTERNAL_INTR_STAT);
+		pr_info("ACP: Not a DMA IOC irq: %x\n", ext_intr_status);
+		return 0;
+	}
+
+	if (play_acp_i2s_intr) {
+		dscr_idx = get_dscr_idx(acp_dev, STREAM_PLAYBACK);
+		config_acp_dma_channel(acp_dev, SYSRAM_TO_ACP_CH_NUM, dscr_idx,
+				       1, priority_level);
+		dma_transfer_status = acp_dma_start(acp_dev,
+						    SYSRAM_TO_ACP_CH_NUM,
+						    false);
+		idata->set_elapsed(idata->dev, 1, 0);
+
+		acp_ext_stat_clear_dmaioc(acp_dev, ACP_TO_I2S_DMA_CH_NUM);
+	}
+
+	if (cap_i2s_acp_intr) {
+		dscr_idx = get_dscr_idx(acp_dev, STREAM_CAPTURE);
+		config_acp_dma_channel(acp_dev, ACP_TO_SYSRAM_CH_NUM, dscr_idx,
+				       1, priority_level);
+		dma_transfer_status = acp_dma_start(acp_dev,
+						    ACP_TO_SYSRAM_CH_NUM,
+						    false);
+
+		acp_ext_stat_clear_dmaioc(acp_dev, I2S_TO_ACP_DMA_CH_NUM);
+	}
+
+	if (cap_acp_sysram_intr) {
+		idata->set_elapsed(idata->dev, 0, 1);
+		acp_ext_stat_clear_dmaioc(acp_dev, ACP_TO_SYSRAM_CH_NUM);
+	}
+
+	return 0;
+}
+
+static int irq_handler(void *private_data, unsigned src_id,
+		       const uint32_t *iv_entry)
+{
+	if (src_id == VISLANDS30_IV_SRCID_ACP)
+		return dma_irq_handler(private_data);
+	else
+		return -1;
+}
+
+/* power off a tile/block within ACP */
+static void acp_suspend_tile(struct amd_acp_private *acp_prv, int tile)
+{
+	u32 val = 0;
+	u32 timeout = 0;
+
+	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+		pr_err("%s: Invalid ACP power tile index\n", __func__);
+		return;
+	}
+
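+	/* The PGFSM reads back 0x0 while the tile is powered up; only then
+	 * request power-down (0x500 + tile) and poll until the tile
+	 * reports off.
+	 */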
+	val = cgs_read_register(acp_prv->cgs_device,
+					mmACP_PGFSM_READ_REG_0 + tile);
+	val &= ACP_TILE_ON_MASK;
+
+	if (val == 0x0) {
+		val = cgs_read_register(acp_prv->cgs_device,
+					mmACP_PGFSM_RETAIN_REG);
+		val = val | (1 << tile);
+		cgs_write_register(acp_prv->cgs_device,	mmACP_PGFSM_RETAIN_REG,
+							val);
+		cgs_write_register(acp_prv->cgs_device,	mmACP_PGFSM_CONFIG_REG,
+							0x500 + tile);
+
+		timeout = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+		while (timeout--) {
+			val = cgs_read_register(acp_prv->cgs_device,
+					mmACP_PGFSM_READ_REG_0 + tile);
+			val = val & ACP_TILE_ON_MASK;
+			if (val == ACP_TILE_OFF_MASK)
+				break;
+		}
+
+		val = cgs_read_register(acp_prv->cgs_device,
+					mmACP_PGFSM_RETAIN_REG);
+
+		val |= ACP_TILE_OFF_RETAIN_REG_MASK;
+		cgs_write_register(acp_prv->cgs_device,	mmACP_PGFSM_RETAIN_REG,
+							val);
+	}
+}
+
+/* power on a tile/block within ACP */
+static void acp_resume_tile(struct amd_acp_private *acp_prv, int tile)
+{
+	u32 val = 0;
+	u32 timeout = 0;
+
+	if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
+		pr_err("%s: Invalid ACP power tile index\n", __func__);
+		return;
+	}
+
+	val = cgs_read_register(acp_prv->cgs_device,
+					mmACP_PGFSM_READ_REG_0 + tile);
+	val = val & ACP_TILE_ON_MASK;
+
+	if (val != 0x0) {
+		cgs_write_register(acp_prv->cgs_device,	mmACP_PGFSM_CONFIG_REG,
+							0x600 + tile);
+		timeout = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+		while (timeout--) {
+			val = cgs_read_register(acp_prv->cgs_device,
+					mmACP_PGFSM_READ_REG_0 + tile);
+			val = val & ACP_TILE_ON_MASK;
+			if (val == 0x0)
+				break;
+		}
+		val = cgs_read_register(acp_prv->cgs_device,
+					mmACP_PGFSM_RETAIN_REG);
+		if (tile == ACP_TILE_P1)
+			val = val & (ACP_TILE_P1_MASK);
+		else if (tile == ACP_TILE_P2)
+			val = val & (ACP_TILE_P2_MASK);
+
+		cgs_write_register(acp_prv->cgs_device,	mmACP_PGFSM_RETAIN_REG,
+							val);
+	}
+}
+
+/* Shut down unused SRAM memory banks in the ACP IP */
+static void acp_turnoff_sram_banks(struct amd_acp_private *acp_prv)
+{
+	/* Bank 0:    used for DMA descriptors
+	 * Banks 1-4: used for playback
+	 * Banks 5-8: used for capture
+	 * Each bank is 8kB; the ALSA driver reserves at most
+	 * 16kB (max period size) * 2 (max periods) for each of
+	 * playback and capture.
+	 * Turn off all other SRAM banks during playback/capture.
+	 */
+	u32 val, bank;
+
+	for (bank = 9; bank < 32; bank++) {
+		val = cgs_read_register(acp_prv->cgs_device,
+					mmACP_MEM_SHUT_DOWN_REQ_LO);
+		if (!(val & (1 << bank))) {
+			val |= 1 << bank;
+			cgs_write_register(acp_prv->cgs_device,
+					mmACP_MEM_SHUT_DOWN_REQ_LO, val);
+			/* If ACP_MEM_SHUT_DOWN_STS_LO is 0xFFFFFFFF, then
+			 * shutdown sequence is complete. */
+			do {
+				val = cgs_read_register(acp_prv->cgs_device,
+						mmACP_MEM_SHUT_DOWN_STS_LO);
+			} while (val != 0xFFFFFFFF);
+		}
+	}
+
+	for (bank = 32; bank < 48; bank++) {
+		val = cgs_read_register(acp_prv->cgs_device,
+					mmACP_MEM_SHUT_DOWN_REQ_HI);
+		if (!(val & (1 << (bank - 32)))) {
+			val |= 1 << (bank - 32);
+			cgs_write_register(acp_prv->cgs_device,
+					mmACP_MEM_SHUT_DOWN_REQ_HI, val);
+			/* If ACP_MEM_SHUT_DOWN_STS_HI is 0x0000FFFF, then
+			 * shutdown sequence is complete. */
+			do {
+				val = cgs_read_register(acp_prv->cgs_device,
+						mmACP_MEM_SHUT_DOWN_STS_HI);
+			} while (val != 0x0000FFFF);
+		}
+	}
+}
+
+/* Initialize and bring ACP hardware to default state. */
+static void acp_init(struct amd_acp_private *acp_prv)
+{
+	u32 val;
+	u32 timeout_value;
+
+	/* Assert Soft reset of ACP */
+	val = cgs_read_register(acp_prv->cgs_device, mmACP_SOFT_RESET);
+
+	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+	cgs_write_register(acp_prv->cgs_device,
+			   mmACP_SOFT_RESET, val);
+
+	timeout_value = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+	while (timeout_value--) {
+		val = cgs_read_register(acp_prv->cgs_device, mmACP_SOFT_RESET);
+		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+			break;
+	}
+
+	/* Enable clock to ACP and wait until the clock is enabled */
+	val = cgs_read_register(acp_prv->cgs_device, mmACP_CONTROL);
+	val = val | ACP_CONTROL__ClkEn_MASK;
+	cgs_write_register(acp_prv->cgs_device,	mmACP_CONTROL, val);
+
+	timeout_value = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+	while (timeout_value--) {
+		val = cgs_read_register(acp_prv->cgs_device, mmACP_STATUS);
+		if (val & (u32) 0x1)
+			break;
+		udelay(100);
+	}
+
+	/* Deassert the SOFT RESET flags */
+	val = cgs_read_register(acp_prv->cgs_device, mmACP_SOFT_RESET);
+	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
+	cgs_write_register(acp_prv->cgs_device,	mmACP_SOFT_RESET, val);
+
+	/* initializing Onion Control DAGB register */
+	cgs_write_register(acp_prv->cgs_device, mmACP_AXI2DAGB_ONION_CNTL,
+			   ONION_CNTL_DEFAULT);
+
+	/* initializing Garlic Control DAGB register */
+	cgs_write_register(acp_prv->cgs_device, mmACP_AXI2DAGB_GARLIC_CNTL,
+			   GARLIC_CNTL_DEFAULT);
+
+	acp_dma_descr_init(acp_prv);
+
+	cgs_write_register(acp_prv->cgs_device,	mmACP_DMA_DESC_BASE_ADDR,
+			   ACP_SRAM_BASE_ADDRESS);
+
+	/* Number of descriptors in SRAM: 0x4 means 256 descriptors (64 * 4) */
+	cgs_write_register(acp_prv->cgs_device, mmACP_DMA_DESC_MAX_NUM_DSCR,
+			   0x4);
+
+	cgs_write_register(acp_prv->cgs_device,	mmACP_EXTERNAL_INTR_CNTL,
+			   ACP_EXTERNAL_INTR_CNTL__DMAIOCMask_MASK);
+
+	acp_turnoff_sram_banks(acp_prv);
+	pr_info("ACP: Initialized.\n");
+}
+
+static int acp_hw_init(struct amd_acp_device *acp_dev, void *iprv)
+{
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	acp_init(acp_prv);
+
+	cgs_add_irq_source(acp_prv->cgs_device, VISLANDS30_IV_SRCID_ACP, 1,
+			   irq_set_source, irq_handler, iprv);
+
+	/* Disable DSPs which are not used */
+	acp_suspend_tile(acp_prv, ACP_TILE_DSP0);
+	acp_suspend_tile(acp_prv, ACP_TILE_DSP1);
+	acp_suspend_tile(acp_prv, ACP_TILE_DSP2);
+
+	return STATUS_SUCCESS;
+}
+
+/* Deinitialize ACP */
+static void acp_deinit(struct amd_acp_private *acp_prv)
+{
+	u32 val;
+	u32 timeout_value;
+
+	/* Assert Soft reset of ACP */
+	val = cgs_read_register(acp_prv->cgs_device, mmACP_SOFT_RESET);
+
+	val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+	cgs_write_register(acp_prv->cgs_device,	mmACP_SOFT_RESET, val);
+
+	timeout_value = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+	while (timeout_value--) {
+		val = cgs_read_register(acp_prv->cgs_device, mmACP_SOFT_RESET);
+		if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+		    (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+			break;
+	}
+	/* Disable the ACP clock */
+	val = cgs_read_register(acp_prv->cgs_device, mmACP_CONTROL);
+	val &= ~ACP_CONTROL__ClkEn_MASK;
+	cgs_write_register(acp_prv->cgs_device, mmACP_CONTROL, val);
+
+	timeout_value = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+	while (timeout_value--) {
+		val = cgs_read_register(acp_prv->cgs_device, mmACP_STATUS);
+		if (!(val & (u32) 0x1))
+			break;
+		udelay(100);
+	}
+
+	pr_info("ACP: De-Initialized.\n");
+}
+
+static void acp_hw_deinit(struct amd_acp_device *acp_dev)
+{
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	acp_deinit(acp_prv);
+}
+
+/* Update the DMA position in the audio ring buffer at period-level
+ * granularity.  This is used by the ALSA PCM driver.
+ */
+static u32 acp_update_dma_pointer(struct amd_acp_device *acp_dev, int direction,
+				  u32 period_size)
+{
+	u32 pos;
+	u16 dscr;
+	u32 mul;
+	u32 dma_config;
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	pos = 0;
+
+	if (direction == STREAM_PLAYBACK) {
+		dscr = cgs_read_register(acp_prv->cgs_device,
+					 mmACP_DMA_CUR_DSCR_13);
+
+		mul = (dscr == PLAYBACK_START_DMA_DESCR_CH13) ? 0 : 1;
+		pos =  (mul * period_size);
+
+	} else {
+		dma_config = cgs_read_register(acp_prv->cgs_device,
+					       mmACP_DMA_CNTL_14);
+		if (dma_config != 0) {
+			dscr = cgs_read_register(acp_prv->cgs_device,
+					 mmACP_DMA_CUR_DSCR_14);
+			mul = (dscr == CAPTURE_START_DMA_DESCR_CH14) ? 1 : 2;
+			pos = (mul * period_size);
+		}
+
+		if (pos >= (2 * period_size))
+			pos = 0;
+
+	}
+	return pos;
+}
+
+/* Wait for initial buffering to complete in the HOST to SRAM DMA
+ * channel for the playback use case
+ */
+static void wait_for_prebuffer_finish(struct amd_acp_device *acp_dev)
+{
+	u32 dma_ch_sts;
+	u32 channel_mask = BIT(SYSRAM_TO_ACP_CH_NUM);
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	do {
+		/* Read the channel status to poll dma transfer completion
+		 * (System RAM to SRAM)
+		 * In this case, it will be runtime->start_threshold
+		 * (2 ALSA periods) of transfer. Rendering starts after this
+		 * threshold is met.
+		 */
+		dma_ch_sts = cgs_read_register(acp_prv->cgs_device,
+					       mmACP_DMA_CH_STS);
+		udelay(20);
+	} while (dma_ch_sts & channel_mask);
+}
+
+static void i2s_reset(struct amd_acp_device *acp_dev, int direction)
+{
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	if (direction == STREAM_PLAYBACK)
+		cgs_write_register(acp_prv->cgs_device,	mmACP_I2SSP_TXFFR, 1);
+	else
+		cgs_write_register(acp_prv->cgs_device,
+				   mmACP_I2SMICSP_RXFFR, 1);
+}
+
+static void i2s_start(struct amd_acp_device *acp_dev, int direction)
+{
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	if (direction == STREAM_PLAYBACK) {
+		cgs_write_register(acp_prv->cgs_device,	mmACP_I2SSP_IER, 1);
+		cgs_write_register(acp_prv->cgs_device,	mmACP_I2SSP_ITER, 1);
+
+	} else {
+		cgs_write_register(acp_prv->cgs_device, mmACP_I2SMICSP_IER, 1);
+		cgs_write_register(acp_prv->cgs_device, mmACP_I2SMICSP_IRER, 1);
+	}
+
+	cgs_write_register(acp_prv->cgs_device,	mmACP_I2SSP_CER, 1);
+}
+
+static void i2s_stop(struct amd_acp_device *acp_dev, int direction)
+{
+	struct amd_acp_private *acp_prv = (struct amd_acp_private *)acp_dev;
+
+	i2s_clear_irqs(acp_dev, direction);
+
+	if (direction == STREAM_PLAYBACK)
+		cgs_write_register(acp_prv->cgs_device, mmACP_I2SSP_ITER, 0);
+	else
+		cgs_write_register(acp_prv->cgs_device,	mmACP_I2SMICSP_IRER, 0);
+
+	if (direction == STREAM_PLAYBACK) {
+		cgs_write_register(acp_prv->cgs_device,	mmACP_I2SSP_CER, 0);
+		cgs_write_register(acp_prv->cgs_device,	mmACP_I2SSP_IER, 0);
+	} else {
+		cgs_write_register(acp_prv->cgs_device,	mmACP_I2SMICSP_CER, 0);
+		cgs_write_register(acp_prv->cgs_device,	mmACP_I2SMICSP_IER, 0);
+	}
+}
+
+static void configure_i2s(struct amd_acp_device *acp_dev,
+			  struct audio_substream_data *audio_config)
+{
+	i2s_disable_channels(acp_dev, audio_config->direction);
+	configure_i2s_stream(acp_dev, audio_config);
+}
+
+void amd_acp_pcm_suspend(struct amd_acp_device *acp_dev)
+{
+	struct amd_acp_private *acp_prv;
+
+	acp_prv = (struct amd_acp_private *)acp_dev;
+	amd_acp_suspend(acp_prv);
+}
+
+void amd_acp_pcm_resume(struct amd_acp_device *acp_dev)
+{
+	struct amd_acp_private *acp_prv;
+
+	acp_prv = (struct amd_acp_private *)acp_dev;
+	amd_acp_resume(acp_prv);
+}
+
+int amd_acp_hw_init(void *cgs_device,
+		    unsigned acp_version_major, unsigned acp_version_minor,
+		    struct amd_acp_private **acp_private)
+{
+	unsigned int acp_mode = ACP_MODE_I2S;
+
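+	/* ACP 2.2 can be strapped for Azalia (HD-Audio) instead of I2S;
+	 * read the select register and return -ENODEV on Azalia boards so
+	 * the caller skips ACP setup.
+	 */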
+	if ((acp_version_major == 2) && (acp_version_minor == 2))
+		acp_mode = cgs_read_register(cgs_device,
+					mmACP_AZALIA_I2S_SELECT);
+
+	if (acp_mode != ACP_MODE_I2S)
+		return -ENODEV;
+
+	*acp_private = kzalloc(sizeof(struct amd_acp_private), GFP_KERNEL);
+	if (*acp_private == NULL)
+		return -ENOMEM;
+
+	(*acp_private)->cgs_device = cgs_device;
+	(*acp_private)->acp_version_major = acp_version_major;
+	(*acp_private)->acp_version_minor = acp_version_minor;
+
+	(*acp_private)->public.init = acp_hw_init;
+	(*acp_private)->public.fini = acp_hw_deinit;
+	(*acp_private)->public.config_dma = config_acp_dma;
+	(*acp_private)->public.config_dma_channel = config_acp_dma_channel;
+	(*acp_private)->public.dma_start = acp_dma_start;
+	(*acp_private)->public.dma_stop = acp_dma_stop;
+	(*acp_private)->public.update_dma_pointer = acp_update_dma_pointer;
+	(*acp_private)->public.prebuffer_audio = wait_for_prebuffer_finish;
+
+	(*acp_private)->public.i2s_reset = i2s_reset;
+	(*acp_private)->public.config_i2s = configure_i2s;
+	(*acp_private)->public.i2s_start = i2s_start;
+	(*acp_private)->public.i2s_stop = i2s_stop;
+
+	(*acp_private)->public.acp_suspend = amd_acp_pcm_suspend;
+	(*acp_private)->public.acp_resume = amd_acp_pcm_resume;
+
+	return 0;
+}
+
+int amd_acp_hw_fini(struct amd_acp_private *acp_private)
+{
+	kfree(acp_private);
+	return 0;
+}
+
+void amd_acp_suspend(struct amd_acp_private *acp_private)
+{
+	acp_suspend_tile(acp_private, ACP_TILE_P2);
+	acp_suspend_tile(acp_private, ACP_TILE_P1);
+}
+
+void amd_acp_resume(struct amd_acp_private *acp_private)
+{
+	acp_resume_tile(acp_private, ACP_TILE_P1);
+	acp_resume_tile(acp_private, ACP_TILE_P2);
+
+	acp_init(acp_private);
+
+	/* Disable DSPs which are not going to be used */
+	acp_suspend_tile(acp_private, ACP_TILE_DSP0);
+	acp_suspend_tile(acp_private, ACP_TILE_DSP1);
+	acp_suspend_tile(acp_private, ACP_TILE_DSP2);
+}
diff --git a/drivers/gpu/drm/amd/acp/acp_hw.h b/drivers/gpu/drm/amd/acp/acp_hw.h
new file mode 100644
index 0000000..e3a102c
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/acp_hw.h
@@ -0,0 +1,91 @@ 
+#ifndef __ACP_HW_H
+#define __ACP_HW_H
+
+#define ACP_MODE_I2S				0
+#define ACP_MODE_AZ				1
+
+#define DISABLE					0
+#define ENABLE					1
+
+#define PAGE_SIZE_4K				4096
+#define PAGE_SIZE_4K_ENABLE			0x02
+
+#define PLAYBACK_PTE_OFFSET			10
+#define CAPTURE_PTE_OFFSET			0
+
+#define GARLIC_CNTL_DEFAULT			0x00000FB4
+#define ONION_CNTL_DEFAULT			0x00000FB4
+
+#define ACP_PHYSICAL_BASE			0x14000
+
+/* Playback SRAM address (as a destination in dma descriptor) */
+#define ACP_SHARED_RAM_BANK_1_ADDRESS		0x4002000
+
+/* Capture SRAM address (as a source in dma descriptor) */
+#define ACP_SHARED_RAM_BANK_5_ADDRESS		0x400A000
+
+#define ACP_DMA_RESET_TIME			10000
+#define ACP_CLOCK_EN_TIME_OUT_VALUE		0x000000FF
+#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE	0x000000FF
+#define ACP_DMA_COMPLETE_TIME_OUT_VALUE		0x000000FF
+
+#define ACP_SRAM_BASE_ADDRESS			0x4000000
+#define ACP_DAGB_GRP_SRAM_BASE_ADDRESS		0x4001000
+#define ACP_DAGB_GRP_SRBM_SRAM_BASE_OFFSET	0x1000
+#define ACP_INTERNAL_APERTURE_WINDOW_0_ADDRESS	0x00000000
+#define ACP_INTERNAL_APERTURE_WINDOW_4_ADDRESS	0x01800000
+
+#define TO_ACP_I2S_1   0x2
+#define TO_ACP_I2S_2   0x4
+#define FROM_ACP_I2S_1 0xa
+#define FROM_ACP_I2S_2 0xb
+
+#define ACP_TILE_ON_MASK                0x03
+#define ACP_TILE_OFF_MASK               0x02
+#define ACP_TILE_ON_RETAIN_REG_MASK     0x1f
+#define ACP_TILE_OFF_RETAIN_REG_MASK    0x20
+
+#define ACP_TILE_P1_MASK                0x3e
+#define ACP_TILE_P2_MASK                0x3d
+#define ACP_TILE_DSP0_MASK              0x3b
+#define ACP_TILE_DSP1_MASK              0x37
+#define ACP_TILE_DSP2_MASK              0x2f
+
+enum {
+	ACP_TILE_P1 = 0,
+	ACP_TILE_P2,
+	ACP_TILE_DSP0,
+	ACP_TILE_DSP1,
+	ACP_TILE_DSP2,
+};
+
+enum {
+	STREAM_PLAYBACK = 0,
+	STREAM_CAPTURE,
+	STREAM_LAST = STREAM_CAPTURE,
+};
+
+enum {
+	ACP_DMA_ATTRIBUTES_SHAREDMEM_TO_DAGB_ONION = 0x0,
+	ACP_DMA_ATTRIBUTES_SHARED_MEM_TO_DAGB_GARLIC = 0x1,
+	ACP_DMA_ATTRIBUTES_DAGB_ONION_TO_SHAREDMEM = 0x8,
+	ACP_DMA_ATTRIBUTES_DAGB_GARLIC_TO_SHAREDMEM = 0x9,
+	ACP_DMA_ATTRIBUTES_FORCE_SIZE = 0xF
+};
+
+typedef struct acp_dma_dscr_transfer {
+	/* Specifies the source memory location for the DMA data transfer. */
+	u32 src;
+	/* Specifies the destination memory location to which the data
+	 * will be transferred.
+	 */
+	u32 dest;
+	/* Specifies the number of bytes to be transferred from source
+	 * to destination memory.  Also packs the transfer direction and
+	 * the IOC enable bit.
+	 */
+	u32 xfer_val;
+	/* Reserved for future use */
+	u32 reserved;
+} acp_dma_dscr_transfer_t;
+
+#endif /*__ACP_HW_H */
diff --git a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
new file mode 100644
index 0000000..d6988c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
@@ -0,0 +1,49 @@ 
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#ifndef _ACP_GFX_IF_H
+#define _ACP_GFX_IF_H
+
+#include <linux/types.h>
+#include <linux/mfd/amd_acp.h>
+#include "cgs_linux.h"
+#include "cgs_common.h"
+
+struct amd_acp_private {
+	/* The public structure comes first, so that pointers can be cast
+	 * between the public and private structures */
+	struct amd_acp_device public;
+
+	/* private elements not exposed through the bus interface */
+	void *cgs_device;
+	unsigned acp_version_major, acp_version_minor;
+};
+
+int amd_acp_hw_init(void *cgs_device,
+		    unsigned acp_version_major, unsigned acp_version_minor,
+		    struct amd_acp_private **apriv);
+int amd_acp_hw_fini(struct amd_acp_private *apriv);
+void amd_acp_suspend(struct amd_acp_private *acp_private);
+void amd_acp_resume(struct amd_acp_private *acp_private);
+
+#endif /* _ACP_GFX_IF_H */
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 04c2707..44de879 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -5,7 +5,8 @@ 
 ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
 	-Idrivers/gpu/drm/amd/include \
 	-Idrivers/gpu/drm/amd/amdgpu \
-	-Idrivers/gpu/drm/amd/scheduler
+	-Idrivers/gpu/drm/amd/scheduler \
+	-Idrivers/gpu/drm/amd/acp/include
 
 amdgpu-y := amdgpu_drv.o
 
@@ -89,6 +90,16 @@  amdgpu-y += \
 	../scheduler/sched_fence.o \
 	amdgpu_sched.o
 
+# ACP component
+ifneq ($(CONFIG_DRM_AMD_ACP),)
+amdgpu-y += amdgpu_acp.o
+
+AMDACPPATH := ../acp
+include drivers/gpu/drm/amd/acp/Makefile
+
+amdgpu-y += $(AMD_ACP_FILES)
+endif
+
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2fc58e6..261ae46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -52,6 +52,7 @@ 
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
+#include "amdgpu_acp.h"
 
 #include "gpu_scheduler.h"
 
@@ -1935,6 +1936,10 @@  struct amdgpu_device {
 	struct pci_dev			*pdev;
 	struct rw_semaphore		exclusive_lock;
 
+#ifdef CONFIG_DRM_AMD_ACP
+	struct amdgpu_acp		acp;
+#endif
+
 	/* ASIC */
 	enum amd_asic_type		asic_type;
 	uint32_t			family;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
new file mode 100644
index 0000000..03d0cff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -0,0 +1,208 @@ 
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "amdgpu.h"
+#include "atom.h"
+#include "amdgpu_acp.h"
+
+#include "acp_gfx_if.h"
+
+static int acp_early_init(void *handle)
+{
+	return 0;
+}
+
+static int acp_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	adev->acp.parent = adev->dev;
+
+	adev->acp.cgs_device =
+		amdgpu_cgs_create_device(adev);
+	if (!adev->acp.cgs_device)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int acp_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->acp.cgs_device)
+		amdgpu_cgs_destroy_device(adev->acp.cgs_device);
+
+	return 0;
+}
+
+/**
+ * acp_hw_init - start and initialize the ACP block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+static int acp_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+	const struct amdgpu_ip_block_version *ip_version =
+		amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
+
+	if (!ip_version)
+		return -EINVAL;
+
+	r = amd_acp_hw_init(adev->acp.cgs_device,
+			    ip_version->major, ip_version->minor,
+			    &adev->acp.private);
+	/* -ENODEV means board uses AZ rather than ACP */
+	if (r == -ENODEV)
+		return 0;
+	else if (r)
+		return r;
+
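+	/* Expose the ACP as an MFD cell; the amd_acp_private pointer
+	 * travels to the child device as platform data so the ALSA
+	 * driver can pick up the ACP interface.
+	 */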
+	adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell), GFP_KERNEL);
+
+	if (adev->acp.acp_cell == NULL)
+		return -ENOMEM;
+
+	adev->acp.acp_cell->name = "acp-i2s-audio";
+	adev->acp.acp_cell->platform_data = adev->acp.private;
+	adev->acp.acp_cell->pdata_size = sizeof(struct amd_acp_private);
+
+	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 1);
+
+	if (r) {
+		amd_acp_hw_fini(adev->acp.private);
+		return r;
+	}
+	return 0;
+}
+
+/**
+ * acp_hw_fini - stop the hardware block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+static int acp_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->acp.private) {
+		amd_acp_hw_fini(adev->acp.private);
+		mfd_remove_devices(adev->acp.parent);
+		kfree(adev->acp.acp_cell);
+	}
+
+	return 0;
+}
+
+static int acp_suspend(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->acp.private)
+		amd_acp_suspend(adev->acp.private);
+
+	return 0;
+}
+
+static int acp_resume(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->acp.private)
+		amd_acp_resume(adev->acp.private);
+
+	return 0;
+}
+
+static bool acp_is_idle(void *handle)
+{
+	return true;
+}
+
+static int acp_wait_for_idle(void *handle)
+{
+	return 0;
+}
+
+static int acp_soft_reset(void *handle)
+{
+	return 0;
+}
+
+static void acp_print_status(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	dev_info(adev->dev, "ACP STATUS\n");
+}
+
+static int acp_set_clockgating_state(void *handle,
+				     enum amd_clockgating_state state)
+{
+	return 0;
+}
+
+static int acp_set_powergating_state(void *handle,
+				     enum amd_powergating_state state)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* This doesn't actually powergate the ACP block.
+	 * That's done in the dpm code via the SMC.  This
+	 * just re-inits the block as necessary.  The actual
+	 * gating still happens in the dpm code.  We should
+	 * revisit this when there is a cleaner line between
+	 * the smc and the hw blocks
+	 */
+	if (state == AMD_PG_STATE_GATE) {
+		if (adev->acp.private)
+			amd_acp_suspend(adev->acp.private);
+	} else {
+		if (adev->acp.private)
+			amd_acp_resume(adev->acp.private);
+	}
+	return 0;
+}
+
+const struct amd_ip_funcs acp_ip_funcs = {
+	.early_init = acp_early_init,
+	.late_init = NULL,
+	.sw_init = acp_sw_init,
+	.sw_fini = acp_sw_fini,
+	.hw_init = acp_hw_init,
+	.hw_fini = acp_hw_fini,
+	.suspend = acp_suspend,
+	.resume = acp_resume,
+	.is_idle = acp_is_idle,
+	.wait_for_idle = acp_wait_for_idle,
+	.soft_reset = acp_soft_reset,
+	.print_status = acp_print_status,
+	.set_clockgating_state = acp_set_clockgating_state,
+	.set_powergating_state = acp_set_powergating_state,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
new file mode 100644
index 0000000..77b70e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.h
@@ -0,0 +1,40 @@ 
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_ACP_H__
+#define __AMDGPU_ACP_H__
+
+#include <linux/mfd/core.h>
+
+struct amdgpu_acp {
+	struct device *parent;
+	void *cgs_device;
+	struct amd_acp_private *private;
+	struct mfd_cell *acp_cell;
+};
+
+extern const struct amd_ip_funcs acp_ip_funcs;
+
+#endif /* __AMDGPU_ACP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 552d9e75..faa142d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -71,6 +71,9 @@ 
 #include "uvd_v5_0.h"
 #include "uvd_v6_0.h"
 #include "vce_v3_0.h"
+#if defined(CONFIG_DRM_AMD_ACP)
+#include "amdgpu_acp.h"
+#endif
 
 /*
  * Indirect registers accessor
@@ -1298,6 +1301,15 @@  static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 		.rev = 0,
 		.funcs = &vce_v3_0_ip_funcs,
 	},
+#if defined(CONFIG_DRM_AMD_ACP)
+	{
+		.type = AMD_IP_BLOCK_TYPE_ACP,
+		.major = 2,
+		.minor = 2,
+		.rev = 0,
+		.funcs = &acp_ip_funcs,
+	},
+#endif
 };
 
 int vi_set_ip_blocks(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 68a8eaa..54bf96a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -72,6 +72,7 @@  enum amd_ip_block_type {
 	AMD_IP_BLOCK_TYPE_SDMA,
 	AMD_IP_BLOCK_TYPE_UVD,
 	AMD_IP_BLOCK_TYPE_VCE,
+	AMD_IP_BLOCK_TYPE_ACP,
 };
 
 enum amd_clockgating_state {
diff --git a/include/linux/mfd/amd_acp.h b/include/linux/mfd/amd_acp.h
new file mode 100644
index 0000000..fd2396d
--- /dev/null
+++ b/include/linux/mfd/amd_acp.h
@@ -0,0 +1,211 @@ 
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#ifndef _AMD_ACP_H
+#define _AMD_ACP_H
+
+#include <linux/types.h>
+
+/* Playback DMA channels */
+#define SYSRAM_TO_ACP_CH_NUM 12
+#define ACP_TO_I2S_DMA_CH_NUM 13
+
+/* Capture DMA channels */
+#define ACP_TO_SYSRAM_CH_NUM 14
+#define I2S_TO_ACP_DMA_CH_NUM 15
+
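+/* Playback chains channel 12 (system RAM -> ACP SRAM) into channel 13
+ * (ACP SRAM -> I2S); capture chains channel 15 (I2S -> ACP SRAM) into
+ * channel 14 (ACP SRAM -> system RAM).
+ */
+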
+#define PLAYBACK_START_DMA_DESCR_CH12 0
+#define PLAYBACK_END_DMA_DESCR_CH12 1
+
+#define PLAYBACK_START_DMA_DESCR_CH13 2
+#define PLAYBACK_END_DMA_DESCR_CH13 3
+
+
+#define CAPTURE_START_DMA_DESCR_CH14 4
+#define CAPTURE_END_DMA_DESCR_CH14 5
+
+#define CAPTURE_START_DMA_DESCR_CH15 6
+#define CAPTURE_END_DMA_DESCR_CH15 7
+
+#define STATUS_SUCCESS 0
+#define STATUS_UNSUCCESSFUL -1
+
+enum acp_dma_priority_level {
+	/* 0x0 Specifies the DMA channel is given normal priority */
+	ACP_DMA_PRIORITY_LEVEL_NORMAL = 0x0,
+	/* 0x1 Specifies the DMA channel is given high priority */
+	ACP_DMA_PRIORITY_LEVEL_HIGH = 0x1,
+	ACP_DMA_PRIORITY_LEVEL_FORCESIZE = 0xFF
+};
+
+struct audio_substream_data {
+	struct amd_acp_device *acp_dev;
+	struct page *pg;
+	unsigned int order;
+	u16 num_of_pages;
+	u16 direction;
+	u32 xfer_resolution;
+	u32 irq;
+	u32 ch_reg;
+	uint64_t size;
+};
+
+struct acp_irq_prv {
+	struct device *dev;
+	struct amd_acp_device *acp_dev;
+	void (*set_elapsed)(struct device *pdev, u16 play_intr,
+						u16 capture_intr);
+};
+
+/* Public interface of ACP device exposed on AMD GNB bus */
+struct amd_acp_device {
+	/* Handshake when ALSA driver connects, disconnects
+	 * TBD: is this really needed? */
+	int (*init)(struct amd_acp_device *acp_dev, void *iprv);
+	void (*fini)(struct amd_acp_device *acp_dev);
+
+	/**
+	 * config_dma() - Configure ACP internal DMA controller
+	 * @acp_dev:	    acp device
+	 * @audio_config:   DMA configuration parameters
+	 *
+	 * This will configure the DMA controller with the given
+	 * configuration parameters.
+	 */
+	void (*config_dma)(struct amd_acp_device *acp_dev,
+			   struct audio_substream_data *audio_config);
+
+	/**
+	 * config_dma_channel() - Configure ACP DMA channel
+	 * @acp_dev:	    acp device
+	 * @ch_num:	    channel number to be configured
+	 * @dscr_strt_idx:  DMA descriptor starting index
+	 * @num_dscrs:	    number of descriptors in the transfer
+	 * @priority_level: priority level of channel
+	 *
+	 * This will configure the DMA channel with the given
+	 * configuration parameters.
+	 */
+	void (*config_dma_channel)(struct amd_acp_device *acp_dev,
+				   u8 ch_num, u16 dscr_strt_idx, u16 num_dscrs,
+				   enum acp_dma_priority_level priority_level);
+
+	/**
+	 * dma_start() - Start ACP DMA engine
+	 * @acp_dev:	 acp device
+	 * @ch_num:	 DMA channel number
+	 * @is_circular: configure circular DMA
+	 *
+	 * Start DMA channel as configured.
+	 */
+	int (*dma_start)(struct amd_acp_device *acp_dev, u16 ch_num,
+			  bool is_circular);
+
+	/**
+	 * dma_stop() - Stop ACP DMA engine
+	 * @acp_dev:	acp device
+	 * @ch_num:	DMA channel number
+	 *
+	 * Stop DMA channel as configured.
+	 */
+	int (*dma_stop)(struct amd_acp_device *acp_dev, u8 ch_num);
+
+	/**
+	 * update_dma_pointer() - Query the buffer position
+	 * @acp_dev:	 acp device
+	 * @direction:   DMA transfer direction
+	 * @period_size: period size, in ALSA terminology
+	 *
+	 * This will query the buffer position from ACP IP, based on data
+	 * produced/consumed
+	 */
+	u32 (*update_dma_pointer)(struct amd_acp_device *acp_dev,
+				  int direction, u32 period_size);
+
+	/**
+	 * prebuffer_audio() - Wait for buffering to complete
+	 * @acp_dev:	acp device
+	 *
+	 * Wait for buffering to complete in HOST to SRAM DMA channel.
+	 */
+	void (*prebuffer_audio)(struct amd_acp_device *acp_dev);
+
+	/**
+	 * i2s_reset() -  Reset i2s FIFOs
+	 * @acp_dev:	  acp device
+	 * @direction:    direction of stream – playback/record
+	 *
+	 * Resets I2S FIFOs
+	 */
+	void (*i2s_reset)(struct amd_acp_device *acp_dev, int direction);
+
+	/**
+	 * config_i2s() - Configure the i2s controller
+	 * @acp_dev:    acp device
+	 * @audio_config: configuration of the i2s controller
+	 *
+	 * This will configure the i2s controller instance used on the
+	 * board, with the given configuration parameters.
+	 */
+	void (*config_i2s)(struct amd_acp_device *acp_dev,
+			   struct audio_substream_data *audio_config);
+
+	/**
+	 * i2s_start() - Start i2s controller
+	 * @acp_dev:	  acp device
+	 * @direction:    direction of stream – playback/record
+	 *
+	 * Starts I2S data transmission
+	 */
+	void (*i2s_start)(struct amd_acp_device *acp_dev, int direction);
+
+	/**
+	 * i2s_stop() - Stop i2s controller
+	 * @acp_dev:	acp device
+	 * @direction:	direction of stream – playback/record
+	 *
+	 * Stops I2S data transmission
+	 */
+	void (*i2s_stop)(struct amd_acp_device *acp_dev, int direction);
+
+	/**
+	 * acp_suspend() - Power off ACP
+	 * @acp_dev:	acp device
+	 *
+	 * Switch off power tiles of ACP
+	 */
+	void (*acp_suspend)(struct amd_acp_device *acp_dev);
+
+	/**
+	 * acp_resume() - Power on ACP
+	 * @acp_dev:	acp device
+	 *
+	 * Switch on power tiles of ACP
+	 */
+	void (*acp_resume)(struct amd_acp_device *acp_dev);
+
+	/* TODO: Need callback registration interface for asynchronous
+	 * notifications */
+};
+
+#endif /* _AMD_ACP_H */